Update to current version of Go library.

From-SVN: r171427
This commit is contained in:
Ian Lance Taylor 2011-03-24 23:46:17 +00:00
parent 7114321ee4
commit 8039ca76a5
168 changed files with 8977 additions and 2743 deletions

View File

@ -1,4 +1,4 @@
94d654be2064 31d7feb9281b
The first line of this file holds the Mercurial revision number of the The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources. last merge done from the master library sources.

View File

@ -182,6 +182,7 @@ toolexeclibgocrypto_DATA = \
crypto/cast5.gox \ crypto/cast5.gox \
crypto/cipher.gox \ crypto/cipher.gox \
crypto/dsa.gox \ crypto/dsa.gox \
crypto/ecdsa.gox \
crypto/elliptic.gox \ crypto/elliptic.gox \
crypto/hmac.gox \ crypto/hmac.gox \
crypto/md4.gox \ crypto/md4.gox \
@ -254,11 +255,14 @@ toolexeclibgohashdir = $(toolexeclibgodir)/hash
toolexeclibgohash_DATA = \ toolexeclibgohash_DATA = \
hash/adler32.gox \ hash/adler32.gox \
hash/crc32.gox \ hash/crc32.gox \
hash/crc64.gox hash/crc64.gox \
hash/fnv.gox
toolexeclibgohttpdir = $(toolexeclibgodir)/http toolexeclibgohttpdir = $(toolexeclibgodir)/http
toolexeclibgohttp_DATA = \ toolexeclibgohttp_DATA = \
http/cgi.gox \
http/httptest.gox \
http/pprof.gox http/pprof.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image toolexeclibgoimagedir = $(toolexeclibgodir)/image
@ -301,6 +305,11 @@ toolexeclibgoos_DATA = \
$(os_inotify_gox) \ $(os_inotify_gox) \
os/signal.gox os/signal.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
toolexeclibgopath_DATA = \
path/filepath.gox
toolexeclibgorpcdir = $(toolexeclibgodir)/rpc toolexeclibgorpcdir = $(toolexeclibgodir)/rpc
toolexeclibgorpc_DATA = \ toolexeclibgorpc_DATA = \
@ -543,6 +552,7 @@ go_html_files = \
go_http_files = \ go_http_files = \
go/http/chunked.go \ go/http/chunked.go \
go/http/client.go \ go/http/client.go \
go/http/cookie.go \
go/http/dump.go \ go/http/dump.go \
go/http/fs.go \ go/http/fs.go \
go/http/header.go \ go/http/header.go \
@ -726,8 +736,7 @@ go_patch_files = \
go_path_files = \ go_path_files = \
go/path/match.go \ go/path/match.go \
go/path/path.go \ go/path/path.go
go/path/path_unix.go
go_rand_files = \ go_rand_files = \
go/rand/exp.go \ go/rand/exp.go \
@ -753,6 +762,7 @@ go_runtime_files = \
go/runtime/debug.go \ go/runtime/debug.go \
go/runtime/error.go \ go/runtime/error.go \
go/runtime/extern.go \ go/runtime/extern.go \
go/runtime/mem.go \
go/runtime/sig.go \ go/runtime/sig.go \
go/runtime/softfloat64.go \ go/runtime/softfloat64.go \
go/runtime/type.go \ go/runtime/type.go \
@ -826,6 +836,7 @@ go_testing_files = \
go_time_files = \ go_time_files = \
go/time/format.go \ go/time/format.go \
go/time/sleep.go \ go/time/sleep.go \
go/time/sys.go \
go/time/tick.go \ go/time/tick.go \
go/time/time.go \ go/time/time.go \
go/time/zoneinfo_unix.go go/time/zoneinfo_unix.go
@ -936,6 +947,8 @@ go_crypto_cipher_files = \
go/crypto/cipher/ofb.go go/crypto/cipher/ofb.go
go_crypto_dsa_files = \ go_crypto_dsa_files = \
go/crypto/dsa/dsa.go go/crypto/dsa/dsa.go
go_crypto_ecdsa_files = \
go/crypto/ecdsa/ecdsa.go
go_crypto_elliptic_files = \ go_crypto_elliptic_files = \
go/crypto/elliptic/elliptic.go go/crypto/elliptic/elliptic.go
go_crypto_hmac_files = \ go_crypto_hmac_files = \
@ -1101,6 +1114,7 @@ go_go_token_files = \
go/go/token/token.go go/go/token/token.go
go_go_typechecker_files = \ go_go_typechecker_files = \
go/go/typechecker/scope.go \ go/go/typechecker/scope.go \
go/go/typechecker/type.go \
go/go/typechecker/typechecker.go \ go/go/typechecker/typechecker.go \
go/go/typechecker/universe.go go/go/typechecker/universe.go
@ -1110,7 +1124,15 @@ go_hash_crc32_files = \
go/hash/crc32/crc32.go go/hash/crc32/crc32.go
go_hash_crc64_files = \ go_hash_crc64_files = \
go/hash/crc64/crc64.go go/hash/crc64/crc64.go
go_hash_fnv_files = \
go/hash/fnv/fnv.go
go_http_cgi_files = \
go/http/cgi/child.go \
go/http/cgi/host.go
go_http_httptest_files = \
go/http/httptest/recorder.go \
go/http/httptest/server.go
go_http_pprof_files = \ go_http_pprof_files = \
go/http/pprof/pprof.go go/http/pprof/pprof.go
@ -1151,6 +1173,11 @@ go_os_signal_files = \
go/os/signal/signal.go \ go/os/signal/signal.go \
unix.go unix.go
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
go/path/filepath/path_unix.go
go_rpc_jsonrpc_files = \ go_rpc_jsonrpc_files = \
go/rpc/jsonrpc/client.go \ go/rpc/jsonrpc/client.go \
go/rpc/jsonrpc/server.go go/rpc/jsonrpc/server.go
@ -1377,6 +1404,7 @@ libgo_go_objs = \
crypto/cast5.lo \ crypto/cast5.lo \
crypto/cipher.lo \ crypto/cipher.lo \
crypto/dsa.lo \ crypto/dsa.lo \
crypto/ecdsa.lo \
crypto/elliptic.lo \ crypto/elliptic.lo \
crypto/hmac.lo \ crypto/hmac.lo \
crypto/md4.lo \ crypto/md4.lo \
@ -1426,6 +1454,9 @@ libgo_go_objs = \
hash/adler32.lo \ hash/adler32.lo \
hash/crc32.lo \ hash/crc32.lo \
hash/crc64.lo \ hash/crc64.lo \
hash/fnv.lo \
http/cgi.lo \
http/httptest.lo \
http/pprof.lo \ http/pprof.lo \
image/jpeg.lo \ image/jpeg.lo \
image/png.lo \ image/png.lo \
@ -1436,6 +1467,7 @@ libgo_go_objs = \
net/textproto.lo \ net/textproto.lo \
$(os_lib_inotify_lo) \ $(os_lib_inotify_lo) \
os/signal.lo \ os/signal.lo \
path/filepath.lo \
rpc/jsonrpc.lo \ rpc/jsonrpc.lo \
runtime/debug.lo \ runtime/debug.lo \
runtime/pprof.lo \ runtime/pprof.lo \
@ -1532,7 +1564,7 @@ asn1/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: asn1/check .PHONY: asn1/check
big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox os.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
big/check: $(CHECK_DEPS) big/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1597,9 +1629,9 @@ fmt/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: fmt/check .PHONY: fmt/check
gob/gob.lo: $(go_gob_files) bytes.gox fmt.gox io.gox math.gox os.gox \ gob/gob.lo: $(go_gob_files) bufio.gox bytes.gox fmt.gox io.gox math.gox \
reflect.gox runtime.gox strings.gox sync.gox unicode.gox \ os.gox reflect.gox runtime.gox strings.gox sync.gox \
utf8.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
gob/check: $(CHECK_DEPS) gob/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1621,8 +1653,8 @@ html/check: $(CHECK_DEPS)
http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \
crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \ crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \
io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \
net.gox net/textproto.gox os.gox path.gox sort.gox \ net.gox net/textproto.gox os.gox path.gox path/filepath.gox \
strconv.gox strings.gox sync.gox time.gox utf8.gox sort.gox strconv.gox strings.gox sync.gox time.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
http/check: $(CHECK_DEPS) http/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1634,7 +1666,7 @@ image/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: image/check .PHONY: image/check
io/io.lo: $(go_io_files) os.gox runtime.gox sync.gox io/io.lo: $(go_io_files) os.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
io/check: $(CHECK_DEPS) io/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1697,8 +1729,7 @@ patch/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: patch/check .PHONY: patch/check
path/path.lo: $(go_path_files) io/ioutil.gox os.gox sort.gox strings.gox \ path/path.lo: $(go_path_files) os.gox strings.gox utf8.gox
utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
path/check: $(CHECK_DEPS) path/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1799,7 +1830,7 @@ template/check: $(CHECK_DEPS)
.PHONY: template/check .PHONY: template/check
testing/testing.lo: $(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \ testing/testing.lo: $(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \
runtime.gox time.gox runtime.gox runtime/pprof.gox time.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
testing/check: $(CHECK_DEPS) testing/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -1862,7 +1893,7 @@ archive/tar/check: $(CHECK_DEPS)
archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \ archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \
compress/flate.gox hash.gox hash/crc32.gox \ compress/flate.gox hash.gox hash/crc32.gox \
encoding/binary.gox io.gox os.gox encoding/binary.gox io.gox io/ioutil.gox os.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
archive/zip/check: $(CHECK_DEPS) archive/zip/check: $(CHECK_DEPS)
@$(MKDIR_P) archive/zip @$(MKDIR_P) archive/zip
@ -1977,6 +2008,14 @@ crypto/dsa/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: crypto/dsa/check .PHONY: crypto/dsa/check
crypto/ecdsa.lo: $(go_crypto_ecdsa_files) big.gox crypto/elliptic.gox io.gox \
os.gox
$(BUILDPACKAGE)
crypto/ecdsa/check: $(CHECK_DEPS)
@$(MKDIR_P) crypto/ecdsa
$(CHECK)
.PHONY: crypto/ecdsa/check
crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
crypto/elliptic/check: $(CHECK_DEPS) crypto/elliptic/check: $(CHECK_DEPS)
@ -2014,8 +2053,8 @@ crypto/ocsp/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: crypto/ocsp/check .PHONY: crypto/ocsp/check
crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox crypto/dsa.gox \
crypto/openpgp/armor.gox crypto/openpgp/error.gox \ crypto/openpgp/armor.gox crypto/openpgp/error.gox \
crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \
hash.gox io.gox os.gox strconv.gox time.gox hash.gox io.gox os.gox strconv.gox time.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -2137,10 +2176,10 @@ crypto/openpgp/error/check: $(CHECK_DEPS)
crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \
compress/flate.gox compress/zlib.gox crypto.gox \ compress/flate.gox compress/zlib.gox crypto.gox \
crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \
crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ crypto/dsa.gox crypto/openpgp/error.gox \
crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ crypto/openpgp/s2k.gox crypto/rand.gox crypto/rsa.gox \
crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ crypto/sha1.gox crypto/subtle.gox encoding/binary.gox fmt.gox \
io/ioutil.gox os.gox strconv.gox strings.gox hash.gox io.gox io/ioutil.gox os.gox strconv.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
crypto/openpgp/packet/check: $(CHECK_DEPS) crypto/openpgp/packet/check: $(CHECK_DEPS)
@$(MKDIR_P) crypto/openpgp/packet @$(MKDIR_P) crypto/openpgp/packet
@ -2288,8 +2327,8 @@ exp/eval/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: exp/eval/check .PHONY: exp/eval/check
go/ast.lo: $(go_go_ast_files) fmt.gox go/token.gox io.gox os.gox reflect.gox \ go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \
unicode.gox utf8.gox reflect.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/ast/check: $(CHECK_DEPS) go/ast/check: $(CHECK_DEPS)
@$(MKDIR_P) go/ast @$(MKDIR_P) go/ast
@ -2306,7 +2345,7 @@ go/doc/check: $(CHECK_DEPS)
go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \ go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \
go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \ go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \
path.gox strings.gox path/filepath.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/parser/check: $(CHECK_DEPS) go/parser/check: $(CHECK_DEPS)
@$(MKDIR_P) go/parser @$(MKDIR_P) go/parser
@ -2314,8 +2353,8 @@ go/parser/check: $(CHECK_DEPS)
.PHONY: go/parser/check .PHONY: go/parser/check
go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \ go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \
go/token.gox io.gox os.gox reflect.gox runtime.gox \ go/token.gox io.gox os.gox path/filepath.gox reflect.gox \
strings.gox tabwriter.gox runtime.gox strings.gox tabwriter.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/printer/check: $(CHECK_DEPS) go/printer/check: $(CHECK_DEPS)
@$(MKDIR_P) go/printer @$(MKDIR_P) go/printer
@ -2323,8 +2362,8 @@ go/printer/check: $(CHECK_DEPS)
.PHONY: go/printer/check .PHONY: go/printer/check
go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \ go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \
go/token.gox io.gox os.gox path.gox sort.gox strconv.gox \ go/token.gox io.gox os.gox path/filepath.gox sort.gox \
unicode.gox utf8.gox strconv.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/scanner/check: $(CHECK_DEPS) go/scanner/check: $(CHECK_DEPS)
@$(MKDIR_P) go/scanner @$(MKDIR_P) go/scanner
@ -2367,6 +2406,30 @@ hash/crc64/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: hash/crc64/check .PHONY: hash/crc64/check
hash/fnv.lo: $(go_hash_fnv_files) encoding/binary.gox hash.gox os.gox
$(BUILDPACKAGE)
hash/fnv/check: $(CHECK_DEPS)
@$(MKDIR_P) hash/fnv
$(CHECK)
.PHONY: hash/fnv/check
http/cgi.lo: $(go_http_cgi_files) bufio.gox bytes.gox encoding/line.gox \
exec.gox fmt.gox http.gox io.gox io/ioutil.gox log.gox \
os.gox path/filepath.gox regexp.gox strconv.gox strings.gox
$(BUILDPACKAGE)
http/cgi/check: $(CHECK_DEPS)
@$(MKDIR_P) http/cgi
$(CHECK)
.PHONY: http/cgi/check
http/httptest.lo: $(go_http_httptest_files) bytes.gox fmt.gox http.gox \
net.gox os.gox
$(BUILDPACKAGE)
http/httptest/check: $(CHECK_DEPS)
@$(MKDIR_P) http/httptest
$(CHECK)
.PHONY: http/httptest/check
http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \ http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \
runtime.gox runtime/pprof.gox strconv.gox strings.gox runtime.gox runtime/pprof.gox strconv.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -2398,8 +2461,8 @@ index/suffixarray/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: index/suffixarray/check .PHONY: index/suffixarray/check
io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox sort.gox \ io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox path/filepath.gox \
strconv.gox sort.gox strconv.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
io/ioutil/check: $(CHECK_DEPS) io/ioutil/check: $(CHECK_DEPS)
@$(MKDIR_P) io/ioutil @$(MKDIR_P) io/ioutil
@ -2407,7 +2470,7 @@ io/ioutil/check: $(CHECK_DEPS)
.PHONY: io/ioutil/check .PHONY: io/ioutil/check
mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \ mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \
mime.gox os.gox regexp.gox strings.gox mime.gox net/textproto.gox os.gox regexp.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
mime/multipart/check: $(CHECK_DEPS) mime/multipart/check: $(CHECK_DEPS)
@$(MKDIR_P) mime/multipart @$(MKDIR_P) mime/multipart
@ -2445,6 +2508,14 @@ unix.go: $(srcdir)/go/os/signal/mkunix.sh sysinfo.go
$(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp $(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp
mv -f $@.tmp $@ mv -f $@.tmp $@
path/filepath.lo: $(go_path_filepath_files) bytes.gox os.gox sort.gox \
strings.gox utf8.gox
$(BUILDPACKAGE)
path/filepath/check: $(CHECK_DEPS)
@$(MKDIR_P) path/filepath
$(CHECK)
.PHONY: path/filepath/check
rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \ rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \
os.gox rpc.gox sync.gox os.gox rpc.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -2462,7 +2533,7 @@ runtime/debug/check: $(CHECK_DEPS)
.PHONY: runtime/debug/check .PHONY: runtime/debug/check
runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \ runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \
runtime.gox runtime.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
runtime/pprof/check: $(CHECK_DEPS) runtime/pprof/check: $(CHECK_DEPS)
@$(MKDIR_P) runtime/pprof @$(MKDIR_P) runtime/pprof
@ -2653,6 +2724,8 @@ crypto/cipher.gox: crypto/cipher.lo
$(BUILDGOX) $(BUILDGOX)
crypto/dsa.gox: crypto/dsa.lo crypto/dsa.gox: crypto/dsa.lo
$(BUILDGOX) $(BUILDGOX)
crypto/ecdsa.gox: crypto/ecdsa.lo
$(BUILDGOX)
crypto/elliptic.gox: crypto/elliptic.lo crypto/elliptic.gox: crypto/elliptic.lo
$(BUILDGOX) $(BUILDGOX)
crypto/hmac.gox: crypto/hmac.lo crypto/hmac.gox: crypto/hmac.lo
@ -2757,7 +2830,13 @@ hash/crc32.gox: hash/crc32.lo
$(BUILDGOX) $(BUILDGOX)
hash/crc64.gox: hash/crc64.lo hash/crc64.gox: hash/crc64.lo
$(BUILDGOX) $(BUILDGOX)
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
http/cgi.gox: http/cgi.lo
$(BUILDGOX)
http/httptest.gox: http/httptest.lo
$(BUILDGOX)
http/pprof.gox: http/pprof.lo http/pprof.gox: http/pprof.lo
$(BUILDGOX) $(BUILDGOX)
@ -2785,6 +2864,9 @@ os/inotify.gox: os/inotify.lo
os/signal.gox: os/signal.lo os/signal.gox: os/signal.lo
$(BUILDGOX) $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
rpc/jsonrpc.gox: rpc/jsonrpc.lo rpc/jsonrpc.gox: rpc/jsonrpc.lo
$(BUILDGOX) $(BUILDGOX)
@ -2823,7 +2905,7 @@ TEST_PACKAGES = \
fmt/check \ fmt/check \
gob/check \ gob/check \
html/check \ html/check \
$(if $(GCCGO_RUN_ALL_TESTS),http/check) \ http/check \
io/check \ io/check \
json/check \ json/check \
log/check \ log/check \
@ -2872,6 +2954,7 @@ TEST_PACKAGES = \
crypto/cast5/check \ crypto/cast5/check \
crypto/cipher/check \ crypto/cipher/check \
crypto/dsa/check \ crypto/dsa/check \
crypto/ecdsa/check \
crypto/elliptic/check \ crypto/elliptic/check \
crypto/hmac/check \ crypto/hmac/check \
crypto/md4/check \ crypto/md4/check \
@ -2916,6 +2999,8 @@ TEST_PACKAGES = \
hash/adler32/check \ hash/adler32/check \
hash/crc32/check \ hash/crc32/check \
hash/crc64/check \ hash/crc64/check \
hash/fnv/check \
http/cgi/check \
image/png/check \ image/png/check \
index/suffixarray/check \ index/suffixarray/check \
io/ioutil/check \ io/ioutil/check \
@ -2923,6 +3008,7 @@ TEST_PACKAGES = \
net/textproto/check \ net/textproto/check \
$(os_inotify_check) \ $(os_inotify_check) \
os/signal/check \ os/signal/check \
path/filepath/check \
rpc/jsonrpc/check \ rpc/jsonrpc/check \
sync/atomic/check \ sync/atomic/check \
testing/quick/check \ testing/quick/check \

View File

@ -110,6 +110,7 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \
"$(DESTDIR)$(toolexeclibgomimedir)" \ "$(DESTDIR)$(toolexeclibgomimedir)" \
"$(DESTDIR)$(toolexeclibgonetdir)" \ "$(DESTDIR)$(toolexeclibgonetdir)" \
"$(DESTDIR)$(toolexeclibgoosdir)" \ "$(DESTDIR)$(toolexeclibgoosdir)" \
"$(DESTDIR)$(toolexeclibgopathdir)" \
"$(DESTDIR)$(toolexeclibgorpcdir)" \ "$(DESTDIR)$(toolexeclibgorpcdir)" \
"$(DESTDIR)$(toolexeclibgoruntimedir)" \ "$(DESTDIR)$(toolexeclibgoruntimedir)" \
"$(DESTDIR)$(toolexeclibgosyncdir)" \ "$(DESTDIR)$(toolexeclibgosyncdir)" \
@ -141,9 +142,10 @@ am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo bufio/bufio.lo \
container/heap.lo container/list.lo container/ring.lo \ container/heap.lo container/list.lo container/ring.lo \
container/vector.lo crypto/aes.lo crypto/block.lo \ container/vector.lo crypto/aes.lo crypto/block.lo \
crypto/blowfish.lo crypto/cast5.lo crypto/cipher.lo \ crypto/blowfish.lo crypto/cast5.lo crypto/cipher.lo \
crypto/dsa.lo crypto/elliptic.lo crypto/hmac.lo crypto/md4.lo \ crypto/dsa.lo crypto/ecdsa.lo crypto/elliptic.lo \
crypto/md5.lo crypto/ocsp.lo crypto/openpgp.lo crypto/rand.lo \ crypto/hmac.lo crypto/md4.lo crypto/md5.lo crypto/ocsp.lo \
crypto/rc4.lo crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \ crypto/openpgp.lo crypto/rand.lo crypto/rc4.lo \
crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \
crypto/sha256.lo crypto/sha512.lo crypto/subtle.lo \ crypto/sha256.lo crypto/sha512.lo crypto/subtle.lo \
crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \ crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \
crypto/openpgp/armor.lo crypto/openpgp/error.lo \ crypto/openpgp/armor.lo crypto/openpgp/error.lo \
@ -155,13 +157,14 @@ am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo bufio/bufio.lo \
exp/datafmt.lo exp/draw.lo exp/eval.lo go/ast.lo go/doc.lo \ exp/datafmt.lo exp/draw.lo exp/eval.lo go/ast.lo go/doc.lo \
go/parser.lo go/printer.lo go/scanner.lo go/token.lo \ go/parser.lo go/printer.lo go/scanner.lo go/token.lo \
go/typechecker.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \ go/typechecker.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \
http/pprof.lo image/jpeg.lo image/png.lo index/suffixarray.lo \ hash/fnv.lo http/cgi.lo http/httptest.lo http/pprof.lo \
io/ioutil.lo mime/multipart.lo net/dict.lo net/textproto.lo \ image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \
$(am__DEPENDENCIES_1) os/signal.lo rpc/jsonrpc.lo \ mime/multipart.lo net/dict.lo net/textproto.lo \
runtime/debug.lo runtime/pprof.lo sync/atomic.lo \ $(am__DEPENDENCIES_1) os/signal.lo path/filepath.lo \
sync/atomic_c.lo syscalls/syscall.lo syscalls/errno.lo \ rpc/jsonrpc.lo runtime/debug.lo runtime/pprof.lo \
testing/testing.lo testing/iotest.lo testing/quick.lo \ sync/atomic.lo sync/atomic_c.lo syscalls/syscall.lo \
testing/script.lo syscalls/errno.lo testing/testing.lo testing/iotest.lo \
testing/quick.lo testing/script.lo
libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \ libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
@ -280,8 +283,9 @@ DATA = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \
$(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \ $(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \
$(toolexeclibgoio_DATA) $(toolexeclibgomime_DATA) \ $(toolexeclibgoio_DATA) $(toolexeclibgomime_DATA) \
$(toolexeclibgonet_DATA) $(toolexeclibgoos_DATA) \ $(toolexeclibgonet_DATA) $(toolexeclibgoos_DATA) \
$(toolexeclibgorpc_DATA) $(toolexeclibgoruntime_DATA) \ $(toolexeclibgopath_DATA) $(toolexeclibgorpc_DATA) \
$(toolexeclibgosync_DATA) $(toolexeclibgotesting_DATA) $(toolexeclibgoruntime_DATA) $(toolexeclibgosync_DATA) \
$(toolexeclibgotesting_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive distclean-recursive maintainer-clean-recursive
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
@ -620,6 +624,7 @@ toolexeclibgocrypto_DATA = \
crypto/cast5.gox \ crypto/cast5.gox \
crypto/cipher.gox \ crypto/cipher.gox \
crypto/dsa.gox \ crypto/dsa.gox \
crypto/ecdsa.gox \
crypto/elliptic.gox \ crypto/elliptic.gox \
crypto/hmac.gox \ crypto/hmac.gox \
crypto/md4.gox \ crypto/md4.gox \
@ -686,10 +691,13 @@ toolexeclibgohashdir = $(toolexeclibgodir)/hash
toolexeclibgohash_DATA = \ toolexeclibgohash_DATA = \
hash/adler32.gox \ hash/adler32.gox \
hash/crc32.gox \ hash/crc32.gox \
hash/crc64.gox hash/crc64.gox \
hash/fnv.gox
toolexeclibgohttpdir = $(toolexeclibgodir)/http toolexeclibgohttpdir = $(toolexeclibgodir)/http
toolexeclibgohttp_DATA = \ toolexeclibgohttp_DATA = \
http/cgi.gox \
http/httptest.gox \
http/pprof.gox http/pprof.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image toolexeclibgoimagedir = $(toolexeclibgodir)/image
@ -723,6 +731,10 @@ toolexeclibgoos_DATA = \
$(os_inotify_gox) \ $(os_inotify_gox) \
os/signal.gox os/signal.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
toolexeclibgopath_DATA = \
path/filepath.gox
toolexeclibgorpcdir = $(toolexeclibgodir)/rpc toolexeclibgorpcdir = $(toolexeclibgodir)/rpc
toolexeclibgorpc_DATA = \ toolexeclibgorpc_DATA = \
rpc/jsonrpc.gox rpc/jsonrpc.gox
@ -928,6 +940,7 @@ go_html_files = \
go_http_files = \ go_http_files = \
go/http/chunked.go \ go/http/chunked.go \
go/http/client.go \ go/http/client.go \
go/http/cookie.go \
go/http/dump.go \ go/http/dump.go \
go/http/fs.go \ go/http/fs.go \
go/http/header.go \ go/http/header.go \
@ -1084,8 +1097,7 @@ go_patch_files = \
go_path_files = \ go_path_files = \
go/path/match.go \ go/path/match.go \
go/path/path.go \ go/path/path.go
go/path/path_unix.go
go_rand_files = \ go_rand_files = \
go/rand/exp.go \ go/rand/exp.go \
@ -1111,6 +1123,7 @@ go_runtime_files = \
go/runtime/debug.go \ go/runtime/debug.go \
go/runtime/error.go \ go/runtime/error.go \
go/runtime/extern.go \ go/runtime/extern.go \
go/runtime/mem.go \
go/runtime/sig.go \ go/runtime/sig.go \
go/runtime/softfloat64.go \ go/runtime/softfloat64.go \
go/runtime/type.go \ go/runtime/type.go \
@ -1170,6 +1183,7 @@ go_testing_files = \
go_time_files = \ go_time_files = \
go/time/format.go \ go/time/format.go \
go/time/sleep.go \ go/time/sleep.go \
go/time/sys.go \
go/time/tick.go \ go/time/tick.go \
go/time/time.go \ go/time/time.go \
go/time/zoneinfo_unix.go go/time/zoneinfo_unix.go
@ -1286,6 +1300,9 @@ go_crypto_cipher_files = \
go_crypto_dsa_files = \ go_crypto_dsa_files = \
go/crypto/dsa/dsa.go go/crypto/dsa/dsa.go
go_crypto_ecdsa_files = \
go/crypto/ecdsa/ecdsa.go
go_crypto_elliptic_files = \ go_crypto_elliptic_files = \
go/crypto/elliptic/elliptic.go go/crypto/elliptic/elliptic.go
@ -1490,6 +1507,7 @@ go_go_token_files = \
go_go_typechecker_files = \ go_go_typechecker_files = \
go/go/typechecker/scope.go \ go/go/typechecker/scope.go \
go/go/typechecker/type.go \
go/go/typechecker/typechecker.go \ go/go/typechecker/typechecker.go \
go/go/typechecker/universe.go go/go/typechecker/universe.go
@ -1502,6 +1520,17 @@ go_hash_crc32_files = \
go_hash_crc64_files = \ go_hash_crc64_files = \
go/hash/crc64/crc64.go go/hash/crc64/crc64.go
go_hash_fnv_files = \
go/hash/fnv/fnv.go
go_http_cgi_files = \
go/http/cgi/child.go \
go/http/cgi/host.go
go_http_httptest_files = \
go/http/httptest/recorder.go \
go/http/httptest/server.go
go_http_pprof_files = \ go_http_pprof_files = \
go/http/pprof/pprof.go go/http/pprof/pprof.go
@ -1542,6 +1571,11 @@ go_os_signal_files = \
go/os/signal/signal.go \ go/os/signal/signal.go \
unix.go unix.go
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
go/path/filepath/path_unix.go
go_rpc_jsonrpc_files = \ go_rpc_jsonrpc_files = \
go/rpc/jsonrpc/client.go \ go/rpc/jsonrpc/client.go \
go/rpc/jsonrpc/server.go go/rpc/jsonrpc/server.go
@ -1718,6 +1752,7 @@ libgo_go_objs = \
crypto/cast5.lo \ crypto/cast5.lo \
crypto/cipher.lo \ crypto/cipher.lo \
crypto/dsa.lo \ crypto/dsa.lo \
crypto/ecdsa.lo \
crypto/elliptic.lo \ crypto/elliptic.lo \
crypto/hmac.lo \ crypto/hmac.lo \
crypto/md4.lo \ crypto/md4.lo \
@ -1767,6 +1802,9 @@ libgo_go_objs = \
hash/adler32.lo \ hash/adler32.lo \
hash/crc32.lo \ hash/crc32.lo \
hash/crc64.lo \ hash/crc64.lo \
hash/fnv.lo \
http/cgi.lo \
http/httptest.lo \
http/pprof.lo \ http/pprof.lo \
image/jpeg.lo \ image/jpeg.lo \
image/png.lo \ image/png.lo \
@ -1777,6 +1815,7 @@ libgo_go_objs = \
net/textproto.lo \ net/textproto.lo \
$(os_lib_inotify_lo) \ $(os_lib_inotify_lo) \
os/signal.lo \ os/signal.lo \
path/filepath.lo \
rpc/jsonrpc.lo \ rpc/jsonrpc.lo \
runtime/debug.lo \ runtime/debug.lo \
runtime/pprof.lo \ runtime/pprof.lo \
@ -1883,7 +1922,7 @@ TEST_PACKAGES = \
fmt/check \ fmt/check \
gob/check \ gob/check \
html/check \ html/check \
$(if $(GCCGO_RUN_ALL_TESTS),http/check) \ http/check \
io/check \ io/check \
json/check \ json/check \
log/check \ log/check \
@ -1932,6 +1971,7 @@ TEST_PACKAGES = \
crypto/cast5/check \ crypto/cast5/check \
crypto/cipher/check \ crypto/cipher/check \
crypto/dsa/check \ crypto/dsa/check \
crypto/ecdsa/check \
crypto/elliptic/check \ crypto/elliptic/check \
crypto/hmac/check \ crypto/hmac/check \
crypto/md4/check \ crypto/md4/check \
@ -1976,6 +2016,8 @@ TEST_PACKAGES = \
hash/adler32/check \ hash/adler32/check \
hash/crc32/check \ hash/crc32/check \
hash/crc64/check \ hash/crc64/check \
hash/fnv/check \
http/cgi/check \
image/png/check \ image/png/check \
index/suffixarray/check \ index/suffixarray/check \
io/ioutil/check \ io/ioutil/check \
@ -1983,6 +2025,7 @@ TEST_PACKAGES = \
net/textproto/check \ net/textproto/check \
$(os_inotify_check) \ $(os_inotify_check) \
os/signal/check \ os/signal/check \
path/filepath/check \
rpc/jsonrpc/check \ rpc/jsonrpc/check \
sync/atomic/check \ sync/atomic/check \
testing/quick/check \ testing/quick/check \
@ -3271,6 +3314,26 @@ uninstall-toolexeclibgoosDATA:
test -n "$$files" || exit 0; \ test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgoosdir)' && rm -f" $$files ")"; \ echo " ( cd '$(DESTDIR)$(toolexeclibgoosdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgoosdir)" && rm -f $$files cd "$(DESTDIR)$(toolexeclibgoosdir)" && rm -f $$files
install-toolexeclibgopathDATA: $(toolexeclibgopath_DATA)
@$(NORMAL_INSTALL)
test -z "$(toolexeclibgopathdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgopathdir)"
@list='$(toolexeclibgopath_DATA)'; test -n "$(toolexeclibgopathdir)" || list=; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
done | $(am__base_list) | \
while read files; do \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgopathdir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgopathdir)" || exit $$?; \
done
uninstall-toolexeclibgopathDATA:
@$(NORMAL_UNINSTALL)
@list='$(toolexeclibgopath_DATA)'; test -n "$(toolexeclibgopathdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
test -n "$$files" || exit 0; \
echo " ( cd '$(DESTDIR)$(toolexeclibgopathdir)' && rm -f" $$files ")"; \
cd "$(DESTDIR)$(toolexeclibgopathdir)" && rm -f $$files
install-toolexeclibgorpcDATA: $(toolexeclibgorpc_DATA) install-toolexeclibgorpcDATA: $(toolexeclibgorpc_DATA)
@$(NORMAL_INSTALL) @$(NORMAL_INSTALL)
test -z "$(toolexeclibgorpcdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgorpcdir)" test -z "$(toolexeclibgorpcdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgorpcdir)"
@ -3668,7 +3731,7 @@ all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) all-multi $(DATA) \
config.h config.h
installdirs: installdirs-recursive installdirs: installdirs-recursive
installdirs-am: installdirs-am:
for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \ for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done done
install: install-recursive install: install-recursive
@ -3741,9 +3804,9 @@ install-exec-am: install-multi install-toolexeclibLIBRARIES \
install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \ install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \
install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \
install-toolexeclibgotestingDATA install-toolexeclibgosyncDATA install-toolexeclibgotestingDATA
install-html: install-html-recursive install-html: install-html-recursive
@ -3800,7 +3863,8 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
uninstall-toolexeclibgoimageDATA \ uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \ uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
uninstall-toolexeclibgoosDATA uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoosDATA uninstall-toolexeclibgopathDATA \
uninstall-toolexeclibgorpcDATA \
uninstall-toolexeclibgoruntimeDATA \ uninstall-toolexeclibgoruntimeDATA \
uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgosyncDATA \
uninstall-toolexeclibgotestingDATA uninstall-toolexeclibgotestingDATA
@ -3836,15 +3900,15 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \ install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \
install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \
install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \
install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ install-toolexeclibgoosDATA install-toolexeclibgopathDATA \
install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \
install-toolexeclibgotestingDATA installcheck installcheck-am \ install-toolexeclibgosyncDATA install-toolexeclibgotestingDATA \
installdirs installdirs-am maintainer-clean \ installcheck installcheck-am installdirs installdirs-am \
maintainer-clean-generic maintainer-clean-multi mostlyclean \ maintainer-clean maintainer-clean-generic \
mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ maintainer-clean-multi mostlyclean mostlyclean-compile \
mostlyclean-local mostlyclean-multi pdf pdf-am ps ps-am tags \ mostlyclean-generic mostlyclean-libtool mostlyclean-local \
tags-recursive uninstall uninstall-am \ mostlyclean-multi pdf pdf-am ps ps-am tags tags-recursive \
uninstall-toolexeclibLIBRARIES \ uninstall uninstall-am uninstall-toolexeclibLIBRARIES \
uninstall-toolexeclibLTLIBRARIES uninstall-toolexeclibgoDATA \ uninstall-toolexeclibLTLIBRARIES uninstall-toolexeclibgoDATA \
uninstall-toolexeclibgoarchiveDATA \ uninstall-toolexeclibgoarchiveDATA \
uninstall-toolexeclibgocompressDATA \ uninstall-toolexeclibgocompressDATA \
@ -3859,7 +3923,8 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \
uninstall-toolexeclibgoimageDATA \ uninstall-toolexeclibgoimageDATA \
uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \ uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \
uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \
uninstall-toolexeclibgoosDATA uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoosDATA uninstall-toolexeclibgopathDATA \
uninstall-toolexeclibgorpcDATA \
uninstall-toolexeclibgoruntimeDATA \ uninstall-toolexeclibgoruntimeDATA \
uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgosyncDATA \
uninstall-toolexeclibgotestingDATA uninstall-toolexeclibgotestingDATA
@ -3918,7 +3983,7 @@ asn1/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: asn1/check .PHONY: asn1/check
big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox os.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
big/check: $(CHECK_DEPS) big/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -3983,9 +4048,9 @@ fmt/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: fmt/check .PHONY: fmt/check
gob/gob.lo: $(go_gob_files) bytes.gox fmt.gox io.gox math.gox os.gox \ gob/gob.lo: $(go_gob_files) bufio.gox bytes.gox fmt.gox io.gox math.gox \
reflect.gox runtime.gox strings.gox sync.gox unicode.gox \ os.gox reflect.gox runtime.gox strings.gox sync.gox \
utf8.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
gob/check: $(CHECK_DEPS) gob/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -4007,8 +4072,8 @@ html/check: $(CHECK_DEPS)
http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \
crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \ crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \
io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \
net.gox net/textproto.gox os.gox path.gox sort.gox \ net.gox net/textproto.gox os.gox path.gox path/filepath.gox \
strconv.gox strings.gox sync.gox time.gox utf8.gox sort.gox strconv.gox strings.gox sync.gox time.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
http/check: $(CHECK_DEPS) http/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -4020,7 +4085,7 @@ image/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: image/check .PHONY: image/check
io/io.lo: $(go_io_files) os.gox runtime.gox sync.gox io/io.lo: $(go_io_files) os.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
io/check: $(CHECK_DEPS) io/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -4083,8 +4148,7 @@ patch/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: patch/check .PHONY: patch/check
path/path.lo: $(go_path_files) io/ioutil.gox os.gox sort.gox strings.gox \ path/path.lo: $(go_path_files) os.gox strings.gox utf8.gox
utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
path/check: $(CHECK_DEPS) path/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -4185,7 +4249,7 @@ template/check: $(CHECK_DEPS)
.PHONY: template/check .PHONY: template/check
testing/testing.lo: $(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \ testing/testing.lo: $(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \
runtime.gox time.gox runtime.gox runtime/pprof.gox time.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
testing/check: $(CHECK_DEPS) testing/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
@ -4248,7 +4312,7 @@ archive/tar/check: $(CHECK_DEPS)
archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \ archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \
compress/flate.gox hash.gox hash/crc32.gox \ compress/flate.gox hash.gox hash/crc32.gox \
encoding/binary.gox io.gox os.gox encoding/binary.gox io.gox io/ioutil.gox os.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
archive/zip/check: $(CHECK_DEPS) archive/zip/check: $(CHECK_DEPS)
@$(MKDIR_P) archive/zip @$(MKDIR_P) archive/zip
@ -4363,6 +4427,14 @@ crypto/dsa/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: crypto/dsa/check .PHONY: crypto/dsa/check
crypto/ecdsa.lo: $(go_crypto_ecdsa_files) big.gox crypto/elliptic.gox io.gox \
os.gox
$(BUILDPACKAGE)
crypto/ecdsa/check: $(CHECK_DEPS)
@$(MKDIR_P) crypto/ecdsa
$(CHECK)
.PHONY: crypto/ecdsa/check
crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
crypto/elliptic/check: $(CHECK_DEPS) crypto/elliptic/check: $(CHECK_DEPS)
@ -4400,8 +4472,8 @@ crypto/ocsp/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: crypto/ocsp/check .PHONY: crypto/ocsp/check
crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox crypto/dsa.gox \
crypto/openpgp/armor.gox crypto/openpgp/error.gox \ crypto/openpgp/armor.gox crypto/openpgp/error.gox \
crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \
hash.gox io.gox os.gox strconv.gox time.gox hash.gox io.gox os.gox strconv.gox time.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -4523,10 +4595,10 @@ crypto/openpgp/error/check: $(CHECK_DEPS)
crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \
compress/flate.gox compress/zlib.gox crypto.gox \ compress/flate.gox compress/zlib.gox crypto.gox \
crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \
crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ crypto/dsa.gox crypto/openpgp/error.gox \
crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ crypto/openpgp/s2k.gox crypto/rand.gox crypto/rsa.gox \
crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ crypto/sha1.gox crypto/subtle.gox encoding/binary.gox fmt.gox \
io/ioutil.gox os.gox strconv.gox strings.gox hash.gox io.gox io/ioutil.gox os.gox strconv.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
crypto/openpgp/packet/check: $(CHECK_DEPS) crypto/openpgp/packet/check: $(CHECK_DEPS)
@$(MKDIR_P) crypto/openpgp/packet @$(MKDIR_P) crypto/openpgp/packet
@ -4674,8 +4746,8 @@ exp/eval/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: exp/eval/check .PHONY: exp/eval/check
go/ast.lo: $(go_go_ast_files) fmt.gox go/token.gox io.gox os.gox reflect.gox \ go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \
unicode.gox utf8.gox reflect.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/ast/check: $(CHECK_DEPS) go/ast/check: $(CHECK_DEPS)
@$(MKDIR_P) go/ast @$(MKDIR_P) go/ast
@ -4692,7 +4764,7 @@ go/doc/check: $(CHECK_DEPS)
go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \ go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \
go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \ go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \
path.gox strings.gox path/filepath.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/parser/check: $(CHECK_DEPS) go/parser/check: $(CHECK_DEPS)
@$(MKDIR_P) go/parser @$(MKDIR_P) go/parser
@ -4700,8 +4772,8 @@ go/parser/check: $(CHECK_DEPS)
.PHONY: go/parser/check .PHONY: go/parser/check
go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \ go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \
go/token.gox io.gox os.gox reflect.gox runtime.gox \ go/token.gox io.gox os.gox path/filepath.gox reflect.gox \
strings.gox tabwriter.gox runtime.gox strings.gox tabwriter.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/printer/check: $(CHECK_DEPS) go/printer/check: $(CHECK_DEPS)
@$(MKDIR_P) go/printer @$(MKDIR_P) go/printer
@ -4709,8 +4781,8 @@ go/printer/check: $(CHECK_DEPS)
.PHONY: go/printer/check .PHONY: go/printer/check
go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \ go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \
go/token.gox io.gox os.gox path.gox sort.gox strconv.gox \ go/token.gox io.gox os.gox path/filepath.gox sort.gox \
unicode.gox utf8.gox strconv.gox unicode.gox utf8.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
go/scanner/check: $(CHECK_DEPS) go/scanner/check: $(CHECK_DEPS)
@$(MKDIR_P) go/scanner @$(MKDIR_P) go/scanner
@ -4753,6 +4825,30 @@ hash/crc64/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: hash/crc64/check .PHONY: hash/crc64/check
hash/fnv.lo: $(go_hash_fnv_files) encoding/binary.gox hash.gox os.gox
$(BUILDPACKAGE)
hash/fnv/check: $(CHECK_DEPS)
@$(MKDIR_P) hash/fnv
$(CHECK)
.PHONY: hash/fnv/check
http/cgi.lo: $(go_http_cgi_files) bufio.gox bytes.gox encoding/line.gox \
exec.gox fmt.gox http.gox io.gox io/ioutil.gox log.gox \
os.gox path/filepath.gox regexp.gox strconv.gox strings.gox
$(BUILDPACKAGE)
http/cgi/check: $(CHECK_DEPS)
@$(MKDIR_P) http/cgi
$(CHECK)
.PHONY: http/cgi/check
http/httptest.lo: $(go_http_httptest_files) bytes.gox fmt.gox http.gox \
net.gox os.gox
$(BUILDPACKAGE)
http/httptest/check: $(CHECK_DEPS)
@$(MKDIR_P) http/httptest
$(CHECK)
.PHONY: http/httptest/check
http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \ http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \
runtime.gox runtime/pprof.gox strconv.gox strings.gox runtime.gox runtime/pprof.gox strconv.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -4784,8 +4880,8 @@ index/suffixarray/check: $(CHECK_DEPS)
$(CHECK) $(CHECK)
.PHONY: index/suffixarray/check .PHONY: index/suffixarray/check
io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox sort.gox \ io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox path/filepath.gox \
strconv.gox sort.gox strconv.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
io/ioutil/check: $(CHECK_DEPS) io/ioutil/check: $(CHECK_DEPS)
@$(MKDIR_P) io/ioutil @$(MKDIR_P) io/ioutil
@ -4793,7 +4889,7 @@ io/ioutil/check: $(CHECK_DEPS)
.PHONY: io/ioutil/check .PHONY: io/ioutil/check
mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \ mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \
mime.gox os.gox regexp.gox strings.gox mime.gox net/textproto.gox os.gox regexp.gox strings.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
mime/multipart/check: $(CHECK_DEPS) mime/multipart/check: $(CHECK_DEPS)
@$(MKDIR_P) mime/multipart @$(MKDIR_P) mime/multipart
@ -4831,6 +4927,14 @@ unix.go: $(srcdir)/go/os/signal/mkunix.sh sysinfo.go
$(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp $(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp
mv -f $@.tmp $@ mv -f $@.tmp $@
path/filepath.lo: $(go_path_filepath_files) bytes.gox os.gox sort.gox \
strings.gox utf8.gox
$(BUILDPACKAGE)
path/filepath/check: $(CHECK_DEPS)
@$(MKDIR_P) path/filepath
$(CHECK)
.PHONY: path/filepath/check
rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \ rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \
os.gox rpc.gox sync.gox os.gox rpc.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
@ -4848,7 +4952,7 @@ runtime/debug/check: $(CHECK_DEPS)
.PHONY: runtime/debug/check .PHONY: runtime/debug/check
runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \ runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \
runtime.gox runtime.gox sync.gox
$(BUILDPACKAGE) $(BUILDPACKAGE)
runtime/pprof/check: $(CHECK_DEPS) runtime/pprof/check: $(CHECK_DEPS)
@$(MKDIR_P) runtime/pprof @$(MKDIR_P) runtime/pprof
@ -5034,6 +5138,8 @@ crypto/cipher.gox: crypto/cipher.lo
$(BUILDGOX) $(BUILDGOX)
crypto/dsa.gox: crypto/dsa.lo crypto/dsa.gox: crypto/dsa.lo
$(BUILDGOX) $(BUILDGOX)
crypto/ecdsa.gox: crypto/ecdsa.lo
$(BUILDGOX)
crypto/elliptic.gox: crypto/elliptic.lo crypto/elliptic.gox: crypto/elliptic.lo
$(BUILDGOX) $(BUILDGOX)
crypto/hmac.gox: crypto/hmac.lo crypto/hmac.gox: crypto/hmac.lo
@ -5138,7 +5244,13 @@ hash/crc32.gox: hash/crc32.lo
$(BUILDGOX) $(BUILDGOX)
hash/crc64.gox: hash/crc64.lo hash/crc64.gox: hash/crc64.lo
$(BUILDGOX) $(BUILDGOX)
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
http/cgi.gox: http/cgi.lo
$(BUILDGOX)
http/httptest.gox: http/httptest.lo
$(BUILDGOX)
http/pprof.gox: http/pprof.lo http/pprof.gox: http/pprof.lo
$(BUILDGOX) $(BUILDGOX)
@ -5166,6 +5278,9 @@ os/inotify.gox: os/inotify.lo
os/signal.gox: os/signal.lo os/signal.gox: os/signal.lo
$(BUILDGOX) $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
rpc/jsonrpc.gox: rpc/jsonrpc.lo rpc/jsonrpc.gox: rpc/jsonrpc.lo
$(BUILDGOX) $(BUILDGOX)

View File

@ -19,6 +19,7 @@ import (
"hash/crc32" "hash/crc32"
"encoding/binary" "encoding/binary"
"io" "io"
"io/ioutil"
"os" "os"
) )
@ -109,7 +110,7 @@ func (f *File) Open() (rc io.ReadCloser, err os.Error) {
r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size) r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size)
switch f.Method { switch f.Method {
case 0: // store (no compression) case 0: // store (no compression)
rc = nopCloser{r} rc = ioutil.NopCloser(r)
case 8: // DEFLATE case 8: // DEFLATE
rc = flate.NewReader(r) rc = flate.NewReader(r)
default: default:
@ -147,12 +148,6 @@ func (r *checksumReader) Read(b []byte) (n int, err os.Error) {
func (r *checksumReader) Close() os.Error { return r.rc.Close() } func (r *checksumReader) Close() os.Error { return r.rc.Close() }
type nopCloser struct {
io.Reader
}
func (f nopCloser) Close() os.Error { return nil }
func readFileHeader(f *File, r io.Reader) (err os.Error) { func readFileHeader(f *File, r io.Reader) (err os.Error) {
defer func() { defer func() {
if rerr, ok := recover().(os.Error); ok { if rerr, ok := recover().(os.Error); ok {

View File

@ -8,6 +8,7 @@ package big
import ( import (
"fmt" "fmt"
"os"
"rand" "rand"
) )
@ -393,62 +394,19 @@ func (z *Int) SetString(s string, base int) (*Int, bool) {
} }
// SetBytes interprets b as the bytes of a big-endian, unsigned integer and // SetBytes interprets buf as the bytes of a big-endian unsigned
// sets z to that value. // integer, sets z to that value, and returns z.
func (z *Int) SetBytes(b []byte) *Int { func (z *Int) SetBytes(buf []byte) *Int {
const s = _S z.abs = z.abs.setBytes(buf)
z.abs = z.abs.make((len(b) + s - 1) / s)
j := 0
for len(b) >= s {
var w Word
for i := s; i > 0; i-- {
w <<= 8
w |= Word(b[len(b)-i])
}
z.abs[j] = w
j++
b = b[0 : len(b)-s]
}
if len(b) > 0 {
var w Word
for i := len(b); i > 0; i-- {
w <<= 8
w |= Word(b[len(b)-i])
}
z.abs[j] = w
}
z.abs = z.abs.norm()
z.neg = false z.neg = false
return z return z
} }
// Bytes returns the absolute value of x as a big-endian byte array. // Bytes returns the absolute value of z as a big-endian byte slice.
func (z *Int) Bytes() []byte { func (z *Int) Bytes() []byte {
const s = _S buf := make([]byte, len(z.abs)*_S)
b := make([]byte, len(z.abs)*s) return buf[z.abs.bytes(buf):]
for i, w := range z.abs {
wordBytes := b[(len(z.abs)-i-1)*s : (len(z.abs)-i)*s]
for j := s - 1; j >= 0; j-- {
wordBytes[j] = byte(w)
w >>= 8
}
}
i := 0
for i < len(b) && b[i] == 0 {
i++
}
return b[i:]
} }
@ -739,3 +697,34 @@ func (z *Int) Not(x *Int) *Int {
z.neg = true // z cannot be zero if x is positive z.neg = true // z cannot be zero if x is positive
return z return z
} }
// Gob codec version. Permits backward-compatible changes to the encoding.
const version byte = 1
// GobEncode implements the gob.GobEncoder interface.
func (z *Int) GobEncode() ([]byte, os.Error) {
buf := make([]byte, len(z.abs)*_S+1) // extra byte for version and sign bit
i := z.abs.bytes(buf) - 1 // i >= 0
b := version << 1 // make space for sign bit
if z.neg {
b |= 1
}
buf[i] = b
return buf[i:], nil
}
// GobDecode implements the gob.GobDecoder interface.
func (z *Int) GobDecode(buf []byte) os.Error {
if len(buf) == 0 {
return os.NewError("Int.GobDecode: no data")
}
b := buf[0]
if b>>1 != version {
return os.NewError(fmt.Sprintf("Int.GobDecode: encoding version %d not supported", b>>1))
}
z.neg = b&1 != 0
z.abs = z.abs.setBytes(buf[1:])
return nil
}

View File

@ -8,6 +8,7 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"gob"
"testing" "testing"
"testing/quick" "testing/quick"
) )
@ -1053,3 +1054,41 @@ func TestModInverse(t *testing.T) {
} }
} }
} }
var gobEncodingTests = []string{
"0",
"1",
"2",
"10",
"42",
"1234567890",
"298472983472983471903246121093472394872319615612417471234712061",
}
func TestGobEncoding(t *testing.T) {
var medium bytes.Buffer
enc := gob.NewEncoder(&medium)
dec := gob.NewDecoder(&medium)
for i, test := range gobEncodingTests {
for j := 0; j < 2; j++ {
medium.Reset() // empty buffer for each test case (in case of failures)
stest := test
if j == 0 {
stest = "-" + test
}
var tx Int
tx.SetString(stest, 10)
if err := enc.Encode(&tx); err != nil {
t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
}
var rx Int
if err := dec.Decode(&rx); err != nil {
t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
}
if rx.Cmp(&tx) != 0 {
t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
}
}
}
}

View File

@ -1065,3 +1065,50 @@ NextRandom:
return true return true
} }
// bytes writes the value of z into buf using big-endian encoding.
// len(buf) must be >= len(z)*_S. The value of z is encoded in the
// slice buf[i:]. The number i of unused bytes at the beginning of
// buf is returned as result.
func (z nat) bytes(buf []byte) (i int) {
i = len(buf)
for _, d := range z {
for j := 0; j < _S; j++ {
i--
buf[i] = byte(d)
d >>= 8
}
}
for i < len(buf) && buf[i] == 0 {
i++
}
return
}
// setBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z nat) setBytes(buf []byte) nat {
z = z.make((len(buf) + _S - 1) / _S)
k := 0
s := uint(0)
var d Word
for i := len(buf); i > 0; i-- {
d |= Word(buf[i-1]) << s
if s += 8; s == _S*8 {
z[k] = d
k++
s = 0
d = 0
}
}
if k < len(z) {
z[k] = d
}
return z.norm()
}

View File

@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package bufio package bufio_test
import ( import (
. "bufio"
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
@ -502,9 +503,8 @@ func TestWriteString(t *testing.T) {
b.WriteString("7890") // easy after flush b.WriteString("7890") // easy after flush
b.WriteString("abcdefghijklmnopqrstuvwxy") // hard b.WriteString("abcdefghijklmnopqrstuvwxy") // hard
b.WriteString("z") b.WriteString("z")
b.Flush() if err := b.Flush(); err != nil {
if b.err != nil { t.Error("WriteString", err)
t.Error("WriteString", b.err)
} }
s := "01234567890abcdefghijklmnopqrstuvwxyz" s := "01234567890abcdefghijklmnopqrstuvwxyz"
if string(buf.Bytes()) != s { if string(buf.Bytes()) != s {

View File

@ -191,9 +191,16 @@ func testSync(t *testing.T, level int, input []byte, name string) {
t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo])
return return
} }
if i == 0 && buf.buf.Len() != 0 { // This test originally checked that after reading
t.Errorf("testSync/%d (%d, %d, %s): extra data after %d", i, level, len(input), name, hi-lo) // the first half of the input, there was nothing left
} // in the read buffer (buf.buf.Len() != 0) but that is
// not necessarily the case: the write Flush may emit
// some extra framing bits that are not necessary
// to process to obtain the first half of the uncompressed
// data. The test ran correctly most of the time, because
// the background goroutine had usually read even
// those extra bits by now, but it's not a useful thing to
// check.
buf.WriteMode() buf.WriteMode()
} }
buf.ReadMode() buf.ReadMode()

View File

@ -9,6 +9,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"runtime"
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
@ -117,16 +118,34 @@ func (devNull) Write(p []byte) (int, os.Error) {
return len(p), nil return len(p), nil
} }
func BenchmarkDecoder(b *testing.B) { func benchmarkDecoder(b *testing.B, n int) {
b.StopTimer() b.StopTimer()
b.SetBytes(int64(n))
buf0, _ := ioutil.ReadFile("../testdata/e.txt") buf0, _ := ioutil.ReadFile("../testdata/e.txt")
buf0 = buf0[:10000]
compressed := bytes.NewBuffer(nil) compressed := bytes.NewBuffer(nil)
w := NewWriter(compressed, LSB, 8) w := NewWriter(compressed, LSB, 8)
io.Copy(w, bytes.NewBuffer(buf0)) for i := 0; i < n; i += len(buf0) {
io.Copy(w, bytes.NewBuffer(buf0))
}
w.Close() w.Close()
buf1 := compressed.Bytes() buf1 := compressed.Bytes()
buf0, compressed, w = nil, nil, nil
runtime.GC()
b.StartTimer() b.StartTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
io.Copy(devNull{}, NewReader(bytes.NewBuffer(buf1), LSB, 8)) io.Copy(devNull{}, NewReader(bytes.NewBuffer(buf1), LSB, 8))
} }
} }
func BenchmarkDecoder1e4(b *testing.B) {
benchmarkDecoder(b, 1e4)
}
func BenchmarkDecoder1e5(b *testing.B) {
benchmarkDecoder(b, 1e5)
}
func BenchmarkDecoder1e6(b *testing.B) {
benchmarkDecoder(b, 1e6)
}

View File

@ -8,6 +8,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"runtime"
"testing" "testing"
) )
@ -99,13 +100,33 @@ func TestWriter(t *testing.T) {
} }
} }
func BenchmarkEncoder(b *testing.B) { func benchmarkEncoder(b *testing.B, n int) {
b.StopTimer() b.StopTimer()
buf, _ := ioutil.ReadFile("../testdata/e.txt") b.SetBytes(int64(n))
buf0, _ := ioutil.ReadFile("../testdata/e.txt")
buf0 = buf0[:10000]
buf1 := make([]byte, n)
for i := 0; i < n; i += len(buf0) {
copy(buf1[i:], buf0)
}
buf0 = nil
runtime.GC()
b.StartTimer() b.StartTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
w := NewWriter(devNull{}, LSB, 8) w := NewWriter(devNull{}, LSB, 8)
w.Write(buf) w.Write(buf1)
w.Close() w.Close()
} }
} }
func BenchmarkEncoder1e4(b *testing.B) {
benchmarkEncoder(b, 1e4)
}
func BenchmarkEncoder1e5(b *testing.B) {
benchmarkEncoder(b, 1e5)
}
func BenchmarkEncoder1e6(b *testing.B) {
benchmarkEncoder(b, 1e6)
}

View File

@ -0,0 +1,149 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as
// defined in FIPS 186-3.
package ecdsa
// References:
// [NSA]: Suite B implementor's guide to FIPS 186-3,
// http://www.nsa.gov/ia/_files/ecdsa.pdf
// [SECG]: SECG, SEC1
// http://www.secg.org/download/aid-780/sec1-v2.pdf
import (
"big"
"crypto/elliptic"
"io"
"os"
)
// PublicKey represents an ECDSA public key.
type PublicKey struct {
*elliptic.Curve
X, Y *big.Int
}
// PrivateKey represents a ECDSA private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
var one = new(big.Int).SetInt64(1)
// randFieldElement returns a random element of the field underlying the given
// curve using the procedure given in [NSA] A.2.1.
func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err os.Error) {
b := make([]byte, c.BitSize/8+8)
_, err = rand.Read(b)
if err != nil {
return
}
k = new(big.Int).SetBytes(b)
n := new(big.Int).Sub(c.N, one)
k.Mod(k, n)
k.Add(k, one)
return
}
// GenerateKey generates a public&private key pair.
func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err os.Error) {
k, err := randFieldElement(c, rand)
if err != nil {
return
}
priv = new(PrivateKey)
priv.PublicKey.Curve = c
priv.D = k
priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
return
}
// hashToInt converts a hash value to an integer. There is some disagreement
// about how this is done. [NSA] suggests that this is done in the obvious
// manner, but [SECG] truncates the hash to the bit-length of the curve order
// first. We follow [SECG] because that's what OpenSSL does.
func hashToInt(hash []byte, c *elliptic.Curve) *big.Int {
orderBits := c.N.BitLen()
orderBytes := (orderBits + 7) / 8
if len(hash) > orderBytes {
hash = hash[:orderBytes]
}
ret := new(big.Int).SetBytes(hash)
excess := orderBytes*8 - orderBits
if excess > 0 {
ret.Rsh(ret, uint(excess))
}
return ret
}
// Sign signs an arbitrary length hash (which should be the result of hashing a
// larger message) using the private key, priv. It returns the signature as a
// pair of integers. The security of the private key depends on the entropy of
// rand.
func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err os.Error) {
// See [NSA] 3.4.1
c := priv.PublicKey.Curve
var k, kInv *big.Int
for {
for {
k, err = randFieldElement(c, rand)
if err != nil {
r = nil
return
}
kInv = new(big.Int).ModInverse(k, c.N)
r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
r.Mod(r, priv.Curve.N)
if r.Sign() != 0 {
break
}
}
e := hashToInt(hash, c)
s = new(big.Int).Mul(priv.D, r)
s.Add(s, e)
s.Mul(s, kInv)
s.Mod(s, priv.PublicKey.Curve.N)
if s.Sign() != 0 {
break
}
}
return
}
// Verify verifies the signature in r, s of hash using the public key, pub. It
// returns true iff the signature is valid.
func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
// See [NSA] 3.4.2
c := pub.Curve
if r.Sign() == 0 || s.Sign() == 0 {
return false
}
if r.Cmp(c.N) >= 0 || s.Cmp(c.N) >= 0 {
return false
}
e := hashToInt(hash, c)
w := new(big.Int).ModInverse(s, c.N)
u1 := e.Mul(e, w)
u2 := w.Mul(r, w)
x1, y1 := c.ScalarBaseMult(u1.Bytes())
x2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())
if x1.Cmp(x2) == 0 {
return false
}
x, _ := c.Add(x1, y1, x2, y2)
x.Mod(x, c.N)
return x.Cmp(r) == 0
}

View File

@ -0,0 +1,218 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ecdsa
import (
"big"
"crypto/elliptic"
"crypto/sha1"
"crypto/rand"
"encoding/hex"
"testing"
)
func testKeyGeneration(t *testing.T, c *elliptic.Curve, tag string) {
priv, err := GenerateKey(c, rand.Reader)
if err != nil {
t.Errorf("%s: error: %s", tag, err)
return
}
if !c.IsOnCurve(priv.PublicKey.X, priv.PublicKey.Y) {
t.Errorf("%s: public key invalid", tag, err)
}
}
func TestKeyGeneration(t *testing.T) {
testKeyGeneration(t, elliptic.P224(), "p224")
testKeyGeneration(t, elliptic.P256(), "p256")
testKeyGeneration(t, elliptic.P384(), "p384")
testKeyGeneration(t, elliptic.P521(), "p521")
}
func testSignAndVerify(t *testing.T, c *elliptic.Curve, tag string) {
priv, _ := GenerateKey(c, rand.Reader)
hashed := []byte("testing")
r, s, err := Sign(rand.Reader, priv, hashed)
if err != nil {
t.Errorf("%s: error signing: %s", tag, err)
return
}
if !Verify(&priv.PublicKey, hashed, r, s) {
t.Errorf("%s: Verify failed", tag)
}
hashed[0] ^= 0xff
if Verify(&priv.PublicKey, hashed, r, s) {
t.Errorf("%s: Verify always works!", tag)
}
}
func TestSignAndVerify(t *testing.T) {
testSignAndVerify(t, elliptic.P224(), "p224")
testSignAndVerify(t, elliptic.P256(), "p256")
testSignAndVerify(t, elliptic.P384(), "p384")
testSignAndVerify(t, elliptic.P521(), "p521")
}
func fromHex(s string) *big.Int {
r, ok := new(big.Int).SetString(s, 16)
if !ok {
panic("bad hex")
}
return r
}
// These test vectors were taken from
// http://csrc.nist.gov/groups/STM/cavp/documents/dss/ecdsatestvectors.zip
//
// Each entry is a P-224 ECDSA verification case: msg is SHA-1 hashed and
// the signature (r, s) is checked against the public key (Qx, Qy); ok is
// the expected Verify result.
var testVectors = []struct {
	msg    string // hex-encoded message, hashed with SHA-1 before verifying
	Qx, Qy string // hex-encoded affine coordinates of the public key
	r, s   string // hex-encoded signature values
	ok     bool   // expected Verify result
}{
	{
		"09626b45493672e48f3d1226a3aff3201960e577d33a7f72c7eb055302db8fe8ed61685dd036b554942a5737cd1512cdf811ee0c00e6dd2f08c69f08643be396e85dafda664801e772cdb7396868ac47b172245b41986aa2648cb77fbbfa562581be06651355a0c4b090f9d17d8f0ab6cced4e0c9d386cf465a516630f0231bd",
		"9504b5b82d97a264d8b3735e0568decabc4b6ca275bc53cbadfc1c40",
		"03426f80e477603b10dee670939623e3da91a94267fc4e51726009ed",
		"81d3ac609f9575d742028dd496450a58a60eea2dcf8b9842994916e1",
		"96a8c5f382c992e8f30ccce9af120b067ec1d74678fa8445232f75a5",
		false,
	},
	{
		"96b2b6536f6df29be8567a72528aceeaccbaa66c66c534f3868ca9778b02faadb182e4ed34662e73b9d52ecbe9dc8e875fc05033c493108b380689ebf47e5b062e6a0cdb3dd34ce5fe347d92768d72f7b9b377c20aea927043b509c078ed2467d7113405d2ddd458811e6faf41c403a2a239240180f1430a6f4330df5d77de37",
		"851e3100368a22478a0029353045ae40d1d8202ef4d6533cfdddafd8",
		"205302ac69457dd345e86465afa72ee8c74ca97e2b0b999aec1f10c2",
		"4450c2d38b697e990721aa2dbb56578d32b4f5aeb3b9072baa955ee0",
		"e26d4b589166f7b4ba4b1c8fce823fa47aad22f8c9c396b8c6526e12",
		false,
	},
	{
		"86778dbb4a068a01047a8d245d632f636c11d2ad350740b36fad90428b454ad0f120cb558d12ea5c8a23db595d87543d06d1ef489263d01ee529871eb68737efdb8ff85bc7787b61514bed85b7e01d6be209e0a4eb0db5c8df58a5c5bf706d76cb2bdf7800208639e05b89517155d11688236e6a47ed37d8e5a2b1e0adea338e",
		"ad5bda09d319a717c1721acd6688d17020b31b47eef1edea57ceeffc",
		"c8ce98e181770a7c9418c73c63d01494b8b80a41098c5ea50692c984",
		"de5558c257ab4134e52c19d8db3b224a1899cbd08cc508ce8721d5e9",
		"745db7af5a477e5046705c0a5eff1f52cb94a79d481f0c5a5e108ecd",
		true,
	},
	{
		"4bc6ef1958556686dab1e39c3700054a304cbd8f5928603dcd97fafd1f29e69394679b638f71c9344ce6a535d104803d22119f57b5f9477e253817a52afa9bfbc9811d6cc8c8be6b6566c6ef48b439bbb532abe30627548c598867f3861ba0b154dc1c3deca06eb28df8efd28258554b5179883a36fbb1eecf4f93ee19d41e3d",
		"cc5eea2edf964018bdc0504a3793e4d2145142caa09a72ac5fb8d3e8",
		"a48d78ae5d08aa725342773975a00d4219cf7a8029bb8cf3c17c374a",
		"67b861344b4e416d4094472faf4272f6d54a497177fbc5f9ef292836",
		"1d54f3fcdad795bf3b23408ecbac3e1321d1d66f2e4e3d05f41f7020",
		false,
	},
	{
		"bb658732acbf3147729959eb7318a2058308b2739ec58907dd5b11cfa3ecf69a1752b7b7d806fe00ec402d18f96039f0b78dbb90a59c4414fb33f1f4e02e4089de4122cd93df5263a95be4d7084e2126493892816e6a5b4ed123cb705bf930c8f67af0fb4514d5769232a9b008a803af225160ce63f675bd4872c4c97b146e5e",
		"6234c936e27bf141fc7534bfc0a7eedc657f91308203f1dcbd642855",
		"27983d87ca785ef4892c3591ef4a944b1deb125dd58bd351034a6f84",
		"e94e05b42d01d0b965ffdd6c3a97a36a771e8ea71003de76c4ecb13f",
		"1dc6464ffeefbd7872a081a5926e9fc3e66d123f1784340ba17737e9",
		false,
	},
	{
		"7c00be9123bfa2c4290be1d8bc2942c7f897d9a5b7917e3aabd97ef1aab890f148400a89abd554d19bec9d8ed911ce57b22fbcf6d30ca2115f13ce0a3f569a23bad39ee645f624c49c60dcfc11e7d2be24de9c905596d8f23624d63dc46591d1f740e46f982bfae453f107e80db23545782be23ce43708245896fc54e1ee5c43",
		"9f3f037282aaf14d4772edffff331bbdda845c3f65780498cde334f1",
		"8308ee5a16e3bcb721b6bc30000a0419bc1aaedd761be7f658334066",
		"6381d7804a8808e3c17901e4d283b89449096a8fba993388fa11dc54",
		"8e858f6b5b253686a86b757bad23658cda53115ac565abca4e3d9f57",
		false,
	},
	{
		"cffc122a44840dc705bb37130069921be313d8bde0b66201aebc48add028ca131914ef2e705d6bedd19dc6cf9459bbb0f27cdfe3c50483808ffcdaffbeaa5f062e097180f07a40ef4ab6ed03fe07ed6bcfb8afeb42c97eafa2e8a8df469de07317c5e1494c41547478eff4d8c7d9f0f484ad90fedf6e1c35ee68fa73f1691601",
		"a03b88a10d930002c7b17ca6af2fd3e88fa000edf787dc594f8d4fd4",
		"e0cf7acd6ddc758e64847fe4df9915ebda2f67cdd5ec979aa57421f5",
		"387b84dcf37dc343c7d2c5beb82f0bf8bd894b395a7b894565d296c1",
		"4adc12ce7d20a89ce3925e10491c731b15ddb3f339610857a21b53b4",
		false,
	},
	{
		"26e0e0cafd85b43d16255908ccfd1f061c680df75aba3081246b337495783052ba06c60f4a486c1591a4048bae11b4d7fec4f161d80bdc9a7b79d23e44433ed625eab280521a37f23dd3e1bdc5c6a6cfaa026f3c45cf703e76dab57add93fe844dd4cda67dc3bddd01f9152579e49df60969b10f09ce9372fdd806b0c7301866",
		"9a8983c42f2b5a87c37a00458b5970320d247f0c8a88536440173f7d",
		"15e489ec6355351361900299088cfe8359f04fe0cab78dde952be80c",
		"929a21baa173d438ec9f28d6a585a2f9abcfc0a4300898668e476dc0",
		"59a853f046da8318de77ff43f26fe95a92ee296fa3f7e56ce086c872",
		true,
	},
	{
		"1078eac124f48ae4f807e946971d0de3db3748dd349b14cca5c942560fb25401b2252744f18ad5e455d2d97ed5ae745f55ff509c6c8e64606afe17809affa855c4c4cdcaf6b69ab4846aa5624ed0687541aee6f2224d929685736c6a23906d974d3c257abce1a3fb8db5951b89ecb0cda92b5207d93f6618fd0f893c32cf6a6e",
		"d6e55820bb62c2be97650302d59d667a411956138306bd566e5c3c2b",
		"631ab0d64eaf28a71b9cbd27a7a88682a2167cee6251c44e3810894f",
		"65af72bc7721eb71c2298a0eb4eed3cec96a737cc49125706308b129",
		"bd5a987c78e2d51598dbd9c34a9035b0069c580edefdacee17ad892a",
		false,
	},
	{
		"919deb1fdd831c23481dfdb2475dcbe325b04c34f82561ced3d2df0b3d749b36e255c4928973769d46de8b95f162b53cd666cad9ae145e7fcfba97919f703d864efc11eac5f260a5d920d780c52899e5d76f8fe66936ff82130761231f536e6a3d59792f784902c469aa897aabf9a0678f93446610d56d5e0981e4c8a563556b",
		"269b455b1024eb92d860a420f143ac1286b8cce43031562ae7664574",
		"baeb6ca274a77c44a0247e5eb12ca72bdd9a698b3f3ae69c9f1aaa57",
		"cb4ec2160f04613eb0dfe4608486091a25eb12aa4dec1afe91cfb008",
		"40b01d8cd06589481574f958b98ca08ade9d2a8fe31024375c01bb40",
		false,
	},
	{
		"6e012361250dacf6166d2dd1aa7be544c3206a9d43464b3fcd90f3f8cf48d08ec099b59ba6fe7d9bdcfaf244120aed1695d8be32d1b1cd6f143982ab945d635fb48a7c76831c0460851a3d62b7209c30cd9c2abdbe3d2a5282a9fcde1a6f418dd23c409bc351896b9b34d7d3a1a63bbaf3d677e612d4a80fa14829386a64b33f",
		"6d2d695efc6b43b13c14111f2109608f1020e3e03b5e21cfdbc82fcd",
		"26a4859296b7e360b69cf40be7bd97ceaffa3d07743c8489fc47ca1b",
		"9a8cb5f2fdc288b7183c5b32d8e546fc2ed1ca4285eeae00c8b572ad",
		"8c623f357b5d0057b10cdb1a1593dab57cda7bdec9cf868157a79b97",
		true,
	},
	{
		"bf6bd7356a52b234fe24d25557200971fc803836f6fec3cade9642b13a8e7af10ab48b749de76aada9d8927f9b12f75a2c383ca7358e2566c4bb4f156fce1fd4e87ef8c8d2b6b1bdd351460feb22cdca0437ac10ca5e0abbbce9834483af20e4835386f8b1c96daaa41554ceee56730aac04f23a5c765812efa746051f396566",
		"14250131b2599939cf2d6bc491be80ddfe7ad9de644387ee67de2d40",
		"b5dc473b5d014cd504022043c475d3f93c319a8bdcb7262d9e741803",
		"4f21642f2201278a95339a80f75cc91f8321fcb3c9462562f6cbf145",
		"452a5f816ea1f75dee4fd514fa91a0d6a43622981966c59a1b371ff8",
		false,
	},
	{
		"0eb7f4032f90f0bd3cf9473d6d9525d264d14c031a10acd31a053443ed5fe919d5ac35e0be77813071b4062f0b5fdf58ad5f637b76b0b305aec18f82441b6e607b44cdf6e0e3c7c57f24e6fd565e39430af4a6b1d979821ed0175fa03e3125506847654d7e1ae904ce1190ae38dc5919e257bdac2db142a6e7cd4da6c2e83770",
		"d1f342b7790a1667370a1840255ac5bbbdc66f0bc00ae977d99260ac",
		"76416cabae2de9a1000b4646338b774baabfa3db4673790771220cdb",
		"bc85e3fc143d19a7271b2f9e1c04b86146073f3fab4dda1c3b1f35ca",
		"9a5c70ede3c48d5f43307a0c2a4871934424a3303b815df4bb0f128e",
		false,
	},
	{
		"5cc25348a05d85e56d4b03cec450128727bc537c66ec3a9fb613c151033b5e86878632249cba83adcefc6c1e35dcd31702929c3b57871cda5c18d1cf8f9650a25b917efaed56032e43b6fc398509f0d2997306d8f26675f3a8683b79ce17128e006aa0903b39eeb2f1001be65de0520115e6f919de902b32c38d691a69c58c92",
		"7e49a7abf16a792e4c7bbc4d251820a2abd22d9f2fc252a7bf59c9a6",
		"44236a8fb4791c228c26637c28ae59503a2f450d4cfb0dc42aa843b9",
		"084461b4050285a1a85b2113be76a17878d849e6bc489f4d84f15cd8",
		"079b5bddcc4d45de8dbdfd39f69817c7e5afa454a894d03ee1eaaac3",
		false,
	},
	{
		"1951533ce33afb58935e39e363d8497a8dd0442018fd96dff167b3b23d7206a3ee182a3194765df4768a3284e23b8696c199b4686e670d60c9d782f08794a4bccc05cffffbd1a12acd9eb1cfa01f7ebe124da66ecff4599ea7720c3be4bb7285daa1a86ebf53b042bd23208d468c1b3aa87381f8e1ad63e2b4c2ba5efcf05845",
		"31945d12ebaf4d81f02be2b1768ed80784bf35cf5e2ff53438c11493",
		"a62bebffac987e3b9d3ec451eb64c462cdf7b4aa0b1bbb131ceaa0a4",
		"bc3c32b19e42b710bca5c6aaa128564da3ddb2726b25f33603d2af3c",
		"ed1a719cc0c507edc5239d76fe50e2306c145ad252bd481da04180c0",
		false,
	},
}
// TestVectors checks Verify against the NIST P-224/SHA-1 test vectors in
// testVectors.
func TestVectors(t *testing.T) {
	hash := sha1.New()
	for i, vec := range testVectors {
		pubKey := PublicKey{
			Curve: elliptic.P224(),
			X:     fromHex(vec.Qx),
			Y:     fromHex(vec.Qy),
		}
		msg, _ := hex.DecodeString(vec.msg)
		hash.Reset()
		hash.Write(msg)
		digest := hash.Sum()
		sigR := fromHex(vec.r)
		sigS := fromHex(vec.s)
		if Verify(&pubKey, digest, sigR, sigS) != vec.ok {
			t.Errorf("%d: bad result", i)
		}
	}
}

View File

@ -24,6 +24,7 @@ import (
// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html // See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
type Curve struct { type Curve struct {
P *big.Int // the order of the underlying field P *big.Int // the order of the underlying field
N *big.Int // the order of the base point
B *big.Int // the constant of the curve equation B *big.Int // the constant of the curve equation
Gx, Gy *big.Int // (x,y) of the base point Gx, Gy *big.Int // (x,y) of the base point
BitSize int // the size of the underlying field BitSize int // the size of the underlying field
@ -315,6 +316,7 @@ func initP224() {
// See FIPS 186-3, section D.2.2 // See FIPS 186-3, section D.2.2
p224 = new(Curve) p224 = new(Curve)
p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10) p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10)
p224.N, _ = new(big.Int).SetString("26959946667150639794667015087019625940457807714424391721682722368061", 10)
p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16) p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16)
p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16) p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16)
p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16) p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16)
@ -325,6 +327,7 @@ func initP256() {
// See FIPS 186-3, section D.2.3 // See FIPS 186-3, section D.2.3
p256 = new(Curve) p256 = new(Curve)
p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10) p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16) p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16) p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16) p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
@ -335,6 +338,7 @@ func initP384() {
// See FIPS 186-3, section D.2.4 // See FIPS 186-3, section D.2.4
p384 = new(Curve) p384 = new(Curve)
p384.P, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319", 10) p384.P, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319", 10)
p384.N, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643", 10)
p384.B, _ = new(big.Int).SetString("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", 16) p384.B, _ = new(big.Int).SetString("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", 16)
p384.Gx, _ = new(big.Int).SetString("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", 16) p384.Gx, _ = new(big.Int).SetString("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", 16)
p384.Gy, _ = new(big.Int).SetString("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", 16) p384.Gy, _ = new(big.Int).SetString("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", 16)
@ -345,6 +349,7 @@ func initP521() {
// See FIPS 186-3, section D.2.5 // See FIPS 186-3, section D.2.5
p521 = new(Curve) p521 = new(Curve)
p521.P, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", 10) p521.P, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", 10)
p521.N, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449", 10)
p521.B, _ = new(big.Int).SetString("051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", 16) p521.B, _ = new(big.Int).SetString("051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", 16)
p521.Gx, _ = new(big.Int).SetString("c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", 16) p521.Gx, _ = new(big.Int).SetString("c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", 16)
p521.Gy, _ = new(big.Int).SetString("11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", 16) p521.Gy, _ = new(big.Int).SetString("11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", 16)

View File

@ -7,6 +7,7 @@
package packet package packet
import ( import (
"big"
"crypto/aes" "crypto/aes"
"crypto/cast5" "crypto/cast5"
"crypto/cipher" "crypto/cipher"
@ -166,10 +167,10 @@ func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader,
return return
} }
// serialiseHeader writes an OpenPGP packet header to w. See RFC 4880, section // serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
// 4.2. // 4.2.
func serialiseHeader(w io.Writer, ptype packetType, length int) (err os.Error) { func serializeHeader(w io.Writer, ptype packetType, length int) (err os.Error) {
var buf [5]byte var buf [6]byte
var n int var n int
buf[0] = 0x80 | 0x40 | byte(ptype) buf[0] = 0x80 | 0x40 | byte(ptype)
@ -178,16 +179,16 @@ func serialiseHeader(w io.Writer, ptype packetType, length int) (err os.Error) {
n = 2 n = 2
} else if length < 8384 { } else if length < 8384 {
length -= 192 length -= 192
buf[1] = byte(length >> 8) buf[1] = 192 + byte(length>>8)
buf[2] = byte(length) buf[2] = byte(length)
n = 3 n = 3
} else { } else {
buf[0] = 255 buf[1] = 255
buf[1] = byte(length >> 24) buf[2] = byte(length >> 24)
buf[2] = byte(length >> 16) buf[3] = byte(length >> 16)
buf[3] = byte(length >> 8) buf[4] = byte(length >> 8)
buf[4] = byte(length) buf[5] = byte(length)
n = 5 n = 6
} }
_, err = w.Write(buf[:n]) _, err = w.Write(buf[:n])
@ -371,7 +372,7 @@ func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
// readMPI reads a big integer from r. The bit length returned is the bit // readMPI reads a big integer from r. The bit length returned is the bit
// length that was specified in r. This is preserved so that the integer can be // length that was specified in r. This is preserved so that the integer can be
// reserialised exactly. // reserialized exactly.
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) { func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) {
var buf [2]byte var buf [2]byte
_, err = readFull(r, buf[0:]) _, err = readFull(r, buf[0:])
@ -385,7 +386,7 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) {
return return
} }
// writeMPI serialises a big integer to r. // writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) { func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) {
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil { if err == nil {
@ -393,3 +394,8 @@ func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) {
} }
return return
} }
// writeBig serializes a *big.Int to w.
func writeBig(w io.Writer, i *big.Int) os.Error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}

View File

@ -190,3 +190,23 @@ func TestReadHeader(t *testing.T) {
} }
} }
} }
func TestSerializeHeader(t *testing.T) {
tag := packetTypePublicKey
lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000}
for _, length := range lengths {
buf := bytes.NewBuffer(nil)
serializeHeader(buf, tag, length)
tag2, length2, _, err := readHeader(buf)
if err != nil {
t.Errorf("length %d, err: %s", length, err)
}
if tag2 != tag {
t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag)
}
if int(length2) != length {
t.Errorf("length %d, length incorrect (got %d)", length, length2)
}
}
}

View File

@ -8,6 +8,7 @@ import (
"big" "big"
"bytes" "bytes"
"crypto/cipher" "crypto/cipher"
"crypto/dsa"
"crypto/openpgp/error" "crypto/openpgp/error"
"crypto/openpgp/s2k" "crypto/openpgp/s2k"
"crypto/rsa" "crypto/rsa"
@ -134,7 +135,16 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) os.Error {
} }
func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) { func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) {
// TODO(agl): support DSA and ECDSA private keys. switch pk.PublicKey.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
return pk.parseRSAPrivateKey(data)
case PubKeyAlgoDSA:
return pk.parseDSAPrivateKey(data)
}
panic("impossible")
}
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err os.Error) {
rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
rsaPriv := new(rsa.PrivateKey) rsaPriv := new(rsa.PrivateKey)
rsaPriv.PublicKey = *rsaPub rsaPriv.PublicKey = *rsaPub
@ -162,3 +172,22 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) {
return nil return nil
} }
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err os.Error) {
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
dsaPriv := new(dsa.PrivateKey)
dsaPriv.PublicKey = *dsaPub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
dsaPriv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = dsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}

View File

@ -11,6 +11,7 @@ import (
"crypto/rsa" "crypto/rsa"
"crypto/sha1" "crypto/sha1"
"encoding/binary" "encoding/binary"
"fmt"
"hash" "hash"
"io" "io"
"os" "os"
@ -178,12 +179,6 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.E
return error.InvalidArgumentError("public key cannot generate signatures") return error.InvalidArgumentError("public key cannot generate signatures")
} }
rsaPublicKey, ok := pk.PublicKey.(*rsa.PublicKey)
if !ok {
// TODO(agl): support DSA and ECDSA keys.
return error.UnsupportedError("non-RSA public key")
}
signed.Write(sig.HashSuffix) signed.Write(sig.HashSuffix)
hashBytes := signed.Sum() hashBytes := signed.Sum()
@ -191,11 +186,28 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.E
return error.SignatureError("hash tag doesn't match") return error.SignatureError("hash tag doesn't match")
} }
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.Signature) if pk.PubKeyAlgo != sig.PubKeyAlgo {
if err != nil { return error.InvalidArgumentError("public key and signature use different algorithms")
return error.SignatureError("RSA verification failure")
} }
return nil
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature)
if err != nil {
return error.SignatureError("RSA verification failure")
}
return nil
case PubKeyAlgoDSA:
dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
if !dsa.Verify(dsaPublicKey, hashBytes, sig.DSASigR, sig.DSASigS) {
return error.SignatureError("DSA verification failure")
}
return nil
default:
panic("shouldn't happen")
}
panic("unreachable")
} }
// VerifyKeySignature returns nil iff sig is a valid signature, make by this // VerifyKeySignature returns nil iff sig is a valid signature, make by this
@ -239,9 +251,21 @@ func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Er
return pk.VerifySignature(h, sig) return pk.VerifySignature(h, sig)
} }
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
}
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKey) KeyIdShortString() string {
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}
// A parsedMPI is used to store the contents of a big integer, along with the // A parsedMPI is used to store the contents of a big integer, along with the
// bit length that was specified in the original input. This allows the MPI to // bit length that was specified in the original input. This allows the MPI to
// be reserialised exactly. // be reserialized exactly.
type parsedMPI struct { type parsedMPI struct {
bytes []byte bytes []byte
bitLength uint16 bitLength uint16

View File

@ -16,9 +16,11 @@ var pubKeyTests = []struct {
creationTime uint32 creationTime uint32
pubKeyAlgo PublicKeyAlgorithm pubKeyAlgo PublicKeyAlgorithm
keyId uint64 keyId uint64
keyIdString string
keyIdShort string
}{ }{
{rsaPkDataHex, rsaFingerprintHex, 0x4d3c5c10, PubKeyAlgoRSA, 0xa34d7e18c20c31bb}, {rsaPkDataHex, rsaFingerprintHex, 0x4d3c5c10, PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"},
{dsaPkDataHex, dsaFingerprintHex, 0x4d432f89, PubKeyAlgoDSA, 0x8e8fbe54062f19ed}, {dsaPkDataHex, dsaFingerprintHex, 0x4d432f89, PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"},
} }
func TestPublicKeyRead(t *testing.T) { func TestPublicKeyRead(t *testing.T) {
@ -46,6 +48,12 @@ func TestPublicKeyRead(t *testing.T) {
if pk.KeyId != test.keyId { if pk.KeyId != test.keyId {
t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
} }
if g, e := pk.KeyIdString(), test.keyIdString; g != e {
t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
}
if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
}
} }
} }

View File

@ -5,7 +5,9 @@
package packet package packet
import ( import (
"big"
"crypto" "crypto"
"crypto/dsa"
"crypto/openpgp/error" "crypto/openpgp/error"
"crypto/openpgp/s2k" "crypto/openpgp/s2k"
"crypto/rand" "crypto/rand"
@ -29,7 +31,9 @@ type Signature struct {
// of bad signed data. // of bad signed data.
HashTag [2]byte HashTag [2]byte
CreationTime uint32 // Unix epoch time CreationTime uint32 // Unix epoch time
Signature []byte
RSASignature []byte
DSASigR, DSASigS *big.Int
// The following are optional so are nil when not included in the // The following are optional so are nil when not included in the
// signature. // signature.
@ -66,7 +70,7 @@ func (sig *Signature) parse(r io.Reader) (err os.Error) {
sig.SigType = SignatureType(buf[0]) sig.SigType = SignatureType(buf[0])
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
switch sig.PubKeyAlgo { switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
default: default:
err = error.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) err = error.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
return return
@ -122,8 +126,20 @@ func (sig *Signature) parse(r io.Reader) (err os.Error) {
return return
} }
// We have already checked that the public key algorithm is RSA. switch sig.PubKeyAlgo {
sig.Signature, _, err = readMPI(r) case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sig.RSASignature, _, err = readMPI(r)
case PubKeyAlgoDSA:
var rBytes, sBytes []byte
rBytes, _, err = readMPI(r)
sig.DSASigR = new(big.Int).SetBytes(rBytes)
if err == nil {
sBytes, _, err = readMPI(r)
sig.DSASigS = new(big.Int).SetBytes(sBytes)
}
default:
panic("unreachable")
}
return return
} }
@ -316,8 +332,8 @@ func subpacketLengthLength(length int) int {
return 5 return 5
} }
// serialiseSubpacketLength marshals the given length into to. // serializeSubpacketLength marshals the given length into to.
func serialiseSubpacketLength(to []byte, length int) int { func serializeSubpacketLength(to []byte, length int) int {
if length < 192 { if length < 192 {
to[0] = byte(length) to[0] = byte(length)
return 1 return 1
@ -336,7 +352,7 @@ func serialiseSubpacketLength(to []byte, length int) int {
return 5 return 5
} }
// subpacketsLength returns the serialised length, in bytes, of the given // subpacketsLength returns the serialized length, in bytes, of the given
// subpackets. // subpackets.
func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
for _, subpacket := range subpackets { for _, subpacket := range subpackets {
@ -349,11 +365,11 @@ func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
return return
} }
// serialiseSubpackets marshals the given subpackets into to. // serializeSubpackets marshals the given subpackets into to.
func serialiseSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
for _, subpacket := range subpackets { for _, subpacket := range subpackets {
if subpacket.hashed == hashed { if subpacket.hashed == hashed {
n := serialiseSubpacketLength(to, len(subpacket.contents)+1) n := serializeSubpacketLength(to, len(subpacket.contents)+1)
to[n] = byte(subpacket.subpacketType) to[n] = byte(subpacket.subpacketType)
to = to[1+n:] to = to[1+n:]
n = copy(to, subpacket.contents) n = copy(to, subpacket.contents)
@ -381,7 +397,7 @@ func (sig *Signature) buildHashSuffix() (err os.Error) {
} }
sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
sig.HashSuffix[5] = byte(hashedSubpacketsLen) sig.HashSuffix[5] = byte(hashedSubpacketsLen)
serialiseSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
trailer := sig.HashSuffix[l:] trailer := sig.HashSuffix[l:]
trailer[0] = 4 trailer[0] = 4
trailer[1] = 0xff trailer[1] = 0xff
@ -392,32 +408,66 @@ func (sig *Signature) buildHashSuffix() (err os.Error) {
return return
} }
// SignRSA signs a message with an RSA private key. The hash, h, must contain func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err os.Error) {
// the hash of message to be signed and will be mutated by this function.
func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) {
err = sig.buildHashSuffix() err = sig.buildHashSuffix()
if err != nil { if err != nil {
return return
} }
h.Write(sig.HashSuffix) h.Write(sig.HashSuffix)
digest := h.Sum() digest = h.Sum()
copy(sig.HashTag[:], digest) copy(sig.HashTag[:], digest)
sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest)
return return
} }
// Serialize marshals sig to w. SignRSA must have been called first. // SignRSA signs a message with an RSA private key. The hash, h, must contain
// the hash of the message to be signed and will be mutated by this function.
// On success, the signature is stored in sig. Call Serialize to write it out.
func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) {
digest, err := sig.signPrepareHash(h)
if err != nil {
return
}
sig.RSASignature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest)
return
}
// SignDSA signs a message with a DSA private key. The hash, h, must contain
// the hash of the message to be signed and will be mutated by this function.
// On success, the signature is stored in sig. Call Serialize to write it out.
func (sig *Signature) SignDSA(h hash.Hash, priv *dsa.PrivateKey) (err os.Error) {
digest, err := sig.signPrepareHash(h)
if err != nil {
return
}
sig.DSASigR, sig.DSASigS, err = dsa.Sign(rand.Reader, priv, digest)
return
}
// Serialize marshals sig to w. SignRSA or SignDSA must have been called first.
func (sig *Signature) Serialize(w io.Writer) (err os.Error) { func (sig *Signature) Serialize(w io.Writer) (err os.Error) {
if sig.Signature == nil { if sig.RSASignature == nil && sig.DSASigR == nil {
return error.InvalidArgumentError("Signature: need to call SignRSA before Serialize") return error.InvalidArgumentError("Signature: need to call SignRSA or SignDSA before Serialize")
}
sigLength := 0
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sigLength = len(sig.RSASignature)
case PubKeyAlgoDSA:
sigLength = 2 /* MPI length */
sigLength += (sig.DSASigR.BitLen() + 7) / 8
sigLength += 2 /* MPI length */
sigLength += (sig.DSASigS.BitLen() + 7) / 8
default:
panic("impossible")
} }
unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
length := len(sig.HashSuffix) - 6 /* trailer not included */ + length := len(sig.HashSuffix) - 6 /* trailer not included */ +
2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
2 /* hash tag */ + 2 /* length of signature MPI */ + len(sig.Signature) 2 /* hash tag */ + 2 /* length of signature MPI */ + sigLength
err = serialiseHeader(w, packetTypeSignature, length) err = serializeHeader(w, packetTypeSignature, length)
if err != nil { if err != nil {
return return
} }
@ -430,7 +480,7 @@ func (sig *Signature) Serialize(w io.Writer) (err os.Error) {
unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
unhashedSubpackets[1] = byte(unhashedSubpacketsLen) unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
serialiseSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
_, err = w.Write(unhashedSubpackets) _, err = w.Write(unhashedSubpackets)
if err != nil { if err != nil {
@ -440,7 +490,19 @@ func (sig *Signature) Serialize(w io.Writer) (err os.Error) {
if err != nil { if err != nil {
return return
} }
return writeMPI(w, 8*uint16(len(sig.Signature)), sig.Signature)
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
err = writeMPI(w, 8*uint16(len(sig.RSASignature)), sig.RSASignature)
case PubKeyAlgoDSA:
err = writeBig(w, sig.DSASigR)
if err == nil {
err = writeBig(w, sig.DSASigS)
}
default:
panic("impossible")
}
return
} }
// outputSubpacket represents a subpacket to be marshaled. // outputSubpacket represents a subpacket to be marshaled.

File diff suppressed because one or more lines are too long

View File

@ -6,6 +6,7 @@ package openpgp
import ( import (
"crypto" "crypto"
"crypto/dsa"
"crypto/openpgp/armor" "crypto/openpgp/armor"
"crypto/openpgp/error" "crypto/openpgp/error"
"crypto/openpgp/packet" "crypto/openpgp/packet"
@ -39,7 +40,7 @@ func DetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error {
// ArmoredDetachSignText signs message (after canonicalising the line endings) // ArmoredDetachSignText signs message (after canonicalising the line endings)
// with the private key from signer (which must already have been decrypted) // with the private key from signer (which must already have been decrypted)
// and writes an armored signature to w. // and writes an armored signature to w.
func SignTextDetachedArmored(w io.Writer, signer *Entity, message io.Reader) os.Error { func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error {
return armoredDetachSign(w, signer, message, packet.SigTypeText) return armoredDetachSign(w, signer, message, packet.SigTypeText)
} }
@ -80,6 +81,9 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S
case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSASignOnly: case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSASignOnly:
priv := signer.PrivateKey.PrivateKey.(*rsa.PrivateKey) priv := signer.PrivateKey.PrivateKey.(*rsa.PrivateKey)
err = sig.SignRSA(h, priv) err = sig.SignRSA(h, priv)
case packet.PubKeyAlgoDSA:
priv := signer.PrivateKey.PrivateKey.(*dsa.PrivateKey)
err = sig.SignDSA(h, priv)
default: default:
err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
} }

View File

@ -18,7 +18,7 @@ func TestSignDetached(t *testing.T) {
t.Error(err) t.Error(err)
} }
testDetachedSignature(t, kring, out, signedInput, "check") testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
} }
func TestSignTextDetached(t *testing.T) { func TestSignTextDetached(t *testing.T) {
@ -30,5 +30,17 @@ func TestSignTextDetached(t *testing.T) {
t.Error(err) t.Error(err)
} }
testDetachedSignature(t, kring, out, signedInput, "check") testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
}
func TestSignDetachedDSA(t *testing.T) {
kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex))
out := bytes.NewBuffer(nil)
message := bytes.NewBufferString(signedInput)
err := DetachSign(out, kring[0], message)
if err != nil {
t.Error(err)
}
testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId)
} }

View File

@ -7,6 +7,7 @@ package tls
import ( import (
"crypto/rand" "crypto/rand"
"crypto/rsa" "crypto/rsa"
"crypto/x509"
"io" "io"
"io/ioutil" "io/ioutil"
"sync" "sync"
@ -95,6 +96,9 @@ type ConnectionState struct {
HandshakeComplete bool HandshakeComplete bool
CipherSuite uint16 CipherSuite uint16
NegotiatedProtocol string NegotiatedProtocol string
// the certificate chain that was presented by the other side
PeerCertificates []*x509.Certificate
} }
// A Config structure is used to configure a TLS client or server. After one // A Config structure is used to configure a TLS client or server. After one

View File

@ -762,6 +762,7 @@ func (c *Conn) ConnectionState() ConnectionState {
if c.handshakeComplete { if c.handshakeComplete {
state.NegotiatedProtocol = c.clientProtocol state.NegotiatedProtocol = c.clientProtocol
state.CipherSuite = c.cipherSuite state.CipherSuite = c.cipherSuite
state.PeerCertificates = c.peerCertificates
} }
return state return state
@ -776,15 +777,6 @@ func (c *Conn) OCSPResponse() []byte {
return c.ocspResponse return c.ocspResponse
} }
// PeerCertificates returns the certificate chain that was presented by the
// other side.
func (c *Conn) PeerCertificates() []*x509.Certificate {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
return c.peerCertificates
}
// VerifyHostname checks that the peer certificate chain is valid for // VerifyHostname checks that the peer certificate chain is valid for
// connecting to host. If so, it returns nil; if not, it returns an os.Error // connecting to host. If so, it returns nil; if not, it returns an os.Error
// describing the problem. // describing the problem.

View File

@ -25,7 +25,7 @@ func main() {
priv, err := rsa.GenerateKey(rand.Reader, 1024) priv, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil { if err != nil {
log.Exitf("failed to generate private key: %s", err) log.Fatalf("failed to generate private key: %s", err)
return return
} }
@ -46,13 +46,13 @@ func main() {
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil { if err != nil {
log.Exitf("Failed to create certificate: %s", err) log.Fatalf("Failed to create certificate: %s", err)
return return
} }
certOut, err := os.Open("cert.pem", os.O_WRONLY|os.O_CREAT, 0644) certOut, err := os.Open("cert.pem", os.O_WRONLY|os.O_CREAT, 0644)
if err != nil { if err != nil {
log.Exitf("failed to open cert.pem for writing: %s", err) log.Fatalf("failed to open cert.pem for writing: %s", err)
return return
} }
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})

View File

@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) {
return nil, os.NewError("debug/proc not implemented on OS X") return nil, os.NewError("debug/proc not implemented on OS X")
} }
func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) {
return Attach(0) return Attach(0)
} }

View File

@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) {
return nil, os.NewError("debug/proc not implemented on FreeBSD") return nil, os.NewError("debug/proc not implemented on FreeBSD")
} }
func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) {
return Attach(0) return Attach(0)
} }

View File

@ -1279,25 +1279,31 @@ func Attach(pid int) (Process, os.Error) {
return p, nil return p, nil
} }
// ForkExec forks the current process and execs argv0, stopping the // StartProcess forks the current process and execs argv0, stopping the
// new process after the exec syscall. See os.ForkExec for additional // new process after the exec syscall. See os.StartProcess for additional
// details. // details.
func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) {
sysattr := &syscall.ProcAttr{
Dir: attr.Dir,
Env: attr.Env,
Ptrace: true,
}
p := newProcess(-1) p := newProcess(-1)
// Create array of integer (system) fds. // Create array of integer (system) fds.
intfd := make([]int, len(fd)) intfd := make([]int, len(attr.Files))
for i, f := range fd { for i, f := range attr.Files {
if f == nil { if f == nil {
intfd[i] = -1 intfd[i] = -1
} else { } else {
intfd[i] = f.Fd() intfd[i] = f.Fd()
} }
} }
sysattr.Files = intfd
// Fork from the monitor thread so we get the right tracer pid. // Fork from the monitor thread so we get the right tracer pid.
err := p.do(func() os.Error { err := p.do(func() os.Error {
pid, errno := syscall.PtraceForkExec(argv0, argv, envv, dir, intfd) pid, _, errno := syscall.StartProcess(argv0, argv, sysattr)
if errno != 0 { if errno != 0 {
return &os.PathError{"fork/exec", argv0, os.Errno(errno)} return &os.PathError{"fork/exec", argv0, os.Errno(errno)}
} }

View File

@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) {
return nil, os.NewError("debug/proc not implemented on windows") return nil, os.NewError("debug/proc not implemented on windows")
} }
func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) {
return Attach(0) return Attach(0)
} }

View File

@ -75,17 +75,19 @@ func modeToFiles(mode, fd int) (*os.File, *os.File, os.Error) {
// Run starts the named binary running with // Run starts the named binary running with
// arguments argv and environment envv. // arguments argv and environment envv.
// If the dir argument is not empty, the child changes
// into the directory before executing the binary.
// It returns a pointer to a new Cmd representing // It returns a pointer to a new Cmd representing
// the command or an error. // the command or an error.
// //
// The parameters stdin, stdout, and stderr // The arguments stdin, stdout, and stderr
// specify how to handle standard input, output, and error. // specify how to handle standard input, output, and error.
// The choices are DevNull (connect to /dev/null), // The choices are DevNull (connect to /dev/null),
// PassThrough (connect to the current process's standard stream), // PassThrough (connect to the current process's standard stream),
// Pipe (connect to an operating system pipe), and // Pipe (connect to an operating system pipe), and
// MergeWithStdout (only for standard error; use the same // MergeWithStdout (only for standard error; use the same
// file descriptor as was used for standard output). // file descriptor as was used for standard output).
// If a parameter is Pipe, then the corresponding field (Stdin, Stdout, Stderr) // If an argument is Pipe, then the corresponding field (Stdin, Stdout, Stderr)
// of the returned Cmd is the other end of the pipe. // of the returned Cmd is the other end of the pipe.
// Otherwise the field in Cmd is nil. // Otherwise the field in Cmd is nil.
func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int) (c *Cmd, err os.Error) { func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int) (c *Cmd, err os.Error) {
@ -105,7 +107,7 @@ func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int
} }
// Run command. // Run command.
c.Process, err = os.StartProcess(name, argv, envv, dir, fd[0:]) c.Process, err = os.StartProcess(name, argv, &os.ProcAttr{Dir: dir, Files: fd[:], Env: envv})
if err != nil { if err != nil {
goto Error goto Error
} }

View File

@ -118,3 +118,55 @@ func TestAddEnvVar(t *testing.T) {
t.Fatal("close:", err) t.Fatal("close:", err)
} }
} }
var tryargs = []string{
`2`,
`2 `,
"2 \t",
`2" "`,
`2 ab `,
`2 "ab" `,
`2 \ `,
`2 \\ `,
`2 \" `,
`2 \`,
`2\`,
`2"`,
`2\"`,
`2 "`,
`2 \"`,
``,
`2 ^ `,
`2 \^`,
}
func TestArgs(t *testing.T) {
for _, a := range tryargs {
argv := []string{
"awk",
`BEGIN{printf("%s|%s|%s",ARGV[1],ARGV[2],ARGV[3])}`,
"/dev/null",
a,
"EOF",
}
exe, err := LookPath(argv[0])
if err != nil {
t.Fatal("run:", err)
}
cmd, err := Run(exe, argv, nil, "", DevNull, Pipe, DevNull)
if err != nil {
t.Fatal("run:", err)
}
buf, err := ioutil.ReadAll(cmd.Stdout)
if err != nil {
t.Fatal("read:", err)
}
expect := "/dev/null|" + a + "|EOF"
if string(buf) != expect {
t.Errorf("read: got %q expect %q", buf, expect)
}
if err = cmd.Close(); err != nil {
t.Fatal("close:", err)
}
}
}

View File

@ -287,9 +287,6 @@ func (a *stmtCompiler) compile(s ast.Stmt) {
case *ast.SwitchStmt: case *ast.SwitchStmt:
a.compileSwitchStmt(s) a.compileSwitchStmt(s)
case *ast.TypeCaseClause:
notimpl = true
case *ast.TypeSwitchStmt: case *ast.TypeSwitchStmt:
notimpl = true notimpl = true
@ -1012,13 +1009,13 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) {
a.diagAt(clause.Pos(), "switch statement must contain case clauses") a.diagAt(clause.Pos(), "switch statement must contain case clauses")
continue continue
} }
if clause.Values == nil { if clause.List == nil {
if hasDefault { if hasDefault {
a.diagAt(clause.Pos(), "switch statement contains more than one default case") a.diagAt(clause.Pos(), "switch statement contains more than one default case")
} }
hasDefault = true hasDefault = true
} else { } else {
ncases += len(clause.Values) ncases += len(clause.List)
} }
} }
@ -1030,7 +1027,7 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) {
if !ok { if !ok {
continue continue
} }
for _, v := range clause.Values { for _, v := range clause.List {
e := condbc.compileExpr(condbc.block, false, v) e := condbc.compileExpr(condbc.block, false, v)
switch { switch {
case e == nil: case e == nil:
@ -1077,8 +1074,8 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) {
// Save jump PC's // Save jump PC's
pc := a.nextPC() pc := a.nextPC()
if clause.Values != nil { if clause.List != nil {
for _ = range clause.Values { for _ = range clause.List {
casePCs[i] = &pc casePCs[i] = &pc
i++ i++
} }

View File

@ -27,7 +27,7 @@ var stmtTests = []test{
CErr("i, u := 1, 2", atLeastOneDecl), CErr("i, u := 1, 2", atLeastOneDecl),
Val2("i, x := 2, f", "i", 2, "x", 1.0), Val2("i, x := 2, f", "i", 2, "x", 1.0),
// Various errors // Various errors
CErr("1 := 2", "left side of := must be a name"), CErr("1 := 2", "expected identifier"),
CErr("c, a := 1, 1", "cannot assign"), CErr("c, a := 1, 1", "cannot assign"),
// Unpacking // Unpacking
Val2("x, y := oneTwo()", "x", 1, "y", 2), Val2("x, y := oneTwo()", "x", 1, "y", 2),

View File

@ -160,7 +160,7 @@ func cmdLoad(args []byte) os.Error {
} else { } else {
fname = parts[0] fname = parts[0]
} }
tproc, err = proc.ForkExec(fname, parts, os.Environ(), "", []*os.File{os.Stdin, os.Stdout, os.Stderr}) tproc, err = proc.StartProcess(fname, parts, &os.ProcAttr{Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}})
if err != nil { if err != nil {
return err return err
} }

View File

@ -269,7 +269,7 @@ func Iter() <-chan KeyValue {
} }
func expvarHandler(w http.ResponseWriter, r *http.Request) { func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.SetHeader("content-type", "application/json; charset=utf-8") w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n") fmt.Fprintf(w, "{\n")
first := true first := true
for name, value := range vars { for name, value := range vars {

View File

@ -56,7 +56,7 @@
flag.Bool(...) // global options flag.Bool(...) // global options
flag.Parse() // parse leading command flag.Parse() // parse leading command
subcmd := flag.Args(0) subcmd := flag.Arg[0]
switch subcmd { switch subcmd {
// add per-subcommand options // add per-subcommand options
} }
@ -68,6 +68,7 @@ package flag
import ( import (
"fmt" "fmt"
"os" "os"
"sort"
"strconv" "strconv"
) )
@ -205,16 +206,34 @@ type allFlags struct {
var flags *allFlags var flags *allFlags
// VisitAll visits the flags, calling fn for each. It visits all flags, even those not set. // sortFlags returns the flags as a slice in lexicographical sorted order.
func sortFlags(flags map[string]*Flag) []*Flag {
list := make(sort.StringArray, len(flags))
i := 0
for _, f := range flags {
list[i] = f.Name
i++
}
list.Sort()
result := make([]*Flag, len(list))
for i, name := range list {
result[i] = flags[name]
}
return result
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) { func VisitAll(fn func(*Flag)) {
for _, f := range flags.formal { for _, f := range sortFlags(flags.formal) {
fn(f) fn(f)
} }
} }
// Visit visits the flags, calling fn for each. It visits only those flags that have been set. // Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
func Visit(fn func(*Flag)) { func Visit(fn func(*Flag)) {
for _, f := range flags.actual { for _, f := range sortFlags(flags.actual) {
fn(f) fn(f)
} }
} }
@ -260,7 +279,9 @@ var Usage = func() {
var panicOnError = false var panicOnError = false
func fail() { // failf prints to standard error a formatted error and Usage, and then exits the program.
func failf(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
Usage() Usage()
if panicOnError { if panicOnError {
panic("flag parse error") panic("flag parse error")
@ -268,6 +289,7 @@ func fail() {
os.Exit(2) os.Exit(2)
} }
// NFlag returns the number of flags that have been set.
func NFlag() int { return len(flags.actual) } func NFlag() int { return len(flags.actual) }
// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
@ -415,8 +437,7 @@ func (f *allFlags) parseOne() (ok bool) {
} }
name := s[num_minuses:] name := s[num_minuses:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' { if len(name) == 0 || name[0] == '-' || name[0] == '=' {
fmt.Fprintln(os.Stderr, "bad flag syntax:", s) failf("bad flag syntax: %s\n", s)
fail()
} }
// it's a flag. does it have an argument? // it's a flag. does it have an argument?
@ -434,14 +455,12 @@ func (f *allFlags) parseOne() (ok bool) {
m := flags.formal m := flags.formal
flag, alreadythere := m[name] // BUG flag, alreadythere := m[name] // BUG
if !alreadythere { if !alreadythere {
fmt.Fprintf(os.Stderr, "flag provided but not defined: -%s\n", name) failf("flag provided but not defined: -%s\n", name)
fail()
} }
if fv, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg if fv, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg
if has_value { if has_value {
if !fv.Set(value) { if !fv.Set(value) {
fmt.Fprintf(os.Stderr, "invalid boolean value %q for flag: -%s\n", value, name) failf("invalid boolean value %q for flag: -%s\n", value, name)
fail()
} }
} else { } else {
fv.Set("true") fv.Set("true")
@ -454,13 +473,11 @@ func (f *allFlags) parseOne() (ok bool) {
value, f.args = f.args[0], f.args[1:] value, f.args = f.args[0], f.args[1:]
} }
if !has_value { if !has_value {
fmt.Fprintf(os.Stderr, "flag needs an argument: -%s\n", name) failf("flag needs an argument: -%s\n", name)
fail()
} }
ok = flag.Value.Set(value) ok = flag.Value.Set(value)
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "invalid value %q for flag: -%s\n", value, name) failf("invalid value %q for flag: -%s\n", value, name)
fail()
} }
} }
flags.actual[name] = flag flags.actual[name] = flag

View File

@ -8,6 +8,7 @@ import (
. "flag" . "flag"
"fmt" "fmt"
"os" "os"
"sort"
"testing" "testing"
) )
@ -77,6 +78,12 @@ func TestEverything(t *testing.T) {
t.Log(k, *v) t.Log(k, *v)
} }
} }
// Now test they're visited in sort order.
var flagNames []string
Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) })
if !sort.StringsAreSorted(flagNames) {
t.Errorf("flag names not sorted: %v", flagNames)
}
} }
func TestUsage(t *testing.T) { func TestUsage(t *testing.T) {

View File

@ -107,7 +107,7 @@ func (f *fmt) writePadding(n int, padding []byte) {
} }
// Append b to f.buf, padded on left (w > 0) or right (w < 0 or f.minus) // Append b to f.buf, padded on left (w > 0) or right (w < 0 or f.minus)
// clear flags aftewards. // clear flags afterwards.
func (f *fmt) pad(b []byte) { func (f *fmt) pad(b []byte) {
var padding []byte var padding []byte
var left, right int var left, right int
@ -124,7 +124,7 @@ func (f *fmt) pad(b []byte) {
} }
// append s to buf, padded on left (w > 0) or right (w < 0 or f.minus). // append s to buf, padded on left (w > 0) or right (w < 0 or f.minus).
// clear flags aftewards. // clear flags afterwards.
func (f *fmt) padString(s string) { func (f *fmt) padString(s string) {
var padding []byte var padding []byte
var left, right int var left, right int

View File

@ -35,10 +35,15 @@ type ScanState interface {
ReadRune() (rune int, size int, err os.Error) ReadRune() (rune int, size int, err os.Error)
// UnreadRune causes the next call to ReadRune to return the same rune. // UnreadRune causes the next call to ReadRune to return the same rune.
UnreadRune() os.Error UnreadRune() os.Error
// Token returns the next space-delimited token from the input. If // Token skips space in the input if skipSpace is true, then returns the
// a width has been specified, the returned token will be no longer // run of Unicode code points c satisfying f(c). If f is nil,
// than the width. // !unicode.IsSpace(c) is used; that is, the token will hold non-space
Token() (token string, err os.Error) // characters. Newlines are treated as space unless the scan operation
// is Scanln, Fscanln or Sscanln, in which case a newline is treated as
// EOF. The returned slice points to shared data that may be overwritten
// by the next call to Token, a call to a Scan function using the ScanState
// as input, or when the calling Scan method returns.
Token(skipSpace bool, f func(int) bool) (token []byte, err os.Error)
// Width returns the value of the width option and whether it has been set. // Width returns the value of the width option and whether it has been set.
// The unit is Unicode code points. // The unit is Unicode code points.
Width() (wid int, ok bool) Width() (wid int, ok bool)
@ -134,7 +139,7 @@ type scanError struct {
err os.Error err os.Error
} }
const EOF = -1 const eof = -1
// ss is the internal implementation of ScanState. // ss is the internal implementation of ScanState.
type ss struct { type ss struct {
@ -202,7 +207,7 @@ func (s *ss) getRune() (rune int) {
rune, _, err := s.ReadRune() rune, _, err := s.ReadRune()
if err != nil { if err != nil {
if err == os.EOF { if err == os.EOF {
return EOF return eof
} }
s.error(err) s.error(err)
} }
@ -214,7 +219,7 @@ func (s *ss) getRune() (rune int) {
// syntax error. // syntax error.
func (s *ss) mustReadRune() (rune int) { func (s *ss) mustReadRune() (rune int) {
rune = s.getRune() rune = s.getRune()
if rune == EOF { if rune == eof {
s.error(io.ErrUnexpectedEOF) s.error(io.ErrUnexpectedEOF)
} }
return return
@ -238,7 +243,7 @@ func (s *ss) errorString(err string) {
panic(scanError{os.ErrorString(err)}) panic(scanError{os.ErrorString(err)})
} }
func (s *ss) Token() (tok string, err os.Error) { func (s *ss) Token(skipSpace bool, f func(int) bool) (tok []byte, err os.Error) {
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
if se, ok := e.(scanError); ok { if se, ok := e.(scanError); ok {
@ -248,10 +253,19 @@ func (s *ss) Token() (tok string, err os.Error) {
} }
} }
}() }()
tok = s.token() if f == nil {
f = notSpace
}
s.buf.Reset()
tok = s.token(skipSpace, f)
return return
} }
// notSpace is the default scanning function used in Token.
func notSpace(r int) bool {
return !unicode.IsSpace(r)
}
// readRune is a structure to enable reading UTF-8 encoded code points // readRune is a structure to enable reading UTF-8 encoded code points
// from an io.Reader. It is used if the Reader given to the scanner does // from an io.Reader. It is used if the Reader given to the scanner does
// not already implement io.RuneReader. // not already implement io.RuneReader.
@ -364,7 +378,7 @@ func (s *ss) free(old ssave) {
func (s *ss) skipSpace(stopAtNewline bool) { func (s *ss) skipSpace(stopAtNewline bool) {
for { for {
rune := s.getRune() rune := s.getRune()
if rune == EOF { if rune == eof {
return return
} }
if rune == '\n' { if rune == '\n' {
@ -384,24 +398,27 @@ func (s *ss) skipSpace(stopAtNewline bool) {
} }
} }
// token returns the next space-delimited string from the input. It // token returns the next space-delimited string from the input. It
// skips white space. For Scanln, it stops at newlines. For Scan, // skips white space. For Scanln, it stops at newlines. For Scan,
// newlines are treated as spaces. // newlines are treated as spaces.
func (s *ss) token() string { func (s *ss) token(skipSpace bool, f func(int) bool) []byte {
s.skipSpace(false) if skipSpace {
s.skipSpace(false)
}
// read until white space or newline // read until white space or newline
for { for {
rune := s.getRune() rune := s.getRune()
if rune == EOF { if rune == eof {
break break
} }
if unicode.IsSpace(rune) { if !f(rune) {
s.UnreadRune() s.UnreadRune()
break break
} }
s.buf.WriteRune(rune) s.buf.WriteRune(rune)
} }
return s.buf.String() return s.buf.Bytes()
} }
// typeError indicates that the type of the operand did not match the format // typeError indicates that the type of the operand did not match the format
@ -416,7 +433,7 @@ var boolError = os.ErrorString("syntax error scanning boolean")
// If accept is true, it puts the character into the input token. // If accept is true, it puts the character into the input token.
func (s *ss) consume(ok string, accept bool) bool { func (s *ss) consume(ok string, accept bool) bool {
rune := s.getRune() rune := s.getRune()
if rune == EOF { if rune == eof {
return false return false
} }
if strings.IndexRune(ok, rune) >= 0 { if strings.IndexRune(ok, rune) >= 0 {
@ -425,7 +442,7 @@ func (s *ss) consume(ok string, accept bool) bool {
} }
return true return true
} }
if rune != EOF && accept { if rune != eof && accept {
s.UnreadRune() s.UnreadRune()
} }
return false return false
@ -434,7 +451,7 @@ func (s *ss) consume(ok string, accept bool) bool {
// peek reports whether the next character is in the ok string, without consuming it. // peek reports whether the next character is in the ok string, without consuming it.
func (s *ss) peek(ok string) bool { func (s *ss) peek(ok string) bool {
rune := s.getRune() rune := s.getRune()
if rune != EOF { if rune != eof {
s.UnreadRune() s.UnreadRune()
} }
return strings.IndexRune(ok, rune) >= 0 return strings.IndexRune(ok, rune) >= 0
@ -729,7 +746,7 @@ func (s *ss) convertString(verb int) (str string) {
case 'x': case 'x':
str = s.hexString() str = s.hexString()
default: default:
str = s.token() // %s and %v just return the next word str = string(s.token(true, notSpace)) // %s and %v just return the next word
} }
// Empty strings other than with %q are not OK. // Empty strings other than with %q are not OK.
if len(str) == 0 && verb != 'q' && s.maxWid > 0 { if len(str) == 0 && verb != 'q' && s.maxWid > 0 {
@ -797,7 +814,7 @@ func (s *ss) hexDigit(digit int) int {
// There must be either two hexadecimal digits or a space character in the input. // There must be either two hexadecimal digits or a space character in the input.
func (s *ss) hexByte() (b byte, ok bool) { func (s *ss) hexByte() (b byte, ok bool) {
rune1 := s.getRune() rune1 := s.getRune()
if rune1 == EOF { if rune1 == eof {
return return
} }
if unicode.IsSpace(rune1) { if unicode.IsSpace(rune1) {
@ -953,7 +970,7 @@ func (s *ss) doScan(a []interface{}) (numProcessed int, err os.Error) {
if !s.nlIsSpace { if !s.nlIsSpace {
for { for {
rune := s.getRune() rune := s.getRune()
if rune == '\n' || rune == EOF { if rune == '\n' || rune == eof {
break break
} }
if !unicode.IsSpace(rune) { if !unicode.IsSpace(rune) {
@ -993,7 +1010,7 @@ func (s *ss) advance(format string) (i int) {
// There was space in the format, so there should be space (EOF) // There was space in the format, so there should be space (EOF)
// in the input. // in the input.
inputc := s.getRune() inputc := s.getRune()
if inputc == EOF { if inputc == eof {
return return
} }
if !unicode.IsSpace(inputc) { if !unicode.IsSpace(inputc) {

View File

@ -88,14 +88,15 @@ type FloatTest struct {
type Xs string type Xs string
func (x *Xs) Scan(state ScanState, verb int) os.Error { func (x *Xs) Scan(state ScanState, verb int) os.Error {
tok, err := state.Token() tok, err := state.Token(true, func(r int) bool { return r == verb })
if err != nil { if err != nil {
return err return err
} }
if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(tok) { s := string(tok)
if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(s) {
return os.ErrorString("syntax error for xs") return os.ErrorString("syntax error for xs")
} }
*x = Xs(tok) *x = Xs(s)
return nil return nil
} }
@ -113,9 +114,11 @@ func (s *IntString) Scan(state ScanState, verb int) os.Error {
return err return err
} }
if _, err := Fscan(state, &s.s); err != nil { tok, err := state.Token(true, nil)
if err != nil {
return err return err
} }
s.s = string(tok)
return nil return nil
} }
@ -331,7 +334,7 @@ var multiTests = []ScanfMultiTest{
{"%c%c%c", "2\u50c2X", args(&i, &j, &k), args('2', '\u50c2', 'X'), ""}, {"%c%c%c", "2\u50c2X", args(&i, &j, &k), args('2', '\u50c2', 'X'), ""},
// Custom scanners. // Custom scanners.
{"%2e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""}, {"%e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""},
{"%4v%s", "12abcd", args(&z, &s), args(IntString{12, "ab"}, "cd"), ""}, {"%4v%s", "12abcd", args(&z, &s), args(IntString{12, "ab"}, "cd"), ""},
// Errors // Errors
@ -476,22 +479,12 @@ func verifyInf(str string, t *testing.T) {
} }
} }
func TestInf(t *testing.T) { func TestInf(t *testing.T) {
for _, s := range []string{"inf", "+inf", "-inf", "INF", "-INF", "+INF", "Inf", "-Inf", "+Inf"} { for _, s := range []string{"inf", "+inf", "-inf", "INF", "-INF", "+INF", "Inf", "-Inf", "+Inf"} {
verifyInf(s, t) verifyInf(s, t)
} }
} }
// TODO: there's no conversion from []T to ...T, but we can fake it. These
// functions do the faking. We index the table by the length of the param list.
var fscanf = []func(io.Reader, string, []interface{}) (int, os.Error){
0: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f) },
1: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0]) },
2: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0], i[1]) },
3: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0], i[1], i[2]) },
}
func testScanfMulti(name string, t *testing.T) { func testScanfMulti(name string, t *testing.T) {
sliceType := reflect.Typeof(make([]interface{}, 1)).(*reflect.SliceType) sliceType := reflect.Typeof(make([]interface{}, 1)).(*reflect.SliceType)
for _, test := range multiTests { for _, test := range multiTests {
@ -501,7 +494,7 @@ func testScanfMulti(name string, t *testing.T) {
} else { } else {
r = newReader(test.text) r = newReader(test.text)
} }
n, err := fscanf[len(test.in)](r, test.format, test.in) n, err := Fscanf(r, test.format, test.in...)
if err != nil { if err != nil {
if test.err == "" { if test.err == "" {
t.Errorf("got error scanning (%q, %q): %q", test.format, test.text, err) t.Errorf("got error scanning (%q, %q): %q", test.format, test.text, err)
@ -830,12 +823,12 @@ func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) os.Error
i := 1 i := 1
for ; r != nil; r = r.next { for ; r != nil; r = r.next {
if r.i != i { if r.i != i {
t.Fatal("bad scan: expected %d got %d", i, r.i) t.Fatalf("bad scan: expected %d got %d", i, r.i)
} }
i++ i++
} }
if i-1 != intCount { if i-1 != intCount {
t.Fatal("bad scan count: expected %d got %d", intCount, i-1) t.Fatalf("bad scan count: expected %d got %d", intCount, i-1)
} }
} }

View File

@ -602,12 +602,12 @@ type (
Else Stmt // else branch; or nil Else Stmt // else branch; or nil
} }
// A CaseClause represents a case of an expression switch statement. // A CaseClause represents a case of an expression or type switch statement.
CaseClause struct { CaseClause struct {
Case token.Pos // position of "case" or "default" keyword Case token.Pos // position of "case" or "default" keyword
Values []Expr // nil means default case List []Expr // list of expressions or types; nil means default case
Colon token.Pos // position of ":" Colon token.Pos // position of ":"
Body []Stmt // statement list; or nil Body []Stmt // statement list; or nil
} }
// A SwitchStmt node represents an expression switch statement. // A SwitchStmt node represents an expression switch statement.
@ -618,20 +618,12 @@ type (
Body *BlockStmt // CaseClauses only Body *BlockStmt // CaseClauses only
} }
// A TypeCaseClause represents a case of a type switch statement.
TypeCaseClause struct {
Case token.Pos // position of "case" or "default" keyword
Types []Expr // nil means default case
Colon token.Pos // position of ":"
Body []Stmt // statement list; or nil
}
// An TypeSwitchStmt node represents a type switch statement. // An TypeSwitchStmt node represents a type switch statement.
TypeSwitchStmt struct { TypeSwitchStmt struct {
Switch token.Pos // position of "switch" keyword Switch token.Pos // position of "switch" keyword
Init Stmt // initalization statement; or nil Init Stmt // initalization statement; or nil
Assign Stmt // x := y.(type) Assign Stmt // x := y.(type) or y.(type)
Body *BlockStmt // TypeCaseClauses only Body *BlockStmt // CaseClauses only
} }
// A CommClause node represents a case of a select statement. // A CommClause node represents a case of a select statement.
@ -687,7 +679,6 @@ func (s *BlockStmt) Pos() token.Pos { return s.Lbrace }
func (s *IfStmt) Pos() token.Pos { return s.If } func (s *IfStmt) Pos() token.Pos { return s.If }
func (s *CaseClause) Pos() token.Pos { return s.Case } func (s *CaseClause) Pos() token.Pos { return s.Case }
func (s *SwitchStmt) Pos() token.Pos { return s.Switch } func (s *SwitchStmt) Pos() token.Pos { return s.Switch }
func (s *TypeCaseClause) Pos() token.Pos { return s.Case }
func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch } func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch }
func (s *CommClause) Pos() token.Pos { return s.Case } func (s *CommClause) Pos() token.Pos { return s.Case }
func (s *SelectStmt) Pos() token.Pos { return s.Select } func (s *SelectStmt) Pos() token.Pos { return s.Select }
@ -734,13 +725,7 @@ func (s *CaseClause) End() token.Pos {
} }
return s.Colon + 1 return s.Colon + 1
} }
func (s *SwitchStmt) End() token.Pos { return s.Body.End() } func (s *SwitchStmt) End() token.Pos { return s.Body.End() }
func (s *TypeCaseClause) End() token.Pos {
if n := len(s.Body); n > 0 {
return s.Body[n-1].End()
}
return s.Colon + 1
}
func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() } func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() }
func (s *CommClause) End() token.Pos { func (s *CommClause) End() token.Pos {
if n := len(s.Body); n > 0 { if n := len(s.Body); n > 0 {
@ -772,7 +757,6 @@ func (s *BlockStmt) stmtNode() {}
func (s *IfStmt) stmtNode() {} func (s *IfStmt) stmtNode() {}
func (s *CaseClause) stmtNode() {} func (s *CaseClause) stmtNode() {}
func (s *SwitchStmt) stmtNode() {} func (s *SwitchStmt) stmtNode() {}
func (s *TypeCaseClause) stmtNode() {}
func (s *TypeSwitchStmt) stmtNode() {} func (s *TypeSwitchStmt) stmtNode() {}
func (s *CommClause) stmtNode() {} func (s *CommClause) stmtNode() {}
func (s *SelectStmt) stmtNode() {} func (s *SelectStmt) stmtNode() {}
@ -937,11 +921,13 @@ func (d *FuncDecl) declNode() {}
// via Doc and Comment fields. // via Doc and Comment fields.
// //
type File struct { type File struct {
Doc *CommentGroup // associated documentation; or nil Doc *CommentGroup // associated documentation; or nil
Package token.Pos // position of "package" keyword Package token.Pos // position of "package" keyword
Name *Ident // package name Name *Ident // package name
Decls []Decl // top-level declarations; or nil Decls []Decl // top-level declarations; or nil
Comments []*CommentGroup // list of all comments in the source file Scope *Scope // package scope
Unresolved []*Ident // unresolved global identifiers
Comments []*CommentGroup // list of all comments in the source file
} }
@ -959,7 +945,7 @@ func (f *File) End() token.Pos {
// //
type Package struct { type Package struct {
Name string // package name Name string // package name
Scope *Scope // package scope; or nil Scope *Scope // package scope
Files map[string]*File // Go source files by filename Files map[string]*File // Go source files by filename
} }

View File

@ -425,5 +425,6 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
} }
} }
return &File{doc, pos, NewIdent(pkg.Name), decls, comments} // TODO(gri) need to compute pkgScope and unresolved identifiers!
return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, comments}
} }

View File

@ -30,15 +30,19 @@ func NotNilFilter(_ string, value reflect.Value) bool {
// Fprint prints the (sub-)tree starting at AST node x to w. // Fprint prints the (sub-)tree starting at AST node x to w.
// If fset != nil, position information is interpreted relative
// to that file set. Otherwise positions are printed as integer
// values (file set specific offsets).
// //
// A non-nil FieldFilter f may be provided to control the output: // A non-nil FieldFilter f may be provided to control the output:
// struct fields for which f(fieldname, fieldvalue) is true are // struct fields for which f(fieldname, fieldvalue) is true are
// are printed; all others are filtered from the output. // are printed; all others are filtered from the output.
// //
func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) { func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err os.Error) {
// setup printer // setup printer
p := printer{ p := printer{
output: w, output: w,
fset: fset,
filter: f, filter: f,
ptrmap: make(map[interface{}]int), ptrmap: make(map[interface{}]int),
last: '\n', // force printing of line number on first line last: '\n', // force printing of line number on first line
@ -65,14 +69,15 @@ func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) {
// Print prints x to standard output, skipping nil fields. // Print prints x to standard output, skipping nil fields.
// Print(x) is the same as Fprint(os.Stdout, x, NotNilFilter). // Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
func Print(x interface{}) (int, os.Error) { func Print(fset *token.FileSet, x interface{}) (int, os.Error) {
return Fprint(os.Stdout, x, NotNilFilter) return Fprint(os.Stdout, fset, x, NotNilFilter)
} }
type printer struct { type printer struct {
output io.Writer output io.Writer
fset *token.FileSet
filter FieldFilter filter FieldFilter
ptrmap map[interface{}]int // *reflect.PtrValue -> line number ptrmap map[interface{}]int // *reflect.PtrValue -> line number
written int // number of bytes written to output written int // number of bytes written to output
@ -137,16 +142,6 @@ func (p *printer) printf(format string, args ...interface{}) {
// probably be in a different package. // probably be in a different package.
func (p *printer) print(x reflect.Value) { func (p *printer) print(x reflect.Value) {
// Note: This test is only needed because AST nodes
// embed a token.Position, and thus all of them
// understand the String() method (but it only
// applies to the Position field).
// TODO: Should reconsider this AST design decision.
if pos, ok := x.Interface().(token.Position); ok {
p.printf("%s", pos)
return
}
if !NotNilFilter("", x) { if !NotNilFilter("", x) {
p.printf("nil") p.printf("nil")
return return
@ -163,6 +158,7 @@ func (p *printer) print(x reflect.Value) {
p.print(key) p.print(key)
p.printf(": ") p.printf(": ")
p.print(v.Elem(key)) p.print(v.Elem(key))
p.printf("\n")
} }
p.indent-- p.indent--
p.printf("}") p.printf("}")
@ -212,6 +208,11 @@ func (p *printer) print(x reflect.Value) {
p.printf("}") p.printf("}")
default: default:
p.printf("%v", x.Interface()) value := x.Interface()
// position values can be printed nicely if we have a file set
if pos, ok := value.(token.Pos); ok && p.fset != nil {
value = p.fset.Position(pos)
}
p.printf("%v", value)
} }
} }

View File

@ -2,31 +2,31 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// This file implements scopes, the objects they contain, // This file implements scopes and the objects they contain.
// and object types.
package ast package ast
import (
"bytes"
"fmt"
"go/token"
)
// A Scope maintains the set of named language entities declared // A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer) // in the scope and a link to the immediately surrounding (outer)
// scope. // scope.
// //
type Scope struct { type Scope struct {
Outer *Scope Outer *Scope
Objects []*Object // in declaration order Objects map[string]*Object
// Implementation note: In some cases (struct fields,
// function parameters) we need the source order of
// variables. Thus for now, we store scope entries
// in a linear list. If scopes become very large
// (say, for packages), we may need to change this
// to avoid slow lookups.
} }
// NewScope creates a new scope nested in the outer scope. // NewScope creates a new scope nested in the outer scope.
func NewScope(outer *Scope) *Scope { func NewScope(outer *Scope) *Scope {
const n = 4 // initial scope capacity, must be > 0 const n = 4 // initial scope capacity
return &Scope{outer, make([]*Object, 0, n)} return &Scope{outer, make(map[string]*Object, n)}
} }
@ -34,73 +34,108 @@ func NewScope(outer *Scope) *Scope {
// found in scope s, otherwise it returns nil. Outer scopes // found in scope s, otherwise it returns nil. Outer scopes
// are ignored. // are ignored.
// //
// Lookup always returns nil if name is "_", even if the scope
// contains objects with that name.
//
func (s *Scope) Lookup(name string) *Object { func (s *Scope) Lookup(name string) *Object {
if name != "_" { return s.Objects[name]
for _, obj := range s.Objects {
if obj.Name == name {
return obj
}
}
}
return nil
} }
// Insert attempts to insert a named object into the scope s. // Insert attempts to insert a named object into the scope s.
// If the scope does not contain an object with that name yet // If the scope does not contain an object with that name yet,
// or if the object is named "_", Insert inserts the object // Insert inserts the object and returns it. Otherwise, Insert
// and returns it. Otherwise, Insert leaves the scope unchanged // leaves the scope unchanged and returns the object found in
// and returns the object found in the scope instead. // the scope instead.
// //
func (s *Scope) Insert(obj *Object) *Object { func (s *Scope) Insert(obj *Object) (alt *Object) {
alt := s.Lookup(obj.Name) if alt = s.Objects[obj.Name]; alt == nil {
if alt == nil { s.Objects[obj.Name] = obj
s.append(obj)
alt = obj alt = obj
} }
return alt return
} }
func (s *Scope) append(obj *Object) { // Debugging support
s.Objects = append(s.Objects, obj) func (s *Scope) String() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "scope %p {", s)
if s != nil && len(s.Objects) > 0 {
fmt.Fprintln(&buf)
for _, obj := range s.Objects {
fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
}
}
fmt.Fprintf(&buf, "}\n")
return buf.String()
} }
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Objects // Objects
// An Object describes a language entity such as a package, // An Object describes a named language entity such as a package,
// constant, type, variable, or function (incl. methods). // constant, type, variable, function (incl. methods), or label.
// //
type Object struct { type Object struct {
Kind Kind Kind ObjKind
Name string // declared name Name string // declared name
Type *Type Decl interface{} // corresponding Field, XxxSpec, FuncDecl, or LabeledStmt; or nil
Decl interface{} // corresponding Field, XxxSpec or FuncDecl Type interface{} // place holder for type information; may be nil
N int // value of iota for this declaration
} }
// NewObj creates a new object of a given kind and name. // NewObj creates a new object of a given kind and name.
func NewObj(kind Kind, name string) *Object { func NewObj(kind ObjKind, name string) *Object {
return &Object{Kind: kind, Name: name} return &Object{Kind: kind, Name: name}
} }
// Kind describes what an object represents. // Pos computes the source position of the declaration of an object name.
type Kind int // The result may be an invalid position if it cannot be computed
// (obj.Decl may be nil or not correct).
func (obj *Object) Pos() token.Pos {
name := obj.Name
switch d := obj.Decl.(type) {
case *Field:
for _, n := range d.Names {
if n.Name == name {
return n.Pos()
}
}
case *ValueSpec:
for _, n := range d.Names {
if n.Name == name {
return n.Pos()
}
}
case *TypeSpec:
if d.Name.Name == name {
return d.Name.Pos()
}
case *FuncDecl:
if d.Name.Name == name {
return d.Name.Pos()
}
case *LabeledStmt:
if d.Label.Name == name {
return d.Label.Pos()
}
}
return token.NoPos
}
// ObKind describes what an object represents.
type ObjKind int
// The list of possible Object kinds. // The list of possible Object kinds.
const ( const (
Bad Kind = iota // for error handling Bad ObjKind = iota // for error handling
Pkg // package Pkg // package
Con // constant Con // constant
Typ // type Typ // type
Var // variable Var // variable
Fun // function or method Fun // function or method
Lbl // label
) )
@ -111,132 +146,8 @@ var objKindStrings = [...]string{
Typ: "type", Typ: "type",
Var: "var", Var: "var",
Fun: "func", Fun: "func",
Lbl: "label",
} }
func (kind Kind) String() string { return objKindStrings[kind] } func (kind ObjKind) String() string { return objKindStrings[kind] }
// IsExported returns whether obj is exported.
func (obj *Object) IsExported() bool { return IsExported(obj.Name) }
// ----------------------------------------------------------------------------
// Types
// A Type represents a Go type.
type Type struct {
Form Form
Obj *Object // corresponding type name, or nil
Scope *Scope // fields and methods, always present
N uint // basic type id, array length, number of function results, or channel direction
Key, Elt *Type // map key and array, pointer, slice, map or channel element
Params *Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil
Expr Expr // corresponding AST expression
}
// NewType creates a new type of a given form.
func NewType(form Form) *Type {
return &Type{Form: form, Scope: NewScope(nil)}
}
// Form describes the form of a type.
type Form int
// The list of possible type forms.
const (
BadType Form = iota // for error handling
Unresolved // type not fully setup
Basic
Array
Struct
Pointer
Function
Method
Interface
Slice
Map
Channel
Tuple
)
var formStrings = [...]string{
BadType: "badType",
Unresolved: "unresolved",
Basic: "basic",
Array: "array",
Struct: "struct",
Pointer: "pointer",
Function: "function",
Method: "method",
Interface: "interface",
Slice: "slice",
Map: "map",
Channel: "channel",
Tuple: "tuple",
}
func (form Form) String() string { return formStrings[form] }
// The list of basic type id's.
const (
Bool = iota
Byte
Uint
Int
Float
Complex
Uintptr
String
Uint8
Uint16
Uint32
Uint64
Int8
Int16
Int32
Int64
Float32
Float64
Complex64
Complex128
// TODO(gri) ideal types are missing
)
var BasicTypes = map[uint]string{
Bool: "bool",
Byte: "byte",
Uint: "uint",
Int: "int",
Float: "float",
Complex: "complex",
Uintptr: "uintptr",
String: "string",
Uint8: "uint8",
Uint16: "uint16",
Uint32: "uint32",
Uint64: "uint64",
Int8: "int8",
Int16: "int16",
Int32: "int32",
Int64: "int64",
Float32: "float32",
Float64: "float64",
Complex64: "complex64",
Complex128: "complex128",
}

View File

@ -234,7 +234,7 @@ func Walk(v Visitor, node Node) {
} }
case *CaseClause: case *CaseClause:
walkExprList(v, n.Values) walkExprList(v, n.List)
walkStmtList(v, n.Body) walkStmtList(v, n.Body)
case *SwitchStmt: case *SwitchStmt:
@ -246,12 +246,6 @@ func Walk(v Visitor, node Node) {
} }
Walk(v, n.Body) Walk(v, n.Body)
case *TypeCaseClause:
for _, x := range n.Types {
Walk(v, x)
}
walkStmtList(v, n.Body)
case *TypeSwitchStmt: case *TypeSwitchStmt:
if n.Init != nil { if n.Init != nil {
Walk(v, n.Init) Walk(v, n.Init)

View File

@ -14,7 +14,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
pathutil "path" "path/filepath"
) )
@ -198,7 +198,7 @@ func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool,
for i := 0; i < len(list); i++ { for i := 0; i < len(list); i++ {
d := &list[i] d := &list[i]
if filter == nil || filter(d) { if filter == nil || filter(d) {
filenames[n] = pathutil.Join(path, d.Name) filenames[n] = filepath.Join(path, d.Name)
n++ n++
} }
} }

View File

@ -17,10 +17,6 @@ import (
) )
// noPos is used when there is no corresponding source position for a token.
var noPos token.Position
// The mode parameter to the Parse* functions is a set of flags (or 0). // The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional // They control the amount of source code parsed and other optional
// parser functionality. // parser functionality.
@ -30,6 +26,7 @@ const (
ImportsOnly // parsing stops after import declarations ImportsOnly // parsing stops after import declarations
ParseComments // parse comments and add them to AST ParseComments // parse comments and add them to AST
Trace // print a trace of parsed productions Trace // print a trace of parsed productions
DeclarationErrors // report declaration errors
) )
@ -46,16 +43,26 @@ type parser struct {
// Comments // Comments
comments []*ast.CommentGroup comments []*ast.CommentGroup
leadComment *ast.CommentGroup // the last lead comment leadComment *ast.CommentGroup // last lead comment
lineComment *ast.CommentGroup // the last line comment lineComment *ast.CommentGroup // last line comment
// Next token // Next token
pos token.Pos // token position pos token.Pos // token position
tok token.Token // one token look-ahead tok token.Token // one token look-ahead
lit []byte // token literal lit_ []byte // token literal (slice into original source, don't hold on to it)
// Non-syntactic parser control // Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression exprLev int // < 0: in control clause, >= 0: in expression
// Ordinary identifer scopes
pkgScope *ast.Scope // pkgScope.Outer == nil
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved global identifiers
// Label scope
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
} }
@ -72,9 +79,126 @@ func scannerMode(mode uint) uint {
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) { func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
p.file = fset.AddFile(filename, fset.Base(), len(src)) p.file = fset.AddFile(filename, fset.Base(), len(src))
p.scanner.Init(p.file, src, p, scannerMode(mode)) p.scanner.Init(p.file, src, p, scannerMode(mode))
p.mode = mode p.mode = mode
p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently) p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
p.next() p.next()
// set up the pkgScope here (as opposed to in parseFile) because
// there are other parser entry points (ParseExpr, etc.)
p.openScope()
p.pkgScope = p.topScope
// for the same reason, set up a label scope
p.openLabelScope()
}
func (p *parser) lit() []byte {
// make a copy of p.lit_ so that we don't hold on to
// a copy of the entire source indirectly in the AST
t := make([]byte, len(p.lit_))
copy(t, p.lit_)
return t
}
// ----------------------------------------------------------------------------
// Scoping support
func (p *parser) openScope() {
p.topScope = ast.NewScope(p.topScope)
}
func (p *parser) closeScope() {
p.topScope = p.topScope.Outer
}
func (p *parser) openLabelScope() {
p.labelScope = ast.NewScope(p.labelScope)
p.targetStack = append(p.targetStack, nil)
}
func (p *parser) closeLabelScope() {
// resolve labels
n := len(p.targetStack) - 1
scope := p.labelScope
for _, ident := range p.targetStack[n] {
ident.Obj = scope.Lookup(ident.Name)
if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
}
}
// pop label scope
p.targetStack = p.targetStack[0:n]
p.labelScope = p.labelScope.Outer
}
func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
for _, ident := range idents {
if ident.Name != "_" {
obj := ast.NewObj(kind, ident.Name)
// remember the corresponding declaration for redeclaration
// errors and global variable resolution/typechecking phase
obj.Decl = decl
alt := scope.Insert(obj)
if alt != obj && p.mode&DeclarationErrors != 0 {
prevDecl := ""
if pos := alt.Pos(); pos.IsValid() {
prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
}
p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
}
ident.Obj = obj
}
}
}
func (p *parser) shortVarDecl(idents []*ast.Ident) {
// Go spec: A short variable declaration may redeclare variables
// provided they were originally declared in the same block with
// the same type, and at least one of the non-blank variables is new.
n := 0 // number of new variables
for _, ident := range idents {
if ident.Name != "_" {
obj := ast.NewObj(ast.Var, ident.Name)
// short var declarations cannot have redeclaration errors
// and are not global => no need to remember the respective
// declaration
alt := p.topScope.Insert(obj)
if alt == obj {
n++ // new declaration
}
ident.Obj = alt
}
}
if n == 0 && p.mode&DeclarationErrors != 0 {
p.error(idents[0].Pos(), "no new variables on left side of :=")
}
}
func (p *parser) resolve(ident *ast.Ident) {
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
// collect unresolved global identifiers; ignore the others
if p.topScope == p.pkgScope {
p.unresolved = append(p.unresolved, ident)
}
} }
@ -120,7 +244,7 @@ func (p *parser) next0() {
s := p.tok.String() s := p.tok.String()
switch { switch {
case p.tok.IsLiteral(): case p.tok.IsLiteral():
p.printTrace(s, string(p.lit)) p.printTrace(s, string(p.lit_))
case p.tok.IsOperator(), p.tok.IsKeyword(): case p.tok.IsOperator(), p.tok.IsKeyword():
p.printTrace("\"" + s + "\"") p.printTrace("\"" + s + "\"")
default: default:
@ -128,7 +252,7 @@ func (p *parser) next0() {
} }
} }
p.pos, p.tok, p.lit = p.scanner.Scan() p.pos, p.tok, p.lit_ = p.scanner.Scan()
} }
// Consume a comment and return it and the line on which it ends. // Consume a comment and return it and the line on which it ends.
@ -136,15 +260,15 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
// /*-style comments may end on a different line than where they start. // /*-style comments may end on a different line than where they start.
// Scan the comment for '\n' chars and adjust endline accordingly. // Scan the comment for '\n' chars and adjust endline accordingly.
endline = p.file.Line(p.pos) endline = p.file.Line(p.pos)
if p.lit[1] == '*' { if p.lit_[1] == '*' {
for _, b := range p.lit { for _, b := range p.lit_ {
if b == '\n' { if b == '\n' {
endline++ endline++
} }
} }
} }
comment = &ast.Comment{p.pos, p.lit} comment = &ast.Comment{p.pos, p.lit()}
p.next0() p.next0()
return return
@ -234,12 +358,12 @@ func (p *parser) errorExpected(pos token.Pos, msg string) {
if pos == p.pos { if pos == p.pos {
// the error happened at the current position; // the error happened at the current position;
// make the error message more specific // make the error message more specific
if p.tok == token.SEMICOLON && p.lit[0] == '\n' { if p.tok == token.SEMICOLON && p.lit_[0] == '\n' {
msg += ", found newline" msg += ", found newline"
} else { } else {
msg += ", found '" + p.tok.String() + "'" msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() { if p.tok.IsLiteral() {
msg += " " + string(p.lit) msg += " " + string(p.lit_)
} }
} }
} }
@ -271,7 +395,7 @@ func (p *parser) parseIdent() *ast.Ident {
pos := p.pos pos := p.pos
name := "_" name := "_"
if p.tok == token.IDENT { if p.tok == token.IDENT {
name = string(p.lit) name = string(p.lit_)
p.next() p.next()
} else { } else {
p.expect(token.IDENT) // use expect() error handling p.expect(token.IDENT) // use expect() error handling
@ -339,13 +463,16 @@ func (p *parser) parseQualifiedIdent() ast.Expr {
defer un(trace(p, "QualifiedIdent")) defer un(trace(p, "QualifiedIdent"))
} }
var x ast.Expr = p.parseIdent() ident := p.parseIdent()
p.resolve(ident)
var x ast.Expr = ident
if p.tok == token.PERIOD { if p.tok == token.PERIOD {
// first identifier is a package identifier // first identifier is a package identifier
p.next() p.next()
sel := p.parseIdent() sel := p.parseIdent()
x = &ast.SelectorExpr{x, sel} x = &ast.SelectorExpr{x, sel}
} }
return x return x
} }
@ -407,7 +534,7 @@ func (p *parser) parseFieldDecl() *ast.Field {
// optional tag // optional tag
var tag *ast.BasicLit var tag *ast.BasicLit
if p.tok == token.STRING { if p.tok == token.STRING {
tag = &ast.BasicLit{p.pos, p.tok, p.lit} tag = &ast.BasicLit{p.pos, p.tok, p.lit()}
p.next() p.next()
} }
@ -426,7 +553,7 @@ func (p *parser) parseFieldDecl() *ast.Field {
} }
} }
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.Field{doc, idents, typ, tag, p.lineComment} return &ast.Field{doc, idents, typ, tag, p.lineComment}
} }
@ -519,7 +646,7 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
} }
func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) { func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
if p.trace { if p.trace {
defer un(trace(p, "ParameterList")) defer un(trace(p, "ParameterList"))
} }
@ -528,7 +655,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
if typ != nil { if typ != nil {
// IdentifierList Type // IdentifierList Type
idents := p.makeIdentList(list) idents := p.makeIdentList(list)
params = append(params, &ast.Field{nil, idents, typ, nil, nil}) field := &ast.Field{nil, idents, typ, nil, nil}
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
p.declare(field, scope, ast.Var, idents...)
if p.tok == token.COMMA { if p.tok == token.COMMA {
p.next() p.next()
} }
@ -536,7 +667,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
for p.tok != token.RPAREN && p.tok != token.EOF { for p.tok != token.RPAREN && p.tok != token.EOF {
idents := p.parseIdentList() idents := p.parseIdentList()
typ := p.parseVarType(ellipsisOk) typ := p.parseVarType(ellipsisOk)
params = append(params, &ast.Field{nil, idents, typ, nil, nil}) field := &ast.Field{nil, idents, typ, nil, nil}
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
p.declare(field, scope, ast.Var, idents...)
if p.tok != token.COMMA { if p.tok != token.COMMA {
break break
} }
@ -555,7 +690,7 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
} }
func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList { func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
if p.trace { if p.trace {
defer un(trace(p, "Parameters")) defer un(trace(p, "Parameters"))
} }
@ -563,7 +698,7 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList {
var params []*ast.Field var params []*ast.Field
lparen := p.expect(token.LPAREN) lparen := p.expect(token.LPAREN)
if p.tok != token.RPAREN { if p.tok != token.RPAREN {
params = p.parseParameterList(ellipsisOk) params = p.parseParameterList(scope, ellipsisOk)
} }
rparen := p.expect(token.RPAREN) rparen := p.expect(token.RPAREN)
@ -571,13 +706,13 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList {
} }
func (p *parser) parseResult() *ast.FieldList { func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
if p.trace { if p.trace {
defer un(trace(p, "Result")) defer un(trace(p, "Result"))
} }
if p.tok == token.LPAREN { if p.tok == token.LPAREN {
return p.parseParameters(false) return p.parseParameters(scope, false)
} }
typ := p.tryType() typ := p.tryType()
@ -591,27 +726,28 @@ func (p *parser) parseResult() *ast.FieldList {
} }
func (p *parser) parseSignature() (params, results *ast.FieldList) { func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
if p.trace { if p.trace {
defer un(trace(p, "Signature")) defer un(trace(p, "Signature"))
} }
params = p.parseParameters(true) params = p.parseParameters(scope, true)
results = p.parseResult() results = p.parseResult(scope)
return return
} }
func (p *parser) parseFuncType() *ast.FuncType { func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
if p.trace { if p.trace {
defer un(trace(p, "FuncType")) defer un(trace(p, "FuncType"))
} }
pos := p.expect(token.FUNC) pos := p.expect(token.FUNC)
params, results := p.parseSignature() scope := ast.NewScope(p.topScope) // function scope
params, results := p.parseSignature(scope)
return &ast.FuncType{pos, params, results} return &ast.FuncType{pos, params, results}, scope
} }
@ -627,13 +763,14 @@ func (p *parser) parseMethodSpec() *ast.Field {
if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN { if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
// method // method
idents = []*ast.Ident{ident} idents = []*ast.Ident{ident}
params, results := p.parseSignature() scope := ast.NewScope(nil) // method scope
params, results := p.parseSignature(scope)
typ = &ast.FuncType{token.NoPos, params, results} typ = &ast.FuncType{token.NoPos, params, results}
} else { } else {
// embedded interface // embedded interface
typ = x typ = x
} }
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.Field{doc, idents, typ, nil, p.lineComment} return &ast.Field{doc, idents, typ, nil, p.lineComment}
} }
@ -706,7 +843,8 @@ func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
case token.MUL: case token.MUL:
return p.parsePointerType() return p.parsePointerType()
case token.FUNC: case token.FUNC:
return p.parseFuncType() typ, _ := p.parseFuncType()
return typ
case token.INTERFACE: case token.INTERFACE:
return p.parseInterfaceType() return p.parseInterfaceType()
case token.MAP: case token.MAP:
@ -745,13 +883,17 @@ func (p *parser) parseStmtList() (list []ast.Stmt) {
} }
func (p *parser) parseBody() *ast.BlockStmt { func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
if p.trace { if p.trace {
defer un(trace(p, "Body")) defer un(trace(p, "Body"))
} }
lbrace := p.expect(token.LBRACE) lbrace := p.expect(token.LBRACE)
p.topScope = scope // open function scope
p.openLabelScope()
list := p.parseStmtList() list := p.parseStmtList()
p.closeLabelScope()
p.closeScope()
rbrace := p.expect(token.RBRACE) rbrace := p.expect(token.RBRACE)
return &ast.BlockStmt{lbrace, list, rbrace} return &ast.BlockStmt{lbrace, list, rbrace}
@ -764,7 +906,9 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt {
} }
lbrace := p.expect(token.LBRACE) lbrace := p.expect(token.LBRACE)
p.openScope()
list := p.parseStmtList() list := p.parseStmtList()
p.closeScope()
rbrace := p.expect(token.RBRACE) rbrace := p.expect(token.RBRACE)
return &ast.BlockStmt{lbrace, list, rbrace} return &ast.BlockStmt{lbrace, list, rbrace}
@ -779,14 +923,14 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr {
defer un(trace(p, "FuncTypeOrLit")) defer un(trace(p, "FuncTypeOrLit"))
} }
typ := p.parseFuncType() typ, scope := p.parseFuncType()
if p.tok != token.LBRACE { if p.tok != token.LBRACE {
// function type only // function type only
return typ return typ
} }
p.exprLev++ p.exprLev++
body := p.parseBody() body := p.parseBody(scope)
p.exprLev-- p.exprLev--
return &ast.FuncLit{typ, body} return &ast.FuncLit{typ, body}
@ -803,10 +947,12 @@ func (p *parser) parseOperand() ast.Expr {
switch p.tok { switch p.tok {
case token.IDENT: case token.IDENT:
return p.parseIdent() ident := p.parseIdent()
p.resolve(ident)
return ident
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING: case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{p.pos, p.tok, p.lit} x := &ast.BasicLit{p.pos, p.tok, p.lit()}
p.next() p.next()
return x return x
@ -1202,6 +1348,9 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
pos, tok := p.pos, p.tok pos, tok := p.pos, p.tok
p.next() p.next()
y := p.parseExprList() y := p.parseExprList()
if tok == token.DEFINE {
p.shortVarDecl(p.makeIdentList(x))
}
return &ast.AssignStmt{x, pos, tok, y} return &ast.AssignStmt{x, pos, tok, y}
} }
@ -1216,7 +1365,12 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
colon := p.pos colon := p.pos
p.next() p.next()
if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent { if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
return &ast.LabeledStmt{label, colon, p.parseStmt()} // Go spec: The scope of a label is the body of the function
// in which it is declared and excludes the body of any nested
// function.
stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
p.declare(stmt, p.labelScope, ast.Lbl, label)
return stmt
} }
p.error(x[0].Pos(), "illegal label declaration") p.error(x[0].Pos(), "illegal label declaration")
return &ast.BadStmt{x[0].Pos(), colon + 1} return &ast.BadStmt{x[0].Pos(), colon + 1}
@ -1304,14 +1458,17 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
defer un(trace(p, "BranchStmt")) defer un(trace(p, "BranchStmt"))
} }
s := &ast.BranchStmt{p.pos, tok, nil} pos := p.expect(tok)
p.expect(tok) var label *ast.Ident
if tok != token.FALLTHROUGH && p.tok == token.IDENT { if tok != token.FALLTHROUGH && p.tok == token.IDENT {
s.Label = p.parseIdent() label = p.parseIdent()
// add to list of unresolved targets
n := len(p.targetStack) - 1
p.targetStack[n] = append(p.targetStack[n], label)
} }
p.expectSemi() p.expectSemi()
return s return &ast.BranchStmt{pos, tok, label}
} }
@ -1333,6 +1490,8 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
} }
pos := p.expect(token.IF) pos := p.expect(token.IF)
p.openScope()
defer p.closeScope()
var s ast.Stmt var s ast.Stmt
var x ast.Expr var x ast.Expr
@ -1368,28 +1527,6 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
} }
func (p *parser) parseCaseClause() *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"))
}
// SwitchCase
pos := p.pos
var x []ast.Expr
if p.tok == token.CASE {
p.next()
x = p.parseExprList()
} else {
p.expect(token.DEFAULT)
}
colon := p.expect(token.COLON)
body := p.parseStmtList()
return &ast.CaseClause{pos, x, colon, body}
}
func (p *parser) parseTypeList() (list []ast.Expr) { func (p *parser) parseTypeList() (list []ast.Expr) {
if p.trace { if p.trace {
defer un(trace(p, "TypeList")) defer un(trace(p, "TypeList"))
@ -1405,25 +1542,30 @@ func (p *parser) parseTypeList() (list []ast.Expr) {
} }
func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause { func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
if p.trace { if p.trace {
defer un(trace(p, "TypeCaseClause")) defer un(trace(p, "CaseClause"))
} }
// TypeSwitchCase
pos := p.pos pos := p.pos
var types []ast.Expr var list []ast.Expr
if p.tok == token.CASE { if p.tok == token.CASE {
p.next() p.next()
types = p.parseTypeList() if exprSwitch {
list = p.parseExprList()
} else {
list = p.parseTypeList()
}
} else { } else {
p.expect(token.DEFAULT) p.expect(token.DEFAULT)
} }
colon := p.expect(token.COLON) colon := p.expect(token.COLON)
p.openScope()
body := p.parseStmtList() body := p.parseStmtList()
p.closeScope()
return &ast.TypeCaseClause{pos, types, colon, body} return &ast.CaseClause{pos, list, colon, body}
} }
@ -1447,6 +1589,8 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
} }
pos := p.expect(token.SWITCH) pos := p.expect(token.SWITCH)
p.openScope()
defer p.closeScope()
var s1, s2 ast.Stmt var s1, s2 ast.Stmt
if p.tok != token.LBRACE { if p.tok != token.LBRACE {
@ -1466,28 +1610,21 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
p.exprLev = prevLev p.exprLev = prevLev
} }
if isExprSwitch(s2) { exprSwitch := isExprSwitch(s2)
lbrace := p.expect(token.LBRACE)
var list []ast.Stmt
for p.tok == token.CASE || p.tok == token.DEFAULT {
list = append(list, p.parseCaseClause())
}
rbrace := p.expect(token.RBRACE)
body := &ast.BlockStmt{lbrace, list, rbrace}
p.expectSemi()
return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
}
// type switch
// TODO(gri): do all the checks!
lbrace := p.expect(token.LBRACE) lbrace := p.expect(token.LBRACE)
var list []ast.Stmt var list []ast.Stmt
for p.tok == token.CASE || p.tok == token.DEFAULT { for p.tok == token.CASE || p.tok == token.DEFAULT {
list = append(list, p.parseTypeCaseClause()) list = append(list, p.parseCaseClause(exprSwitch))
} }
rbrace := p.expect(token.RBRACE) rbrace := p.expect(token.RBRACE)
p.expectSemi() p.expectSemi()
body := &ast.BlockStmt{lbrace, list, rbrace} body := &ast.BlockStmt{lbrace, list, rbrace}
if exprSwitch {
return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
}
// type switch
// TODO(gri): do all the checks!
return &ast.TypeSwitchStmt{pos, s1, s2, body} return &ast.TypeSwitchStmt{pos, s1, s2, body}
} }
@ -1497,7 +1634,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
defer un(trace(p, "CommClause")) defer un(trace(p, "CommClause"))
} }
// CommCase p.openScope()
pos := p.pos pos := p.pos
var comm ast.Stmt var comm ast.Stmt
if p.tok == token.CASE { if p.tok == token.CASE {
@ -1518,7 +1655,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
pos := p.pos pos := p.pos
tok := p.tok tok := p.tok
var rhs ast.Expr var rhs ast.Expr
if p.tok == token.ASSIGN || p.tok == token.DEFINE { if tok == token.ASSIGN || tok == token.DEFINE {
// RecvStmt with assignment // RecvStmt with assignment
if len(lhs) > 2 { if len(lhs) > 2 {
p.errorExpected(lhs[0].Pos(), "1 or 2 expressions") p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
@ -1527,6 +1664,9 @@ func (p *parser) parseCommClause() *ast.CommClause {
} }
p.next() p.next()
rhs = p.parseExpr() rhs = p.parseExpr()
if tok == token.DEFINE {
p.shortVarDecl(p.makeIdentList(lhs))
}
} else { } else {
// rhs must be single receive operation // rhs must be single receive operation
if len(lhs) > 1 { if len(lhs) > 1 {
@ -1552,6 +1692,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
colon := p.expect(token.COLON) colon := p.expect(token.COLON)
body := p.parseStmtList() body := p.parseStmtList()
p.closeScope()
return &ast.CommClause{pos, comm, colon, body} return &ast.CommClause{pos, comm, colon, body}
} }
@ -1582,6 +1723,8 @@ func (p *parser) parseForStmt() ast.Stmt {
} }
pos := p.expect(token.FOR) pos := p.expect(token.FOR)
p.openScope()
defer p.closeScope()
var s1, s2, s3 ast.Stmt var s1, s2, s3 ast.Stmt
if p.tok != token.LBRACE { if p.tok != token.LBRACE {
@ -1631,18 +1774,16 @@ func (p *parser) parseForStmt() ast.Stmt {
return &ast.BadStmt{pos, body.End()} return &ast.BadStmt{pos, body.End()}
} }
if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE { if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
// rhs is range expression; check lhs // rhs is range expression
// (any short variable declaration was handled by parseSimpleStat above)
return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body} return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
} else {
p.errorExpected(s2.Pos(), "range clause")
return &ast.BadStmt{pos, body.End()}
} }
} else { p.errorExpected(s2.Pos(), "range clause")
// regular for statement return &ast.BadStmt{pos, body.End()}
return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
} }
panic("unreachable") // regular for statement
return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
} }
@ -1706,36 +1847,37 @@ func (p *parser) parseStmt() (s ast.Stmt) {
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Declarations // Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup) ast.Spec type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec { func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace { if p.trace {
defer un(trace(p, "ImportSpec")) defer un(trace(p, "ImportSpec"))
} }
var ident *ast.Ident var ident *ast.Ident
if p.tok == token.PERIOD { switch p.tok {
case token.PERIOD:
ident = &ast.Ident{p.pos, ".", nil} ident = &ast.Ident{p.pos, ".", nil}
p.next() p.next()
} else if p.tok == token.IDENT { case token.IDENT:
ident = p.parseIdent() ident = p.parseIdent()
} }
var path *ast.BasicLit var path *ast.BasicLit
if p.tok == token.STRING { if p.tok == token.STRING {
path = &ast.BasicLit{p.pos, p.tok, p.lit} path = &ast.BasicLit{p.pos, p.tok, p.lit()}
p.next() p.next()
} else { } else {
p.expect(token.STRING) // use expect() error handling p.expect(token.STRING) // use expect() error handling
} }
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.ImportSpec{doc, ident, path, p.lineComment} return &ast.ImportSpec{doc, ident, path, p.lineComment}
} }
func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec { func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
if p.trace { if p.trace {
defer un(trace(p, "ConstSpec")) defer un(trace(p, "ConstSpec"))
} }
@ -1743,30 +1885,44 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
idents := p.parseIdentList() idents := p.parseIdentList()
typ := p.tryType() typ := p.tryType()
var values []ast.Expr var values []ast.Expr
if typ != nil || p.tok == token.ASSIGN { if typ != nil || p.tok == token.ASSIGN || iota == 0 {
p.expect(token.ASSIGN) p.expect(token.ASSIGN)
values = p.parseExprList() values = p.parseExprList()
} }
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.ValueSpec{doc, idents, typ, values, p.lineComment} // Go spec: The scope of a constant or variable identifier declared inside
// a function begins at the end of the ConstSpec or VarSpec and ends at
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
p.declare(spec, p.topScope, ast.Con, idents...)
return spec
} }
func parseTypeSpec(p *parser, doc *ast.CommentGroup) ast.Spec { func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace { if p.trace {
defer un(trace(p, "TypeSpec")) defer un(trace(p, "TypeSpec"))
} }
ident := p.parseIdent() ident := p.parseIdent()
typ := p.parseType() typ := p.parseType()
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.TypeSpec{doc, ident, typ, p.lineComment} // Go spec: The scope of a type identifier declared inside a function begins
// at the identifier in the TypeSpec and ends at the end of the innermost
// containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.TypeSpec{doc, ident, typ, p.lineComment}
p.declare(spec, p.topScope, ast.Typ, ident)
return spec
} }
func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec { func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace { if p.trace {
defer un(trace(p, "VarSpec")) defer un(trace(p, "VarSpec"))
} }
@ -1778,9 +1934,16 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
p.expect(token.ASSIGN) p.expect(token.ASSIGN)
values = p.parseExprList() values = p.parseExprList()
} }
p.expectSemi() p.expectSemi() // call before accessing p.linecomment
return &ast.ValueSpec{doc, idents, typ, values, p.lineComment} // Go spec: The scope of a constant or variable identifier declared inside
// a function begins at the end of the ConstSpec or VarSpec and ends at
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
p.declare(spec, p.topScope, ast.Var, idents...)
return spec
} }
@ -1796,26 +1959,26 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen
if p.tok == token.LPAREN { if p.tok == token.LPAREN {
lparen = p.pos lparen = p.pos
p.next() p.next()
for p.tok != token.RPAREN && p.tok != token.EOF { for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
list = append(list, f(p, p.leadComment)) list = append(list, f(p, p.leadComment, iota))
} }
rparen = p.expect(token.RPAREN) rparen = p.expect(token.RPAREN)
p.expectSemi() p.expectSemi()
} else { } else {
list = append(list, f(p, nil)) list = append(list, f(p, nil, 0))
} }
return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen} return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
} }
func (p *parser) parseReceiver() *ast.FieldList { func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
if p.trace { if p.trace {
defer un(trace(p, "Receiver")) defer un(trace(p, "Receiver"))
} }
pos := p.pos pos := p.pos
par := p.parseParameters(false) par := p.parseParameters(scope, false)
// must have exactly one receiver // must have exactly one receiver
if par.NumFields() != 1 { if par.NumFields() != 1 {
@ -1844,22 +2007,37 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl {
doc := p.leadComment doc := p.leadComment
pos := p.expect(token.FUNC) pos := p.expect(token.FUNC)
scope := ast.NewScope(p.topScope) // function scope
var recv *ast.FieldList var recv *ast.FieldList
if p.tok == token.LPAREN { if p.tok == token.LPAREN {
recv = p.parseReceiver() recv = p.parseReceiver(scope)
} }
ident := p.parseIdent() ident := p.parseIdent()
params, results := p.parseSignature()
params, results := p.parseSignature(scope)
var body *ast.BlockStmt var body *ast.BlockStmt
if p.tok == token.LBRACE { if p.tok == token.LBRACE {
body = p.parseBody() body = p.parseBody(scope)
} }
p.expectSemi() p.expectSemi()
return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body} decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
if recv == nil {
// Go spec: The scope of an identifier denoting a constant, type,
// variable, or function (but not method) declared at top level
// (outside any function) is the package block.
//
// init() functions cannot be referred to and there may
// be more than one - don't put them in the pkgScope
if ident.Name != "init" {
p.declare(decl, p.pkgScope, ast.Fun, ident)
}
}
return decl
} }
@ -1918,6 +2096,8 @@ func (p *parser) parseFile() *ast.File {
// package clause // package clause
doc := p.leadComment doc := p.leadComment
pos := p.expect(token.PACKAGE) pos := p.expect(token.PACKAGE)
// Go spec: The package clause is not a declaration;
// the package name does not appear in any scope.
ident := p.parseIdent() ident := p.parseIdent()
p.expectSemi() p.expectSemi()
@ -1940,5 +2120,20 @@ func (p *parser) parseFile() *ast.File {
} }
} }
return &ast.File{doc, pos, ident, decls, p.comments} if p.topScope != p.pkgScope {
panic("internal error: imbalanced scopes")
}
// resolve global identifiers within the same file
i := 0
for _, ident := range p.unresolved {
// i <= index for current ident
ident.Obj = p.pkgScope.Lookup(ident.Name)
if ident.Obj == nil {
p.unresolved[i] = ident
i++
}
}
return &ast.File{doc, pos, ident, decls, p.pkgScope, p.unresolved[0:i], p.comments}
} }

View File

@ -21,6 +21,7 @@ var illegalInputs = []interface{}{
`package p; func f() { if /* should have condition */ {} };`, `package p; func f() { if /* should have condition */ {} };`,
`package p; func f() { if ; /* should have condition */ {} };`, `package p; func f() { if ; /* should have condition */ {} };`,
`package p; func f() { if f(); /* should have condition */ {} };`, `package p; func f() { if f(); /* should have condition */ {} };`,
`package p; const c; /* should have constant value */`,
} }
@ -73,7 +74,7 @@ var validFiles = []string{
func TestParse3(t *testing.T) { func TestParse3(t *testing.T) {
for _, filename := range validFiles { for _, filename := range validFiles {
_, err := ParseFile(fset, filename, nil, 0) _, err := ParseFile(fset, filename, nil, DeclarationErrors)
if err != nil { if err != nil {
t.Errorf("ParseFile(%s): %v", filename, err) t.Errorf("ParseFile(%s): %v", filename, err)
} }

View File

@ -108,17 +108,6 @@ func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) {
} }
// Compute the key size of a key:value expression.
// Returns 0 if the expression doesn't fit onto a single line.
func (p *printer) keySize(pair *ast.KeyValueExpr) int {
if p.nodeSize(pair, infinity) <= infinity {
// entire expression fits on one line - return key size
return p.nodeSize(pair.Key, infinity)
}
return 0
}
// Print a list of expressions. If the list spans multiple // Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between // source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple // expressions. Sets multiLine to true if the list spans multiple
@ -204,17 +193,21 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// the key and the node size into the decision process // the key and the node size into the decision process
useFF := true useFF := true
// determine size // determine element size: all bets are off if we don't have
// position information for the previous and next token (likely
// generated code - simply ignore the size in this case by setting
// it to 0)
prevSize := size prevSize := size
const infinity = 1e6 // larger than any source line const infinity = 1e6 // larger than any source line
size = p.nodeSize(x, infinity) size = p.nodeSize(x, infinity)
pair, isPair := x.(*ast.KeyValueExpr) pair, isPair := x.(*ast.KeyValueExpr)
if size <= infinity { if size <= infinity && prev.IsValid() && next.IsValid() {
// x fits on a single line // x fits on a single line
if isPair { if isPair {
size = p.nodeSize(pair.Key, infinity) // size <= infinity size = p.nodeSize(pair.Key, infinity) // size <= infinity
} }
} else { } else {
// size too large or we don't have good layout information
size = 0 size = 0
} }
@ -244,7 +237,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// lines are broken using newlines so comments remain aligned // lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on // unless forceFF is set or there are multiple expressions on
// the same line in which case formfeed is used // the same line in which case formfeed is used
// broken with a formfeed
if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) { if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) {
ws = ignore ws = ignore
*multiLine = true *multiLine = true
@ -375,7 +367,7 @@ func (p *printer) setLineComment(text string) {
} }
func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprContext) { func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
p.nesting++ p.nesting++
defer func() { defer func() {
p.nesting-- p.nesting--
@ -384,15 +376,15 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
lbrace := fields.Opening lbrace := fields.Opening
list := fields.List list := fields.List
rbrace := fields.Closing rbrace := fields.Closing
srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.fset.Position(lbrace).Line == p.fset.Position(rbrace).Line
if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) { if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) && srcIsOneLine {
// possibly a one-line struct/interface // possibly a one-line struct/interface
if len(list) == 0 { if len(list) == 0 {
// no blank between keyword and {} in this case // no blank between keyword and {} in this case
p.print(lbrace, token.LBRACE, rbrace, token.RBRACE) p.print(lbrace, token.LBRACE, rbrace, token.RBRACE)
return return
} else if ctxt&(compositeLit|structType) == compositeLit|structType && } else if isStruct && p.isOneLineFieldList(list) { // for now ignore interfaces
p.isOneLineFieldList(list) { // for now ignore interfaces
// small enough - print on one line // small enough - print on one line
// (don't use identList and ignore source line breaks) // (don't use identList and ignore source line breaks)
p.print(lbrace, token.LBRACE, blank) p.print(lbrace, token.LBRACE, blank)
@ -414,7 +406,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
// at least one entry or incomplete // at least one entry or incomplete
p.print(blank, lbrace, token.LBRACE, indent, formfeed) p.print(blank, lbrace, token.LBRACE, indent, formfeed)
if ctxt&structType != 0 { if isStruct {
sep := vtab sep := vtab
if len(list) == 1 { if len(list) == 1 {
@ -497,15 +489,6 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Expressions // Expressions
// exprContext describes the syntactic environment in which an expression node is printed.
type exprContext uint
const (
compositeLit exprContext = 1 << iota
structType
)
func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
switch e.Op.Precedence() { switch e.Op.Precedence() {
case 4: case 4:
@ -650,7 +633,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
printBlank := prec < cutoff printBlank := prec < cutoff
ws := indent ws := indent
p.expr1(x.X, prec, depth+diffPrec(x.X, prec), 0, multiLine) p.expr1(x.X, prec, depth+diffPrec(x.X, prec), multiLine)
if printBlank { if printBlank {
p.print(blank) p.print(blank)
} }
@ -669,7 +652,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
if printBlank { if printBlank {
p.print(blank) p.print(blank)
} }
p.expr1(x.Y, prec+1, depth+1, 0, multiLine) p.expr1(x.Y, prec+1, depth+1, multiLine)
if ws == ignore { if ws == ignore {
p.print(unindent) p.print(unindent)
} }
@ -742,7 +725,7 @@ func selectorExprList(expr ast.Expr) (list []ast.Expr) {
// Sets multiLine to true if the expression spans multiple lines. // Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multiLine *bool) { func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(expr.Pos()) p.print(expr.Pos())
switch x := expr.(type) { switch x := expr.(type) {
@ -792,7 +775,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
// TODO(gri) Remove this code if it cannot be reached. // TODO(gri) Remove this code if it cannot be reached.
p.print(blank) p.print(blank)
} }
p.expr1(x.X, prec, depth, 0, multiLine) p.expr1(x.X, prec, depth, multiLine)
} }
case *ast.BasicLit: case *ast.BasicLit:
@ -818,7 +801,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos) p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos)
case *ast.TypeAssertExpr: case *ast.TypeAssertExpr:
p.expr1(x.X, token.HighestPrec, depth, 0, multiLine) p.expr1(x.X, token.HighestPrec, depth, multiLine)
p.print(token.PERIOD, token.LPAREN) p.print(token.PERIOD, token.LPAREN)
if x.Type != nil { if x.Type != nil {
p.expr(x.Type, multiLine) p.expr(x.Type, multiLine)
@ -829,14 +812,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.IndexExpr: case *ast.IndexExpr:
// TODO(gri): should treat[] like parentheses and undo one level of depth // TODO(gri): should treat[] like parentheses and undo one level of depth
p.expr1(x.X, token.HighestPrec, 1, 0, multiLine) p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.print(x.Lbrack, token.LBRACK) p.print(x.Lbrack, token.LBRACK)
p.expr0(x.Index, depth+1, multiLine) p.expr0(x.Index, depth+1, multiLine)
p.print(x.Rbrack, token.RBRACK) p.print(x.Rbrack, token.RBRACK)
case *ast.SliceExpr: case *ast.SliceExpr:
// TODO(gri): should treat[] like parentheses and undo one level of depth // TODO(gri): should treat[] like parentheses and undo one level of depth
p.expr1(x.X, token.HighestPrec, 1, 0, multiLine) p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.print(x.Lbrack, token.LBRACK) p.print(x.Lbrack, token.LBRACK)
if x.Low != nil { if x.Low != nil {
p.expr0(x.Low, depth+1, multiLine) p.expr0(x.Low, depth+1, multiLine)
@ -856,7 +839,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
if len(x.Args) > 1 { if len(x.Args) > 1 {
depth++ depth++
} }
p.expr1(x.Fun, token.HighestPrec, depth, 0, multiLine) p.expr1(x.Fun, token.HighestPrec, depth, multiLine)
p.print(x.Lparen, token.LPAREN) p.print(x.Lparen, token.LPAREN)
p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen) p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen)
if x.Ellipsis.IsValid() { if x.Ellipsis.IsValid() {
@ -867,7 +850,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.CompositeLit: case *ast.CompositeLit:
// composite literal elements that are composite literals themselves may have the type omitted // composite literal elements that are composite literals themselves may have the type omitted
if x.Type != nil { if x.Type != nil {
p.expr1(x.Type, token.HighestPrec, depth, compositeLit, multiLine) p.expr1(x.Type, token.HighestPrec, depth, multiLine)
} }
p.print(x.Lbrace, token.LBRACE) p.print(x.Lbrace, token.LBRACE)
p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace) p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace)
@ -892,7 +875,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.StructType: case *ast.StructType:
p.print(token.STRUCT) p.print(token.STRUCT)
p.fieldList(x.Fields, x.Incomplete, ctxt|structType) p.fieldList(x.Fields, true, x.Incomplete)
case *ast.FuncType: case *ast.FuncType:
p.print(token.FUNC) p.print(token.FUNC)
@ -900,7 +883,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.InterfaceType: case *ast.InterfaceType:
p.print(token.INTERFACE) p.print(token.INTERFACE)
p.fieldList(x.Methods, x.Incomplete, ctxt) p.fieldList(x.Methods, false, x.Incomplete)
case *ast.MapType: case *ast.MapType:
p.print(token.MAP, token.LBRACK) p.print(token.MAP, token.LBRACK)
@ -929,14 +912,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) { func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) {
p.expr1(x, token.LowestPrec, depth, 0, multiLine) p.expr1(x, token.LowestPrec, depth, multiLine)
} }
// Sets multiLine to true if the expression spans multiple lines. // Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr(x ast.Expr, multiLine *bool) { func (p *printer) expr(x ast.Expr, multiLine *bool) {
const depth = 1 const depth = 1
p.expr1(x, token.LowestPrec, depth, 0, multiLine) p.expr1(x, token.LowestPrec, depth, multiLine)
} }
@ -1145,9 +1128,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
} }
case *ast.CaseClause: case *ast.CaseClause:
if s.Values != nil { if s.List != nil {
p.print(token.CASE) p.print(token.CASE)
p.exprList(s.Pos(), s.Values, 1, blankStart|commaSep, multiLine, s.Colon) p.exprList(s.Pos(), s.List, 1, blankStart|commaSep, multiLine, s.Colon)
} else { } else {
p.print(token.DEFAULT) p.print(token.DEFAULT)
} }
@ -1160,16 +1143,6 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.block(s.Body, 0) p.block(s.Body, 0)
*multiLine = true *multiLine = true
case *ast.TypeCaseClause:
if s.Types != nil {
p.print(token.CASE)
p.exprList(s.Pos(), s.Types, 1, blankStart|commaSep, multiLine, s.Colon)
} else {
p.print(token.DEFAULT)
}
p.print(s.Colon, token.COLON)
p.stmtList(s.Body, 1, nextIsRBrace)
case *ast.TypeSwitchStmt: case *ast.TypeSwitchStmt:
p.print(token.SWITCH) p.print(token.SWITCH)
if s.Init != nil { if s.Init != nil {
@ -1331,13 +1304,23 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
// any control chars. Otherwise, the result is > maxSize. // any control chars. Otherwise, the result is > maxSize.
// //
func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
// nodeSize invokes the printer, which may invoke nodeSize
// recursively. For deep composite literal nests, this can
// lead to an exponential algorithm. Remember previous
// results to prune the recursion (was issue 1628).
if size, found := p.nodeSizes[n]; found {
return size
}
size = maxSize + 1 // assume n doesn't fit size = maxSize + 1 // assume n doesn't fit
p.nodeSizes[n] = size
// nodeSize computation must be indendent of particular // nodeSize computation must be indendent of particular
// style so that we always get the same decision; print // style so that we always get the same decision; print
// in RawFormat // in RawFormat
cfg := Config{Mode: RawFormat} cfg := Config{Mode: RawFormat}
var buf bytes.Buffer var buf bytes.Buffer
if _, err := cfg.Fprint(&buf, p.fset, n); err != nil { if _, err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
return return
} }
if buf.Len() <= maxSize { if buf.Len() <= maxSize {
@ -1347,6 +1330,7 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
} }
} }
size = buf.Len() // n fits size = buf.Len() // n fits
p.nodeSizes[n] = size
} }
return return
} }

View File

@ -12,7 +12,7 @@ import (
"go/token" "go/token"
"io" "io"
"os" "os"
"path" "path/filepath"
"runtime" "runtime"
"tabwriter" "tabwriter"
) )
@ -94,22 +94,23 @@ type printer struct {
// written using writeItem. // written using writeItem.
last token.Position last token.Position
// HTML support
lastTaggedLine int // last line for which a line tag was written
// The list of all source comments, in order of appearance. // The list of all source comments, in order of appearance.
comments []*ast.CommentGroup // may be nil comments []*ast.CommentGroup // may be nil
cindex int // current comment index cindex int // current comment index
useNodeComments bool // if not set, ignore lead and line comments of nodes useNodeComments bool // if not set, ignore lead and line comments of nodes
// Cache of already computed node sizes.
nodeSizes map[ast.Node]int
} }
func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet) { func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
p.output = output p.output = output
p.Config = *cfg p.Config = *cfg
p.fset = fset p.fset = fset
p.errors = make(chan os.Error) p.errors = make(chan os.Error)
p.buffer = make([]whiteSpace, 0, 16) // whitespace sequences are short p.buffer = make([]whiteSpace, 0, 16) // whitespace sequences are short
p.nodeSizes = nodeSizes
} }
@ -244,7 +245,7 @@ func (p *printer) writeItem(pos token.Position, data []byte) {
} }
if debug { if debug {
// do not update p.pos - use write0 // do not update p.pos - use write0
_, filename := path.Split(pos.Filename) _, filename := filepath.Split(pos.Filename)
p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column))) p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
} }
p.write(data) p.write(data)
@ -994,13 +995,8 @@ type Config struct {
} }
// Fprint "pretty-prints" an AST node to output and returns the number // fprint implements Fprint and takes a nodesSizes map for setting up the printer state.
// of bytes written and an error (if any) for a given configuration cfg. func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) {
// Position information is interpreted relative to the file set fset.
// The node type must be *ast.File, or assignment-compatible to ast.Expr,
// ast.Decl, ast.Spec, or ast.Stmt.
//
func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
// redirect output through a trimmer to eliminate trailing whitespace // redirect output through a trimmer to eliminate trailing whitespace
// (Input to a tabwriter must be untrimmed since trailing tabs provide // (Input to a tabwriter must be untrimmed since trailing tabs provide
// formatting information. The tabwriter could provide trimming // formatting information. The tabwriter could provide trimming
@ -1029,7 +1025,7 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{
// setup printer and print node // setup printer and print node
var p printer var p printer
p.init(output, cfg, fset) p.init(output, cfg, fset, nodeSizes)
go func() { go func() {
switch n := node.(type) { switch n := node.(type) {
case ast.Expr: case ast.Expr:
@ -1076,6 +1072,17 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{
} }
// Fprint "pretty-prints" an AST node to output and returns the number
// of bytes written and an error (if any) for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
// The node type must be *ast.File, or assignment-compatible to ast.Expr,
// ast.Decl, ast.Spec, or ast.Stmt.
//
func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
return cfg.fprint(output, fset, node, make(map[ast.Node]int))
}
// Fprint "pretty-prints" an AST node to output. // Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings. // It calls Config.Fprint with default settings.
// //

View File

@ -11,8 +11,9 @@ import (
"go/ast" "go/ast"
"go/parser" "go/parser"
"go/token" "go/token"
"path" "path/filepath"
"testing" "testing"
"time"
) )
@ -45,7 +46,7 @@ const (
) )
func check(t *testing.T, source, golden string, mode checkMode) { func runcheck(t *testing.T, source, golden string, mode checkMode) {
// parse source // parse source
prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments) prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments)
if err != nil { if err != nil {
@ -109,6 +110,32 @@ func check(t *testing.T, source, golden string, mode checkMode) {
} }
func check(t *testing.T, source, golden string, mode checkMode) {
// start a timer to produce a time-out signal
tc := make(chan int)
go func() {
time.Sleep(20e9) // plenty of a safety margin, even for very slow machines
tc <- 0
}()
// run the test
cc := make(chan int)
go func() {
runcheck(t, source, golden, mode)
cc <- 0
}()
// wait for the first finisher
select {
case <-tc:
// test running past time out
t.Errorf("%s: running too slowly", source)
case <-cc:
// test finished within alloted time margin
}
}
type entry struct { type entry struct {
source, golden string source, golden string
mode checkMode mode checkMode
@ -124,13 +151,14 @@ var data = []entry{
{"expressions.input", "expressions.raw", rawFormat}, {"expressions.input", "expressions.raw", rawFormat},
{"declarations.input", "declarations.golden", 0}, {"declarations.input", "declarations.golden", 0},
{"statements.input", "statements.golden", 0}, {"statements.input", "statements.golden", 0},
{"slow.input", "slow.golden", 0},
} }
func TestFiles(t *testing.T) { func TestFiles(t *testing.T) {
for _, e := range data { for _, e := range data {
source := path.Join(dataDir, e.source) source := filepath.Join(dataDir, e.source)
golden := path.Join(dataDir, e.golden) golden := filepath.Join(dataDir, e.golden)
check(t, source, golden, e.mode) check(t, source, golden, e.mode)
// TODO(gri) check that golden is idempotent // TODO(gri) check that golden is idempotent
//check(t, golden, golden, e.mode); //check(t, golden, golden, e.mode);

View File

@ -224,11 +224,7 @@ func _() {
_ = struct{ x int }{0} _ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2} _ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0} _ = struct{ int }{0}
_ = struct { _ = struct{ s struct{ int } }{struct{ int }{0}}
s struct {
int
}
}{struct{ int }{0}} // compositeLit context not propagated => multiLine result
} }

View File

@ -224,7 +224,7 @@ func _() {
_ = struct{ x int }{0} _ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2} _ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0} _ = struct{ int }{0}
_ = struct{ s struct { int } }{struct{ int}{0}} // compositeLit context not propagated => multiLine result _ = struct{ s struct { int } }{struct{ int}{0} }
} }

View File

@ -224,11 +224,7 @@ func _() {
_ = struct{ x int }{0} _ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2} _ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0} _ = struct{ int }{0}
_ = struct { _ = struct{ s struct{ int } }{struct{ int }{0}}
s struct {
int
}
}{struct{ int }{0}} // compositeLit context not propagated => multiLine result
} }

View File

@ -0,0 +1,85 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepequal_test
import (
"testing"
"google3/spam/archer/frontend/deepequal"
)
func TestTwoNilValues(t *testing.T) {
if err := deepequal.Check(nil, nil); err != nil {
t.Errorf("expected nil, saw %v", err)
}
}
type Foo struct {
bar *Bar
bang *Bar
}
type Bar struct {
baz *Baz
foo []*Foo
}
type Baz struct {
entries map[int]interface{}
whatever string
}
func newFoo() *Foo {
return &Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
42: &Foo{},
21: &Bar{},
11: &Baz{whatever: "it's just a test"}}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
43: &Foo{},
22: &Bar{},
13: &Baz{whatever: "this is nuts"}}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
61: &Foo{},
71: &Bar{},
11: &Baz{whatever: "no, it's Go"}}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
0: &Foo{},
-2: &Bar{},
-11: &Baz{whatever: "we need to go deeper"}}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
-2: &Foo{},
-5: &Bar{},
-7: &Baz{whatever: "are you serious?"}}}},
bang: &Bar{foo: []*Foo{}}},
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
-100: &Foo{},
50: &Bar{},
20: &Baz{whatever: "na, not really ..."}}}},
bang: &Bar{foo: []*Foo{}}}}}}}}},
&Foo{bar: &Bar{baz: &Baz{
entries: map[int]interface{}{
2: &Foo{},
1: &Bar{},
-1: &Baz{whatever: "... it's just a test."}}}},
bang: &Bar{foo: []*Foo{}}}}}}}}}
}
func TestElaborate(t *testing.T) {
a := newFoo()
b := newFoo()
if err := deepequal.Check(a, b); err != nil {
t.Errorf("expected nil, saw %v", err)
}
}

85
libgo/go/go/printer/testdata/slow.input vendored Normal file
View File

@ -0,0 +1,85 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package deepequal_test
import (
"testing"
"google3/spam/archer/frontend/deepequal"
)
func TestTwoNilValues(t *testing.T) {
if err := deepequal.Check(nil, nil); err != nil {
t.Errorf("expected nil, saw %v", err)
}
}
type Foo struct {
bar *Bar
bang *Bar
}
type Bar struct {
baz *Baz
foo []*Foo
}
type Baz struct {
entries map[int]interface{}
whatever string
}
func newFoo() (*Foo) {
return &Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
42: &Foo{},
21: &Bar{},
11: &Baz{ whatever: "it's just a test" }}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
43: &Foo{},
22: &Bar{},
13: &Baz{ whatever: "this is nuts" }}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
61: &Foo{},
71: &Bar{},
11: &Baz{ whatever: "no, it's Go" }}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
0: &Foo{},
-2: &Bar{},
-11: &Baz{ whatever: "we need to go deeper" }}}},
bang: &Bar{foo: []*Foo{
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
-2: &Foo{},
-5: &Bar{},
-7: &Baz{ whatever: "are you serious?" }}}},
bang: &Bar{foo: []*Foo{}}},
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
-100: &Foo{},
50: &Bar{},
20: &Baz{ whatever: "na, not really ..." }}}},
bang: &Bar{foo: []*Foo{}}}}}}}}},
&Foo{bar: &Bar{ baz: &Baz{
entries: map[int]interface{}{
2: &Foo{},
1: &Bar{},
-1: &Baz{ whatever: "... it's just a test." }}}},
bang: &Bar{foo: []*Foo{}}}}}}}}}
}
func TestElaborate(t *testing.T) {
a := newFoo()
b := newFoo()
if err := deepequal.Check(a, b); err != nil {
t.Errorf("expected nil, saw %v", err)
}
}

View File

@ -23,7 +23,7 @@ package scanner
import ( import (
"bytes" "bytes"
"go/token" "go/token"
"path" "path/filepath"
"strconv" "strconv"
"unicode" "unicode"
"utf8" "utf8"
@ -118,7 +118,7 @@ func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode uint
panic("file size does not match src len") panic("file size does not match src len")
} }
S.file = file S.file = file
S.dir, _ = path.Split(file.Name()) S.dir, _ = filepath.Split(file.Name())
S.src = src S.src = src
S.err = err S.err = err
S.mode = mode S.mode = mode
@ -177,13 +177,13 @@ var prefix = []byte("//line ")
func (S *Scanner) interpretLineComment(text []byte) { func (S *Scanner) interpretLineComment(text []byte) {
if bytes.HasPrefix(text, prefix) { if bytes.HasPrefix(text, prefix) {
// get filename and line number, if any // get filename and line number, if any
if i := bytes.Index(text, []byte{':'}); i > 0 { if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 { if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 {
// valid //line filename:line comment; // valid //line filename:line comment;
filename := path.Clean(string(text[len(prefix):i])) filename := filepath.Clean(string(text[len(prefix):i]))
if filename[0] != '/' { if !filepath.IsAbs(filename) {
// make filename relative to current directory // make filename relative to current directory
filename = path.Join(S.dir, filename) filename = filepath.Join(S.dir, filename)
} }
// update scanner position // update scanner position
S.file.AddLineInfo(S.lineOffset, filename, line-1) // -1 since comment applies to next line S.file.AddLineInfo(S.lineOffset, filename, line-1) // -1 since comment applies to next line

View File

@ -7,6 +7,8 @@ package scanner
import ( import (
"go/token" "go/token"
"os" "os"
"path/filepath"
"runtime"
"testing" "testing"
) )
@ -443,32 +445,41 @@ func TestSemis(t *testing.T) {
} }
} }
type segment struct {
var segments = []struct {
srcline string // a line of source text srcline string // a line of source text
filename string // filename for current token filename string // filename for current token
line int // line number for current token line int // line number for current token
}{ }
var segments = []segment{
// exactly one token per line since the test consumes one token per segment // exactly one token per line since the test consumes one token per segment
{" line1", "dir/TestLineComments", 1}, {" line1", filepath.Join("dir", "TestLineComments"), 1},
{"\nline2", "dir/TestLineComments", 2}, {"\nline2", filepath.Join("dir", "TestLineComments"), 2},
{"\nline3 //line File1.go:100", "dir/TestLineComments", 3}, // bad line comment, ignored {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored
{"\nline4", "dir/TestLineComments", 4}, {"\nline4", filepath.Join("dir", "TestLineComments"), 4},
{"\n//line File1.go:100\n line100", "dir/File1.go", 100}, {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 100},
{"\n//line File2.go:200\n line200", "dir/File2.go", 200}, {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 200},
{"\n//line :1\n line1", "dir", 1}, {"\n//line :1\n line1", "dir", 1},
{"\n//line foo:42\n line42", "dir/foo", 42}, {"\n//line foo:42\n line42", filepath.Join("dir", "foo"), 42},
{"\n //line foo:42\n line44", "dir/foo", 44}, // bad line comment, ignored {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored
{"\n//line foo 42\n line46", "dir/foo", 46}, // bad line comment, ignored {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored
{"\n//line foo:42 extra text\n line48", "dir/foo", 48}, // bad line comment, ignored {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored
{"\n//line /bar:42\n line42", "/bar", 42}, {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42},
{"\n//line ./foo:42\n line42", "dir/foo", 42}, {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42},
{"\n//line a/b/c/File1.go:100\n line100", "dir/a/b/c/File1.go", 100}, {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100},
}
var winsegments = []segment{
{"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100},
} }
// Verify that comments of the form "//line filename:line" are interpreted correctly. // Verify that comments of the form "//line filename:line" are interpreted correctly.
func TestLineComments(t *testing.T) { func TestLineComments(t *testing.T) {
if runtime.GOOS == "windows" {
segments = append(segments, winsegments...)
}
// make source // make source
var src string var src string
for _, e := range segments { for _, e := range segments {
@ -477,7 +488,7 @@ func TestLineComments(t *testing.T) {
// verify scan // verify scan
var S Scanner var S Scanner
file := fset.AddFile("dir/TestLineComments", fset.Base(), len(src)) file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
S.Init(file, []byte(src), nil, 0) S.Init(file, []byte(src), nil, 0)
for _, s := range segments { for _, s := range segments {
p, _, lit := S.Scan() p, _, lit := S.Scan()

View File

@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// This file implements scope support functions. // DEPRECATED FILE - WILL GO AWAY EVENTUALLY.
//
// Scope handling is now done in go/parser.
// The functionality here is only present to
// keep the typechecker running for now.
package typechecker package typechecker
import ( import "go/ast"
"fmt"
"go/ast"
"go/token"
)
func (tc *typechecker) openScope() *ast.Scope { func (tc *typechecker) openScope() *ast.Scope {
@ -24,52 +24,25 @@ func (tc *typechecker) closeScope() {
} }
// objPos computes the source position of the declaration of an object name.
// Only required for error reporting, so doesn't have to be fast.
func objPos(obj *ast.Object) (pos token.Pos) {
switch d := obj.Decl.(type) {
case *ast.Field:
for _, n := range d.Names {
if n.Name == obj.Name {
return n.Pos()
}
}
case *ast.ValueSpec:
for _, n := range d.Names {
if n.Name == obj.Name {
return n.Pos()
}
}
case *ast.TypeSpec:
return d.Name.Pos()
case *ast.FuncDecl:
return d.Name.Pos()
}
if debug {
fmt.Printf("decl = %T\n", obj.Decl)
}
panic("unreachable")
}
// declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields. // declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields.
// It returns the newly allocated object. If an object with the same name already exists in scope, an error // It returns the newly allocated object. If an object with the same name already exists in scope, an error
// is reported and the object is not inserted. // is reported and the object is not inserted.
// (Objects with _ name are always inserted into a scope without errors, but they cannot be found.) func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object {
obj := ast.NewObj(kind, name.Name) obj := ast.NewObj(kind, name.Name)
obj.Decl = decl obj.Decl = decl
obj.N = n //obj.N = n
name.Obj = obj name.Obj = obj
if alt := scope.Insert(obj); alt != obj { if name.Name != "_" {
tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, objPos(alt)) if alt := scope.Insert(obj); alt != obj {
tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, tc.fset.Position(alt.Pos()).String())
}
} }
return obj return obj
} }
// decl is the same as declInScope(tc.topScope, ...) // decl is the same as declInScope(tc.topScope, ...)
func (tc *typechecker) decl(kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object { func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
return tc.declInScope(tc.topScope, kind, name, decl, n) return tc.declInScope(tc.topScope, kind, name, decl, n)
} }
@ -91,7 +64,7 @@ func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) {
// findField returns the object with the given name if visible in the type's scope. // findField returns the object with the given name if visible in the type's scope.
// If no such object is found, an error is reported and a bad object is returned instead. // If no such object is found, an error is reported and a bad object is returned instead.
func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Object) { func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) {
// TODO(gri) This is simplistic at the moment and ignores anonymous fields. // TODO(gri) This is simplistic at the moment and ignores anonymous fields.
obj = typ.Scope.Lookup(name.Name) obj = typ.Scope.Lookup(name.Name)
if obj == nil { if obj == nil {
@ -100,20 +73,3 @@ func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Objec
} }
return return
} }
// printScope prints the objects in a scope.
func printScope(scope *ast.Scope) {
fmt.Printf("scope %p {", scope)
if scope != nil && len(scope.Objects) > 0 {
fmt.Println()
for _, obj := range scope.Objects {
form := "void"
if obj.Type != nil {
form = obj.Type.Form.String()
}
fmt.Printf("\t%s\t%s\n", obj.Name, form)
}
}
fmt.Printf("}\n")
}

View File

@ -7,7 +7,7 @@
package P1 package P1
const ( const (
c1 /* ERROR "missing initializer" */ c1 = 0
c2 int = 0 c2 int = 0
c3, c4 = 0 c3, c4 = 0
) )

View File

@ -27,8 +27,11 @@ func (T) m1 /* ERROR "already declared" */ () {}
func (x *T) m2(u, x /* ERROR "already declared" */ int) {} func (x *T) m2(u, x /* ERROR "already declared" */ int) {}
func (x *T) m3(a, b, c int) (u, x /* ERROR "already declared" */ int) {} func (x *T) m3(a, b, c int) (u, x /* ERROR "already declared" */ int) {}
func (T) _(x, x /* ERROR "already declared" */ int) {} // The following are disabled for now because the typechecker
func (T) _() (x, x /* ERROR "already declared" */ int) {} // in in the process of being rewritten and cannot handle them
// at the moment
//func (T) _(x, x /* "already declared" */ int) {}
//func (T) _() (x, x /* "already declared" */ int) {}
//func (PT) _() {} //func (PT) _() {}

View File

@ -7,5 +7,5 @@
package P4 package P4
const ( const (
c0 /* ERROR "missing initializer" */ c0 = 0
) )

View File

@ -0,0 +1,125 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typechecker
import "go/ast"
// A Type represents a Go type.
type Type struct {
Form Form
Obj *ast.Object // corresponding type name, or nil
Scope *ast.Scope // fields and methods, always present
N uint // basic type id, array length, number of function results, or channel direction
Key, Elt *Type // map key and array, pointer, slice, map or channel element
Params *ast.Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil
Expr ast.Expr // corresponding AST expression
}
// NewType creates a new type of a given form.
func NewType(form Form) *Type {
return &Type{Form: form, Scope: ast.NewScope(nil)}
}
// Form describes the form of a type.
type Form int
// The list of possible type forms.
const (
BadType Form = iota // for error handling
Unresolved // type not fully setup
Basic
Array
Struct
Pointer
Function
Method
Interface
Slice
Map
Channel
Tuple
)
var formStrings = [...]string{
BadType: "badType",
Unresolved: "unresolved",
Basic: "basic",
Array: "array",
Struct: "struct",
Pointer: "pointer",
Function: "function",
Method: "method",
Interface: "interface",
Slice: "slice",
Map: "map",
Channel: "channel",
Tuple: "tuple",
}
func (form Form) String() string { return formStrings[form] }
// The list of basic type id's.
const (
Bool = iota
Byte
Uint
Int
Float
Complex
Uintptr
String
Uint8
Uint16
Uint32
Uint64
Int8
Int16
Int32
Int64
Float32
Float64
Complex64
Complex128
// TODO(gri) ideal types are missing
)
var BasicTypes = map[uint]string{
Bool: "bool",
Byte: "byte",
Uint: "uint",
Int: "int",
Float: "float",
Complex: "complex",
Uintptr: "uintptr",
String: "string",
Uint8: "uint8",
Uint16: "uint16",
Uint32: "uint32",
Uint64: "uint64",
Int8: "int8",
Int16: "int16",
Int32: "int32",
Int64: "int64",
Float32: "float32",
Float64: "float64",
Complex64: "complex64",
Complex128: "complex128",
}

View File

@ -65,6 +65,7 @@ type typechecker struct {
fset *token.FileSet fset *token.FileSet
scanner.ErrorVector scanner.ErrorVector
importer Importer importer Importer
globals []*ast.Object // list of global objects
topScope *ast.Scope // current top-most scope topScope *ast.Scope // current top-most scope
cyclemap map[*ast.Object]bool // for cycle detection cyclemap map[*ast.Object]bool // for cycle detection
iota int // current value of iota iota int // current value of iota
@ -94,7 +95,7 @@ phase 1: declare all global objects; also collect all function and method declar
- report global double declarations - report global double declarations
phase 2: bind methods to their receiver base types phase 2: bind methods to their receiver base types
- received base types must be declared in the package, thus for - receiver base types must be declared in the package, thus for
each method a corresponding (unresolved) type must exist each method a corresponding (unresolved) type must exist
- report method double declarations and errors with base types - report method double declarations and errors with base types
@ -142,16 +143,16 @@ func (tc *typechecker) checkPackage(pkg *ast.Package) {
} }
// phase 3: resolve all global objects // phase 3: resolve all global objects
// (note that objects with _ name are also in the scope)
tc.cyclemap = make(map[*ast.Object]bool) tc.cyclemap = make(map[*ast.Object]bool)
for _, obj := range tc.topScope.Objects { for _, obj := range tc.globals {
tc.resolve(obj) tc.resolve(obj)
} }
assert(len(tc.cyclemap) == 0) assert(len(tc.cyclemap) == 0)
// 4: sequentially typecheck function and method bodies // 4: sequentially typecheck function and method bodies
for _, f := range funcs { for _, f := range funcs {
tc.checkBlock(f.Body.List, f.Name.Obj.Type) ftype, _ := f.Name.Obj.Type.(*Type)
tc.checkBlock(f.Body.List, ftype)
} }
pkg.Scope = tc.topScope pkg.Scope = tc.topScope
@ -183,11 +184,11 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
} }
} }
for _, name := range s.Names { for _, name := range s.Names {
tc.decl(ast.Con, name, s, iota) tc.globals = append(tc.globals, tc.decl(ast.Con, name, s, iota))
} }
case token.VAR: case token.VAR:
for _, name := range s.Names { for _, name := range s.Names {
tc.decl(ast.Var, name, s, 0) tc.globals = append(tc.globals, tc.decl(ast.Var, name, s, 0))
} }
default: default:
panic("unreachable") panic("unreachable")
@ -196,9 +197,10 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
iota++ iota++
case *ast.TypeSpec: case *ast.TypeSpec:
obj := tc.decl(ast.Typ, s.Name, s, 0) obj := tc.decl(ast.Typ, s.Name, s, 0)
tc.globals = append(tc.globals, obj)
// give all type objects an unresolved type so // give all type objects an unresolved type so
// that we can collect methods in the type scope // that we can collect methods in the type scope
typ := ast.NewType(ast.Unresolved) typ := NewType(Unresolved)
obj.Type = typ obj.Type = typ
typ.Obj = obj typ.Obj = obj
default: default:
@ -208,7 +210,7 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
case *ast.FuncDecl: case *ast.FuncDecl:
if d.Recv == nil { if d.Recv == nil {
tc.decl(ast.Fun, d.Name, d, 0) tc.globals = append(tc.globals, tc.decl(ast.Fun, d.Name, d, 0))
} }
default: default:
@ -239,8 +241,8 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
} else if obj.Kind != ast.Typ { } else if obj.Kind != ast.Typ {
tc.Errorf(name.Pos(), "invalid receiver: %s is not a type", name.Name) tc.Errorf(name.Pos(), "invalid receiver: %s is not a type", name.Name)
} else { } else {
typ := obj.Type typ := obj.Type.(*Type)
assert(typ.Form == ast.Unresolved) assert(typ.Form == Unresolved)
scope = typ.Scope scope = typ.Scope
} }
} }
@ -261,7 +263,7 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
func (tc *typechecker) resolve(obj *ast.Object) { func (tc *typechecker) resolve(obj *ast.Object) {
// check for declaration cycles // check for declaration cycles
if tc.cyclemap[obj] { if tc.cyclemap[obj] {
tc.Errorf(objPos(obj), "illegal cycle in declaration of %s", obj.Name) tc.Errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name)
obj.Kind = ast.Bad obj.Kind = ast.Bad
return return
} }
@ -271,7 +273,7 @@ func (tc *typechecker) resolve(obj *ast.Object) {
}() }()
// resolve non-type objects // resolve non-type objects
typ := obj.Type typ, _ := obj.Type.(*Type)
if typ == nil { if typ == nil {
switch obj.Kind { switch obj.Kind {
case ast.Bad: case ast.Bad:
@ -282,12 +284,12 @@ func (tc *typechecker) resolve(obj *ast.Object) {
case ast.Var: case ast.Var:
tc.declVar(obj) tc.declVar(obj)
//obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false) obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false)
case ast.Fun: case ast.Fun:
obj.Type = ast.NewType(ast.Function) obj.Type = NewType(Function)
t := obj.Decl.(*ast.FuncDecl).Type t := obj.Decl.(*ast.FuncDecl).Type
tc.declSignature(obj.Type, nil, t.Params, t.Results) tc.declSignature(obj.Type.(*Type), nil, t.Params, t.Results)
default: default:
// type objects have non-nil types when resolve is called // type objects have non-nil types when resolve is called
@ -300,32 +302,34 @@ func (tc *typechecker) resolve(obj *ast.Object) {
} }
// resolve type objects // resolve type objects
if typ.Form == ast.Unresolved { if typ.Form == Unresolved {
tc.typeFor(typ, typ.Obj.Decl.(*ast.TypeSpec).Type, false) tc.typeFor(typ, typ.Obj.Decl.(*ast.TypeSpec).Type, false)
// provide types for all methods // provide types for all methods
for _, obj := range typ.Scope.Objects { for _, obj := range typ.Scope.Objects {
if obj.Kind == ast.Fun { if obj.Kind == ast.Fun {
assert(obj.Type == nil) assert(obj.Type == nil)
obj.Type = ast.NewType(ast.Method) obj.Type = NewType(Method)
f := obj.Decl.(*ast.FuncDecl) f := obj.Decl.(*ast.FuncDecl)
t := f.Type t := f.Type
tc.declSignature(obj.Type, f.Recv, t.Params, t.Results) tc.declSignature(obj.Type.(*Type), f.Recv, t.Params, t.Results)
} }
} }
} }
} }
func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *ast.Type) { func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) {
tc.openScope() tc.openScope()
defer tc.closeScope() defer tc.closeScope()
// inject function/method parameters into block scope, if any // inject function/method parameters into block scope, if any
if ftype != nil { if ftype != nil {
for _, par := range ftype.Params.Objects { for _, par := range ftype.Params.Objects {
obj := tc.topScope.Insert(par) if par.Name != "_" {
assert(obj == par) // ftype has no double declarations obj := tc.topScope.Insert(par)
assert(obj == par) // ftype has no double declarations
}
} }
} }
@ -362,8 +366,8 @@ func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref b
} }
func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.FieldList) { func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) {
assert((typ.Form == ast.Method) == (recv != nil)) assert((typ.Form == Method) == (recv != nil))
typ.Params = ast.NewScope(nil) typ.Params = ast.NewScope(nil)
tc.declFields(typ.Params, recv, true) tc.declFields(typ.Params, recv, true)
tc.declFields(typ.Params, params, true) tc.declFields(typ.Params, params, true)
@ -371,7 +375,7 @@ func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.F
} }
func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Type) { func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) {
x = unparen(x) x = unparen(x)
// type name // type name
@ -381,10 +385,10 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if obj.Kind != ast.Typ { if obj.Kind != ast.Typ {
tc.Errorf(t.Pos(), "%s is not a type", t.Name) tc.Errorf(t.Pos(), "%s is not a type", t.Name)
if def == nil { if def == nil {
typ = ast.NewType(ast.BadType) typ = NewType(BadType)
} else { } else {
typ = def typ = def
typ.Form = ast.BadType typ.Form = BadType
} }
typ.Expr = x typ.Expr = x
return return
@ -393,7 +397,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if !ref { if !ref {
tc.resolve(obj) // check for cycles even if type resolved tc.resolve(obj) // check for cycles even if type resolved
} }
typ = obj.Type typ = obj.Type.(*Type)
if def != nil { if def != nil {
// new type declaration: copy type structure // new type declaration: copy type structure
@ -410,7 +414,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
// type literal // type literal
typ = def typ = def
if typ == nil { if typ == nil {
typ = ast.NewType(ast.BadType) typ = NewType(BadType)
} }
typ.Expr = x typ.Expr = x
@ -419,42 +423,42 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if debug { if debug {
fmt.Println("qualified identifier unimplemented") fmt.Println("qualified identifier unimplemented")
} }
typ.Form = ast.BadType typ.Form = BadType
case *ast.StarExpr: case *ast.StarExpr:
typ.Form = ast.Pointer typ.Form = Pointer
typ.Elt = tc.typeFor(nil, t.X, true) typ.Elt = tc.typeFor(nil, t.X, true)
case *ast.ArrayType: case *ast.ArrayType:
if t.Len != nil { if t.Len != nil {
typ.Form = ast.Array typ.Form = Array
// TODO(gri) compute the real length // TODO(gri) compute the real length
// (this may call resolve recursively) // (this may call resolve recursively)
(*typ).N = 42 (*typ).N = 42
} else { } else {
typ.Form = ast.Slice typ.Form = Slice
} }
typ.Elt = tc.typeFor(nil, t.Elt, t.Len == nil) typ.Elt = tc.typeFor(nil, t.Elt, t.Len == nil)
case *ast.StructType: case *ast.StructType:
typ.Form = ast.Struct typ.Form = Struct
tc.declFields(typ.Scope, t.Fields, false) tc.declFields(typ.Scope, t.Fields, false)
case *ast.FuncType: case *ast.FuncType:
typ.Form = ast.Function typ.Form = Function
tc.declSignature(typ, nil, t.Params, t.Results) tc.declSignature(typ, nil, t.Params, t.Results)
case *ast.InterfaceType: case *ast.InterfaceType:
typ.Form = ast.Interface typ.Form = Interface
tc.declFields(typ.Scope, t.Methods, true) tc.declFields(typ.Scope, t.Methods, true)
case *ast.MapType: case *ast.MapType:
typ.Form = ast.Map typ.Form = Map
typ.Key = tc.typeFor(nil, t.Key, true) typ.Key = tc.typeFor(nil, t.Key, true)
typ.Elt = tc.typeFor(nil, t.Value, true) typ.Elt = tc.typeFor(nil, t.Value, true)
case *ast.ChanType: case *ast.ChanType:
typ.Form = ast.Channel typ.Form = Channel
typ.N = uint(t.Dir) typ.N = uint(t.Dir)
typ.Elt = tc.typeFor(nil, t.Value, true) typ.Elt = tc.typeFor(nil, t.Value, true)

View File

@ -93,7 +93,7 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
func testFilter(f *os.FileInfo) bool { func testFilter(f *os.FileInfo) bool {
return strings.HasSuffix(f.Name, ".go") && f.Name[0] != '.' return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.'
} }

View File

@ -24,8 +24,8 @@ func init() {
Universe = ast.NewScope(nil) Universe = ast.NewScope(nil)
// basic types // basic types
for n, name := range ast.BasicTypes { for n, name := range BasicTypes {
typ := ast.NewType(ast.Basic) typ := NewType(Basic)
typ.N = n typ.N = n
obj := ast.NewObj(ast.Typ, name) obj := ast.NewObj(ast.Typ, name)
obj.Type = typ obj.Type = typ

View File

@ -50,7 +50,7 @@ func testError(t *testing.T) {
func TestUintCodec(t *testing.T) { func TestUintCodec(t *testing.T) {
defer testError(t) defer testError(t)
b := new(bytes.Buffer) b := new(bytes.Buffer)
encState := newEncoderState(nil, b) encState := newEncoderState(b)
for _, tt := range encodeT { for _, tt := range encodeT {
b.Reset() b.Reset()
encState.encodeUint(tt.x) encState.encodeUint(tt.x)
@ -58,7 +58,7 @@ func TestUintCodec(t *testing.T) {
t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes()) t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes())
} }
} }
decState := newDecodeState(nil, b) decState := newDecodeState(b)
for u := uint64(0); ; u = (u + 1) * 7 { for u := uint64(0); ; u = (u + 1) * 7 {
b.Reset() b.Reset()
encState.encodeUint(u) encState.encodeUint(u)
@ -75,9 +75,9 @@ func TestUintCodec(t *testing.T) {
func verifyInt(i int64, t *testing.T) { func verifyInt(i int64, t *testing.T) {
defer testError(t) defer testError(t)
var b = new(bytes.Buffer) var b = new(bytes.Buffer)
encState := newEncoderState(nil, b) encState := newEncoderState(b)
encState.encodeInt(i) encState.encodeInt(i)
decState := newDecodeState(nil, b) decState := newDecodeState(b)
decState.buf = make([]byte, 8) decState.buf = make([]byte, 8)
j := decState.decodeInt() j := decState.decodeInt()
if i != j { if i != j {
@ -111,9 +111,16 @@ var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40}
// The result of encoding "hello" with field number 7 // The result of encoding "hello" with field number 7
var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'} var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'}
func newencoderState(b *bytes.Buffer) *encoderState { func newDecodeState(buf *bytes.Buffer) *decoderState {
d := new(decoderState)
d.b = buf
d.buf = make([]byte, uint64Size)
return d
}
func newEncoderState(b *bytes.Buffer) *encoderState {
b.Reset() b.Reset()
state := newEncoderState(nil, b) state := &encoderState{enc: nil, b: b}
state.fieldnum = -1 state.fieldnum = -1
return state return state
} }
@ -127,7 +134,7 @@ func TestScalarEncInstructions(t *testing.T) {
{ {
data := struct{ a bool }{true} data := struct{ a bool }{true}
instr := &encInstr{encBool, 6, 0, 0} instr := &encInstr{encBool, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(boolResult, b.Bytes()) { if !bytes.Equal(boolResult, b.Bytes()) {
t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes()) t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes())
@ -139,7 +146,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a int }{17} data := struct{ a int }{17}
instr := &encInstr{encInt, 6, 0, 0} instr := &encInstr{encInt, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(signedResult, b.Bytes()) { if !bytes.Equal(signedResult, b.Bytes()) {
t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes()) t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes())
@ -151,7 +158,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a uint }{17} data := struct{ a uint }{17}
instr := &encInstr{encUint, 6, 0, 0} instr := &encInstr{encUint, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(unsignedResult, b.Bytes()) { if !bytes.Equal(unsignedResult, b.Bytes()) {
t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes()) t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes())
@ -163,7 +170,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a int8 }{17} data := struct{ a int8 }{17}
instr := &encInstr{encInt8, 6, 0, 0} instr := &encInstr{encInt8, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(signedResult, b.Bytes()) { if !bytes.Equal(signedResult, b.Bytes()) {
t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes()) t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes())
@ -175,7 +182,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a uint8 }{17} data := struct{ a uint8 }{17}
instr := &encInstr{encUint8, 6, 0, 0} instr := &encInstr{encUint8, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(unsignedResult, b.Bytes()) { if !bytes.Equal(unsignedResult, b.Bytes()) {
t.Errorf("uint8 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) t.Errorf("uint8 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
@ -187,7 +194,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a int16 }{17} data := struct{ a int16 }{17}
instr := &encInstr{encInt16, 6, 0, 0} instr := &encInstr{encInt16, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(signedResult, b.Bytes()) { if !bytes.Equal(signedResult, b.Bytes()) {
t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes()) t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes())
@ -199,7 +206,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a uint16 }{17} data := struct{ a uint16 }{17}
instr := &encInstr{encUint16, 6, 0, 0} instr := &encInstr{encUint16, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(unsignedResult, b.Bytes()) { if !bytes.Equal(unsignedResult, b.Bytes()) {
t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
@ -211,7 +218,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a int32 }{17} data := struct{ a int32 }{17}
instr := &encInstr{encInt32, 6, 0, 0} instr := &encInstr{encInt32, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(signedResult, b.Bytes()) { if !bytes.Equal(signedResult, b.Bytes()) {
t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes()) t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes())
@ -223,7 +230,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a uint32 }{17} data := struct{ a uint32 }{17}
instr := &encInstr{encUint32, 6, 0, 0} instr := &encInstr{encUint32, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(unsignedResult, b.Bytes()) { if !bytes.Equal(unsignedResult, b.Bytes()) {
t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
@ -235,7 +242,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a int64 }{17} data := struct{ a int64 }{17}
instr := &encInstr{encInt64, 6, 0, 0} instr := &encInstr{encInt64, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(signedResult, b.Bytes()) { if !bytes.Equal(signedResult, b.Bytes()) {
t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes()) t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes())
@ -247,7 +254,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a uint64 }{17} data := struct{ a uint64 }{17}
instr := &encInstr{encUint64, 6, 0, 0} instr := &encInstr{encUint64, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(unsignedResult, b.Bytes()) { if !bytes.Equal(unsignedResult, b.Bytes()) {
t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes())
@ -259,7 +266,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a float32 }{17} data := struct{ a float32 }{17}
instr := &encInstr{encFloat32, 6, 0, 0} instr := &encInstr{encFloat32, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(floatResult, b.Bytes()) { if !bytes.Equal(floatResult, b.Bytes()) {
t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes()) t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes())
@ -271,7 +278,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a float64 }{17} data := struct{ a float64 }{17}
instr := &encInstr{encFloat64, 6, 0, 0} instr := &encInstr{encFloat64, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(floatResult, b.Bytes()) { if !bytes.Equal(floatResult, b.Bytes()) {
t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes()) t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes())
@ -283,7 +290,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a []byte }{[]byte("hello")} data := struct{ a []byte }{[]byte("hello")}
instr := &encInstr{encUint8Array, 6, 0, 0} instr := &encInstr{encUint8Array, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(bytesResult, b.Bytes()) { if !bytes.Equal(bytesResult, b.Bytes()) {
t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes()) t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes())
@ -295,7 +302,7 @@ func TestScalarEncInstructions(t *testing.T) {
b.Reset() b.Reset()
data := struct{ a string }{"hello"} data := struct{ a string }{"hello"}
instr := &encInstr{encString, 6, 0, 0} instr := &encInstr{encString, 6, 0, 0}
state := newencoderState(b) state := newEncoderState(b)
instr.op(instr, state, unsafe.Pointer(&data)) instr.op(instr, state, unsafe.Pointer(&data))
if !bytes.Equal(bytesResult, b.Bytes()) { if !bytes.Equal(bytesResult, b.Bytes()) {
t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes()) t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes())
@ -303,7 +310,7 @@ func TestScalarEncInstructions(t *testing.T) {
} }
} }
func execDec(typ string, instr *decInstr, state *decodeState, t *testing.T, p unsafe.Pointer) { func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, p unsafe.Pointer) {
defer testError(t) defer testError(t)
v := int(state.decodeUint()) v := int(state.decodeUint())
if v+state.fieldnum != 6 { if v+state.fieldnum != 6 {
@ -313,9 +320,9 @@ func execDec(typ string, instr *decInstr, state *decodeState, t *testing.T, p un
state.fieldnum = 6 state.fieldnum = 6
} }
func newDecodeStateFromData(data []byte) *decodeState { func newDecodeStateFromData(data []byte) *decoderState {
b := bytes.NewBuffer(data) b := bytes.NewBuffer(data)
state := newDecodeState(nil, b) state := newDecodeState(b)
state.fieldnum = -1 state.fieldnum = -1
return state return state
} }
@ -997,9 +1004,9 @@ func TestInvalidField(t *testing.T) {
var bad0 Bad0 var bad0 Bad0
bad0.CH = make(chan int) bad0.CH = make(chan int)
b := new(bytes.Buffer) b := new(bytes.Buffer)
var nilEncoder *Encoder dummyEncoder := new(Encoder) // sufficient for this purpose.
err := nilEncoder.encode(b, reflect.NewValue(&bad0), userType(reflect.Typeof(&bad0))) dummyEncoder.encode(b, reflect.NewValue(&bad0), userType(reflect.Typeof(&bad0)))
if err == nil { if err := dummyEncoder.err; err == nil {
t.Error("expected error; got none") t.Error("expected error; got none")
} else if strings.Index(err.String(), "type") < 0 { } else if strings.Index(err.String(), "type") < 0 {
t.Error("expected type error; got", err) t.Error("expected type error; got", err)

View File

@ -13,38 +13,47 @@ import (
"math" "math"
"os" "os"
"reflect" "reflect"
"unicode"
"unsafe" "unsafe"
"utf8"
) )
var ( var (
errBadUint = os.ErrorString("gob: encoded unsigned integer out of range") errBadUint = os.ErrorString("gob: encoded unsigned integer out of range")
errBadType = os.ErrorString("gob: unknown type id or corrupted data") errBadType = os.ErrorString("gob: unknown type id or corrupted data")
errRange = os.ErrorString("gob: internal error: field numbers out of bounds") errRange = os.ErrorString("gob: bad data: field numbers out of bounds")
) )
// The execution state of an instance of the decoder. A new state // decoderState is the execution state of an instance of the decoder. A new state
// is created for nested objects. // is created for nested objects.
type decodeState struct { type decoderState struct {
dec *Decoder dec *Decoder
// The buffer is stored with an extra indirection because it may be replaced // The buffer is stored with an extra indirection because it may be replaced
// if we load a type during decode (when reading an interface value). // if we load a type during decode (when reading an interface value).
b *bytes.Buffer b *bytes.Buffer
fieldnum int // the last field number read. fieldnum int // the last field number read.
buf []byte buf []byte
next *decoderState // for free list
} }
// We pass the bytes.Buffer separately for easier testing of the infrastructure // We pass the bytes.Buffer separately for easier testing of the infrastructure
// without requiring a full Decoder. // without requiring a full Decoder.
func newDecodeState(dec *Decoder, buf *bytes.Buffer) *decodeState { func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState {
d := new(decodeState) d := dec.freeList
d.dec = dec if d == nil {
d = new(decoderState)
d.dec = dec
d.buf = make([]byte, uint64Size)
} else {
dec.freeList = d.next
}
d.b = buf d.b = buf
d.buf = make([]byte, uint64Size)
return d return d
} }
func (dec *Decoder) freeDecoderState(d *decoderState) {
d.next = dec.freeList
dec.freeList = d
}
func overflow(name string) os.ErrorString { func overflow(name string) os.ErrorString {
return os.ErrorString(`value for "` + name + `" out of range`) return os.ErrorString(`value for "` + name + `" out of range`)
} }
@ -85,7 +94,7 @@ func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err os.Erro
// decodeUint reads an encoded unsigned integer from state.r. // decodeUint reads an encoded unsigned integer from state.r.
// Does not check for overflow. // Does not check for overflow.
func (state *decodeState) decodeUint() (x uint64) { func (state *decoderState) decodeUint() (x uint64) {
b, err := state.b.ReadByte() b, err := state.b.ReadByte()
if err != nil { if err != nil {
error(err) error(err)
@ -112,7 +121,7 @@ func (state *decodeState) decodeUint() (x uint64) {
// decodeInt reads an encoded signed integer from state.r. // decodeInt reads an encoded signed integer from state.r.
// Does not check for overflow. // Does not check for overflow.
func (state *decodeState) decodeInt() int64 { func (state *decoderState) decodeInt() int64 {
x := state.decodeUint() x := state.decodeUint()
if x&1 != 0 { if x&1 != 0 {
return ^int64(x >> 1) return ^int64(x >> 1)
@ -120,7 +129,8 @@ func (state *decodeState) decodeInt() int64 {
return int64(x >> 1) return int64(x >> 1)
} }
type decOp func(i *decInstr, state *decodeState, p unsafe.Pointer) // decOp is the signature of a decoding operator for a given type.
type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer)
// The 'instructions' of the decoding machine // The 'instructions' of the decoding machine
type decInstr struct { type decInstr struct {
@ -150,26 +160,31 @@ func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
return p return p
} }
func ignoreUint(i *decInstr, state *decodeState, p unsafe.Pointer) { // ignoreUint discards a uint value with no destination.
func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.decodeUint() state.decodeUint()
} }
func ignoreTwoUints(i *decInstr, state *decodeState, p unsafe.Pointer) { // ignoreTwoUints discards a uint value with no destination. It's used to skip
// complex values.
func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.decodeUint() state.decodeUint()
state.decodeUint() state.decodeUint()
} }
func decBool(i *decInstr, state *decodeState, p unsafe.Pointer) { // decBool decodes a uiint and stores it as a boolean through p.
func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool))
} }
p = *(*unsafe.Pointer)(p) p = *(*unsafe.Pointer)(p)
} }
*(*bool)(p) = state.decodeInt() != 0 *(*bool)(p) = state.decodeUint() != 0
} }
func decInt8(i *decInstr, state *decodeState, p unsafe.Pointer) { // decInt8 decodes an integer and stores it as an int8 through p.
func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8))
@ -184,7 +199,8 @@ func decInt8(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decUint8(i *decInstr, state *decodeState, p unsafe.Pointer) { // decUint8 decodes an unsigned integer and stores it as a uint8 through p.
func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8))
@ -199,7 +215,8 @@ func decUint8(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decInt16(i *decInstr, state *decodeState, p unsafe.Pointer) { // decInt16 decodes an integer and stores it as an int16 through p.
func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16))
@ -214,7 +231,8 @@ func decInt16(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decUint16(i *decInstr, state *decodeState, p unsafe.Pointer) { // decUint16 decodes an unsigned integer and stores it as a uint16 through p.
func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16))
@ -229,7 +247,8 @@ func decUint16(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decInt32(i *decInstr, state *decodeState, p unsafe.Pointer) { // decInt32 decodes an integer and stores it as an int32 through p.
func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32))
@ -244,7 +263,8 @@ func decInt32(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decUint32(i *decInstr, state *decodeState, p unsafe.Pointer) { // decUint32 decodes an unsigned integer and stores it as a uint32 through p.
func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32))
@ -259,7 +279,8 @@ func decUint32(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decInt64(i *decInstr, state *decodeState, p unsafe.Pointer) { // decInt64 decodes an integer and stores it as an int64 through p.
func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64))
@ -269,7 +290,8 @@ func decInt64(i *decInstr, state *decodeState, p unsafe.Pointer) {
*(*int64)(p) = int64(state.decodeInt()) *(*int64)(p) = int64(state.decodeInt())
} }
func decUint64(i *decInstr, state *decodeState, p unsafe.Pointer) { // decUint64 decodes an unsigned integer and stores it as a uint64 through p.
func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64))
@ -294,7 +316,9 @@ func floatFromBits(u uint64) float64 {
return math.Float64frombits(v) return math.Float64frombits(v)
} }
func storeFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { // storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it through p. It's a helper function for float32 and complex64.
func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
v := floatFromBits(state.decodeUint()) v := floatFromBits(state.decodeUint())
av := v av := v
if av < 0 { if av < 0 {
@ -308,7 +332,9 @@ func storeFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
} }
func decFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { // decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it through p.
func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32))
@ -318,7 +344,9 @@ func decFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) {
storeFloat32(i, state, p) storeFloat32(i, state, p)
} }
func decFloat64(i *decInstr, state *decodeState, p unsafe.Pointer) { // decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
// number, and stores it through p.
func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64))
@ -328,8 +356,10 @@ func decFloat64(i *decInstr, state *decodeState, p unsafe.Pointer) {
*(*float64)(p) = floatFromBits(uint64(state.decodeUint())) *(*float64)(p) = floatFromBits(uint64(state.decodeUint()))
} }
// Complex numbers are just a pair of floating-point numbers, real part first. // decComplex64 decodes a pair of unsigned integers, treats them as a
func decComplex64(i *decInstr, state *decodeState, p unsafe.Pointer) { // pair of floating point numbers, and stores them as a complex64 through p.
// The real part comes first.
func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64))
@ -340,7 +370,10 @@ func decComplex64(i *decInstr, state *decodeState, p unsafe.Pointer) {
storeFloat32(i, state, unsafe.Pointer(uintptr(p)+uintptr(unsafe.Sizeof(float32(0))))) storeFloat32(i, state, unsafe.Pointer(uintptr(p)+uintptr(unsafe.Sizeof(float32(0)))))
} }
func decComplex128(i *decInstr, state *decodeState, p unsafe.Pointer) { // decComplex128 decodes a pair of unsigned integers, treats them as a
// pair of floating point numbers, and stores them as a complex128 through p.
// The real part comes first.
func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128))
@ -352,8 +385,10 @@ func decComplex128(i *decInstr, state *decodeState, p unsafe.Pointer) {
*(*complex128)(p) = complex(real, imag) *(*complex128)(p) = complex(real, imag)
} }
// decUint8Array decodes byte array and stores through p a slice header
// describing the data.
// uint8 arrays are encoded as an unsigned count followed by the raw bytes. // uint8 arrays are encoded as an unsigned count followed by the raw bytes.
func decUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) { func decUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8))
@ -365,8 +400,10 @@ func decUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) {
*(*[]uint8)(p) = b *(*[]uint8)(p) = b
} }
// decString decodes byte array and stores through p a string header
// describing the data.
// Strings are encoded as an unsigned count followed by the raw bytes. // Strings are encoded as an unsigned count followed by the raw bytes.
func decString(i *decInstr, state *decodeState, p unsafe.Pointer) { func decString(i *decInstr, state *decoderState, p unsafe.Pointer) {
if i.indir > 0 { if i.indir > 0 {
if *(*unsafe.Pointer)(p) == nil { if *(*unsafe.Pointer)(p) == nil {
*(*unsafe.Pointer)(p) = unsafe.Pointer(new([]byte)) *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]byte))
@ -375,10 +412,18 @@ func decString(i *decInstr, state *decodeState, p unsafe.Pointer) {
} }
b := make([]byte, state.decodeUint()) b := make([]byte, state.decodeUint())
state.b.Read(b) state.b.Read(b)
*(*string)(p) = string(b) // It would be a shame to do the obvious thing here,
// *(*string)(p) = string(b)
// because we've already allocated the storage and this would
// allocate again and copy. So we do this ugly hack, which is even
// even more unsafe than it looks as it depends the memory
// representation of a string matching the beginning of the memory
// representation of a byte slice (a byte slice is longer).
*(*string)(p) = *(*string)(unsafe.Pointer(&b))
} }
func ignoreUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) { // ignoreUint8Array skips over the data for a byte slice value with no destination.
func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) {
b := make([]byte, state.decodeUint()) b := make([]byte, state.decodeUint())
state.b.Read(b) state.b.Read(b)
} }
@ -409,9 +454,16 @@ func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr {
return *(*uintptr)(up) return *(*uintptr)(up)
} }
// decodeSingle decodes a top-level value that is not a struct and stores it through p.
// Such values are preceded by a zero, making them have the memory layout of a
// struct field (although with an illegal field number).
func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr) (err os.Error) { func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr) (err os.Error) {
p = allocate(ut.base, p, ut.indir) indir := ut.indir
state := newDecodeState(dec, &dec.buf) if ut.isGobDecoder {
indir = int(ut.decIndir)
}
p = allocate(ut.base, p, indir)
state := dec.newDecoderState(&dec.buf)
state.fieldnum = singletonField state.fieldnum = singletonField
basep := p basep := p
delta := int(state.decodeUint()) delta := int(state.decodeUint())
@ -424,16 +476,18 @@ func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr)
ptr = decIndirect(ptr, instr.indir) ptr = decIndirect(ptr, instr.indir)
} }
instr.op(instr, state, ptr) instr.op(instr, state, ptr)
dec.freeDecoderState(state)
return nil return nil
} }
// decodeSingle decodes a top-level struct and stores it through p.
// Indir is for the value, not the type. At the time of the call it may // Indir is for the value, not the type. At the time of the call it may
// differ from ut.indir, which was computed when the engine was built. // differ from ut.indir, which was computed when the engine was built.
// This state cannot arise for decodeSingle, which is called directly // This state cannot arise for decodeSingle, which is called directly
// from the user's value, not from the innards of an engine. // from the user's value, not from the innards of an engine.
func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) (err os.Error) { func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) {
p = allocate(ut.base.(*reflect.StructType), p, indir) p = allocate(ut.base.(*reflect.StructType), p, indir)
state := newDecodeState(dec, &dec.buf) state := dec.newDecoderState(&dec.buf)
state.fieldnum = -1 state.fieldnum = -1
basep := p basep := p
for state.b.Len() > 0 { for state.b.Len() > 0 {
@ -457,11 +511,12 @@ func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr,
instr.op(instr, state, p) instr.op(instr, state, p)
state.fieldnum = fieldnum state.fieldnum = fieldnum
} }
return nil dec.freeDecoderState(state)
} }
func (dec *Decoder) ignoreStruct(engine *decEngine) (err os.Error) { // ignoreStruct discards the data for a struct with no destination.
state := newDecodeState(dec, &dec.buf) func (dec *Decoder) ignoreStruct(engine *decEngine) {
state := dec.newDecoderState(&dec.buf)
state.fieldnum = -1 state.fieldnum = -1
for state.b.Len() > 0 { for state.b.Len() > 0 {
delta := int(state.decodeUint()) delta := int(state.decodeUint())
@ -479,11 +534,13 @@ func (dec *Decoder) ignoreStruct(engine *decEngine) (err os.Error) {
instr.op(instr, state, unsafe.Pointer(nil)) instr.op(instr, state, unsafe.Pointer(nil))
state.fieldnum = fieldnum state.fieldnum = fieldnum
} }
return nil dec.freeDecoderState(state)
} }
func (dec *Decoder) ignoreSingle(engine *decEngine) (err os.Error) { // ignoreSingle discards the data for a top-level non-struct value with no
state := newDecodeState(dec, &dec.buf) // destination. It's used when calling Decode with a nil value.
func (dec *Decoder) ignoreSingle(engine *decEngine) {
state := dec.newDecoderState(&dec.buf)
state.fieldnum = singletonField state.fieldnum = singletonField
delta := int(state.decodeUint()) delta := int(state.decodeUint())
if delta != 0 { if delta != 0 {
@ -491,10 +548,11 @@ func (dec *Decoder) ignoreSingle(engine *decEngine) (err os.Error) {
} }
instr := &engine.instr[singletonField] instr := &engine.instr[singletonField]
instr.op(instr, state, unsafe.Pointer(nil)) instr.op(instr, state, unsafe.Pointer(nil))
return nil dec.freeDecoderState(state)
} }
func (dec *Decoder) decodeArrayHelper(state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.ErrorString) { // decodeArrayHelper does the work for decoding arrays and slices.
func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.ErrorString) {
instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl} instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl}
for i := 0; i < length; i++ { for i := 0; i < length; i++ {
up := unsafe.Pointer(p) up := unsafe.Pointer(p)
@ -506,7 +564,10 @@ func (dec *Decoder) decodeArrayHelper(state *decodeState, p uintptr, elemOp decO
} }
} }
func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl os.ErrorString) { // decodeArray decodes an array and stores it through p, that is, p points to the zeroth element.
// The length is an unsigned integer preceding the elements. Even though the length is redundant
// (it's part of the type), it's a useful check and is included in the encoding.
func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl os.ErrorString) {
if indir > 0 { if indir > 0 {
p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect
} }
@ -516,7 +577,9 @@ func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decodeState, p u
dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl) dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl)
} }
func decodeIntoValue(state *decodeState, op decOp, indir int, v reflect.Value, ovfl os.ErrorString) reflect.Value { // decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection,
// unlike the other items we can't use a pointer directly.
func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl os.ErrorString) reflect.Value {
instr := &decInstr{op, 0, indir, 0, ovfl} instr := &decInstr{op, 0, indir, 0, ovfl}
up := unsafe.Pointer(v.UnsafeAddr()) up := unsafe.Pointer(v.UnsafeAddr())
if indir > 1 { if indir > 1 {
@ -526,7 +589,11 @@ func decodeIntoValue(state *decodeState, op decOp, indir int, v reflect.Value, o
return v return v
} }
func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl os.ErrorString) { // decodeMap decodes a map and stores its header through p.
// Maps are encoded as a length followed by key:value pairs.
// Because the internals of maps are not visible to us, we must
// use reflection rather than pointer magic.
func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl os.ErrorString) {
if indir > 0 { if indir > 0 {
p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
} }
@ -538,7 +605,7 @@ func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintp
// Maps cannot be accessed by moving addresses around the way // Maps cannot be accessed by moving addresses around the way
// that slices etc. can. We must recover a full reflection value for // that slices etc. can. We must recover a full reflection value for
// the iteration. // the iteration.
v := reflect.NewValue(unsafe.Unreflect(mtyp, unsafe.Pointer((p)))).(*reflect.MapValue) v := reflect.NewValue(unsafe.Unreflect(mtyp, unsafe.Pointer(p))).(*reflect.MapValue)
n := int(state.decodeUint()) n := int(state.decodeUint())
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
key := decodeIntoValue(state, keyOp, keyIndir, reflect.MakeZero(mtyp.Key()), ovfl) key := decodeIntoValue(state, keyOp, keyIndir, reflect.MakeZero(mtyp.Key()), ovfl)
@ -547,21 +614,24 @@ func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintp
} }
} }
func (dec *Decoder) ignoreArrayHelper(state *decodeState, elemOp decOp, length int) { // ignoreArrayHelper does the work for discarding arrays and slices.
func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
instr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")} instr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")}
for i := 0; i < length; i++ { for i := 0; i < length; i++ {
elemOp(instr, state, nil) elemOp(instr, state, nil)
} }
} }
func (dec *Decoder) ignoreArray(state *decodeState, elemOp decOp, length int) { // ignoreArray discards the data for an array value with no destination.
func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) {
if n := state.decodeUint(); n != uint64(length) { if n := state.decodeUint(); n != uint64(length) {
errorf("gob: length mismatch in ignoreArray") errorf("gob: length mismatch in ignoreArray")
} }
dec.ignoreArrayHelper(state, elemOp, length) dec.ignoreArrayHelper(state, elemOp, length)
} }
func (dec *Decoder) ignoreMap(state *decodeState, keyOp, elemOp decOp) { // ignoreMap discards the data for a map value with no destination.
func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
n := int(state.decodeUint()) n := int(state.decodeUint())
keyInstr := &decInstr{keyOp, 0, 0, 0, os.ErrorString("no error")} keyInstr := &decInstr{keyOp, 0, 0, 0, os.ErrorString("no error")}
elemInstr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")} elemInstr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")}
@ -571,7 +641,9 @@ func (dec *Decoder) ignoreMap(state *decodeState, keyOp, elemOp decOp) {
} }
} }
func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl os.ErrorString) { // decodeSlice decodes a slice and stores the slice header through p.
// Slices are encoded as an unsigned length followed by the elements.
func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl os.ErrorString) {
n := int(uintptr(state.decodeUint())) n := int(uintptr(state.decodeUint()))
if indir > 0 { if indir > 0 {
up := unsafe.Pointer(p) up := unsafe.Pointer(p)
@ -590,7 +662,8 @@ func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decodeState, p u
dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl) dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl)
} }
func (dec *Decoder) ignoreSlice(state *decodeState, elemOp decOp) { // ignoreSlice skips over the data for a slice value with no destination.
func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) {
dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint())) dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint()))
} }
@ -609,9 +682,10 @@ func setInterfaceValue(ivalue *reflect.InterfaceValue, value reflect.Value) {
ivalue.Set(value) ivalue.Set(value)
} }
// decodeInterface receives the name of a concrete type followed by its value. // decodeInterface decodes an interface value and stores it through p.
// Interfaces are encoded as the name of a concrete type followed by a value.
// If the name is empty, the value is nil and no value is sent. // If the name is empty, the value is nil and no value is sent.
func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decodeState, p uintptr, indir int) { func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decoderState, p uintptr, indir int) {
// Create an interface reflect.Value. We need one even for the nil case. // Create an interface reflect.Value. We need one even for the nil case.
ivalue := reflect.MakeZero(ityp).(*reflect.InterfaceValue) ivalue := reflect.MakeZero(ityp).(*reflect.InterfaceValue)
// Read the name of the concrete type. // Read the name of the concrete type.
@ -655,7 +729,8 @@ func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decodeSt
*(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.Get() *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.Get()
} }
func (dec *Decoder) ignoreInterface(state *decodeState) { // ignoreInterface discards the data for an interface value with no destination.
func (dec *Decoder) ignoreInterface(state *decoderState) {
// Read the name of the concrete type. // Read the name of the concrete type.
b := make([]byte, state.decodeUint()) b := make([]byte, state.decodeUint())
_, err := state.b.Read(b) _, err := state.b.Read(b)
@ -670,6 +745,32 @@ func (dec *Decoder) ignoreInterface(state *decodeState) {
state.b.Next(int(state.decodeUint())) state.b.Next(int(state.decodeUint()))
} }
// decodeGobDecoder decodes something implementing the GobDecoder interface.
// The data is encoded as a byte slice.
func (dec *Decoder) decodeGobDecoder(state *decoderState, v reflect.Value, index int) {
// Read the bytes for the value.
b := make([]byte, state.decodeUint())
_, err := state.b.Read(b)
if err != nil {
error(err)
}
// We know it's a GobDecoder, so just call the method directly.
err = v.Interface().(GobDecoder).GobDecode(b)
if err != nil {
error(err)
}
}
// ignoreGobDecoder discards the data for a GobDecoder value with no destination.
func (dec *Decoder) ignoreGobDecoder(state *decoderState) {
// Read the bytes for the value.
b := make([]byte, state.decodeUint())
_, err := state.b.Read(b)
if err != nil {
error(err)
}
}
// Index by Go types. // Index by Go types.
var decOpTable = [...]decOp{ var decOpTable = [...]decOp{
reflect.Bool: decBool, reflect.Bool: decBool,
@ -699,10 +800,14 @@ var decIgnoreOpMap = map[typeId]decOp{
tComplex: ignoreTwoUints, tComplex: ignoreTwoUints,
} }
// Return the decoding op for the base type under rt and // decOpFor returns the decoding op for the base type under rt and
// the indirection count to reach it. // the indirection count to reach it.
func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) { func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) {
ut := userType(rt) ut := userType(rt)
// If the type implements GobEncoder, we handle it without further processing.
if ut.isGobDecoder {
return dec.gobDecodeOpFor(ut)
}
// If this type is already in progress, it's a recursive type (e.g. map[string]*T). // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
// Return the pointer to the op we're already building. // Return the pointer to the op we're already building.
if opPtr := inProgress[rt]; opPtr != nil { if opPtr := inProgress[rt]; opPtr != nil {
@ -724,7 +829,7 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
elemId := dec.wireType[wireId].ArrayT.Elem elemId := dec.wireType[wireId].ArrayT.Elem
elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
ovfl := overflow(name) ovfl := overflow(name)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl) state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl)
} }
@ -735,7 +840,7 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress) keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress)
elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
ovfl := overflow(name) ovfl := overflow(name)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
up := unsafe.Pointer(p) up := unsafe.Pointer(p)
state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl) state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl)
} }
@ -754,26 +859,23 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
} }
elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
ovfl := overflow(name) ovfl := overflow(name)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl) state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl)
} }
case *reflect.StructType: case *reflect.StructType:
// Generate a closure that calls out to the engine for the nested type. // Generate a closure that calls out to the engine for the nested type.
enginePtr, err := dec.getDecEnginePtr(wireId, typ) enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ))
if err != nil { if err != nil {
error(err) error(err)
} }
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
// indirect through enginePtr to delay evaluation for recursive structs. // indirect through enginePtr to delay evaluation for recursive structs.
err = dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir) dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir)
if err != nil {
error(err)
}
} }
case *reflect.InterfaceType: case *reflect.InterfaceType:
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
dec.decodeInterface(t, state, uintptr(p), i.indir) state.dec.decodeInterface(t, state, uintptr(p), i.indir)
} }
} }
} }
@ -783,15 +885,15 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
return &op, indir return &op, indir
} }
// Return the decoding op for a field that has no destination. // decIgnoreOpFor returns the decoding op for a field that has no destination.
func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
op, ok := decIgnoreOpMap[wireId] op, ok := decIgnoreOpMap[wireId]
if !ok { if !ok {
if wireId == tInterface { if wireId == tInterface {
// Special case because it's a method: the ignored item might // Special case because it's a method: the ignored item might
// define types and we need to record their state in the decoder. // define types and we need to record their state in the decoder.
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
dec.ignoreInterface(state) state.dec.ignoreInterface(state)
} }
return op return op
} }
@ -799,11 +901,11 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
wire := dec.wireType[wireId] wire := dec.wireType[wireId]
switch { switch {
case wire == nil: case wire == nil:
panic("internal error: can't find ignore op for type " + wireId.string()) errorf("gob: bad data: undefined type %s", wireId.string())
case wire.ArrayT != nil: case wire.ArrayT != nil:
elemId := wire.ArrayT.Elem elemId := wire.ArrayT.Elem
elemOp := dec.decIgnoreOpFor(elemId) elemOp := dec.decIgnoreOpFor(elemId)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len) state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len)
} }
@ -812,14 +914,14 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
elemId := dec.wireType[wireId].MapT.Elem elemId := dec.wireType[wireId].MapT.Elem
keyOp := dec.decIgnoreOpFor(keyId) keyOp := dec.decIgnoreOpFor(keyId)
elemOp := dec.decIgnoreOpFor(elemId) elemOp := dec.decIgnoreOpFor(elemId)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.ignoreMap(state, keyOp, elemOp) state.dec.ignoreMap(state, keyOp, elemOp)
} }
case wire.SliceT != nil: case wire.SliceT != nil:
elemId := wire.SliceT.Elem elemId := wire.SliceT.Elem
elemOp := dec.decIgnoreOpFor(elemId) elemOp := dec.decIgnoreOpFor(elemId)
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.ignoreSlice(state, elemOp) state.dec.ignoreSlice(state, elemOp)
} }
@ -829,28 +931,75 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
if err != nil { if err != nil {
error(err) error(err)
} }
op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
// indirect through enginePtr to delay evaluation for recursive structs // indirect through enginePtr to delay evaluation for recursive structs
state.dec.ignoreStruct(*enginePtr) state.dec.ignoreStruct(*enginePtr)
} }
case wire.GobEncoderT != nil:
op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
state.dec.ignoreGobDecoder(state)
}
} }
} }
if op == nil { if op == nil {
errorf("ignore can't handle type %s", wireId.string()) errorf("gob: bad data: ignore can't handle type %s", wireId.string())
} }
return op return op
} }
// Are these two gob Types compatible? // gobDecodeOpFor returns the op for a type that is known to implement
// Answers the question for basic types, arrays, and slices. // GobDecoder.
func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) {
rt := ut.user
if ut.decIndir == -1 {
rt = reflect.PtrTo(rt)
} else if ut.decIndir > 0 {
for i := int8(0); i < ut.decIndir; i++ {
rt = rt.(*reflect.PtrType).Elem()
}
}
var op decOp
op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
// Allocate the underlying data, but hold on to the address we have,
// since we need it to get to the receiver's address.
allocate(ut.base, uintptr(p), ut.indir)
var v reflect.Value
if ut.decIndir == -1 {
// Need to climb up one level to turn value into pointer.
v = reflect.NewValue(unsafe.Unreflect(rt, unsafe.Pointer(&p)))
} else {
if ut.decIndir > 0 {
p = decIndirect(p, int(ut.decIndir))
}
v = reflect.NewValue(unsafe.Unreflect(rt, p))
}
state.dec.decodeGobDecoder(state, v, methodIndex(rt, gobDecodeMethodName))
}
return &op, int(ut.decIndir)
}
// compatibleType asks: Are these two gob Types compatible?
// Answers the question for basic types, arrays, maps and slices, plus
// GobEncoder/Decoder pairs.
// Structs are considered ok; fields will be checked later. // Structs are considered ok; fields will be checked later.
func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool { func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool {
if rhs, ok := inProgress[fr]; ok { if rhs, ok := inProgress[fr]; ok {
return rhs == fw return rhs == fw
} }
inProgress[fr] = fw inProgress[fr] = fw
fr = userType(fr).base ut := userType(fr)
switch t := fr.(type) { wire, ok := dec.wireType[fw]
// If fr is a GobDecoder, the wire type must be GobEncoder.
// And if fr is not a GobDecoder, the wire type must not be either.
if ut.isGobDecoder != (ok && wire.GobEncoderT != nil) { // the parentheses look odd but are correct.
return false
}
if ut.isGobDecoder { // This test trumps all others.
return true
}
switch t := ut.base.(type) {
default: default:
// chan, etc: cannot handle. // chan, etc: cannot handle.
return false return false
@ -869,14 +1018,12 @@ func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[re
case *reflect.InterfaceType: case *reflect.InterfaceType:
return fw == tInterface return fw == tInterface
case *reflect.ArrayType: case *reflect.ArrayType:
wire, ok := dec.wireType[fw]
if !ok || wire.ArrayT == nil { if !ok || wire.ArrayT == nil {
return false return false
} }
array := wire.ArrayT array := wire.ArrayT
return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress) return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress)
case *reflect.MapType: case *reflect.MapType:
wire, ok := dec.wireType[fw]
if !ok || wire.MapT == nil { if !ok || wire.MapT == nil {
return false return false
} }
@ -911,8 +1058,13 @@ func (dec *Decoder) typeString(remoteId typeId) string {
return dec.wireType[remoteId].string() return dec.wireType[remoteId].string()
} }
// compileSingle compiles the decoder engine for a non-struct top-level value, including
func (dec *Decoder) compileSingle(remoteId typeId, rt reflect.Type) (engine *decEngine, err os.Error) { // GobDecoders.
func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) {
rt := ut.base
if ut.isGobDecoder {
rt = ut.user
}
engine = new(decEngine) engine = new(decEngine)
engine.instr = make([]decInstr, 1) // one item engine.instr = make([]decInstr, 1) // one item
name := rt.String() // best we can do name := rt.String() // best we can do
@ -926,6 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, rt reflect.Type) (engine *dec
return return
} }
// compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded.
func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err os.Error) { func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err os.Error) {
engine = new(decEngine) engine = new(decEngine)
engine.instr = make([]decInstr, 1) // one item engine.instr = make([]decInstr, 1) // one item
@ -936,16 +1089,13 @@ func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err
return return
} }
// Is this an exported - upper case - name? // compileDec compiles the decoder engine for a value. If the value is not a struct,
func isExported(name string) bool { // it calls out to compileSingle.
rune, _ := utf8.DecodeRuneInString(name) func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) {
return unicode.IsUpper(rune) rt := ut.base
}
func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEngine, err os.Error) {
srt, ok := rt.(*reflect.StructType) srt, ok := rt.(*reflect.StructType)
if !ok { if !ok || ut.isGobDecoder {
return dec.compileSingle(remoteId, rt) return dec.compileSingle(remoteId, ut)
} }
var wireStruct *structType var wireStruct *structType
// Builtin types can come from global pool; the rest must be defined by the decoder. // Builtin types can come from global pool; the rest must be defined by the decoder.
@ -990,7 +1140,9 @@ func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEng
return return
} }
func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr **decEngine, err os.Error) { // getDecEnginePtr returns the engine for the specified type.
func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err os.Error) {
rt := ut.base
decoderMap, ok := dec.decoderCache[rt] decoderMap, ok := dec.decoderCache[rt]
if !ok { if !ok {
decoderMap = make(map[typeId]**decEngine) decoderMap = make(map[typeId]**decEngine)
@ -1000,7 +1152,7 @@ func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr
// To handle recursive types, mark this engine as underway before compiling. // To handle recursive types, mark this engine as underway before compiling.
enginePtr = new(*decEngine) enginePtr = new(*decEngine)
decoderMap[remoteId] = enginePtr decoderMap[remoteId] = enginePtr
*enginePtr, err = dec.compileDec(remoteId, rt) *enginePtr, err = dec.compileDec(remoteId, ut)
if err != nil { if err != nil {
decoderMap[remoteId] = nil, false decoderMap[remoteId] = nil, false
} }
@ -1008,11 +1160,12 @@ func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr
return return
} }
// When ignoring struct data, in effect we compile it into this type // emptyStruct is the type we compile into when ignoring a struct value.
type emptyStruct struct{} type emptyStruct struct{}
var emptyStructType = reflect.Typeof(emptyStruct{}) var emptyStructType = reflect.Typeof(emptyStruct{})
// getDecEnginePtr returns the engine for the specified type when the value is to be discarded.
func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err os.Error) { func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err os.Error) {
var ok bool var ok bool
if enginePtr, ok = dec.ignorerCache[wireId]; !ok { if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
@ -1021,7 +1174,7 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er
dec.ignorerCache[wireId] = enginePtr dec.ignorerCache[wireId] = enginePtr
wire := dec.wireType[wireId] wire := dec.wireType[wireId]
if wire != nil && wire.StructT != nil { if wire != nil && wire.StructT != nil {
*enginePtr, err = dec.compileDec(wireId, emptyStructType) *enginePtr, err = dec.compileDec(wireId, userType(emptyStructType))
} else { } else {
*enginePtr, err = dec.compileIgnoreSingle(wireId) *enginePtr, err = dec.compileIgnoreSingle(wireId)
} }
@ -1032,41 +1185,51 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er
return return
} }
func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) (err os.Error) { // decodeValue decodes the data stream representing a value and stores it in val.
defer catchError(&err) func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) {
defer catchError(&dec.err)
// If the value is nil, it means we should just ignore this item. // If the value is nil, it means we should just ignore this item.
if val == nil { if val == nil {
return dec.decodeIgnoredValue(wireId) dec.decodeIgnoredValue(wireId)
return
} }
// Dereference down to the underlying struct type. // Dereference down to the underlying struct type.
ut := userType(val.Type()) ut := userType(val.Type())
base := ut.base base := ut.base
indir := ut.indir indir := ut.indir
enginePtr, err := dec.getDecEnginePtr(wireId, base) if ut.isGobDecoder {
if err != nil { indir = int(ut.decIndir)
return err }
var enginePtr **decEngine
enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut)
if dec.err != nil {
return
} }
engine := *enginePtr engine := *enginePtr
if st, ok := base.(*reflect.StructType); ok { if st, ok := base.(*reflect.StructType); ok && !ut.isGobDecoder {
if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 { if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 {
name := base.Name() name := base.Name()
return os.ErrorString("gob: type mismatch: no fields matched compiling decoder for " + name) errorf("gob: type mismatch: no fields matched compiling decoder for %s", name)
} }
return dec.decodeStruct(engine, ut, uintptr(val.UnsafeAddr()), indir) dec.decodeStruct(engine, ut, uintptr(val.UnsafeAddr()), indir)
} else {
dec.decodeSingle(engine, ut, uintptr(val.UnsafeAddr()))
} }
return dec.decodeSingle(engine, ut, uintptr(val.UnsafeAddr()))
} }
func (dec *Decoder) decodeIgnoredValue(wireId typeId) os.Error { // decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it.
enginePtr, err := dec.getIgnoreEnginePtr(wireId) func (dec *Decoder) decodeIgnoredValue(wireId typeId) {
if err != nil { var enginePtr **decEngine
return err enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId)
if dec.err != nil {
return
} }
wire := dec.wireType[wireId] wire := dec.wireType[wireId]
if wire != nil && wire.StructT != nil { if wire != nil && wire.StructT != nil {
return dec.ignoreStruct(*enginePtr) dec.ignoreStruct(*enginePtr)
} else {
dec.ignoreSingle(*enginePtr)
} }
return dec.ignoreSingle(*enginePtr)
} }
func init() { func init() {

View File

@ -5,6 +5,7 @@
package gob package gob
import ( import (
"bufio"
"bytes" "bytes"
"io" "io"
"os" "os"
@ -21,7 +22,7 @@ type Decoder struct {
wireType map[typeId]*wireType // map from remote ID to local description wireType map[typeId]*wireType // map from remote ID to local description
decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines
ignorerCache map[typeId]**decEngine // ditto for ignored objects ignorerCache map[typeId]**decEngine // ditto for ignored objects
countState *decodeState // reads counts from wire freeList *decoderState // list of free decoderStates; avoids reallocation
countBuf []byte // used for decoding integers while parsing messages countBuf []byte // used for decoding integers while parsing messages
tmp []byte // temporary storage for i/o; saves reallocating tmp []byte // temporary storage for i/o; saves reallocating
err os.Error err os.Error
@ -30,7 +31,7 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from the io.Reader. // NewDecoder returns a new decoder that reads from the io.Reader.
func NewDecoder(r io.Reader) *Decoder { func NewDecoder(r io.Reader) *Decoder {
dec := new(Decoder) dec := new(Decoder)
dec.r = r dec.r = bufio.NewReader(r)
dec.wireType = make(map[typeId]*wireType) dec.wireType = make(map[typeId]*wireType)
dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine) dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine)
dec.ignorerCache = make(map[typeId]**decEngine) dec.ignorerCache = make(map[typeId]**decEngine)
@ -49,7 +50,7 @@ func (dec *Decoder) recvType(id typeId) {
// Type: // Type:
wire := new(wireType) wire := new(wireType)
dec.err = dec.decodeValue(tWireType, reflect.NewValue(wire)) dec.decodeValue(tWireType, reflect.NewValue(wire))
if dec.err != nil { if dec.err != nil {
return return
} }
@ -184,7 +185,7 @@ func (dec *Decoder) DecodeValue(value reflect.Value) os.Error {
dec.err = nil dec.err = nil
id := dec.decodeTypeSequence(false) id := dec.decodeTypeSequence(false)
if dec.err == nil { if dec.err == nil {
dec.err = dec.decodeValue(id, value) dec.decodeValue(id, value)
} }
return dec.err return dec.err
} }

View File

@ -6,16 +6,14 @@ package gob
import ( import (
"bytes" "bytes"
"io"
"math" "math"
"os"
"reflect" "reflect"
"unsafe" "unsafe"
) )
const uint64Size = unsafe.Sizeof(uint64(0)) const uint64Size = unsafe.Sizeof(uint64(0))
// The global execution state of an instance of the encoder. // encoderState is the global execution state of an instance of the encoder.
// Field numbers are delta encoded and always increase. The field // Field numbers are delta encoded and always increase. The field
// number is initialized to -1 so 0 comes out as delta(1). A delta of // number is initialized to -1 so 0 comes out as delta(1). A delta of
// 0 terminates the structure. // 0 terminates the structure.
@ -25,10 +23,26 @@ type encoderState struct {
sendZero bool // encoding an array element or map key/value pair; send zero values sendZero bool // encoding an array element or map key/value pair; send zero values
fieldnum int // the last field number written. fieldnum int // the last field number written.
buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation. buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation.
next *encoderState // for free list
} }
func newEncoderState(enc *Encoder, b *bytes.Buffer) *encoderState { func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState {
return &encoderState{enc: enc, b: b} e := enc.freeList
if e == nil {
e = new(encoderState)
e.enc = enc
} else {
enc.freeList = e.next
}
e.sendZero = false
e.fieldnum = 0
e.b = b
return e
}
func (enc *Encoder) freeEncoderState(e *encoderState) {
e.next = enc.freeList
enc.freeList = e
} }
// Unsigned integers have a two-state encoding. If the number is less // Unsigned integers have a two-state encoding. If the number is less
@ -72,6 +86,7 @@ func (state *encoderState) encodeInt(i int64) {
state.encodeUint(uint64(x)) state.encodeUint(uint64(x))
} }
// encOp is the signature of an encoding operator for a given type.
type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer) type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer)
// The 'instructions' of the encoding machine // The 'instructions' of the encoding machine
@ -82,8 +97,8 @@ type encInstr struct {
offset uintptr // offset in the structure of the field to encode offset uintptr // offset in the structure of the field to encode
} }
// Emit a field number and update the state to record its value for delta encoding. // update emits a field number and updates the state to record its value for delta encoding.
// If the instruction pointer is nil, do nothing // If the instruction pointer is nil, it does nothing
func (state *encoderState) update(instr *encInstr) { func (state *encoderState) update(instr *encInstr) {
if instr != nil { if instr != nil {
state.encodeUint(uint64(instr.field - state.fieldnum)) state.encodeUint(uint64(instr.field - state.fieldnum))
@ -91,12 +106,16 @@ func (state *encoderState) update(instr *encInstr) {
} }
} }
// Each encoder is responsible for handling any indirections associated // Each encoder for a composite is responsible for handling any
// with the data structure. If any pointer so reached is nil, no bytes are written. // indirections associated with the elements of the data structure.
// If the data item is zero, no bytes are written. // If any pointer so reached is nil, no bytes are written. If the
// Otherwise, the output (for a scalar) is the field number, as an encoded integer, // data item is zero, no bytes are written. Single values - ints,
// followed by the field data in its appropriate format. // strings etc. - are indirected before calling their encoders.
// Otherwise, the output (for a scalar) is the field number, as an
// encoded integer, followed by the field data in its appropriate
// format.
// encIndirect dereferences p indir times and returns the result.
func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
for ; indir > 0; indir-- { for ; indir > 0; indir-- {
p = *(*unsafe.Pointer)(p) p = *(*unsafe.Pointer)(p)
@ -107,6 +126,7 @@ func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
return p return p
} }
// encBool encodes the bool with address p as an unsigned 0 or 1.
func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) { func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) {
b := *(*bool)(p) b := *(*bool)(p)
if b || state.sendZero { if b || state.sendZero {
@ -119,6 +139,7 @@ func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt encodes the int with address p.
func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) { func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := int64(*(*int)(p)) v := int64(*(*int)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -127,6 +148,7 @@ func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUint encodes the uint with address p.
func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := uint64(*(*uint)(p)) v := uint64(*(*uint)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -135,6 +157,7 @@ func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt8 encodes the int8 with address p.
func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) { func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := int64(*(*int8)(p)) v := int64(*(*int8)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -143,6 +166,7 @@ func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUint8 encodes the uint8 with address p.
func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := uint64(*(*uint8)(p)) v := uint64(*(*uint8)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -151,6 +175,7 @@ func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt16 encodes the int16 with address p.
func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) { func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := int64(*(*int16)(p)) v := int64(*(*int16)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -159,6 +184,7 @@ func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUint16 encodes the uint16 with address p.
func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := uint64(*(*uint16)(p)) v := uint64(*(*uint16)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -167,6 +193,7 @@ func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt32 encodes the int32 with address p.
func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) { func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := int64(*(*int32)(p)) v := int64(*(*int32)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -175,6 +202,7 @@ func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUint encodes the uint32 with address p.
func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := uint64(*(*uint32)(p)) v := uint64(*(*uint32)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -183,6 +211,7 @@ func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt64 encodes the int64 with address p.
func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) { func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := *(*int64)(p) v := *(*int64)(p)
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -191,6 +220,7 @@ func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encInt64 encodes the uint64 with address p.
func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := *(*uint64)(p) v := *(*uint64)(p)
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -199,6 +229,7 @@ func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUintptr encodes the uintptr with address p.
func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) {
v := uint64(*(*uintptr)(p)) v := uint64(*(*uintptr)(p))
if v != 0 || state.sendZero { if v != 0 || state.sendZero {
@ -207,6 +238,7 @@ func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// floatBits returns a uint64 holding the bits of a floating-point number.
// Floating-point numbers are transmitted as uint64s holding the bits // Floating-point numbers are transmitted as uint64s holding the bits
// of the underlying representation. They are sent byte-reversed, with // of the underlying representation. They are sent byte-reversed, with
// the exponent end coming out first, so integer floating point numbers // the exponent end coming out first, so integer floating point numbers
@ -223,6 +255,7 @@ func floatBits(f float64) uint64 {
return v return v
} }
// encFloat32 encodes the float32 with address p.
func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) { func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) {
f := *(*float32)(p) f := *(*float32)(p)
if f != 0 || state.sendZero { if f != 0 || state.sendZero {
@ -232,6 +265,7 @@ func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encFloat64 encodes the float64 with address p.
func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) { func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) {
f := *(*float64)(p) f := *(*float64)(p)
if f != 0 || state.sendZero { if f != 0 || state.sendZero {
@ -241,6 +275,7 @@ func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encComplex64 encodes the complex64 with address p.
// Complex numbers are just a pair of floating-point numbers, real part first. // Complex numbers are just a pair of floating-point numbers, real part first.
func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) { func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) {
c := *(*complex64)(p) c := *(*complex64)(p)
@ -253,6 +288,7 @@ func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encComplex128 encodes the complex128 with address p.
func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) {
c := *(*complex128)(p) c := *(*complex128)(p)
if c != 0+0i || state.sendZero { if c != 0+0i || state.sendZero {
@ -264,6 +300,7 @@ func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encUint8Array encodes the byte slice whose header has address p.
// Byte arrays are encoded as an unsigned count followed by the raw bytes. // Byte arrays are encoded as an unsigned count followed by the raw bytes.
func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) {
b := *(*[]byte)(p) b := *(*[]byte)(p)
@ -274,24 +311,26 @@ func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) {
} }
} }
// encString encodes the string whose header has address p.
// Strings are encoded as an unsigned count followed by the raw bytes. // Strings are encoded as an unsigned count followed by the raw bytes.
func encString(i *encInstr, state *encoderState, p unsafe.Pointer) { func encString(i *encInstr, state *encoderState, p unsafe.Pointer) {
s := *(*string)(p) s := *(*string)(p)
if len(s) > 0 || state.sendZero { if len(s) > 0 || state.sendZero {
state.update(i) state.update(i)
state.encodeUint(uint64(len(s))) state.encodeUint(uint64(len(s)))
io.WriteString(state.b, s) state.b.WriteString(s)
} }
} }
// The end of a struct is marked by a delta field number of 0. // encStructTerminator encodes the end of an encoded struct
// as delta field number of 0.
func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) { func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) {
state.encodeUint(0) state.encodeUint(0)
} }
// Execution engine // Execution engine
// The encoder engine is an array of instructions indexed by field number of the encoding // encEngine an array of instructions indexed by field number of the encoding
// data, typically a struct. It is executed top to bottom, walking the struct. // data, typically a struct. It is executed top to bottom, walking the struct.
type encEngine struct { type encEngine struct {
instr []encInstr instr []encInstr
@ -299,8 +338,9 @@ type encEngine struct {
const singletonField = 0 const singletonField = 0
// encodeSingle encodes a single top-level non-struct value.
func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintptr) { func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintptr) {
state := newEncoderState(enc, b) state := enc.newEncoderState(b)
state.fieldnum = singletonField state.fieldnum = singletonField
// There is no surrounding struct to frame the transmission, so we must // There is no surrounding struct to frame the transmission, so we must
// generate data even if the item is zero. To do this, set sendZero. // generate data even if the item is zero. To do this, set sendZero.
@ -313,10 +353,12 @@ func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintp
} }
} }
instr.op(instr, state, p) instr.op(instr, state, p)
enc.freeEncoderState(state)
} }
// encodeStruct encodes a single struct value.
func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintptr) { func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintptr) {
state := newEncoderState(enc, b) state := enc.newEncoderState(b)
state.fieldnum = -1 state.fieldnum = -1
for i := 0; i < len(engine.instr); i++ { for i := 0; i < len(engine.instr); i++ {
instr := &engine.instr[i] instr := &engine.instr[i]
@ -328,10 +370,12 @@ func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintp
} }
instr.op(instr, state, p) instr.op(instr, state, p)
} }
enc.freeEncoderState(state)
} }
// encodeArray encodes the array whose 0th element is at p.
func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid uintptr, elemIndir int, length int) { func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid uintptr, elemIndir int, length int) {
state := newEncoderState(enc, b) state := enc.newEncoderState(b)
state.fieldnum = -1 state.fieldnum = -1
state.sendZero = true state.sendZero = true
state.encodeUint(uint64(length)) state.encodeUint(uint64(length))
@ -347,8 +391,10 @@ func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid ui
op(nil, state, unsafe.Pointer(elemp)) op(nil, state, unsafe.Pointer(elemp))
p += uintptr(elemWid) p += uintptr(elemWid)
} }
enc.freeEncoderState(state)
} }
// encodeReflectValue is a helper for maps. It encodes the value v.
func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) { func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) {
for i := 0; i < indir && v != nil; i++ { for i := 0; i < indir && v != nil; i++ {
v = reflect.Indirect(v) v = reflect.Indirect(v)
@ -359,8 +405,11 @@ func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir in
op(nil, state, unsafe.Pointer(v.UnsafeAddr())) op(nil, state, unsafe.Pointer(v.UnsafeAddr()))
} }
// encodeMap encodes a map as unsigned count followed by key:value pairs.
// Because map internals are not exposed, we must use reflection rather than
// addresses.
func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elemOp encOp, keyIndir, elemIndir int) { func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elemOp encOp, keyIndir, elemIndir int) {
state := newEncoderState(enc, b) state := enc.newEncoderState(b)
state.fieldnum = -1 state.fieldnum = -1
state.sendZero = true state.sendZero = true
keys := mv.Keys() keys := mv.Keys()
@ -369,14 +418,16 @@ func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elem
encodeReflectValue(state, key, keyOp, keyIndir) encodeReflectValue(state, key, keyOp, keyIndir)
encodeReflectValue(state, mv.Elem(key), elemOp, elemIndir) encodeReflectValue(state, mv.Elem(key), elemOp, elemIndir)
} }
enc.freeEncoderState(state)
} }
// encodeInterface encodes the interface value iv.
// To send an interface, we send a string identifying the concrete type, followed // To send an interface, we send a string identifying the concrete type, followed
// by the type identifier (which might require defining that type right now), followed // by the type identifier (which might require defining that type right now), followed
// by the concrete value. A nil value gets sent as the empty string for the name, // by the concrete value. A nil value gets sent as the empty string for the name,
// followed by no value. // followed by no value.
func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) { func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) {
state := newEncoderState(enc, b) state := enc.newEncoderState(b)
state.fieldnum = -1 state.fieldnum = -1
state.sendZero = true state.sendZero = true
if iv.IsNil() { if iv.IsNil() {
@ -391,7 +442,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue)
} }
// Send the name. // Send the name.
state.encodeUint(uint64(len(name))) state.encodeUint(uint64(len(name)))
_, err := io.WriteString(state.b, name) _, err := state.b.WriteString(name)
if err != nil { if err != nil {
error(err) error(err)
} }
@ -403,15 +454,32 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue)
// should be written to b, before the encoded value. // should be written to b, before the encoded value.
enc.pushWriter(b) enc.pushWriter(b)
data := new(bytes.Buffer) data := new(bytes.Buffer)
err = enc.encode(data, iv.Elem(), ut) enc.encode(data, iv.Elem(), ut)
if err != nil { if enc.err != nil {
error(err) error(enc.err)
} }
enc.popWriter() enc.popWriter()
enc.writeMessage(b, data) enc.writeMessage(b, data)
if enc.err != nil { if enc.err != nil {
error(err) error(err)
} }
enc.freeEncoderState(state)
}
// encGobEncoder encodes a value that implements the GobEncoder interface.
// The data is sent as a byte array.
func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, v reflect.Value, index int) {
// TODO: should we catch panics from the called method?
// We know it's a GobEncoder, so just call the method directly.
data, err := v.Interface().(GobEncoder).GobEncode()
if err != nil {
error(err)
}
state := enc.newEncoderState(b)
state.fieldnum = -1
state.encodeUint(uint64(len(data)))
state.b.Write(data)
enc.freeEncoderState(state)
} }
var encOpTable = [...]encOp{ var encOpTable = [...]encOp{
@ -434,10 +502,14 @@ var encOpTable = [...]encOp{
reflect.String: encString, reflect.String: encString,
} }
// Return (a pointer to) the encoding op for the base type under rt and // encOpFor returns (a pointer to) the encoding op for the base type under rt and
// the indirection count to reach it. // the indirection count to reach it.
func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) { func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) {
ut := userType(rt) ut := userType(rt)
// If the type implements GobEncoder, we handle it without further processing.
if ut.isGobEncoder {
return enc.gobEncodeOpFor(ut)
}
// If this type is already in progress, it's a recursive type (e.g. map[string]*T). // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
// Return the pointer to the op we're already building. // Return the pointer to the op we're already building.
if opPtr := inProgress[rt]; opPtr != nil { if opPtr := inProgress[rt]; opPtr != nil {
@ -483,7 +555,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp
// Maps cannot be accessed by moving addresses around the way // Maps cannot be accessed by moving addresses around the way
// that slices etc. can. We must recover a full reflection value for // that slices etc. can. We must recover a full reflection value for
// the iteration. // the iteration.
v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer((p)))) v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer(p)))
mv := reflect.Indirect(v).(*reflect.MapValue) mv := reflect.Indirect(v).(*reflect.MapValue)
if !state.sendZero && mv.Len() == 0 { if !state.sendZero && mv.Len() == 0 {
return return
@ -493,7 +565,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp
} }
case *reflect.StructType: case *reflect.StructType:
// Generate a closure that calls out to the engine for the nested type. // Generate a closure that calls out to the engine for the nested type.
enc.getEncEngine(typ) enc.getEncEngine(userType(typ))
info := mustGetTypeInfo(typ) info := mustGetTypeInfo(typ)
op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
state.update(i) state.update(i)
@ -504,7 +576,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp
op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
// Interfaces transmit the name and contents of the concrete // Interfaces transmit the name and contents of the concrete
// value they contain. // value they contain.
v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer((p)))) v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer(p)))
iv := reflect.Indirect(v).(*reflect.InterfaceValue) iv := reflect.Indirect(v).(*reflect.InterfaceValue)
if !state.sendZero && (iv == nil || iv.IsNil()) { if !state.sendZero && (iv == nil || iv.IsNil()) {
return return
@ -520,22 +592,64 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp
return &op, indir return &op, indir
} }
// The local Type was compiled from the actual value, so we know it's compatible. // methodIndex returns which method of rt implements the method.
func (enc *Encoder) compileEnc(rt reflect.Type) *encEngine { func methodIndex(rt reflect.Type, method string) int {
srt, isStruct := rt.(*reflect.StructType) for i := 0; i < rt.NumMethod(); i++ {
if rt.Method(i).Name == method {
return i
}
}
errorf("gob: internal error: can't find method %s", method)
return 0
}
// gobEncodeOpFor returns the op for a type that is known to implement
// GobEncoder.
func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) {
rt := ut.user
if ut.encIndir == -1 {
rt = reflect.PtrTo(rt)
} else if ut.encIndir > 0 {
for i := int8(0); i < ut.encIndir; i++ {
rt = rt.(*reflect.PtrType).Elem()
}
}
var op encOp
op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
var v reflect.Value
if ut.encIndir == -1 {
// Need to climb up one level to turn value into pointer.
v = reflect.NewValue(unsafe.Unreflect(rt, unsafe.Pointer(&p)))
} else {
v = reflect.NewValue(unsafe.Unreflect(rt, p))
}
state.update(i)
state.enc.encodeGobEncoder(state.b, v, methodIndex(rt, gobEncodeMethodName))
}
return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver.
}
// compileEnc returns the engine to compile the type.
func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine {
srt, isStruct := ut.base.(*reflect.StructType)
engine := new(encEngine) engine := new(encEngine)
seen := make(map[reflect.Type]*encOp) seen := make(map[reflect.Type]*encOp)
if isStruct { rt := ut.base
for fieldNum := 0; fieldNum < srt.NumField(); fieldNum++ { if ut.isGobEncoder {
rt = ut.user
}
if !ut.isGobEncoder && isStruct {
for fieldNum, wireFieldNum := 0, 0; fieldNum < srt.NumField(); fieldNum++ {
f := srt.Field(fieldNum) f := srt.Field(fieldNum)
if !isExported(f.Name) { if !isExported(f.Name) {
continue continue
} }
op, indir := enc.encOpFor(f.Type, seen) op, indir := enc.encOpFor(f.Type, seen)
engine.instr = append(engine.instr, encInstr{*op, fieldNum, indir, uintptr(f.Offset)}) engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, indir, uintptr(f.Offset)})
wireFieldNum++
} }
if srt.NumField() > 0 && len(engine.instr) == 0 { if srt.NumField() > 0 && len(engine.instr) == 0 {
errorf("type %s has no exported fields", rt) errorf("gob: type %s has no exported fields", rt)
} }
engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0}) engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0})
} else { } else {
@ -546,38 +660,42 @@ func (enc *Encoder) compileEnc(rt reflect.Type) *encEngine {
return engine return engine
} }
// getEncEngine returns the engine to compile the type.
// typeLock must be held (or we're in initialization and guaranteed single-threaded). // typeLock must be held (or we're in initialization and guaranteed single-threaded).
// The reflection type must have all its indirections processed out. func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine {
func (enc *Encoder) getEncEngine(rt reflect.Type) *encEngine { info, err1 := getTypeInfo(ut)
info, err1 := getTypeInfo(rt)
if err1 != nil { if err1 != nil {
error(err1) error(err1)
} }
if info.encoder == nil { if info.encoder == nil {
// mark this engine as underway before compiling to handle recursive types. // mark this engine as underway before compiling to handle recursive types.
info.encoder = new(encEngine) info.encoder = new(encEngine)
info.encoder = enc.compileEnc(rt) info.encoder = enc.compileEnc(ut)
} }
return info.encoder return info.encoder
} }
// Put this in a function so we can hold the lock only while compiling, not when encoding. // lockAndGetEncEngine is a function that locks and compiles.
func (enc *Encoder) lockAndGetEncEngine(rt reflect.Type) *encEngine { // This lets us hold the lock only while compiling, not when encoding.
func (enc *Encoder) lockAndGetEncEngine(ut *userTypeInfo) *encEngine {
typeLock.Lock() typeLock.Lock()
defer typeLock.Unlock() defer typeLock.Unlock()
return enc.getEncEngine(rt) return enc.getEncEngine(ut)
} }
func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) (err os.Error) { func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) {
defer catchError(&err) defer catchError(&enc.err)
for i := 0; i < ut.indir; i++ { engine := enc.lockAndGetEncEngine(ut)
indir := ut.indir
if ut.isGobEncoder {
indir = int(ut.encIndir)
}
for i := 0; i < indir; i++ {
value = reflect.Indirect(value) value = reflect.Indirect(value)
} }
engine := enc.lockAndGetEncEngine(ut.base) if !ut.isGobEncoder && value.Type().Kind() == reflect.Struct {
if value.Type().Kind() == reflect.Struct {
enc.encodeStruct(b, engine, value.UnsafeAddr()) enc.encodeStruct(b, engine, value.UnsafeAddr())
} else { } else {
enc.encodeSingle(b, engine, value.UnsafeAddr()) enc.encodeSingle(b, engine, value.UnsafeAddr())
} }
return nil
} }

View File

@ -19,7 +19,9 @@ type Encoder struct {
w []io.Writer // where to send the data w []io.Writer // where to send the data
sent map[reflect.Type]typeId // which types we've already sent sent map[reflect.Type]typeId // which types we've already sent
countState *encoderState // stage for writing counts countState *encoderState // stage for writing counts
freeList *encoderState // list of free encoderStates; avoids reallocation
buf []byte // for collecting the output. buf []byte // for collecting the output.
byteBuf bytes.Buffer // buffer for top-level encoderState
err os.Error err os.Error
} }
@ -28,7 +30,7 @@ func NewEncoder(w io.Writer) *Encoder {
enc := new(Encoder) enc := new(Encoder)
enc.w = []io.Writer{w} enc.w = []io.Writer{w}
enc.sent = make(map[reflect.Type]typeId) enc.sent = make(map[reflect.Type]typeId)
enc.countState = newEncoderState(enc, new(bytes.Buffer)) enc.countState = enc.newEncoderState(new(bytes.Buffer))
return enc return enc
} }
@ -78,12 +80,57 @@ func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) {
} }
} }
func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) { // sendActualType sends the requested type, without further investigation, unless
// Drill down to the base type. // it's been sent before.
ut := userType(origt) func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTypeInfo, actual reflect.Type) (sent bool) {
rt := ut.base if _, alreadySent := enc.sent[actual]; alreadySent {
return false
}
typeLock.Lock()
info, err := getTypeInfo(ut)
typeLock.Unlock()
if err != nil {
enc.setError(err)
return
}
// Send the pair (-id, type)
// Id:
state.encodeInt(-int64(info.id))
// Type:
enc.encode(state.b, reflect.NewValue(info.wire), wireTypeUserInfo)
enc.writeMessage(w, state.b)
if enc.err != nil {
return
}
switch rt := rt.(type) { // Remember we've sent this type, both what the user gave us and the base type.
enc.sent[ut.base] = info.id
if ut.user != ut.base {
enc.sent[ut.user] = info.id
}
// Now send the inner types
switch st := actual.(type) {
case *reflect.StructType:
for i := 0; i < st.NumField(); i++ {
enc.sendType(w, state, st.Field(i).Type)
}
case reflect.ArrayOrSliceType:
enc.sendType(w, state, st.Elem())
}
return true
}
// sendType sends the type info to the other side, if necessary.
func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) {
ut := userType(origt)
if ut.isGobEncoder {
// The rules are different: regardless of the underlying type's representation,
// we need to tell the other side that this exact type is a GobEncoder.
return enc.sendActualType(w, state, ut, ut.user)
}
// It's a concrete value, so drill down to the base type.
switch rt := ut.base.(type) {
default: default:
// Basic types and interfaces do not need to be described. // Basic types and interfaces do not need to be described.
return return
@ -109,43 +156,7 @@ func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Typ
return return
} }
// Have we already sent this type? This time we ask about the base type. return enc.sendActualType(w, state, ut, ut.base)
if _, alreadySent := enc.sent[rt]; alreadySent {
return
}
// Need to send it.
typeLock.Lock()
info, err := getTypeInfo(rt)
typeLock.Unlock()
if err != nil {
enc.setError(err)
return
}
// Send the pair (-id, type)
// Id:
state.encodeInt(-int64(info.id))
// Type:
enc.encode(state.b, reflect.NewValue(info.wire), wireTypeUserInfo)
enc.writeMessage(w, state.b)
if enc.err != nil {
return
}
// Remember we've sent this type.
enc.sent[rt] = info.id
// Remember we've sent the top-level, possibly indirect type too.
enc.sent[origt] = info.id
// Now send the inner types
switch st := rt.(type) {
case *reflect.StructType:
for i := 0; i < st.NumField(); i++ {
enc.sendType(w, state, st.Field(i).Type)
}
case reflect.ArrayOrSliceType:
enc.sendType(w, state, st.Elem())
}
return true
} }
// Encode transmits the data item represented by the empty interface value, // Encode transmits the data item represented by the empty interface value,
@ -159,11 +170,14 @@ func (enc *Encoder) Encode(e interface{}) os.Error {
// sent. // sent.
func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) { func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) {
// Make sure the type is known to the other side. // Make sure the type is known to the other side.
// First, have we already sent this (base) type? // First, have we already sent this type?
base := ut.base rt := ut.base
if _, alreadySent := enc.sent[base]; !alreadySent { if ut.isGobEncoder {
rt = ut.user
}
if _, alreadySent := enc.sent[rt]; !alreadySent {
// No, so send it. // No, so send it.
sent := enc.sendType(w, state, base) sent := enc.sendType(w, state, rt)
if enc.err != nil { if enc.err != nil {
return return
} }
@ -172,13 +186,13 @@ func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *use
// need to send the type info but we do need to update enc.sent. // need to send the type info but we do need to update enc.sent.
if !sent { if !sent {
typeLock.Lock() typeLock.Lock()
info, err := getTypeInfo(base) info, err := getTypeInfo(ut)
typeLock.Unlock() typeLock.Unlock()
if err != nil { if err != nil {
enc.setError(err) enc.setError(err)
return return
} }
enc.sent[base] = info.id enc.sent[rt] = info.id
} }
} }
} }
@ -206,7 +220,8 @@ func (enc *Encoder) EncodeValue(value reflect.Value) os.Error {
} }
enc.err = nil enc.err = nil
state := newEncoderState(enc, new(bytes.Buffer)) enc.byteBuf.Reset()
state := enc.newEncoderState(&enc.byteBuf)
enc.sendTypeDescriptor(enc.writer(), state, ut) enc.sendTypeDescriptor(enc.writer(), state, ut)
enc.sendTypeId(state, ut) enc.sendTypeId(state, ut)
@ -215,12 +230,11 @@ func (enc *Encoder) EncodeValue(value reflect.Value) os.Error {
} }
// Encode the object. // Encode the object.
err = enc.encode(state.b, value, ut) enc.encode(state.b, value, ut)
if err != nil { if enc.err == nil {
enc.setError(err)
} else {
enc.writeMessage(enc.writer(), state.b) enc.writeMessage(enc.writer(), state.b)
} }
enc.freeEncoderState(state)
return enc.err return enc.err
} }

View File

@ -0,0 +1,384 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains tests of the GobEncoder/GobDecoder support.
package gob
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
)
// Types that implement the GobEncoder/Decoder interfaces.
// ByteStruct has only an unexported field; without the GobEncoder/GobDecoder
// methods below, gob could not transmit it at all.
type ByteStruct struct {
    a byte // not an exported field
}

// StringStruct likewise hides its only field from ordinary gob encoding.
type StringStruct struct {
    s string // not an exported field
}

// Gobber is a non-struct type with custom gob methods.
type Gobber int

type ValueGobber string // encodes with a value, decodes with a pointer.
// The relevant methods
// GobEncode emits three consecutive byte values starting at g.a,
// a pattern GobDecode can verify.
func (g *ByteStruct) GobEncode() ([]byte, os.Error) {
    out := []byte{g.a, g.a + 1, g.a + 2}
    return out, nil
}
// GobDecode expects a run of consecutive byte values and records the first.
// A nil receiver is reported explicitly so the test can detect it.
func (g *ByteStruct) GobDecode(data []byte) os.Error {
    switch {
    case g == nil:
        return os.ErrorString("NIL RECEIVER")
    case len(data) == 0:
        // Expect N sequential-valued bytes; none at all is EOF.
        return os.EOF
    }
    g.a = data[0]
    for i, c := range data {
        if c != g.a+byte(i) {
            return os.ErrorString("invalid data sequence")
        }
    }
    return nil
}
// GobEncode transmits the hidden string as its raw bytes.
func (g *StringStruct) GobEncode() ([]byte, os.Error) {
    encoded := []byte(g.s)
    return encoded, nil
}
// GobDecode verifies the payload is a run of sequential byte values and,
// if so, stores it as the hidden string.
func (g *StringStruct) GobDecode(data []byte) os.Error {
    if len(data) == 0 {
        // Expect N sequential-valued bytes; none at all is EOF.
        return os.EOF
    }
    first := data[0]
    for i, c := range data {
        if c != first+byte(i) {
            return os.ErrorString("invalid data sequence")
        }
    }
    g.s = string(data)
    return nil
}
// GobEncode renders the integer as the text "VALUE=n".
func (g *Gobber) GobEncode() ([]byte, os.Error) {
    s := fmt.Sprintf("VALUE=%d", *g)
    return []byte(s), nil
}
// GobDecode parses the "VALUE=n" text produced by GobEncode back into g.
func (g *Gobber) GobDecode(data []byte) os.Error {
    if _, err := fmt.Sscanf(string(data), "VALUE=%d", (*int)(g)); err != nil {
        return err
    }
    return nil
}
// GobEncode renders the string as "VALUE=s"; note the value receiver.
func (v ValueGobber) GobEncode() ([]byte, os.Error) {
    encoded := fmt.Sprintf("VALUE=%s", v)
    return []byte(encoded), nil
}
// GobDecode parses "VALUE=s" into v; note the pointer receiver, the
// asymmetry this type exists to exercise.
func (v *ValueGobber) GobDecode(data []byte) os.Error {
    if _, err := fmt.Sscanf(string(data), "VALUE=%s", (*string)(v)); err != nil {
        return err
    }
    return nil
}
// Structs that include GobEncodable fields.
// GobTest0 holds a pointer field whose type has custom gob methods.
type GobTest0 struct {
    X int // guarantee we have something in common with GobTest*
    G *ByteStruct
}

// GobTest1 is like GobTest0 but with a string-backed GobEncoder field.
type GobTest1 struct {
    X int // guarantee we have something in common with GobTest*
    G *StringStruct
}

// GobTest2 deliberately pairs field name G with a plain string to
// provoke encoder/decoder type mismatches.
type GobTest2 struct {
    X int // guarantee we have something in common with GobTest*
    G string // not a GobEncoder - should give us errors
}

// GobTest3 uses a non-struct GobEncoder field.
type GobTest3 struct {
    X int // guarantee we have something in common with GobTest*
    G *Gobber
}

// GobTest4 encodes through a value; GobTest5 decodes through a pointer.
type GobTest4 struct {
    X int // guarantee we have something in common with GobTest*
    V ValueGobber
}

type GobTest5 struct {
    X int // guarantee we have something in common with GobTest*
    V *ValueGobber
}

// GobTestIgnoreEncoder lacks the G/V field so decoders must skip it.
type GobTestIgnoreEncoder struct {
    X int // guarantee we have something in common with GobTest*
}

// GobTestValueEncDec stores the GobEncoder by value, not pointer.
type GobTestValueEncDec struct {
    X int // guarantee we have something in common with GobTest*
    G StringStruct // not a pointer.
}

// GobTestIndirectEncDec adds extra indirections above the receiver type.
type GobTestIndirectEncDec struct {
    X int // guarantee we have something in common with GobTest*
    G ***StringStruct // indirections to the receiver.
}
// TestGobEncoderField round-trips structs whose fields implement
// GobEncoder/GobDecoder: first a struct-typed field, then a non-struct one.
func TestGobEncoderField(t *testing.T) {
    b := new(bytes.Buffer)
    // First a field that's a structure.
    enc := NewEncoder(b)
    err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := new(GobTest0)
    err = dec.Decode(x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if x.G.a != 'A' {
        t.Errorf("expected 'A' got %c", x.G.a)
    }
    // Now a field that's not a structure.
    b.Reset()
    gobber := Gobber(23)
    err = enc.Encode(GobTest3{17, &gobber})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    y := new(GobTest3)
    err = dec.Decode(y)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if *y.G != 23 {
        // Fixed failure message: was "expected '23 got %d" (stray quote).
        t.Errorf("expected 23 got %d", *y.G)
    }
}
// Even though the field is a value, we can still take its address
// and should be able to call the methods.
// TestGobEncoderValueField checks that a non-pointer GobEncoder field is
// addressable enough for its pointer-receiver methods to be invoked.
func TestGobEncoderValueField(t *testing.T) {
    buf := new(bytes.Buffer)
    enc := NewEncoder(buf)
    if err := enc.Encode(GobTestValueEncDec{17, StringStruct{"HIJKL"}}); err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(buf)
    got := new(GobTestValueEncDec)
    if err := dec.Decode(got); err != nil {
        t.Fatal("decode error:", err)
    }
    if got.G.s != "HIJKL" {
        t.Errorf("expected `HIJKL` got %s", got.G.s)
    }
}
// GobEncode/Decode should work even if the value is
// more indirect than the receiver.
// TestGobEncoderIndirectField verifies that a field more indirect than the
// method receiver (***StringStruct vs *StringStruct) still round-trips.
func TestGobEncoderIndirectField(t *testing.T) {
    buf := new(bytes.Buffer)
    enc := NewEncoder(buf)
    inner := &StringStruct{"HIJKL"}
    middle := &inner
    if err := enc.Encode(GobTestIndirectEncDec{17, &middle}); err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(buf)
    got := new(GobTestIndirectEncDec)
    if err := dec.Decode(got); err != nil {
        t.Fatal("decode error:", err)
    }
    if (***got.G).s != "HIJKL" {
        t.Errorf("expected `HIJKL` got %s", (***got.G).s)
    }
}
// As long as the fields have the same name and implement the
// interface, we can cross-connect them. Not sure it's useful
// and may even be bad but it works and it's hard to prevent
// without exposing the contents of the object, which would
// defeat the purpose.
// TestGobEncoderFieldsOfDifferentType cross-connects same-named fields of
// different GobEncoder types in both directions; the decoder only sees the
// opaque bytes, so this works even if it is of questionable utility.
func TestGobEncoderFieldsOfDifferentType(t *testing.T) {
    // first, string in field to byte in field
    b := new(bytes.Buffer)
    enc := NewEncoder(b)
    err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := new(GobTest0)
    err = dec.Decode(x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if x.G.a != 'A' {
        t.Errorf("expected 'A' got %c", x.G.a)
    }
    // now the other direction, byte in field to string in field
    b.Reset()
    err = enc.Encode(GobTest0{17, &ByteStruct{'X'}})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    y := new(GobTest1)
    err = dec.Decode(y)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if y.G.s != "XYZ" {
        // Fixed verb: y.G.s is a string, so %s, not %c.
        t.Fatalf("expected `XYZ` got %s", y.G.s)
    }
}
// Test that we can encode a value and decode into a pointer.
// TestGobEncoderValueEncoder encodes a value-receiver GobEncoder (GobTest4)
// and decodes into a pointer field (GobTest5).
func TestGobEncoderValueEncoder(t *testing.T) {
    // first, string in field to byte in field
    b := new(bytes.Buffer)
    enc := NewEncoder(b)
    err := enc.Encode(GobTest4{17, ValueGobber("hello")})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := new(GobTest5)
    err = dec.Decode(x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if *x.V != "hello" {
        // Fixed: dereference the pointer so %s prints the string,
        // not the pointer value.
        t.Errorf("expected `hello` got %s", *x.V)
    }
}
// TestGobEncoderFieldTypeError verifies that mixing a GobEncoder field with
// a plain field of the same name is a decode-time type error, both ways.
func TestGobEncoderFieldTypeError(t *testing.T) {
    // GobEncoder to non-decoder: error
    b := new(bytes.Buffer)
    enc := NewEncoder(b)
    err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := &GobTest2{}
    err = dec.Decode(x)
    if err == nil {
        t.Fatal("expected decode error for mismatched fields (encoder to non-decoder)")
    }
    if strings.Index(err.String(), "type") < 0 {
        t.Fatal("expected type error; got", err)
    }
    // Non-encoder to GobDecoder: error
    b.Reset()
    err = enc.Encode(GobTest2{17, "ABC"})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    y := &GobTest1{}
    err = dec.Decode(y)
    if err == nil {
        // Fixed typo in failure message: "mistmatched" -> "mismatched".
        t.Fatal("expected decode error for mismatched fields (non-encoder to decoder)")
    }
    if strings.Index(err.String(), "type") < 0 {
        t.Fatal("expected type error; got", err)
    }
}
// Even though ByteStruct is a struct, it's treated as a singleton at the top level.
// TestGobEncoderStructSingleton: a GobEncoder struct sent as the top-level
// value is treated as an opaque singleton, not field-by-field.
func TestGobEncoderStructSingleton(t *testing.T) {
    buf := new(bytes.Buffer)
    enc := NewEncoder(buf)
    if err := enc.Encode(&ByteStruct{'A'}); err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(buf)
    got := new(ByteStruct)
    if err := dec.Decode(got); err != nil {
        t.Fatal("decode error:", err)
    }
    if got.a != 'A' {
        t.Errorf("expected 'A' got %c", got.a)
    }
}
// TestGobEncoderNonStructSingleton round-trips a non-struct GobEncoder
// (an integer) as the top-level value.
func TestGobEncoderNonStructSingleton(t *testing.T) {
    b := new(bytes.Buffer)
    enc := NewEncoder(b)
    err := enc.Encode(Gobber(1234))
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    var x Gobber
    err = dec.Decode(&x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if x != 1234 {
        // Fixed verb: x is an integer, so %d, not %c.
        t.Errorf("expected 1234 got %d", x)
    }
}
// TestGobEncoderIgnoreStructField checks that a GobEncoder struct field in
// the stream is skipped cleanly when the destination lacks that field.
func TestGobEncoderIgnoreStructField(t *testing.T) {
    b := new(bytes.Buffer)
    // First a field that's a structure.
    enc := NewEncoder(b)
    err := enc.Encode(GobTest0{17, &ByteStruct{'A'}})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := new(GobTestIgnoreEncoder)
    err = dec.Decode(x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if x.X != 17 {
        // Fixed verb: x.X is an int, so %d, not %c.
        t.Errorf("expected 17 got %d", x.X)
    }
}
// TestGobEncoderIgnoreNonStructField is the non-struct analogue of
// TestGobEncoderIgnoreStructField: a skipped Gobber field.
func TestGobEncoderIgnoreNonStructField(t *testing.T) {
    b := new(bytes.Buffer)
    // First a field that's a structure.
    enc := NewEncoder(b)
    gobber := Gobber(23)
    err := enc.Encode(GobTest3{17, &gobber})
    if err != nil {
        t.Fatal("encode error:", err)
    }
    dec := NewDecoder(b)
    x := new(GobTestIgnoreEncoder)
    err = dec.Decode(x)
    if err != nil {
        t.Fatal("decode error:", err)
    }
    if x.X != 17 {
        // Fixed verb: x.X is an int, so %d, not %c.
        t.Errorf("expected 17 got %d", x.X)
    }
}

View File

@ -0,0 +1,90 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"bytes"
"fmt"
"io"
"os"
"runtime"
"testing"
)
// Bench is the fixture for the end-to-end benchmarks and malloc counts:
// one field of each common kind (int, float, string, byte slice).
type Bench struct {
    A int
    B float64
    C string
    D []byte
}
// benchmarkEndToEnd times one Encode+Decode round trip per iteration over
// the given reader/writer pair; setup is excluded via StopTimer.
func benchmarkEndToEnd(r io.Reader, w io.Writer, b *testing.B) {
    b.StopTimer()
    enc := NewEncoder(w)
    dec := NewDecoder(r)
    payload := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        if err := enc.Encode(payload); err != nil {
            panic("encode error")
        }
        if err := dec.Decode(payload); err != nil {
            panic("decode error")
        }
    }
}
// BenchmarkEndToEndPipe measures gob throughput over an OS pipe.
func BenchmarkEndToEndPipe(b *testing.B) {
    r, w, err := os.Pipe()
    if err != nil {
        panic("can't get pipe:" + err.String())
    }
    // Close both ends when done so repeated runs don't leak descriptors.
    defer r.Close()
    defer w.Close()
    benchmarkEndToEnd(r, w, b)
}
// BenchmarkEndToEndByteBuffer measures gob throughput through an in-memory
// buffer, removing pipe/syscall overhead from the comparison.
func BenchmarkEndToEndByteBuffer(b *testing.B) {
    buf := new(bytes.Buffer)
    benchmarkEndToEnd(buf, buf, b)
}
// TestCountEncodeMallocs reports (it does not assert) the average number of
// allocations per Encode of a Bench value.
func TestCountEncodeMallocs(t *testing.T) {
    var buf bytes.Buffer
    enc := NewEncoder(&buf)
    payload := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
    before := runtime.MemStats.Mallocs
    const count = 1000
    for i := 0; i < count; i++ {
        if err := enc.Encode(payload); err != nil {
            t.Fatal("encode:", err)
        }
    }
    after := runtime.MemStats.Mallocs
    fmt.Printf("mallocs per encode of type Bench: %d\n", (after-before)/count)
}
// TestCountDecodeMallocs reports (it does not assert) the average number of
// allocations per Decode of a Bench value, mirroring TestCountEncodeMallocs.
func TestCountDecodeMallocs(t *testing.T) {
    var buf bytes.Buffer
    enc := NewEncoder(&buf)
    bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
    // Fill the buffer with count encoded values before timing decode.
    const count = 1000
    for i := 0; i < count; i++ {
        err := enc.Encode(bench)
        if err != nil {
            t.Fatal("encode:", err)
        }
    }
    dec := NewDecoder(&buf)
    // Negate now, add the final count after the loop: the difference is
    // the number of mallocs performed during decoding.
    mallocs := 0 - runtime.MemStats.Mallocs
    for i := 0; i < count; i++ {
        *bench = Bench{}
        // NOTE(review): &bench is a **Bench here while Encode was given a
        // *Bench; presumably gob follows the extra indirection — confirm
        // the double pointer is intentional rather than dec.Decode(bench).
        err := dec.Decode(&bench)
        if err != nil {
            t.Fatal("decode:", err)
        }
    }
    mallocs += runtime.MemStats.Mallocs
    fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count)
}

View File

@ -9,15 +9,21 @@ import (
"os" "os"
"reflect" "reflect"
"sync" "sync"
"unicode"
"utf8"
) )
// userTypeInfo stores the information associated with a type the user has handed // userTypeInfo stores the information associated with a type the user has handed
// to the package. It's computed once and stored in a map keyed by reflection // to the package. It's computed once and stored in a map keyed by reflection
// type. // type.
type userTypeInfo struct { type userTypeInfo struct {
user reflect.Type // the type the user handed us user reflect.Type // the type the user handed us
base reflect.Type // the base type after all indirections base reflect.Type // the base type after all indirections
indir int // number of indirections to reach the base type indir int // number of indirections to reach the base type
isGobEncoder bool // does the type implement GobEncoder?
isGobDecoder bool // does the type implement GobDecoder?
encIndir int8 // number of indirections to reach the receiver type; may be negative
decIndir int8 // number of indirections to reach the receiver type; may be negative
} }
var ( var (
@ -68,10 +74,73 @@ func validUserType(rt reflect.Type) (ut *userTypeInfo, err os.Error) {
} }
ut.indir++ ut.indir++
} }
ut.isGobEncoder, ut.encIndir = implementsInterface(ut.user, gobEncoderCheck)
ut.isGobDecoder, ut.decIndir = implementsInterface(ut.user, gobDecoderCheck)
userTypeCache[rt] = ut userTypeCache[rt] = ut
return return
} }
// Names of the custom-marshaling methods; declared once so encoder and
// decoder sides spell them identically.
const (
    gobEncodeMethodName = "GobEncode"
    gobDecodeMethodName = "GobDecode"
)
// implements returns whether the type implements the interface, as encoded
// in the check function.
// implements reports whether the type satisfies the interface probed by
// check. The NumMethod test is a cheap pre-filter that skips the more
// expensive check (and its allocations) for method-less types.
func implements(typ reflect.Type, check func(typ reflect.Type) bool) bool {
    if typ.NumMethod() > 0 {
        return check(typ)
    }
    return false
}
// gobEncoderCheck makes the type assertion a boolean function.
// gobEncoderCheck wraps the GobEncoder type assertion as a boolean predicate.
func gobEncoderCheck(typ reflect.Type) bool {
    _, isEncoder := reflect.MakeZero(typ).Interface().(GobEncoder)
    return isEncoder
}
// gobDecoderCheck makes the type assertion a boolean function.
// gobDecoderCheck wraps the GobDecoder type assertion as a boolean predicate.
func gobDecoderCheck(typ reflect.Type) bool {
    _, isDecoder := reflect.MakeZero(typ).Interface().(GobDecoder)
    return isDecoder
}
// implementsInterface reports whether the type implements the
// interface. (The actual check is done through the provided function.)
// It also returns the number of indirections required to get to the
// implementation.
// implementsInterface reports whether typ (or some dereference of it, or
// its address) satisfies the interface probed by check, and how many
// indirections are needed to reach the implementing receiver type.
// indir is positive when pointers must be followed, -1 when the address
// of the value must be taken instead.
func implementsInterface(typ reflect.Type, check func(typ reflect.Type) bool) (success bool, indir int8) {
    if typ == nil {
        return
    }
    rt := typ
    // The type might be a pointer and we need to keep
    // dereferencing to the base type until we find an implementation.
    for {
        if implements(rt, check) {
            return true, indir
        }
        if p, ok := rt.(*reflect.PtrType); ok {
            indir++
            if indir > 100 { // insane number of indirections
                return false, 0
            }
            rt = p.Elem()
            continue
        }
        break
    }
    // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
    if _, ok := typ.(*reflect.PtrType); !ok {
        // Not a pointer, but does the pointer work?
        if implements(reflect.PtrTo(typ), check) {
            // -1 tells the caller to address the value to find the receiver.
            return true, -1
        }
    }
    return false, 0
}
// userType returns, and saves, the information associated with user-provided type rt. // userType returns, and saves, the information associated with user-provided type rt.
// If the user type is not valid, it calls error. // If the user type is not valid, it calls error.
func userType(rt reflect.Type) *userTypeInfo { func userType(rt reflect.Type) *userTypeInfo {
@ -153,22 +222,24 @@ func (t *CommonType) name() string { return t.Name }
var ( var (
// Primordial types, needed during initialization. // Primordial types, needed during initialization.
tBool = bootstrapType("bool", false, 1) // Always passed as pointers so the interface{} type
tInt = bootstrapType("int", int(0), 2) // goes through without losing its interfaceness.
tUint = bootstrapType("uint", uint(0), 3) tBool = bootstrapType("bool", (*bool)(nil), 1)
tFloat = bootstrapType("float", float64(0), 4) tInt = bootstrapType("int", (*int)(nil), 2)
tBytes = bootstrapType("bytes", make([]byte, 0), 5) tUint = bootstrapType("uint", (*uint)(nil), 3)
tString = bootstrapType("string", "", 6) tFloat = bootstrapType("float", (*float64)(nil), 4)
tComplex = bootstrapType("complex", 0+0i, 7) tBytes = bootstrapType("bytes", (*[]byte)(nil), 5)
tInterface = bootstrapType("interface", interface{}(nil), 8) tString = bootstrapType("string", (*string)(nil), 6)
tComplex = bootstrapType("complex", (*complex128)(nil), 7)
tInterface = bootstrapType("interface", (*interface{})(nil), 8)
// Reserve some Ids for compatible expansion // Reserve some Ids for compatible expansion
tReserved7 = bootstrapType("_reserved1", struct{ r7 int }{}, 9) tReserved7 = bootstrapType("_reserved1", (*struct{ r7 int })(nil), 9)
tReserved6 = bootstrapType("_reserved1", struct{ r6 int }{}, 10) tReserved6 = bootstrapType("_reserved1", (*struct{ r6 int })(nil), 10)
tReserved5 = bootstrapType("_reserved1", struct{ r5 int }{}, 11) tReserved5 = bootstrapType("_reserved1", (*struct{ r5 int })(nil), 11)
tReserved4 = bootstrapType("_reserved1", struct{ r4 int }{}, 12) tReserved4 = bootstrapType("_reserved1", (*struct{ r4 int })(nil), 12)
tReserved3 = bootstrapType("_reserved1", struct{ r3 int }{}, 13) tReserved3 = bootstrapType("_reserved1", (*struct{ r3 int })(nil), 13)
tReserved2 = bootstrapType("_reserved1", struct{ r2 int }{}, 14) tReserved2 = bootstrapType("_reserved1", (*struct{ r2 int })(nil), 14)
tReserved1 = bootstrapType("_reserved1", struct{ r1 int }{}, 15) tReserved1 = bootstrapType("_reserved1", (*struct{ r1 int })(nil), 15)
) )
// Predefined because it's needed by the Decoder // Predefined because it's needed by the Decoder
@ -229,6 +300,23 @@ func (a *arrayType) safeString(seen map[typeId]bool) string {
func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) } func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) }
// GobEncoder type (something that implements the GobEncoder interface)
// gobEncoderType represents a type that implements GobEncoder: on the wire
// it is identified only by name, since its encoding is opaque bytes.
type gobEncoderType struct {
    CommonType
}

// newGobEncoderType allocates a gobEncoderType and assigns it a type id.
func newGobEncoderType(name string) *gobEncoderType {
    g := &gobEncoderType{CommonType{Name: name}}
    setTypeId(g)
    return g
}

// safeString ignores seen: a GobEncoder type prints as just its name and
// cannot recurse.
func (g *gobEncoderType) safeString(seen map[typeId]bool) string {
    return g.Name
}

func (g *gobEncoderType) string() string { return g.Name }
// Map type // Map type
type mapType struct { type mapType struct {
CommonType CommonType
@ -324,11 +412,16 @@ func newStructType(name string) *structType {
return s return s
} }
func (s *structType) init(field []*fieldType) { // newTypeObject allocates a gobType for the reflection type rt.
s.Field = field // Unless ut represents a GobEncoder, rt should be the base type
} // of ut.
// This is only called from the encoding side. The decoding side
func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { // works through typeIds and userTypeInfos alone.
func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) {
// Does this type implement GobEncoder?
if ut.isGobEncoder {
return newGobEncoderType(name), nil
}
var err os.Error var err os.Error
var type0, type1 gobType var type0, type1 gobType
defer func() { defer func() {
@ -364,7 +457,7 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) {
case *reflect.ArrayType: case *reflect.ArrayType:
at := newArrayType(name) at := newArrayType(name)
types[rt] = at types[rt] = at
type0, err = getType("", t.Elem()) type0, err = getBaseType("", t.Elem())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -382,11 +475,11 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) {
case *reflect.MapType: case *reflect.MapType:
mt := newMapType(name) mt := newMapType(name)
types[rt] = mt types[rt] = mt
type0, err = getType("", t.Key()) type0, err = getBaseType("", t.Key())
if err != nil { if err != nil {
return nil, err return nil, err
} }
type1, err = getType("", t.Elem()) type1, err = getBaseType("", t.Elem())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -400,7 +493,7 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) {
} }
st := newSliceType(name) st := newSliceType(name)
types[rt] = st types[rt] = st
type0, err = getType(t.Elem().Name(), t.Elem()) type0, err = getBaseType(t.Elem().Name(), t.Elem())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -411,22 +504,23 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) {
st := newStructType(name) st := newStructType(name)
types[rt] = st types[rt] = st
idToType[st.id()] = st idToType[st.id()] = st
field := make([]*fieldType, t.NumField())
for i := 0; i < t.NumField(); i++ { for i := 0; i < t.NumField(); i++ {
f := t.Field(i) f := t.Field(i)
if !isExported(f.Name) {
continue
}
typ := userType(f.Type).base typ := userType(f.Type).base
tname := typ.Name() tname := typ.Name()
if tname == "" { if tname == "" {
t := userType(f.Type).base t := userType(f.Type).base
tname = t.String() tname = t.String()
} }
gt, err := getType(tname, f.Type) gt, err := getBaseType(tname, f.Type)
if err != nil { if err != nil {
return nil, err return nil, err
} }
field[i] = &fieldType{f.Name, gt.id()} st.Field = append(st.Field, &fieldType{f.Name, gt.id()})
} }
st.init(field)
return st, nil return st, nil
default: default:
@ -435,15 +529,30 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) {
return nil, nil return nil, nil
} }
// getType returns the Gob type describing the given reflect.Type. // isExported reports whether this is an exported - upper case - name.
func isExported(name string) bool {
rune, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(rune)
}
// getBaseType returns the Gob type describing the given reflect.Type's base type.
// typeLock must be held. // typeLock must be held.
func getType(name string, rt reflect.Type) (gobType, os.Error) { func getBaseType(name string, rt reflect.Type) (gobType, os.Error) {
rt = userType(rt).base ut := userType(rt)
return getType(name, ut, ut.base)
}
// getType returns the Gob type describing the given reflect.Type.
// Should be called only when handling GobEncoders/Decoders,
// which may be pointers. All other types are handled through the
// base type, never a pointer.
// typeLock must be held.
func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) {
typ, present := types[rt] typ, present := types[rt]
if present { if present {
return typ, nil return typ, nil
} }
typ, err := newTypeObject(name, rt) typ, err := newTypeObject(name, ut, rt)
if err == nil { if err == nil {
types[rt] = typ types[rt] = typ
} }
@ -457,9 +566,10 @@ func checkId(want, got typeId) {
} }
} }
// used for building the basic types; called only from init() // used for building the basic types; called only from init(). the incoming
// interface always refers to a pointer.
func bootstrapType(name string, e interface{}, expect typeId) typeId { func bootstrapType(name string, e interface{}, expect typeId) typeId {
rt := reflect.Typeof(e) rt := reflect.Typeof(e).(*reflect.PtrType).Elem()
_, present := types[rt] _, present := types[rt]
if present { if present {
panic("bootstrap type already present: " + name + ", " + rt.String()) panic("bootstrap type already present: " + name + ", " + rt.String())
@ -484,10 +594,11 @@ func bootstrapType(name string, e interface{}, expect typeId) typeId {
// To maintain binary compatibility, if you extend this type, always put // To maintain binary compatibility, if you extend this type, always put
// the new fields last. // the new fields last.
type wireType struct { type wireType struct {
ArrayT *arrayType ArrayT *arrayType
SliceT *sliceType SliceT *sliceType
StructT *structType StructT *structType
MapT *mapType MapT *mapType
GobEncoderT *gobEncoderType
} }
func (w *wireType) string() string { func (w *wireType) string() string {
@ -504,6 +615,8 @@ func (w *wireType) string() string {
return w.StructT.Name return w.StructT.Name
case w.MapT != nil: case w.MapT != nil:
return w.MapT.Name return w.MapT.Name
case w.GobEncoderT != nil:
return w.GobEncoderT.Name
} }
return unknown return unknown
} }
@ -516,49 +629,88 @@ type typeInfo struct {
var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock
// The reflection type must have all its indirections processed out.
// typeLock must be held. // typeLock must be held.
func getTypeInfo(rt reflect.Type) (*typeInfo, os.Error) { func getTypeInfo(ut *userTypeInfo) (*typeInfo, os.Error) {
if rt.Kind() == reflect.Ptr { rt := ut.base
panic("pointer type in getTypeInfo: " + rt.String()) if ut.isGobEncoder {
// We want the user type, not the base type.
rt = ut.user
} }
info, ok := typeInfoMap[rt] info, ok := typeInfoMap[rt]
if !ok { if ok {
info = new(typeInfo) return info, nil
name := rt.Name() }
gt, err := getType(name, rt) info = new(typeInfo)
gt, err := getBaseType(rt.Name(), rt)
if err != nil {
return nil, err
}
info.id = gt.id()
if ut.isGobEncoder {
userType, err := getType(rt.Name(), ut, rt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
info.id = gt.id() info.wire = &wireType{GobEncoderT: userType.id().gobType().(*gobEncoderType)}
t := info.id.gobType() typeInfoMap[ut.user] = info
switch typ := rt.(type) { return info, nil
case *reflect.ArrayType:
info.wire = &wireType{ArrayT: t.(*arrayType)}
case *reflect.MapType:
info.wire = &wireType{MapT: t.(*mapType)}
case *reflect.SliceType:
// []byte == []uint8 is a special case handled separately
if typ.Elem().Kind() != reflect.Uint8 {
info.wire = &wireType{SliceT: t.(*sliceType)}
}
case *reflect.StructType:
info.wire = &wireType{StructT: t.(*structType)}
}
typeInfoMap[rt] = info
} }
t := info.id.gobType()
switch typ := rt.(type) {
case *reflect.ArrayType:
info.wire = &wireType{ArrayT: t.(*arrayType)}
case *reflect.MapType:
info.wire = &wireType{MapT: t.(*mapType)}
case *reflect.SliceType:
// []byte == []uint8 is a special case handled separately
if typ.Elem().Kind() != reflect.Uint8 {
info.wire = &wireType{SliceT: t.(*sliceType)}
}
case *reflect.StructType:
info.wire = &wireType{StructT: t.(*structType)}
}
typeInfoMap[rt] = info
return info, nil return info, nil
} }
// Called only when a panic is acceptable and unexpected. // Called only when a panic is acceptable and unexpected.
func mustGetTypeInfo(rt reflect.Type) *typeInfo { func mustGetTypeInfo(rt reflect.Type) *typeInfo {
t, err := getTypeInfo(rt) t, err := getTypeInfo(userType(rt))
if err != nil { if err != nil {
panic("getTypeInfo: " + err.String()) panic("getTypeInfo: " + err.String())
} }
return t return t
} }
// GobEncoder is the interface describing data that provides its own
// representation for encoding values for transmission to a GobDecoder.
// A type that implements GobEncoder and GobDecoder has complete
// control over the representation of its data and may therefore
// contain things such as private fields, channels, and functions,
// which are not usually transmissible in gob streams.
//
// Note: Since gobs can be stored permanently, it is good design
// to guarantee the encoding used by a GobEncoder is stable as the
// software evolves. For instance, it might make sense for GobEncode
// to include a version number in the encoding.
type GobEncoder interface {
    // GobEncode returns a byte slice representing the encoding of the
    // receiver for transmission to a GobDecoder, usually of the same
    // concrete type.
    GobEncode() ([]byte, os.Error)
}
// GobDecoder is the interface describing data that provides its own
// routine for decoding transmitted values sent by a GobEncoder.
type GobDecoder interface {
    // GobDecode overwrites the receiver, which must be a pointer,
    // with the value represented by the byte slice, which was written
    // by GobEncode, usually for the same concrete type.
    GobDecode([]byte) os.Error
}
var ( var (
nameToConcreteType = make(map[string]reflect.Type) nameToConcreteType = make(map[string]reflect.Type)
concreteTypeToName = make(map[reflect.Type]string) concreteTypeToName = make(map[reflect.Type]string)

View File

@ -26,7 +26,7 @@ var basicTypes = []typeT{
func getTypeUnlocked(name string, rt reflect.Type) gobType { func getTypeUnlocked(name string, rt reflect.Type) gobType {
typeLock.Lock() typeLock.Lock()
defer typeLock.Unlock() defer typeLock.Unlock()
t, err := getType(name, rt) t, err := getBaseType(name, rt)
if err != nil { if err != nil {
panic("getTypeUnlocked: " + err.String()) panic("getTypeUnlocked: " + err.String())
} }
@ -126,27 +126,27 @@ func TestMapType(t *testing.T) {
} }
type Bar struct { type Bar struct {
x string X string
} }
// This structure has pointers and refers to itself, making it a good test case. // This structure has pointers and refers to itself, making it a good test case.
type Foo struct { type Foo struct {
a int A int
b int32 // will become int B int32 // will become int
c string C string
d []byte D []byte
e *float64 // will become float64 E *float64 // will become float64
f ****float64 // will become float64 F ****float64 // will become float64
g *Bar G *Bar
h *Bar // should not interpolate the definition of Bar again H *Bar // should not interpolate the definition of Bar again
i *Foo // will not explode I *Foo // will not explode
} }
func TestStructType(t *testing.T) { func TestStructType(t *testing.T) {
sstruct := getTypeUnlocked("Foo", reflect.Typeof(Foo{})) sstruct := getTypeUnlocked("Foo", reflect.Typeof(Foo{}))
str := sstruct.string() str := sstruct.string()
// If we can print it correctly, we built it correctly. // If we can print it correctly, we built it correctly.
expected := "Foo = struct { a int; b int; c string; d bytes; e float; f float; g Bar = struct { x string; }; h Bar; i Foo; }" expected := "Foo = struct { A int; B int; C string; D bytes; E float; F float; G Bar = struct { X string; }; H Bar; I Foo; }"
if str != expected { if str != expected {
t.Errorf("struct printed as %q; expected %q", str, expected) t.Errorf("struct printed as %q; expected %q", str, expected)
} }

133
libgo/go/hash/fnv/fnv.go Normal file
View File

@ -0,0 +1,133 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The fnv package implements FNV-1 and FNV-1a,
// non-cryptographic hash functions created by
// Glenn Fowler, Landon Curt Noll, and Phong Vo.
// See http://isthe.com/chongo/tech/comp/fnv/.
package fnv
import (
"encoding/binary"
"hash"
"os"
"unsafe"
)
// Each hash state is simply its running sum; four distinct named types let
// the FNV-1 and FNV-1a variants at both widths have their own method sets.
type (
    sum32  uint32
    sum32a uint32
    sum64  uint64
    sum64a uint64
)
// FNV parameters: the offset basis is the initial sum, the prime is the
// per-byte multiplier. See http://isthe.com/chongo/tech/comp/fnv/.
const (
    offset32 = 2166136261
    offset64 = 14695981039346656037
    prime32  = 16777619
    prime64  = 1099511628211
)
// New32 returns a new 32-bit FNV-1 hash.Hash, seeded with the offset basis.
func New32() hash.Hash32 {
    s := sum32(offset32)
    return &s
}
// New32a returns a new 32-bit FNV-1a hash.Hash, seeded with the offset basis.
func New32a() hash.Hash32 {
    s := sum32a(offset32)
    return &s
}
// New64 returns a new 64-bit FNV-1 hash.Hash, seeded with the offset basis.
func New64() hash.Hash64 {
    s := sum64(offset64)
    return &s
}
// New64a returns a new 64-bit FNV-1a hash.Hash, seeded with the offset basis.
func New64a() hash.Hash64 {
    s := sum64a(offset64)
    return &s
}
// Reset restores the initial offset basis, discarding all written data.
func (s *sum32) Reset()  { *s = offset32 }
func (s *sum32a) Reset() { *s = offset32 }
func (s *sum64) Reset()  { *s = offset64 }
func (s *sum64a) Reset() { *s = offset64 }
// Sum32/Sum64 expose the current state as the digest value.
func (s *sum32) Sum32() uint32  { return uint32(*s) }
func (s *sum32a) Sum32() uint32 { return uint32(*s) }
func (s *sum64) Sum64() uint64  { return uint64(*s) }
func (s *sum64a) Sum64() uint64 { return uint64(*s) }
// Write folds data into the FNV-1 sum: multiply by the prime, then XOR in
// each byte. It never fails.
func (s *sum32) Write(data []byte) (int, os.Error) {
    h := *s
    for _, c := range data {
        h *= prime32
        h ^= sum32(c)
    }
    *s = h
    return len(data), nil
}
// Write folds data into the FNV-1a sum: XOR in each byte, then multiply by
// the prime (the reverse order from FNV-1). It never fails.
func (s *sum32a) Write(data []byte) (int, os.Error) {
    h := *s
    for _, c := range data {
        h ^= sum32a(c)
        h *= prime32
    }
    *s = h
    return len(data), nil
}
// Write folds data into the 64-bit FNV-1 sum: multiply, then XOR.
// It never fails.
func (s *sum64) Write(data []byte) (int, os.Error) {
    h := *s
    for _, c := range data {
        h *= prime64
        h ^= sum64(c)
    }
    *s = h
    return len(data), nil
}
// Write folds data into the 64-bit FNV-1a sum: XOR, then multiply.
// It never fails.
func (s *sum64a) Write(data []byte) (int, os.Error) {
    h := *s
    for _, c := range data {
        h ^= sum64a(c)
        h *= prime64
    }
    *s = h
    return len(data), nil
}
// Size reports the digest length in bytes (4 or 8). Relies on the
// pre-Go1 unsafe.Sizeof, which yields an int.
func (s *sum32) Size() int  { return unsafe.Sizeof(*s) }
func (s *sum32a) Size() int { return unsafe.Sizeof(*s) }
func (s *sum64) Size() int  { return unsafe.Sizeof(*s) }
func (s *sum64a) Size() int { return unsafe.Sizeof(*s) }
// Sum returns the current state as big-endian bytes.
func (s *sum32) Sum() []byte {
    a := make([]byte, unsafe.Sizeof(*s))
    binary.BigEndian.PutUint32(a, uint32(*s))
    return a
}

func (s *sum32a) Sum() []byte {
    a := make([]byte, unsafe.Sizeof(*s))
    binary.BigEndian.PutUint32(a, uint32(*s))
    return a
}

func (s *sum64) Sum() []byte {
    a := make([]byte, unsafe.Sizeof(*s))
    binary.BigEndian.PutUint64(a, uint64(*s))
    return a
}

func (s *sum64a) Sum() []byte {
    a := make([]byte, unsafe.Sizeof(*s))
    binary.BigEndian.PutUint64(a, uint64(*s))
    return a
}

View File

@ -0,0 +1,167 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fnv
import (
"bytes"
"encoding/binary"
"hash"
"testing"
)
// testDataSize is the number of bytes hashed per benchmark iteration.
const testDataSize = 40

// golden pairs an input string with its expected FNV digest bytes.
type golden struct {
	sum  []byte
	text string
}
// golden32 holds known-answer vectors for FNV-1/32.
var golden32 = []golden{
	{[]byte{0x81, 0x1c, 0x9d, 0xc5}, ""},
	{[]byte{0x05, 0x0c, 0x5d, 0x7e}, "a"},
	{[]byte{0x70, 0x77, 0x2d, 0x38}, "ab"},
	{[]byte{0x43, 0x9c, 0x2f, 0x4b}, "abc"},
}

// golden32a holds known-answer vectors for FNV-1a/32.
var golden32a = []golden{
	{[]byte{0x81, 0x1c, 0x9d, 0xc5}, ""},
	{[]byte{0xe4, 0x0c, 0x29, 0x2c}, "a"},
	{[]byte{0x4d, 0x25, 0x05, 0xca}, "ab"},
	{[]byte{0x1a, 0x47, 0xe9, 0x0b}, "abc"},
}

// golden64 holds known-answer vectors for FNV-1/64.
var golden64 = []golden{
	{[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, ""},
	{[]byte{0xaf, 0x63, 0xbd, 0x4c, 0x86, 0x01, 0xb7, 0xbe}, "a"},
	{[]byte{0x08, 0x32, 0x67, 0x07, 0xb4, 0xeb, 0x37, 0xb8}, "ab"},
	{[]byte{0xd8, 0xdc, 0xca, 0x18, 0x6b, 0xaf, 0xad, 0xcb}, "abc"},
}

// golden64a holds known-answer vectors for FNV-1a/64.
var golden64a = []golden{
	{[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, ""},
	{[]byte{0xaf, 0x63, 0xdc, 0x4c, 0x86, 0x01, 0xec, 0x8c}, "a"},
	{[]byte{0x08, 0x9c, 0x44, 0x07, 0xb5, 0x45, 0x98, 0x6a}, "ab"},
	{[]byte{0xe7, 0x1f, 0xa2, 0x19, 0x05, 0x41, 0x57, 0x4b}, "abc"},
}
// TestGolden32 checks FNV-1/32 against its golden vectors.
func TestGolden32(t *testing.T) {
	testGolden(t, New32(), golden32)
}

// TestGolden32a checks FNV-1a/32 against its golden vectors.
func TestGolden32a(t *testing.T) {
	testGolden(t, New32a(), golden32a)
}

// TestGolden64 checks FNV-1/64 against its golden vectors.
func TestGolden64(t *testing.T) {
	testGolden(t, New64(), golden64)
}

// TestGolden64a checks FNV-1a/64 against its golden vectors.
func TestGolden64a(t *testing.T) {
	testGolden(t, New64a(), golden64a)
}
// testGolden hashes each golden text and compares the digest bytes
// against the expected sum, checking the Write contract along the way.
func testGolden(t *testing.T, hash hash.Hash, gold []golden) {
	for _, g := range gold {
		hash.Reset()
		// Renamed from "error": don't shadow the predeclared identifier.
		done, err := hash.Write([]byte(g.text))
		if err != nil {
			t.Fatalf("write error: %s", err)
		}
		if done != len(g.text) {
			t.Fatalf("wrote only %d out of %d bytes", done, len(g.text))
		}
		if actual := hash.Sum(); !bytes.Equal(g.sum, actual) {
			t.Errorf("hash(%q) = 0x%x want 0x%x", g.text, actual, g.sum)
		}
	}
}
// TestIntegrity32 exercises the generic hash invariants for FNV-1/32.
func TestIntegrity32(t *testing.T) {
	testIntegrity(t, New32())
}

// TestIntegrity32a exercises the generic hash invariants for FNV-1a/32.
func TestIntegrity32a(t *testing.T) {
	testIntegrity(t, New32a())
}

// TestIntegrity64 exercises the generic hash invariants for FNV-1/64.
func TestIntegrity64(t *testing.T) {
	testIntegrity(t, New64())
}

// TestIntegrity64a exercises the generic hash invariants for FNV-1a/64.
func TestIntegrity64a(t *testing.T) {
	testIntegrity(t, New64a())
}
// testIntegrity checks interface invariants that must hold for any of
// the four FNV variants: Sum length equals Size, Sum is repeatable and
// side-effect free, Reset restores the initial state, chunked writes
// agree with a single write, and Sum32/Sum64 agree with the big-endian
// Sum encoding.
func testIntegrity(t *testing.T, h hash.Hash) {
	data := []byte{'1', '2', 3, 4, 5}
	h.Write(data)
	sum := h.Sum()
	if size := h.Size(); size != len(sum) {
		t.Fatalf("Size()=%d but len(Sum())=%d", size, len(sum))
	}
	// Sum must not mutate the running state.
	if a := h.Sum(); !bytes.Equal(sum, a) {
		t.Fatalf("first Sum()=0x%x, second Sum()=0x%x", sum, a)
	}
	h.Reset()
	h.Write(data)
	if a := h.Sum(); !bytes.Equal(sum, a) {
		t.Fatalf("Sum()=0x%x, but after Reset() Sum()=0x%x", sum, a)
	}
	h.Reset()
	// Split the input across two writes; the digest must be unchanged.
	h.Write(data[:2])
	h.Write(data[2:])
	if a := h.Sum(); !bytes.Equal(sum, a) {
		t.Fatalf("Sum()=0x%x, but with partial writes, Sum()=0x%x", sum, a)
	}
	// Cross-check the integer digest against the byte encoding.
	switch h.Size() {
	case 4:
		sum32 := h.(hash.Hash32).Sum32()
		if sum32 != binary.BigEndian.Uint32(sum) {
			t.Fatalf("Sum()=0x%x, but Sum32()=0x%x", sum, sum32)
		}
	case 8:
		sum64 := h.(hash.Hash64).Sum64()
		if sum64 != binary.BigEndian.Uint64(sum) {
			t.Fatalf("Sum()=0x%x, but Sum64()=0x%x", sum, sum64)
		}
	}
}
// Benchmark32 measures FNV-1/32 throughput.
func Benchmark32(b *testing.B) {
	benchmark(b, New32())
}

// Benchmark32a measures FNV-1a/32 throughput.
func Benchmark32a(b *testing.B) {
	benchmark(b, New32a())
}

// Benchmark64 measures FNV-1/64 throughput.
func Benchmark64(b *testing.B) {
	benchmark(b, New64())
}

// Benchmark64a measures FNV-1a/64 throughput.
func Benchmark64a(b *testing.B) {
	benchmark(b, New64a())
}
// benchmark times h over repeated testDataSize-byte writes, excluding
// the setup of the input buffer from the measured interval.
func benchmark(b *testing.B, h hash.Hash) {
	b.ResetTimer()
	b.SetBytes(testDataSize)
	data := make([]byte, testDataSize)
	// Idiom fix: "for i, _ := range" is redundant; the blank value is unused.
	for i := range data {
		data[i] = byte(i + 'a')
	}
	b.StartTimer()
	for todo := b.N; todo != 0; todo-- {
		h.Reset()
		h.Write(data)
		h.Sum()
	}
}

192
libgo/go/http/cgi/child.go Normal file
View File

@ -0,0 +1,192 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements CGI from the perspective of a child
// process.
package cgi
import (
"bufio"
"fmt"
"http"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
)
// Request returns the HTTP request as represented in the current
// environment. This assumes the current program is being run
// by a web server in a CGI environment.
func Request() (*http.Request, os.Error) {
	// Snapshot the process environment into a map, then translate the
	// CGI meta-variables into an http.Request.
	return requestFromEnvironment(envMap(os.Environ()))
}
// envMap converts a slice of "key=value" strings (as returned by
// os.Environ) into a map. Entries without '=' are dropped; the value
// keeps everything after the first '='.
func envMap(env []string) map[string]string {
	m := make(map[string]string, len(env))
	for _, kv := range env {
		idx := strings.Index(kv, "=")
		if idx < 0 {
			continue
		}
		key, value := kv[:idx], kv[idx+1:]
		m[key] = value
	}
	return m
}
// These environment variables are manually copied into Request
// struct fields (Host, Referer, UserAgent) instead of the Header map,
// so the generic HTTP_* copying loop must skip them.
var skipHeader = map[string]bool{
	"HTTP_HOST":       true,
	"HTTP_REFERER":    true,
	"HTTP_USER_AGENT": true,
}
// requestFromEnvironment builds an http.Request from the CGI
// meta-variables in env (see RFC 3875). REQUEST_METHOD is required.
// If CONTENT_LENGTH is set, the request body is a length-limited
// reader over os.Stdin. Returns an error for missing or malformed
// required variables.
func requestFromEnvironment(env map[string]string) (*http.Request, os.Error) {
	r := new(http.Request)
	r.Method = env["REQUEST_METHOD"]
	if r.Method == "" {
		return nil, os.NewError("cgi: no REQUEST_METHOD in environment")
	}
	// A CGI request is one-shot: no keep-alive, no trailers.
	r.Close = true
	r.Trailer = http.Header{}
	r.Header = http.Header{}
	// These three are surfaced as struct fields, not Header entries
	// (see skipHeader).
	r.Host = env["HTTP_HOST"]
	r.Referer = env["HTTP_REFERER"]
	r.UserAgent = env["HTTP_USER_AGENT"]
	// CGI doesn't allow chunked requests, so these should all be accurate:
	r.Proto = "HTTP/1.0"
	r.ProtoMajor = 1
	r.ProtoMinor = 0
	r.TransferEncoding = nil
	if lenstr := env["CONTENT_LENGTH"]; lenstr != "" {
		clen, err := strconv.Atoi64(lenstr)
		if err != nil {
			return nil, os.NewError("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
		}
		r.ContentLength = clen
		// The web server delivers the body on our stdin; never read
		// past the declared length.
		r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, clen))
	}
	// Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
	for k, v := range env {
		if !strings.HasPrefix(k, "HTTP_") || skipHeader[k] {
			continue
		}
		r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v)
	}
	// TODO: cookies. parsing them isn't exported, though.
	if r.Host != "" {
		// Hostname is provided, so we can reasonably construct a URL,
		// even if we have to assume 'http' for the scheme.
		r.RawURL = "http://" + r.Host + env["REQUEST_URI"]
		url, err := http.ParseURL(r.RawURL)
		if err != nil {
			return nil, os.NewError("cgi: failed to parse host and REQUEST_URI into a URL: " + r.RawURL)
		}
		r.URL = url
	}
	// Fallback logic if we don't have a Host header or the URL
	// failed to parse
	if r.URL == nil {
		r.RawURL = env["REQUEST_URI"]
		url, err := http.ParseURL(r.RawURL)
		if err != nil {
			return nil, os.NewError("cgi: failed to parse REQUEST_URI into a URL: " + r.RawURL)
		}
		r.URL = url
	}
	return r, nil
}
// Serve executes the provided Handler against the CGI request found in
// the current environment, writing the response to standard output.
// If no CGI environment is active, the environment error is returned.
// A nil handler means http.DefaultServeMux.
func Serve(handler http.Handler) os.Error {
	req, err := Request()
	if err != nil {
		return err
	}
	if handler == nil {
		handler = http.DefaultServeMux
	}
	rw := &response{
		req:    req,
		header: make(http.Header),
		bufw:   bufio.NewWriter(os.Stdout),
	}
	handler.ServeHTTP(rw, req)
	// Flush any buffered output; its error is the call's result.
	return rw.bufw.Flush()
}
// response implements http.ResponseWriter for a CGI child process by
// emitting a CGI-style response (Status line, headers, blank line,
// body) through a buffered writer over stdout.
type response struct {
	req        *http.Request
	header     http.Header
	bufw       *bufio.Writer
	headerSent bool // set once WriteHeader has emitted the header block
}
// Flush pushes any buffered response bytes out to stdout.
func (r *response) Flush() {
	r.bufw.Flush()
}

// RemoteAddr reports the client address, taken from the standard
// REMOTE_ADDR CGI variable.
func (r *response) RemoteAddr() string {
	return os.Getenv("REMOTE_ADDR")
}

// Header returns the response header map to be sent by WriteHeader.
func (r *response) Header() http.Header {
	return r.header
}

// Write sends body bytes, emitting a default 200 header block first
// if WriteHeader has not been called yet.
func (r *response) Write(p []byte) (n int, err os.Error) {
	if !r.headerSent {
		r.WriteHeader(http.StatusOK)
	}
	return r.bufw.Write(p)
}
// WriteHeader emits the CGI "Status:" line and the accumulated header
// block, exactly once. A second call is logged to stderr (stdout is
// the HTTP response itself) and otherwise ignored.
func (r *response) WriteHeader(code int) {
	if r.headerSent {
		// Note: explicitly using Stderr, as Stdout is our HTTP output.
		fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
		return
	}
	r.headerSent = true
	fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code))
	// Supply a default Content-Type when the handler set none.
	if _, ok := r.header["Content-Type"]; !ok {
		r.header.Add("Content-Type", "text/html; charset=utf-8")
	}
	// TODO: add a method on http.Header to write itself to an io.Writer?
	// This is duplicated code.
	for name, values := range r.header {
		for _, value := range values {
			// Strip CR/LF so a value cannot inject extra header lines.
			value = strings.Replace(value, "\n", "", -1)
			value = strings.Replace(value, "\r", "", -1)
			fmt.Fprintf(r.bufw, "%s: %s\r\n", name, strings.TrimSpace(value))
		}
	}
	r.bufw.WriteString("\r\n")
	r.bufw.Flush()
}
// UsingTLS reports whether the request arrived over TLS, using the
// de-facto standard HTTPS environment variable.
// http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
func (r *response) UsingTLS() bool {
	switch os.Getenv("HTTPS") {
	case "on", "ON", "1":
		return true
	}
	return false
}

View File

@ -0,0 +1,83 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for CGI (the child process perspective)
package cgi
import (
"testing"
)
// TestRequest feeds a full set of CGI variables through
// requestFromEnvironment and checks every derived Request field:
// recognized HTTP_* variables land in struct fields, the rest in the
// Header map, and Host + REQUEST_URI are combined into the URL.
func TestRequest(t *testing.T) {
	env := map[string]string{
		"REQUEST_METHOD":  "GET",
		"HTTP_HOST":       "example.com",
		"HTTP_REFERER":    "elsewhere",
		"HTTP_USER_AGENT": "goclient",
		"HTTP_FOO_BAR":    "baz",
		"REQUEST_URI":     "/path?a=b",
		"CONTENT_LENGTH":  "123",
	}
	req, err := requestFromEnvironment(env)
	if err != nil {
		t.Fatalf("requestFromEnvironment: %v", err)
	}
	if g, e := req.UserAgent, "goclient"; e != g {
		t.Errorf("expected UserAgent %q; got %q", e, g)
	}
	if g, e := req.Method, "GET"; e != g {
		t.Errorf("expected Method %q; got %q", e, g)
	}
	if g, e := req.Header.Get("User-Agent"), ""; e != g {
		// Tests that we don't put recognized headers in the map
		t.Errorf("expected User-Agent %q; got %q", e, g)
	}
	if g, e := req.ContentLength, int64(123); e != g {
		t.Errorf("expected ContentLength %d; got %d", e, g)
	}
	if g, e := req.Referer, "elsewhere"; e != g {
		t.Errorf("expected Referer %q; got %q", e, g)
	}
	if req.Header == nil {
		t.Fatalf("unexpected nil Header")
	}
	if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
		t.Errorf("expected Foo-Bar %q; got %q", e, g)
	}
	if g, e := req.RawURL, "http://example.com/path?a=b"; e != g {
		t.Errorf("expected RawURL %q; got %q", e, g)
	}
	if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
		t.Errorf("expected URL %q; got %q", e, g)
	}
	if g, e := req.FormValue("a"), "b"; e != g {
		t.Errorf("expected FormValue(a) %q; got %q", e, g)
	}
	if req.Trailer == nil {
		t.Errorf("unexpected nil Trailer")
	}
}
// TestRequestWithoutHost checks the fallback path: with no HTTP_HOST
// the URL is built from REQUEST_URI alone, without a scheme or host.
func TestRequestWithoutHost(t *testing.T) {
	env := map[string]string{
		"HTTP_HOST":      "",
		"REQUEST_METHOD": "GET",
		"REQUEST_URI":    "/path?a=b",
		"CONTENT_LENGTH": "123",
	}
	req, err := requestFromEnvironment(env)
	if err != nil {
		t.Fatalf("requestFromEnvironment: %v", err)
	}
	if g, e := req.RawURL, "/path?a=b"; e != g {
		t.Errorf("expected RawURL %q; got %q", e, g)
	}
	if req.URL == nil {
		t.Fatalf("unexpected nil URL")
	}
	if g, e := req.URL.String(), "/path?a=b"; e != g {
		t.Errorf("expected URL %q; got %q", e, g)
	}
}

221
libgo/go/http/cgi/host.go Normal file
View File

@ -0,0 +1,221 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the host side of CGI (being the webserver
// parent process).
// Package cgi implements CGI (Common Gateway Interface) as specified
// in RFC 3875.
//
// Note that using CGI means starting a new process to handle each
// request, which is typically less efficient than using a
// long-running server. This package is intended primarily for
// compatibility with existing systems.
package cgi
import (
"bytes"
"encoding/line"
"exec"
"fmt"
"http"
"io"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
)
// trailingPort matches an explicit ":port" suffix on a Host value,
// capturing the digits.
var trailingPort = regexp.MustCompile(`:([0-9]+)$`)

// Handler runs an executable in a subprocess with a CGI environment.
type Handler struct {
	Path string // path to the CGI executable
	Root string // root URI prefix of handler or empty for "/"
	Env []string // extra environment variables to set, if any
	Logger *log.Logger // optional log for errors or nil to use log.Print
	Args []string // optional arguments to pass to child process
}
// ServeHTTP runs the CGI executable for one request: it builds the
// RFC 3875 meta-variable environment, spawns the child with the request
// body on its stdin, then parses the child's header block from stdout
// and relays status, headers, and body to rw.
func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	root := h.Root
	if root == "" {
		root = "/"
	}
	// The CGI spec cannot convey a chunked request body.
	if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
		rw.WriteHeader(http.StatusBadRequest)
		rw.Write([]byte("Chunked request bodies are not supported by CGI."))
		return
	}
	// PATH_INFO is the URL path remainder beyond the handler's root.
	pathInfo := req.URL.Path
	if root != "/" && strings.HasPrefix(pathInfo, root) {
		pathInfo = pathInfo[len(root):]
	}
	port := "80" // default when Host carries no explicit port
	if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
		port = matches[1]
	}
	// Core CGI meta-variables, per RFC 3875.
	env := []string{
		"SERVER_SOFTWARE=go",
		"SERVER_NAME=" + req.Host,
		"HTTP_HOST=" + req.Host,
		"GATEWAY_INTERFACE=CGI/1.1",
		"REQUEST_METHOD=" + req.Method,
		"QUERY_STRING=" + req.URL.RawQuery,
		"REQUEST_URI=" + req.URL.RawPath,
		"PATH_INFO=" + pathInfo,
		"SCRIPT_NAME=" + root,
		"SCRIPT_FILENAME=" + h.Path,
		"REMOTE_ADDR=" + req.RemoteAddr,
		"REMOTE_HOST=" + req.RemoteAddr,
		"SERVER_PORT=" + port,
	}
	if req.TLS != nil {
		env = append(env, "HTTPS=on")
	}
	// Cookies are flattened into a single semicolon-joined HTTP_COOKIE.
	if len(req.Cookie) > 0 {
		b := new(bytes.Buffer)
		for idx, c := range req.Cookie {
			if idx > 0 {
				b.Write([]byte("; "))
			}
			fmt.Fprintf(b, "%s=%s", c.Name, c.Value)
		}
		env = append(env, "HTTP_COOKIE="+b.String())
	}
	// Remaining request headers become HTTP_* variables; multiple
	// values for one header are comma-joined.
	for k, v := range req.Header {
		k = strings.Map(upperCaseAndUnderscore, k)
		env = append(env, "HTTP_"+k+"="+strings.Join(v, ", "))
	}
	if req.ContentLength > 0 {
		env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
	}
	if ctype := req.Header.Get("Content-Type"); ctype != "" {
		env = append(env, "CONTENT_TYPE="+ctype)
	}
	if h.Env != nil {
		env = append(env, h.Env...)
	}
	// Run the child with its own directory as cwd, like other servers do.
	cwd, pathBase := filepath.Split(h.Path)
	if cwd == "" {
		cwd = "."
	}
	args := []string{h.Path}
	args = append(args, h.Args...)
	cmd, err := exec.Run(
		pathBase,
		args,
		env,
		cwd,
		exec.Pipe, // stdin
		exec.Pipe, // stdout
		exec.PassThrough, // stderr (for now)
	)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		h.printf("CGI error: %v", err)
		return
	}
	defer func() {
		cmd.Stdin.Close()
		cmd.Stdout.Close()
		cmd.Wait(0) // no zombies
	}()
	// Stream the request body to the child concurrently with reading
	// its output, so neither side can deadlock on a full pipe.
	if req.ContentLength != 0 {
		go io.Copy(cmd.Stdin, req.Body)
	}
	linebody := line.NewReader(cmd.Stdout, 1024)
	headers := rw.Header()
	statusCode := http.StatusOK
	// Parse the child's header block: "Name: value" lines up to the
	// first blank line. "Status" sets the HTTP status code.
	for {
		line, isPrefix, err := linebody.ReadLine()
		if isPrefix {
			rw.WriteHeader(http.StatusInternalServerError)
			h.printf("CGI: long header line from subprocess.")
			return
		}
		if err == os.EOF {
			break
		}
		if err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
			h.printf("CGI: error reading headers: %v", err)
			return
		}
		if len(line) == 0 {
			break
		}
		parts := strings.Split(string(line), ":", 2)
		if len(parts) < 2 {
			h.printf("CGI: bogus header line: %s", string(line))
			continue
		}
		header, val := parts[0], parts[1]
		header = strings.TrimSpace(header)
		val = strings.TrimSpace(val)
		switch {
		case header == "Status":
			// Only the leading 3-digit code matters; any reason
			// phrase after it is ignored.
			if len(val) < 3 {
				h.printf("CGI: bogus status (short): %q", val)
				return
			}
			code, err := strconv.Atoi(val[0:3])
			if err != nil {
				h.printf("CGI: bogus status: %q", val)
				h.printf("CGI: line was %q", line)
				return
			}
			statusCode = code
		default:
			headers.Add(header, val)
		}
	}
	rw.WriteHeader(statusCode)
	// Everything after the blank line is the response body.
	_, err = io.Copy(rw, linebody)
	if err != nil {
		h.printf("CGI: copy error: %v", err)
	}
}
// printf logs through h.Logger when one is configured, otherwise
// through the package-level default logger.
func (h *Handler) printf(format string, v ...interface{}) {
	logf := log.Printf
	if h.Logger != nil {
		logf = h.Logger.Printf
	}
	logf(format, v...)
}
// upperCaseAndUnderscore maps one character into the form used for
// CGI environment variable names: lowercase letters are upcased, and
// '-' and '=' both become '_'. All other characters pass through.
func upperCaseAndUnderscore(c int) int {
	switch {
	case 'a' <= c && c <= 'z':
		return c + 'A' - 'a'
	case c == '-':
		return '_'
	case c == '=':
		// Maybe not part of the CGI 'spec' but would mess up
		// the environment in any case, as Go represents the
		// environment as a slice of "key=value" strings.
		return '_'
	}
	// TODO: other transformations in spec or practice?
	return c
}

View File

@ -0,0 +1,273 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for package cgi
package cgi
import (
"bufio"
"exec"
"fmt"
"http"
"http/httptest"
"os"
"strings"
"testing"
)
// cgiScriptWorks records, at package init, whether the Perl helper
// script can actually execute on this system (false e.g. on Windows).
var cgiScriptWorks = canRun("./testdata/test.cgi")

// canRun executes s with no I/O attached and reports whether it
// exited normally with status 0.
func canRun(s string) bool {
	c, err := exec.Run(s, []string{s}, nil, ".", exec.DevNull, exec.DevNull, exec.DevNull)
	if err != nil {
		return false
	}
	w, err := c.Wait(0)
	if err != nil {
		return false
	}
	return w.Exited() && w.ExitStatus() == 0
}
// newRequest parses a raw HTTP request string into an *http.Request
// with a fixed test RemoteAddr, panicking on malformed input so test
// fixtures fail loudly.
func newRequest(httpreq string) *http.Request {
	r := bufio.NewReader(strings.NewReader(httpreq))
	req, err := http.ReadRequest(r)
	if err != nil {
		panic("cgi: bogus http request in test: " + httpreq)
	}
	req.RemoteAddr = "1.2.3.4"
	return req
}
// runCgiTest serves httpreq through h into a ResponseRecorder, parses
// the child's "key=value" line-per-entry body into a map, and checks
// every entry of expectedMap against it. The recorder is returned so
// callers can inspect status and headers too.
func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
	rw := httptest.NewRecorder()
	req := newRequest(httpreq)
	h.ServeHTTP(rw, req)
	// Make a map to hold the test map that the CGI returns.
	m := make(map[string]string)
	linesRead := 0
readlines:
	for {
		line, err := rw.Body.ReadString('\n')
		switch {
		case err == os.EOF:
			break readlines
		case err != nil:
			t.Fatalf("unexpected error reading from CGI: %v", err)
		}
		linesRead++
		trimmedLine := strings.TrimRight(line, "\r\n")
		split := strings.Split(trimmedLine, "=", 2)
		if len(split) != 2 {
			t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
				len(split), linesRead, line, m)
		}
		m[split[0]] = split[1]
	}
	// Every expected key must appear with the expected value.
	for key, expected := range expectedMap {
		if got := m[key]; got != expected {
			t.Errorf("for key %q got %q; expected %q", key, got, expected)
		}
	}
	return rw
}
// skipTest reports whether the CGI host tests should be skipped
// because the Perl helper script cannot run here; it logs the skip.
func skipTest(t *testing.T) bool {
	if cgiScriptWorks {
		return false
	}
	// No Perl on Windows, needed by test.cgi
	// TODO: make the child process be Go, not Perl.
	t.Logf("Skipping test: test.cgi failed.")
	return true
}
// TestCGIBasicGet runs the Perl test script through the Handler and
// checks both the key=value body it echoes and the response headers.
func TestCGIBasicGet(t *testing.T) {
	if skipTest(t) {
		return
	}
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "/test.cgi",
	}
	expectedMap := map[string]string{
		"test":                  "Hello CGI",
		"param-a":               "b",
		"param-foo":             "bar",
		"env-GATEWAY_INTERFACE": "CGI/1.1",
		"env-HTTP_HOST":         "example.com",
		"env-PATH_INFO":         "",
		"env-QUERY_STRING":      "foo=bar&a=b",
		"env-REMOTE_ADDR":       "1.2.3.4",
		"env-REMOTE_HOST":       "1.2.3.4",
		"env-REQUEST_METHOD":    "GET",
		"env-REQUEST_URI":       "/test.cgi?foo=bar&a=b",
		"env-SCRIPT_FILENAME":   "testdata/test.cgi",
		"env-SCRIPT_NAME":       "/test.cgi",
		"env-SERVER_NAME":       "example.com",
		"env-SERVER_PORT":       "80",
		"env-SERVER_SOFTWARE":   "go",
	}
	replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
	if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
		t.Errorf("got a Content-Type of %q; expected %q", got, expected)
	}
	if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
		t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
	}
}
// TestCGIBasicGetAbsPath checks that an absolute Handler.Path shows
// up verbatim in SCRIPT_FILENAME.
func TestCGIBasicGetAbsPath(t *testing.T) {
	if skipTest(t) {
		return
	}
	pwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd error: %v", err)
	}
	h := &Handler{
		Path: pwd + "/testdata/test.cgi",
		Root: "/test.cgi",
	}
	expectedMap := map[string]string{
		"env-REQUEST_URI":     "/test.cgi?foo=bar&a=b",
		"env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
		"env-SCRIPT_NAME":     "/test.cgi",
	}
	runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
// TestPathInfo checks that the URL path beyond Root becomes PATH_INFO.
func TestPathInfo(t *testing.T) {
	if skipTest(t) {
		return
	}
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "/test.cgi",
	}
	expectedMap := map[string]string{
		"param-a":             "b",
		"env-PATH_INFO":       "/extrapath",
		"env-QUERY_STRING":    "a=b",
		"env-REQUEST_URI":     "/test.cgi/extrapath?a=b",
		"env-SCRIPT_FILENAME": "testdata/test.cgi",
		"env-SCRIPT_NAME":     "/test.cgi",
	}
	runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
// TestPathInfoDirRoot checks PATH_INFO when Root ends in a slash:
// the remainder has no leading slash.
func TestPathInfoDirRoot(t *testing.T) {
	if skipTest(t) {
		return
	}
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "/myscript/",
	}
	expectedMap := map[string]string{
		"env-PATH_INFO":       "bar",
		"env-QUERY_STRING":    "a=b",
		"env-REQUEST_URI":     "/myscript/bar?a=b",
		"env-SCRIPT_FILENAME": "testdata/test.cgi",
		"env-SCRIPT_NAME":     "/myscript/",
	}
	runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
// TestDupHeaders checks that repeated request headers are joined:
// cookies with "; " into HTTP_COOKIE, other headers with ", ".
func TestDupHeaders(t *testing.T) {
	if skipTest(t) {
		return
	}
	h := &Handler{
		Path: "testdata/test.cgi",
	}
	expectedMap := map[string]string{
		"env-REQUEST_URI":     "/myscript/bar?a=b",
		"env-SCRIPT_FILENAME": "testdata/test.cgi",
		"env-HTTP_COOKIE":     "nom=NOM; yum=YUM",
		"env-HTTP_X_FOO":      "val1, val2",
	}
	runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
		"Cookie: nom=NOM\n"+
		"Cookie: yum=YUM\n"+
		"X-Foo: val1\n"+
		"X-Foo: val2\n"+
		"Host: example.com\n\n",
		expectedMap)
}
// TestPathInfoNoRoot checks the defaults when Root is empty:
// SCRIPT_NAME is "/" and the whole path becomes PATH_INFO.
func TestPathInfoNoRoot(t *testing.T) {
	if skipTest(t) {
		return
	}
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "",
	}
	expectedMap := map[string]string{
		"env-PATH_INFO":       "/bar",
		"env-QUERY_STRING":    "a=b",
		"env-REQUEST_URI":     "/bar?a=b",
		"env-SCRIPT_FILENAME": "testdata/test.cgi",
		"env-SCRIPT_NAME":     "/",
	}
	runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
// TestCGIBasicPost sends a form-encoded POST through the Handler and
// verifies the script sees the body parameter and CONTENT_LENGTH.
func TestCGIBasicPost(t *testing.T) {
	if skipTest(t) {
		return
	}
	// The blank line separating the headers from the body is required;
	// without it http.ReadRequest cannot parse the request.
	postReq := `POST /test.cgi?a=b HTTP/1.0
Host: example.com
Content-Type: application/x-www-form-urlencoded
Content-Length: 15

postfoo=postbar`
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "/test.cgi",
	}
	expectedMap := map[string]string{
		"test":               "Hello CGI",
		"param-postfoo":      "postbar",
		"env-REQUEST_METHOD": "POST",
		"env-CONTENT_LENGTH": "15",
		"env-REQUEST_URI":    "/test.cgi?a=b",
	}
	runCgiTest(t, h, postReq, expectedMap)
}
// chunk encodes s as one HTTP/1.1 chunked-transfer chunk: the hex
// byte count, CRLF, the data, CRLF.
func chunk(s string) string {
	return fmt.Sprintf("%x", len(s)) + "\r\n" + s + "\r\n"
}
// The CGI spec doesn't allow chunked requests.
// TestCGIPostChunked verifies the Handler rejects them with 400.
func TestCGIPostChunked(t *testing.T) {
	if skipTest(t) {
		return
	}
	// Header block, the required blank separator line, then the
	// chunked body pieces.
	postReq := `POST /test.cgi?a=b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Transfer-Encoding: chunked

` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
	h := &Handler{
		Path: "testdata/test.cgi",
		Root: "/test.cgi",
	}
	expectedMap := map[string]string{}
	resp := runCgiTest(t, h, postReq, expectedMap)
	if got, expected := resp.Code, http.StatusBadRequest; got != expected {
		t.Fatalf("Expected %v response code from chunked request body; got %d",
			expected, got)
	}
}

View File

@ -0,0 +1,74 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests a Go CGI program running under a Go CGI host process.
// Further, the two programs are the same binary, just checking
// their environment to figure out what mode to run in.
package cgi
import (
"fmt"
"http"
"os"
"testing"
)
// This test is a CGI host (testing host.go) that runs its own binary
// as a child process testing the other half of CGI (child.go).
func TestHostingOurselves(t *testing.T) {
	h := &Handler{
		Path: os.Args[0],
		Root: "/test.go",
		// Steer the child binary into TestBeChildCGIProcess only.
		Args: []string{"-test.run=TestBeChildCGIProcess"},
	}
	expectedMap := map[string]string{
		"test":                  "Hello CGI-in-CGI",
		"param-a":               "b",
		"param-foo":             "bar",
		"env-GATEWAY_INTERFACE": "CGI/1.1",
		"env-HTTP_HOST":         "example.com",
		"env-PATH_INFO":         "",
		"env-QUERY_STRING":      "foo=bar&a=b",
		"env-REMOTE_ADDR":       "1.2.3.4",
		"env-REMOTE_HOST":       "1.2.3.4",
		"env-REQUEST_METHOD":    "GET",
		"env-REQUEST_URI":       "/test.go?foo=bar&a=b",
		"env-SCRIPT_FILENAME":   os.Args[0],
		"env-SCRIPT_NAME":       "/test.go",
		"env-SERVER_NAME":       "example.com",
		"env-SERVER_PORT":       "80",
		"env-SERVER_SOFTWARE":   "go",
	}
	replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
	if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
		t.Errorf("got a Content-Type of %q; expected %q", got, expected)
	}
	if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
		t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
	}
}
// Note: not actually a test. This is the CGI child half of
// TestHostingOurselves: when invoked inside a CGI environment it
// serves one request echoing its form values and environment, then
// exits so the test binary never runs anything else.
func TestBeChildCGIProcess(t *testing.T) {
	if os.Getenv("REQUEST_METHOD") == "" {
		// Not in a CGI environment; skipping test.
		return
	}
	Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Header().Set("X-Test-Header", "X-Test-Value")
		fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
		req.ParseForm()
		for k, vv := range req.Form {
			for _, v := range vv {
				fmt.Fprintf(rw, "param-%s=%s\n", k, v)
			}
		}
		for _, kv := range os.Environ() {
			fmt.Fprintf(rw, "env-%s\n", kv)
		}
	}))
	os.Exit(0)
}

View File

@ -11,6 +11,7 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@ -20,26 +21,28 @@ import (
// that uses DefaultTransport. // that uses DefaultTransport.
// Client is not yet very configurable. // Client is not yet very configurable.
type Client struct { type Client struct {
Transport ClientTransport // if nil, DefaultTransport is used Transport RoundTripper // if nil, DefaultTransport is used
} }
// DefaultClient is the default Client and is used by Get, Head, and Post. // DefaultClient is the default Client and is used by Get, Head, and Post.
var DefaultClient = &Client{} var DefaultClient = &Client{}
// ClientTransport is an interface representing the ability to execute a // RoundTripper is an interface representing the ability to execute a
// single HTTP transaction, obtaining the Response for a given Request. // single HTTP transaction, obtaining the Response for a given Request.
type ClientTransport interface { type RoundTripper interface {
// Do executes a single HTTP transaction, returning the Response for the // RoundTrip executes a single HTTP transaction, returning
// request req. Do should not attempt to interpret the response. // the Response for the request req. RoundTrip should not
// In particular, Do must return err == nil if it obtained a response, // attempt to interpret the response. In particular,
// regardless of the response's HTTP status code. A non-nil err should // RoundTrip must return err == nil if it obtained a response,
// be reserved for failure to obtain a response. Similarly, Do should // regardless of the response's HTTP status code. A non-nil
// not attempt to handle higher-level protocol details such as redirects, // err should be reserved for failure to obtain a response.
// Similarly, RoundTrip should not attempt to handle
// higher-level protocol details such as redirects,
// authentication, or cookies. // authentication, or cookies.
// //
// Transports may modify the request. The request Headers field is // RoundTrip may modify the request. The request Headers field is
// guaranteed to be initalized. // guaranteed to be initialized.
Do(req *Request) (resp *Response, err os.Error) RoundTrip(req *Request) (resp *Response, err os.Error)
} }
// Given a string of the form "host", "host:port", or "[ipv6::address]:port", // Given a string of the form "host", "host:port", or "[ipv6::address]:port",
@ -54,40 +57,6 @@ type readClose struct {
io.Closer io.Closer
} }
// matchNoProxy returns true if requests to addr should not use a proxy,
// according to the NO_PROXY or no_proxy environment variable.
func matchNoProxy(addr string) bool {
if len(addr) == 0 {
return false
}
no_proxy := os.Getenv("NO_PROXY")
if len(no_proxy) == 0 {
no_proxy = os.Getenv("no_proxy")
}
if no_proxy == "*" {
return true
}
addr = strings.ToLower(strings.TrimSpace(addr))
if hasPort(addr) {
addr = addr[:strings.LastIndex(addr, ":")]
}
for _, p := range strings.Split(no_proxy, ",", -1) {
p = strings.ToLower(strings.TrimSpace(p))
if len(p) == 0 {
continue
}
if hasPort(p) {
p = p[:strings.LastIndex(p, ":")]
}
if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) {
return true
}
}
return false
}
// Do sends an HTTP request and returns an HTTP response, following // Do sends an HTTP request and returns an HTTP response, following
// policy (e.g. redirects, cookies, auth) as configured on the client. // policy (e.g. redirects, cookies, auth) as configured on the client.
// //
@ -100,11 +69,7 @@ func (c *Client) Do(req *Request) (resp *Response, err os.Error) {
// send issues an HTTP request. Caller should close resp.Body when done reading from it. // send issues an HTTP request. Caller should close resp.Body when done reading from it.
// func send(req *Request, t RoundTripper) (resp *Response, err os.Error) {
// TODO: support persistent connections (multiple requests on a single connection).
// send() method is nonpublic because, when we refactor the code for persistent
// connections, it may no longer make sense to have a method with this signature.
func send(req *Request, t ClientTransport) (resp *Response, err os.Error) {
if t == nil { if t == nil {
t = DefaultTransport t = DefaultTransport
if t == nil { if t == nil {
@ -115,9 +80,9 @@ func send(req *Request, t ClientTransport) (resp *Response, err os.Error) {
// Most the callers of send (Get, Post, et al) don't need // Most the callers of send (Get, Post, et al) don't need
// Headers, leaving it uninitialized. We guarantee to the // Headers, leaving it uninitialized. We guarantee to the
// ClientTransport that this has been initialized, though. // Transport that this has been initialized, though.
if req.Header == nil { if req.Header == nil {
req.Header = Header(make(map[string][]string)) req.Header = make(Header)
} }
info := req.URL.RawUserinfo info := req.URL.RawUserinfo
@ -130,7 +95,7 @@ func send(req *Request, t ClientTransport) (resp *Response, err os.Error) {
} }
req.Header.Set("Authorization", "Basic "+string(encoded)) req.Header.Set("Authorization", "Basic "+string(encoded))
} }
return t.Do(req) return t.RoundTrip(req)
} }
// True if the specified HTTP status code is one for which the Get utility should // True if the specified HTTP status code is one for which the Get utility should
@ -237,7 +202,7 @@ func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response,
req.ProtoMajor = 1 req.ProtoMajor = 1
req.ProtoMinor = 1 req.ProtoMinor = 1
req.Close = true req.Close = true
req.Body = nopCloser{body} req.Body = ioutil.NopCloser(body)
req.Header = Header{ req.Header = Header{
"Content-Type": {bodyType}, "Content-Type": {bodyType},
} }
@ -272,7 +237,7 @@ func (c *Client) PostForm(url string, data map[string]string) (r *Response, err
req.ProtoMinor = 1 req.ProtoMinor = 1
req.Close = true req.Close = true
body := urlencode(data) body := urlencode(data)
req.Body = nopCloser{body} req.Body = ioutil.NopCloser(body)
req.Header = Header{ req.Header = Header{
"Content-Type": {"application/x-www-form-urlencoded"}, "Content-Type": {"application/x-www-form-urlencoded"},
"Content-Length": {strconv.Itoa(body.Len())}, "Content-Length": {strconv.Itoa(body.Len())},
@ -312,9 +277,3 @@ func (c *Client) Head(url string) (r *Response, err os.Error) {
} }
return send(&req, c.Transport) return send(&req, c.Transport)
} }
type nopCloser struct {
io.Reader
}
func (nopCloser) Close() os.Error { return nil }

View File

@ -4,20 +4,28 @@
// Tests for client.go // Tests for client.go
package http package http_test
import ( import (
"fmt"
. "http"
"http/httptest"
"io/ioutil" "io/ioutil"
"os" "os"
"strings" "strings"
"testing" "testing"
) )
func TestClient(t *testing.T) { var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
// TODO: add a proper test suite. Current test merely verifies that w.Header().Set("Last-Modified", "sometime")
// we can retrieve the Google robots.txt file. fmt.Fprintf(w, "User-agent: go\nDisallow: /something/")
})
r, _, err := Get("http://www.google.com/robots.txt") func TestClient(t *testing.T) {
ts := httptest.NewServer(robotsTxtHandler)
defer ts.Close()
r, _, err := Get(ts.URL)
var b []byte var b []byte
if err == nil { if err == nil {
b, err = ioutil.ReadAll(r.Body) b, err = ioutil.ReadAll(r.Body)
@ -31,7 +39,10 @@ func TestClient(t *testing.T) {
} }
func TestClientHead(t *testing.T) { func TestClientHead(t *testing.T) {
r, err := Head("http://www.google.com/robots.txt") ts := httptest.NewServer(robotsTxtHandler)
defer ts.Close()
r, err := Head(ts.URL)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -44,7 +55,7 @@ type recordingTransport struct {
req *Request req *Request
} }
func (t *recordingTransport) Do(req *Request) (resp *Response, err os.Error) { func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) {
t.req = req t.req = req
return nil, os.NewError("dummy impl") return nil, os.NewError("dummy impl")
} }

272
libgo/go/http/cookie.go Normal file
View File

@ -0,0 +1,272 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
)
// This implementation is done according to IETF draft-ietf-httpstate-cookie-23, found at
//
// http://tools.ietf.org/html/draft-ietf-httpstate-cookie-23
// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
// HTTP response or the Cookie header of an HTTP request.
type Cookie struct {
Name string
Value string
Path string
Domain string
Expires time.Time
RawExpires string
// MaxAge=0 means no 'Max-Age' attribute specified.
// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
// MaxAge>0 means Max-Age attribute present and given in seconds
MaxAge int
Secure bool
HttpOnly bool
Raw string
Unparsed []string // Raw text of unparsed attribute-value pairs
}
// readSetCookies parses all "Set-Cookie" values from
// the header h, removes the successfully parsed values from the
// "Set-Cookie" key in h and returns the parsed Cookies.
func readSetCookies(h Header) []*Cookie {
cookies := []*Cookie{}
var unparsedLines []string
for _, line := range h["Set-Cookie"] {
parts := strings.Split(strings.TrimSpace(line), ";", -1)
if len(parts) == 1 && parts[0] == "" {
continue
}
parts[0] = strings.TrimSpace(parts[0])
j := strings.Index(parts[0], "=")
if j < 0 {
unparsedLines = append(unparsedLines, line)
continue
}
name, value := parts[0][:j], parts[0][j+1:]
if !isCookieNameValid(name) {
unparsedLines = append(unparsedLines, line)
continue
}
value, success := parseCookieValue(value)
if !success {
unparsedLines = append(unparsedLines, line)
continue
}
c := &Cookie{
Name: name,
Value: value,
Raw: line,
}
for i := 1; i < len(parts); i++ {
parts[i] = strings.TrimSpace(parts[i])
if len(parts[i]) == 0 {
continue
}
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
val, success = parseCookieValue(val)
if !success {
c.Unparsed = append(c.Unparsed, parts[i])
continue
}
switch strings.ToLower(attr) {
case "secure":
c.Secure = true
continue
case "httponly":
c.HttpOnly = true
continue
case "domain":
c.Domain = val
// TODO: Add domain parsing
continue
case "max-age":
secs, err := strconv.Atoi(val)
if err != nil || secs < 0 || secs != 0 && val[0] == '0' {
break
}
if secs <= 0 {
c.MaxAge = -1
} else {
c.MaxAge = secs
}
continue
case "expires":
c.RawExpires = val
exptime, err := time.Parse(time.RFC1123, val)
if err != nil {
c.Expires = time.Time{}
break
}
c.Expires = *exptime
continue
case "path":
c.Path = val
// TODO: Add path parsing
continue
}
c.Unparsed = append(c.Unparsed, parts[i])
}
cookies = append(cookies, c)
}
h["Set-Cookie"] = unparsedLines, unparsedLines != nil
return cookies
}
// writeSetCookies writes the wire representation of the set-cookies
// to w. Each cookie is written on a separate "Set-Cookie: " line.
// This choice is made because HTTP parsers tend to have a limit on
// line-length, so it seems safer to place cookies on separate lines.
func writeSetCookies(w io.Writer, kk []*Cookie) os.Error {
if kk == nil {
return nil
}
lines := make([]string, 0, len(kk))
var b bytes.Buffer
for _, c := range kk {
b.Reset()
fmt.Fprintf(&b, "%s=%s", c.Name, c.Value)
if len(c.Path) > 0 {
fmt.Fprintf(&b, "; Path=%s", URLEscape(c.Path))
}
if len(c.Domain) > 0 {
fmt.Fprintf(&b, "; Domain=%s", URLEscape(c.Domain))
}
if len(c.Expires.Zone) > 0 {
fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123))
}
if c.MaxAge > 0 {
fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
} else if c.MaxAge < 0 {
fmt.Fprintf(&b, "; Max-Age=0")
}
if c.HttpOnly {
fmt.Fprintf(&b, "; HttpOnly")
}
if c.Secure {
fmt.Fprintf(&b, "; Secure")
}
lines = append(lines, "Set-Cookie: "+b.String()+"\r\n")
}
sort.SortStrings(lines)
for _, l := range lines {
if _, err := io.WriteString(w, l); err != nil {
return err
}
}
return nil
}
// readCookies parses all "Cookie" values from
// the header h, removes the successfully parsed values from the
// "Cookie" key in h and returns the parsed Cookies.
func readCookies(h Header) []*Cookie {
cookies := []*Cookie{}
lines, ok := h["Cookie"]
if !ok {
return cookies
}
unparsedLines := []string{}
for _, line := range lines {
parts := strings.Split(strings.TrimSpace(line), ";", -1)
if len(parts) == 1 && parts[0] == "" {
continue
}
// Per-line attributes
parsedPairs := 0
for i := 0; i < len(parts); i++ {
parts[i] = strings.TrimSpace(parts[i])
if len(parts[i]) == 0 {
continue
}
attr, val := parts[i], ""
if j := strings.Index(attr, "="); j >= 0 {
attr, val = attr[:j], attr[j+1:]
}
if !isCookieNameValid(attr) {
continue
}
val, success := parseCookieValue(val)
if !success {
continue
}
cookies = append(cookies, &Cookie{Name: attr, Value: val})
parsedPairs++
}
if parsedPairs == 0 {
unparsedLines = append(unparsedLines, line)
}
}
h["Cookie"] = unparsedLines, len(unparsedLines) > 0
return cookies
}
// writeCookies writes the wire representation of the cookies
// to w. Each cookie is written on a separate "Cookie: " line.
// This choice is made because HTTP parsers tend to have a limit on
// line-length, so it seems safer to place cookies on separate lines.
func writeCookies(w io.Writer, kk []*Cookie) os.Error {
lines := make([]string, 0, len(kk))
for _, c := range kk {
lines = append(lines, fmt.Sprintf("Cookie: %s=%s\r\n", c.Name, c.Value))
}
sort.SortStrings(lines)
for _, l := range lines {
if _, err := io.WriteString(w, l); err != nil {
return err
}
}
return nil
}
func unquoteCookieValue(v string) string {
if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' {
return v[1 : len(v)-1]
}
return v
}
func isCookieByte(c byte) bool {
switch true {
case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,
0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:
return true
}
return false
}
func parseCookieValue(raw string) (string, bool) {
raw = unquoteCookieValue(raw)
for i := 0; i < len(raw); i++ {
if !isCookieByte(raw[i]) {
return "", false
}
}
return raw, true
}
func isCookieNameValid(raw string) bool {
for _, c := range raw {
if !isToken(byte(c)) {
return false
}
}
return true
}

View File

@ -0,0 +1,110 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"bytes"
"fmt"
"json"
"reflect"
"testing"
)
var writeSetCookiesTests = []struct {
Cookies []*Cookie
Raw string
}{
{
[]*Cookie{
&Cookie{Name: "cookie-1", Value: "v$1"},
&Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
},
"Set-Cookie: cookie-1=v$1\r\n" +
"Set-Cookie: cookie-2=two; Max-Age=3600\r\n",
},
}
func TestWriteSetCookies(t *testing.T) {
for i, tt := range writeSetCookiesTests {
var w bytes.Buffer
writeSetCookies(&w, tt.Cookies)
seen := string(w.Bytes())
if seen != tt.Raw {
t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, seen)
continue
}
}
}
var writeCookiesTests = []struct {
Cookies []*Cookie
Raw string
}{
{
[]*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}},
"Cookie: cookie-1=v$1\r\n",
},
}
func TestWriteCookies(t *testing.T) {
for i, tt := range writeCookiesTests {
var w bytes.Buffer
writeCookies(&w, tt.Cookies)
seen := string(w.Bytes())
if seen != tt.Raw {
t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, seen)
continue
}
}
}
var readSetCookiesTests = []struct {
Header Header
Cookies []*Cookie
}{
{
Header{"Set-Cookie": {"Cookie-1=v$1"}},
[]*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
},
}
func toJSON(v interface{}) string {
b, err := json.Marshal(v)
if err != nil {
return fmt.Sprintf("%#v", v)
}
return string(b)
}
func TestReadSetCookies(t *testing.T) {
for i, tt := range readSetCookiesTests {
c := readSetCookies(tt.Header)
if !reflect.DeepEqual(c, tt.Cookies) {
t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
continue
}
}
}
var readCookiesTests = []struct {
Header Header
Cookies []*Cookie
}{
{
Header{"Cookie": {"Cookie-1=v$1"}},
[]*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1"}},
},
}
func TestReadCookies(t *testing.T) {
for i, tt := range readCookiesTests {
c := readCookies(tt.Header)
if !reflect.DeepEqual(c, tt.Cookies) {
t.Errorf("#%d readCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
continue
}
}
}

View File

@ -7,10 +7,10 @@ package http
import ( import (
"bytes" "bytes"
"io" "io"
"io/ioutil"
"os" "os"
) )
// One of the copies, say from b to r2, could be avoided by using a more // One of the copies, say from b to r2, could be avoided by using a more
// elaborate trick where the other copy is made during Request/Response.Write. // elaborate trick where the other copy is made during Request/Response.Write.
// This would complicate things too much, given that these functions are for // This would complicate things too much, given that these functions are for
@ -23,7 +23,7 @@ func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err os.Error) {
if err = b.Close(); err != nil { if err = b.Close(); err != nil {
return nil, nil, err return nil, nil, err
} }
return nopCloser{&buf}, nopCloser{bytes.NewBuffer(buf.Bytes())}, nil return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
} }
// DumpRequest returns the wire representation of req, // DumpRequest returns the wire representation of req,

View File

@ -0,0 +1,34 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Bridge package to expose http internals to tests in the http_test
// package.
package http
func (t *Transport) IdleConnKeysForTesting() (keys []string) {
keys = make([]string, 0)
t.lk.Lock()
defer t.lk.Unlock()
if t.idleConn == nil {
return
}
for key, _ := range t.idleConn {
keys = append(keys, key)
}
return
}
func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
t.lk.Lock()
defer t.lk.Unlock()
if t.idleConn == nil {
return 0
}
conns, ok := t.idleConn[cacheKey]
if !ok {
return 0
}
return len(conns)
}

View File

@ -11,7 +11,7 @@ import (
"io" "io"
"mime" "mime"
"os" "os"
"path" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -108,11 +108,11 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) {
w.WriteHeader(StatusNotModified) w.WriteHeader(StatusNotModified)
return return
} }
w.SetHeader("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat)) w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat))
// use contents of index.html for directory, if present // use contents of index.html for directory, if present
if d.IsDirectory() { if d.IsDirectory() {
index := name + indexPage index := name + filepath.FromSlash(indexPage)
ff, err := os.Open(index, os.O_RDONLY, 0) ff, err := os.Open(index, os.O_RDONLY, 0)
if err == nil { if err == nil {
defer ff.Close() defer ff.Close()
@ -135,18 +135,18 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) {
code := StatusOK code := StatusOK
// use extension to find content type. // use extension to find content type.
ext := path.Ext(name) ext := filepath.Ext(name)
if ctype := mime.TypeByExtension(ext); ctype != "" { if ctype := mime.TypeByExtension(ext); ctype != "" {
w.SetHeader("Content-Type", ctype) w.Header().Set("Content-Type", ctype)
} else { } else {
// read first chunk to decide between utf-8 text and binary // read first chunk to decide between utf-8 text and binary
var buf [1024]byte var buf [1024]byte
n, _ := io.ReadFull(f, buf[:]) n, _ := io.ReadFull(f, buf[:])
b := buf[:n] b := buf[:n]
if isText(b) { if isText(b) {
w.SetHeader("Content-Type", "text-plain; charset=utf-8") w.Header().Set("Content-Type", "text-plain; charset=utf-8")
} else { } else {
w.SetHeader("Content-Type", "application/octet-stream") // generic binary w.Header().Set("Content-Type", "application/octet-stream") // generic binary
} }
f.Seek(0, 0) // rewind to output whole file f.Seek(0, 0) // rewind to output whole file
} }
@ -166,11 +166,11 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) {
} }
size = ra.length size = ra.length
code = StatusPartialContent code = StatusPartialContent
w.SetHeader("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size)) w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size))
} }
w.SetHeader("Accept-Ranges", "bytes") w.Header().Set("Accept-Ranges", "bytes")
w.SetHeader("Content-Length", strconv.Itoa64(size)) w.Header().Set("Content-Length", strconv.Itoa64(size))
w.WriteHeader(code) w.WriteHeader(code)
@ -202,7 +202,7 @@ func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
return return
} }
path = path[len(f.prefix):] path = path[len(f.prefix):]
serveFile(w, r, f.root+"/"+path, true) serveFile(w, r, filepath.Join(f.root, filepath.FromSlash(path)), true)
} }
// httpRange specifies the byte range to be sent to the client. // httpRange specifies the byte range to be sent to the client.

View File

@ -2,89 +2,22 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package http package http_test
import ( import (
"fmt" "fmt"
. "http"
"http/httptest"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"sync"
"testing" "testing"
) )
var ParseRangeTests = []struct {
s string
length int64
r []httpRange
}{
{"", 0, nil},
{"foo", 0, nil},
{"bytes=", 0, nil},
{"bytes=5-4", 10, nil},
{"bytes=0-2,5-4", 10, nil},
{"bytes=0-9", 10, []httpRange{{0, 10}}},
{"bytes=0-", 10, []httpRange{{0, 10}}},
{"bytes=5-", 10, []httpRange{{5, 5}}},
{"bytes=0-20", 10, []httpRange{{0, 10}}},
{"bytes=15-,0-5", 10, nil},
{"bytes=-5", 10, []httpRange{{5, 5}}},
{"bytes=-15", 10, []httpRange{{0, 10}}},
{"bytes=0-499", 10000, []httpRange{{0, 500}}},
{"bytes=500-999", 10000, []httpRange{{500, 500}}},
{"bytes=-500", 10000, []httpRange{{9500, 500}}},
{"bytes=9500-", 10000, []httpRange{{9500, 500}}},
{"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
{"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
{"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
}
func TestParseRange(t *testing.T) {
for _, test := range ParseRangeTests {
r := test.r
ranges, err := parseRange(test.s, test.length)
if err != nil && r != nil {
t.Errorf("parseRange(%q) returned error %q", test.s, err)
}
if len(ranges) != len(r) {
t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r))
continue
}
for i := range r {
if ranges[i].start != r[i].start {
t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start)
}
if ranges[i].length != r[i].length {
t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length)
}
}
}
}
const ( const (
testFile = "testdata/file" testFile = "testdata/file"
testFileLength = 11 testFileLength = 11
) )
var (
serverOnce sync.Once
serverAddr string
)
func startServer(t *testing.T) {
serverOnce.Do(func() {
HandleFunc("/ServeFile", func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
})
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal("listen:", err)
}
serverAddr = l.Addr().String()
go Serve(l, nil)
})
}
var ServeFileRangeTests = []struct { var ServeFileRangeTests = []struct {
start, end int start, end int
r string r string
@ -99,7 +32,11 @@ var ServeFileRangeTests = []struct {
} }
func TestServeFile(t *testing.T) { func TestServeFile(t *testing.T) {
startServer(t) ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err os.Error var err os.Error
file, err := ioutil.ReadFile(testFile) file, err := ioutil.ReadFile(testFile)
@ -110,7 +47,7 @@ func TestServeFile(t *testing.T) {
// set up the Request (re-used for all tests) // set up the Request (re-used for all tests)
var req Request var req Request
req.Header = make(Header) req.Header = make(Header)
if req.URL, err = ParseURL("http://" + serverAddr + "/ServeFile"); err != nil { if req.URL, err = ParseURL(ts.URL); err != nil {
t.Fatal("ParseURL:", err) t.Fatal("ParseURL:", err)
} }
req.Method = "GET" req.Method = "GET"
@ -149,7 +86,7 @@ func TestServeFile(t *testing.T) {
} }
func getBody(t *testing.T, req Request) (*Response, []byte) { func getBody(t *testing.T, req Request) (*Response, []byte) {
r, err := send(&req, DefaultTransport) r, err := DefaultClient.Do(&req)
if err != nil { if err != nil {
t.Fatal(req.URL.String(), "send:", err) t.Fatal(req.URL.String(), "send:", err)
} }

View File

@ -0,0 +1,59 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The httptest package provides utilities for HTTP testing.
package httptest
import (
"bytes"
"http"
"os"
)
// ResponseRecorder is an implementation of http.ResponseWriter that
// records its mutations for later inspection in tests.
type ResponseRecorder struct {
Code int // the HTTP response code from WriteHeader
HeaderMap http.Header // the HTTP response headers
Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
Flushed bool
}
// NewRecorder returns an initialized ResponseRecorder.
func NewRecorder() *ResponseRecorder {
return &ResponseRecorder{
HeaderMap: make(http.Header),
Body: new(bytes.Buffer),
}
}
// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
const DefaultRemoteAddr = "1.2.3.4"
// Header returns the response headers.
func (rw *ResponseRecorder) Header() http.Header {
return rw.HeaderMap
}
// Write always succeeds and writes to rw.Body, if not nil.
func (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) {
if rw.Body != nil {
rw.Body.Write(buf)
}
if rw.Code == 0 {
rw.Code = http.StatusOK
}
return len(buf), nil
}
// WriteHeader sets rw.Code.
func (rw *ResponseRecorder) WriteHeader(code int) {
rw.Code = code
}
// Flush sets rw.Flushed to true.
func (rw *ResponseRecorder) Flush() {
rw.Flushed = true
}

View File

@ -0,0 +1,70 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of Server
package httptest
import (
"fmt"
"http"
"os"
"net"
)
// A Server is an HTTP server listening on a system-chosen port on the
// local loopback interface, for use in end-to-end HTTP tests.
type Server struct {
URL string // base URL of form http://ipaddr:port with no trailing slash
Listener net.Listener
}
// historyListener keeps track of all connections that it's ever
// accepted.
type historyListener struct {
net.Listener
history []net.Conn
}
func (hs *historyListener) Accept() (c net.Conn, err os.Error) {
c, err = hs.Listener.Accept()
if err == nil {
hs.history = append(hs.history, c)
}
return
}
// NewServer starts and returns a new Server.
// The caller should call Close when finished, to shut it down.
func NewServer(handler http.Handler) *Server {
ts := new(Server)
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
}
}
ts.Listener = &historyListener{l, make([]net.Conn, 0)}
ts.URL = "http://" + l.Addr().String()
server := &http.Server{Handler: handler}
go server.Serve(ts.Listener)
return ts
}
// Close shuts down the server.
func (s *Server) Close() {
s.Listener.Close()
}
// CloseClientConnections closes any currently open HTTP connections
// to the test Server.
func (s *Server) CloseClientConnections() {
hl, ok := s.Listener.(*historyListener)
if !ok {
return
}
for _, conn := range hl.history {
conn.Close()
}
}

View File

@ -25,15 +25,15 @@ var (
// i.e. requests can be read out of sync (but in the same order) while the // i.e. requests can be read out of sync (but in the same order) while the
// respective responses are sent. // respective responses are sent.
type ServerConn struct { type ServerConn struct {
lk sync.Mutex // read-write protects the following fields
c net.Conn c net.Conn
r *bufio.Reader r *bufio.Reader
clsd bool // indicates a graceful close
re, we os.Error // read/write errors re, we os.Error // read/write errors
lastbody io.ReadCloser lastbody io.ReadCloser
nread, nwritten int nread, nwritten int
pipe textproto.Pipeline
pipereq map[*Request]uint pipereq map[*Request]uint
lk sync.Mutex // protected read/write to re,we
pipe textproto.Pipeline
} }
// NewServerConn returns a new ServerConn reading and writing c. If r is not // NewServerConn returns a new ServerConn reading and writing c. If r is not
@ -90,15 +90,21 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) {
defer sc.lk.Unlock() defer sc.lk.Unlock()
return nil, sc.re return nil, sc.re
} }
if sc.r == nil { // connection closed by user in the meantime
defer sc.lk.Unlock()
return nil, os.EBADF
}
r := sc.r
lastbody := sc.lastbody
sc.lastbody = nil
sc.lk.Unlock() sc.lk.Unlock()
// Make sure body is fully consumed, even if user does not call body.Close // Make sure body is fully consumed, even if user does not call body.Close
if sc.lastbody != nil { if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to // body.Close is assumed to be idempotent and multiple calls to
// it should return the error that its first invokation // it should return the error that its first invokation
// returned. // returned.
err = sc.lastbody.Close() err = lastbody.Close()
sc.lastbody = nil
if err != nil { if err != nil {
sc.lk.Lock() sc.lk.Lock()
defer sc.lk.Unlock() defer sc.lk.Unlock()
@ -107,10 +113,10 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) {
} }
} }
req, err = ReadRequest(sc.r) req, err = ReadRequest(r)
sc.lk.Lock()
defer sc.lk.Unlock()
if err != nil { if err != nil {
sc.lk.Lock()
defer sc.lk.Unlock()
if err == io.ErrUnexpectedEOF { if err == io.ErrUnexpectedEOF {
// A close from the opposing client is treated as a // A close from the opposing client is treated as a
// graceful close, even if there was some unparse-able // graceful close, even if there was some unparse-able
@ -119,18 +125,16 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) {
return nil, sc.re return nil, sc.re
} else { } else {
sc.re = err sc.re = err
return return req, err
} }
} }
sc.lastbody = req.Body sc.lastbody = req.Body
sc.nread++ sc.nread++
if req.Close { if req.Close {
sc.lk.Lock()
defer sc.lk.Unlock()
sc.re = ErrPersistEOF sc.re = ErrPersistEOF
return req, sc.re return req, sc.re
} }
return return req, err
} }
// Pending returns the number of unanswered requests // Pending returns the number of unanswered requests
@ -165,24 +169,27 @@ func (sc *ServerConn) Write(req *Request, resp *Response) os.Error {
defer sc.lk.Unlock() defer sc.lk.Unlock()
return sc.we return sc.we
} }
sc.lk.Unlock() if sc.c == nil { // connection closed by user in the meantime
defer sc.lk.Unlock()
return os.EBADF
}
c := sc.c
if sc.nread <= sc.nwritten { if sc.nread <= sc.nwritten {
defer sc.lk.Unlock()
return os.NewError("persist server pipe count") return os.NewError("persist server pipe count")
} }
if resp.Close { if resp.Close {
// After signaling a keep-alive close, any pipelined unread // After signaling a keep-alive close, any pipelined unread
// requests will be lost. It is up to the user to drain them // requests will be lost. It is up to the user to drain them
// before signaling. // before signaling.
sc.lk.Lock()
sc.re = ErrPersistEOF sc.re = ErrPersistEOF
sc.lk.Unlock()
} }
sc.lk.Unlock()
err := resp.Write(sc.c) err := resp.Write(c)
sc.lk.Lock()
defer sc.lk.Unlock()
if err != nil { if err != nil {
sc.lk.Lock()
defer sc.lk.Unlock()
sc.we = err sc.we = err
return err return err
} }
@ -196,14 +203,17 @@ func (sc *ServerConn) Write(req *Request, resp *Response) os.Error {
// responsible for closing the underlying connection. One must call Close to // responsible for closing the underlying connection. One must call Close to
// regain control of that connection and deal with it as desired. // regain control of that connection and deal with it as desired.
type ClientConn struct { type ClientConn struct {
lk sync.Mutex // read-write protects the following fields
c net.Conn c net.Conn
r *bufio.Reader r *bufio.Reader
re, we os.Error // read/write errors re, we os.Error // read/write errors
lastbody io.ReadCloser lastbody io.ReadCloser
nread, nwritten int nread, nwritten int
pipe textproto.Pipeline
pipereq map[*Request]uint pipereq map[*Request]uint
lk sync.Mutex // protects read/write to re,we,pipereq,etc.
pipe textproto.Pipeline
writeReq func(*Request, io.Writer) os.Error
readRes func(buf *bufio.Reader, method string) (*Response, os.Error)
} }
// NewClientConn returns a new ClientConn reading and writing c. If r is not // NewClientConn returns a new ClientConn reading and writing c. If r is not
@ -212,7 +222,21 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
if r == nil { if r == nil {
r = bufio.NewReader(c) r = bufio.NewReader(c)
} }
return &ClientConn{c: c, r: r, pipereq: make(map[*Request]uint)} return &ClientConn{
c: c,
r: r,
pipereq: make(map[*Request]uint),
writeReq: (*Request).Write,
readRes: ReadResponse,
}
}
// NewProxyClientConn works like NewClientConn but writes Requests
// using Request's WriteProxy method.
func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
cc := NewClientConn(c, r)
cc.writeReq = (*Request).WriteProxy
return cc
} }
// Close detaches the ClientConn and returns the underlying connection as well // Close detaches the ClientConn and returns the underlying connection as well
@ -221,11 +245,11 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
// logic. The user should not call Close while Read or Write is in progress. // logic. The user should not call Close while Read or Write is in progress.
func (cc *ClientConn) Close() (c net.Conn, r *bufio.Reader) { func (cc *ClientConn) Close() (c net.Conn, r *bufio.Reader) {
cc.lk.Lock() cc.lk.Lock()
defer cc.lk.Unlock()
c = cc.c c = cc.c
r = cc.r r = cc.r
cc.c = nil cc.c = nil
cc.r = nil cc.r = nil
cc.lk.Unlock()
return return
} }
@ -261,20 +285,22 @@ func (cc *ClientConn) Write(req *Request) (err os.Error) {
defer cc.lk.Unlock() defer cc.lk.Unlock()
return cc.we return cc.we
} }
cc.lk.Unlock() if cc.c == nil { // connection closed by user in the meantime
defer cc.lk.Unlock()
return os.EBADF
}
c := cc.c
if req.Close { if req.Close {
// We write the EOF to the write-side error, because there // We write the EOF to the write-side error, because there
// still might be some pipelined reads // still might be some pipelined reads
cc.lk.Lock()
cc.we = ErrPersistEOF cc.we = ErrPersistEOF
cc.lk.Unlock()
} }
cc.lk.Unlock()
err = req.Write(cc.c) err = cc.writeReq(req, c)
cc.lk.Lock()
defer cc.lk.Unlock()
if err != nil { if err != nil {
cc.lk.Lock()
defer cc.lk.Unlock()
cc.we = err cc.we = err
return err return err
} }
@ -316,15 +342,21 @@ func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) {
defer cc.lk.Unlock() defer cc.lk.Unlock()
return nil, cc.re return nil, cc.re
} }
if cc.r == nil { // connection closed by user in the meantime
defer cc.lk.Unlock()
return nil, os.EBADF
}
r := cc.r
lastbody := cc.lastbody
cc.lastbody = nil
cc.lk.Unlock() cc.lk.Unlock()
// Make sure body is fully consumed, even if user does not call body.Close // Make sure body is fully consumed, even if user does not call body.Close
if cc.lastbody != nil { if lastbody != nil {
// body.Close is assumed to be idempotent and multiple calls to // body.Close is assumed to be idempotent and multiple calls to
// it should return the error that its first invokation // it should return the error that its first invokation
// returned. // returned.
err = cc.lastbody.Close() err = lastbody.Close()
cc.lastbody = nil
if err != nil { if err != nil {
cc.lk.Lock() cc.lk.Lock()
defer cc.lk.Unlock() defer cc.lk.Unlock()
@ -333,24 +365,22 @@ func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) {
} }
} }
resp, err = ReadResponse(cc.r, req.Method) resp, err = cc.readRes(r, req.Method)
cc.lk.Lock()
defer cc.lk.Unlock()
if err != nil { if err != nil {
cc.lk.Lock()
defer cc.lk.Unlock()
cc.re = err cc.re = err
return return resp, err
} }
cc.lastbody = resp.Body cc.lastbody = resp.Body
cc.nread++ cc.nread++
if resp.Close { if resp.Close {
cc.lk.Lock()
defer cc.lk.Unlock()
cc.re = ErrPersistEOF // don't send any more requests cc.re = ErrPersistEOF // don't send any more requests
return resp, cc.re return resp, cc.re
} }
return return resp, err
} }
// Do is convenience method that writes a request and reads a response. // Do is convenience method that writes a request and reads a response.

View File

@ -41,14 +41,14 @@ func init() {
// command line, with arguments separated by NUL bytes. // command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline. // The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) { func Cmdline(w http.ResponseWriter, r *http.Request) {
w.SetHeader("content-type", "text/plain; charset=utf-8") w.Header().Set("content-type", "text/plain; charset=utf-8")
fmt.Fprintf(w, strings.Join(os.Args, "\x00")) fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
} }
// Heap responds with the pprof-formatted heap profile. // Heap responds with the pprof-formatted heap profile.
// The package initialization registers it as /debug/pprof/heap. // The package initialization registers it as /debug/pprof/heap.
func Heap(w http.ResponseWriter, r *http.Request) { func Heap(w http.ResponseWriter, r *http.Request) {
w.SetHeader("content-type", "text/plain; charset=utf-8") w.Header().Set("content-type", "text/plain; charset=utf-8")
pprof.WriteHeapProfile(w) pprof.WriteHeapProfile(w)
} }
@ -56,7 +56,7 @@ func Heap(w http.ResponseWriter, r *http.Request) {
// responding with a table mapping program counters to function names. // responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol. // The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) { func Symbol(w http.ResponseWriter, r *http.Request) {
w.SetHeader("content-type", "text/plain; charset=utf-8") w.Header().Set("content-type", "text/plain; charset=utf-8")
// We don't know how many symbols we have, but we // We don't know how many symbols we have, but we
// do have symbol information. Pprof only cares whether // do have symbol information. Pprof only cares whether

View File

@ -12,31 +12,33 @@ import (
// TODO(mattn): // TODO(mattn):
// test ProxyAuth // test ProxyAuth
var MatchNoProxyTests = []struct { var UseProxyTests = []struct {
host string host string
match bool match bool
}{ }{
{"localhost", true}, // match completely {"localhost", false}, // match completely
{"barbaz.net", true}, // match as .barbaz.net {"barbaz.net", false}, // match as .barbaz.net
{"foobar.com:443", true}, // have a port but match {"foobar.com:443", false}, // have a port but match
{"foofoobar.com", false}, // not match as a part of foobar.com {"foofoobar.com", true}, // not match as a part of foobar.com
{"baz.com", false}, // not match as a part of barbaz.com {"baz.com", true}, // not match as a part of barbaz.com
{"localhost.net", false}, // not match as suffix of address {"localhost.net", true}, // not match as suffix of address
{"local.localhost", false}, // not match as prefix as address {"local.localhost", true}, // not match as prefix as address
{"barbarbaz.net", false}, // not match because NO_PROXY have a '.' {"barbarbaz.net", true}, // not match because NO_PROXY have a '.'
{"www.foobar.com", false}, // not match because NO_PROXY is not .foobar.com {"www.foobar.com", true}, // not match because NO_PROXY is not .foobar.com
} }
func TestMatchNoProxy(t *testing.T) { func TestUseProxy(t *testing.T) {
oldenv := os.Getenv("NO_PROXY") oldenv := os.Getenv("NO_PROXY")
no_proxy := "foobar.com, .barbaz.net , localhost" no_proxy := "foobar.com, .barbaz.net , localhost"
os.Setenv("NO_PROXY", no_proxy) os.Setenv("NO_PROXY", no_proxy)
defer os.Setenv("NO_PROXY", oldenv) defer os.Setenv("NO_PROXY", oldenv)
for _, test := range MatchNoProxyTests { tr := &Transport{}
if matchNoProxy(test.host) != test.match {
for _, test := range UseProxyTests {
if tr.useProxy(test.host) != test.match {
if test.match { if test.match {
t.Errorf("matchNoProxy(%v) = %v, want %v", test.host, !test.match, test.match) t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match)
} else { } else {
t.Errorf("not expected: '%s' shouldn't match as '%s'", test.host, no_proxy) t.Errorf("not expected: '%s' shouldn't match as '%s'", test.host, no_proxy)
} }

View File

@ -0,0 +1,57 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http
import (
"testing"
)
// ParseRangeTests is the table driving TestParseRange.  Each entry pairs a
// raw Range header value and an entity size with the byte ranges parseRange
// is expected to produce.  A nil r means no usable ranges are expected
// (absent, malformed, or unsatisfiable range specifications).
var ParseRangeTests = []struct {
	s string // Range header value as sent by a client
	length int64 // total size of the entity being ranged
	r []httpRange // expected parsed ranges; nil when none are expected
}{
	// Absent, malformed, or unsatisfiable specifications.
	{"", 0, nil},
	{"foo", 0, nil},
	{"bytes=", 0, nil},
	{"bytes=5-4", 10, nil},
	{"bytes=0-2,5-4", 10, nil},
	// Simple satisfiable ranges, including open-ended and clipped forms.
	{"bytes=0-9", 10, []httpRange{{0, 10}}},
	{"bytes=0-", 10, []httpRange{{0, 10}}},
	{"bytes=5-", 10, []httpRange{{5, 5}}},
	{"bytes=0-20", 10, []httpRange{{0, 10}}},
	{"bytes=15-,0-5", 10, nil},
	// Suffix ranges (last N bytes), clipped to the entity size.
	{"bytes=-5", 10, []httpRange{{5, 5}}},
	{"bytes=-15", 10, []httpRange{{0, 10}}},
	// Larger entity: classic examples from the HTTP/1.1 Range grammar.
	{"bytes=0-499", 10000, []httpRange{{0, 500}}},
	{"bytes=500-999", 10000, []httpRange{{500, 500}}},
	{"bytes=-500", 10000, []httpRange{{9500, 500}}},
	{"bytes=9500-", 10000, []httpRange{{9500, 500}}},
	// Multiple ranges, including adjacent and overlapping pairs.
	{"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}},
	{"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}},
	{"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}},
}
// TestParseRange runs parseRange over the ParseRangeTests table and
// verifies both the error behavior and the start/length of every
// parsed byte range against the expected set.
func TestParseRange(t *testing.T) {
	for _, tt := range ParseRangeTests {
		want := tt.r
		got, err := parseRange(tt.s, tt.length)
		if err != nil && want != nil {
			t.Errorf("parseRange(%q) returned error %q", tt.s, err)
		}
		if len(got) != len(want) {
			t.Errorf("len(parseRange(%q)) = %d, want %d", tt.s, len(got), len(want))
			continue
		}
		for i, w := range want {
			if got[i].start != w.start {
				t.Errorf("parseRange(%q)[%d].start = %d, want %d", tt.s, i, got[i].start, w.start)
			}
			if got[i].length != w.length {
				t.Errorf("parseRange(%q)[%d].length = %d, want %d", tt.s, i, got[i].length, w.length)
			}
		}
	}
}

View File

@ -93,7 +93,7 @@ var reqTests = []reqTest{
Proto: "HTTP/1.1", Proto: "HTTP/1.1",
ProtoMajor: 1, ProtoMajor: 1,
ProtoMinor: 1, ProtoMinor: 1,
Header: map[string][]string{}, Header: Header{},
Close: false, Close: false,
ContentLength: -1, ContentLength: -1,
Host: "test", Host: "test",

View File

@ -11,6 +11,7 @@ package http
import ( import (
"bufio" "bufio"
"crypto/tls"
"container/vector" "container/vector"
"fmt" "fmt"
"io" "io"
@ -92,6 +93,9 @@ type Request struct {
// following a hyphen uppercase and the rest lowercase. // following a hyphen uppercase and the rest lowercase.
Header Header Header Header
// Cookie records the HTTP cookies sent with the request.
Cookie []*Cookie
// The message body. // The message body.
Body io.ReadCloser Body io.ReadCloser
@ -134,6 +138,22 @@ type Request struct {
// response has multiple trailer lines with the same key, they will be // response has multiple trailer lines with the same key, they will be
// concatenated, delimited by commas. // concatenated, delimited by commas.
Trailer Header Trailer Header
// RemoteAddr allows HTTP servers and other software to record
// the network address that sent the request, usually for
// logging. This field is not filled in by ReadRequest and
// has no defined format. The HTTP server in this package
// sets RemoteAddr to an "IP:port" address before invoking a
// handler.
RemoteAddr string
// TLS allows HTTP servers and other software to record
// information about the TLS connection on which the request
// was received. This field is not filled in by ReadRequest.
// The HTTP server in this package sets the field for
// TLS-enabled connections before invoking a handler;
// otherwise it leaves the field nil.
TLS *tls.ConnectionState
} }
// ProtoAtLeast returns whether the HTTP protocol used // ProtoAtLeast returns whether the HTTP protocol used
@ -190,6 +210,8 @@ func (req *Request) Write(w io.Writer) os.Error {
// WriteProxy is like Write but writes the request in the form // WriteProxy is like Write but writes the request in the form
// expected by an HTTP proxy. It includes the scheme and host // expected by an HTTP proxy. It includes the scheme and host
// name in the URI instead of using a separate Host: header line. // name in the URI instead of using a separate Host: header line.
// If req.RawURL is non-empty, WriteProxy uses it unchanged
// instead of URL but still omits the Host: header.
func (req *Request) WriteProxy(w io.Writer) os.Error { func (req *Request) WriteProxy(w io.Writer) os.Error {
return req.write(w, true) return req.write(w, true)
} }
@ -206,13 +228,12 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error {
if req.URL.RawQuery != "" { if req.URL.RawQuery != "" {
uri += "?" + req.URL.RawQuery uri += "?" + req.URL.RawQuery
} }
} if usingProxy {
if uri == "" || uri[0] != '/' {
if usingProxy { uri = "/" + uri
if uri == "" || uri[0] != '/' { }
uri = "/" + uri uri = req.URL.Scheme + "://" + host + uri
} }
uri = req.URL.Scheme + "://" + host + uri
} }
fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri) fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri)
@ -243,11 +264,15 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error {
// from Request, and introduce Request methods along the lines of // from Request, and introduce Request methods along the lines of
// Response.{GetHeader,AddHeader} and string constants for "Host", // Response.{GetHeader,AddHeader} and string constants for "Host",
// "User-Agent" and "Referer". // "User-Agent" and "Referer".
err = writeSortedKeyValue(w, req.Header, reqExcludeHeader) err = writeSortedHeader(w, req.Header, reqExcludeHeader)
if err != nil { if err != nil {
return err return err
} }
if err = writeCookies(w, req.Cookie); err != nil {
return err
}
io.WriteString(w, "\r\n") io.WriteString(w, "\r\n")
// Write body and trailer // Write body and trailer
@ -484,6 +509,8 @@ func ReadRequest(b *bufio.Reader) (req *Request, err os.Error) {
return nil, err return nil, err
} }
req.Cookie = readCookies(req.Header)
return req, nil return req, nil
} }

Some files were not shown because too many files have changed in this diff Show More