libgo: update to go1.7rc3
Reviewed-on: https://go-review.googlesource.com/25150
From-SVN: r238662

commit 22b955cca5
parent 9d04a3af4c
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-4c88f31a83ca28963d29d6dc9fcdb2e9b093610c
+b156d71ad75a1b73d0ed805a5370a297648d9270
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
--- a/libgo/MERGE
+++ b/libgo/MERGE
@@ -1,4 +1,4 @@
-f5cf5673590a68c55b2330df9dfcdd6fac75b893
+8707f31c0abc6b607014e843b7cc188b3019daa9
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
--- a/libgo/Makefile.am
+++ b/libgo/Makefile.am
@@ -111,6 +111,7 @@ endif
 toolexeclibgo_DATA = \
 	bufio.gox \
 	bytes.gox \
+	context.gox \
 	crypto.gox \
 	encoding.gox \
 	errors.gox \
@@ -315,6 +316,7 @@ toolexeclibgonethttp_DATA = \
 	net/http/cookiejar.gox \
 	net/http/fcgi.gox \
 	net/http/httptest.gox \
+	net/http/httptrace.gox \
 	net/http/httputil.gox \
 	net/http/pprof.gox
 
@@ -593,6 +595,9 @@ go_bytes_files = \
 go_bytes_c_files = \
 	go/bytes/indexbyte.c
 
+go_context_files = \
+	go/context/context.go
+
 go_crypto_files = \
 	go/crypto/crypto.go
 
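Go 1.7 promoted golang.org/x/net/context into the standard library as package context, which is what the new go_context_files variable wires into the libgo build. A minimal sketch of the (standard, not libgo-specific) API the packaged code provides:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Derive a context that is canceled automatically after 50ms.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	select {
	case <-time.After(time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		// Prints "context deadline exceeded" once the timeout fires.
		fmt.Println("canceled:", ctx.Err())
	}
}
```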
@@ -776,15 +781,19 @@ if LIBGO_IS_LINUX
 go_net_interface_file = go/net/interface_linux.go
 else
 if LIBGO_IS_NETBSD
-go_net_interface_file = go/net/interface_netbsd.go
+go_net_interface_file = go/net/interface_bsdvar.go
 else
 if LIBGO_IS_DRAGONFLY
-go_net_interface_file = go/net/interface_dragonfly.go
+go_net_interface_file = go/net/interface_bsdvar.go
 else
+if LIBGO_IS_OPENBSD
+go_net_interface_file = go/net/interface_bsdvar.go
+else
 go_net_interface_file = go/net/interface_stub.go
+endif
 endif
 endif
 endif
 
 if LIBGO_IS_LINUX
 go_net_cloexec_file = go/net/sock_cloexec.go go/net/hook_cloexec.go
@@ -845,6 +854,7 @@ go_net_common_files = \
 	go/net/nss.go \
 	go/net/parse.go \
 	go/net/pipe.go \
 	go/net/port.go \
+	go/net/fd_poll_runtime.go \
 	go/net/port_unix.go \
 	$(go_net_sendfile_file) \
@@ -991,6 +1001,20 @@ endif
 endif
 endif
 
+if LIBGO_IS_LINUX
+go_os_wait_file = go/os/wait_waitid.go
+else
+if LIBGO_IS_DARWIN
+go_os_wait_file = go/os/wait_waitid.go
+else
+if LIBGO_IS_FREEBSD
+go_os_wait_file = go/os/wait_wait6.go
+else
+go_os_wait_file = go/os/wait_unimp.go
+endif
+endif
+endif
+
 go_os_files = \
 	$(go_os_dir_file) \
 	go/os/dir.go \
@@ -1016,7 +1040,8 @@ go_os_files = \
 	$(go_os_sys_file) \
 	$(go_os_cloexec_file) \
 	go/os/types.go \
-	go/os/types_unix.go
+	go/os/types_unix.go \
+	$(go_os_wait_file)
 
 go_path_files = \
 	go/path/match.go \
@@ -1048,6 +1073,7 @@ go_runtime_files = \
 	go/runtime/error.go \
 	go/runtime/extern.go \
 	go/runtime/mem.go \
+	go/runtime/symtab.go \
 	version.go
 
 version.go: s-version; @true
@@ -1134,6 +1160,7 @@ go_testing_files = \
 	go/testing/benchmark.go \
 	go/testing/cover.go \
 	go/testing/example.go \
+	go/testing/match.go \
 	go/testing/testing.go
 
 go_time_files = \
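go/testing/match.go is the Go 1.7 file that implements the name matching behind the new subtest support. A hedged sketch of the feature it enables, using only the standard testing API:

```go
package example_test

import "testing"

// Each t.Run creates a named subtest; match.go implements the logic
// that lets `go test -run TestParse/simple` select just one of them.
func TestParse(t *testing.T) {
	cases := []struct{ name, input string }{
		{"empty", ""},
		{"simple", "a=1"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			_ = tc.input // real assertions would go here
		})
	}
}
```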
@@ -1174,6 +1201,7 @@ endif
 
 go_archive_tar_files = \
 	go/archive/tar/common.go \
+	go/archive/tar/format.go \
 	go/archive/tar/reader.go \
 	go/archive/tar/stat_unix.go \
 	go/archive/tar/writer.go \
@@ -1192,8 +1220,9 @@ go_compress_bzip2_files = \
 	go/compress/bzip2/move_to_front.go
 
 go_compress_flate_files = \
-	go/compress/flate/copy.go \
 	go/compress/flate/deflate.go \
+	go/compress/flate/deflatefast.go \
+	go/compress/flate/dict_decoder.go \
 	go/compress/flate/huffman_bit_writer.go \
 	go/compress/flate/huffman_code.go \
 	go/compress/flate/inflate.go \
@@ -1225,7 +1254,8 @@ go_crypto_aes_files = \
 	go/crypto/aes/block.go \
 	go/crypto/aes/cipher.go \
 	go/crypto/aes/cipher_generic.go \
-	go/crypto/aes/const.go
+	go/crypto/aes/const.go \
+	go/crypto/aes/modes.go
 go_crypto_cipher_files = \
 	go/crypto/cipher/cbc.go \
 	go/crypto/cipher/cfb.go \
@@ -1257,8 +1287,12 @@ go_crypto_md5_files = \
 if LIBGO_IS_LINUX
 crypto_rand_file = go/crypto/rand/rand_linux.go
 else
+if LIBGO_IS_OPENBSD
+crypto_rand_file = go/crypto/rand/rand_openbsd.go
+else
 crypto_rand_file =
+endif
 endif
 
 go_crypto_rand_files = \
 	go/crypto/rand/eagain.go \
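The files selected above back crypto/rand.Reader with the kernel's getrandom(2) on Linux and, new in this update, getentropy(2) on OpenBSD. From the caller's side the portable interface is unchanged; a small sketch:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32)
	// rand.Read draws from whichever platform entropy source the
	// build selected (getrandom, getentropy, or /dev/urandom).
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}
```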
@@ -1280,10 +1314,12 @@ go_crypto_sha1_files = \
 	go/crypto/sha1/sha1block_generic.go
 go_crypto_sha256_files = \
 	go/crypto/sha256/sha256.go \
-	go/crypto/sha256/sha256block.go
+	go/crypto/sha256/sha256block.go \
+	go/crypto/sha256/sha256block_generic.go
 go_crypto_sha512_files = \
 	go/crypto/sha512/sha512.go \
-	go/crypto/sha512/sha512block.go
+	go/crypto/sha512/sha512block.go \
+	go/crypto/sha512/sha512block_generic.go
 go_crypto_subtle_files = \
 	go/crypto/subtle/constant_time.go
 go_crypto_tls_files = \
@@ -1375,7 +1411,10 @@ go_debug_macho_files = \
 	go/debug/macho/macho.go
 go_debug_pe_files = \
 	go/debug/pe/file.go \
-	go/debug/pe/pe.go
+	go/debug/pe/pe.go \
+	go/debug/pe/section.go \
+	go/debug/pe/string.go \
+	go/debug/pe/symbol.go
 go_debug_plan9obj_files = \
 	go/debug/plan9obj/file.go \
 	go/debug/plan9obj/plan9obj.go
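Go 1.7 split debug/pe's section, string-table, and symbol handling into the new files added above. The caller-facing API is unchanged; a hedged sketch (the input path is hypothetical):

```go
package main

import (
	"debug/pe"
	"fmt"
)

func main() {
	f, err := pe.Open("example.exe") // hypothetical PE binary
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Sections and their sizes come from the section-reading code
	// that now lives in section.go.
	for _, s := range f.Sections {
		fmt.Println(s.Name, s.Size)
	}
}
```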
@@ -1517,6 +1556,87 @@ go_go_internal_gccgoimporter_files = \
 	go/go/internal/gccgoimporter/importer.go \
 	go/go/internal/gccgoimporter/parser.go
 
+go_golang_org_x_net_http2_hpack_files = \
+	go/golang_org/x/net/http2/hpack/encode.go \
+	go/golang_org/x/net/http2/hpack/hpack.go \
+	go/golang_org/x/net/http2/hpack/huffman.go \
+	go/golang_org/x/net/http2/hpack/tables.go
+
+go_golang_org_x_net_lex_httplex_files = \
+	go/golang_org/x/net/lex/httplex/httplex.go
+
+if LIBGO_IS_BSD
+
+if LIBGO_IS_DARWIN
+x_net_route_files = \
+	go/golang_org/x/net/route/interface_classic.go \
+	go/golang_org/x/net/route/interface_multicast.go \
+	go/golang_org/x/net/route/route_classic.go \
+	go/golang_org/x/net/route/sys_darwin.go \
+	go/golang_org/x/net/route/zsys_darwin.go
+endif
+if LIBGO_IS_DRAGONFLY
+x_net_route_files = \
+	go/golang_org/x/net/route/interface_announce.go \
+	go/golang_org/x/net/route/interface_classic.go \
+	go/golang_org/x/net/route/interface_multicast.go \
+	go/golang_org/x/net/route/route_classic.go \
+	go/golang_org/x/net/route/sys_dragonfly.go \
+	go/golang_org/x/net/route/zsys_dragonfly.go
+endif
+if LIBGO_IS_FREEBSD
+if LIBGO_IS_386
+x_net_route_freebsd_file = \
+	go/golang_org/x/net/route/zsys_freebsd_386.go
+endif
+if LIBGO_IS_X86_64
+x_net_route_freebsd_file = \
+	go/golang_org/x/net/route/zsys_freebsd_amd64.go
+endif
+if LIBGO_IS_ARM
+x_net_route_freebsd_file = \
+	go/golang_org/x/net/route/zsys_freebsd_arm.go
+endif
+x_net_route_files = \
+	go/golang_org/x/net/route/interface_announce.go \
+	go/golang_org/x/net/route/interface_freebsd.go \
+	go/golang_org/x/net/route/interface_multicast.go \
+	go/golang_org/x/net/route/route_classic.go \
+	go/golang_org/x/net/route/sys_freebsd.go \
+	$(x_net_route_freebsd_file)
+endif
+if LIBGO_IS_NETBSD
+x_net_route_files = \
+	go/golang_org/x/net/route/interface_announce.go \
+	go/golang_org/x/net/route/interface_classic.go \
+	go/golang_org/x/net/route/route_classic.go \
+	go/golang_org/x/net/route/sys_netbsd.go \
+	go/golang_org/x/net/route/zsys_netbsd.go
+endif
+if LIBGO_IS_OPENBSD
+x_net_route_files = \
+	go/golang_org/x/net/route/route_openbsd.go \
+	go/golang_org/x/net/route/sys_openbsd.go \
+	go/golang_org/x/net/route/zsys_openbsd.go
+endif
+
+go_golang_org_x_net_route_files = \
+	go/golang_org/x/net/route/address.go \
+	go/golang_org/x/net/route/binary.go \
+	go/golang_org/x/net/route/interface.go \
+	go/golang_org/x/net/route/message.go \
+	go/golang_org/x/net/route/route.go \
+	go/golang_org/x/net/route/syscall.go \
+	go/golang_org/x/net/route/sys.go \
+	$(x_net_route_files)
+
+golang_org_x_net_route_lo = \
+	golang_org/x/net/route/route.lo
+golang_org_x_net_route_check = \
+	golang_org/x/net/route/check
+
+endif
+
 go_hash_adler32_files = \
 	go/hash/adler32/adler32.go
 go_hash_crc32_files = \
@@ -1576,11 +1696,8 @@ go_index_suffixarray_files = \
 	go/index/suffixarray/qsufsort.go \
 	go/index/suffixarray/suffixarray.go
 
-go_internal_golang_org_x_net_http2_hpack_files = \
-	go/internal/golang.org/x/net/http2/hpack/encode.go \
-	go/internal/golang.org/x/net/http2/hpack/hpack.go \
-	go/internal/golang.org/x/net/http2/hpack/huffman.go \
-	go/internal/golang.org/x/net/http2/hpack/tables.go
+go_internal_nettrace_files = \
+	go/internal/nettrace/nettrace.go
 go_internal_race_files = \
 	go/internal/race/doc.go \
 	go/internal/race/norace.go
@@ -1603,15 +1720,23 @@ else
 if LIBGO_IS_MIPS64
 internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_mips64x.go
 else
+if LIBGO_IS_S390X
+internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_s390x.go
+else
 internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_generic.go
+endif
 endif
 endif
 endif
 endif
 endif
 else
+if LIBGO_IS_OPENBSD
+internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getentropy_openbsd.go
+else
 internal_syscall_unix_getrandom_files =
+endif
 endif
 
 go_internal_syscall_unix_files = \
 	go/internal/syscall/unix/dummy.go \
@@ -1621,6 +1746,7 @@ go_internal_testenv_files = \
 	go/internal/testenv/testenv.go
 go_internal_trace_files = \
 	go/internal/trace/goroutines.go \
+	go/internal/trace/order.go \
 	go/internal/trace/parser.go
 
 go_io_ioutil_files = \
@@ -1684,8 +1810,8 @@ go_net_http_files = \
 	go/net/http/fs.go \
 	go/net/http/h2_bundle.go \
 	go/net/http/header.go \
+	go/net/http/http.go \
 	go/net/http/jar.go \
-	go/net/http/lex.go \
 	go/net/http/method.go \
 	go/net/http/request.go \
 	go/net/http/response.go \
@@ -1718,10 +1844,13 @@ go_net_http_fcgi_files = \
 	go/net/http/fcgi/child.go \
 	go/net/http/fcgi/fcgi.go
 go_net_http_httptest_files = \
+	go/net/http/httptest/httptest.go \
 	go/net/http/httptest/recorder.go \
 	go/net/http/httptest/server.go
 go_net_http_pprof_files = \
 	go/net/http/pprof/pprof.go
+go_net_http_httptrace_files = \
+	go/net/http/httptrace/trace.go
 go_net_http_httputil_files = \
 	go/net/http/httputil/dump.go \
 	go/net/http/httputil/httputil.go \
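net/http/httptrace, added above, is new in Go 1.7: it exposes hooks into the lifecycle of a single HTTP request. A short sketch using the standard API:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil)
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("connection reused:", info.Reused)
		},
		GotFirstResponseByte: func() {
			fmt.Println("first response byte received")
		},
	}
	// Attach the trace to the request's context before sending it.
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
}
```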
@@ -1768,15 +1897,18 @@ go_os_signal_files = \
 
 if LIBGO_IS_SOLARIS
 os_user_decls_file = go/os/user/decls_solaris.go
+os_user_listgroups_file = go/os/user/listgroups_solaris.go
 else
 os_user_decls_file = go/os/user/decls_unix.go
+os_user_listgroups_file = go/os/user/listgroups_unix.go
 endif
 
 go_os_user_files = \
 	go/os/user/lookup.go \
 	go/os/user/lookup_unix.go \
 	go/os/user/user.go \
-	$(os_user_decls_file)
+	$(os_user_decls_file) \
+	$(os_user_listgroups_file)
 
 go_path_filepath_files = \
 	go/path/filepath/match.go \
@@ -2147,6 +2279,7 @@ libgo_go_objs = \
 	bufio.lo \
 	bytes.lo \
 	bytes/index.lo \
+	context.lo \
 	crypto.lo \
 	encoding.lo \
 	errors.lo \
@@ -2242,6 +2375,9 @@ libgo_go_objs = \
 	go/scanner.lo \
 	go/token.lo \
 	go/types.lo \
+	golang_org/x/net/http2/hpack.lo \
+	golang_org/x/net/lex/httplex.lo \
+	$(golang_org_x_net_route_lo) \
 	hash/adler32.lo \
 	hash/crc32.lo \
 	hash/crc64.lo \
@@ -2250,6 +2386,7 @@ libgo_go_objs = \
 	net/http/cookiejar.lo \
 	net/http/fcgi.lo \
 	net/http/httptest.lo \
+	net/http/httptrace.lo \
 	net/http/httputil.lo \
 	net/http/internal.lo \
 	net/http/pprof.lo \
@@ -2261,7 +2398,7 @@ libgo_go_objs = \
 	image/jpeg.lo \
 	image/png.lo \
 	index/suffixarray.lo \
-	internal/golang.org/x/net/http2/hpack.lo \
+	internal/nettrace.lo \
 	internal/race.lo \
 	internal/singleflight.lo \
 	internal/syscall/unix.lo \
@@ -2460,6 +2597,15 @@ bytes/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: bytes/check
 
+@go_include@ context.lo.dep
+context.lo.dep: $(go_context_files)
+	$(BUILDDEPS)
+context.lo: $(go_context_files)
+	$(BUILDPACKAGE)
+context/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: context/check
+
 @go_include@ crypto.lo.dep
 crypto.lo.dep: $(go_crypto_files)
 	$(BUILDDEPS)
@@ -3275,6 +3421,35 @@ go/internal/gccgoimporter/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: go/internal/gccgoimporter/check
 
+@go_include@ golang_org/x/net/http2/hpack.lo.dep
+golang_org/x/net/http2/hpack.lo.dep: $(go_golang_org_x_net_http2_hpack_files)
+	$(BUILDDEPS)
+golang_org/x/net/http2/hpack.lo: $(go_golang_org_x_net_http2_hpack_files)
+	$(BUILDPACKAGE)
+golang_org/x/net/http2/hpack/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: golang_org/x/net/http2/hpack/check
+
+@go_include@ golang_org/x/net/lex/httplex.lo.dep
+golang_org/x/net/lex/httplex.lo.dep: $(go_golang_org_x_net_lex_httplex_files)
+	$(BUILDDEPS)
+golang_org/x/net/lex/httplex.lo: $(go_golang_org_x_net_lex_httplex_files)
+	$(BUILDPACKAGE)
+golang_org/x/net/lex/httplex/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: golang_org/x/net/lex/httplex/check
+
+if LIBGO_IS_BSD
+@go_include@ golang_org/x/net/route.lo.dep
+golang_org/x/net/route.lo.dep: $(go_golang_org_x_net_route_files)
+	$(BUILDDEPS)
+golang_org/x/net/route.lo: $(go_golang_org_x_net_route_files)
+	$(BUILDPACKAGE)
+golang_org/x/net/route/check: $(CHECK_DEPS)
+	@$(CHECK)
+endif
+.PHONY: golang_org/x/net/route/check
+
 @go_include@ hash/adler32.lo.dep
 hash/adler32.lo.dep: $(go_hash_adler32_files)
 	$(BUILDDEPS)
@@ -3383,14 +3558,14 @@ index/suffixarray/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: index/suffixarray/check
 
-@go_include@ internal/golang.org/x/net/http2/hpack.lo.dep
-internal/golang.org/x/net/http2/hpack.lo.dep: $(go_internal_golang_org_x_net_http2_hpack_files)
+@go_include@ internal/nettrace.lo.dep
+internal/nettrace.lo.dep: $(go_internal_nettrace_files)
 	$(BUILDDEPS)
-internal/golang.org/x/net/http2/hpack.lo: $(go_internal_golang_org_x_net_http2_hpack_files)
+internal/nettrace.lo: $(go_internal_nettrace_files)
 	$(BUILDPACKAGE)
-internal/golang.org/x/net/http2/hpack/check: $(CHECK_DEPS)
+internal/nettrace/check: $(CHECK_DEPS)
 	@$(CHECK)
-.PHONY: internal/golang.org/x/net/http2/hpack/check
+.PHONY: internal/nettrace/check
 
 @go_include@ internal/race.lo.dep
 internal/race.lo.dep: $(go_internal_race_files)
@@ -3593,6 +3768,15 @@ net/http/httptest/check: $(check_deps)
 	@$(CHECK)
 .PHONY: net/http/httptest/check
 
+@go_include@ net/http/httptrace.lo.dep
+net/http/httptrace.lo.dep: $(go_net_http_httptrace_files)
+	$(BUILDDEPS)
+net/http/httptrace.lo: $(go_net_http_httptrace_files)
+	$(BUILDPACKAGE)
+net/http/httptrace/check: $(check_deps)
+	@$(CHECK)
+.PHONY: net/http/httptrace/check
+
 @go_include@ net/http/httputil.lo.dep
 net/http/httputil.lo.dep: $(go_net_http_httputil_files)
 	$(BUILDDEPS)
@@ -3833,6 +4017,8 @@ bufio.gox: bufio.lo
 	$(BUILDGOX)
 bytes.gox: bytes.lo
 	$(BUILDGOX)
+context.gox: context.lo
+	$(BUILDGOX)
 crypto.gox: crypto.lo
 	$(BUILDGOX)
 encoding.gox: encoding.lo
@@ -4027,6 +4213,17 @@ go/internal/gcimporter.gox: go/internal/gcimporter.lo
 go/internal/gccgoimporter.gox: go/internal/gccgoimporter.lo
 	$(BUILDGOX)
+
+golang_org/x/net/http2/hpack.gox: golang_org/x/net/http2/hpack.lo
+	$(BUILDGOX)
+
+golang_org/x/net/lex/httplex.gox: golang_org/x/net/lex/httplex.lo
+	$(BUILDGOX)
+
+if LIBGO_IS_BSD
+golang_org/x/net/route.gox: golang_org/x/net/route.lo
+	$(BUILDGOX)
+endif
 
 hash/adler32.gox: hash/adler32.lo
 	$(BUILDGOX)
 hash/crc32.gox: hash/crc32.lo
@@ -4055,7 +4252,7 @@ image/color/palette.gox: image/color/palette.lo
 index/suffixarray.gox: index/suffixarray.lo
 	$(BUILDGOX)
 
-internal/golang.org/x/net/http2/hpack.gox: internal/golang.org/x/net/http2/hpack.lo
+internal/nettrace.gox: internal/nettrace.lo
 	$(BUILDGOX)
 internal/race.gox: internal/race.lo
 	$(BUILDGOX)
@@ -4107,6 +4304,8 @@ net/http/fcgi.gox: net/http/fcgi.lo
 	$(BUILDGOX)
 net/http/httptest.gox: net/http/httptest.lo
 	$(BUILDGOX)
+net/http/httptrace.gox: net/http/httptrace.lo
+	$(BUILDGOX)
 net/http/httputil.gox: net/http/httputil.lo
 	$(BUILDGOX)
 net/http/pprof.gox: net/http/pprof.lo
@@ -4169,6 +4368,7 @@ unicode/utf8.gox: unicode/utf8.lo
 TEST_PACKAGES = \
 	bufio/check \
 	bytes/check \
+	context/check \
 	errors/check \
 	expvar/check \
 	flag/check \
@@ -4252,6 +4452,9 @@ TEST_PACKAGES = \
 	go/scanner/check \
 	go/token/check \
 	go/types/check \
+	golang_org/x/net/http2/hpack/check \
+	golang_org/x/net/lex/httplex/check \
+	$(golang_org_x_net_route_check) \
 	hash/adler32/check \
 	hash/crc32/check \
 	hash/crc64/check \
@@ -4261,7 +4464,6 @@ TEST_PACKAGES = \
 	image/jpeg/check \
 	image/png/check \
 	index/suffixarray/check \
-	internal/golang.org/x/net/http2/hpack/check \
 	internal/singleflight/check \
 	internal/trace/check \
 	io/ioutil/check \
@@ -4276,6 +4478,7 @@ TEST_PACKAGES = \
 	net/http/cookiejar/check \
 	net/http/fcgi/check \
 	net/http/httptest/check \
+	net/http/httptrace/check \
 	net/http/httputil/check \
 	net/http/internal/check \
 	net/internal/socktest/check \
--- a/libgo/Makefile.in
+++ b/libgo/Makefile.in
@@ -174,14 +174,14 @@ am_libnetgo_a_OBJECTS = $(am__objects_3)
 libnetgo_a_OBJECTS = $(am_libnetgo_a_OBJECTS)
 LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \
-	encoding.lo errors.lo expvar.lo flag.lo fmt.lo hash.lo html.lo \
-	image.lo io.lo log.lo math.lo mime.lo net.lo os.lo path.lo \
-	reflect-go.lo reflect/makefunc_ffi_c.lo regexp.lo \
-	runtime-go.lo sort.lo strconv.lo strings.lo strings/index.lo \
-	sync.lo syscall.lo syscall/errno.lo syscall/signame.lo \
-	syscall/wait.lo testing.lo time-go.lo unicode.lo \
-	archive/tar.lo archive/zip.lo compress/bzip2.lo \
+am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo context.lo \
+	crypto.lo encoding.lo errors.lo expvar.lo flag.lo fmt.lo \
+	hash.lo html.lo image.lo io.lo log.lo math.lo mime.lo net.lo \
+	os.lo path.lo reflect-go.lo reflect/makefunc_ffi_c.lo \
+	regexp.lo runtime-go.lo sort.lo strconv.lo strings.lo \
+	strings/index.lo sync.lo syscall.lo syscall/errno.lo \
+	syscall/signame.lo syscall/wait.lo testing.lo time-go.lo \
+	unicode.lo archive/tar.lo archive/zip.lo compress/bzip2.lo \
 	compress/flate.lo compress/gzip.lo compress/lzw.lo \
 	compress/zlib.lo container/heap.lo container/list.lo \
 	container/ring.lo crypto/aes.lo crypto/cipher.lo crypto/des.lo \
@@ -199,22 +199,24 @@ am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \
 	go/ast.lo go/build.lo go/constant.lo go/doc.lo go/format.lo \
 	go/importer.lo go/internal/gcimporter.lo \
 	go/internal/gccgoimporter.lo go/parser.lo go/printer.lo \
-	go/scanner.lo go/token.lo go/types.lo hash/adler32.lo \
-	hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
-	net/http/cookiejar.lo net/http/fcgi.lo net/http/httptest.lo \
+	go/scanner.lo go/token.lo go/types.lo \
+	golang_org/x/net/http2/hpack.lo \
+	golang_org/x/net/lex/httplex.lo $(golang_org_x_net_route_lo) \
+	hash/adler32.lo hash/crc32.lo hash/crc64.lo hash/fnv.lo \
+	net/http/cgi.lo net/http/cookiejar.lo net/http/fcgi.lo \
+	net/http/httptest.lo net/http/httptrace.lo \
 	net/http/httputil.lo net/http/internal.lo net/http/pprof.lo \
 	image/color.lo image/color/palette.lo image/draw.lo \
 	image/gif.lo image/internal/imageutil.lo image/jpeg.lo \
-	image/png.lo index/suffixarray.lo \
-	internal/golang.org/x/net/http2/hpack.lo internal/race.lo \
-	internal/singleflight.lo internal/syscall/unix.lo \
-	internal/testenv.lo internal/trace.lo io/ioutil.lo \
-	log/syslog.lo log/syslog/syslog_c.lo math/big.lo math/cmplx.lo \
-	math/rand.lo mime/multipart.lo mime/quotedprintable.lo \
-	net/http.lo net/internal/socktest.lo net/mail.lo net/rpc.lo \
-	net/smtp.lo net/textproto.lo net/url.lo old/regexp.lo \
-	old/template.lo os/exec.lo $(am__DEPENDENCIES_1) os/signal.lo \
-	os/user.lo path/filepath.lo regexp/syntax.lo \
+	image/png.lo index/suffixarray.lo internal/nettrace.lo \
+	internal/race.lo internal/singleflight.lo \
+	internal/syscall/unix.lo internal/testenv.lo internal/trace.lo \
+	io/ioutil.lo log/syslog.lo log/syslog/syslog_c.lo math/big.lo \
+	math/cmplx.lo math/rand.lo mime/multipart.lo \
+	mime/quotedprintable.lo net/http.lo net/internal/socktest.lo \
+	net/mail.lo net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
+	old/regexp.lo old/template.lo os/exec.lo $(am__DEPENDENCIES_1) \
+	os/signal.lo os/user.lo path/filepath.lo regexp/syntax.lo \
 	net/rpc/jsonrpc.lo runtime/debug.lo runtime/pprof.lo \
 	sync/atomic.lo sync/atomic_c.lo text/scanner.lo \
 	text/tabwriter.lo text/template.lo text/template/parse.lo \
@@ -576,6 +578,7 @@ FLAGS_TO_PASS = $(AM_MAKEFLAGS)
 toolexeclibgo_DATA = \
 	bufio.gox \
 	bytes.gox \
+	context.gox \
 	crypto.gox \
 	encoding.gox \
 	errors.gox \
@@ -758,6 +761,7 @@ toolexeclibgonethttp_DATA = \
 	net/http/cookiejar.gox \
 	net/http/fcgi.gox \
 	net/http/httptest.gox \
+	net/http/httptrace.gox \
 	net/http/httputil.gox \
 	net/http/pprof.gox
 
@@ -946,6 +950,9 @@ go_bytes_files = \
 go_bytes_c_files = \
 	go/bytes/indexbyte.c
 
+go_context_files = \
+	go/context/context.go
+
 go_crypto_files = \
 	go/crypto/crypto.go
 
@@ -1086,9 +1093,10 @@ go_mime_files = \
 @LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_dragonfly.go
 @LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_sendfile_file = go/net/sendfile_freebsd.go
 @LIBGO_IS_LINUX_TRUE@go_net_sendfile_file = go/net/sendfile_linux.go
-@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_stub.go
-@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_dragonfly.go
-@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@go_net_interface_file = go/net/interface_netbsd.go
+@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_OPENBSD_FALSE@go_net_interface_file = go/net/interface_stub.go
+@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@@LIBGO_IS_OPENBSD_TRUE@go_net_interface_file = go/net/interface_bsdvar.go
+@LIBGO_IS_DRAGONFLY_TRUE@@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_FALSE@go_net_interface_file = go/net/interface_bsdvar.go
+@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_NETBSD_TRUE@go_net_interface_file = go/net/interface_bsdvar.go
 @LIBGO_IS_LINUX_TRUE@go_net_interface_file = go/net/interface_linux.go
 @LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_net_cloexec_file = go/net/sys_cloexec.go
 @LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_LINUX_FALSE@go_net_cloexec_file = go/net/sock_cloexec.go go/net/hook_cloexec.go
@@ -1129,6 +1137,7 @@ go_net_common_files = \
 	go/net/nss.go \
 	go/net/parse.go \
 	go/net/pipe.go \
 	go/net/port.go \
+	go/net/fd_poll_runtime.go \
 	go/net/port_unix.go \
 	$(go_net_sendfile_file) \
@@ -1190,6 +1199,10 @@ go_netgo_files = \
 @LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_FALSE@@LIBGO_IS_FREEBSD_TRUE@go_os_sticky_file = go/os/sticky_bsd.go
 @LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_DRAGONFLY_TRUE@go_os_sticky_file = go/os/sticky_bsd.go
 @LIBGO_IS_DARWIN_TRUE@go_os_sticky_file = go/os/sticky_bsd.go
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_FALSE@@LIBGO_IS_LINUX_FALSE@go_os_wait_file = go/os/wait_unimp.go
+@LIBGO_IS_DARWIN_FALSE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_LINUX_FALSE@go_os_wait_file = go/os/wait_wait6.go
+@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@go_os_wait_file = go/os/wait_waitid.go
+@LIBGO_IS_LINUX_TRUE@go_os_wait_file = go/os/wait_waitid.go
 go_os_files = \
 	$(go_os_dir_file) \
 	go/os/dir.go \
@@ -1215,7 +1228,8 @@ go_os_files = \
 	$(go_os_sys_file) \
 	$(go_os_cloexec_file) \
 	go/os/types.go \
-	go/os/types_unix.go
+	go/os/types_unix.go \
+	$(go_os_wait_file)
 
 go_path_files = \
 	go/path/match.go \
@@ -1248,6 +1262,7 @@ go_runtime_files = \
 	go/runtime/error.go \
 	go/runtime/extern.go \
 	go/runtime/mem.go \
+	go/runtime/symtab.go \
 	version.go
 
 noinst_DATA = zstdpkglist.go
@@ -1304,6 +1319,7 @@ go_testing_files = \
 	go/testing/benchmark.go \
 	go/testing/cover.go \
 	go/testing/example.go \
+	go/testing/match.go \
 	go/testing/testing.go
 
 go_time_files = \
@@ -1331,6 +1347,7 @@ go_unicode_files = \
 @LIBGO_IS_SOLARIS_TRUE@archive_tar_atim_file = go/archive/tar/stat_atim.go
 go_archive_tar_files = \
 	go/archive/tar/common.go \
+	go/archive/tar/format.go \
 	go/archive/tar/reader.go \
 	go/archive/tar/stat_unix.go \
 	go/archive/tar/writer.go \
@@ -1349,8 +1366,9 @@ go_compress_bzip2_files = \
 	go/compress/bzip2/move_to_front.go
 
 go_compress_flate_files = \
-	go/compress/flate/copy.go \
 	go/compress/flate/deflate.go \
+	go/compress/flate/deflatefast.go \
+	go/compress/flate/dict_decoder.go \
 	go/compress/flate/huffman_bit_writer.go \
 	go/compress/flate/huffman_code.go \
 	go/compress/flate/inflate.go \
@@ -1382,7 +1400,8 @@ go_crypto_aes_files = \
 	go/crypto/aes/block.go \
 	go/crypto/aes/cipher.go \
 	go/crypto/aes/cipher_generic.go \
-	go/crypto/aes/const.go
+	go/crypto/aes/const.go \
+	go/crypto/aes/modes.go
 
 go_crypto_cipher_files = \
 	go/crypto/cipher/cbc.go \
@@ -1418,7 +1437,8 @@ go_crypto_md5_files = \
 	go/crypto/md5/md5block.go \
 	go/crypto/md5/md5block_generic.go
 
-@LIBGO_IS_LINUX_FALSE@crypto_rand_file =
+@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_FALSE@crypto_rand_file =
+@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_TRUE@crypto_rand_file = go/crypto/rand/rand_openbsd.go
 @LIBGO_IS_LINUX_TRUE@crypto_rand_file = go/crypto/rand/rand_linux.go
 go_crypto_rand_files = \
 	go/crypto/rand/eagain.go \
@@ -1443,11 +1463,13 @@ go_crypto_sha1_files = \
 
 go_crypto_sha256_files = \
 	go/crypto/sha256/sha256.go \
-	go/crypto/sha256/sha256block.go
+	go/crypto/sha256/sha256block.go \
+	go/crypto/sha256/sha256block_generic.go
 
 go_crypto_sha512_files = \
 	go/crypto/sha512/sha512.go \
-	go/crypto/sha512/sha512block.go
+	go/crypto/sha512/sha512block.go \
+	go/crypto/sha512/sha512block_generic.go
 
 go_crypto_subtle_files = \
 	go/crypto/subtle/constant_time.go
@@ -1523,7 +1545,10 @@ go_debug_macho_files = \
 
 go_debug_pe_files = \
 	go/debug/pe/file.go \
-	go/debug/pe/pe.go
+	go/debug/pe/pe.go \
+	go/debug/pe/section.go \
+	go/debug/pe/string.go \
+	go/debug/pe/symbol.go
 
 go_debug_plan9obj_files = \
 	go/debug/plan9obj/file.go \
@@ -1688,6 +1713,39 @@ go_go_internal_gccgoimporter_files = \
 	go/go/internal/gccgoimporter/importer.go \
 	go/go/internal/gccgoimporter/parser.go
 
+go_golang_org_x_net_http2_hpack_files = \
+	go/golang_org/x/net/http2/hpack/encode.go \
+	go/golang_org/x/net/http2/hpack/hpack.go \
+	go/golang_org/x/net/http2/hpack/huffman.go \
+	go/golang_org/x/net/http2/hpack/tables.go
+
+go_golang_org_x_net_lex_httplex_files = \
+	go/golang_org/x/net/lex/httplex/httplex.go
+
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@x_net_route_files =
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@x_net_route_files =
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@x_net_route_files =
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@x_net_route_files =
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_OPENBSD_TRUE@x_net_route_files =
+@LIBGO_IS_386_TRUE@@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@x_net_route_freebsd_file =
+@LIBGO_IS_ARM_TRUE@@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@x_net_route_freebsd_file =
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_X86_64_TRUE@x_net_route_freebsd_file =
+@LIBGO_IS_BSD_TRUE@go_golang_org_x_net_route_files = \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/address.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/binary.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/interface.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/message.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/route.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/syscall.go \
+@LIBGO_IS_BSD_TRUE@	go/golang_org/x/net/route/sys.go \
+@LIBGO_IS_BSD_TRUE@	$(x_net_route_files)
+
+@LIBGO_IS_BSD_TRUE@golang_org_x_net_route_lo = \
+@LIBGO_IS_BSD_TRUE@	golang_org/x/net/route/route.lo
+
+@LIBGO_IS_BSD_TRUE@golang_org_x_net_route_check = \
+@LIBGO_IS_BSD_TRUE@	golang_org/x/net/route/check
+
 go_hash_adler32_files = \
 	go/hash/adler32/adler32.go
 
@@ -1750,11 +1808,8 @@ go_index_suffixarray_files = \
 	go/index/suffixarray/qsufsort.go \
 	go/index/suffixarray/suffixarray.go
 
-go_internal_golang_org_x_net_http2_hpack_files = \
-	go/internal/golang.org/x/net/http2/hpack/encode.go \
-	go/internal/golang.org/x/net/http2/hpack/hpack.go \
-	go/internal/golang.org/x/net/http2/hpack/huffman.go \
-	go/internal/golang.org/x/net/http2/hpack/tables.go
+go_internal_nettrace_files = \
+	go/internal/nettrace/nettrace.go
 
 go_internal_race_files = \
 	go/internal/race/doc.go \
@@ -1763,13 +1818,15 @@ go_internal_race_files = \
 go_internal_singleflight_files = \
 	go/internal/singleflight/singleflight.go
 
-@LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_MIPS64_FALSE@@LIBGO_IS_PPC64_FALSE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_generic.go
+@LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_MIPS64_FALSE@@LIBGO_IS_PPC64_FALSE@@LIBGO_IS_S390X_FALSE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_generic.go
+@LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_MIPS64_FALSE@@LIBGO_IS_PPC64_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_s390x.go
 @LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_MIPS64_TRUE@@LIBGO_IS_PPC64_FALSE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_mips64x.go
 @LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_PPC64_TRUE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_ppc64x.go
 @LIBGO_IS_386_FALSE@@LIBGO_IS_ARM_TRUE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_X86_64_FALSE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_arm.go
 @LIBGO_IS_386_FALSE@@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_X86_64_TRUE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_amd64.go
 @LIBGO_IS_386_TRUE@@LIBGO_IS_LINUX_TRUE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getrandom_linux.go go/internal/syscall/unix/getrandom_linux_386.go
-@LIBGO_IS_LINUX_FALSE@internal_syscall_unix_getrandom_files =
+@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_FALSE@internal_syscall_unix_getrandom_files =
+@LIBGO_IS_LINUX_FALSE@@LIBGO_IS_OPENBSD_TRUE@internal_syscall_unix_getrandom_files = go/internal/syscall/unix/getentropy_openbsd.go
 go_internal_syscall_unix_files = \
 	go/internal/syscall/unix/dummy.go \
 	$(internal_syscall_unix_getrandom_files)
@@ -1779,6 +1836,7 @@ go_internal_testenv_files = \
 
 go_internal_trace_files = \
 	go/internal/trace/goroutines.go \
+	go/internal/trace/order.go \
 	go/internal/trace/parser.go
 
 go_io_ioutil_files = \
@@ -1844,8 +1902,8 @@ go_net_http_files = \
 	go/net/http/fs.go \
 	go/net/http/h2_bundle.go \
 	go/net/http/header.go \
+	go/net/http/http.go \
 	go/net/http/jar.go \
-	go/net/http/lex.go \
 	go/net/http/method.go \
 	go/net/http/request.go \
 	go/net/http/response.go \
@@ -1885,12 +1943,16 @@ go_net_http_fcgi_files = \
 	go/net/http/fcgi/fcgi.go
 
 go_net_http_httptest_files = \
+	go/net/http/httptest/httptest.go \
 	go/net/http/httptest/recorder.go \
 	go/net/http/httptest/server.go
 
 go_net_http_pprof_files = \
 	go/net/http/pprof/pprof.go
 
+go_net_http_httptrace_files = \
+	go/net/http/httptrace/trace.go
+
 go_net_http_httputil_files = \
 	go/net/http/httputil/dump.go \
 	go/net/http/httputil/httputil.go \
@@ -1932,11 +1994,14 @@ go_os_signal_files = \
 
 @LIBGO_IS_SOLARIS_FALSE@os_user_decls_file = go/os/user/decls_unix.go
 @LIBGO_IS_SOLARIS_TRUE@os_user_decls_file = go/os/user/decls_solaris.go
+@LIBGO_IS_SOLARIS_FALSE@os_user_listgroups_file = go/os/user/listgroups_unix.go
+@LIBGO_IS_SOLARIS_TRUE@os_user_listgroups_file = go/os/user/listgroups_solaris.go
 go_os_user_files = \
 	go/os/user/lookup.go \
 	go/os/user/lookup_unix.go \
 	go/os/user/user.go \
-	$(os_user_decls_file)
+	$(os_user_decls_file) \
+	$(os_user_listgroups_file)
 
 go_path_filepath_files = \
 	go/path/filepath/match.go \
@@ -2158,6 +2223,7 @@ libgo_go_objs = \
 	bufio.lo \
 	bytes.lo \
 	bytes/index.lo \
+	context.lo \
 	crypto.lo \
 	encoding.lo \
 	errors.lo \
@@ -2253,6 +2319,9 @@ libgo_go_objs = \
 	go/scanner.lo \
 	go/token.lo \
 	go/types.lo \
+	golang_org/x/net/http2/hpack.lo \
+	golang_org/x/net/lex/httplex.lo \
+	$(golang_org_x_net_route_lo) \
 	hash/adler32.lo \
 	hash/crc32.lo \
 	hash/crc64.lo \
@@ -2261,6 +2330,7 @@ libgo_go_objs = \
 	net/http/cookiejar.lo \
 	net/http/fcgi.lo \
 	net/http/httptest.lo \
+	net/http/httptrace.lo \
 	net/http/httputil.lo \
 	net/http/internal.lo \
 	net/http/pprof.lo \
@@ -2272,7 +2342,7 @@ libgo_go_objs = \
 	image/jpeg.lo \
 	image/png.lo \
 	index/suffixarray.lo \
-	internal/golang.org/x/net/http2/hpack.lo \
+	internal/nettrace.lo \
 	internal/race.lo \
 	internal/singleflight.lo \
 	internal/syscall/unix.lo \
@@ -2439,6 +2509,7 @@ BUILDGOX = \
 TEST_PACKAGES = \
 	bufio/check \
 	bytes/check \
+	context/check \
 	errors/check \
 	expvar/check \
 	flag/check \
@@ -2522,6 +2593,9 @@ TEST_PACKAGES = \
 	go/scanner/check \
 	go/token/check \
 	go/types/check \
+	golang_org/x/net/http2/hpack/check \
+	golang_org/x/net/lex/httplex/check \
+	$(golang_org_x_net_route_check) \
 	hash/adler32/check \
 	hash/crc32/check \
 	hash/crc64/check \
@@ -2531,7 +2605,6 @@ TEST_PACKAGES = \
 	image/jpeg/check \
 	image/png/check \
 	index/suffixarray/check \
-	internal/golang.org/x/net/http2/hpack/check \
 	internal/singleflight/check \
 	internal/trace/check \
 	io/ioutil/check \
@@ -2546,6 +2619,7 @@ TEST_PACKAGES = \
 	net/http/cookiejar/check \
 	net/http/fcgi/check \
 	net/http/httptest/check \
+	net/http/httptrace/check \
 	net/http/httputil/check \
 	net/http/internal/check \
 	net/internal/socktest/check \
@@ -4761,6 +4835,34 @@ s-zstdpkglist: Makefile
 	echo '}' >> zstdpkglist.go.tmp
 	$(SHELL) $(srcdir)/mvifdiff.sh zstdpkglist.go.tmp zstdpkglist.go
 	$(STAMP) $@
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@	go/golang_org/x/net/route/interface_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@	go/golang_org/x/net/route/interface_multicast.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@	go/golang_org/x/net/route/route_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@	go/golang_org/x/net/route/sys_darwin.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DARWIN_TRUE@	go/golang_org/x/net/route/zsys_darwin.go
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/interface_announce.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/interface_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/interface_multicast.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/route_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/sys_dragonfly.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_DRAGONFLY_TRUE@	go/golang_org/x/net/route/zsys_dragonfly.go
+@LIBGO_IS_386_TRUE@@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/zsys_freebsd_386.go
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@@LIBGO_IS_X86_64_TRUE@	go/golang_org/x/net/route/zsys_freebsd_amd64.go
+@LIBGO_IS_ARM_TRUE@@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/zsys_freebsd_arm.go
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/interface_announce.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/interface_freebsd.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/interface_multicast.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/route_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	go/golang_org/x/net/route/sys_freebsd.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_FREEBSD_TRUE@	$(x_net_route_freebsd_file)
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@	go/golang_org/x/net/route/interface_announce.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@	go/golang_org/x/net/route/interface_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@	go/golang_org/x/net/route/route_classic.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@	go/golang_org/x/net/route/sys_netbsd.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_NETBSD_TRUE@	go/golang_org/x/net/route/zsys_netbsd.go
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_OPENBSD_TRUE@	go/golang_org/x/net/route/route_openbsd.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_OPENBSD_TRUE@	go/golang_org/x/net/route/sys_openbsd.go \
+@LIBGO_IS_BSD_TRUE@@LIBGO_IS_OPENBSD_TRUE@	go/golang_org/x/net/route/zsys_openbsd.go
 
 libcalls.go: s-libcalls; @true
 s-libcalls: libcalls-list go/syscall/mksyscall.awk $(go_base_syscall_files)
@@ -4841,6 +4943,15 @@ bytes/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: bytes/check
 
+@go_include@ context.lo.dep
+context.lo.dep: $(go_context_files)
+	$(BUILDDEPS)
+context.lo: $(go_context_files)
+	$(BUILDPACKAGE)
+context/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: context/check
+
 @go_include@ crypto.lo.dep
 crypto.lo.dep: $(go_crypto_files)
 	$(BUILDDEPS)
@@ -5656,6 +5767,33 @@ go/internal/gccgoimporter/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: go/internal/gccgoimporter/check
 
+@go_include@ golang_org/x/net/http2/hpack.lo.dep
+golang_org/x/net/http2/hpack.lo.dep: $(go_golang_org_x_net_http2_hpack_files)
+	$(BUILDDEPS)
+golang_org/x/net/http2/hpack.lo: $(go_golang_org_x_net_http2_hpack_files)
+	$(BUILDPACKAGE)
+golang_org/x/net/http2/hpack/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: golang_org/x/net/http2/hpack/check
+
+@go_include@ golang_org/x/net/lex/httplex.lo.dep
+golang_org/x/net/lex/httplex.lo.dep: $(go_golang_org_x_net_lex_httplex_files)
+	$(BUILDDEPS)
+golang_org/x/net/lex/httplex.lo: $(go_golang_org_x_net_lex_httplex_files)
+	$(BUILDPACKAGE)
+golang_org/x/net/lex/httplex/check: $(CHECK_DEPS)
+	@$(CHECK)
+.PHONY: golang_org/x/net/lex/httplex/check
+
+@LIBGO_IS_BSD_TRUE@@go_include@ golang_org/x/net/route.lo.dep
+@LIBGO_IS_BSD_TRUE@golang_org/x/net/route.lo.dep: $(go_golang_org_x_net_route_files)
+@LIBGO_IS_BSD_TRUE@	$(BUILDDEPS)
+@LIBGO_IS_BSD_TRUE@golang_org/x/net/route.lo: $(go_golang_org_x_net_route_files)
+@LIBGO_IS_BSD_TRUE@	$(BUILDPACKAGE)
+@LIBGO_IS_BSD_TRUE@golang_org/x/net/route/check: $(CHECK_DEPS)
+@LIBGO_IS_BSD_TRUE@	@$(CHECK)
+.PHONY: golang_org/x/net/route/check
+
 @go_include@ hash/adler32.lo.dep
 hash/adler32.lo.dep: $(go_hash_adler32_files)
 	$(BUILDDEPS)
@@ -5764,14 +5902,14 @@ index/suffixarray/check: $(CHECK_DEPS)
 	@$(CHECK)
 .PHONY: index/suffixarray/check
 
-@go_include@ internal/golang.org/x/net/http2/hpack.lo.dep
-internal/golang.org/x/net/http2/hpack.lo.dep: $(go_internal_golang_org_x_net_http2_hpack_files)
+@go_include@ internal/nettrace.lo.dep
+internal/nettrace.lo.dep: $(go_internal_nettrace_files)
 	$(BUILDDEPS)
-internal/golang.org/x/net/http2/hpack.lo: $(go_internal_golang_org_x_net_http2_hpack_files)
+internal/nettrace.lo: $(go_internal_nettrace_files)
 	$(BUILDPACKAGE)
-internal/golang.org/x/net/http2/hpack/check: $(CHECK_DEPS)
+internal/nettrace/check: $(CHECK_DEPS)
 	@$(CHECK)
-.PHONY: internal/golang.org/x/net/http2/hpack/check
+.PHONY: internal/nettrace/check
 
 @go_include@ internal/race.lo.dep
 internal/race.lo.dep: $(go_internal_race_files)
@@ -5974,6 +6112,15 @@ net/http/httptest/check: $(check_deps)
 	@$(CHECK)
 .PHONY: net/http/httptest/check
 
+@go_include@ net/http/httptrace.lo.dep
+net/http/httptrace.lo.dep: $(go_net_http_httptrace_files)
+	$(BUILDDEPS)
+net/http/httptrace.lo: $(go_net_http_httptrace_files)
+	$(BUILDPACKAGE)
+net/http/httptrace/check: $(check_deps)
+	@$(CHECK)
+.PHONY: net/http/httptrace/check
+
 @go_include@ net/http/httputil.lo.dep
 net/http/httputil.lo.dep: $(go_net_http_httputil_files)
 	$(BUILDDEPS)
@@ -6205,6 +6352,8 @@ bufio.gox: bufio.lo
 	$(BUILDGOX)
 bytes.gox: bytes.lo
 	$(BUILDGOX)
+context.gox: context.lo
+	$(BUILDGOX)
 crypto.gox: crypto.lo
 	$(BUILDGOX)
 encoding.gox: encoding.lo
@@ -6399,6 +6548,15 @@ go/internal/gcimporter.gox: go/internal/gcimporter.lo
 go/internal/gccgoimporter.gox: go/internal/gccgoimporter.lo
 	$(BUILDGOX)
+
+golang_org/x/net/http2/hpack.gox: golang_org/x/net/http2/hpack.lo
+	$(BUILDGOX)
+
+golang_org/x/net/lex/httplex.gox: golang_org/x/net/lex/httplex.lo
+	$(BUILDGOX)
+
+@LIBGO_IS_BSD_TRUE@golang_org/x/net/route.gox: golang_org/x/net/route.lo
+@LIBGO_IS_BSD_TRUE@	$(BUILDGOX)
 
 hash/adler32.gox: hash/adler32.lo
 	$(BUILDGOX)
 hash/crc32.gox: hash/crc32.lo
@@ -6427,7 +6585,7 @@ image/color/palette.gox: image/color/palette.lo
 index/suffixarray.gox: index/suffixarray.lo
 	$(BUILDGOX)
 
-internal/golang.org/x/net/http2/hpack.gox: internal/golang.org/x/net/http2/hpack.lo
+internal/nettrace.gox: internal/nettrace.lo
 	$(BUILDGOX)
 internal/race.gox: internal/race.lo
 	$(BUILDGOX)
@@ -6479,6 +6637,8 @@ net/http/fcgi.gox: net/http/fcgi.lo
 	$(BUILDGOX)
 net/http/httptest.gox: net/http/httptest.lo
 	$(BUILDGOX)
+net/http/httptrace.gox: net/http/httptrace.lo
+	$(BUILDGOX)
 net/http/httputil.gox: net/http/httputil.lo
 	$(BUILDGOX)
 net/http/pprof.gox: net/http/pprof.lo
--- a/libgo/VERSION
+++ b/libgo/VERSION
@@ -1 +1 @@
-go1.6.1
+go1.7rc3
--- a/libgo/configure
+++ b/libgo/configure
@@ -670,6 +670,8 @@ LIBGO_IS_386_FALSE
 LIBGO_IS_386_TRUE
 USE_DEJAGNU
 GOOS
+LIBGO_IS_BSD_FALSE
+LIBGO_IS_BSD_TRUE
 LIBGO_IS_SOLARIS_FALSE
 LIBGO_IS_SOLARIS_TRUE
 LIBGO_IS_RTEMS_FALSE
@@ -11126,7 +11128,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 11129 "configure"
+#line 11131 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -11232,7 +11234,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 11235 "configure"
+#line 11237 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -13611,6 +13613,14 @@ else
   LIBGO_IS_SOLARIS_FALSE=
 fi
 
+if test $is_darwin = yes -o $is_dragonfly = yes -o $is_freebsd = yes -o $is_netbsd = yes -o $is_openbsd = yes; then
+  LIBGO_IS_BSD_TRUE=
+  LIBGO_IS_BSD_FALSE='#'
+else
+  LIBGO_IS_BSD_TRUE='#'
+  LIBGO_IS_BSD_FALSE=
+fi
+
 
 
 USE_DEJAGNU=no
@@ -15769,6 +15779,10 @@ if test -z "${LIBGO_IS_SOLARIS_TRUE}" && test -z "${LIBGO_IS_SOLARIS_FALSE}"; th
   as_fn_error "conditional \"LIBGO_IS_SOLARIS\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${LIBGO_IS_BSD_TRUE}" && test -z "${LIBGO_IS_BSD_FALSE}"; then
+  as_fn_error "conditional \"LIBGO_IS_BSD\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${LIBGO_IS_386_TRUE}" && test -z "${LIBGO_IS_386_FALSE}"; then
   as_fn_error "conditional \"LIBGO_IS_386\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
--- a/libgo/configure.ac
+++ b/libgo/configure.ac
@@ -172,6 +172,7 @@ AM_CONDITIONAL(LIBGO_IS_OPENBSD, test $is_openbsd = yes)
 AM_CONDITIONAL(LIBGO_IS_DRAGONFLY, test $is_dragonfly = yes)
 AM_CONDITIONAL(LIBGO_IS_RTEMS, test $is_rtems = yes)
 AM_CONDITIONAL(LIBGO_IS_SOLARIS, test $is_solaris = yes)
+AM_CONDITIONAL(LIBGO_IS_BSD, test $is_darwin = yes -o $is_dragonfly = yes -o $is_freebsd = yes -o $is_netbsd = yes -o $is_openbsd = yes)
 AC_SUBST(GOOS)
 
 dnl Test whether we need to use DejaGNU or whether we can use the
--- a/libgo/go/archive/tar/common.go
+++ b/libgo/go/archive/tar/common.go
@@ -21,10 +21,8 @@ import (
 	"time"
 )
 
+// Header type flags.
 const (
-	blockSize = 512
-
-	// Types
 	TypeReg  = '0'    // regular file
 	TypeRegA = '\x00' // regular file
 	TypeLink = '1'    // hard link
@@ -61,12 +59,6 @@ type Header struct {
 	Xattrs map[string]string
 }
 
-// File name constants from the tar spec.
-const (
-	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
-	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
-)
-
 // FileInfo returns an os.FileInfo for the Header.
 func (h *Header) FileInfo() os.FileInfo {
 	return headerFileInfo{h}
@@ -279,33 +271,6 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
 	return h, nil
 }
 
-var zeroBlock = make([]byte, blockSize)
-
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
-// We compute and return both.
-func checksum(header []byte) (unsigned int64, signed int64) {
-	for i := 0; i < len(header); i++ {
-		if i == 148 {
-			// The chksum field (header[148:156]) is special: it should be treated as space bytes.
-			unsigned += ' ' * 8
-			signed += ' ' * 8
-			i += 7
-			continue
-		}
-		unsigned += int64(header[i])
-		signed += int64(int8(header[i]))
-	}
-	return
-}
-
-type slicer []byte
-
-func (sp *slicer) next(n int) (b []byte) {
-	s := *sp
-	b, *sp = s[0:n], s[n:]
-	return
-}
-
 func isASCII(s string) bool {
 	for _, c := range s {
 		if c >= 0x80 {
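The checksum helper deleted above reappears as block.ComputeChecksum in the new format.go that follows. A self-contained sketch of the rule it implements: POSIX sums unsigned byte values, old Sun tar summed signed ones, and the checksum field itself (bytes 148..155) counts as spaces.

```go
package main

import "fmt"

// checksums mirrors the tar header rule: sum every byte both as
// uint8 and as int8, treating the checksum field (148..155) as spaces.
func checksums(header [512]byte) (unsigned, signed int64) {
	for i, c := range header {
		if 148 <= i && i < 156 {
			c = ' '
		}
		unsigned += int64(uint8(c))
		signed += int64(int8(c))
	}
	return unsigned, signed
}

func main() {
	var hdr [512]byte
	hdr[0] = 0xFF // a byte that differs between the two interpretations
	u, s := checksums(hdr)
	fmt.Println(u, s) // 511 255: 0xFF is 255 unsigned but -1 signed
}
```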
@ -0,0 +1,197 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// Constants to identify various tar formats.
|
||||
const (
|
||||
// The format is unknown.
|
||||
formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc...
|
||||
|
||||
// The format of the original Unix V7 tar tool prior to standardization.
|
||||
formatV7
|
||||
|
||||
// The old and new GNU formats, which are incompatible with USTAR.
|
||||
// This does cover the old GNU sparse extension.
|
||||
// This does not cover the GNU sparse extensions using PAX headers,
|
||||
// versions 0.0, 0.1, and 1.0; these fall under the PAX format.
|
||||
formatGNU
|
||||
|
||||
// Schily's tar format, which is incompatible with USTAR.
|
||||
// This does not cover STAR extensions to the PAX format; these fall under
|
||||
// the PAX format.
|
||||
formatSTAR
|
||||
|
||||
// USTAR is the former standardization of tar defined in POSIX.1-1988.
|
||||
// This is incompatible with the GNU and STAR formats.
|
||||
formatUSTAR
|
||||
|
||||
// PAX is the latest standardization of tar defined in POSIX.1-2001.
|
||||
// This is an extension of USTAR and is "backwards compatible" with it.
|
||||
//
|
||||
// Some newer formats add their own extensions to PAX, such as GNU sparse
|
||||
// files and SCHILY extended attributes. Since they are backwards compatible
|
||||
// with PAX, they will be labelled as "PAX".
|
||||
formatPAX
|
||||
)
|
||||
|
||||
// Magics used to identify various formats.
|
||||
const (
|
||||
magicGNU, versionGNU = "ustar ", " \x00"
|
||||
magicUSTAR, versionUSTAR = "ustar\x00", "00"
|
||||
trailerSTAR = "tar\x00"
|
||||
)
|
||||
|
||||
// Size constants from various tar specifications.
|
||||
const (
|
||||
blockSize = 512 // Size of each block in a tar stream
|
||||
nameSize = 100 // Max length of the name field in USTAR format
|
||||
prefixSize = 155 // Max length of the prefix field in USTAR format
|
||||
)
|
||||
|
||||
var zeroBlock block
|
||||
|
||||
type block [blockSize]byte
|
||||
|
||||
// Convert block to any number of formats.
|
||||
func (b *block) V7() *headerV7 { return (*headerV7)(b) }
|
||||
func (b *block) GNU() *headerGNU { return (*headerGNU)(b) }
|
||||
func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) }
|
||||
func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
|
||||
func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
|
||||
|
||||
// GetFormat checks that the block is a valid tar header based on the checksum.
// It then attempts to guess the specific format based on magic values.
// If the checksum fails, then formatUnknown is returned.
func (b *block) GetFormat() (format int) {
	// Verify checksum.
	var p parser
	value := p.parseOctal(b.V7().Chksum())
	chksum1, chksum2 := b.ComputeChecksum()
	if p.err != nil || (value != chksum1 && value != chksum2) {
		return formatUnknown
	}

	// Guess the magic values.
	magic := string(b.USTAR().Magic())
	version := string(b.USTAR().Version())
	trailer := string(b.STAR().Trailer())
	switch {
	case magic == magicUSTAR && trailer == trailerSTAR:
		return formatSTAR
	case magic == magicUSTAR:
		return formatUSTAR
	case magic == magicGNU && version == versionGNU:
		return formatGNU
	default:
		return formatV7
	}
}

// SetFormat writes the magic values necessary for specified format
// and then updates the checksum accordingly.
func (b *block) SetFormat(format int) {
	// Set the magic values.
	switch format {
	case formatV7:
		// Do nothing.
	case formatGNU:
		copy(b.GNU().Magic(), magicGNU)
		copy(b.GNU().Version(), versionGNU)
	case formatSTAR:
		copy(b.STAR().Magic(), magicUSTAR)
		copy(b.STAR().Version(), versionUSTAR)
		copy(b.STAR().Trailer(), trailerSTAR)
	case formatUSTAR, formatPAX:
		copy(b.USTAR().Magic(), magicUSTAR)
		copy(b.USTAR().Version(), versionUSTAR)
	default:
		panic("invalid format")
	}

	// Update checksum.
	// This field is special in that it is terminated by a NULL then space.
	var f formatter
	field := b.V7().Chksum()
	chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
	f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
	field[7] = ' '
}

// ComputeChecksum computes the checksum for the header block.
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
// signed byte values.
// We compute and return both.
func (b *block) ComputeChecksum() (unsigned, signed int64) {
	for i, c := range b {
		if 148 <= i && i < 156 {
			c = ' ' // Treat the checksum field itself as all spaces.
		}
		unsigned += int64(uint8(c))
		signed += int64(int8(c))
	}
	return unsigned, signed
}
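// Illustrative note (not part of the commit): GetFormat accepts either sum
// because historical writers disagreed on signedness. For a header whose
// bytes are all ASCII (< 0x80) the two sums are identical; they diverge only
// when a byte has the high bit set, e.g. in a base-256 encoded numeric field:
//
//	var b block
//	b.V7().Size()[0] = 0x80 // base-256 length marker
//	u, s := b.ComputeChecksum()
//	// u counts that byte as +128 and s counts it as -128,
//	// so u == s+256 for this block.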
type headerV7 [blockSize]byte

func (h *headerV7) Name() []byte     { return h[000:][:100] }
func (h *headerV7) Mode() []byte     { return h[100:][:8] }
func (h *headerV7) UID() []byte      { return h[108:][:8] }
func (h *headerV7) GID() []byte      { return h[116:][:8] }
func (h *headerV7) Size() []byte     { return h[124:][:12] }
func (h *headerV7) ModTime() []byte  { return h[136:][:12] }
func (h *headerV7) Chksum() []byte   { return h[148:][:8] }
func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
func (h *headerV7) LinkName() []byte { return h[157:][:100] }

type headerGNU [blockSize]byte

func (h *headerGNU) V7() *headerV7       { return (*headerV7)(h) }
func (h *headerGNU) Magic() []byte       { return h[257:][:6] }
func (h *headerGNU) Version() []byte     { return h[263:][:2] }
func (h *headerGNU) UserName() []byte    { return h[265:][:32] }
func (h *headerGNU) GroupName() []byte   { return h[297:][:32] }
func (h *headerGNU) DevMajor() []byte    { return h[329:][:8] }
func (h *headerGNU) DevMinor() []byte    { return h[337:][:8] }
func (h *headerGNU) AccessTime() []byte  { return h[345:][:12] }
func (h *headerGNU) ChangeTime() []byte  { return h[357:][:12] }
func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
func (h *headerGNU) RealSize() []byte    { return h[483:][:12] }

type headerSTAR [blockSize]byte

func (h *headerSTAR) V7() *headerV7      { return (*headerV7)(h) }
func (h *headerSTAR) Magic() []byte      { return h[257:][:6] }
func (h *headerSTAR) Version() []byte    { return h[263:][:2] }
func (h *headerSTAR) UserName() []byte   { return h[265:][:32] }
func (h *headerSTAR) GroupName() []byte  { return h[297:][:32] }
func (h *headerSTAR) DevMajor() []byte   { return h[329:][:8] }
func (h *headerSTAR) DevMinor() []byte   { return h[337:][:8] }
func (h *headerSTAR) Prefix() []byte     { return h[345:][:131] }
func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
func (h *headerSTAR) Trailer() []byte    { return h[508:][:4] }

type headerUSTAR [blockSize]byte

func (h *headerUSTAR) V7() *headerV7     { return (*headerV7)(h) }
func (h *headerUSTAR) Magic() []byte     { return h[257:][:6] }
func (h *headerUSTAR) Version() []byte   { return h[263:][:2] }
func (h *headerUSTAR) UserName() []byte  { return h[265:][:32] }
func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
func (h *headerUSTAR) DevMajor() []byte  { return h[329:][:8] }
func (h *headerUSTAR) DevMinor() []byte  { return h[337:][:8] }
func (h *headerUSTAR) Prefix() []byte    { return h[345:][:155] }

type sparseArray []byte

func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) }
func (s sparseArray) IsExtended() []byte     { return s[24*s.MaxEntries():][:1] }
func (s sparseArray) MaxEntries() int        { return len(s) / 24 }

type sparseNode []byte

func (s sparseNode) Offset() []byte   { return s[00:][:12] }
func (s sparseNode) NumBytes() []byte { return s[12:][:12] }
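// Illustrative note (not part of the commit): the V7 accessors encode the
// classic tar layout, where each field starts where the previous one ends:
// Name 0..100, Mode 100..108, UID 108..116, GID 116..124, Size 124..136,
// ModTime 136..148, Chksum 148..156, TypeFlag 156..157, LinkName 157..257.
// The magic at offset 257 in the GNU/STAR/USTAR views begins exactly where
// the V7 header ends, which is why all four layouts can share one block.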
@ -13,7 +13,6 @@ import (
	"io"
	"io/ioutil"
	"math"
	"os"
	"strconv"
	"strings"
	"time"

@ -34,7 +33,7 @@ type Reader struct {
	err  error
	pad  int64          // amount of padding (ignored) after current file entry
	curr numBytesReader // reader for current file entry
	hdrBuff [blockSize]byte // buffer to use in readHeader
	blk  block          // buffer to use as temporary local storage
}

type parser struct {

@ -99,17 +98,6 @@ const (
	paxGNUSparseRealSize = "GNU.sparse.realsize"
)

// Keywords for old GNU sparse headers
const (
	oldGNUSparseMainHeaderOffset               = 386
	oldGNUSparseMainHeaderIsExtendedOffset     = 482
	oldGNUSparseMainHeaderNumEntries           = 4
	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
	oldGNUSparseExtendedHeaderNumEntries       = 21
	oldGNUSparseOffsetSize                     = 12
	oldGNUSparseNumBytesSize                   = 12
)

// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }

@ -307,7 +295,7 @@ func mergePAX(hdr *Header, headers map[string]string) error {
			if err != nil {
				return err
			}
			hdr.Size = int64(size)
			hdr.Size = size
		default:
			if strings.HasPrefix(k, paxXattr) {
				if hdr.Xattrs == nil {

@ -337,17 +325,17 @@ func parsePAXTime(t string) (time.Time, error) {
		if err != nil {
			return time.Time{}, err
		}
		nano_buf := string(buf[pos+1:])
		nanoBuf := string(buf[pos+1:])
		// Pad as needed before converting to a decimal.
		// For example .030 -> .030000000 -> 30000000 nanoseconds
		if len(nano_buf) < maxNanoSecondIntSize {
		if len(nanoBuf) < maxNanoSecondIntSize {
			// Right pad
			nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
		} else if len(nano_buf) > maxNanoSecondIntSize {
			nanoBuf += strings.Repeat("0", maxNanoSecondIntSize-len(nanoBuf))
		} else if len(nanoBuf) > maxNanoSecondIntSize {
			// Right truncate
			nano_buf = nano_buf[:maxNanoSecondIntSize]
			nanoBuf = nanoBuf[:maxNanoSecondIntSize]
		}
		nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
		nanoseconds, err = strconv.ParseInt(nanoBuf, 10, 0)
		if err != nil {
			return time.Time{}, err
		}
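// Illustrative note (not part of the commit): a PAX mtime record stores a
// decimal seconds value with an optional fractional part, which the code
// above right-pads or truncates to exactly 9 digits of nanoseconds:
//
//	"1350244992.023960108" -> sec=1350244992, nano="023960108" (used as-is)
//	"1350244992.3"         -> nano="3" padded to "300000000"
//	"1350244992"           -> no '.': nanoseconds default to 0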
@ -379,14 +367,14 @@ func parsePAX(r io.Reader) (map[string]string, error) {
		}
		sbuf = residual

		keyStr := string(key)
		keyStr := key
		if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
			// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
			sparseMap.WriteString(value)
			sparseMap.Write([]byte{','})
		} else {
			// Normal key. Set the value in the headers map.
			headers[keyStr] = string(value)
			headers[keyStr] = value
		}
	}
	if sparseMap.Len() != 0 {

@ -523,10 +511,10 @@ func (tr *Reader) skipUnread() error {
		// io.Seeker, but calling Seek always returns an error and performs
		// no action. Thus, we try an innocent seek to the current position
		// to see if Seek is really supported.
		pos1, err := sr.Seek(0, os.SEEK_CUR)
		pos1, err := sr.Seek(0, io.SeekCurrent)
		if err == nil {
			// Seek seems supported, so perform the real Seek.
			pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
			pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent)
			if err != nil {
				tr.err = err
				return tr.err
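// Illustrative sketch (not part of the commit) of the probe pattern used
// above: a Seek of 0 bytes from the current position is a no-op when Seek
// works, so an error from it cheaply reveals a reader that only pretends
// to be an io.Seeker:
//
//	if _, err := sr.Seek(0, io.SeekCurrent); err == nil {
//		// real seeking is available; skip data without reading it
//	} else {
//		// fall back to copying into ioutil.Discard
//	}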
@ -543,17 +531,6 @@ func (tr *Reader) skipUnread() error {
	return tr.err
}

func (tr *Reader) verifyChecksum(header []byte) bool {
	if tr.err != nil {
		return false
	}

	var p parser
	given := p.parseOctal(header[148:156])
	unsigned, signed := checksum(header)
	return p.err == nil && (given == unsigned || given == signed)
}

// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//

@ -562,19 +539,16 @@ func (tr *Reader) verifyChecksum(header []byte) bool {
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
	header := tr.hdrBuff[:]
	copy(header, zeroBlock)

	if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
	if _, tr.err = io.ReadFull(tr.r, tr.blk[:]); tr.err != nil {
		return nil // io.EOF is okay here
	}

	// Two blocks of zero bytes marks the end of the archive.
	if bytes.Equal(header, zeroBlock[0:blockSize]) {
		if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		if _, tr.err = io.ReadFull(tr.r, tr.blk[:]); tr.err != nil {
			return nil // io.EOF is okay here
		}
		if bytes.Equal(header, zeroBlock[0:blockSize]) {
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			tr.err = io.EOF
		} else {
			tr.err = ErrHeader // zero block and then non-zero block

@ -582,71 +556,55 @@ func (tr *Reader) readHeader() *Header {
		return nil
	}

	if !tr.verifyChecksum(header) {
	// Verify the header matches a known format.
	format := tr.blk.GetFormat()
	if format == formatUnknown {
		tr.err = ErrHeader
		return nil
	}

	// Unpack
	var p parser
	hdr := new(Header)
	s := slicer(header)

	hdr.Name = p.parseString(s.next(100))
	hdr.Mode = p.parseNumeric(s.next(8))
	hdr.Uid = int(p.parseNumeric(s.next(8)))
	hdr.Gid = int(p.parseNumeric(s.next(8)))
	hdr.Size = p.parseNumeric(s.next(12))
	hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
	s.next(8) // chksum
	hdr.Typeflag = s.next(1)[0]
	hdr.Linkname = p.parseString(s.next(100))
	// Unpack the V7 header.
	v7 := tr.blk.V7()
	hdr.Name = p.parseString(v7.Name())
	hdr.Mode = p.parseNumeric(v7.Mode())
	hdr.Uid = int(p.parseNumeric(v7.UID()))
	hdr.Gid = int(p.parseNumeric(v7.GID()))
	hdr.Size = p.parseNumeric(v7.Size())
	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
	hdr.Typeflag = v7.TypeFlag()[0]
	hdr.Linkname = p.parseString(v7.LinkName())

	// The remainder of the header depends on the value of magic.
	// The original (v7) version of tar had no explicit magic field,
	// so its magic bytes, like the rest of the block, are NULs.
	magic := string(s.next(8)) // contains version field as well.
	var format string
	switch {
	case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
		if string(header[508:512]) == "tar\x00" {
			format = "star"
		} else {
			format = "posix"
		}
	case magic == "ustar  \x00": // old GNU tar
		format = "gnu"
	}

	switch format {
	case "posix", "gnu", "star":
		hdr.Uname = p.parseString(s.next(32))
		hdr.Gname = p.parseString(s.next(32))
		devmajor := s.next(8)
		devminor := s.next(8)
	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.USTAR()
		hdr.Uname = p.parseString(ustar.UserName())
		hdr.Gname = p.parseString(ustar.GroupName())
		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
			hdr.Devmajor = p.parseNumeric(devmajor)
			hdr.Devminor = p.parseNumeric(devminor)
			hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
			hdr.Devminor = p.parseNumeric(ustar.DevMinor())
		}

		var prefix string
		switch format {
		case "posix", "gnu":
			prefix = p.parseString(s.next(155))
		case "star":
			prefix = p.parseString(s.next(131))
			hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
		case formatUSTAR, formatGNU:
			// TODO(dsnet): Do not use the prefix field for the GNU format!
			// See golang.org/issues/12594
			ustar := tr.blk.USTAR()
			prefix = p.parseString(ustar.Prefix())
		case formatSTAR:
			star := tr.blk.STAR()
			prefix = p.parseString(star.Prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}

	if p.err != nil {
		tr.err = p.err
		return nil
	}

	nb := hdr.Size
	if isHeaderOnlyType(hdr.Typeflag) {
		nb = 0

@ -663,14 +621,14 @@ func (tr *Reader) readHeader() *Header {
	// Check for old GNU sparse format entry.
	if hdr.Typeflag == TypeGNUSparse {
		// Get the real size of the file.
		hdr.Size = p.parseNumeric(header[483:495])
		hdr.Size = p.parseNumeric(tr.blk.GNU().RealSize())
		if p.err != nil {
			tr.err = p.err
			return nil
		}

		// Read the sparse map.
		sp := tr.readOldGNUSparseMap(header)
		sp := tr.readOldGNUSparseMap(&tr.blk)
		if tr.err != nil {
			return nil
		}

@ -682,26 +640,24 @@ func (tr *Reader) readHeader() *Header {
		}
	}

	if p.err != nil {
		tr.err = p.err
		return nil
	}

	return hdr
}

// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
func (tr *Reader) readOldGNUSparseMap(blk *block) []sparseEntry {
	var p parser
	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
	spCap := oldGNUSparseMainHeaderNumEntries
	if isExtended {
		spCap += oldGNUSparseExtendedHeaderNumEntries
	}
	sp := make([]sparseEntry, 0, spCap)
	s := slicer(header[oldGNUSparseMainHeaderOffset:])

	// Read the four entries from the main tar header
	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
		offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
		numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
	var s sparseArray = blk.GNU().Sparse()
	var sp = make([]sparseEntry, 0, s.MaxEntries())
	for i := 0; i < s.MaxEntries(); i++ {
		offset := p.parseOctal(s.Entry(i).Offset())
		numBytes := p.parseOctal(s.Entry(i).NumBytes())
		if p.err != nil {
			tr.err = p.err
			return nil

@ -712,17 +668,17 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	for isExtended {
	for s.IsExtended()[0] > 0 {
		// There are more entries. Read an extension header and parse its entries.
		sparseHeader := make([]byte, blockSize)
		if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
		var blk block
		if _, tr.err = io.ReadFull(tr.r, blk[:]); tr.err != nil {
			return nil
		}
		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
		s = slicer(sparseHeader)
		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
			offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
			numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
		s = blk.Sparse()

		for i := 0; i < s.MaxEntries(); i++ {
			offset := p.parseOctal(s.Entry(i).Offset())
			numBytes := p.parseOctal(s.Entry(i).NumBytes())
			if p.err != nil {
				tr.err = p.err
				return nil
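// Illustrative note (not part of the commit): an old GNU sparse map is a
// sequence of (offset, numBytes) pairs describing where real data lives in
// a mostly-empty file. The main header holds up to 4 entries (hence the
// 24*4+1 slice in headerGNU.Sparse, the +1 being the isExtended flag);
// a full 512-byte extension block holds 512/24 = 21 entries. For example,
// a file with data only at offsets 0 and 1M might carry the map:
//
//	{offset: 0, numBytes: 1024}
//	{offset: 1 << 20, numBytes: 512}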
@ -37,9 +37,9 @@ type Writer struct {
	pad        int64 // amount of padding to write after current file entry
	closed     bool
	usedBinary bool // whether the binary numeric field extension was used
	preferPax bool // use pax header instead of binary numeric header
	hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
	preferPax  bool  // use PAX header instead of binary numeric header
	hdrBuff    block // buffer to use in writeHeader when writing a regular header
	paxHdrBuff block // buffer to use in writeHeader when writing a PAX header
}

type formatter struct {

@ -153,27 +153,24 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	// a map to hold pax header records, if any are needed
	paxHeaders := make(map[string]string)

	// TODO(shanemhansen): we might want to use PAX headers for
	// TODO(dsnet): we might want to use PAX headers for
	// subsecond time resolution, but for now let's just capture
	// too long fields or non ascii characters

	var f formatter
	var header []byte

	// We need to select which scratch buffer to use carefully,
	// since this method is called recursively to write PAX headers.
	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
	// already being used by the non-recursive call, so we must use paxHdrBuff.
	header = tw.hdrBuff[:]
	header := &tw.hdrBuff
	if !allowPax {
		header = tw.paxHdrBuff[:]
		header = &tw.paxHdrBuff
	}
	copy(header, zeroBlock)
	s := slicer(header)
	copy(header[:], zeroBlock[:])

	// Wrappers around formatter that automatically sets paxHeaders if the
	// argument extends beyond the capacity of the input byte slice.
	var f formatter
	var formatString = func(b []byte, s string, paxKeyword string) {
		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
		if needsPaxHeader {
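// Illustrative note (not part of the commit): formatString and formatNumeric
// above are fallback wrappers. Each one first tries to fit the value into
// the fixed-width header field; when the value is too long or non-ASCII, it
// records a PAX key instead, roughly:
//
//	if len(s) > len(b) || !isASCII(s) {
//		paxHeaders[paxKeyword] = s // emit an extended header record later
//		return
//	}
//	f.formatString(b, s) // fits: write it into the block directly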
@ -202,44 +199,33 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
		f.formatNumeric(b, x)
	}

	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	pathHeaderBytes := s.next(fileNameSize)

	formatString(pathHeaderBytes, hdr.Name, paxPath)

	// Handle out of range ModTime carefully.
	var modTime int64
	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
		modTime = hdr.ModTime.Unix()
	}

	f.formatOctal(s.next(8), hdr.Mode)               // 100:108
	formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
	formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
	formatNumeric(s.next(12), hdr.Size, paxSize)     // 124:136
	formatNumeric(s.next(12), modTime, paxNone)      // 136:148 --- consider using pax for finer granularity
	s.next(8)                                        // chksum (148:156)
	s.next(1)[0] = hdr.Typeflag                      // 156:157
	v7 := header.V7()
	formatString(v7.Name(), hdr.Name, paxPath)
	// TODO(dsnet): The GNU format permits the mode field to be encoded in
	// base-256 format. Thus, we can use formatNumeric instead of formatOctal.
	f.formatOctal(v7.Mode(), hdr.Mode)
	formatNumeric(v7.UID(), int64(hdr.Uid), paxUid)
	formatNumeric(v7.GID(), int64(hdr.Gid), paxGid)
	formatNumeric(v7.Size(), hdr.Size, paxSize)
	// TODO(dsnet): Consider using PAX for finer time granularity.
	formatNumeric(v7.ModTime(), modTime, paxNone)
	v7.TypeFlag()[0] = hdr.Typeflag
	formatString(v7.LinkName(), hdr.Linkname, paxLinkpath)

	formatString(s.next(100), hdr.Linkname, paxLinkpath)
	ustar := header.USTAR()
	formatString(ustar.UserName(), hdr.Uname, paxUname)
	formatString(ustar.GroupName(), hdr.Gname, paxGname)
	formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone)
	formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone)

	copy(s.next(8), []byte("ustar\x0000"))          // 257:265
	formatString(s.next(32), hdr.Uname, paxUname)   // 265:297
	formatString(s.next(32), hdr.Gname, paxGname)   // 297:329
	formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
	formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345

	// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	prefixHeaderBytes := s.next(155)
	formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix

	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
	if tw.usedBinary {
		copy(header[257:265], []byte("ustar  \x00"))
	}

	_, paxPathUsed := paxHeaders[paxPath]
	// try to use a ustar header when only the name is too long
	_, paxPathUsed := paxHeaders[paxPath]
	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
		prefix, suffix, ok := splitUSTARPath(hdr.Name)
		if ok {

@ -247,16 +233,16 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
			delete(paxHeaders, paxPath)

			// Update the path fields
			formatString(pathHeaderBytes, suffix, paxNone)
			formatString(prefixHeaderBytes, prefix, paxNone)
			formatString(v7.Name(), suffix, paxNone)
			formatString(ustar.Prefix(), prefix, paxNone)
		}
	}

	// The chksum field is terminated by a NUL and a space.
	// This is different from the other octal fields.
	chksum, _ := checksum(header)
	f.formatOctal(header[148:155], chksum) // Never fails
	header[155] = ' '
	if tw.usedBinary {
		header.SetFormat(formatGNU)
	} else {
		header.SetFormat(formatUSTAR)
	}

	// Check if there were any formatting errors.
	if f.err != nil {

@ -278,10 +264,10 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
			return err
		}
	}
	tw.nb = int64(hdr.Size)
	tw.nb = hdr.Size
	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize

	_, tw.err = tw.w.Write(header)
	_, tw.err = tw.w.Write(header[:])
	return tw.err
}

@ -289,10 +275,10 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length <= fileNameSize || !isASCII(name) {
	if length <= nameSize || !isASCII(name) {
		return "", "", false
	} else if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if length > prefixSize+1 {
		length = prefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}

@ -300,7 +286,7 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1 // nlen is length of suffix
	plen := i                 // plen is length of prefix
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
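// Illustrative example (not part of the commit) of the USTAR split that
// writeHeader attempts before falling back to PAX: a path longer than the
// 100-byte name field may still fit by moving its directory part into the
// 155-byte prefix field, joined implicitly by a '/':
//
//	prefix, suffix, ok := splitUSTARPath("very/long/dir/chain/file.txt")
//	// For a path whose final component fits in 100 bytes and whose
//	// directory part fits in 155 bytes: ok == true,
//	// prefix == "very/long/dir/chain", suffix == "file.txt".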
@ -323,8 +309,8 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
	fullName := path.Join(dir, "PaxHeaders.0", file)

	ascii := toASCII(fullName)
	if len(ascii) > 100 {
		ascii = ascii[:100]
	if len(ascii) > nameSize {
		ascii = ascii[:nameSize]
	}
	ext.Name = ascii
	// Construct the body

@ -407,7 +393,7 @@ func (tw *Writer) Close() error {

	// trailer: two zero blocks
	for i := 0; i < 2; i++ {
		_, tw.err = tw.w.Write(zeroBlock)
		_, tw.err = tw.w.Write(zeroBlock[:])
		if tw.err != nil {
			break
		}

@ -587,17 +587,17 @@ func TestSplitUSTARPath(t *testing.T) {
		{"", "", "", false},
		{"abc", "", "", false},
		{"用戶名", "", "", false},
		{sr("a", fileNameSize), "", "", false},
		{sr("a", fileNameSize) + "/", "", "", false},
		{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
		{sr("a", fileNamePrefixSize) + "/", "", "", false},
		{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
		{sr("a", fileNameSize+1), "", "", false},
		{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
		{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
			sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
		{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
		{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
		{sr("a", nameSize), "", "", false},
		{sr("a", nameSize) + "/", "", "", false},
		{sr("a", nameSize) + "/a", sr("a", nameSize), "a", true},
		{sr("a", prefixSize) + "/", "", "", false},
		{sr("a", prefixSize) + "/a", sr("a", prefixSize), "a", true},
		{sr("a", nameSize+1), "", "", false},
		{sr("/", nameSize+1), sr("/", nameSize-1), "/", true},
		{sr("a", prefixSize) + "/" + sr("b", nameSize),
			sr("a", prefixSize), sr("b", nameSize), true},
		{sr("a", prefixSize) + "//" + sr("b", nameSize), "", "", false},
		{sr("a/", nameSize), sr("a/", 77) + "a", sr("a/", 22), true},
	}

	for _, v := range vectors {

@ -87,7 +87,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
	z.File = make([]*File, 0, end.directoryRecords)
	z.Comment = end.comment
	rs := io.NewSectionReader(r, 0, size)
	if _, err = rs.Seek(int64(end.directoryOffset), os.SEEK_SET); err != nil {
	if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
		return err
	}
	buf := bufio.NewReader(rs)

@ -153,19 +153,18 @@ func (f *File) DataOffset() (offset int64, err error) {

// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
func (f *File) Open() (io.ReadCloser, error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
		return nil, err
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		err = ErrAlgorithm
		return
		return nil, ErrAlgorithm
	}
	rc = dcomp(r)
	var rc io.ReadCloser = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)

@ -176,7 +175,7 @@ func (f *File) Open() (rc io.ReadCloser, err error) {
		f:    f,
		desr: desr,
	}
	return
	return rc, nil
}

type checksumReader struct {

@ -399,7 +399,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) {
	// Don't bother uncompressing: too big.
	if ft.Content == nil && ft.File == "" && ft.Size > 0 {
		if size != ft.Size {
			t.Errorf("%v: uncompressed size %#x, want %#x", size, ft.Size)
			t.Errorf("%v: uncompressed size %#x, want %#x", ft.Name, size, ft.Size)
		}
		r.Close()
		return
@ -64,6 +64,44 @@ func (w *pooledFlateWriter) Close() error {
	return err
}

var flateReaderPool sync.Pool

func newFlateReader(r io.Reader) io.ReadCloser {
	fr, ok := flateReaderPool.Get().(io.ReadCloser)
	if ok {
		fr.(flate.Resetter).Reset(r, nil)
	} else {
		fr = flate.NewReader(r)
	}
	return &pooledFlateReader{fr: fr}
}

type pooledFlateReader struct {
	mu sync.Mutex // guards Close and Read
	fr io.ReadCloser
}

func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.fr == nil {
		return 0, errors.New("Read after Close")
	}
	return r.fr.Read(p)
}

func (r *pooledFlateReader) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	var err error
	if r.fr != nil {
		err = r.fr.Close()
		flateReaderPool.Put(r.fr)
		r.fr = nil
	}
	return err
}

var (
	mu sync.RWMutex // guards compressor and decompressor maps

@ -74,7 +112,7 @@ var (

	decompressors = map[uint16]Decompressor{
		Store:   ioutil.NopCloser,
		Deflate: flate.NewReader,
		Deflate: newFlateReader,
	}
)
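// Illustrative sketch (not part of the commit) of the pooling pattern used
// above: flate decompressors carry large internal state, so recycling them
// through a sync.Pool avoids reallocating that state on every call to
// File.Open. The same shape works for any resettable reader:
//
//	var pool sync.Pool
//
//	func get(src io.Reader) io.ReadCloser {
//		if rc, ok := pool.Get().(io.ReadCloser); ok {
//			rc.(flate.Resetter).Reset(src, nil) // reuse old state
//			return rc
//		}
//		return flate.NewReader(src) // pool empty: allocate
//	}
//
// Close must return the reader to the pool exactly once, which is why
// pooledFlateReader nils out r.fr under the mutex.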
@ -5,7 +5,7 @@
/*
Package zip provides support for reading and writing ZIP archives.

See: http://www.pkware.com/documents/casestudies/APPNOTE.TXT
See: https://www.pkware.com/documents/casestudies/APPNOTE.TXT

This package does not support disk spanning.

@ -52,7 +52,7 @@ func (w *Writer) Flush() error {
}

// Close finishes writing the zip file by writing the central directory.
// It does not (and can not) close the underlying writer.
// It does not (and cannot) close the underlying writer.
func (w *Writer) Close() error {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {

@ -184,7 +184,7 @@ func BenchmarkCompressedZipGarbage(b *testing.B) {
	b.ReportAllocs()
	var buf bytes.Buffer
	bigBuf := bytes.Repeat([]byte("a"), 1<<20)
	for i := 0; i < b.N; i++ {
	for i := 0; i <= b.N; i++ {
		buf.Reset()
		zw := NewWriter(&buf)
		for j := 0; j < 3; j++ {

@ -195,5 +195,11 @@ func BenchmarkCompressedZipGarbage(b *testing.B) {
			w.Write(bigBuf)
		}
		zw.Close()
		if i == 0 {
			// Reset the timer after the first time through.
			// This effectively discards the very large initial flate setup cost,
			// as well as the initialization of bigBuf.
			b.ResetTimer()
		}
	}
}
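// Illustrative note (not part of the commit): the loop above deliberately
// runs b.N+1 iterations. The first pass warms up the flate machinery and
// bigBuf, then b.ResetTimer() discards everything measured so far, so the
// reported ns/op covers only the steady-state b.N iterations:
//
//	for i := 0; i <= b.N; i++ {
//		work()
//		if i == 0 {
//			b.ResetTimer() // exclude one-time setup from the measurement
//		}
//	}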
@ -366,27 +366,6 @@ func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
	}
}

func testInvalidHeader(h *FileHeader, t *testing.T) {
	var buf bytes.Buffer
	z := NewWriter(&buf)

	f, err := z.CreateHeader(h)
	if err != nil {
		t.Fatalf("error creating header: %v", err)
	}
	if _, err := f.Write([]byte("hi")); err != nil {
		t.Fatalf("error writing content: %v", err)
	}
	if err := z.Close(); err != nil {
		t.Fatalf("error closing zip writer: %v", err)
	}

	b := buf.Bytes()
	if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != ErrFormat {
		t.Fatalf("got %v, expected ErrFormat", err)
	}
}

func testValidHeader(h *FileHeader, t *testing.T) {
	var buf bytes.Buffer
	z := NewWriter(&buf)

@ -124,14 +124,16 @@ func (b *Reader) Peek(n int) ([]byte, error) {
	if n < 0 {
		return nil, ErrNegativeCount
	}
	if n > len(b.buf) {
		return nil, ErrBufferFull
	}
	// 0 <= n <= len(b.buf)
	for b.w-b.r < n && b.err == nil {

	for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
		b.fill() // b.w-b.r < len(b.buf) => buffer is not full
	}

	if n > len(b.buf) {
		return b.buf[b.r:b.w], ErrBufferFull
	}

	// 0 <= n <= len(b.buf)
	var err error
	if avail := b.w - b.r; avail < n {
		// not enough data in buffer
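// Illustrative example (not part of the commit): with this change, asking
// Peek for more bytes than the buffer holds no longer returns nil. It now
// fills the buffer and returns everything available alongside
// ErrBufferFull, so callers can still act on the data:
//
//	br := bufio.NewReaderSize(strings.NewReader("abcdefghijklmnopqrst"), 16)
//	s, err := br.Peek(32)
//	// err == bufio.ErrBufferFull, but s == "abcdefghijklmnop"
//	// (the full 16-byte buffer) instead of nil as before.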
@ -220,7 +222,7 @@ func (b *Reader) Read(p []byte) (n int, err error) {

// ReadByte reads and returns a single byte.
// If no byte is available, returns an error.
func (b *Reader) ReadByte() (c byte, err error) {
func (b *Reader) ReadByte() (byte, error) {
	b.lastRuneSize = -1
	for b.r == b.w {
		if b.err != nil {

@ -228,7 +230,7 @@ func (b *Reader) ReadByte() (c byte, err error) {
		}
		b.fill() // buffer is empty
	}
	c = b.buf[b.r]
	c := b.buf[b.r]
	b.r++
	b.lastByte = int(c)
	return c, nil

@ -264,7 +266,7 @@ func (b *Reader) ReadRune() (r rune, size int, err error) {
		return 0, 0, b.readErr()
	}
	r, size = rune(b.buf[b.r]), 1
	if r >= 0x80 {
	if r >= utf8.RuneSelf {
		r, size = utf8.DecodeRune(b.buf[b.r:b.w])
	}
	b.r += size

@ -395,12 +397,12 @@ func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
	// Use ReadSlice to look for array,
	// accumulating full buffers.
	var frag []byte
	var full [][]byte

	var err error
	for {
		var e error
		frag, e = b.ReadSlice(delim)

@ -442,10 +444,9 @@ func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
// ReadString returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadString(delim byte) (line string, err error) {
func (b *Reader) ReadString(delim byte) (string, error) {
	bytes, err := b.ReadBytes(delim)
	line = string(bytes)
	return line, err
	return string(bytes), err
}

// WriteTo implements io.WriterTo.

@ -705,7 +706,7 @@ func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
		}
	}
	if err == io.EOF {
		// If we filled the buffer exactly, flush pre-emptively.
		// If we filled the buffer exactly, flush preemptively.
		if b.Available() == 0 {
			err = b.flush()
		} else {

@ -673,8 +673,8 @@ func TestPeek(t *testing.T) {
	if _, err := buf.Peek(-1); err != ErrNegativeCount {
		t.Fatalf("want ErrNegativeCount got %v", err)
	}
	if _, err := buf.Peek(32); err != ErrBufferFull {
		t.Fatalf("want ErrBufFull got %v", err)
	if s, err := buf.Peek(32); string(s) != "abcdefghijklmnop" || err != ErrBufferFull {
		t.Fatalf("want %q, ErrBufFull got %q, err=%v", "abcdefghijklmnop", string(s), err)
	}
	if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err)

@ -1475,7 +1475,7 @@ func BenchmarkReaderWriteToOptimal(b *testing.B) {
		b.Fatal("ioutil.Discard doesn't support ReaderFrom")
	}
	for i := 0; i < b.N; i++ {
		r.Seek(0, 0)
		r.Seek(0, io.SeekStart)
		srcReader.Reset(onlyReader{r})
		n, err := srcReader.WriteTo(ioutil.Discard)
		if err != nil {

@ -264,10 +264,6 @@ func testNoNewline(text string, lines []string, t *testing.T) {
	}
}

var noNewlineLines = []string{
	"abcdefghijklmn\nopqrstuvwxyz",
}

// Test that the line splitter handles a final line without a newline.
func TestScanLineNoNewline(t *testing.T) {
	const text = "abcdefghijklmn\nopqrstuvwxyz"

@ -351,7 +347,7 @@ func TestSplitError(t *testing.T) {
// Test that an EOF is overridden by a user-generated scan error.
func TestErrAtEOF(t *testing.T) {
	s := NewScanner(strings.NewReader("1 2 33"))
	// This spitter will fail on last entry, after s.err==EOF.
	// This splitter will fail on last entry, after s.err==EOF.
	split := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		advance, token, err = ScanWords(data, atEOF)
		if len(token) > 1 {

@ -17,8 +17,8 @@ import (
type Buffer struct {
	buf       []byte            // contents are the bytes buf[off : len(buf)]
	off       int               // read at &buf[off], write at &buf[len(buf)]
	runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
	bootstrap [64]byte          // memory to hold first slice; helps small buffers (Printf) avoid allocation.
	runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each call to WriteRune
	bootstrap [64]byte          // memory to hold first slice; helps small buffers avoid allocation.
	lastRead  readOp            // last read operation, so that Unread* can work correctly.
}

@ -293,14 +293,14 @@ func (b *Buffer) Next(n int) []byte {

// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
func (b *Buffer) ReadByte() (byte, error) {
	b.lastRead = opInvalid
	if b.off >= len(b.buf) {
		// Buffer is empty, reset to recover space.
		b.Truncate(0)
		return 0, io.EOF
	}
	c = b.buf[b.off]
	c := b.buf[b.off]
	b.off++
	b.lastRead = opRead
	return c, nil

@ -83,6 +83,16 @@ func Contains(b, subslice []byte) bool {
	return Index(b, subslice) != -1
}

// ContainsAny reports whether any of the UTF-8-encoded Unicode code points in chars are within b.
func ContainsAny(b []byte, chars string) bool {
	return IndexAny(b, chars) >= 0
}

// ContainsRune reports whether the Unicode code point r is within b.
func ContainsRune(b []byte, r rune) bool {
	return IndexRune(b, r) >= 0
}

// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
	n := len(sep)
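// Illustrative example (not part of the commit): ContainsAny and
// ContainsRune are new in the bytes package with go1.7, mirroring the
// functions of the same names in the strings package:
//
//	bytes.ContainsAny([]byte("hello"), "aeiou")  // true: 'e' and 'o' match
//	bytes.ContainsAny([]byte("rhythm"), "aeiou") // false
//	bytes.ContainsRune([]byte("a☺b"), '☺')       // true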
@ -6,6 +6,7 @@ package bytes_test

import (
	. "bytes"
	"fmt"
	"math/rand"
	"reflect"
	"testing"

@ -47,32 +48,6 @@ type BinOpTest struct {
	i    int
}

var equalTests = []struct {
	a, b []byte
	i    int
}{
	{[]byte(""), []byte(""), 0},
	{[]byte("a"), []byte(""), 1},
	{[]byte(""), []byte("a"), -1},
	{[]byte("abc"), []byte("abc"), 0},
	{[]byte("ab"), []byte("abc"), -1},
	{[]byte("abc"), []byte("ab"), 1},
	{[]byte("x"), []byte("ab"), 1},
	{[]byte("ab"), []byte("x"), -1},
	{[]byte("x"), []byte("a"), 1},
	{[]byte("b"), []byte("x"), -1},
	// test runtime·memeq's chunked implementation
	{[]byte("abcdefgh"), []byte("abcdefgh"), 0},
	{[]byte("abcdefghi"), []byte("abcdefghi"), 0},
	{[]byte("abcdefghi"), []byte("abcdefghj"), -1},
	// nil tests
	{nil, nil, 0},
	{[]byte(""), nil, 0},
	{nil, []byte(""), 0},
	{[]byte("a"), nil, 1},
	{nil, []byte("a"), -1},
}

func TestEqual(t *testing.T) {
	for _, tt := range compareTests {
		eql := Equal(tt.a, tt.b)

@ -335,6 +310,41 @@ func TestIndexByteBig(t *testing.T) {
	}
}

// test a small index across all page offsets
func TestIndexByteSmall(t *testing.T) {
	b := make([]byte, 5015) // bigger than a page
	// Make sure we find the correct byte even when straddling a page.
	for i := 0; i <= len(b)-15; i++ {
		for j := 0; j < 15; j++ {
			b[i+j] = byte(100 + j)
		}
		for j := 0; j < 15; j++ {
			p := IndexByte(b[i:i+15], byte(100+j))
			if p != j {
				t.Errorf("IndexByte(%q, %d) = %d", b[i:i+15], 100+j, p)
			}
		}
		for j := 0; j < 15; j++ {
			b[i+j] = 0
		}
	}
	// Make sure matches outside the slice never trigger.
	for i := 0; i <= len(b)-15; i++ {
		for j := 0; j < 15; j++ {
			b[i+j] = 1
		}
		for j := 0; j < 15; j++ {
			p := IndexByte(b[i:i+15], byte(0))
			if p != -1 {
				t.Errorf("IndexByte(%q, %d) = %d", b[i:i+15], 0, p)
			}
		}
		for j := 0; j < 15; j++ {
			b[i+j] = 0
		}
	}
}

func TestIndexRune(t *testing.T) {
	for _, tt := range indexRuneTests {
		a := []byte(tt.a)

@ -348,20 +358,40 @@ func TestIndexRune(t *testing.T) {

var bmbuf []byte

func BenchmarkIndexByte32(b *testing.B) { bmIndexByte(b, IndexByte, 32) }
func BenchmarkIndexByte4K(b *testing.B) { bmIndexByte(b, IndexByte, 4<<10) }
func BenchmarkIndexByte4M(b *testing.B) { bmIndexByte(b, IndexByte, 4<<20) }
func BenchmarkIndexByte64M(b *testing.B) { bmIndexByte(b, IndexByte, 64<<20) }
func BenchmarkIndexBytePortable32(b *testing.B) { bmIndexByte(b, IndexBytePortable, 32) }
func BenchmarkIndexBytePortable4K(b *testing.B) { bmIndexByte(b, IndexBytePortable, 4<<10) }
func BenchmarkIndexBytePortable4M(b *testing.B) { bmIndexByte(b, IndexBytePortable, 4<<20) }
func BenchmarkIndexBytePortable64M(b *testing.B) { bmIndexByte(b, IndexBytePortable, 64<<20) }
func valName(x int) string {
	if s := x >> 20; s<<20 == x {
		return fmt.Sprintf("%dM", s)
	}
	if s := x >> 10; s<<10 == x {
		return fmt.Sprintf("%dK", s)
	}
	return fmt.Sprint(x)
}

func bmIndexByte(b *testing.B, index func([]byte, byte) int, n int) {
func benchBytes(b *testing.B, sizes []int, f func(b *testing.B, n int)) {
	for _, n := range sizes {
		b.Run(valName(n), func(b *testing.B) {
			if len(bmbuf) < n {
				bmbuf = make([]byte, n)
			}
			b.SetBytes(int64(n))
			f(b, n)
		})
	}
}

var indexSizes = []int{10, 32, 4 << 10, 4 << 20, 64 << 20}

func BenchmarkIndexByte(b *testing.B) {
	benchBytes(b, indexSizes, bmIndexByte(IndexByte))
}

func BenchmarkIndexBytePortable(b *testing.B) {
	benchBytes(b, indexSizes, bmIndexByte(IndexBytePortable))
}

func bmIndexByte(index func([]byte, byte) int) func(b *testing.B, n int) {
	return func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x'
		for i := 0; i < b.N; i++ {
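// Illustrative note (not part of the commit): this refactoring uses the
// sub-benchmark support added to the testing package in go1.7. b.Run names
// each size as a child benchmark, so one top-level function replaces the
// old per-size family, and individual sizes can be selected on the command
// line, e.g. (assuming a checkout where this package builds as "bytes"):
//
//	go test -bench='IndexByte/4K' bytes
//
//	func BenchmarkIndexByte(b *testing.B) {
//		for _, n := range indexSizes {
//			b.Run(valName(n), func(b *testing.B) { /* one size */ })
//		}
//	}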
@ -371,9 +401,11 @@ func bmIndexByte(b *testing.B, index func([]byte, byte) int, n int) {
			}
		}
		buf[n-1] = '\x00'
	}
}

func BenchmarkEqual0(b *testing.B) {
func BenchmarkEqual(b *testing.B) {
	b.Run("0", func(b *testing.B) {
		var buf [4]byte
		buf1 := buf[0:0]
		buf2 := buf[1:1]

@ -383,30 +415,22 @@ func BenchmarkEqual0(b *testing.B) {
				b.Fatal("bad equal")
			}
		}
	})

	sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20}
	benchBytes(b, sizes, bmEqual(Equal))
}

func BenchmarkEqual1(b *testing.B) { bmEqual(b, Equal, 1) }
func BenchmarkEqual6(b *testing.B) { bmEqual(b, Equal, 6) }
func BenchmarkEqual9(b *testing.B) { bmEqual(b, Equal, 9) }
func BenchmarkEqual15(b *testing.B) { bmEqual(b, Equal, 15) }
func BenchmarkEqual16(b *testing.B) { bmEqual(b, Equal, 16) }
func BenchmarkEqual20(b *testing.B) { bmEqual(b, Equal, 20) }
func BenchmarkEqual32(b *testing.B) { bmEqual(b, Equal, 32) }
func BenchmarkEqual4K(b *testing.B) { bmEqual(b, Equal, 4<<10) }
func BenchmarkEqual4M(b *testing.B) { bmEqual(b, Equal, 4<<20) }
func BenchmarkEqual64M(b *testing.B) { bmEqual(b, Equal, 64<<20) }
func BenchmarkEqualPort1(b *testing.B) { bmEqual(b, EqualPortable, 1) }
func BenchmarkEqualPort6(b *testing.B) { bmEqual(b, EqualPortable, 6) }
func BenchmarkEqualPort32(b *testing.B) { bmEqual(b, EqualPortable, 32) }
func BenchmarkEqualPort4K(b *testing.B) { bmEqual(b, EqualPortable, 4<<10) }
func BenchmarkEqualPortable4M(b *testing.B) { bmEqual(b, EqualPortable, 4<<20) }
func BenchmarkEqualPortable64M(b *testing.B) { bmEqual(b, EqualPortable, 64<<20) }
func BenchmarkEqualPort(b *testing.B) {
	sizes := []int{1, 6, 32, 4 << 10, 4 << 20, 64 << 20}
	benchBytes(b, sizes, bmEqual(EqualPortable))
}

func bmEqual(b *testing.B, equal func([]byte, []byte) bool, n int) {
func bmEqual(equal func([]byte, []byte) bool) func(b *testing.B, n int) {
	return func(b *testing.B, n int) {
		if len(bmbuf) < 2*n {
			bmbuf = make([]byte, 2*n)
		}
		b.SetBytes(int64(n))
		buf1 := bmbuf[0:n]
		buf2 := bmbuf[n : 2*n]
		buf1[n-1] = 'x'

@ -419,94 +443,67 @@ func bmEqual(b *testing.B, equal func([]byte, []byte) bool, n int) {
		}
		buf1[n-1] = '\x00'
		buf2[n-1] = '\x00'
	}
}

func BenchmarkIndex32(b *testing.B) { bmIndex(b, Index, 32) }
func BenchmarkIndex4K(b *testing.B) { bmIndex(b, Index, 4<<10) }
func BenchmarkIndex4M(b *testing.B) { bmIndex(b, Index, 4<<20) }
func BenchmarkIndex64M(b *testing.B) { bmIndex(b, Index, 64<<20) }

func bmIndex(b *testing.B, index func([]byte, []byte) int, n int) {
	if len(bmbuf) < n {
		bmbuf = make([]byte, n)
	}
	b.SetBytes(int64(n))
func BenchmarkIndex(b *testing.B) {
	benchBytes(b, indexSizes, func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x'
		for i := 0; i < b.N; i++ {
			j := index(buf, buf[n-7:])
			j := Index(buf, buf[n-7:])
			if j != n-7 {
				b.Fatal("bad index", j)
			}
		}
		buf[n-1] = '\x00'
	})
}

func BenchmarkIndexEasy32(b *testing.B) { bmIndexEasy(b, Index, 32) }
func BenchmarkIndexEasy4K(b *testing.B) { bmIndexEasy(b, Index, 4<<10) }
func BenchmarkIndexEasy4M(b *testing.B) { bmIndexEasy(b, Index, 4<<20) }
func BenchmarkIndexEasy64M(b *testing.B) { bmIndexEasy(b, Index, 64<<20) }

func bmIndexEasy(b *testing.B, index func([]byte, []byte) int, n int) {
	if len(bmbuf) < n {
		bmbuf = make([]byte, n)
	}
	b.SetBytes(int64(n))
func BenchmarkIndexEasy(b *testing.B) {
	benchBytes(b, indexSizes, func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x'
		buf[n-7] = 'x'
		for i := 0; i < b.N; i++ {
			j := index(buf, buf[n-7:])
			j := Index(buf, buf[n-7:])
			if j != n-7 {
				b.Fatal("bad index", j)
			}
		}
		buf[n-1] = '\x00'
		buf[n-7] = '\x00'
	})
}

func BenchmarkCount32(b *testing.B) { bmCount(b, Count, 32) }
func BenchmarkCount4K(b *testing.B) { bmCount(b, Count, 4<<10) }
func BenchmarkCount4M(b *testing.B) { bmCount(b, Count, 4<<20) }
func BenchmarkCount64M(b *testing.B) { bmCount(b, Count, 64<<20) }

func bmCount(b *testing.B, count func([]byte, []byte) int, n int) {
	if len(bmbuf) < n {
		bmbuf = make([]byte, n)
	}
	b.SetBytes(int64(n))
func BenchmarkCount(b *testing.B) {
	benchBytes(b, indexSizes, func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x'
		for i := 0; i < b.N; i++ {
			j := count(buf, buf[n-7:])
			j := Count(buf, buf[n-7:])
			if j != 1 {
				b.Fatal("bad count", j)
			}
		}
		buf[n-1] = '\x00'
	})
}

func BenchmarkCountEasy32(b *testing.B) { bmCountEasy(b, Count, 32) }
func BenchmarkCountEasy4K(b *testing.B) { bmCountEasy(b, Count, 4<<10) }
func BenchmarkCountEasy4M(b *testing.B) { bmCountEasy(b, Count, 4<<20) }
func BenchmarkCountEasy64M(b *testing.B) { bmCountEasy(b, Count, 64<<20) }

func bmCountEasy(b *testing.B, count func([]byte, []byte) int, n int) {
	if len(bmbuf) < n {
		bmbuf = make([]byte, n)
	}
	b.SetBytes(int64(n))
func BenchmarkCountEasy(b *testing.B) {
	benchBytes(b, indexSizes, func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		buf[n-1] = 'x'
		buf[n-7] = 'x'
		for i := 0; i < b.N; i++ {
			j := count(buf, buf[n-7:])
			j := Count(buf, buf[n-7:])
			if j != 1 {
				b.Fatal("bad count", j)
			}
		}
		buf[n-1] = '\x00'
		buf[n-7] = '\x00'
	})
}

type ExplodeTest struct {

@ -1207,6 +1204,57 @@ func TestContains(t *testing.T) {
	}
}

var ContainsAnyTests = []struct {
	b        []byte
	substr   string
	expected bool
}{
	{[]byte(""), "", false},
	{[]byte(""), "a", false},
	{[]byte(""), "abc", false},
	{[]byte("a"), "", false},
	{[]byte("a"), "a", true},
	{[]byte("aaa"), "a", true},
	{[]byte("abc"), "xyz", false},
	{[]byte("abc"), "xcz", true},
	{[]byte("a☺b☻c☹d"), "uvw☻xyz", true},
	{[]byte("aRegExp*"), ".(|)*+?^$[]", true},
	{[]byte(dots + dots + dots), " ", false},
}

func TestContainsAny(t *testing.T) {
	for _, ct := range ContainsAnyTests {
		if ContainsAny(ct.b, ct.substr) != ct.expected {
			t.Errorf("ContainsAny(%s, %s) = %v, want %v",
				ct.b, ct.substr, !ct.expected, ct.expected)
		}
	}
}

var ContainsRuneTests = []struct {
	b        []byte
	r        rune
	expected bool
}{
	{[]byte(""), 'a', false},
	{[]byte("a"), 'a', true},
	{[]byte("aaa"), 'a', true},
	{[]byte("abc"), 'y', false},
	{[]byte("abc"), 'c', true},
	{[]byte("a☺b☻c☹d"), 'x', false},
	{[]byte("a☺b☻c☹d"), '☻', true},
	{[]byte("aRegExp*"), '*', true},
}

func TestContainsRune(t *testing.T) {
	for _, ct := range ContainsRuneTests {
		if ContainsRune(ct.b, ct.r) != ct.expected {
			t.Errorf("ContainsRune(%q, %q) = %v, want %v",
				ct.b, ct.r, !ct.expected, ct.expected)
		}
	}
}

var makeFieldsInput = func() []byte {
	x := make([]byte, 1<<20)
	// Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.

@ -1256,7 +1304,9 @@ func BenchmarkRepeat(b *testing.B) {
	}
}

func benchmarkBytesCompare(b *testing.B, n int) {
func BenchmarkBytesCompare(b *testing.B) {
	for n := 1; n <= 2048; n <<= 1 {
		b.Run(fmt.Sprint(n), func(b *testing.B) {
			var x = make([]byte, n)
			var y = make([]byte, n)

@ -1272,17 +1322,6 @@ func benchmarkBytesCompare(b *testing.B, n int) {
			for i := 0; i < b.N; i++ {
				Compare(x, y)
			}
		})
	}
}

func BenchmarkBytesCompare1(b *testing.B) { benchmarkBytesCompare(b, 1) }
func BenchmarkBytesCompare2(b *testing.B) { benchmarkBytesCompare(b, 2) }
func BenchmarkBytesCompare4(b *testing.B) { benchmarkBytesCompare(b, 4) }
func BenchmarkBytesCompare8(b *testing.B) { benchmarkBytesCompare(b, 8) }
func BenchmarkBytesCompare16(b *testing.B) { benchmarkBytesCompare(b, 16) }
func BenchmarkBytesCompare32(b *testing.B) { benchmarkBytesCompare(b, 32) }
func BenchmarkBytesCompare64(b *testing.B) { benchmarkBytesCompare(b, 64) }
func BenchmarkBytesCompare128(b *testing.B) { benchmarkBytesCompare(b, 128) }
func BenchmarkBytesCompare256(b *testing.B) { benchmarkBytesCompare(b, 256) }
func BenchmarkBytesCompare512(b *testing.B) { benchmarkBytesCompare(b, 512) }
func BenchmarkBytesCompare1024(b *testing.B) { benchmarkBytesCompare(b, 1024) }
func BenchmarkBytesCompare2048(b *testing.B) { benchmarkBytesCompare(b, 2048) }

@ -36,9 +36,6 @@ func (r *Reader) Len() int {
func (r *Reader) Size() int64 { return int64(len(r.s)) }

func (r *Reader) Read(b []byte) (n int, err error) {
	if len(b) == 0 {
		return 0, nil
	}
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
	}

@ -63,14 +60,14 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
	return
}

func (r *Reader) ReadByte() (b byte, err error) {
func (r *Reader) ReadByte() (byte, error) {
	r.prevRune = -1
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
	}
	b = r.s[r.i]
	b := r.s[r.i]
	r.i++
	return
	return b, nil
}

func (r *Reader) UnreadByte() error {

@ -111,11 +108,11 @@ func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	r.prevRune = -1
	var abs int64
	switch whence {
	case 0:
	case io.SeekStart:
		abs = offset
	case 1:
		abs = int64(r.i) + offset
	case 2:
	case io.SeekCurrent:
		abs = r.i + offset
	case io.SeekEnd:
		abs = int64(len(r.s)) + offset
	default:
		return 0, errors.New("bytes.Reader.Seek: invalid whence")
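// Illustrative note (not part of the commit): go1.7 adds named whence
// constants to the io package, replacing both the bare literals 0/1/2 and
// the os.SEEK_* names that this commit removes across the tree:
//
//	io.SeekStart   == 0 // relative to the start of the file
//	io.SeekCurrent == 1 // relative to the current offset
//	io.SeekEnd     == 2 // relative to the end
//
//	r.Seek(0, io.SeekStart) // rewind, instead of r.Seek(0, 0)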
@ -146,5 +143,8 @@ func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// Reset resets the Reader to be reading from b.
|
||||
func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
|
||||
|
||||
// NewReader returns a new Reader reading from b.
|
||||
func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
|
@ -22,17 +21,18 @@ func TestReader(t *testing.T) {
|
|||
n int
|
||||
want string
|
||||
wantpos int64
|
||||
readerr error
|
||||
seekerr string
|
||||
}{
|
||||
{seek: os.SEEK_SET, off: 0, n: 20, want: "0123456789"},
|
||||
{seek: os.SEEK_SET, off: 1, n: 1, want: "1"},
|
||||
{seek: os.SEEK_CUR, off: 1, wantpos: 3, n: 2, want: "34"},
|
||||
{seek: os.SEEK_SET, off: -1, seekerr: "bytes.Reader.Seek: negative position"},
|
||||
{seek: os.SEEK_SET, off: 1 << 33, wantpos: 1 << 33},
|
||||
{seek: os.SEEK_CUR, off: 1, wantpos: 1<<33 + 1},
|
||||
{seek: os.SEEK_SET, n: 5, want: "01234"},
|
||||
{seek: os.SEEK_CUR, n: 5, want: "56789"},
|
||||
{seek: os.SEEK_END, off: -1, n: 1, wantpos: 9, want: "9"},
|
||||
{seek: io.SeekStart, off: 0, n: 20, want: "0123456789"},
|
||||
{seek: io.SeekStart, off: 1, n: 1, want: "1"},
|
||||
{seek: io.SeekCurrent, off: 1, wantpos: 3, n: 2, want: "34"},
|
||||
{seek: io.SeekStart, off: -1, seekerr: "bytes.Reader.Seek: negative position"},
|
||||
{seek: io.SeekStart, off: 1 << 33, wantpos: 1 << 33, readerr: io.EOF},
|
||||
{seek: io.SeekCurrent, off: 1, wantpos: 1<<33 + 1, readerr: io.EOF},
|
||||
{seek: io.SeekStart, n: 5, want: "01234"},
|
||||
{seek: io.SeekCurrent, n: 5, want: "56789"},
|
||||
{seek: io.SeekEnd, off: -1, n: 1, wantpos: 9, want: "9"},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
|
|
@ -50,8 +50,8 @@ func TestReader(t *testing.T) {
		}
		buf := make([]byte, tt.n)
		n, err := r.Read(buf)
		if err != nil {
			t.Errorf("%d. read = %v", i, err)
		if err != tt.readerr {
			t.Errorf("%d. read = %v; want %v", i, err, tt.readerr)
			continue
		}
		got := string(buf[:n])
@ -63,7 +63,7 @@ func TestReader(t *testing.T) {

func TestReadAfterBigSeek(t *testing.T) {
	r := NewReader([]byte("0123456789"))
	if _, err := r.Seek(1<<31+5, os.SEEK_SET); err != nil {
	if _, err := r.Seek(1<<31+5, io.SeekStart); err != nil {
		t.Fatal(err)
	}
	if n, err := r.Read(make([]byte, 10)); n != 0 || err != io.EOF {
@ -174,7 +174,7 @@ func TestReaderLen(t *testing.T) {
		t.Errorf("r.Len(): got %d, want %d", got, want)
	}
	if n, err := r.Read(make([]byte, 1)); err != nil || n != 1 {
		t.Errorf("Read failed: read %d %v", n, err)
		t.Errorf("Read failed: read %d %v; want 1, nil", n, err)
	}
	if got, want := r.Len(), 0; got != want {
		t.Errorf("r.Len(): got %d, want %d", got, want)
@ -188,7 +188,7 @@ var UnreadRuneErrorTests = []struct {
	{"Read", func(r *Reader) { r.Read([]byte{0}) }},
	{"ReadByte", func(r *Reader) { r.ReadByte() }},
	{"UnreadRune", func(r *Reader) { r.UnreadRune() }},
	{"Seek", func(r *Reader) { r.Seek(0, 1) }},
	{"Seek", func(r *Reader) { r.Seek(0, io.SeekCurrent) }},
	{"WriteTo", func(r *Reader) { r.WriteTo(&Buffer{}) }},
}

@ -256,3 +256,23 @@ func TestReaderLenSize(t *testing.T) {
		t.Errorf("Size = %d; want 3", r.Size())
	}
}

func TestReaderReset(t *testing.T) {
	r := NewReader([]byte("世界"))
	if _, _, err := r.ReadRune(); err != nil {
		t.Errorf("ReadRune: unexpected error: %v", err)
	}

	const want = "abcdef"
	r.Reset([]byte(want))
	if err := r.UnreadRune(); err == nil {
		t.Errorf("UnreadRune: expected error, got nil")
	}
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		t.Errorf("ReadAll: unexpected error: %v", err)
	}
	if got := string(buf); got != want {
		t.Errorf("ReadAll: got %q, want %q", got, want)
	}
}

@ -73,7 +73,7 @@ func (f *File) ReadGo(name string) {
	}
	for _, spec := range d.Specs {
		s, ok := spec.(*ast.ImportSpec)
		if !ok || string(s.Path.Value) != `"C"` {
		if !ok || s.Path.Value != `"C"` {
			continue
		}
		sawC = true
@ -106,7 +106,7 @@ func (f *File) ReadGo(name string) {
		ws := 0
		for _, spec := range d.Specs {
			s, ok := spec.(*ast.ImportSpec)
			if !ok || string(s.Path.Value) != `"C"` {
			if !ok || s.Path.Value != `"C"` {
				d.Specs[ws] = spec
				ws++
			}
@ -147,7 +147,7 @@ func commentText(g *ast.CommentGroup) string {
	}
	var pieces []string
	for _, com := range g.List {
		c := string(com.Text)
		c := com.Text
		// Remove comment markers.
		// The parser has given us exactly the comment text.
		switch c[1] {
@ -172,7 +172,7 @@ func (f *File) saveExprs(x interface{}, context string) {
			f.saveRef(x, context)
		}
	case *ast.CallExpr:
		f.saveCall(x)
		f.saveCall(x, context)
	}
}

@ -220,7 +220,7 @@ func (f *File) saveRef(n *ast.Expr, context string) {
}

// Save calls to C.xxx for later processing.
func (f *File) saveCall(call *ast.CallExpr) {
func (f *File) saveCall(call *ast.CallExpr, context string) {
	sel, ok := call.Fun.(*ast.SelectorExpr)
	if !ok {
		return
@ -228,7 +228,8 @@ func (f *File) saveCall(call *ast.CallExpr) {
	if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
		return
	}
	f.Calls = append(f.Calls, call)
	c := &Call{Call: call, Deferred: context == "defer"}
	f.Calls = append(f.Calls, c)
}

// If a function should be exported add it to ExpFunc.
@ -242,11 +243,11 @@ func (f *File) saveExport(x interface{}, context string) {
		return
	}
	for _, c := range n.Doc.List {
		if !strings.HasPrefix(string(c.Text), "//export ") {
		if !strings.HasPrefix(c.Text, "//export ") {
			continue
		}

		name := strings.TrimSpace(string(c.Text[9:]))
		name := strings.TrimSpace(c.Text[9:])
		if name == "" {
			error_(c.Pos(), "export missing name")
		}
@ -401,7 +402,7 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{}
	case *ast.GoStmt:
		f.walk(n.Call, "expr", visit)
	case *ast.DeferStmt:
		f.walk(n.Call, "expr", visit)
		f.walk(n.Call, "defer", visit)
	case *ast.ReturnStmt:
		f.walk(n.Results, "expr", visit)
	case *ast.BranchStmt:
@ -447,7 +448,11 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{}
	case *ast.ImportSpec:
	case *ast.ValueSpec:
		f.walk(&n.Type, "type", visit)
		if len(n.Names) == 2 && len(n.Values) == 1 {
			f.walk(&n.Values[0], "as2", visit)
		} else {
			f.walk(n.Values, "expr", visit)
		}
	case *ast.TypeSpec:
		f.walk(&n.Type, "type", visit)

@ -31,9 +31,9 @@ See $GOROOT/misc/cgo/stdio and $GOROOT/misc/cgo/gmp for examples. See
"C? Go? Cgo!" for an introduction to using cgo:
https://golang.org/doc/articles/c_go_cgo.html.

CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS may be defined with pseudo #cgo
directives within these comments to tweak the behavior of the C or C++
compiler. Values defined in multiple directives are concatenated
CFLAGS, CPPFLAGS, CXXFLAGS, FFLAGS and LDFLAGS may be defined with pseudo
#cgo directives within these comments to tweak the behavior of the C, C++
or Fortran compiler. Values defined in multiple directives are concatenated
together. The directive can include a list of build constraints limiting its
effect to systems satisfying one of the constraints
(see https://golang.org/pkg/go/build/#hdr-Build_Constraints for details about the constraint syntax).
@ -53,7 +53,7 @@ For example:
	// #include <png.h>
	import "C"

When building, the CGO_CFLAGS, CGO_CPPFLAGS, CGO_CXXFLAGS and
When building, the CGO_CFLAGS, CGO_CPPFLAGS, CGO_CXXFLAGS, CGO_FFLAGS and
CGO_LDFLAGS environment variables are added to the flags derived from
these directives. Package-specific flags should be set using the
directives, not the environment variables, so that builds work in
@ -62,10 +62,11 @@ unmodified environments.
All the cgo CPPFLAGS and CFLAGS directives in a package are concatenated and
used to compile C files in that package. All the CPPFLAGS and CXXFLAGS
directives in a package are concatenated and used to compile C++ files in that
package. All the LDFLAGS directives in any package in the program are
concatenated and used at link time. All the pkg-config directives are
concatenated and sent to pkg-config simultaneously to add to each appropriate
set of command-line flags.
package. All the CPPFLAGS and FFLAGS directives in a package are concatenated
and used to compile Fortran files in that package. All the LDFLAGS directives
in any package in the program are concatenated and used at link time. All the
pkg-config directives are concatenated and sent to pkg-config simultaneously
to add to each appropriate set of command-line flags.

When the cgo directives are parsed, any occurrence of the string ${SRCDIR}
will be replaced by the absolute path to the directory containing the source
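To make the directive rules above concrete: a hedged sketch of a cgo preamble combining a build-constrained flag, the new FFLAGS directive, and ${SRCDIR} expansion (the library name and paths are illustrative assumptions, not taken from the diff):

	package png

	/*
	#cgo linux CFLAGS: -DLINUX=1
	#cgo FFLAGS: -std=f2008
	#cgo LDFLAGS: -L${SRCDIR}/libs -lpng
	#include <png.h>
	*/
	import "C"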
@ -83,7 +84,8 @@ When the Go tool sees that one or more Go files use the special import
"C", it will look for other non-Go files in the directory and compile
them as part of the Go package. Any .c, .s, or .S files will be
compiled with the C compiler. Any .cc, .cpp, or .cxx files will be
compiled with the C++ compiler. Any .h, .hh, .hpp, or .hxx files will
compiled with the C++ compiler. Any .f, .F, .for or .f90 files will be
compiled with the fortran compiler. Any .h, .hh, .hpp, or .hxx files will
not be compiled separately, but, if these header files are changed,
the C and C++ files will be recompiled. The default C and C++
compilers may be changed by the CC and CXX environment variables,
@ -133,7 +135,7 @@ C's union types are represented as a Go byte array with the same length.

Go structs cannot embed fields with C types.

Go code can not refer to zero-sized fields that occur at the end of
Go code cannot refer to zero-sized fields that occur at the end of
non-empty C structs. To get the address of such a field (which is the
only operation you can do with a zero-sized field) you must take the
address of the struct and add the size of the struct.
@ -148,8 +150,9 @@ assignment context to retrieve both the return value (if any) and the
C errno variable as an error (use _ to skip the result value if the
function returns void). For example:

	n, err := C.sqrt(-1)
	n, err = C.sqrt(-1)
	_, err := C.voidFunc()
	var n, err = C.sqrt(1)

Calling C function pointers is currently not supported, however you can
declare Go variables which hold C function pointers and pass them
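A fuller, self-contained sketch of the errno idiom shown above (whether sqrt(-1) actually sets errno to EDOM depends on the platform's libm, so the printed error is an assumption):

	package main

	/*
	#include <math.h>
	*/
	import "C"
	import "fmt"

	func main() {
		// The two-result form captures the C errno value as a Go error.
		n, err := C.sqrt(-1)
		fmt.Println(n, err) // NaN and, on most platforms, a non-nil EDOM error
	}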
@ -195,6 +198,13 @@ by making copies of the data. In pseudo-Go definitions:
	// if C.free is needed).
	func C.CString(string) *C.char

	// Go []byte slice to C array
	// The C array is allocated in the C heap using malloc.
	// It is the caller's responsibility to arrange for it to be
	// freed, such as by calling C.free (be sure to include stdlib.h
	// if C.free is needed).
	func C.CBytes([]byte) unsafe.Pointer

	// C string to Go string
	func C.GoString(*C.char) string
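The newly documented C.CBytes mirrors C.CString for byte slices. A hedged usage sketch (the C function consume is hypothetical):

	package main

	/*
	#include <stdlib.h>
	static void consume(void *p, int n) {}
	*/
	import "C"

	func main() {
		b := []byte("hello")
		p := C.CBytes(b)            // copies b into C-malloc'd memory
		defer C.free(p)             // the caller must arrange to free it
		C.consume(p, C.int(len(b))) // hypothetical consumer
	}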
@ -501,7 +511,6 @@ file compiled by gcc, the file x.cgo2.c:
	void
	_cgo_be59f0f25121_Cfunc_puts(void *v)
	{
		_cgo_wait_runtime_init_done();
		struct {
			char* p0;
			int r;
@ -510,8 +519,7 @@ file compiled by gcc, the file x.cgo2.c:
		a->r = puts((void*)a->p0);
	}

It waits for Go runtime to be initialized (required for shared libraries),
extracts the arguments from the pointer to _Cfunc_puts's argument
It extracts the arguments from the pointer to _Cfunc_puts's argument
frame, invokes the system C function (in this case, puts), stores the
result in the frame, and returns.

@ -529,8 +537,8 @@ linkage to the desired libraries. The main function is provided by
_cgo_main.c:

	int main() { return 0; }
	void crosscall2(void(*fn)(void*, int), void *a, int c) { }
	void _cgo_wait_runtime_init_done() { }
	void crosscall2(void(*fn)(void*, int, uintptr_t), void *a, int c, uintptr_t ctxt) { }
	uintptr_t _cgo_wait_runtime_init_done() { }
	void _cgo_allocate(void *a, int c) { }
	void _cgo_panic(void *a, int c) { }

@ -432,7 +432,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) {
			fmt.Fprintf(&b, "\t0,\n")
		}
	}
	// for the last entry, we can not use 0, otherwise
	// for the last entry, we cannot use 0, otherwise
	// in case all __cgodebug_data is zero initialized,
	// LLVM-based gcc will place the it in the __DATA.__common
	// zero-filled section (our debug/macho doesn't support
@ -581,7 +581,7 @@ func (p *Package) mangleName(n *Name) {
func (p *Package) rewriteCalls(f *File) {
	for _, call := range f.Calls {
		// This is a call to C.xxx; set goname to "xxx".
		goname := call.Fun.(*ast.SelectorExpr).Sel.Name
		goname := call.Call.Fun.(*ast.SelectorExpr).Sel.Name
		if goname == "malloc" {
			continue
		}
@ -596,37 +596,60 @@ func (p *Package) rewriteCalls(f *File) {

// rewriteCall rewrites one call to add pointer checks. We replace
// each pointer argument x with _cgoCheckPointer(x).(T).
func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
	for i, param := range name.FuncType.Params {
		if len(call.Args) <= i {
			// Avoid a crash; this will be caught when the
			// generated file is compiled.
func (p *Package) rewriteCall(f *File, call *Call, name *Name) {
	// Avoid a crash if the number of arguments is
	// less than the number of parameters.
	// This will be caught when the generated file is compiled.
	if len(call.Call.Args) < len(name.FuncType.Params) {
		return
	}

		// An untyped nil does not need a pointer check, and
		// when _cgoCheckPointer returns the untyped nil the
		// type assertion we are going to insert will fail.
		// Easier to just skip nil arguments.
		// TODO: Note that this fails if nil is shadowed.
		if id, ok := call.Args[i].(*ast.Ident); ok && id.Name == "nil" {
			continue
	any := false
	for i, param := range name.FuncType.Params {
		if p.needsPointerCheck(f, param.Go, call.Call.Args[i]) {
			any = true
			break
		}
	}
	if !any {
		return
	}

		if !p.needsPointerCheck(f, param.Go) {
	// We need to rewrite this call.
	//
	// We are going to rewrite C.f(p) to C.f(_cgoCheckPointer(p)).
	// If the call to C.f is deferred, that will check p at the
	// point of the defer statement, not when the function is called, so
	// rewrite to func(_cgo0 ptype) { C.f(_cgoCheckPointer(_cgo0)) }(p)

	var dargs []ast.Expr
	if call.Deferred {
		dargs = make([]ast.Expr, len(name.FuncType.Params))
	}
	for i, param := range name.FuncType.Params {
		origArg := call.Call.Args[i]
		darg := origArg

		if call.Deferred {
			dargs[i] = darg
			darg = ast.NewIdent(fmt.Sprintf("_cgo%d", i))
			call.Call.Args[i] = darg
		}

		if !p.needsPointerCheck(f, param.Go, origArg) {
			continue
		}

		c := &ast.CallExpr{
			Fun: ast.NewIdent("_cgoCheckPointer"),
			Args: []ast.Expr{
				call.Args[i],
				darg,
			},
		}

		// Add optional additional arguments for an address
		// expression.
		c.Args = p.checkAddrArgs(f, c.Args, call.Args[i])
		c.Args = p.checkAddrArgs(f, c.Args, origArg)

		// _cgoCheckPointer returns interface{}.
		// We need to type assert that to the type we want.
@ -636,7 +659,7 @@ func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
		// Instead we use a local variant of _cgoCheckPointer.

		var arg ast.Expr
		if n := p.unsafeCheckPointerName(param.Go); n != "" {
		if n := p.unsafeCheckPointerName(param.Go, call.Deferred); n != "" {
			c.Fun = ast.NewIdent(n)
			arg = c
		} else {
@ -664,14 +687,73 @@ func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
			}
		}

		call.Args[i] = arg
		call.Call.Args[i] = arg
	}

	if call.Deferred {
		params := make([]*ast.Field, len(name.FuncType.Params))
		for i, param := range name.FuncType.Params {
			ptype := param.Go
			if p.hasUnsafePointer(ptype) {
				// Avoid generating unsafe.Pointer by using
				// interface{}. This works because we are
				// going to call a _cgoCheckPointer function
				// anyhow.
				ptype = &ast.InterfaceType{
					Methods: &ast.FieldList{},
				}
			}
			params[i] = &ast.Field{
				Names: []*ast.Ident{
					ast.NewIdent(fmt.Sprintf("_cgo%d", i)),
				},
				Type: ptype,
			}
		}

		dbody := &ast.CallExpr{
			Fun:  call.Call.Fun,
			Args: call.Call.Args,
		}
		call.Call.Fun = &ast.FuncLit{
			Type: &ast.FuncType{
				Params: &ast.FieldList{
					List: params,
				},
			},
			Body: &ast.BlockStmt{
				List: []ast.Stmt{
					&ast.ExprStmt{
						X: dbody,
					},
				},
			},
		}
		call.Call.Args = dargs
		call.Call.Lparen = token.NoPos
		call.Call.Rparen = token.NoPos

		// There is a Ref pointing to the old call.Call.Fun.
		for _, ref := range f.Ref {
			if ref.Expr == &call.Call.Fun {
				ref.Expr = &dbody.Fun
			}
		}
	}
}

// needsPointerCheck returns whether the type t needs a pointer check.
// This is true if t is a pointer and if the value to which it points
// might contain a pointer.
func (p *Package) needsPointerCheck(f *File, t ast.Expr) bool {
func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool {
	// An untyped nil does not need a pointer check, and when
	// _cgoCheckPointer returns the untyped nil the type assertion we
	// are going to insert will fail. Easier to just skip nil arguments.
	// TODO: Note that this fails if nil is shadowed.
	if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" {
		return false
	}

	return p.hasPointer(f, t, true)
}
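The dargs machinery above exists because Go evaluates the arguments of a deferred call at the defer statement, while the pointer check must run when the C function is finally called. A runnable illustration of the semantics the rewrite has to preserve (plain Go, no cgo):

	package main

	import "fmt"

	func main() {
		x := 1
		// The argument is captured now, not when the deferred call runs;
		// cgo's rewrite wraps the C call in a func literal so the captured
		// arguments are still checked only at call time.
		defer fmt.Println("deferred saw x =", x)
		x = 2
		fmt.Println("final x =", x) // prints 2; the deferred line prints 1
	}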
@ -819,14 +901,17 @@ func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
func (p *Package) isType(t ast.Expr) bool {
	switch t := t.(type) {
	case *ast.SelectorExpr:
		if t.Sel.Name != "Pointer" {
			return false
		}
		id, ok := t.X.(*ast.Ident)
		if !ok {
			return false
		}
		return id.Name == "unsafe"
		if id.Name == "unsafe" && t.Sel.Name == "Pointer" {
			return true
		}
		if id.Name == "C" && typedef["_Ctype_"+t.Sel.Name] != nil {
			return true
		}
		return false
	case *ast.Ident:
		// TODO: This ignores shadowing.
		switch t.Name {
@ -856,20 +941,31 @@ func (p *Package) isType(t ast.Expr) bool {
// assertion to unsafe.Pointer in our copy of user code. We return
// the name of the _cgoCheckPointer function we are going to build, or
// the empty string if the type does not use unsafe.Pointer.
func (p *Package) unsafeCheckPointerName(t ast.Expr) string {
//
// The deferred parameter is true if this check is for the argument of
// a deferred function. In that case we need to use an empty interface
// as the argument type, because the deferred function we introduce in
// rewriteCall will use an empty interface type, and we can't add a
// type assertion. This is handled by keeping a separate list, and
// writing out the lists separately in writeDefs.
func (p *Package) unsafeCheckPointerName(t ast.Expr, deferred bool) string {
	if !p.hasUnsafePointer(t) {
		return ""
	}
	var buf bytes.Buffer
	conf.Fprint(&buf, fset, t)
	s := buf.String()
	for i, t := range p.CgoChecks {
	checks := &p.CgoChecks
	if deferred {
		checks = &p.DeferredCgoChecks
	}
	for i, t := range *checks {
		if s == t {
			return p.unsafeCheckPointerNameIndex(i)
			return p.unsafeCheckPointerNameIndex(i, deferred)
		}
	}
	p.CgoChecks = append(p.CgoChecks, s)
	return p.unsafeCheckPointerNameIndex(len(p.CgoChecks) - 1)
	*checks = append(*checks, s)
	return p.unsafeCheckPointerNameIndex(len(*checks)-1, deferred)
}

// hasUnsafePointer returns whether the Go type t uses unsafe.Pointer.
@ -897,7 +993,10 @@ func (p *Package) hasUnsafePointer(t ast.Expr) bool {

// unsafeCheckPointerNameIndex returns the name to use for a
// _cgoCheckPointer variant based on the index in the CgoChecks slice.
func (p *Package) unsafeCheckPointerNameIndex(i int) string {
func (p *Package) unsafeCheckPointerNameIndex(i int, deferred bool) string {
	if deferred {
		return fmt.Sprintf("_cgoCheckPointerInDefer%d", i)
	}
	return fmt.Sprintf("_cgoCheckPointer%d", i)
}
@ -1006,7 +1105,7 @@ func (p *Package) rewriteRef(f *File) {
			if r.Name.Kind == "var" {
				expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
			} else {
				error_(r.Pos(), "only C variables allowed in selector expression", fixGo(r.Name.Go))
				error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go))
			}

		case "type":
@ -1086,6 +1185,8 @@ func (p *Package) gccMachine() []string {
		return []string{"-m31"}
	case "s390x":
		return []string{"-m64"}
	case "mips64", "mips64le":
		return []string{"-mabi=64"}
	}
	return nil
}
@ -1238,12 +1339,20 @@ func (p *Package) gccErrors(stdin []byte) string {
	// TODO(rsc): require failure
	args := p.gccCmd()

	// Optimization options can confuse the error messages; remove them.
	nargs := make([]string, 0, len(args))
	for _, arg := range args {
		if !strings.HasPrefix(arg, "-O") {
			nargs = append(nargs, arg)
		}
	}

	if *debugGcc {
		fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(args, " "))
		fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " "))
		os.Stderr.Write(stdin)
		fmt.Fprint(os.Stderr, "EOF\n")
	}
	stdout, stderr, _ := run(stdin, args)
	stdout, stderr, _ := run(stdin, nargs)
	if *debugGcc {
		os.Stderr.Write(stdout)
		os.Stderr.Write(stderr)
@ -1280,7 +1389,6 @@ func runGcc(stdin []byte, args []string) (string, string) {
type typeConv struct {
	// Cache of already-translated or in-progress types.
	m       map[dwarf.Type]*Type
	typedef map[string]ast.Expr

	// Map from types to incomplete pointers to those types.
	ptrs map[dwarf.Type][]*Type
@ -2025,7 +2133,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
		// We can't permit that, because then the size of the Go
		// struct will not be the same as the size of the C struct.
		// Our only option in such a case is to remove the field,
		// which means that it can not be referenced from Go.
		// which means that it cannot be referenced from Go.
		for off > 0 && sizes[len(sizes)-1] == 0 {
			n := len(sizes)
			fld = fld[0 : n-1]

@ -42,7 +42,10 @@ type Package struct {
	GoFiles   []string // list of Go files
	GccFiles  []string // list of gcc output files
	Preamble  string   // collected preamble for _cgo_export.h
	CgoChecks []string // see unsafeCheckPointerName

	// See unsafeCheckPointerName.
	CgoChecks         []string
	DeferredCgoChecks []string
}

// A File collects information about a single Go input file.
@ -52,7 +55,7 @@ type File struct {
	Package  string           // Package name
	Preamble string           // C preamble (doc comment on import "C")
	Ref      []*Ref           // all references to C.xxx in AST
	Calls    []*ast.CallExpr  // all calls to C.xxx in AST
	Calls    []*Call          // all calls to C.xxx in AST
	ExpFunc  []*ExpFunc       // exported functions for this file
	Name     map[string]*Name // map from Go name to Name
}

@ -66,6 +69,12 @@ func nameKeys(m map[string]*Name) []string {
	return ks
}

// A Call refers to a call of a C.xxx function in the AST.
type Call struct {
	Call     *ast.CallExpr
	Deferred bool
}

// A Ref refers to an expression of the form C.xxx in the AST.
type Ref struct {
	Name *Name
@ -245,6 +254,12 @@ func main() {

	goFiles := args[i:]

	for _, arg := range args[:i] {
		if arg == "-fsanitize=thread" {
			tsanProlog = yesTsanProlog
		}
	}

	p := newPackage(args[:i])

	// Record CGO_LDFLAGS from the environment for external linking.

@ -50,14 +50,16 @@ func (p *Package) writeDefs() {
	// Write C main file for using gcc to resolve imports.
	fmt.Fprintf(fm, "int main() { return 0; }\n")
	if *importRuntimeCgo {
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int), void *a, int c) { }\n")
		fmt.Fprintf(fm, "void _cgo_wait_runtime_init_done() { }\n")
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt) { }\n")
		fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done() { return 0; }\n")
		fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__ ctxt) { }\n")
		fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n")
	} else {
		// If we're not importing runtime/cgo, we *are* runtime/cgo,
		// which provides these functions. We just need a prototype.
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int), void *a, int c);\n")
		fmt.Fprintf(fm, "void _cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*, int, __SIZE_TYPE__), void *a, int c, __SIZE_TYPE__ ctxt);\n")
		fmt.Fprintf(fm, "__SIZE_TYPE__ _cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fm, "void _cgo_release_context(__SIZE_TYPE__);\n")
	}
	fmt.Fprintf(fm, "void _cgo_allocate(void *a, int c) { }\n")
	fmt.Fprintf(fm, "void _cgo_panic(void *a, int c) { }\n")
@ -110,7 +112,13 @@ func (p *Package) writeDefs() {
	}

	for i, t := range p.CgoChecks {
		n := p.unsafeCheckPointerNameIndex(i)
		n := p.unsafeCheckPointerNameIndex(i, false)
		fmt.Fprintf(fgo2, "\nfunc %s(p %s, args ...interface{}) %s {\n", n, t, t)
		fmt.Fprintf(fgo2, "\treturn _cgoCheckPointer(p, args...).(%s)\n", t)
		fmt.Fprintf(fgo2, "}\n")
	}
	for i, t := range p.DeferredCgoChecks {
		n := p.unsafeCheckPointerNameIndex(i, true)
		fmt.Fprintf(fgo2, "\nfunc %s(p interface{}, args ...interface{}) %s {\n", n, t)
		fmt.Fprintf(fgo2, "\treturn _cgoCheckPointer(p, args...).(%s)\n", t)
		fmt.Fprintf(fgo2, "}\n")
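For a package whose only checked type is unsafe.Pointer, the two loops above would emit wrappers roughly like the following (a sketch of generated output assembled from the Fprintf formats; the index 0 is illustrative):

	func _cgoCheckPointer0(p unsafe.Pointer, args ...interface{}) unsafe.Pointer {
		return _cgoCheckPointer(p, args...).(unsafe.Pointer)
	}

	// Deferred variant: p is interface{} because the func literal built by
	// rewriteCall cannot carry a type assertion on its parameters.
	func _cgoCheckPointerInDefer0(p interface{}, args ...interface{}) unsafe.Pointer {
		return _cgoCheckPointer(p, args...).(unsafe.Pointer)
	}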
@ -173,10 +181,11 @@ func (p *Package) writeDefs() {
	}
	fmt.Fprintf(fgo2, "\n")

	callsMalloc := false
	for _, key := range nameKeys(p.Name) {
		n := p.Name[key]
		if n.FuncType != nil {
			p.writeDefsFunc(fgo2, n)
			p.writeDefsFunc(fgo2, n, &callsMalloc)
		}
	}

@ -187,6 +196,12 @@ func (p *Package) writeDefs() {
	} else {
		p.writeExports(fgo2, fm, fgcc, fgcch)
	}

	if callsMalloc && !*gccgo {
		fmt.Fprint(fgo2, strings.Replace(cMallocDefGo, "PREFIX", cPrefix, -1))
		fmt.Fprint(fgcc, strings.Replace(strings.Replace(cMallocDefC, "PREFIX", cPrefix, -1), "PACKED", p.packedAttribute(), -1))
	}

	if err := fgcc.Close(); err != nil {
		fatalf("%s", err)
	}
@ -350,7 +365,7 @@ func (p *Package) structType(n *Name) (string, int64) {
	return buf.String(), off
}

func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name) {
func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
	name := n.Go
	gtype := n.FuncType.Go
	void := gtype.Results == nil || len(gtype.Results.List) == 0
@ -439,6 +454,9 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name) {

	if inProlog {
		fmt.Fprint(fgo2, builtinDefs[name])
		if strings.Contains(builtinDefs[name], "_cgo_cmalloc") {
			*callsMalloc = true
		}
		return
	}

@ -458,6 +476,7 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name) {
	}

	fmt.Fprint(fgo2, "\n")
	fmt.Fprint(fgo2, "//go:cgo_unsafe_args\n")
	conf.Fprint(fgo2, fset, d)
	fmt.Fprint(fgo2, " {\n")

@ -507,6 +526,7 @@ func (p *Package) writeOutput(f *File, srcfile string) {
	// Gcc output starts with the preamble.
	fmt.Fprintf(fgcc, "%s\n", f.Preamble)
	fmt.Fprintf(fgcc, "%s\n", gccProlog)
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)

	for _, key := range nameKeys(f.Name) {
		n := f.Name[key]
@ -531,6 +551,7 @@ func fixGo(name string) string {

var isBuiltin = map[string]bool{
	"_Cfunc_CString":   true,
	"_Cfunc_CBytes":    true,
	"_Cfunc_GoString":  true,
	"_Cfunc_GoStringN": true,
	"_Cfunc_GoBytes":   true,
@ -555,6 +576,7 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {

	// Gcc wrapper unpacks the C argument struct
	// and calls the actual C function.
	fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "int\n")
	} else {
@ -563,7 +585,7 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
	fmt.Fprintf(fgcc, "_cgo%s%s(void *v)\n", cPrefix, n.Mangle)
	fmt.Fprintf(fgcc, "{\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "\terrno = 0;\n")
		fmt.Fprintf(fgcc, "\tint _cgo_errno;\n")
	}
	// We're trying to write a gcc struct that matches gc's layout.
	// Use packed attribute to force no padding in this struct in case
@ -573,10 +595,18 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
		// Save the stack top for use below.
		fmt.Fprintf(fgcc, "\tchar *stktop = _cgo_topofstack();\n")
	}
	tr := n.FuncType.Result
	if tr != nil {
		fmt.Fprintf(fgcc, "\t__typeof__(a->r) r;\n")
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "\terrno = 0;\n")
	}
	fmt.Fprintf(fgcc, "\t")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "__typeof__(a->r) r = ")
		if c := t.C.String(); c[len(c)-1] == '*' {
	if tr != nil {
		fmt.Fprintf(fgcc, "r = ")
		if c := tr.C.String(); c[len(c)-1] == '*' {
			fmt.Fprint(fgcc, "(__typeof__(a->r)) ")
		}
	}
@ -598,6 +628,10 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
		fmt.Fprintf(fgcc, "a->p%d", i)
	}
	fmt.Fprintf(fgcc, ");\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n")
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
	if n.FuncType.Result != nil {
		// The cgo call may have caused a stack copy (via a callback).
		// Adjust the return value pointer appropriately.
@ -606,7 +640,7 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
		fmt.Fprintf(fgcc, "\ta->r = r;\n")
	}
	if n.AddError {
		fmt.Fprintf(fgcc, "\treturn errno;\n")
		fmt.Fprintf(fgcc, "\treturn _cgo_errno;\n")
	}
	fmt.Fprintf(fgcc, "}\n")
	fmt.Fprintf(fgcc, "\n")
@ -618,6 +652,7 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
// wrapper, we can't refer to the function, since the reference is in
// a different file.
func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
	fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "%s\n", t.C.String())
	} else {
@ -636,9 +671,13 @@ func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
	}
	fmt.Fprintf(fgcc, ")\n")
	fmt.Fprintf(fgcc, "{\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "\t%s r;\n", t.C.String())
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
	fmt.Fprintf(fgcc, "\t")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "return ")
		fmt.Fprintf(fgcc, "r = ")
		// Cast to void* to avoid warnings due to omitted qualifiers.
		if c := t.C.String(); c[len(c)-1] == '*' {
			fmt.Fprintf(fgcc, "(void*)")
@ -656,6 +695,16 @@ func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
		fmt.Fprintf(fgcc, "p%d", i)
	}
	fmt.Fprintf(fgcc, ");\n")
	fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "\treturn ")
		// Cast to void* to avoid warnings due to omitted qualifiers
		// and explicit incompatible struct types.
		if c := t.C.String(); c[len(c)-1] == '*' {
			fmt.Fprintf(fgcc, "(void*)")
		}
		fmt.Fprintf(fgcc, "r;\n")
	}
	fmt.Fprintf(fgcc, "}\n")
	fmt.Fprintf(fgcc, "\n")
}
@ -679,10 +728,14 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
	p.writeExportHeader(fgcch)

	fmt.Fprintf(fgcc, "/* Created by cgo - DO NOT EDIT. */\n")
	fmt.Fprintf(fgcc, "#include <stdlib.h>\n")
	fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n\n")

	fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int), void *, int);\n")
	fmt.Fprintf(fgcc, "extern void _cgo_wait_runtime_init_done();\n\n")
	fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *, int, __SIZE_TYPE__), void *, int, __SIZE_TYPE__);\n")
	fmt.Fprintf(fgcc, "extern __SIZE_TYPE__ _cgo_wait_runtime_init_done();\n")
	fmt.Fprintf(fgcc, "extern void _cgo_release_context(__SIZE_TYPE__);\n\n")
	fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)

	for _, exp := range p.ExpFunc {
		fn := exp.Func
@ -783,10 +836,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
		}
		fmt.Fprintf(fgcch, "\nextern %s;\n", s)

		fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int);\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *, int, __SIZE_TYPE__);\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
		fmt.Fprintf(fgcc, "\n%s\n", s)
		fmt.Fprintf(fgcc, "{\n")
		fmt.Fprintf(fgcc, "\t_cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fgcc, "\t__SIZE_TYPE__ _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fgcc, "\t%s %v a;\n", ctype, p.packedAttribute())
		if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
			fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
@ -798,7 +852,10 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
			func(i int, aname string, atype ast.Expr) {
				fmt.Fprintf(fgcc, "\ta.p%d = p%d;\n", i, i)
			})
		fmt.Fprintf(fgcc, "\tcrosscall2(_cgoexp%s_%s, &a, %d);\n", cPrefix, exp.ExpName, off)
		fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
		fmt.Fprintf(fgcc, "\tcrosscall2(_cgoexp%s_%s, &a, %d, _cgo_ctxt);\n", cPrefix, exp.ExpName, off)
		fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
		fmt.Fprintf(fgcc, "\t_cgo_release_context(_cgo_ctxt);\n")
		if gccResult != "void" {
			if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
				fmt.Fprintf(fgcc, "\treturn a.r0;\n")
@ -823,10 +880,10 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
		fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
		fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
		fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32) {\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32, ctxt uintptr) {\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
		// The indirect here is converting from a Go function pointer to a C function pointer.
		fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n));\n")
		fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n), ctxt);\n")
		fmt.Fprintf(fgo2, "}\n")

		fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
@ -915,6 +972,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
	fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n")

	fmt.Fprintf(fgcc, "%s\n", gccgoExportFileProlog)
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)

	for _, exp := range p.ExpFunc {
		fn := exp.Func
@ -983,13 +1041,17 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
		fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
		fmt.Fprint(fgcc, "\n")

		fmt.Fprint(fgcc, "\n")
		fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
		fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
		if resultCount > 0 {
			fmt.Fprintf(fgcc, "\t%s r;\n", cRet)
		}
		fmt.Fprintf(fgcc, "\tif(_cgo_wait_runtime_init_done)\n")
		fmt.Fprintf(fgcc, "\t\t_cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
		fmt.Fprint(fgcc, "\t")
		if resultCount > 0 {
			fmt.Fprint(fgcc, "return ")
			fmt.Fprint(fgcc, "r = ")
		}
		fmt.Fprintf(fgcc, "%s(", goName)
		if fn.Recv != nil {
@ -1003,6 +1065,10 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
			fmt.Fprintf(fgcc, "p%d", i)
		})
		fmt.Fprint(fgcc, ");\n")
		fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
		if resultCount > 0 {
			fmt.Fprint(fgcc, "\treturn r;\n")
		}
		fmt.Fprint(fgcc, "}\n")

		// Dummy declaration for _cgo_main.c
@ -1257,6 +1323,36 @@ extern char* _cgo_topofstack(void);
#include <string.h>
`

// Prologue defining TSAN functions in C.
const noTsanProlog = `
#define CGO_NO_SANITIZE_THREAD
#define _cgo_tsan_acquire()
#define _cgo_tsan_release()
`

// This must match the TSAN code in runtime/cgo/libcgo.h.
const yesTsanProlog = `
#define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread))

long long _cgo_sync __attribute__ ((common));

extern void __tsan_acquire(void*);
extern void __tsan_release(void*);

__attribute__ ((unused))
static void _cgo_tsan_acquire() {
	__tsan_acquire(&_cgo_sync);
}

__attribute__ ((unused))
static void _cgo_tsan_release() {
	__tsan_release(&_cgo_sync);
}
`

// Set to yesTsanProlog if we see -fsanitize=thread in the flags for gcc.
var tsanProlog = noTsanProlog

const builtinProlog = `
#include <stddef.h> /* for ptrdiff_t and size_t below */

@ -1269,6 +1365,7 @@ _GoString_ GoString(char *p);
_GoString_ GoStringN(char *p, int l);
_GoBytes_ GoBytes(void *p, int n);
char *CString(_GoString_);
void *CBytes(_GoBytes_);
void *_CMalloc(size_t);
`

@ -1276,11 +1373,8 @@ const goProlog = `
//go:linkname _cgo_runtime_cgocall runtime.cgocall
func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32

//go:linkname _cgo_runtime_cmalloc runtime.cmalloc
func _cgo_runtime_cmalloc(uintptr) unsafe.Pointer

//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr)
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr)

//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, ...interface{}) interface{}
@ -1324,7 +1418,7 @@ func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte {

const cStringDef = `
func _Cfunc_CString(s string) *_Ctype_char {
	p := _cgo_runtime_cmalloc(uintptr(len(s)+1))
	p := _cgo_cmalloc(uint64(len(s)+1))
	pp := (*[1<<30]byte)(p)
	copy(pp[:], s)
	pp[len(s)] = 0
@ -1332,9 +1426,18 @@ func _Cfunc_CString(s string) *_Ctype_char {
}
`

const cBytesDef = `
func _Cfunc_CBytes(b []byte) unsafe.Pointer {
	p := _cgo_cmalloc(uint64(len(b)))
	pp := (*[1<<30]byte)(p)
	copy(pp[:], b)
	return p
}
`

const cMallocDef = `
func _Cfunc__CMalloc(n _Ctype_size_t) unsafe.Pointer {
	return _cgo_runtime_cmalloc(uintptr(n))
	return _cgo_cmalloc(uint64(n))
}
`

@ -1343,9 +1446,54 @@ var builtinDefs = map[string]string{
	"GoStringN": goStringNDef,
	"GoBytes":   goBytesDef,
	"CString":   cStringDef,
	"CBytes":    cBytesDef,
	"_CMalloc":  cMallocDef,
}

// Definitions for C.malloc in Go and in C. We define it ourselves
// since we call it from functions we define, such as C.CString.
// Also, we have historically ensured that C.malloc does not return
// nil even for an allocation of 0.

const cMallocDefGo = `
//go:cgo_import_static _cgoPREFIX_Cfunc__Cmalloc
//go:linkname __cgofn__cgoPREFIX_Cfunc__Cmalloc _cgoPREFIX_Cfunc__Cmalloc
var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte
var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc)

//go:cgo_unsafe_args
func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) {
	_cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0)))
	return
}
`

// cMallocDefC defines the C version of C.malloc for the gc compiler.
// It is defined here because C.CString and friends need a definition.
// We define it by hand, rather than simply inventing a reference to
// C.malloc, because <stdlib.h> may not have been included.
// This is approximately what writeOutputFunc would generate, but
// skips the cgo_topofstack code (which is only needed if the C code
// calls back into Go). This also avoids returning nil for an
// allocation of 0 bytes.
const cMallocDefC = `
CGO_NO_SANITIZE_THREAD
void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
	struct {
		unsigned long long p0;
		void *r1;
	} PACKED *a = v;
	void *ret;
	_cgo_tsan_acquire();
	ret = malloc(a->p0);
	if (ret == 0 && a->p0 == 0) {
		ret = malloc(1);
	}
	a->r1 = ret;
	_cgo_tsan_release();
}
`

func (p *Package) cPrologGccgo() string {
	return strings.Replace(strings.Replace(cPrologGccgo, "PREFIX", cPrefix, -1),
		"GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), -1)
@ -1380,6 +1528,12 @@ const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) {
	return p;
}

void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) {
	char *p = malloc(b.__count);
	memmove(p, b.__values, b.__count);
	return p;
}

struct __go_string _cgoPREFIX_Cfunc_GoString(char *p) {
	intgo len = (p != NULL) ? strlen(p) : 0;
	return __go_byte_array_to_string(p, len);
@ -1505,5 +1659,5 @@ static void GoInit(void) {
	runtime_iscgo = 1;
}

extern void _cgo_wait_runtime_init_done() __attribute__ ((weak));
extern __SIZE_TYPE__ _cgo_wait_runtime_init_done() __attribute__ ((weak));
`

@ -8,6 +8,7 @@ import (
	"bytes"
	"fmt"
	"go/token"
	"io/ioutil"
	"os"
	"os/exec"
)

@ -16,6 +17,43 @@ import (
// It returns the output to standard output and standard error.
// ok indicates whether the command exited successfully.
func run(stdin []byte, argv []string) (stdout, stderr []byte, ok bool) {
	if i := find(argv, "-xc"); i >= 0 && argv[len(argv)-1] == "-" {
		// Some compilers have trouble with standard input.
		// Others have trouble with -xc.
		// Avoid both problems by writing a file with a .c extension.
		f, err := ioutil.TempFile("", "cgo-gcc-input-")
		if err != nil {
			fatalf("%s", err)
		}
		name := f.Name()
		f.Close()
		if err := ioutil.WriteFile(name+".c", stdin, 0666); err != nil {
			os.Remove(name)
			fatalf("%s", err)
		}
		defer os.Remove(name)
		defer os.Remove(name + ".c")

		// Build new argument list without -xc and trailing -.
		new := append(argv[:i:i], argv[i+1:len(argv)-1]...)

		// Since we are going to write the file to a temporary directory,
		// we will need to add -I . explicitly to the command line:
		// any #include "foo" before would have looked in the current
		// directory as the directory "holding" standard input, but now
		// the temporary directory holds the input.
		// We've also run into compilers that reject "-I." but allow "-I", ".",
		// so be sure to use two arguments.
		// This matters mainly for people invoking cgo -godefs by hand.
		new = append(new, "-I", ".")

		// Finish argument list with path to C file.
		new = append(new, name+".c")

		argv = new
		stdin = nil
	}

	p := exec.Command(argv[0], argv[1:]...)
	p.Stdin = bytes.NewReader(stdin)
	var bout, berr bytes.Buffer
@ -30,6 +68,15 @@ func run(stdin []byte, argv []string) (stdout, stderr []byte, ok bool) {
	return
}

func find(argv []string, target string) int {
	for i, arg := range argv {
		if arg == target {
			return i
		}
	}
	return -1
}

func lineno(pos token.Pos) string {
	return fset.Position(pos).String()
}
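The argument-list surgery above relies on the full slice expression argv[:i:i], which caps capacity at i so the append must allocate a fresh backing array instead of clobbering the caller's argv. A runnable sketch (values are illustrative):

	package main

	import "fmt"

	func main() {
		argv := []string{"gcc", "-xc", "-O2", "-"}
		i := 1 // position of "-xc"

		clean := append(argv[:i:i], argv[i+1:len(argv)-1]...)
		clean = append(clean, "input.c")

		fmt.Println(argv)  // [gcc -xc -O2 -], unchanged
		fmt.Println(clean) // [gcc -O2 input.c]
	}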
File diff suppressed because it is too large

@ -48,6 +48,8 @@ When compiling multiple packages or a single non-main package,
|
|||
build compiles the packages but discards the resulting object,
|
||||
serving only as a check that the packages can be built.
|
||||
|
||||
When compiling packages, build ignores files that end in '_test.go'.
|
||||
|
||||
The -o flag, only allowed when compiling a single package,
|
||||
forces build to write the resulting executable or object
|
||||
to the named output file, instead of the default behavior described
|
||||
|
|
@ -65,8 +67,7 @@ and test commands:
|
|||
-p n
|
||||
the number of programs, such as build commands or
|
||||
test binaries, that can be run in parallel.
|
||||
The default is the number of CPUs available, except
|
||||
on darwin/arm which defaults to 1.
|
||||
The default is the number of CPUs available.
|
||||
-race
|
||||
enable data race detection.
|
||||
Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
|
||||
|
|
@ -145,17 +146,6 @@ func init() {
|
|||
|
||||
addBuildFlags(cmdBuild)
|
||||
addBuildFlags(cmdInstall)
|
||||
|
||||
if buildContext.GOOS == "darwin" {
|
||||
switch buildContext.GOARCH {
|
||||
case "arm", "arm64":
|
||||
// darwin/arm cannot run multiple tests simultaneously.
|
||||
// Parallelism is limited in go_darwin_arm_exec, but
|
||||
// also needs to be limited here so go test std does not
|
||||
// timeout tests that waiting to run.
|
||||
buildP = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Flags set by multiple commands.
|
||||
|
|
@ -352,6 +342,11 @@ func buildModeInit() {
|
|||
}
|
||||
return p
|
||||
}
|
||||
switch platform {
|
||||
case "darwin/arm", "darwin/arm64":
|
||||
codegenArg = "-shared"
|
||||
default:
|
||||
}
|
||||
exeSuffix = ".a"
|
||||
ldBuildmode = "c-archive"
|
||||
case "c-shared":
|
||||
|
|
@ -374,6 +369,9 @@ func buildModeInit() {
|
|||
case "android/arm", "android/arm64", "android/amd64", "android/386":
|
||||
codegenArg = "-shared"
|
||||
ldBuildmode = "pie"
|
||||
case "darwin/arm", "darwin/arm64":
|
||||
codegenArg = "-shared"
|
||||
fallthrough
|
||||
default:
|
||||
ldBuildmode = "exe"
|
||||
}
|
||||
|
|
@ -385,7 +383,7 @@ func buildModeInit() {
|
|||
fatalf("-buildmode=pie not supported by gccgo")
|
||||
} else {
|
||||
switch platform {
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le",
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
|
||||
"android/amd64", "android/arm", "android/arm64", "android/386":
|
||||
codegenArg = "-shared"
|
||||
default:
|
||||
|
|
@ -399,7 +397,7 @@ func buildModeInit() {
|
|||
codegenArg = "-fPIC"
|
||||
} else {
|
||||
switch platform {
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
|
||||
default:
|
||||
fatalf("-buildmode=shared not supported on %s\n", platform)
|
||||
}
|
||||
|
|
@ -417,7 +415,7 @@ func buildModeInit() {
|
|||
codegenArg = "-fPIC"
|
||||
} else {
|
||||
switch platform {
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
|
||||
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
|
||||
buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
|
||||
default:
|
||||
fatalf("-linkshared not supported on %s\n", platform)
|
||||
|
|
@ -483,6 +481,7 @@ func runBuild(cmd *Command, args []string) {
|
|||
p := pkgs[0]
|
||||
p.target = *buildO
|
||||
p.Stale = true // must build - not up to date
|
||||
p.StaleReason = "build -o flag in use"
|
||||
a := b.action(modeInstall, depMode, p)
|
||||
b.do(a)
|
||||
return
|
||||
|
|
@ -681,6 +680,7 @@ var (
|
|||
func init() {
|
||||
goarch = buildContext.GOARCH
|
||||
goos = buildContext.GOOS
|
||||
|
||||
if goos == "windows" {
|
||||
exeSuffix = ".exe"
|
||||
}
|
||||
|
|
@ -694,6 +694,7 @@ type builder struct {
|
|||
work string // the temporary work directory (ends in filepath.Separator)
|
||||
actionCache map[cacheKey]*action // a cache of already-constructed actions
|
||||
mkdirCache map[string]bool // a cache of created directories
|
||||
flagCache map[string]bool // a cache of supported compiler flags
|
||||
print func(args ...interface{}) (int, error)
|
||||
|
||||
output sync.Mutex
|
||||
|
|
@ -853,6 +854,7 @@ func goFilesPackage(gofiles []string) *Package {
|
|||
|
||||
pkg.Target = pkg.target
|
||||
pkg.Stale = true
|
||||
pkg.StaleReason = "files named on command line"
|
||||
|
||||
computeStale(pkg)
|
||||
return pkg
|
||||
|
|
@ -1227,6 +1229,14 @@ func allArchiveActions(root *action) []*action {
|
|||
|
||||
// do runs the action graph rooted at root.
|
||||
func (b *builder) do(root *action) {
|
||||
/* Commented out for gccgo, which does not have osArchSupportsCgo.
|
||||
|
||||
if _, ok := osArchSupportsCgo[goos+"/"+goarch]; !ok && buildContext.Compiler == "gc" {
|
||||
fmt.Fprintf(os.Stderr, "cmd/go: unsupported GOOS/GOARCH pair %s/%s\n", goos, goarch)
|
||||
os.Exit(2)
|
||||
}
|
||||
*/
|
||||
|
||||
// Build list of all actions, assigning depth-first post-order priority.
|
||||
// The original implementation here was a true queue
|
||||
// (using a channel) but it had the effect of getting
|
||||
|
|
@ -1331,18 +1341,15 @@ func (b *builder) do(root *action) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
// hasString reports whether s appears in the list of strings.
|
||||
func hasString(strings []string, s string) bool {
|
||||
for _, t := range strings {
|
||||
if s == t {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// build is the action for building a single package or command.
|
||||
func (b *builder) build(a *action) (err error) {
|
||||
// Return an error for binary-only package.
|
||||
// We only reach this if isStale believes the binary form is
|
||||
// either not present or not usable.
|
||||
if a.p.BinaryOnly {
|
||||
return fmt.Errorf("missing or invalid package binary for binary-only package %s", a.p.ImportPath)
|
||||
}
|
||||
|
||||
// Return an error if the package has CXX files but it's not using
|
||||
// cgo nor SWIG, since the CXX files can only be processed by cgo
|
||||
// and SWIG.
|
||||
|
|
@ -1355,6 +1362,12 @@ func (b *builder) build(a *action) (err error) {
|
|||
return fmt.Errorf("can't build package %s because it contains Objective-C files (%s) but it's not using cgo nor SWIG",
|
||||
a.p.ImportPath, strings.Join(a.p.MFiles, ","))
|
||||
}
|
||||
// Same as above for Fortran files
|
||||
if len(a.p.FFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() {
|
||||
return fmt.Errorf("can't build package %s because it contains Fortran files (%s) but it's not using cgo nor SWIG",
|
||||
a.p.ImportPath, strings.Join(a.p.FFiles, ","))
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil && err != errPrintedOutput {
|
||||
err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err)
|
||||
|
|
@ -1421,6 +1434,8 @@ func (b *builder) build(a *action) (err error) {
|
|||
// cgo and non-cgo worlds, so it necessarily has files in both.
|
||||
// In that case gcc only gets the gcc_* files.
|
||||
var gccfiles []string
|
||||
gccfiles = append(gccfiles, cfiles...)
|
||||
cfiles = nil
|
||||
if a.p.Standard && a.p.ImportPath == "runtime/cgo" {
|
||||
filter := func(files, nongcc, gcc []string) ([]string, []string) {
|
||||
for _, f := range files {
|
||||
|
|
@ -1432,11 +1447,9 @@ func (b *builder) build(a *action) (err error) {
|
|||
}
|
||||
return nongcc, gcc
|
||||
}
|
||||
cfiles, gccfiles = filter(cfiles, cfiles[:0], gccfiles)
|
||||
sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
|
||||
} else {
|
||||
gccfiles = append(cfiles, sfiles...)
|
||||
cfiles = nil
|
||||
gccfiles = append(gccfiles, sfiles...)
|
||||
sfiles = nil
|
||||
}
|
||||
|
||||
|
|
@ -1444,7 +1457,7 @@ func (b *builder) build(a *action) (err error) {
|
|||
if a.cgo != nil && a.cgo.target != "" {
|
||||
cgoExe = a.cgo.target
|
||||
}
|
||||
outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles)
|
||||
outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles, a.p.FFiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -2180,7 +2193,6 @@ func mkAbs(dir, f string) string {
|
|||
type toolchain interface {
|
||||
// gc runs the compiler in a specific directory on a set of files
|
||||
// and returns the name of the generated output file.
|
||||
// The compiler runs in the directory dir.
|
||||
gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error)
|
||||
// cc runs the toolchain's C compiler in a directory on a C file
|
||||
// to produce an output file.
|
||||
|
|
@ -2282,7 +2294,7 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool,
|
|||
// so that it can give good error messages about forward declarations.
|
||||
// Exceptions: a few standard packages have forward declarations for
|
||||
// pieces supplied behind-the-scenes by package runtime.
|
||||
extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
|
||||
extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
|
||||
if p.Standard {
|
||||
switch p.ImportPath {
|
||||
case "bytes", "net", "os", "runtime/pprof", "sync", "time":
|
||||
|
|
@ -2334,7 +2346,15 @@ func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
|
|||
// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
|
||||
inc := filepath.Join(goroot, "pkg", "include")
|
||||
sfile = mkAbs(p.Dir, sfile)
|
||||
args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile}
|
||||
args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags}
|
||||
if p.ImportPath == "runtime" && goarch == "386" {
|
||||
for _, arg := range buildAsmflags {
|
||||
if arg == "-dynlink" {
|
||||
args = append(args, "-D=GOBUILDMODE_shared=1")
|
||||
}
|
||||
}
|
||||
}
|
||||
args = append(args, sfile)
|
||||
if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@@ -2381,9 +2401,11 @@ func (gcToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []s

// The archive file should have been created by the compiler.
// Since it used to not work that way, verify.
if !buildN {
if _, err := os.Stat(absAfile); err != nil {
fatalf("os.Stat of archive file failed: %v", err)
}
}

if buildN || buildX {
cmdline := stringList("pack", "r", absAfile, absOfiles)
@@ -2629,18 +2651,23 @@ func (gccgoToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles
return b.run(p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objDir, afile), absOfiles)
}

func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
func (tools gccgoToolchain) link(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string, buildmode, desc string) error {
// gccgo needs explicit linking with all package dependencies,
// and all LDFLAGS from cgo dependencies.
apackagesSeen := make(map[*Package]bool)
apackagePathsSeen := make(map[string]bool)
afiles := []string{}
shlibs := []string{}
xfiles := []string{}
ldflags := b.gccArchArgs()
cgoldflags := []string{}
usesCgo := false
cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
objc := len(root.p.MFiles) > 0
cxx := false
objc := false
fortran := false
if root.p != nil {
cxx = len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
objc = len(root.p.MFiles) > 0
fortran = len(root.p.FFiles) > 0
}

readCgoFlags := func(flagsFile string) error {
flags, err := ioutil.ReadFile(flagsFile)
@@ -2687,11 +2714,11 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
}

newarchive := newa.Name()
err = b.run(b.work, root.p.ImportPath, nil, "ar", "x", newarchive, "_cgo_flags")
err = b.run(b.work, desc, nil, "ar", "x", newarchive, "_cgo_flags")
if err != nil {
return "", err
}
err = b.run(".", root.p.ImportPath, nil, "ar", "d", newarchive, "_cgo_flags")
err = b.run(".", desc, nil, "ar", "d", newarchive, "_cgo_flags")
if err != nil {
return "", err
}
@@ -2723,10 +2750,10 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
// rather than the 'build' location (which may not exist any
// more). We still need to traverse the dependencies of the
// build action though so saying
// if apackagesSeen[a.p] { return }
// if apackagePathsSeen[a.p.ImportPath] { return }
// doesn't work.
if !apackagesSeen[a.p] {
apackagesSeen[a.p] = true
if !apackagePathsSeen[a.p.ImportPath] {
apackagePathsSeen[a.p.ImportPath] = true
target := a.target
if len(a.p.CgoFiles) > 0 {
target, err = readAndRemoveCgoFlags(target)
@@ -2734,19 +2761,9 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
return
}
}
if a.p.fake && a.p.external {
// external _tests, if present must come before
// internal _tests. Store these on a separate list
// and place them at the head after this loop.
xfiles = append(xfiles, target)
} else if a.p.fake {
// move _test files to the top of the link order
afiles = append([]string{target}, afiles...)
} else {
afiles = append(afiles, target)
}
}
}
if strings.HasSuffix(a.target, ".so") {
shlibs = append(shlibs, a.target)
seenShlib = true
@@ -2764,7 +2781,6 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
return err
}
}
afiles = append(xfiles, afiles...)

for _, a := range allactions {
// Gather CgoLDFLAGS, but not from standard packages.
@@ -2789,6 +2805,9 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
if len(a.p.MFiles) > 0 {
objc = true
}
if len(a.p.FFiles) > 0 {
fortran = true
}
}

for i, o := range ofiles {
@@ -2805,7 +2824,9 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions

ldflags = append(ldflags, cgoldflags...)
ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
if root.p != nil {
ldflags = append(ldflags, root.p.CgoLDFLAGS...)
}

ldflags = stringList("-Wl,-(", ldflags, "-Wl,-)")

@@ -2820,7 +2841,7 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
}

var realOut string
switch ldBuildmode {
switch buildmode {
case "exe":
if usesCgo && goos == "linux" {
ldflags = append(ldflags, "-Wl,-E")
@@ -2855,12 +2876,14 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions

case "c-shared":
ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
case "shared":
ldflags = append(ldflags, "-zdefs", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")

default:
fatalf("-buildmode=%s not supported for gccgo", ldBuildmode)
fatalf("-buildmode=%s not supported for gccgo", buildmode)
}

switch ldBuildmode {
switch buildmode {
case "exe", "c-shared":
if cxx {
ldflags = append(ldflags, "-lstdc++")
@@ -2868,43 +2891,40 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
if objc {
ldflags = append(ldflags, "-lobjc")
}
if fortran {
fc := os.Getenv("FC")
if fc == "" {
fc = "gfortran"
}
// support gfortran out of the box and let others pass the correct link options
// via CGO_LDFLAGS
if strings.Contains(fc, "gfortran") {
ldflags = append(ldflags, "-lgfortran")
}
}
}

if err := b.run(".", root.p.ImportPath, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil {
if err := b.run(".", desc, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil {
return err
}

switch ldBuildmode {
switch buildmode {
case "c-archive":
if err := b.run(".", root.p.ImportPath, nil, "ar", "rc", realOut, out); err != nil {
if err := b.run(".", desc, nil, "ar", "rc", realOut, out); err != nil {
return err
}
}
return nil
}

func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
return tools.link(b, root, out, allactions, mainpkg, ofiles, ldBuildmode, root.p.ImportPath)
}

func (tools gccgoToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
args := []string{"-o", out, "-shared", "-nostdlib", "-zdefs", "-Wl,--whole-archive"}
for _, a := range toplevelactions {
args = append(args, a.target)
}
args = append(args, "-Wl,--no-whole-archive", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
shlibs := []string{}
for _, a := range allactions {
if strings.HasSuffix(a.target, ".so") {
shlibs = append(shlibs, a.target)
}
}
for _, shlib := range shlibs {
args = append(
args,
"-L"+filepath.Dir(shlib),
"-Wl,-rpath="+filepath.Dir(shlib),
"-l"+strings.TrimSuffix(
strings.TrimPrefix(filepath.Base(shlib), "lib"),
".so"))
}
return b.run(".", out, nil, tools.linker(), args, buildGccgoflags)
fakeRoot := &action{}
fakeRoot.deps = toplevelactions
return tools.link(b, fakeRoot, out, allactions, "", nil, "shared", out)
}

func (tools gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
@@ -2962,6 +2982,11 @@ func (b *builder) gxx(p *Package, out string, flags []string, cxxfile string) er
return b.ccompile(p, out, flags, cxxfile, b.gxxCmd(p.Dir))
}

// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file.
func (b *builder) gfortran(p *Package, out string, flags []string, ffile string) error {
return b.ccompile(p, out, flags, ffile, b.gfortranCmd(p.Dir))
}

// ccompile runs the given C or C++ compiler and creates an object from a single source file.
func (b *builder) ccompile(p *Package, out string, flags []string, file string, compiler []string) error {
file = mkAbs(p.Dir, file)
@@ -2991,6 +3016,11 @@ func (b *builder) gxxCmd(objdir string) []string {
return b.ccompilerCmd("CXX", defaultCXX, objdir)
}

// gfortranCmd returns a gfortran command line prefix.
func (b *builder) gfortranCmd(objdir string) []string {
return b.ccompilerCmd("FC", "gfortran", objdir)
}

// ccompilerCmd returns a command line prefix for the given environment
// variable and using the default command when the variable is empty.
func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
@@ -3028,6 +3058,17 @@ func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
// disable word wrapping in error messages
a = append(a, "-fmessage-length=0")

// Tell gcc not to include the work directory in object files.
if b.gccSupportsFlag("-fdebug-prefix-map=a=b") {
a = append(a, "-fdebug-prefix-map="+b.work+"=/tmp/go-build")
}

// Tell gcc not to include flags in object files, which defeats the
// point of -fdebug-prefix-map above.
if b.gccSupportsFlag("-gno-record-gcc-switches") {
a = append(a, "-gno-record-gcc-switches")
}

// On OS X, some of the compilers behave as if -fno-common
// is always set, and the Mach-O linker in 6l/8l assumes this.
// See https://golang.org/issue/3253.
@@ -3042,19 +3083,24 @@ func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
// -no-pie must be passed when doing a partial link with -Wl,-r. But -no-pie is
// not supported by all compilers.
func (b *builder) gccSupportsNoPie() bool {
if goos != "linux" {
// On some BSD platforms, error messages from the
// compiler make it to the console despite cmd.Std*
// all being nil. As -no-pie is only required on linux
// systems so far, we only test there.
return false
return b.gccSupportsFlag("-no-pie")
}

// gccSupportsFlag checks to see if the compiler supports a flag.
func (b *builder) gccSupportsFlag(flag string) bool {
b.exec.Lock()
defer b.exec.Unlock()
if b, ok := b.flagCache[flag]; ok {
return b
}
if b.flagCache == nil {
src := filepath.Join(b.work, "trivial.c")
if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
return false
}
cmdArgs := b.gccCmd(b.work)
cmdArgs = append(cmdArgs, "-no-pie", "-c", "trivial.c")
b.flagCache = make(map[string]bool)
}
cmdArgs := append(envList("CC", defaultCC), flag, "-c", "trivial.c")
if buildN || buildX {
b.showcmd(b.work, "%s", joinUnambiguously(cmdArgs))
if buildN {
@@ -3063,9 +3109,11 @@ func (b *builder) gccSupportsNoPie() bool {
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
cmd.Dir = b.work
cmd.Env = envForDir(cmd.Dir, os.Environ())
cmd.Env = mergeEnvLists([]string{"LC_ALL=C"}, envForDir(cmd.Dir, os.Environ()))
out, err := cmd.CombinedOutput()
return err == nil && !bytes.Contains(out, []byte("unrecognized"))
supported := err == nil && !bytes.Contains(out, []byte("unrecognized"))
b.flagCache[flag] = supported
return supported
}

// gccArchArgs returns arguments to pass to gcc based on the architecture.
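The gccSupportsFlag hunk above probes the compiler by building an empty C file once per flag and caching the verdict under a lock. A self-contained sketch of that probe-and-cache pattern (hypothetical package and names, not the cmd/go implementation):

// Package flagprobe is a minimal sketch of the probe-and-cache pattern
// used by gccSupportsFlag above; all names here are illustrative only.
package flagprobe

import (
	"bytes"
	"io/ioutil"
	"os/exec"
	"path/filepath"
	"sync"
)

type Prober struct {
	mu    sync.Mutex
	cache map[string]bool
	Work  string   // scratch directory, assumed to exist
	CC    []string // compiler command prefix, e.g. []string{"gcc"}
}

// Supports reports whether the compiler accepts flag. The first probe
// for a flag compiles an empty file; later calls hit the cache.
func (p *Prober) Supports(flag string) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ok, seen := p.cache[flag]; seen {
		return ok
	}
	if p.cache == nil {
		p.cache = make(map[string]bool)
		// An empty source file is enough to exercise flag parsing.
		if err := ioutil.WriteFile(filepath.Join(p.Work, "trivial.c"), []byte{}, 0666); err != nil {
			return false
		}
	}
	args := append(append([]string{}, p.CC...), flag, "-c", "trivial.c")
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = p.Work
	out, err := cmd.CombinedOutput()
	// Some compilers exit 0 but warn about unknown flags, so also
	// scan the output, as the real code does.
	ok := err == nil && !bytes.Contains(out, []byte("unrecognized"))
	p.cache[flag] = ok
	return ok
}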
@@ -3077,6 +3125,10 @@ func (b *builder) gccArchArgs() []string {
return []string{"-m64"}
case "arm":
return []string{"-marm"} // not thumb
case "s390x":
return []string{"-m64", "-march=z196"}
case "mips64", "mips64le":
return []string{"-mabi=64"}
}
return nil
}
@@ -3091,8 +3143,8 @@ func envList(key, def string) []string {
return strings.Fields(v)
}

// Return the flags to use when invoking the C or C++ compilers, or cgo.
func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
// Return the flags to use when invoking the C, C++ or Fortran compilers, or cgo.
func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, fflags, ldflags []string) {
var defaults string
if def {
defaults = "-g -O2"
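envList, whose tail is visible in the context above, is what turns CGO_FFLAGS and the other CGO_* variables into flag slices: the variable's value if set, otherwise a default, split on whitespace. A standalone sketch of that behavior (reimplemented here purely for illustration, assuming the standard definition):

package main

import (
	"fmt"
	"os"
	"strings"
)

// envList mirrors the helper shown above: the value of key split into
// fields, or def split into fields when the variable is unset or empty.
func envList(key, def string) []string {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return strings.Fields(v)
}

func main() {
	os.Setenv("CGO_FFLAGS", "-g -O2 -fbacktrace")
	fmt.Println(envList("CGO_FFLAGS", "-g -O2")) // [-g -O2 -fbacktrace]
	os.Unsetenv("CGO_FFLAGS")
	fmt.Println(envList("CGO_FFLAGS", "-g -O2")) // [-g -O2] (the default)
}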
@@ -3101,15 +3153,16 @@ func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ldfl
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
fflags = stringList(envList("CGO_FFLAGS", defaults), p.CgoFFLAGS)
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
return
}

var cgoRe = regexp.MustCompile(`[/\\:]`)

func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) {
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoLDFLAGS := b.cflags(p, true)
_, cgoexeCFLAGS, _, _ := b.cflags(p, false)
func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) {
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS := b.cflags(p, true)
_, cgoexeCFLAGS, _, _, _ := b.cflags(p, false)
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
// If we are compiling Objective-C code, then we need to link against libobjc
@@ -3117,6 +3170,19 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc")
}

// Likewise for Fortran, except there are many Fortran compilers.
// Support gfortran out of the box and let others pass the correct link options
// via CGO_LDFLAGS
if len(ffiles) > 0 {
fc := os.Getenv("FC")
if fc == "" {
fc = "gfortran"
}
if strings.Contains(fc, "gfortran") {
cgoLDFLAGS = append(cgoLDFLAGS, "-lgfortran")
}
}

if buildMSan && p.ImportPath != "runtime/cgo" {
cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
@@ -3284,6 +3350,17 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
outObj = append(outObj, ofile)
}

fflags := stringList(cgoCPPFLAGS, cgoFFLAGS)
for _, file := range ffiles {
// Append .o to the file, just in case the pkg has file.c and file.f
ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
if err := b.gfortran(p, ofile, fflags, file); err != nil {
return nil, nil, err
}
linkobj = append(linkobj, ofile)
outObj = append(outObj, ofile)
}

linkobj = append(linkobj, p.SysoFiles...)
dynobj := obj + "_cgo_.o"
pie := (goarch == "arm" && goos == "linux") || goos == "android"
@@ -3449,6 +3526,13 @@ func (b *builder) swigVersionCheck() error {
return swigCheck
}

// Find the value to pass for the -intgosize option to swig.
var (
swigIntSizeOnce sync.Once
swigIntSize string
swigIntSizeError error
)

// This code fails to build if sizeof(int) <= 32
const swigIntSizeCode = `
package main
@@ -3456,8 +3540,8 @@ const i int = 1 << 32
`

// Determine the size of int on the target system for the -intgosize option
// of swig >= 2.0.9
func (b *builder) swigIntSize(obj string) (intsize string, err error) {
// of swig >= 2.0.9. Run only once.
func (b *builder) swigDoIntSize(obj string) (intsize string, err error) {
if buildN {
return "$INTBITS", nil
}
@@ -3475,9 +3559,18 @@ func (b *builder) swigIntSize(obj string) (intsize string, err error) {
return "64", nil
}

// Determine the size of int on the target system for the -intgosize option
// of swig >= 2.0.9.
func (b *builder) swigIntSize(obj string) (intsize string, err error) {
swigIntSizeOnce.Do(func() {
swigIntSize, swigIntSizeError = b.swigDoIntSize(obj)
})
return swigIntSize, swigIntSizeError
}

// Run SWIG on one SWIG input file.
func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true)
cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _ := b.cflags(p, true)
var cflags []string
if cxx {
cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
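The swig hunks above replace a repeated probe with a once-computed, cached result. A self-contained sketch of that sync.Once memoization pattern (illustrative names, not the cmd/go code):

package main

import (
	"fmt"
	"sync"
)

var (
	sizeOnce sync.Once
	size     string
	sizeErr  error
)

// cachedSize runs probe at most once; every later caller, on any
// goroutine, sees the memoized value and error.
func cachedSize(probe func() (string, error)) (string, error) {
	sizeOnce.Do(func() {
		size, sizeErr = probe()
	})
	return size, sizeErr
}

func main() {
	calls := 0
	probe := func() (string, error) {
		calls++
		return "64", nil
	}
	cachedSize(probe)
	v, err := cachedSize(probe)
	fmt.Println(v, err, calls) // 64 <nil> 1
}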
@@ -26,6 +26,7 @@ func newContext(c *build.Context) *Context {
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.GOROOT,
GOPATH: c.GOPATH,
CgoEnabled: c.CgoEnabled,
UseAllFiles: c.UseAllFiles,
Compiler: c.Compiler,
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate ./mkalldocs.sh

package main

var cmdDoc = &Command{
@@ -33,11 +33,6 @@ func mkEnv() []envVar {
var b builder
b.init()

vendorExpValue := "0"
if go15VendorExperiment {
vendorExpValue = "1"
}

env := []envVar{
{"GOARCH", goarch},
{"GOBIN", gobin},
@@ -49,7 +44,6 @@ func mkEnv() []envVar {
{"GORACE", os.Getenv("GORACE")},
{"GOROOT", goroot},
{"GOTOOLDIR", toolDir},
{"GO15VENDOREXPERIMENT", vendorExpValue},

// disable escape codes in clang errors
{"TERM", "dumb"},
@@ -14,10 +14,8 @@ import (
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"unicode"
)

var cmdGenerate = &Command{
@@ -277,8 +275,8 @@ func isGoGenerate(buf []byte) bool {
// single go:generate command.
func (g *Generator) setEnv() {
g.env = []string{
"GOARCH=" + runtime.GOARCH,
"GOOS=" + runtime.GOOS,
"GOARCH=" + buildContext.GOARCH,
"GOOS=" + buildContext.GOOS,
"GOFILE=" + g.file,
"GOLINE=" + strconv.Itoa(g.lineNum),
"GOPACKAGE=" + g.pkg,
@@ -371,17 +369,6 @@ func (g *Generator) expandVar(word string) string {
return os.Getenv(word)
}

// identLength returns the length of the identifier beginning the string.
func (g *Generator) identLength(word string) int {
for i, r := range word {
if r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) {
continue
}
return i
}
return len(word)
}

// setShorthand installs a new shorthand as defined by a -command directive.
func (g *Generator) setShorthand(words []string) {
// Create command shorthand.
@@ -19,8 +19,8 @@ var cmdGet = &Command{
UsageLine: "get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]",
Short: "download and install packages and dependencies",
Long: `
Get downloads and installs the packages named by the import paths,
along with their dependencies.
Get downloads the packages named by the import paths, along with their
dependencies. It then installs the named packages, like 'go install'.

The -d flag instructs get to stop after downloading the packages; that is,
it instructs get not to install the packages.
@@ -55,8 +55,7 @@ rule is that if the local installation is running version "go1", get
searches for a branch or tag named "go1". If no such version exists it
retrieves the most recent version of the package.

Unless vendoring support is disabled (see 'go help gopath'),
when go get checks out or updates a Git repository,
When go get checks out or updates a Git repository,
it also updates any git submodules referenced by the repository.

Get never checks out or updates code stored in vendor directories.
@@ -119,6 +118,14 @@ func runGet(cmd *Command, args []string) {
delete(packageCache, name)
}

// In order to rebuild packages information completely,
// we need to clear commands cache. Command packages are
// referring to evicted packages from the package cache.
// This leads to duplicated loads of the standard packages.
for name := range cmdCache {
delete(cmdCache, name)
}

args = importPaths(args)
packagesForBuild(args)

@@ -228,16 +235,6 @@ func download(arg string, parent *Package, stk *importStack, mode int) {
stk.pop()
return
}

// Warn that code.google.com is shutting down. We
// issue the warning here because this is where we
// have the import stack.
if strings.HasPrefix(p.ImportPath, "code.google.com") {
fmt.Fprintf(os.Stderr, "warning: code.google.com is shutting down; import path %v will stop working\n", p.ImportPath)
if len(*stk) > 1 {
fmt.Fprintf(os.Stderr, "warning: package %v\n", strings.Join(*stk, "\n\timports "))
}
}
stk.pop()

args := []string{arg}
@@ -348,7 +345,7 @@ func downloadPackage(p *Package) error {

if p.build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src.
vcs, rootPath, err = vcsForDir(p)
vcs, rootPath, err = vcsFromDir(p.Dir, p.build.SrcRoot)
if err != nil {
return err
}
@@ -356,7 +353,7 @@ func downloadPackage(p *Package) error {

// Double-check where it came from.
if *getU && vcs.remoteRepo != nil {
dir := filepath.Join(p.build.SrcRoot, rootPath)
dir := filepath.Join(p.build.SrcRoot, filepath.FromSlash(rootPath))
remote, err := vcs.remoteRepo(vcs, dir)
if err != nil {
return err
@@ -403,7 +400,7 @@ func downloadPackage(p *Package) error {
p.build.SrcRoot = filepath.Join(list[0], "src")
p.build.PkgRoot = filepath.Join(list[0], "pkg")
}
root := filepath.Join(p.build.SrcRoot, rootPath)
root := filepath.Join(p.build.SrcRoot, filepath.FromSlash(rootPath))
// If we've considered this repository already, don't do it again.
if downloadRootCache[root] {
return nil
@@ -421,18 +421,6 @@ func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int {
return c
}

// grepCountStdout returns the number of times a regexp is seen in
// standard output.
func (tg *testgoData) grepCountStdout(match string) int {
return tg.doGrepCount(match, &tg.stdout)
}

// grepCountStderr returns the number of times a regexp is seen in
// standard error.
func (tg *testgoData) grepCountStderr(match string) int {
return tg.doGrepCount(match, &tg.stderr)
}

// grepCountBoth returns the number of times a regexp is seen in both
// standard output and standard error.
func (tg *testgoData) grepCountBoth(match string) int {
@@ -501,6 +489,16 @@ func (tg *testgoData) path(name string) string {
return filepath.Join(tg.tempdir, name)
}

// mustExist fails if path does not exist.
func (tg *testgoData) mustExist(path string) {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
tg.t.Fatalf("%s does not exist but should", path)
}
tg.t.Fatalf("%s stat failed: %v", path, err)
}
}

// mustNotExist fails if path exists.
func (tg *testgoData) mustNotExist(path string) {
if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
@@ -536,32 +534,43 @@ func (tg *testgoData) wantArchive(path string) {
}
}

// isStale returns whether pkg is stale.
func (tg *testgoData) isStale(pkg string) bool {
tg.run("list", "-f", "{{.Stale}}", pkg)
switch v := strings.TrimSpace(tg.getStdout()); v {
// isStale reports whether pkg is stale, and why
func (tg *testgoData) isStale(pkg string) (bool, string) {
tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg)
v := strings.TrimSpace(tg.getStdout())
f := strings.SplitN(v, ":", 2)
if len(f) == 2 {
switch f[0] {
case "true":
return true
return true, f[1]
case "false":
return false
default:
return false, f[1]
}
}
tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v)
panic("unreachable")
}
}

// wantStale fails with msg if pkg is not stale.
func (tg *testgoData) wantStale(pkg, msg string) {
if !tg.isStale(pkg) {
func (tg *testgoData) wantStale(pkg, reason, msg string) {
stale, why := tg.isStale(pkg)
if !stale {
tg.t.Fatal(msg)
}
if reason == "" && why != "" || !strings.Contains(why, reason) {
tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason)
}
}

// wantNotStale fails with msg if pkg is stale.
func (tg *testgoData) wantNotStale(pkg, msg string) {
if tg.isStale(pkg) {
func (tg *testgoData) wantNotStale(pkg, reason, msg string) {
stale, why := tg.isStale(pkg)
if stale {
tg.t.Fatal(msg)
}
if reason == "" && why != "" || !strings.Contains(why, reason) {
tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason)
}
}

// cleanup cleans up a test that runs testgo.
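The reworked helpers above parse the output of go list -f {{.Stale}}:{{.StaleReason}} by splitting on the first colon. The same query works outside the test harness; a sketch (assumes a go binary of at least this release on PATH; staleInfo is a hypothetical helper):

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// staleInfo asks go list whether pkg is stale and why, splitting the
// "true:reason" / "false:" form exactly as isStale above does.
func staleInfo(pkg string) (stale bool, reason string, err error) {
	out, err := exec.Command("go", "list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg).Output()
	if err != nil {
		return false, "", err
	}
	f := strings.SplitN(strings.TrimSpace(string(out)), ":", 2)
	if len(f) != 2 {
		return false, "", fmt.Errorf("unexpected go list output %q", out)
	}
	return f[0] == "true", f[1], nil
}

func main() {
	stale, reason, err := staleInfo("fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stale, reason)
}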
@@ -581,32 +590,6 @@ func (tg *testgoData) cleanup() {
}
}

// resetReadOnlyFlagAll resets windows read-only flag
// set on path and any children it contains.
// The flag is set by git and has to be removed.
// os.Remove refuses to remove files with read-only flag set.
func (tg *testgoData) resetReadOnlyFlagAll(path string) {
fi, err := os.Stat(path)
if err != nil {
tg.t.Fatalf("resetReadOnlyFlagAll(%q) failed: %v", path, err)
}
if !fi.IsDir() {
err := os.Chmod(path, 0666)
if err != nil {
tg.t.Fatalf("resetReadOnlyFlagAll(%q) failed: %v", path, err)
}
}
fd, err := os.Open(path)
if err != nil {
tg.t.Fatalf("resetReadOnlyFlagAll(%q) failed: %v", path, err)
}
defer fd.Close()
names, _ := fd.Readdirnames(-1)
for _, name := range names {
tg.resetReadOnlyFlagAll(path + string(filepath.Separator) + name)
}
}

// failSSH puts an ssh executable in the PATH that always fails.
// This is to stub out uses of ssh by go get.
func (tg *testgoData) failSSH() {
@@ -720,7 +703,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
tg.tempFile("d1/src/p1/p1.go", `package p1`)
tg.setenv("GOPATH", tg.path("d1"))
tg.run("install", "-a", "p1")
tg.wantNotStale("p1", "./testgo list claims p1 is stale, incorrectly")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly")
tg.sleep()

// Changing mtime and content of runtime/internal/sys/sys.go
@@ -729,28 +712,28 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go"
restore := addNL(sys)
defer restore()
tg.wantNotStale("p1", "./testgo list claims p1 is stale, incorrectly, after updating runtime/internal/sys/sys.go")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating runtime/internal/sys/sys.go")
restore()
tg.wantNotStale("p1", "./testgo list claims p1 is stale, incorrectly, after restoring runtime/internal/sys/sys.go")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after restoring runtime/internal/sys/sys.go")

// But changing runtime/internal/sys/zversion.go should have an effect:
// that's how we tell when we flip from one release to another.
zversion := runtime.GOROOT() + "/src/runtime/internal/sys/zversion.go"
restore = addNL(zversion)
defer restore()
tg.wantStale("p1", "./testgo list claims p1 is NOT stale, incorrectly, after changing to new release")
tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to new release")
restore()
tg.wantNotStale("p1", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addNL(zversion)
tg.wantStale("p1", "./testgo list claims p1 is NOT stale, incorrectly, after changing again to new release")
tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing again to new release")
tg.run("install", "p1")
tg.wantNotStale("p1", "./testgo list claims p1 is stale after building with new release")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")

// Restore to "old" release.
restore()
tg.wantStale("p1", "./testgo list claims p1 is NOT stale, incorrectly, after changing to old release after new build")
tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to old release after new build")
tg.run("install", "p1")
tg.wantNotStale("p1", "./testgo list claims p1 is stale after building with old release")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")

// Everything is out of date. Rebuild to leave things in a better state.
tg.run("install", "std")
@@ -833,8 +816,8 @@ func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) {
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", tg.path("d1")+sep+tg.path("d2"))
tg.run("install", "p1")
tg.wantNotStale("p1", "./testgo list claims p1 is stale, incorrectly")
tg.wantNotStale("p2", "./testgo list claims p2 is stale, incorrectly")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly")
tg.wantNotStale("p2", "", "./testgo list claims p2 is stale, incorrectly")
tg.sleep()
if f, err := os.OpenFile(tg.path("d2/src/p2/p2.go"), os.O_WRONLY|os.O_APPEND, 0); err != nil {
t.Fatal(err)
@@ -843,12 +826,12 @@ func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) {
} else {
tg.must(f.Close())
}
tg.wantStale("p2", "./testgo list claims p2 is NOT stale, incorrectly")
tg.wantStale("p1", "./testgo list claims p1 is NOT stale, incorrectly")
tg.wantStale("p2", "newer source file", "./testgo list claims p2 is NOT stale, incorrectly")
tg.wantStale("p1", "stale dependency", "./testgo list claims p1 is NOT stale, incorrectly")

tg.run("install", "p1")
tg.wantNotStale("p2", "./testgo list claims p2 is stale after reinstall, incorrectly")
tg.wantNotStale("p1", "./testgo list claims p1 is stale after reinstall, incorrectly")
tg.wantNotStale("p2", "", "./testgo list claims p2 is stale after reinstall, incorrectly")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after reinstall, incorrectly")
}

func TestGoInstallDetectsRemovedFiles(t *testing.T) {
@@ -862,13 +845,13 @@ func TestGoInstallDetectsRemovedFiles(t *testing.T) {
package mypkg`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "mypkg")
tg.wantNotStale("mypkg", "./testgo list mypkg claims mypkg is stale, incorrectly")
tg.wantNotStale("mypkg", "", "./testgo list mypkg claims mypkg is stale, incorrectly")
// z.go was not part of the build; removing it is okay.
tg.must(os.Remove(tg.path("src/mypkg/z.go")))
tg.wantNotStale("mypkg", "./testgo list mypkg claims mypkg is stale after removing z.go; should not be stale")
tg.wantNotStale("mypkg", "", "./testgo list mypkg claims mypkg is stale after removing z.go; should not be stale")
// y.go was part of the package; removing it should be detected.
tg.must(os.Remove(tg.path("src/mypkg/y.go")))
tg.wantStale("mypkg", "./testgo list mypkg claims mypkg is NOT stale after removing y.go; should be stale")
tg.wantStale("mypkg", "build ID mismatch", "./testgo list mypkg claims mypkg is NOT stale after removing y.go; should be stale")
}

func TestWildcardMatchesSyntaxErrorDirs(t *testing.T) {
@@ -931,13 +914,13 @@ func TestGoInstallDetectsRemovedFilesInPackageMain(t *testing.T) {
package main`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "mycmd")
tg.wantNotStale("mycmd", "./testgo list mypkg claims mycmd is stale, incorrectly")
tg.wantNotStale("mycmd", "", "./testgo list mypkg claims mycmd is stale, incorrectly")
// z.go was not part of the build; removing it is okay.
tg.must(os.Remove(tg.path("src/mycmd/z.go")))
tg.wantNotStale("mycmd", "./testgo list mycmd claims mycmd is stale after removing z.go; should not be stale")
tg.wantNotStale("mycmd", "", "./testgo list mycmd claims mycmd is stale after removing z.go; should not be stale")
// y.go was part of the package; removing it should be detected.
tg.must(os.Remove(tg.path("src/mycmd/y.go")))
tg.wantStale("mycmd", "./testgo list mycmd claims mycmd is NOT stale after removing y.go; should be stale")
tg.wantStale("mycmd", "build ID mismatch", "./testgo list mycmd claims mycmd is NOT stale after removing y.go; should be stale")
}

func testLocalRun(tg *testgoData, exepath, local, match string) {
@@ -1178,7 +1161,7 @@ func TestImportCommentConflict(t *testing.T) {
tg.grepStderr("found import comments", "go build did not mention comment conflict")
}

// cmd/go: custom import path checking should not apply to github.com/xxx/yyy.
// cmd/go: custom import path checking should not apply to Go packages without import comment.
func TestIssue10952(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
if _, err := exec.LookPath("git"); err != nil {
@@ -1193,11 +1176,38 @@ func TestIssue10952(t *testing.T) {
const importPath = "github.com/zombiezen/go-get-issue-10952"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
defer tg.resetReadOnlyFlagAll(repoDir)
tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
tg.run("get", "-d", "-u", importPath)
}

// Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
if _, err := exec.LookPath("git"); err != nil {
t.Skip("skipping because git binary not found")
}

tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
const importPath = "github.com/rsc/go-get-issue-11457"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "remote", "set-url", "origin", "git@github.com:rsc/go-get-issue-11457")

// At this time, custom import path checking compares remotes verbatim (rather than
// just the host and path, skipping scheme and user), so we expect go get -u to fail.
// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
// the SCP-like syntax, and we expect it to appear in the error message.
tg.runFail("get", "-d", "-u", importPath)
want := " is checked out from ssh://git@github.com/rsc/go-get-issue-11457"
if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
t.Error("expected clone URL to appear in stderr")
}
}

func TestGetGitDefaultBranch(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
if _, err := exec.LookPath("git"); err != nil {
@@ -1217,7 +1227,6 @@ func TestGetGitDefaultBranch(t *testing.T) {

tg.run("get", "-d", importPath)
repoDir := tg.path("src/" + importPath)
defer tg.resetReadOnlyFlagAll(repoDir)
tg.runGit(repoDir, "branch", "--contains", "HEAD")
tg.grepStdout(`\* another-branch`, "not on correct default branch")

@@ -1226,14 +1235,6 @@ func TestGetGitDefaultBranch(t *testing.T) {
tg.grepStdout(`\* another-branch`, "not on correct default branch")
}

func TestDisallowedCSourceFiles(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("build", "badc")
tg.grepStderr("C source files not allowed", "go test did not say C source files not allowed")
}

func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -1337,19 +1338,35 @@ func TestPackageMainTestImportsArchiveNotBinary(t *testing.T) {
tg.sleep()
tg.run("test", "main_test")
tg.run("install", "main_test")
tg.wantNotStale("main_test", "after go install, main listed as stale")
tg.wantNotStale("main_test", "", "after go install, main listed as stale")
tg.run("test", "main_test")
}

// The runtime version string takes one of two forms:
// "go1.X[.Y]" for Go releases, and "devel +hash" at tip.
// Determine whether we are in a released copy by
// inspecting the version.
var isGoRelease = strings.HasPrefix(runtime.Version(), "go1")

// Issue 12690
func TestPackageNotStaleWithTrailingSlash(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()

// Make sure the packages below are not stale.
tg.run("install", "runtime", "os", "io")

goroot := runtime.GOROOT()
tg.setenv("GOROOT", goroot+"/")
tg.wantNotStale("runtime", "with trailing slash in GOROOT, runtime listed as stale")
tg.wantNotStale("os", "with trailing slash in GOROOT, os listed as stale")
tg.wantNotStale("io", "with trailing slash in GOROOT, io listed as stale")

want := ""
if isGoRelease {
want = "standard package in Go release distribution"
}

tg.wantNotStale("runtime", want, "with trailing slash in GOROOT, runtime listed as stale")
tg.wantNotStale("os", want, "with trailing slash in GOROOT, os listed as stale")
tg.wantNotStale("io", want, "with trailing slash in GOROOT, io listed as stale")
}

// With $GOBIN set, binaries get installed to $GOBIN.
@@ -1397,28 +1414,6 @@ func TestInstallToGOBINCommandLinePackage(t *testing.T) {
tg.wantExecutable("testdata/bin1/helloworld"+exeSuffix, "go install testdata/src/go-cmd-test/helloworld.go did not write testdata/bin1/helloworld")
}

func TestGodocInstalls(t *testing.T) {
testenv.MustHaveExternalNetwork(t)

// godoc installs into GOBIN
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("gobin")
tg.setenv("GOPATH", tg.path("."))
tg.setenv("GOBIN", tg.path("gobin"))
tg.run("get", "golang.org/x/tools/cmd/godoc")
tg.wantExecutable(tg.path("gobin/godoc"), "did not install godoc to $GOBIN")
tg.unsetenv("GOBIN")

// godoc installs into GOROOT
goroot := runtime.GOROOT()
tg.setenv("GOROOT", goroot)
tg.check(os.RemoveAll(filepath.Join(goroot, "bin", "godoc")))
tg.run("install", "golang.org/x/tools/cmd/godoc")
tg.wantExecutable(filepath.Join(goroot, "bin", "godoc"), "did not install godoc to $GOROOT/bin")
}

func TestGoGetNonPkg(t *testing.T) {
testenv.MustHaveExternalNetwork(t)

@@ -1508,7 +1503,7 @@ func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
defer tg.cleanup()
tg.parallel()
tg.run("test", "errors", "errors", "errors", "errors", "errors")
if strings.Index(strings.TrimSpace(tg.getStdout()), "\n") != -1 {
if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") {
t.Error("go test errors errors errors errors errors tested the same package multiple times")
}
}
@@ -1537,7 +1532,7 @@ func TestGoListCmdOnlyShowsCommands(t *testing.T) {
tg.run("list", "cmd")
out := strings.TrimSpace(tg.getStdout())
for _, line := range strings.Split(out, "\n") {
if strings.Index(line, "cmd/") == -1 {
if !strings.Contains(line, "cmd/") {
t.Error("go list cmd shows non-commands")
break
}
@@ -1657,8 +1652,8 @@ func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
func main() {
println(extern)
}`)
tg.run("run", "-ldflags", `-X main.extern "hello world"`, tg.path("main.go"))
tg.grepStderr("^hello world", `ldflags -X main.extern 'hello world' failed`)
tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go"))
tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}

func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
@@ -1726,7 +1721,6 @@ func TestSymlinksVendor(t *testing.T) {

tg := testgo(t)
defer tg.cleanup()
tg.setenv("GO15VENDOREXPERIMENT", "1")
tg.tempDir("gopath/src/dir1/vendor/v")
tg.tempFile("gopath/src/dir1/p.go", "package main\nimport _ `v`\nfunc main(){}")
tg.tempFile("gopath/src/dir1/vendor/v/v.go", "package v")
@@ -1879,7 +1873,9 @@ func TestShadowingLogic(t *testing.T) {
}
// The output will have makeImportValid applies, but we only
// bother to deal with characters we might reasonably see.
pwdForwardSlash = strings.Replace(pwdForwardSlash, ":", "_", -1)
for _, r := range " :" {
pwdForwardSlash = strings.Replace(pwdForwardSlash, string(r), "_", -1)
}
want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")"
if strings.TrimSpace(tg.getStdout()) != want {
t.Error("shadowed math is not shadowed; looking for", want)
@@ -2001,6 +1997,27 @@ func TestCoverageUsesActualSettingToOverrideEvenForRace(t *testing.T) {
checkCoverage(tg, data)
}

func TestBuildDryRunWithCgo(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}

tg := testgo(t)
defer tg.cleanup()
tg.tempFile("foo.go", `package main

/*
#include <limits.h>
*/
import "C"

func main() {
println(C.INT_MAX)
}`)
tg.run("build", "-n", tg.path("foo.go"))
tg.grepStderrNot(`os.Stat .* no such file or directory`, "unexpected stat of archive file")
}

func TestCoverageWithCgo(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
@@ -2106,10 +2123,33 @@ func main() { C.f() }`)
tg.grepStderr(`gccgo.*\-L alibpath \-lalib`, `no Go-inline "#cgo LDFLAGS:" ("-L alibpath -lalib") passed to gccgo linking stage`)
}

func TestListTemplateCanUseContextFunction(t *testing.T) {
func TestListTemplateContextFunction(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.run("list", "-f", "GOARCH: {{context.GOARCH}}")
for _, tt := range []struct {
v string
want string
}{
{"GOARCH", runtime.GOARCH},
{"GOOS", runtime.GOOS},
{"GOROOT", filepath.Clean(runtime.GOROOT())},
{"GOPATH", os.Getenv("GOPATH")},
{"CgoEnabled", ""},
{"UseAllFiles", ""},
{"Compiler", ""},
{"BuildTags", ""},
{"ReleaseTags", ""},
{"InstallSuffix", ""},
} {
tmpl := "{{context." + tt.v + "}}"
tg.run("list", "-f", tmpl)
if tt.want == "" {
continue
}
if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
}
}
}

// cmd/go: "go test" should fail if package does not build
@@ -2123,7 +2163,7 @@ func TestIssue7108(t *testing.T) {
// cmd/go: go test -a foo does not rebuild regexp.
func TestIssue6844(t *testing.T) {
if testing.Short() {
t.Skip("don't rebuild the standard libary in short mode")
t.Skip("don't rebuild the standard library in short mode")
}

tg := testgo(t)
@@ -2295,8 +2335,7 @@ func TestGoVetWithExternalTests(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "golang.org/x/tools/cmd/vet")
tg.run("install", "cmd/vet")
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("vet", "vetpkg")
tg.grepBoth("missing argument for Printf", "go vet vetpkg did not find missing argument for Printf")
@@ -2308,8 +2347,7 @@ func TestGoVetWithTags(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "golang.org/x/tools/cmd/vet")
tg.run("install", "cmd/vet")
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.runFail("vet", "-tags", "tagtest", "vetpkg")
tg.grepBoth(`c\.go.*wrong number of args for format`, "go get vetpkg did not run scan tagged file")
@@ -2330,6 +2368,11 @@ func TestGoGetRscIoToolstash(t *testing.T) {
// Issue 13037: Was not parsing <meta> tags in 404 served over HTTPS
func TestGoGetHTTPS404(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
switch runtime.GOOS {
case "darwin", "linux", "freebsd":
default:
t.Skipf("test case does not work on %s", runtime.GOOS)
}

tg := testgo(t)
defer tg.cleanup()
@@ -2338,7 +2381,7 @@ func TestGoGetHTTPS404(t *testing.T) {
tg.run("get", "bazil.org/fuse/fs/fstestutil")
}

// Test that you can not import a main package.
// Test that you cannot import a main package.
func TestIssue4210(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -2359,6 +2402,8 @@ func TestIssue4210(t *testing.T) {
func TestGoGetInsecure(t *testing.T) {
testenv.MustHaveExternalNetwork(t)

t.Skip("golang.org/issue/15410")

tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
@@ -2414,22 +2459,6 @@ func TestGoGetInsecureCustomDomain(t *testing.T) {
tg.run("get", "-d", "-insecure", repo)
}

func TestIssue10193(t *testing.T) {
t.Skip("depends on code.google.com")
testenv.MustHaveExternalNetwork(t)
if _, err := exec.LookPath("hg"); err != nil {
t.Skip("skipping because hg binary not found")
}

tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("get", "code.google.com/p/rsc/pdf")
tg.grepStderr("is shutting down", "missed warning about code.google.com")
}

func TestGoRunDirs(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
@@ -2759,3 +2788,169 @@ func TestParallelTest(t *testing.T) {
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-p=4", "p1", "p2", "p3", "p4")
}

func TestCgoConsistentResults(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
if runtime.GOOS == "solaris" {
// See https://golang.org/issue/13247
t.Skip("skipping because Solaris builds are known to be inconsistent; see #13247")
}

tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
exe1 := tg.path("cgotest1" + exeSuffix)
exe2 := tg.path("cgotest2" + exeSuffix)
tg.run("build", "-o", exe1, "cgotest")
tg.run("build", "-x", "-o", exe2, "cgotest")
b1, err := ioutil.ReadFile(exe1)
tg.must(err)
b2, err := ioutil.ReadFile(exe2)
tg.must(err)

if !tg.doGrepMatch(`-fdebug-prefix-map=\$WORK`, &tg.stderr) {
t.Skip("skipping because C compiler does not support -fdebug-prefix-map")
}
if !bytes.Equal(b1, b2) {
t.Error("building cgotest twice did not produce the same output")
}
}

// Issue 14444: go get -u .../ duplicate loads errors
func TestGoGetUpdateAllDoesNotTryToLoadDuplicates(t *testing.T) {
testenv.MustHaveExternalNetwork(t)

tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "-u", ".../")
tg.grepStderrNot("duplicate loads of", "did not remove old packages from cache")
}

func TestFatalInBenchmarkCauseNonZeroExitStatus(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.runFail("test", "-bench", ".", "./testdata/src/benchfatal")
tg.grepBothNot("^ok", "test passed unexpectedly")
tg.grepBoth("FAIL.*benchfatal", "test did not run everything")
}

func TestBinaryOnlyPackages(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))

tg.tempFile("src/p1/p1.go", `//go:binary-only-package

package p1
`)
tg.wantStale("p1", "cannot access install target", "p1 is binary-only but has no binary, should be stale")
tg.runFail("install", "p1")
tg.grepStderr("missing or invalid package binary", "did not report attempt to compile binary-only package")

tg.tempFile("src/p1/p1.go", `
package p1
import "fmt"
func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } }
`)
tg.run("install", "p1")
os.Remove(tg.path("src/p1/p1.go"))
tg.mustNotExist(tg.path("src/p1/p1.go"))

tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great

package p2
import "p1"
func F() { p1.F(true) }
`)
tg.runFail("install", "p2")
tg.grepStderr("no buildable Go source files", "did not complain about missing sources")

tg.tempFile("src/p1/missing.go", `//go:binary-only-package

package p1
func G()
`)
tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (first)")
tg.run("install", "-x", "p1") // no-op, up to date
tg.grepBothNot("/compile", "should not have run compiler")
tg.run("install", "p2") // does not rebuild p1 (or else p2 will fail)
tg.wantNotStale("p2", "", "should NOT want to rebuild p2")

// changes to the non-source-code do not matter,
// and only one file needs the special comment.
tg.tempFile("src/p1/missing2.go", `
package p1
func H()
`)
tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (second)")
tg.wantNotStale("p2", "", "should NOT want to rebuild p2")

tg.tempFile("src/p3/p3.go", `
package main
import (
"p1"
"p2"
)
func main() {
p1.F(false)
p2.F()
}
`)
tg.run("install", "p3")

tg.run("run", tg.path("src/p3/p3.go"))
tg.grepStdout("hello from p1", "did not see message from p1")
}

// Issue 16050.
func TestAlwaysLinkSysoFiles(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/syso")
tg.tempFile("src/syso/a.syso", ``)
tg.tempFile("src/syso/b.go", `package syso`)
tg.setenv("GOPATH", tg.path("."))

// We should see the .syso file regardless of the setting of
// CGO_ENABLED.

tg.setenv("CGO_ENABLED", "1")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1")

tg.setenv("CGO_ENABLED", "0")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
}

// Issue 16120.
func TestGenerateUsesBuildContext(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("this test won't run under Windows")
}

tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/gen")
tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n")
tg.setenv("GOPATH", tg.path("."))

tg.setenv("GOOS", "linux")
tg.setenv("GOARCH", "amd64")
tg.run("generate", "gen")
tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination")

tg.setenv("GOOS", "darwin")
tg.setenv("GOARCH", "386")
tg.run("generate", "gen")
tg.grepStdout("darwin 386", "unexpected GOOS/GOARCH combination")
}
@@ -149,14 +149,6 @@ A few common code hosting sites have special syntax:
import "github.com/user/project"
import "github.com/user/project/sub/directory"

Google Code Project Hosting (Git, Mercurial, Subversion)

import "code.google.com/p/project"
import "code.google.com/p/project/sub/directory"

import "code.google.com/p/project.subrepository"
import "code.google.com/p/project.subrepository/sub/directory"

Launchpad (Bazaar)

import "launchpad.net/project"
@@ -269,10 +261,9 @@ unless it is being referred to by that import path. In this way, import comments
let package authors make sure the custom import path is used and not a
direct path to the underlying code hosting site.

If vendoring is enabled (see 'go help gopath'), then import path checking is
disabled for code found within vendor trees. This makes it possible to copy
code into alternate locations in vendor trees without needing to update import
comments.
Import path checking is disabled for code found within vendor trees.
This makes it possible to copy code into alternate locations in vendor trees
without needing to update import comments.

See https://golang.org/s/go14customimport for details.
`,
@@ -421,12 +412,6 @@ Vendor directories do not affect the placement of new repositories
being checked out for the first time by 'go get': those are always
placed in the main GOPATH, never in a vendor subtree.

In Go 1.5, as an experiment, setting the environment variable
GO15VENDOREXPERIMENT=1 enabled these features.
As of Go 1.6 they are on by default. To turn them off, set
GO15VENDOREXPERIMENT=0. In Go 1.7, the environment
variable will stop having any effect.

See https://golang.org/s/go15vendor for details.
`,
}
@@ -497,8 +482,6 @@ Special-purpose environment variables:
		installed in a location other than where it is built.
		File names in stack traces are rewritten from GOROOT to
		GOROOT_FINAL.
	GO15VENDOREXPERIMENT
		Set to 0 to disable vendoring semantics.
	GO_EXTLINK_ENABLED
		Whether the linker should use external linking mode
		when using -linkmode=auto with code that uses cgo.
@@ -540,7 +523,15 @@ the extension of the file name. These extensions are:
Files of each of these types except .syso may contain build
constraints, but the go command stops scanning for build constraints
at the first item in the file that is not a blank line or //-style
line comment.
line comment. See the go/build package documentation for
more details.

Non-test Go source files can also include a //go:binary-only-package
comment, indicating that the package sources are included
for documentation only and must not be used to build the
package binary. This enables distribution of Go packages in
their compiled form alone. See the go/build package documentation
for more details.
`,
}
@@ -30,7 +30,7 @@ var httpClient = http.DefaultClient
// when we're connecting to https servers that might not be there
// or might be using self-signed certificates.
var impatientInsecureHTTPClient = &http.Client{
	Timeout: time.Duration(5 * time.Second),
	Timeout: 5 * time.Second,
	Transport: &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
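// The dropped conversion was a no-op: 5*time.Second multiplies an untyped
// constant by a time.Duration, so the result is already a time.Duration.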
@@ -41,7 +41,10 @@ syntax of package template. The default output is equivalent to -f
	Goroot bool // is this package in the Go root?
	Standard bool // is this package part of the standard Go library?
	Stale bool // would 'go install' do anything for this package?
	StaleReason string // explanation for Stale==true
	Root string // Go root or Go path dir containing this package
	ConflictDir string // this directory shadows Dir in $GOPATH
	BinaryOnly bool // binary-only package: cannot be recompiled from sources

	// Source files
	GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)

@@ -51,6 +54,7 @@ syntax of package template. The default output is equivalent to -f
	CXXFiles []string // .cc, .cxx and .cpp source files
	MFiles []string // .m source files
	HFiles []string // .h, .hh, .hpp and .hxx source files
	FFiles []string // .f, .F, .for and .f90 Fortran source files
	SFiles []string // .s source files
	SwigFiles []string // .swig files
	SwigCXXFiles []string // .swigcxx files

@@ -60,6 +64,7 @@ syntax of package template. The default output is equivalent to -f
	CgoCFLAGS []string // cgo: flags for C compiler
	CgoCPPFLAGS []string // cgo: flags for C preprocessor
	CgoCXXFLAGS []string // cgo: flags for C++ compiler
	CgoFFLAGS []string // cgo: flags for Fortran compiler
	CgoLDFLAGS []string // cgo: flags for linker
	CgoPkgConfig []string // cgo: pkg-config names
@@ -214,15 +214,7 @@ var helpTemplate = `{{if .Runnable}}usage: go {{.UsageLine}}
{{end}}{{.Long | trim}}
`

var documentationTemplate = `// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// DO NOT EDIT THIS FILE. GENERATED BY mkalldocs.sh.
// Edit the documentation in other files and rerun mkalldocs.sh to generate this one.

/*
{{range .}}{{if .Short}}{{.Short | capitalize}}
var documentationTemplate = `{{range .}}{{if .Short}}{{.Short | capitalize}}

{{end}}{{if .Runnable}}Usage:

@@ -231,9 +223,39 @@ var documentationTemplate = `// Copyright 2011 The Go Authors. All rights reser
{{end}}{{.Long | trim}}


{{end}}*/
package main
`
{{end}}`

// commentWriter writes a Go comment to the underlying io.Writer,
// using line comment form (//).
type commentWriter struct {
	W            io.Writer
	wroteSlashes bool // Wrote "//" at the beginning of the current line.
}

func (c *commentWriter) Write(p []byte) (int, error) {
	var n int
	for i, b := range p {
		if !c.wroteSlashes {
			s := "//"
			if b != '\n' {
				s = "// "
			}
			if _, err := io.WriteString(c.W, s); err != nil {
				return n, err
			}
			c.wroteSlashes = true
		}
		n0, err := c.W.Write(p[i : i+1])
		n += n0
		if err != nil {
			return n, err
		}
		if b == '\n' {
			c.wroteSlashes = false
		}
	}
	return len(p), nil
}
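// Usage sketch (illustrative, not part of the commit): wrapping os.Stdout
// in commentWriter turns each written line into a //-style comment line.
//
//	cw := &commentWriter{W: os.Stdout}
//	io.WriteString(cw, "first\n\nthird\n")
//	// prints:
//	// // first
//	// //
//	// // third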

// An errWriter wraps a writer, recording whether a write error occurred.
type errWriter struct {
@@ -310,10 +332,18 @@ func help(args []string) {

	// 'go help documentation' generates doc.go.
	if arg == "documentation" {
		fmt.Println("// Copyright 2011 The Go Authors. All rights reserved.")
		fmt.Println("// Use of this source code is governed by a BSD-style")
		fmt.Println("// license that can be found in the LICENSE file.")
		fmt.Println()
		fmt.Println("// DO NOT EDIT THIS FILE. GENERATED BY mkalldocs.sh.")
		fmt.Println("// Edit the documentation in other files and rerun mkalldocs.sh to generate this one.")
		fmt.Println()
		buf := new(bytes.Buffer)
		printUsage(buf)
		usage := &Command{Long: buf.String()}
		tmpl(os.Stdout, documentationTemplate, append([]*Command{usage}, commands...))
		tmpl(&commentWriter{W: os.Stdout}, documentationTemplate, append([]*Command{usage}, commands...))
		fmt.Println("package main")
		return
	}
@@ -403,8 +433,6 @@ func errorf(format string, args ...interface{}) {
	setExitStatus(1)
}

var logf = log.Printf

func exitIfErrors() {
	if exitStatus != 0 {
		exit()
@@ -428,19 +456,6 @@ func run(cmdargs ...interface{}) {
	}
}

func runOut(dir string, cmdargs ...interface{}) []byte {
	cmdline := stringList(cmdargs...)
	cmd := exec.Command(cmdline[0], cmdline[1:]...)
	cmd.Dir = dir
	out, err := cmd.CombinedOutput()
	if err != nil {
		os.Stderr.Write(out)
		errorf("%v", err)
		out = nil
	}
	return out
}

// envForDir returns a copy of the environment
// suitable for running in the given directory.
// The environment is the current process's environment
@@ -110,7 +110,7 @@ func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string,
	// or even the first few megabytes of the file
	// due to differences in note segment placement;
	// in that case, extract the note data manually.
	_, err = f.Seek(int64(p.Off), 0)
	_, err = f.Seek(int64(p.Off), io.SeekStart)
	if err != nil {
		return "", err
	}
@@ -47,8 +47,6 @@ func testNoteReading(t *testing.T) {
		t.Skipf("skipping - no cgo, so assuming external linking not available")
	case runtime.GOOS == "linux" && (runtime.GOARCH == "ppc64le" || runtime.GOARCH == "ppc64"):
		t.Skipf("skipping - external linking not supported, golang.org/issue/11184")
	case runtime.GOOS == "linux" && (runtime.GOARCH == "mips64le" || runtime.GOARCH == "mips64"):
		t.Skipf("skipping - external linking not supported, golang.org/issue/12560")
	case runtime.GOOS == "openbsd" && runtime.GOARCH == "arm":
		t.Skipf("skipping - external linking not supported, golang.org/issue/10619")
	case runtime.GOOS == "plan9":
@@ -39,8 +39,10 @@ type Package struct {
	Goroot bool `json:",omitempty"` // is this package found in the Go root?
	Standard bool `json:",omitempty"` // is this package part of the standard Go library?
	Stale bool `json:",omitempty"` // would 'go install' do anything for this package?
	StaleReason string `json:",omitempty"` // why is Stale true?
	Root string `json:",omitempty"` // Go root or Go path dir containing this package
	ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
	BinaryOnly bool `json:",omitempty"` // package cannot be recompiled

	// Source files
	GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)

@@ -50,6 +52,7 @@ type Package struct {
	CXXFiles []string `json:",omitempty"` // .cc, .cpp and .cxx source files
	MFiles []string `json:",omitempty"` // .m source files
	HFiles []string `json:",omitempty"` // .h, .hh, .hpp and .hxx source files
	FFiles []string `json:",omitempty"` // .f, .F, .for and .f90 Fortran source files
	SFiles []string `json:",omitempty"` // .s source files
	SwigFiles []string `json:",omitempty"` // .swig files
	SwigCXXFiles []string `json:",omitempty"` // .swigcxx files

@@ -59,6 +62,7 @@ type Package struct {
	CgoCFLAGS []string `json:",omitempty"` // cgo: flags for C compiler
	CgoCPPFLAGS []string `json:",omitempty"` // cgo: flags for C preprocessor
	CgoCXXFLAGS []string `json:",omitempty"` // cgo: flags for C++ compiler
	CgoFFLAGS []string `json:",omitempty"` // cgo: flags for Fortran compiler
	CgoLDFLAGS []string `json:",omitempty"` // cgo: flags for linker
	CgoPkgConfig []string `json:",omitempty"` // cgo: pkg-config names
@@ -88,7 +92,6 @@ type Package struct {
	target string // installed file for this package (may be executable)
	fake bool // synthesized package
	external bool // synthesized external test package
	forceBuild bool // this package must be rebuilt
	forceLibrary bool // this package is a library (even if named "main")
	cmdline bool // defined by files listed on command line
	local bool // imported via local path (./ or ../)

@@ -151,6 +154,8 @@ func (p *Package) copyBuild(pp *build.Package) {
	p.Doc = pp.Doc
	p.Root = pp.Root
	p.ConflictDir = pp.ConflictDir
	p.BinaryOnly = pp.BinaryOnly

	// TODO? Target
	p.Goroot = pp.Goroot
	if buildContext.Compiler == "gccgo" {
@@ -165,6 +170,7 @@ func (p *Package) copyBuild(pp *build.Package) {
	p.CXXFiles = pp.CXXFiles
	p.MFiles = pp.MFiles
	p.HFiles = pp.HFiles
	p.FFiles = pp.FFiles
	p.SFiles = pp.SFiles
	p.SwigFiles = pp.SwigFiles
	p.SwigCXXFiles = pp.SwigCXXFiles

@@ -267,15 +273,6 @@ func reloadPackage(arg string, stk *importStack) *Package {
	return loadPackage(arg, stk)
}

// The Go 1.5 vendoring experiment was enabled by setting GO15VENDOREXPERIMENT=1.
// In Go 1.6 this is on by default and is disabled by setting GO15VENDOREXPERIMENT=0.
// In Go 1.7 the variable will stop having any effect.
// The variable is obnoxiously long so that years from now when people find it in
// their profiles and wonder what it does, there is some chance that a web search
// might answer the question.
// There is a copy of this variable in src/go/build/build.go. Delete that one when this one goes away.
var go15VendorExperiment = os.Getenv("GO15VENDOREXPERIMENT") != "0"

// dirToImportPath returns the pseudo-import path we use for a package
// outside the Go path. It begins with _/ and then contains the full path
// to the directory. If the package lives in c:\home\gopher\my\pkg then
@@ -365,7 +362,7 @@ func loadImport(path, srcDir string, parent *Package, stk *importStack, importPo
	// TODO: After Go 1, decide when to pass build.AllowBinary here.
	// See issue 3268 for mistakes to avoid.
	buildMode := build.ImportComment
	if !go15VendorExperiment || mode&useVendor == 0 || path != origPath {
	if mode&useVendor == 0 || path != origPath {
		// Not vendoring, or we already found the vendored path.
		buildMode |= build.IgnoreVendor
	}

@@ -375,7 +372,7 @@ func loadImport(path, srcDir string, parent *Package, stk *importStack, importPo
		bp.BinDir = gobin
	}
	if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
		(!go15VendorExperiment || (!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/"))) {
		!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
		err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment)
	}
	p.load(stk, bp, err)
@@ -416,7 +413,7 @@ func isDir(path string) bool {
// x/vendor/path, vendor/path, or else stay path if none of those exist.
// vendoredImportPath returns the expanded path or, if no expansion is found, the original.
func vendoredImportPath(parent *Package, path string) (found string) {
	if parent == nil || parent.Root == "" || !go15VendorExperiment {
	if parent == nil || parent.Root == "" {
		return path
	}

@@ -445,6 +442,12 @@ func vendoredImportPath(parent *Package, path string) (found string) {
		}
		targ := filepath.Join(dir[:i], vpath)
		if isDir(targ) && hasGoFiles(targ) {
			importPath := parent.ImportPath
			if importPath == "command-line-arguments" {
				// If parent.ImportPath is 'command-line-arguments'.
				// set to relative directory to root (also chopped root directory)
				importPath = dir[len(root)+1:]
			}
			// We started with parent's dir c:\gopath\src\foo\bar\baz\quux\xyzzy.
			// We know the import path for parent's dir.
			// We chopped off some number of path elements and

@@ -454,14 +457,14 @@ func vendoredImportPath(parent *Package, path string) (found string) {
			// (actually the same number of bytes) from parent's import path
			// and then append /vendor/path.
			chopped := len(dir) - i
			if chopped == len(parent.ImportPath)+1 {
			if chopped == len(importPath)+1 {
				// We walked up from c:\gopath\src\foo\bar
				// and found c:\gopath\src\vendor\path.
				// We chopped \foo\bar (length 8) but the import path is "foo/bar" (length 7).
				// Use "vendor/path" without any prefix.
				return vpath
			}
			return parent.ImportPath[:len(parent.ImportPath)-chopped] + "/" + vpath
			return importPath[:len(importPath)-chopped] + "/" + vpath
		}
	}
	return path
@@ -528,7 +531,7 @@ func disallowInternal(srcDir string, p *Package, stk *importStack) *Package {
		return p
	}

	// Check for "internal" element: four cases depending on begin of string and/or end of string.
	// Check for "internal" element: three cases depending on begin of string and/or end of string.
	i, ok := findInternal(p.ImportPath)
	if !ok {
		return p

@@ -565,7 +568,7 @@ func disallowInternal(srcDir string, p *Package, stk *importStack) *Package {
// If there isn't one, findInternal returns ok=false.
// Otherwise, findInternal returns ok=true and the index of the "internal".
func findInternal(path string) (index int, ok bool) {
	// Four cases, depending on internal at start/end of string or not.
	// Three cases, depending on internal at start/end of string or not.
	// The order matters: we must return the index of the final element,
	// because the final one produces the most restrictive requirement
	// on the importer.
@@ -584,10 +587,6 @@ func findInternal(path string) (index int, ok bool) {
// If the import is allowed, disallowVendor returns the original package p.
// If not, it returns a new package containing just an appropriate error.
func disallowVendor(srcDir, path string, p *Package, stk *importStack) *Package {
	if !go15VendorExperiment {
		return p
	}

	// The stack includes p.ImportPath.
	// If that's the only thing on the stack, we started
	// with a name given on the command line, not an
@@ -667,7 +666,7 @@ func disallowVendorVisibility(srcDir string, p *Package, stk *importStack) *Pack

// findVendor looks for the last non-terminating "vendor" path element in the given import path.
// If there isn't one, findVendor returns ok=false.
// Otherwise, findInternal returns ok=true and the index of the "vendor".
// Otherwise, findVendor returns ok=true and the index of the "vendor".
//
// Note that terminating "vendor" elements don't count: "x/vendor" is its own package,
// not the vendored copy of an import "" (the empty import path).
@@ -691,7 +690,6 @@ type targetDir int
const (
	toRoot targetDir = iota // to bin dir inside package root (default)
	toTool // GOROOT/pkg/tool
	toBin // GOROOT/bin
	stalePath // the old import path; fail to build
)
@@ -715,7 +713,6 @@ var goTools = map[string]targetDir{
	"cmd/trace": toTool,
	"cmd/vet": toTool,
	"cmd/yacc": toTool,
	"golang.org/x/tools/cmd/godoc": toBin,
	"code.google.com/p/go.tools/cmd/cover": stalePath,
	"code.google.com/p/go.tools/cmd/godoc": stalePath,
	"code.google.com/p/go.tools/cmd/vet": stalePath,
@@ -802,12 +799,7 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
			// Install cross-compiled binaries to subdirectories of bin.
			elem = full
		}
		if p.build.BinDir != gobin && goTools[p.ImportPath] == toBin {
			// Override BinDir.
			// This is from a subrepo but installs to $GOROOT/bin
			// by default anyway (like godoc).
			p.target = filepath.Join(gorootBin, elem)
		} else if p.build.BinDir != "" {
		if p.build.BinDir != "" {
			// Install to GOBIN or bin of GOPATH entry.
			p.target = filepath.Join(p.build.BinDir, elem)
			if !p.Goroot && strings.Contains(elem, "/") && gobin != "" {
@@ -930,6 +922,7 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		p.CXXFiles,
		p.MFiles,
		p.HFiles,
		p.FFiles,
		p.SFiles,
		p.SysoFiles,
		p.SwigFiles,
@@ -1032,6 +1025,20 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
	}
	p.Target = p.target

	// If cgo is not enabled, ignore cgo supporting sources
	// just as we ignore go files containing import "C".
	if !buildContext.CgoEnabled {
		p.CFiles = nil
		p.CXXFiles = nil
		p.MFiles = nil
		p.SwigFiles = nil
		p.SwigCXXFiles = nil
		// Note that SFiles are okay (they go to the Go assembler)
		// and HFiles are okay (they might be used by the SFiles).
		// Also Sysofiles are okay (they might not contain object
		// code; see issue #16050).
	}

	// The gc toolchain only permits C source files with cgo.
	if len(p.CFiles) > 0 && !p.usesCgo() && !p.usesSwig() && buildContext.Compiler == "gc" {
		p.Error = &PackageError{
@@ -1054,7 +1061,15 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
		}
	}

	if p.BinaryOnly {
		// For binary-only package, use build ID from supplied package binary.
		buildID, err := readBuildID(p)
		if err == nil {
			p.buildID = buildID
		}
	} else {
		computeBuildID(p)
	}
	return p
}
@@ -1094,7 +1109,7 @@ func packageList(roots []*Package) []*Package {
// at the named pkgs (command-line arguments).
func computeStale(pkgs ...*Package) {
	for _, p := range packageList(pkgs) {
		p.Stale = isStale(p)
		p.Stale, p.StaleReason = isStale(p)
	}
}
@@ -1240,7 +1255,7 @@ var isGoRelease = strings.HasPrefix(runtime.Version(), "go1")
// an explicit data comparison. Specifically, we build a list of the
// inputs to the build, compute its SHA1 hash, and record that as the
// ``build ID'' in the generated object. At the next build, we can
// recompute the buid ID and compare it to the one in the generated
// recompute the build ID and compare it to the one in the generated
// object. If they differ, the list of inputs has changed, so the object
// is out of date and must be rebuilt.
//
@@ -1365,40 +1380,50 @@ var isGoRelease = strings.HasPrefix(runtime.Version(), "go1")
// standard library, even in release versions. This makes
// 'go build -tags netgo' work, among other things.

// isStale reports whether package p needs to be rebuilt.
func isStale(p *Package) bool {
// isStale reports whether package p needs to be rebuilt,
// along with the reason why.
func isStale(p *Package) (bool, string) {
	if p.Standard && (p.ImportPath == "unsafe" || buildContext.Compiler == "gccgo") {
		// fake, builtin package
		return false
		return false, "builtin package"
	}
	if p.Error != nil {
		return true
		return true, "errors loading package"
	}
	if p.Stale {
		return true, p.StaleReason
	}

	// A package without Go sources means we only found
	// the installed .a file. Since we don't know how to rebuild
	// it, it can't be stale, even if -a is set. This enables binary-only
	// distributions of Go packages, although such binaries are
	// only useful with the specific version of the toolchain that
	// created them.
	if len(p.gofiles) == 0 && !p.usesSwig() {
		return false
	// If this is a package with no source code, it cannot be rebuilt.
	// If the binary is missing, we mark the package stale so that
	// if a rebuild is needed, that rebuild attempt will produce a useful error.
	// (Some commands, such as 'go list', do not attempt to rebuild.)
	if p.BinaryOnly {
		if p.target == "" {
			// Fail if a build is attempted.
			return true, "no source code for package, but no install target"
		}
		if _, err := os.Stat(p.target); err != nil {
			// Fail if a build is attempted.
			return true, "no source code for package, but cannot access install target: " + err.Error()
		}
		return false, "no source code for package"
	}

	// If the -a flag is given, rebuild everything.
	if buildA {
		return true
		return true, "build -a flag in use"
	}

	// If there's no install target or it's already marked stale, we have to rebuild.
	if p.target == "" || p.Stale {
		return true
	// If there's no install target, we have to rebuild.
	if p.target == "" {
		return true, "no install target"
	}

	// Package is stale if completely unbuilt.
	fi, err := os.Stat(p.target)
	if err != nil {
		return true
		return true, "cannot stat install target"
	}

	// Package is stale if the expected build ID differs from the
@@ -1411,13 +1436,13 @@ func isStale(p *Package) bool {
	// See issue 8290 and issue 10702.
	targetBuildID, err := readBuildID(p)
	if err == nil && targetBuildID != p.buildID {
		return true
		return true, "build ID mismatch"
	}

	// Package is stale if a dependency is.
	for _, p1 := range p.deps {
		if p1.Stale {
			return true
			return true, "stale dependency"
		}
	}
@@ -1440,7 +1465,7 @@ func isStale(p *Package) bool {
	// install is to run make.bash, which will remove the old package archives
	// before rebuilding.)
	if p.Standard && isGoRelease {
		return false
		return false, "standard package in Go release distribution"
	}

	// Time-based staleness.
@@ -1455,7 +1480,7 @@ func isStale(p *Package) bool {
	// Package is stale if a dependency is, or if a dependency is newer.
	for _, p1 := range p.deps {
		if p1.target != "" && olderThan(p1.target) {
			return true
			return true, "newer dependency"
		}
	}
@@ -1474,10 +1499,10 @@ func isStale(p *Package) bool {
	// taken care of above (at least when the installed Go is a released version).
	if p.Root != goroot {
		if olderThan(buildToolchain.compiler()) {
			return true
			return true, "newer compiler"
		}
		if p.build.IsCommand() && olderThan(buildToolchain.linker()) {
			return true
			return true, "newer linker"
		}
	}
@@ -1519,14 +1544,14 @@ func isStale(p *Package) bool {
	// to test for write access, and then skip GOPATH roots we don't have write
	// access to. But hopefully we can just use the mtimes always.

	srcs := stringList(p.GoFiles, p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.SFiles, p.CgoFiles, p.SysoFiles, p.SwigFiles, p.SwigCXXFiles)
	srcs := stringList(p.GoFiles, p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.CgoFiles, p.SysoFiles, p.SwigFiles, p.SwigCXXFiles)
	for _, src := range srcs {
		if olderThan(filepath.Join(p.Dir, src)) {
			return true
			return true, "newer source file"
		}
	}

	return false
	return false, ""
}

// computeBuildID computes the build ID for p, leaving it in p.buildID.
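// The new reasons are user-visible through the list command's template
// language; for example (hypothetical package path):
//
//	go list -f '{{.Stale}} {{.StaleReason}}' example.com/some/pkg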
@@ -161,9 +161,12 @@ func TestSharedLibName(t *testing.T) {
			}
			oldGopath := buildContext.GOPATH
			defer func() {
				os.RemoveAll(tmpGopath)
				buildContext.GOPATH = oldGopath
				os.Chdir(cwd)
				err := os.RemoveAll(tmpGopath)
				if err != nil {
					t.Error(err)
				}
			}()
			root := filepath.Join(tmpGopath, "src", data.rootedAt)
			err = os.MkdirAll(root, 0755)
@@ -59,6 +59,9 @@ Each listed package causes the execution of a separate test binary.
Test files that declare a package with the suffix "_test" will be compiled as a
separate package, and then linked and run with the main test binary.

The go tool will ignore a directory named "testdata", making it available
to hold ancillary data needed by the tests.

By default, go test needs no arguments. It compiles and tests the package
with source in the current directory, including tests, and runs the tests.
|
|||
|
||||
const testFlag2 = `
|
||||
-bench regexp
|
||||
Run benchmarks matching the regular expression.
|
||||
Run (sub)benchmarks matching a regular expression.
|
||||
The given regular expression is split into smaller ones by
|
||||
top-level '/', where each must match the corresponding part of a
|
||||
benchmark's identifier.
|
||||
By default, no benchmarks run. To run all benchmarks,
|
||||
use '-bench .' or '-bench=.'.
|
||||
|
||||
|
|
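For illustration (an addition to this rewrite, not part of the commit): a
sub-benchmark registered with b.Run gets the identifier
BenchmarkSort/size=1000, so 'go test -bench Sort/size=1000' selects it:

	func BenchmarkSort(b *testing.B) {
		b.Run("size=1000", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// sort a fixed 1000-element input here
			}
		})
	}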
@@ -213,8 +219,10 @@ const testFlag2 = `
	    (see 'go help build').

	-run regexp
	    Run only those tests and examples matching the regular
	    expression.
	    Run only those tests and examples matching the regular expression.
	    For tests the regular expression is split into smaller ones by
	    top-level '/', where each must match the corresponding part of a
	    test's identifier.

	-short
	    Tell long-running tests to shorten their run time.

@@ -228,7 +236,6 @@ const testFlag2 = `

	-trace trace.out
	    Write an execution trace to the specified file before exiting.
	    Writes test binary as -c would.

	-v
	    Verbose output: log all tests as they are run. Also print all
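For illustration (not part of the commit): a subtest created with t.Run has
the identifier TestFoo/bar, so 'go test -run Foo/bar' selects it, each
'/'-separated part of the expression matching one part of the name:

	func TestFoo(t *testing.T) {
		t.Run("bar", func(t *testing.T) {
			// subtest body
		})
	}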
@@ -311,10 +318,11 @@ A benchmark function is one named BenchmarkXXX and should have the signature,

An example function is similar to a test function but, instead of using
*testing.T to report success or failure, prints output to os.Stdout.
That output is compared against the function's "Output:" comment, which
must be the last comment in the function body (see example below). An
example with no such comment, or with no text after "Output:" is compiled
but not executed.
If the last comment in the function starts with "Output:" then the output
is compared exactly against the comment (see examples below). If the last
comment begins with "Unordered output:" then the output is compared to the
comment, however the order of the lines is ignored. An example with no such
comment, or with no text after "Output:" is compiled but not executed.

Godoc displays the body of ExampleXXX to demonstrate the use
of the function, constant, or variable XXX. An example of a method M with

@@ -330,6 +338,20 @@ Here is an example of an example:
	// this example.
	}

Here is another example where the ordering of the output is ignored:

	func ExamplePerm() {
		for _, value := range Perm(4) {
			fmt.Println(value)
		}

		// Unordered output: 4
		// 2
		// 1
		// 3
		// 0
	}

The entire test file is presented as the example when it contains a single
example function, at least one other function, type, variable, or constant
declaration, and no test or benchmark functions.
@@ -495,6 +517,7 @@ func runTest(cmd *Command, args []string) {
			continue
		}
		p.Stale = true // rebuild
		p.StaleReason = "rebuild for coverage"
		p.fake = true // do not warn about rebuild
		p.coverMode = testCoverMode
		var coverFiles []string
@@ -738,6 +761,7 @@ func (b *builder) test(p *Package) (buildAction, runAction, printAction *action,
	ptest.fake = true
	ptest.forceLibrary = true
	ptest.Stale = true
	ptest.StaleReason = "rebuild for test"
	ptest.build = new(build.Package)
	*ptest.build = *p.build
	m := map[string][]token.Position{}
@@ -1019,6 +1043,7 @@ func recompileForTest(pmain, preal, ptest *Package, testDir string) {
		p.target = ""
		p.fake = true
		p.Stale = true
		p.StaleReason = "depends on package being tested"
	}
}
@@ -1218,11 +1243,11 @@ func (b *builder) notest(a *action) error {
	return nil
}

// isTestMain tells whether fn is a TestMain(m *testing.M) function.
func isTestMain(fn *ast.FuncDecl) bool {
	if fn.Name.String() != "TestMain" ||
		fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
		fn.Type.Params == nil ||
// isTestFunc tells whether fn has the type of a testing function. arg
// specifies the parameter type we look for: B, M or T.
func isTestFunc(fn *ast.FuncDecl, arg string) bool {
	if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
		fn.Type.Params.List == nil ||
		len(fn.Type.Params.List) != 1 ||
		len(fn.Type.Params.List[0].Names) > 1 {
		return false
@@ -1234,10 +1259,11 @@ func isTestMain(fn *ast.FuncDecl) bool {
	// We can't easily check that the type is *testing.M
	// because we don't know how testing has been imported,
	// but at least check that it's *M or *something.M.
	if name, ok := ptr.X.(*ast.Ident); ok && name.Name == "M" {
	// Same applies for B and T.
	if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg {
		return true
	}
	if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == "M" {
	if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg {
		return true
	}
	return false
@@ -1337,6 +1363,7 @@ type testFunc struct {
	Package string // imported package name (_test or _xtest)
	Name string // function name
	Output string // output, for examples
	Unordered bool // output is allowed to be unordered.
}

var testFileSet = token.NewFileSet()
@@ -1356,17 +1383,25 @@ func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
		}
		name := n.Name.String()
		switch {
		case isTestMain(n):
		case name == "TestMain" && isTestFunc(n, "M"):
			if t.TestMain != nil {
				return errors.New("multiple definitions of TestMain")
			}
			t.TestMain = &testFunc{pkg, name, ""}
			t.TestMain = &testFunc{pkg, name, "", false}
			*doImport, *seen = true, true
		case isTest(name, "Test"):
			t.Tests = append(t.Tests, testFunc{pkg, name, ""})
			err := checkTestFunc(n, "T")
			if err != nil {
				return err
			}
			t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
			*doImport, *seen = true, true
		case isTest(name, "Benchmark"):
			t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, ""})
			err := checkTestFunc(n, "B")
			if err != nil {
				return err
			}
			t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
			*doImport, *seen = true, true
		}
	}
@@ -1378,12 +1413,21 @@ func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
			// Don't run examples with no output.
			continue
		}
		t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output})
		t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
		*seen = true
	}
	return nil
}

func checkTestFunc(fn *ast.FuncDecl, arg string) error {
	if !isTestFunc(fn, arg) {
		name := fn.Name.String()
		pos := testFileSet.Position(fn.Pos())
		return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg)
	}
	return nil
}

type byOrder []*doc.Example

func (x byOrder) Len() int { return len(x) }
@@ -1429,7 +1473,7 @@ var benchmarks = []testing.InternalBenchmark{

var examples = []testing.InternalExample{
{{range .Examples}}
	{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}},
	{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
{{end}}
}
@@ -0,0 +1,7 @@
package benchfatal

import "testing"

func BenchmarkThatCallsFatal(b *testing.B) {
	b.Fatal("called by benchmark")
}
@@ -149,9 +149,11 @@ func testFlags(args []string) (packageNames, passToTest []string) {
		testBench = true
	case "timeout":
		testTimeout = value
	case "blockprofile", "cpuprofile", "memprofile", "trace":
	case "blockprofile", "cpuprofile", "memprofile":
		testProfile = true
		testNeedBinary = true
	case "trace":
		testProfile = true
	case "coverpkg":
		testCover = true
		if value == "" {
@@ -174,7 +174,7 @@ func gitRemoteRepo(vcsGit *vcsCmd, rootDir string) (remoteRepo string, err error
			Scheme: "ssh",
			User: url.User(m[1]),
			Host: m[2],
			RawPath: m[3],
			Path: m[3],
		}
	} else {
		repoURL, err = url.Parse(out)
@@ -253,7 +253,7 @@ func bzrResolveRepo(vcsBzr *vcsCmd, rootDir, remoteRepo string) (realRepo string
		return "", fmt.Errorf("unable to parse output of bzr info")
	}
	out = out[:i]
	return strings.TrimSpace(string(out)), nil
	return strings.TrimSpace(out), nil
}

// vcsSvn describes how to use Subversion.
@@ -294,7 +294,7 @@ func svnRemoteRepo(vcsSvn *vcsCmd, rootDir string) (remoteRepo string, err error
		return "", fmt.Errorf("unable to parse output of svn info")
	}
	out = out[:i]
	return strings.TrimSpace(string(out)), nil
	return strings.TrimSpace(out), nil
}

func (v *vcsCmd) String() string {
@@ -383,9 +383,6 @@ func (v *vcsCmd) ping(scheme, repo string) error {
// The parent of dir must exist; dir must not.
func (v *vcsCmd) create(dir, repo string) error {
	for _, cmd := range v.createCmd {
		if !go15VendorExperiment && strings.Contains(cmd, "submodule") {
			continue
		}
		if err := v.run(".", cmd, "dir", dir, "repo", repo); err != nil {
			return err
		}
@@ -396,9 +393,6 @@ func (v *vcsCmd) create(dir, repo string) error {
// download downloads any new changes for the repo in dir.
func (v *vcsCmd) download(dir string) error {
	for _, cmd := range v.downloadCmd {
		if !go15VendorExperiment && strings.Contains(cmd, "submodule") {
			continue
		}
		if err := v.run(dir, cmd); err != nil {
			return err
		}
@@ -445,9 +439,6 @@ func (v *vcsCmd) tagSync(dir, tag string) error {

	if tag == "" && v.tagSyncDefault != nil {
		for _, cmd := range v.tagSyncDefault {
			if !go15VendorExperiment && strings.Contains(cmd, "submodule") {
				continue
			}
			if err := v.run(dir, cmd); err != nil {
				return err
			}
@@ -456,9 +447,6 @@ func (v *vcsCmd) tagSync(dir, tag string) error {
	}

	for _, cmd := range v.tagSyncCmd {
		if !go15VendorExperiment && strings.Contains(cmd, "submodule") {
			continue
		}
		if err := v.run(dir, cmd, "tag", tag); err != nil {
			return err
		}
@@ -479,15 +467,14 @@ type vcsPath struct {
	regexp *regexp.Regexp // cached compiled form of re
}

// vcsForDir inspects dir and its parents to determine the
// vcsFromDir inspects dir and its parents to determine the
// version control system and code repository to use.
// On return, root is the import path
// corresponding to the root of the repository
// (thus root is a prefix of importPath).
func vcsForDir(p *Package) (vcs *vcsCmd, root string, err error) {
// corresponding to the root of the repository.
func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) {
	// Clean and double-check that dir is in (a subdirectory of) srcRoot.
	dir := filepath.Clean(p.Dir)
	srcRoot := filepath.Clean(p.build.SrcRoot)
	dir = filepath.Clean(dir)
	srcRoot = filepath.Clean(srcRoot)
	if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
		return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
	}

@@ -496,7 +483,7 @@ func vcsForDir(p *Package) (vcs *vcsCmd, root string, err error) {
	for len(dir) > len(srcRoot) {
		for _, vcs := range vcsList {
			if fi, err := os.Stat(filepath.Join(dir, "."+vcs.cmd)); err == nil && fi.IsDir() {
				return vcs, dir[len(srcRoot)+1:], nil
				return vcs, filepath.ToSlash(dir[len(srcRoot)+1:]), nil
			}
		}
@@ -780,15 +767,31 @@ type metaImport struct {
// errNoMatch is returned from matchGoImport when there's no applicable match.
var errNoMatch = errors.New("no import match")

func splitPathHasPrefix(path, prefix []string) bool {
	if len(path) < len(prefix) {
		return false
	}
	for i, p := range prefix {
		if path[i] != p {
			return false
		}
	}
	return true
}

// matchGoImport returns the metaImport from imports matching importPath.
// An error is returned if there are multiple matches.
// errNoMatch is returned if none match.
func matchGoImport(imports []metaImport, importPath string) (_ metaImport, err error) {
	match := -1
	imp := strings.Split(importPath, "/")
	for i, im := range imports {
		if !strings.HasPrefix(importPath, im.Prefix) {
		pre := strings.Split(im.Prefix, "/")

		if !splitPathHasPrefix(imp, pre) {
			continue
		}

		if match != -1 {
			err = fmt.Errorf("multiple meta tags match import path %q", importPath)
			return
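// Why element-wise matching (illustrative note, not from the commit):
// a plain strings.HasPrefix check would let a meta tag for
// "example.com/user/foo" capture the unrelated path "example.com/user/fooa".
// Comparing whole path elements avoids that:
//
//	splitPathHasPrefix([]string{"example.com", "user", "fooa"},
//		[]string{"example.com", "user", "foo"}) // false
//	splitPathHasPrefix([]string{"example.com", "user", "foo", "bar"},
//		[]string{"example.com", "user", "foo"}) // true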
@@ -815,20 +818,6 @@ func expand(match map[string]string, s string) string {
// and import paths referring to a fully-qualified importPath
// containing a VCS type (foo.com/repo.git/dir)
var vcsPaths = []*vcsPath{
	// Google Code - new syntax
	{
		prefix: "code.google.com/",
		re: `^(?P<root>code\.google\.com/p/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`,
		repo: "https://{root}",
		check: googleCodeVCS,
	},

	// Google Code - old syntax
	{
		re: `^(?P<project>[a-z0-9_\-.]+)\.googlecode\.com/(git|hg|svn)(?P<path>/.*)?$`,
		check: oldGoogleCode,
	},

	// Github
	{
		prefix: "github.com/",
@@ -863,6 +852,14 @@ var vcsPaths = []*vcsPath{
		repo: "https://{root}",
	},

	// Git at OpenStack
	{
		prefix: "git.openstack.org",
		re: `^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`,
		vcs: "git",
		repo: "https://{root}",
	},

	// General syntax for any server.
	// Must be last.
	{
@@ -911,45 +908,6 @@ func noVCSSuffix(match map[string]string) error {
	return nil
}

var googleCheckout = regexp.MustCompile(`id="checkoutcmd">(hg|git|svn)`)

// googleCodeVCS determines the version control system for
// a code.google.com repository, by scraping the project's
// /source/checkout page.
func googleCodeVCS(match map[string]string) error {
	if err := noVCSSuffix(match); err != nil {
		return err
	}
	data, err := httpGET(expand(match, "https://code.google.com/p/{project}/source/checkout?repo={subrepo}"))
	if err != nil {
		return err
	}

	if m := googleCheckout.FindSubmatch(data); m != nil {
		if vcs := vcsByCmd(string(m[1])); vcs != nil {
			// Subversion requires the old URLs.
			// TODO: Test.
			if vcs == vcsSvn {
				if match["subrepo"] != "" {
					return fmt.Errorf("sub-repositories not supported in Google Code Subversion projects")
				}
				match["repo"] = expand(match, "https://{project}.googlecode.com/svn")
			}
			match["vcs"] = vcs.cmd
			return nil
		}
	}

	return fmt.Errorf("unable to detect version control system for code.google.com/ path")
}

// oldGoogleCode is invoked for old-style foo.googlecode.com paths.
// It prints an error giving the equivalent new path.
func oldGoogleCode(match map[string]string) error {
	return fmt.Errorf("invalid Google Code import path: use %s instead",
		expand(match, "code.google.com/p/{project}{path}"))
}

// bitbucketVCS determines the version control system for a
// Bitbucket repository, by using the Bitbucket API.
func bitbucketVCS(match map[string]string) error {
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"internal/testenv"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
|
@@ -18,20 +23,6 @@ func TestRepoRootForImportPath(t *testing.T) {
		path string
		want *repoRoot
	}{
		/*{
			"code.google.com/p/go",
			&repoRoot{
				vcs:  vcsHg,
				repo: "https://code.google.com/p/go",
			},
		},
		{
			"code.google.com/r/go",
			&repoRoot{
				vcs:  vcsHg,
				repo: "https://code.google.com/r/go",
			},
		},*/
		{
			"github.com/golang/groupcache",
			&repoRoot{
@@ -96,6 +87,39 @@ func TestRepoRootForImportPath(t *testing.T) {
			"hub.jazz.net/git/USER/pkgname",
			nil,
		},
		// OpenStack tests
		{
			"git.openstack.org/openstack/swift",
			&repoRoot{
				vcs:  vcsGit,
				repo: "https://git.openstack.org/openstack/swift",
			},
		},
		// Trailing .git is less preferred but included for
		// compatibility purposes while the same source needs to
		// be compilable on both old and new go
		{
			"git.openstack.org/openstack/swift.git",
			&repoRoot{
				vcs:  vcsGit,
				repo: "https://git.openstack.org/openstack/swift",
			},
		},
		{
			"git.openstack.org/openstack/swift/go/hummingbird",
			&repoRoot{
				vcs:  vcsGit,
				repo: "https://git.openstack.org/openstack/swift",
			},
		},
		{
			"git.openstack.org",
			nil,
		},
		{
			"git.openstack.org/openstack",
			nil,
		},
		// Spaces are not valid in package name
		{
			"git.apache.org/package name/path/to/lib",
@@ -142,6 +166,37 @@ func TestRepoRootForImportPath(t *testing.T) {
	}
}

// Test that vcsFromDir correctly inspects a given directory and returns the right VCS and root.
func TestFromDir(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "vcstest")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tempDir)

	for _, vcs := range vcsList {
		dir := filepath.Join(tempDir, "example.com", vcs.name, "."+vcs.cmd)
		err := os.MkdirAll(dir, 0755)
		if err != nil {
			t.Fatal(err)
		}

		want := repoRoot{
			vcs:  vcs,
			root: path.Join("example.com", vcs.name),
		}
		var got repoRoot
		got.vcs, got.root, err = vcsFromDir(dir, tempDir)
		if err != nil {
			t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err)
			continue
		}
		if got.vcs.name != want.vcs.name || got.root != want.root {
			t.Errorf("FromDir(%q, %q) = VCS(%s) Root(%s), want VCS(%s) Root(%s)", dir, tempDir, got.vcs, got.root, want.vcs, want.root)
		}
	}
}

func TestIsSecure(t *testing.T) {
	tests := []struct {
		vcs *vcsCmd
@@ -173,3 +228,96 @@ func TestIsSecure(t *testing.T) {
		}
	}
}

func TestMatchGoImport(t *testing.T) {
	tests := []struct {
		imports []metaImport
		path    string
		mi      metaImport
		err     error
	}{
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo",
			mi:   metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo/",
			mi:   metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo",
			mi:   metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/fooa",
			mi:   metaImport{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"},
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo/bar",
			err:  errors.New("should not be allowed to create nested repo"),
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo/bar/baz",
			err:  errors.New("should not be allowed to create nested repo"),
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo/bar/baz/qux",
			err:  errors.New("should not be allowed to create nested repo"),
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com/user/foo/bar/baz/",
			err:  errors.New("should not be allowed to create nested repo"),
		},
		{
			imports: []metaImport{
				{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"},
				{Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"},
			},
			path: "example.com",
			err:  errors.New("pathologically short path"),
		},
	}

	for _, test := range tests {
		mi, err := matchGoImport(test.imports, test.path)
		if mi != test.mi {
			t.Errorf("unexpected metaImport; got %v, want %v", mi, test.mi)
		}

		got := err
		want := test.err
		if (got == nil) != (want == nil) {
			t.Errorf("unexpected error; got %v, want %v", got, want)
		}
	}
}
@@ -20,7 +20,6 @@ func TestVendorImports(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("list", "-f", "{{.ImportPath}} {{.Imports}}", "vend/...")
	want := `
		vend [vend/vendor/p r]

@@ -51,7 +50,6 @@ func TestVendorBuild(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("build", "vend/x")
}

@@ -59,7 +57,6 @@ func TestVendorRun(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.cd(filepath.Join(tg.pwd(), "testdata/src/vend/hello"))
	tg.run("run", "hello.go")
	tg.grepStdout("hello, world", "missing hello world output")

@@ -74,7 +71,6 @@ func TestVendorGOPATH(t *testing.T) {
	}
	gopath := changeVolume(filepath.Join(tg.pwd(), "testdata"), strings.ToLower)
	tg.setenv("GOPATH", gopath)
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	cd := changeVolume(filepath.Join(tg.pwd(), "testdata/src/vend/hello"), strings.ToUpper)
	tg.cd(cd)
	tg.run("run", "hello.go")

@@ -85,7 +81,6 @@ func TestVendorTest(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.cd(filepath.Join(tg.pwd(), "testdata/src/vend/hello"))
	tg.run("test", "-v")
	tg.grepStdout("TestMsgInternal", "missing use in internal test")

@@ -96,7 +91,6 @@ func TestVendorInvalid(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")

	tg.runFail("build", "vend/x/invalid")
	tg.grepStderr("must be imported as foo", "missing vendor import error")

@@ -106,7 +100,6 @@ func TestVendorImportError(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")

	tg.runFail("build", "vend/x/vendor/p/p")

@@ -173,7 +166,6 @@ func TestVendorGet(t *testing.T) {
	package p
	const C = 1`)
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.cd(tg.path("src/v"))
	tg.run("run", "m.go")
	tg.run("test")

@@ -192,7 +184,6 @@ func TestVendorGetUpdate(t *testing.T) {
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("get", "github.com/rsc/go-get-issue-11864")
	tg.run("get", "-u", "github.com/rsc/go-get-issue-11864")
}

@@ -204,16 +195,15 @@ func TestGetSubmodules(t *testing.T) {
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("get", "-d", "github.com/rsc/go-get-issue-12612")
	tg.run("get", "-u", "-d", "github.com/rsc/go-get-issue-12612")
	tg.mustExist(tg.path("src/github.com/rsc/go-get-issue-12612/vendor/golang.org/x/crypto/.git"))
}

func TestVendorCache(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/testvendor"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.runFail("build", "p")
	tg.grepStderr("must be imported as x", "did not fail to build p")
}

@@ -225,7 +215,6 @@ func TestVendorTest2(t *testing.T) {
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("get", "github.com/rsc/go-get-issue-11864")

	// build -i should work

@@ -244,6 +233,32 @@ func TestVendorTest2(t *testing.T) {
	tg.run("test", "github.com/rsc/go-get-issue-11864/vendor/vendor.org/tx2")
}

func TestVendorTest3(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

	tg := testgo(t)
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.run("get", "github.com/clsung/go-vendor-issue-14613")

	tg.run("build", "-o", tg.path("a.out"), "-i", "github.com/clsung/go-vendor-issue-14613")

	// test folder should work
	tg.run("test", "-i", "github.com/clsung/go-vendor-issue-14613")
	tg.run("test", "github.com/clsung/go-vendor-issue-14613")

	// test with specified _test.go should work too
	tg.cd(filepath.Join(tg.path("."), "src"))
	tg.run("test", "-i", "github.com/clsung/go-vendor-issue-14613/vendor_test.go")
	tg.run("test", "github.com/clsung/go-vendor-issue-14613/vendor_test.go")

	// test with imported and not used
	tg.run("test", "-i", "github.com/clsung/go-vendor-issue-14613/vendor/mylibtesttest/myapp/myapp_test.go")
	tg.runFail("test", "github.com/clsung/go-vendor-issue-14613/vendor/mylibtesttest/myapp/myapp_test.go")
	tg.grepStderr("imported and not used:", `should say "imported and not used"`)
}

func TestVendorList(t *testing.T) {
	testenv.MustHaveExternalNetwork(t)

@@ -251,7 +266,6 @@ func TestVendorList(t *testing.T) {
	defer tg.cleanup()
	tg.makeTempdir()
	tg.setenv("GOPATH", tg.path("."))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.run("get", "github.com/rsc/go-get-issue-11864")

	tg.run("list", "-f", `{{join .TestImports "\n"}}`, "github.com/rsc/go-get-issue-11864/t")

@@ -272,7 +286,6 @@ func TestVendor12156(t *testing.T) {
	tg := testgo(t)
	defer tg.cleanup()
	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/testvendor2"))
	tg.setenv("GO15VENDOREXPERIMENT", "1")
	tg.cd(filepath.Join(tg.pwd(), "testdata/testvendor2/src/p"))
	tg.runFail("build", "p.go")
	tg.grepStderrNot("panic", "panicked")
@ -55,7 +55,6 @@ func report(err error) {
func usage() {
	fmt.Fprintf(os.Stderr, "usage: gofmt [flags] [path ...]\n")
	flag.PrintDefaults()
	os.Exit(2)
}

func initParserMode() {

@ -143,7 +142,9 @@ func visitFile(path string, f os.FileInfo, err error) error {
	if err == nil && isGoFile(f) {
		err = processFile(path, nil, os.Stdout, false)
	}
	if err != nil {
	// Don't complain if a file was deleted in the meantime (i.e.
	// the directory changed concurrently while running gofmt).
	if err != nil && !os.IsNotExist(err) {
		report(err)
	}
	return nil
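The visitFile hunk above makes gofmt tolerate files that vanish while it walks a tree concurrently with other tools. A minimal self-contained sketch of the same pattern; readIfPresent and the file name are illustrative, not part of gofmt:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// readIfPresent mirrors the visitFile change above: a file deleted between
// the directory walk and the open is silently skipped, while any other
// error is still reported.
func readIfPresent(path string) {
	data, err := ioutil.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		fmt.Fprintln(os.Stderr, err) // real failures still surface
		return
	}
	if err == nil {
		fmt.Printf("%s: %d bytes\n", path, len(data))
	}
}

func main() { readIfPresent("maybe-deleted.txt") }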
@ -159,7 +159,7 @@ func TestCRLF(t *testing.T) {
	if err != nil {
		t.Error(err)
	}
	if bytes.Index(data, []byte("\r\n")) < 0 {
	if !bytes.Contains(data, []byte("\r\n")) {
		t.Errorf("%s contains no CR/LF's", input)
	}

@ -167,7 +167,7 @@ func TestCRLF(t *testing.T) {
	if err != nil {
		t.Error(err)
	}
	if bytes.Index(data, []byte("\r")) >= 0 {
	if bytes.Contains(data, []byte("\r")) {
		t.Errorf("%s contains CR's", golden)
	}
}
@ -10,11 +10,9 @@ import (
	"reflect"
)

type simplifier struct {
	hasDotImport bool // package file contains: import . "some/import/path"
}
type simplifier struct{}

func (s *simplifier) Visit(node ast.Node) ast.Visitor {
func (s simplifier) Visit(node ast.Node) ast.Visitor {
	switch n := node.(type) {
	case *ast.CompositeLit:
		// array, slice, and map composite literals may be simplified

@ -68,10 +66,13 @@ func (s *simplifier) Visit(node ast.Node) ast.Visitor {
		// a slice expression of the form: s[a:len(s)]
		// can be simplified to: s[a:]
		// if s is "simple enough" (for now we only accept identifiers)
		if n.Max != nil || s.hasDotImport {
		//
		// Note: This may not be correct because len may have been redeclared in another
		//       file belonging to the same package. However, this is extremely unlikely
		//       and so far (April 2016, after years of supporting this rewrite feature)
		//       has never come up, so let's keep it working as is (see also #15153).
		if n.Max != nil {
			// - 3-index slices always require the 2nd and 3rd index
			// - if dot imports are present, we cannot be certain that an
			//   unresolved "len" identifier refers to the predefined len()
			break
		}
		if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil {

@ -118,20 +119,11 @@ func isBlank(x ast.Expr) bool {
}

func simplify(f *ast.File) {
	var s simplifier

	// determine if f contains dot imports
	for _, imp := range f.Imports {
		if imp.Name != nil && imp.Name.Name == "." {
			s.hasDotImport = true
			break
		}
	}

	// remove empty declarations such as "const ()", etc
	removeEmptyDeclGroups(f)

	ast.Walk(&s, f)
	var s simplifier
	ast.Walk(s, f)
}

func removeEmptyDeclGroups(f *ast.File) {
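For reference, a hypothetical input file in the style of the testdata below, showing what the rewrite now does regardless of dot imports; the expected "gofmt -s" result is described in each comment:

//gofmt -s

package p

var s []int

var (
	_ = s[2:len(s)] // rewritten to s[2:]
	_ = s[:len(s)]  // rewritten to s[:]
	_ = s[1:10:20]  // 3-index slice: kept, the 2nd and 3rd index are required
)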
@ -1,63 +0,0 @@
//gofmt -s

// Test cases for slice expression simplification.
// Because of a dot import, these slices must remain untouched.
package p

import . "math"

var (
	a [10]byte
	b [20]float32
	s []int
	t struct {
		s []byte
	}

	_ = a[0:]
	_ = a[1:10]
	_ = a[2:len(a)]
	_ = a[3:(len(a))]
	_ = a[len(a) : len(a)-1]
	_ = a[0:len(b)]

	_ = a[:]
	_ = a[:10]
	_ = a[:len(a)]
	_ = a[:(len(a))]
	_ = a[:len(a)-1]
	_ = a[:len(b)]

	_ = s[0:]
	_ = s[1:10]
	_ = s[2:len(s)]
	_ = s[3:(len(s))]
	_ = s[len(a) : len(s)-1]
	_ = s[0:len(b)]

	_ = s[:]
	_ = s[:10]
	_ = s[:len(s)]
	_ = s[:(len(s))]
	_ = s[:len(s)-1]
	_ = s[:len(b)]

	_ = t.s[0:]
	_ = t.s[1:10]
	_ = t.s[2:len(t.s)]
	_ = t.s[3:(len(t.s))]
	_ = t.s[len(a) : len(t.s)-1]
	_ = t.s[0:len(b)]

	_ = t.s[:]
	_ = t.s[:10]
	_ = t.s[:len(t.s)]
	_ = t.s[:(len(t.s))]
	_ = t.s[:len(t.s)-1]
	_ = t.s[:len(b)]
)

func _() {
	s := s[0:len(s)]
	_ = s
}
@ -1,63 +0,0 @@
//gofmt -s

// Test cases for slice expression simplification.
// Because of a dot import, these slices must remain untouched.
package p

import . "math"

var (
	a [10]byte
	b [20]float32
	s []int
	t struct {
		s []byte
	}

	_ = a[0:]
	_ = a[1:10]
	_ = a[2:len(a)]
	_ = a[3:(len(a))]
	_ = a[len(a) : len(a)-1]
	_ = a[0:len(b)]

	_ = a[:]
	_ = a[:10]
	_ = a[:len(a)]
	_ = a[:(len(a))]
	_ = a[:len(a)-1]
	_ = a[:len(b)]

	_ = s[0:]
	_ = s[1:10]
	_ = s[2:len(s)]
	_ = s[3:(len(s))]
	_ = s[len(a) : len(s)-1]
	_ = s[0:len(b)]

	_ = s[:]
	_ = s[:10]
	_ = s[:len(s)]
	_ = s[:(len(s))]
	_ = s[:len(s)-1]
	_ = s[:len(b)]

	_ = t.s[0:]
	_ = t.s[1:10]
	_ = t.s[2:len(t.s)]
	_ = t.s[3:(len(t.s))]
	_ = t.s[len(a) : len(t.s)-1]
	_ = t.s[0:len(b)]

	_ = t.s[:]
	_ = t.s[:10]
	_ = t.s[:len(t.s)]
	_ = t.s[:(len(t.s))]
	_ = t.s[:len(t.s)-1]
	_ = t.s[:len(b)]
)

func _() {
	s := s[0:len(s)]
	_ = s
}
@ -27,9 +27,8 @@ type reader struct {
	blockCRC     uint32
	wantBlockCRC uint32
	setupDone    bool // true if we have parsed the bzip2 header.
	blockSize    int  // blockSize in bytes, i.e. 900 * 1024.
	blockSize    int  // blockSize in bytes, i.e. 900 * 1000.
	eof          bool
	buf          []byte    // stores Burrows-Wheeler transformed data.
	c            [256]uint // the `C' array for the inverse BWT.
	tt           []uint32  // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits.
	tPos         uint32    // Index of the next output byte in tt.

@ -76,7 +75,7 @@ func (bz2 *reader) setup(needMagic bool) error {
	}

	bz2.fileCRC = 0
	bz2.blockSize = 100 * 1024 * (int(level) - '0')
	bz2.blockSize = 100 * 1000 * (level - '0')
	if bz2.blockSize > len(bz2.tt) {
		bz2.tt = make([]uint32, bz2.blockSize)
	}

@ -294,7 +293,7 @@ func (bz2 *reader) readBlock() (err error) {
		if c >= numHuffmanTrees {
			return StructuralError("tree index too large")
		}
		treeIndexes[i] = uint8(mtfTreeDecoder.Decode(c))
		treeIndexes[i] = mtfTreeDecoder.Decode(c)
	}

	// The list of symbols for the move-to-front transform is taken from

@ -319,6 +318,9 @@ func (bz2 *reader) readBlock() (err error) {
		length := br.ReadBits(5)
		for j := range lengths {
			for {
				if length < 1 || length > 20 {
					return StructuralError("Huffman length out of range")
				}
				if !br.ReadBit() {
					break
				}

@ -328,9 +330,6 @@ func (bz2 *reader) readBlock() (err error) {
				length++
			}
		}
		if length < 0 || length > 20 {
			return StructuralError("Huffman length out of range")
		}
		lengths[j] = uint8(length)
	}
	huffmanTrees[i], err = newHuffmanTree(lengths)
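The two hunks above move the range check inside the modification loop. The underlying format delta-codes Huffman code lengths: a 5-bit starting value, then per symbol a run of 1 bits, each followed by a direction bit, terminated by a 0 bit. A self-contained sketch under that reading; bits is a hypothetical pre-parsed bit slice, whereas the real code pulls bits from br:

// decodeLengths delta-decodes bzip2 Huffman code lengths: starting from a
// 5-bit initial value, each symbol's length is adjusted while 1 bits are
// read (a following 1 decrements, a 0 increments), and a lone 0 bit stops.
func decodeLengths(bits []int, start, numSymbols int) ([]int, bool) {
	lengths := make([]int, 0, numSymbols)
	length := start
	for len(lengths) < numSymbols {
		for {
			if length < 1 || length > 20 {
				return nil, false // out of range, as checked above
			}
			if len(bits) == 0 {
				return nil, false // truncated stream
			}
			if bits[0] == 0 { // stop bit: current length is final
				bits = bits[1:]
				break
			}
			if len(bits) < 2 {
				return nil, false
			}
			if bits[1] == 1 { // 1,1 decrements the length
				length--
			} else { // 1,0 increments it
				length++
			}
			bits = bits[2:]
		}
		lengths = append(lengths, length)
	}
	return lengths, true
}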
@ -400,7 +399,7 @@ func (bz2 *reader) readBlock() (err error) {
			return StructuralError("repeats past end of block")
		}
		for i := 0; i < repeat; i++ {
			b := byte(mtf.First())
			b := mtf.First()
			bz2.tt[bufIndex] = uint32(b)
			bz2.c[b]++
			bufIndex++

@ -421,7 +420,7 @@ func (bz2 *reader) readBlock() (err error) {
		// it's always referenced with a run-length of 1. Thus 0
		// doesn't need to be encoded and we have |v-1| in the next
		// line.
		b := byte(mtf.Decode(int(v - 1)))
		b := mtf.Decode(int(v - 1))
		if bufIndex >= bz2.blockSize {
			return StructuralError("data exceeds block size")
		}
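The blockSize fix above reflects that bzip2 block sizes are SI multiples of 100 kB, not KiB: header level digits '1' through '9' select 100,000 to 900,000 bytes. A minimal sketch of the mapping:

package main

import "fmt"

func main() {
	for level := byte('1'); level <= '9'; level++ {
		// Mirrors bz2.blockSize = 100 * 1000 * (level - '0') above.
		fmt.Printf("level %c -> %d bytes\n", level, 100*1000*int(level-'0'))
	}
}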
@ -6,193 +6,221 @@ package bzip2

import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"testing"
)

func mustDecodeHex(s string) []byte {
	b, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return b
}

func mustLoadFile(f string) []byte {
	b, err := ioutil.ReadFile(f)
	if err != nil {
		panic(err)
	}
	return b
}

func trim(b []byte) string {
	const limit = 1024
	if len(b) < limit {
		return fmt.Sprintf("%q", b)
	}
	return fmt.Sprintf("%q...", b[:limit])
}

func TestReader(t *testing.T) {
	var vectors = []struct {
		desc   string
		input  []byte
		output []byte
		fail   bool
	}{{
		desc: "hello world",
		input: mustDecodeHex("" +
			"425a68393141592653594eece83600000251800010400006449080200031064c" +
			"4101a7a9a580bb9431f8bb9229c28482776741b0",
		),
		output: []byte("hello world\n"),
	}, {
		desc: "concatenated files",
		input: mustDecodeHex("" +
			"425a68393141592653594eece83600000251800010400006449080200031064c" +
			"4101a7a9a580bb9431f8bb9229c28482776741b0425a68393141592653594eec" +
			"e83600000251800010400006449080200031064c4101a7a9a580bb9431f8bb92" +
			"29c28482776741b0",
		),
		output: []byte("hello world\nhello world\n"),
	}, {
		desc: "32B zeros",
		input: mustDecodeHex("" +
			"425a6839314159265359b5aa5098000000600040000004200021008283177245" +
			"385090b5aa5098",
		),
		output: make([]byte, 32),
	}, {
		desc: "1MiB zeros",
		input: mustDecodeHex("" +
			"425a683931415926535938571ce50008084000c0040008200030cc0529a60806" +
			"c4201e2ee48a70a12070ae39ca",
		),
		output: make([]byte, 1<<20),
	}, {
		desc:   "random data",
		input:  mustLoadFile("testdata/pass-random1.bz2"),
		output: mustLoadFile("testdata/pass-random1.bin"),
	}, {
		desc:   "random data - full symbol range",
		input:  mustLoadFile("testdata/pass-random2.bz2"),
		output: mustLoadFile("testdata/pass-random2.bin"),
	}, {
		desc: "random data - uses RLE1 stage",
		input: mustDecodeHex("" +
			"425a6839314159265359d992d0f60000137dfe84020310091c1e280e100e0428" +
			"01099210094806c0110002e70806402000546034000034000000f28300000320" +
			"00d3403264049270eb7a9280d308ca06ad28f6981bee1bf8160727c7364510d7" +
			"3a1e123083421b63f031f63993a0f40051fbf177245385090d992d0f60",
		),
		output: mustDecodeHex("" +
			"92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f871" +
			"9e6c2a2bd4f5f88db07ecd0da3a33b263483db9b2c158786ad6363be35d17335" +
			"ba",
		),
	}, {
		desc:  "1MiB sawtooth",
		input: mustLoadFile("testdata/pass-sawtooth.bz2"),
		output: func() []byte {
			b := make([]byte, 1<<20)
			for i := range b {
				b[i] = byte(i)
			}
			return b
		}(),
	}, {
		desc:  "RLE2 buffer overrun - issue 5747",
		input: mustLoadFile("testdata/fail-issue5747.bz2"),
		fail:  true,
	}, {
		desc: "out-of-range selector - issue 8363",
		input: mustDecodeHex("" +
			"425a68393141592653594eece83600000251800010400006449080200031064c" +
			"4101a7a9a580bb943117724538509000000000",
		),
		fail: true,
	}, {
		desc: "bad block size - issue 13941",
		input: mustDecodeHex("" +
			"425a683131415926535936dc55330063ffc0006000200020a40830008b0008b8" +
			"bb9229c28481b6e2a998",
		),
		fail: true,
	}, {
		desc: "bad huffman delta",
		input: mustDecodeHex("" +
			"425a6836314159265359b1f7404b000000400040002000217d184682ee48a70a" +
			"12163ee80960",
		),
		fail: true,
	}}

	for i, v := range vectors {
		rd := NewReader(bytes.NewReader(v.input))
		buf, err := ioutil.ReadAll(rd)

		if fail := bool(err != nil); fail != v.fail {
			if fail {
				t.Errorf("test %d (%s), unexpected failure: %v", i, v.desc, err)
			} else {
				t.Errorf("test %d (%s), unexpected success", i, v.desc)
			}
		}
		if !v.fail && !bytes.Equal(buf, v.output) {
			t.Errorf("test %d (%s), output mismatch:\ngot %s\nwant %s", i, v.desc, trim(buf), trim(v.output))
		}
	}
}
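A hypothetical example function, not part of this change, showing the public round trip the table above exercises; it reuses mustDecodeHex and the hello-world vector from TestReader:

func ExampleNewReader() {
	// The "hello world" vector from TestReader above.
	input := mustDecodeHex("" +
		"425a68393141592653594eece83600000251800010400006449080200031064c" +
		"4101a7a9a580bb9431f8bb9229c28482776741b0")
	out, err := ioutil.ReadAll(NewReader(bytes.NewReader(input)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
	// Output: hello world
}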
func TestBitReader(t *testing.T) {
	buf := bytes.NewReader([]byte{0xaa})
	br := newBitReader(buf)
	if n := br.ReadBits(1); n != 1 {
		t.Errorf("read 1 wrong")
	var vectors = []struct {
		nbits uint // Number of bits to read
		value int  // Expected output value (0 for error)
		fail  bool // Expected operation failure?
	}{
		{nbits: 1, value: 1},
		{nbits: 1, value: 0},
		{nbits: 1, value: 1},
		{nbits: 5, value: 11},
		{nbits: 32, value: 0x12345678},
		{nbits: 15, value: 14495},
		{nbits: 3, value: 6},
		{nbits: 6, value: 13},
		{nbits: 1, fail: true},
	}
	if n := br.ReadBits(1); n != 0 {
		t.Errorf("read 2 wrong")

	rd := bytes.NewReader([]byte{0xab, 0x12, 0x34, 0x56, 0x78, 0x71, 0x3f, 0x8d})
	br := newBitReader(rd)
	for i, v := range vectors {
		val := br.ReadBits(v.nbits)
		if fail := bool(br.err != nil); fail != v.fail {
			if fail {
				t.Errorf("test %d, unexpected failure: ReadBits(%d) = %v", i, v.nbits, br.err)
			} else {
				t.Errorf("test %d, unexpected success: ReadBits(%d) = nil", i, v.nbits)
			}
	if n := br.ReadBits(1); n != 1 {
		t.Errorf("read 3 wrong")
	}
	if n := br.ReadBits(1); n != 0 {
		t.Errorf("read 4 wrong")
		if !v.fail && val != v.value {
			t.Errorf("test %d, mismatching value: ReadBits(%d) = %d, want %d", i, v.nbits, val, v.value)
		}
	}
}

func TestBitReaderLarge(t *testing.T) {
	buf := bytes.NewReader([]byte{0x12, 0x34, 0x56, 0x78})
	br := newBitReader(buf)
	if n := br.ReadBits(32); n != 0x12345678 {
		t.Errorf("got: %x want: %x", n, 0x12345678)
func TestMTF(t *testing.T) {
	var vectors = []struct {
		idx int   // Input index
		sym uint8 // Expected output symbol
	}{
		{idx: 1, sym: 1}, // [1 0 2 3 4]
		{idx: 0, sym: 1}, // [1 0 2 3 4]
		{idx: 1, sym: 0}, // [0 1 2 3 4]
		{idx: 4, sym: 4}, // [4 0 1 2 3]
		{idx: 1, sym: 0}, // [0 4 1 2 3]
	}

	mtf := newMTFDecoderWithRange(5)
	for i, v := range vectors {
		sym := mtf.Decode(v.idx)
		t.Log(mtf)
		if sym != v.sym {
			t.Errorf("test %d, symbol mismatch: Decode(%d) = %d, want %d", i, v.idx, sym, v.sym)
		}
	}
}

func readerFromHex(s string) io.Reader {
	data, err := hex.DecodeString(s)
	if err != nil {
		panic("readerFromHex: bad input")
	}
	return bytes.NewReader(data)
}

func decompressHex(s string) (out []byte, err error) {
	r := NewReader(readerFromHex(s))
	return ioutil.ReadAll(r)
}

func TestHelloWorldBZ2(t *testing.T) {
	out, err := decompressHex(helloWorldBZ2Hex)
	if err != nil {
		t.Errorf("error from Read: %s", err)
		return
	}

	if !bytes.Equal(helloWorld, out) {
		t.Errorf("got %x, want %x", out, helloWorld)
	}
}

func TestConcat(t *testing.T) {
	out, err := decompressHex(helloWorldBZ2Hex + helloWorldBZ2Hex)
	if err != nil {
		t.Errorf("error from Read: %s", err)
		return
	}

	hello2 := bytes.Repeat(helloWorld, 2)
	if !bytes.Equal(hello2, out) {
		t.Errorf("got %x, want %x", out, hello2)
	}
}

func testZeros(t *testing.T, inHex string, n int) {
	out, err := decompressHex(inHex)
	if err != nil {
		t.Errorf("error from Read: %s", err)
		return
	}

	expected := make([]byte, n)

	if !bytes.Equal(expected, out) {
		allZeros := true
		for _, b := range out {
			if b != 0 {
				allZeros = false
				break
			}
		}
		t.Errorf("incorrect result, got %d bytes (allZeros: %t)", len(out), allZeros)
	}
}

func Test32Zeros(t *testing.T) {
	testZeros(t, thirtyTwoZerosBZ2Hex, 32)
}

func Test1MBZeros(t *testing.T) {
	testZeros(t, oneMBZerosBZ2Hex, 1024*1024)
}

func testRandomData(t *testing.T, compressedHex, uncompressedHex string) {
	out, err := decompressHex(compressedHex)
	if err != nil {
		t.Errorf("error from Read: %s", err)
		return
	}

	expected, _ := hex.DecodeString(uncompressedHex)

	if !bytes.Equal(out, expected) {
		t.Errorf("incorrect result\ngot: %x\nwant: %x", out, expected)
	}
}

func TestRandomData1(t *testing.T) {
	testRandomData(t, randBZ2Hex, randHex)
}

func TestRandomData2(t *testing.T) {
	// This test involves several repeated bytes in the output, but they
	// should trigger RLE decoding.
	testRandomData(t, rand2BZ2Hex, rand2Hex)
}

func TestRandomData3(t *testing.T) {
	// This test uses the full range of symbols.
	testRandomData(t, rand3BZ2Hex, rand3Hex)
}

func Test1MBSawtooth(t *testing.T) {
	out, err := decompressHex(oneMBSawtoothBZ2Hex)
	if err != nil {
		t.Errorf("error from Read: %s", err)
		return
	}

	expected := make([]byte, 1024*1024)

	for i := range expected {
		expected[i] = byte(i)
	}

	if !bytes.Equal(out, expected) {
		t.Error("incorrect result")
	}
}
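newMTFDecoderWithRange and Decode above are package internals; the following self-contained sketch only mirrors the behavior the TestMTF vectors encode. Starting from the list [0 1 2 3 4], decoding indexes 1, 0, 1, 4, 1 yields symbols 1, 1, 0, 4, 0, matching the vectors:

// mtfDecode performs one move-to-front decoding step: look up the symbol
// at idx, then move it to the front of the list.
func mtfDecode(list []byte, idx int) byte {
	sym := list[idx]
	copy(list[1:idx+1], list[:idx]) // shift the prefix right by one
	list[0] = sym                   // the decoded symbol moves to the front
	return sym
}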
const helloWorldBZ2Hex = "425a68393141592653594eece83600000251800010400006449080200031064c4101a7a9a580bb9431f8bb9229c28482776741b0"
var helloWorld = []byte("hello world\n")
const thirtyTwoZerosBZ2Hex = "425a6839314159265359b5aa5098000000600040000004200021008283177245385090b5aa5098"
const oneMBZerosBZ2Hex = "425a683931415926535938571ce50008084000c0040008200030cc0529a60806c4201e2ee48a70a12070ae39ca"
const randBZ2Hex = "425a6839314159265359905d990d0001957fffffffffffafffffffffffffffffbfff6fffdfffffffffffffffffffffffffffffc002b6dd75676ed5b77720098320d11a64626981323d4da47a83131a13d09e8040f534cd4f4d27a464d193008cd09804601347a980026350c9886234d36864193d1351b44c136919e90340d26127a4cd264c32023009898981310c0344c340027a8303427a99a04c00003534c230d034f5006468d268cf54d36a3009a69a62626261311b40026013d34201a6934c9a604c98ca6c8460989fa9346234d30d3469a2604fd4131a7aa6d0046043d4c62098479269e89e835190d018d4c046001a11e801a0264792321932308c43a130688c260d46686804cd01a9e80981193684c6a68c00000004c4c20c04627a4c0000260003400d04c0681a01334026009a6f48041466132581ec5212b081d96b0effc16543e2228b052fcd30f2567ee8d970e0f10aabca68dd8270591c376cfc1baae0dba00aaff2d6caf6b211322c997cc18eaee5927f75185336bf907021324c71626c1dd20e22b9b0977f05d0f901eaa51db9fbaf7c603b4c87bc82890e6dd7e61d0079e27ec050dd788fd958152061cd01e222f9547cb9efc465d775b6fc98bac7d387bffd151ae09dadf19494f7a638e2eae58e550faba5fe6820ea520eb986096de4e527d80def3ba625e71fbefdcf7e7844e0a25d29b52dcd1344fca083737d42692aab38d230485f3c8ed54c2ed31f15cf0270c8143765b10b92157233fa1dfe0d7ce8ffe70b8b8f7250071701dfe9f1c94de362c9031455951c93eb098a6b50ee45c6131fefc3b6f9643e21f4adc59497138e246f5c57d834aa67c4f10d8bd8b3908d8130dd7388409c299a268eab3664fa4907c5c31574874bd8d388a4ab22b339660804e53e1b8d05867d40e3082560608d35d5d2c6054e8bab23da28f61f83efd41d25529ad6ea15fb50505cacfabb0902166427354ca3830a2c8415f21b19e592690fbe447020d685a4bcd16ecc4ff1a1c0e572627d0ef6265c008a43fc243240541061ed7840606be466d1c0dac2c53250ed567507d926c844154560d631960c65e15157829b2c7f16859f111a3a8cb72bf24ffa57a680c3be67b1be67c8dd8aea73ac2437a78df5b686d427080ebc01bd30b71a49f6ea31dc0f08e4849e38face96717690239538bc08b6cc5aa8d467cb9c36aa83d40ac7e58bddbfa185b22065e89a86c0145569d9e23726651aec49e31588d70f40fe9a4449dcf4f89eac220171e9c938e803dc195679651004b79ad33cc0c13aeeba5941b33ffeeb8fbe16e76c7811445c67b4269c90479433ddf9e8ed1d00c166b6c17217fb22c3ef1b0c1c7e28e185446a111c37f1ea6c07a59fbcc6546ecc6968d36ba58bc5489a5640647e426b0c39350cb6f07d5dc7a717648c4ec7f841467597ae1f65f408fd2d9940a4b1b860b3c9ae351dcae0b4425f7e8538710f2e40b7f70d13b51ac05ccc6ecda8264a88cad2d721d18132a9b9110a9e759c2483c77dcefc7e464ec88588174cb0c9abff93230ea0bed8decdd8ed8bfe2b5df0a253803678df04fab44c03b9ab7cc97d6e6d6fd0c4c840ce0efc498436f453bbb181603459471f2b588724592b222ec990614db530e10cadd84705621cfdd9261fa44a5f5806a2d74b575056b3c915255c65678f9c16e6dc00a99180fef1a840aff0e842ac02731080cc92782538360a60a727991013984da4fad95f79d5030677b7528d076b2483685fca4429edf804682fdc110dfc2f7c30e23e20a72e039108a0ad6fdee2f76985a4b4be4f5afc6101bf9d5042b657a05dc914e1424241766434"
const randHex = "c95138082bdf2b9bfa5b1072b23f729735d42c785eeb94320fb14c265b9c2ca421d01a3db986df1ac2acde5a0e6bf955d6f95e61261540905928e195f1a66644cc7f37281744fff4dc6df35566a494c41a8167151950eb74f5fc45f85ad0e5ed28b49adfe218aa7ec1707e8e1d55825f61f72beda3b4c006b8c9188d7336a5d875329b1b58c27cc4e89ecbae02c7712400c39dd131d2c6de82e2863da51d472bdfb21ecce62cc9cf769ed28aedc7583d755da45a0d90874bda269dd53283a9bdfd05f95fc8e9a304bb338ea1a2111894678c18134f17d31a15d9bfc1237894650f3e715e2548639ecbddb845cfe4a46a7b3a3c540f48629488e8c869f1e9f3f4c552243a8105b20eb8e264994214349dae83b165fd6c2a5b8e83fce09fc0a80d3281c8d53a9a08095bd19cbc1388df23975646ed259e003d39261ee68cbece8bcf32971f7fe7e588e8ba8f5e8597909abaea693836a79a1964050ed910a45a0f13a58cd2d3ae18992c5b23082407fd920d0bf01e33118a017bb5e39f44931346845af52128f7965206759433a346034ea481671f501280067567619f5ecef6cded077f92ed7f3b3ce8e308c80f34ba06939e9303f91b4318c8c1dd4cc223c1f057ac0c91211c629cd30e46ee9ec1d9fd493086b7bc2bc83e33f08749a5d430b0ed4f79d70f481940c9b0930b16321886a0df4fa5a1465d5208c7d3494a7987d9a5e42aa256f0c9523947f8318d0ef0af3d59a45cfc2418d0785c9a548b32b81e7de18be7d55a69a4c156bbb3d7579c0ac8e9c72b24646e54b0d0e8725f8f49fb44ae3c6b9d0287be118586255a90a4a83483ed0328518037e52aa959c5748ed83e13023e532306be98b8288da306bbb040bcf5d92176f84a9306dc6b274b040370b61d71fde58dd6d20e6fee348eae0c54bd0a5a487b2d005f329794f2a902c296af0a4c1f638f63292a1fa18e006c1b1838636f4de71c73635b25660d32e88a0917e1a5677f6a02ca65585b82cbd99fb4badbfa97a585da1e6cadf6737b4ec6ca33f245d66ee6a9fae6785d69b003c17b9fc6ec34fe5824ab8caae5e8e14dc6f9e116e7bf4a60c04388783c8ae929e1b46b3ef3bbe81b38f2fa6da771bf39dfba2374d3d2ed356b8e2c42081d885a91a3afb2f31986d2f9873354c48cf5448492c32e62385af423aa4f83db6d1b2669650379a1134b0a04cbca0862d6f9743c791cbb527d36cd5d1f0fc7f503831c8bd1b7a0ef8ae1a5ed1155dfdd9e32b6bb33138112d3d476b802179cb85a2a6c354ccfed2f31604fbd8d6ec4baf9f1c8454f72c6588c06a7df3178c43a6970bfa02dd6f74cb5ec3b63f9eddaa17db5cbf27fac6de8e57c384afd0954179f7b5690c3bee42abc4fa79b4b12101a9cf5f0b9aecdda945def0bd04163237247d3539850e123fe18139f316fa0256d5bd2faa8"
const oneMBSawtoothBZ2Hex = "425a683931415926535971931ea00006ddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe007de00000000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000009aaaaa0000000000000000000000000000000000000000000000000000000000000000498002600026000000000000000000000000000000000000000000000000000000007fc42271980d044c0a822607411304a08982d044c1a82260f411308a08984d044c2a82261741130ca08986d044c3a82261f411310a08988d044c4a822627411314a0898ad044c5a82262f411318a0898cd044c6a82263741131ca0898ed044c7a82263f411320a08990d044c8a822647411324a08992d044c9a82264f411328a08994d044caa82265741132ca08996d044cba82265f411330a08998d044cca822667411334a0899ad044cda82266f411338a0899cd044cea82267741133ca0899ed044cfa82267f411340a089a0d044d0a822687411344a089a2d044d1a82268f411348a089a4d044d2a82269741134ca089a6d044d3a82269f411350a089a8d044d4a8226a7411354a089aad044d5a8226af411358a089acd044d6a8226b741135ca089aed044d7a8226bf411360a089b0d044d8a8226c7411364a089b2d044d9a8226cf411368a089b4d044daa8226d741136ca089b6d044dba8226df411370a089b8d044dca8226e7411374a089bad044dda8226ef411378a089bcd044dea8226f741137ca089bed044dfa8226ff411380a089c0d044e0a822707411384a089c2d044e1a82270f411388a089c4d044e2a82271741138ca089c59089c69089c71089c79089c81089c89089c91089c99089ca1089ca9089cb1089cb9089cc1089cc9089cd1089cd9089ce1089ce9089cf1089cf9089d01089d09089d11089d19089d21089d29089d31089d39089d41089d49089d51089d59089d61089d69089d71089d79089d81089d89089d91089d99089da1089da9089db1089db9089dc1089dc9089dd1089dd9089de1089de9089df1089df9089e01089e09089e11089e19089e21089e29089e31089e39089e41089e49089e51089e59089e61089e69089e71089e79089e81089e89089e91089e99089ea1089ea9089eb1089eb9089ec1089ec9089ed1089ed9089ee1089ee9089ef1089ef9089f01089f09089f11089f19089f21089f29089f31089f39089f41089f49089f51089f59089f61089f69089f71089f79089f81089f89089f91089f99089fa1089fa9089fb1089fb9089fc1089fc9089fd1089fd9089fe1089fe9089ff1089ff98a0ac9329acf23ba884804fdd3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0034f800000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000024c0013000130000000000000000000000000000000000000000000000000000000002955540000000000000000000000000000000000000000000000000000000000000001ff108c00846024230221181908c108460a4230621183908c20846124230a21185908c308461a4230e21187908c40846224231221189908c508462a423162118b908c60846324231a2118d908c708463a4231e2118f908c80846424232221191908c908464a4232621193908ca0846524232a21195908cb08465a4232e21197908cc0846624233221199908cd08466a423362119b908ce0846724233a2119d908cf08467a4233e2119f908d008468242342211a1908d108468a42346211a3908d20846924234a211a5908d308469a4234e211a7908d40846a242352211a9908d50846aa42356211ab908d60846b24235a211ad908d70846ba4235e211af908d80846c242362211b1908d90846ca42366211b3908da0846d24236a211b5908db0846da4236e211b7908dc0846e242372211b9908dd0846ea42376211bb908de0846f24237a211bd908df0846fa4237e211bf908e008470242382211c1908e108470a42386211c3908e20847124238a211c5908e2f8c211c6c8471d211c7c84721211c8c84725211c9c84729211cac8472d211cbc84731211ccc84735211cdc84739211cec8473d211cfc84741211d0c84745211d1c84749211d2c8474d21
1d3c84751211d4c84755211d5c84759211d6c8475d211d7c84761211d8c84765211d9c84769211dac8476d211dbc84771211dcc84775211ddc84779211dec8477d211dfc84781211e0c84785211e1c84789211e2c8478d211e3c84791211e4c84795211e5c84799211e6c8479d211e7c847a1211e8c847a5211e9c847a9211eac847ad211ebc847b1211ecc847b5211edc847b9211eec847bd211efc847c1211f0c847c5211f1c847c9211f2c847cd211f3c847d1211f4c847d5211f5c847d9211f6c847dd211f7c847e1211f8c847e5211f9c847e9211fac847ed211fbc847f1211fcc847f5211fdc847f9211fec847fd211ff8bb9229c284803a8b6248"
const rand2BZ2Hex = "425a6839314159265359d992d0f60000137dfe84020310091c1e280e100e042801099210094806c0110002e70806402000546034000034000000f2830000032000d3403264049270eb7a9280d308ca06ad28f6981bee1bf8160727c7364510d73a1e123083421b63f031f63993a0f40051fbf177245385090d992d0f60"
const rand2Hex = "92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f8719e6c2a2bd4f5f88db07ecd0da3a33b263483db9b2c158786ad6363be35d17335ba"
const rand3BZ2Hex = "425a68393141592653593be669d00000327ffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffc002b3b2b1b6e2bae400004c00132300004c0d268c004c08c0130026001a008683234c0684c34008c230261a04c0260064d07a8d00034000d27a1268c9931a8d327a3427a41faa69ea0da264c1a34219326869b51b49a6469a3268c689fa53269a62794687a9a68f5189994c9e487a8f534fd49a3d34043629e8c93d04da4f4648d30d4f44d3234c4d3023d0840680984d309934c234d3131a000640984f536a6132601300130130c8d00d04d1841ea7a8d31a02609b40023460010c01a34d4c1a0d04d3069306810034d0d0d4c0046130d034d0131a9a64d321804c68003400098344c13000991808c0001a00000000098004d3d4da4604c47a13012140aadf8d673c922c607ef6212a8c0403adea4b28aee578900e653b9cdeb8d11e6b838815f3ebaad5a01c5408d84a332170aff8734d4e06612d3c2889f31925fb89e33561f5100ae89b1f7047102e729373d3667e58d73aaa80fa7be368a1cc2dadd81d81ec8e1b504bd772ca31d03649269b01ceddaca07bf3d4eba24de141be3f86f93601e03714c0f64654671684f9f9528626fd4e1b76753dc0c54b842486b8d59d8ab314e86ca818e7a1f079463cbbd70d9b79b283c7edc419406311022e4be98c2c1374df9cdde2d008ce1d00e5f06ad1024baf555631f70831fc1023034e62be7c4bcb648caf276963ffa20e96bb50377fe1c113da0db4625b50741c35a058edb009c6ee5dbf93b8a6b060eec568180e8db791b82aab96cbf4326ca98361461379425ba8dcc347be670bdba7641883e5526ae3d833f6e9cb9bac9557747c79e206151072f7f0071dff3880411846f66bf4075c7462f302b53cb3400a74cf35652ad5641ed33572fd54e7ed7f85f58a0acba89327e7c6be5c58cb71528b99df2431f1d0358f8d28d81d95292da631fb06701decabb205fac59ff0fb1df536afc681eece6ea658c4d9eaa45f1342aa1ff70bdaff2ddaf25ec88c22f12829a0553db1ec2505554cb17d7b282e213a5a2aa30431ded2bce665bb199d023840832fedb2c0c350a27291407ff77440792872137df281592e82076a05c64c345ffb058c64f7f7c207ef78420b7010520610f17e302cc4dfcfaef72a0ed091aab4b541eb0531bbe941ca2f792bf7b31ca6162882b68054a8470115bc2c19f2df2023f7800432b39b04d3a304e8085ba3f1f0ca5b1ba4d38d339e6084de979cdea6d0e244c6c9fa0366bd890621e3d30846f5e8497e21597b8f29bbf52c961a485dfbea647600da0fc1f25ce4d203a8352ece310c39073525044e7ac46acf2ed9120bae1b4f6f02364abfe343f80b290983160c103557af1c68416480d024cc31b6c06cfec011456f1e95c420a12b48b1c3fe220c2879a982fb099948ac440db844b9a112a5188c7783fd3b19593290785f908d95c9db4b280bafe89c1313aeec24772046d9bc089645f0d182a21184e143823c5f52de50e5d7e98d3d7ab56f5413bbccd1415c9bcff707def475b643fb7f29842582104d4cc1dbaaca8f10a2f44273c339e0984f2b1e06ab2f0771db01fafa8142298345f3196f23e5847bda024034b6f59b11c29e981c881456e40d211929fd4f766200258aad8212016322bd5c605790dcfdf1bd2a93d99c9b8f498722d311d7eae7ff420496a31804c55f4759a7b13aaaf5f7ce006c3a8a998897d5e0a504398c2b627852545baf440798bcc5cc049357cf3f17d9771e4528a1af3d77dc794a11346e1bdf5efe37a405b127b4c43b616d61fbc5dc914e14240ef99a7400"
const rand3Hex = "1744b384d68c042371244e13500d4bfb98c6244e3d71a5b700224420b59c593553f33bd786e3d0ce31626f511bc985f59d1a88aa38ba8ad6218d306abee60dd9172540232b95be1af146c69e72e5fde667a090dc3f93bdc5c5af0ab80acdbaa7a505f628c59dc0247b31a439cacf5010a94376d71521df08c178b02fb96fdb1809144ea38c68536187c53201fea8631fb0a880b4451ccdca7cc61f6aafca21cc7449d920599db61789ac3b1e164b3390124f95022aeea39ccca3ec1053f4fa10de2978e2861ea58e477085c2220021a0927aa94c5d0006b5055abba340e4f9eba22e969978dfd18e278a8b89d877328ae34268bc0174cfe211954c0036f078025217d1269fac1932a03b05a0b616012271bbe1fb554171c7a59b196d8a4479f45a77931b5d97aaf6c0c673cbe597b79b96e2a0c1eae2e66e46ccc8c85798e23ffe972ebdaa3f6caea243c004e60321eb47cd79137d78fd0613be606feacc5b3637bdc96a89c13746db8cad886f3ccf912b2178c823bcac395f06d28080269bdca2debf3419c66c690fd1adcfbd53e32e79443d7a42511a84cb22ca94fffad9149275a075b2f8ae0b021dcde9bf62b102db920733b897560518b06e1ad7f4b03458493ddaa7f4fa2c1609f7a1735aeeb1b3e2cea3ab45fc376323cc91873b7e9c90d07c192e38d3f5dfc9bfab1fd821c854da9e607ea596c391c7ec4161c6c4493929a8176badaa5a5af7211c623f29643a937677d3df0da9266181b7c4da5dd40376db677fe8f4a1dc456adf6f33c1e37cec471dd318c2647644fe52f93707a77da7d1702380a80e14cc0fdce7bf2eed48a529090bae0388ee277ce6c7018c5fb00b88362554362205c641f0d0fab94fd5b8357b5ff08b207fee023709bc126ec90cfb17c006754638f8186aaeb1265e80be0c1189ec07d01d5f6f96cb9ce82744147d18490de7dc72862f42f024a16968891a356f5e7e0e695d8c933ba5b5e43ad4c4ade5399bc2cae9bb6189b7870d7f22956194d277f28b10e01c10c6ffe3e065f7e2d6d056aa790db5649ca84dc64c35566c0af1b68c32b5b7874aaa66467afa44f40e9a0846a07ae75360a641dd2acc69d93219b2891f190621511e62a27f5e4fbe641ece1fa234fc7e9a74f48d2a760d82160d9540f649256b169d1fed6fbefdc491126530f3cbad7913e19fbd7aa53b1e243fbf28d5f38c10ebd77c8b986775975cc1d619efb27cdcd733fa1ca36cffe9c0a33cc9f02463c91a886601fd349efee85ef1462065ef9bd2c8f533220ad93138b8382d5938103ab25b2d9af8ae106e1211eb9b18793fba033900c809c02cd6d17e2f3e6fc84dae873411f8e87c3f0a8f1765b7825d185ce3730f299c3028d4a62da9ee95c2b870fb70c79370d485f9d5d9acb78926d20444033d960524d2776dc31988ec7c0dbf23b9905d"
const (
	digits = iota
	twain
	random
var (
	digits = mustLoadFile("testdata/e.txt.bz2")
	twain  = mustLoadFile("testdata/Mark.Twain-Tom.Sawyer.txt.bz2")
	random = mustLoadFile("testdata/random.data.bz2")
)

var testfiles = []string{
	// Digits is the digits of the irrational number e. Its decimal representation
	// does not repeat, but there are only 10 possible digits, so it should be
	// reasonably compressible.
	digits: "testdata/e.txt.bz2",
	// Twain is Mark Twain's classic English novel.
	twain: "testdata/Mark.Twain-Tom.Sawyer.txt.bz2",
	// 16KB of random data from /dev/urandom
	random: "testdata/random.data.bz2",
}

func benchmarkDecode(b *testing.B, testfile int) {
	compressed, err := ioutil.ReadFile(testfiles[testfile])
func benchmarkDecode(b *testing.B, compressed []byte) {
	// Determine the uncompressed size of testfile.
	uncompressedSize, err := io.Copy(ioutil.Discard, NewReader(bytes.NewReader(compressed)))
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(int64(len(compressed)))

	b.SetBytes(uncompressedSize)
	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		r := bytes.NewReader(compressed)
		io.Copy(ioutil.Discard, NewReader(r))
@ -202,222 +230,3 @@ func benchmarkDecode(b *testing.B, testfile int) {
func BenchmarkDecodeDigits(b *testing.B) { benchmarkDecode(b, digits) }
func BenchmarkDecodeTwain(b *testing.B)  { benchmarkDecode(b, twain) }
func BenchmarkDecodeRand(b *testing.B)   { benchmarkDecode(b, random) }

func TestBufferOverrun(t *testing.T) {
	// Tests https://golang.org/issue/5747.
	buffer := bytes.NewReader([]byte(bufferOverrunBase64))
	decoder := base64.NewDecoder(base64.StdEncoding, buffer)
	decompressor := NewReader(decoder)
	// This shouldn't panic.
	ioutil.ReadAll(decompressor)
}

func TestOutOfRangeSelector(t *testing.T) {
	// Tests https://golang.org/issue/8363.
	buffer := bytes.NewReader(outOfRangeSelector)
	decompressor := NewReader(buffer)
	// This shouldn't panic.
	ioutil.ReadAll(decompressor)
}

func TestMTF(t *testing.T) {
	mtf := newMTFDecoderWithRange(5)

	// 0 1 2 3 4
	expect := byte(1)
	x := mtf.Decode(1)
	if x != expect {
		t.Errorf("expected %v, got %v", expect, x)
	}

	// 1 0 2 3 4
	x = mtf.Decode(0)
	if x != expect {
		t.Errorf("expected %v, got %v", expect, x)
	}

	// 1 0 2 3 4
	expect = byte(0)
	x = mtf.Decode(1)
	if x != expect {
		t.Errorf("expected %v, got %v", expect, x)
	}

	// 0 1 2 3 4
	expect = byte(4)
	x = mtf.Decode(4)
	if x != expect {
		t.Errorf("expected %v, got %v", expect, x)
	}

	// 4 0 1 2 3
	expect = byte(0)
	x = mtf.Decode(1)
	if x != expect {
		t.Errorf("expected %v, got %v", expect, x)
	}
}

var bufferOverrunBase64 string = `
QlpoNTFBWSZTWTzyiGcACMP/////////////////////////////////3/7f3///
////4N/fCZODak2Xo44GIHZgkGzDRbFAuwAAKoFV7T6AO6qwA6APb6s2rOoAkAAD
oACUoDtndh0iQAPkAAAAaPWihQoCgr5t97Obju21ChQB0NBm3RbA7apXrRoBooAA
AhA+IAHWl2Us3O7t9yieb3udvd76+4+fd33nd3HO1bVvfcGRne6+3vfPvfc++995
w7k973eJhasLVec970tzDNXdX28LoPXZ3H3K9z0s5ufWAfes49d5594c3dUYtI+2
+h1dvtpRa+uvrVEAG9bl893RVEN7cWvroSqWjPMGgAQi7Gq8TJSgKKdjKFBIB9Ae
LqWxleu715eXe7ml9e5098Z6G1vr7t1QZ6ot76YzPd3j7333t2ql2Chm7XrA9ICQ
VF77z3rVBWqkSXtlfb099hyezAr6USbGpICTSCFAaqHrKo+tUnm32rpE4Ue+t2mj
bKUeipEqwc93EdhhTwmQpOhhesC9iqDSPNTWYNSnUtBdm1nsA0nqqNd7OWwDXtFL
ONmmA6Ubke26I9UblvWIPR5VOWOnctai443URunnDy77uVC59OfRvezlDu33Z7Ly
3NNuuHW63088xu3t3NHZhkZbG7tXRlj00qOtbaXTJUUdspTbABR9R6EUwQAEAAAA
EMEwRpoAAAABMmhoAAjBNNAaCMhponpoGpgJpk9TEyp6niGKZkAaAEfqMQ09U80p
+pMGSCKngIAAAAgAAg0AAJhGgABGCEaaTyTKeNI1PE0wkj01GajMSNPSZGnqbU9T
anlPUNAHqGQ0DQAMg9TamgAAYRU/IAAICAmjQJgjQBMEwp5DTSaaYmhTeqfplPID
U1T9TynoU82pT1NPU/VP0j1NHqRpk9TTR7SnqaNNGmmQAaAD1Aeo0PSAAAAaaBiK
eBAQBGgIABGQA0AmBNNBoaAgaJmpglPEyYap6npiTT0agGjJjUaaDTQAAAAAAM1A
9QAaAAAADU8iEAQAEyAJk0NNNJgIZTJ5E00YSemiaZNGm1MpGNJ+lPU9qm9U2RDM
oY0EzJB6h6nqDID1NMBDDRpo1AGNAjCMmhkMgaYSJIgAAAQyAAEyBoATECCNhTT0
U/IZAmCM1DSTxkzUE8p6NDaGiZGJqntTFHvUyU9qPQp7Kn5GgKNPU9QAGg9QAAA3
wz0Pk/g/m/m9P9H4vxv2+dH3gCS8nhbbbbbYxtgNsBsG0m2MbG0NNtsbYNsaY0wb
bBibGmm22mxptNpsaGNDTY02JsG0MY0xg2MaYNNDbGwG0L5vsK/F9DO+EAA447Kq
p7Wdf6Y+5c20T7DfHyMXIzRKrZexw72uiQI+y55vOe52xpqbCLC2uR20JdER7Zvr
7ufuKb6zhiBxLuj0eA27v8RpMLucw9Ohwcizi2wrpt+yU1FdpM7ZYPcwS3XTef+A
Wzjxwhdrgw3aH1LeC1eZW900x8V9Nv4hTPXp4l067P/4ANVZFF/imOe/d5bdueam
/DFFokQWnFaU+ZqLBCM+d0PialJQWnLqRQZk/KhfbbYc2pCUTgffcSYbrCM1N+8l
HU6gSz+h2GJXs+tbrNviL83M97X0vcTn/F82P8wen8/3/h3sHY+sf9CSej9ThYTV
3lQ+FUHpfpGD4kv7dYMV995dpDX/y3xR8FoXx1bjUxBTNxuutwQ/h/Eedn9wpn6w
E3+ND8YhN1HSriIxRE/6uFyMv6/oC6Elarw3aHMMqHJkGiiz6tejmvnYLQa+Qm6G
deZ7jXTZV6NlpocgDnRdimS06bTYSkvPAL/xoWNLkX6N6VljU0dfKSBmm2uZE/xu
sutQ1EdP7GdjhglIq4xlOFUFEQpmX+xx7R8y6c0GSAaqusOjNZwxZRudOvmXm1tZ
T+YnbeB2ir9eiHNrtJNSLD/J/WDyuQpwBUtLKo0krccY/wIILP7f86teb9Z/9oyz
OX05qEWbObfhpRw+9+rCvp/35ML8KX3aHaI0n+tudbFRsV5FLW+Oa8ruLN4peyVL
DWjTHrXNthq/s7zAJYMeFJZkZt5mT9rfpH+5g3nc+piOSZ+J5nHtOnKI7Ff8Xl+j
0t76XTNucCHQ6whav1OHdF53TY5wuv5OzvrdnxoId8fTyUvERr0ERINu/8XxZZ5f
B5/kTZ8bBO0wv54Jp+ED/GQI8lZHzIQCP3vfQhwnCTj9TvITic7P4mYLDbH3fyzR
i+6EajCcpXLWSGf+ZXkOrWspDWDhXtEKas0v3UqWksqgY1rTj45krX4KihN+daXs
pZl5WPlta5p06CX6Xm2SfzqkMw12/3ix1bpnnZ+kFeBNX7A+E9zzG6OZaN78GOpl
9Ht/eZn9PqWdav852zr0zqkDK2H5IjdvNah+b1YVGdQGzwR4Nw+f13yEKnV+y66W
djfq7zWp7m5w+hzfv+Ly8O7oet5Vvd8/wQvO7qzOZ2vjf9X8Tj8PnMb/nc/nKqRR
+ml4UEhOOwfCeJEEI109CMYSh91iAJqPjMyH6KjrPD7W25llZVcREYNCTg6htbQt
M38wYoquCWP6tdKYlVIv14xTNUeUf4El/FunCf6csZkmv+9tfWx7t59wuKIa3saU
tZs9M+3HFOZtz3OLg/Unoaj9BYazYqA78xBU9tZzrtmF/rQL9CGJt90o/oYnSfcS
SL3haaw351LXWQ1XOsv1SmH3v6ymuxEpPPnEDmBELaTYsvvMIWJsmPZFFww++Kd7
s/Jo0JFeUU7uNtI+gVosAIpVVuWfI/9tOIycz7I5Z7zjV+NR2OuZbYtW5F08KX4o
2k/xuJIchcNFPtxPfw9dkDgscRbMckyFMrzuZ3IvrcGzk0J6iI5ytrv37bGpAXMz
WK9mMMPebepNevmLjjo/QWoM968Sjv7ldlPS5AinHcXwsFv6dmmh8lJt7UOJWoKu
lMD1cB2ksIGpMdv8iuqR42Rn/kn+17BhhUZcwDBaUXVdX6bKW7fxlUYbq+mlqIcf
a9v8HF87M9ANbi9bq9onf9TD7nQ6Xf6vZci8TBPX+/GI0He6j31fTVQYW+NsQxvO
J8xrx+e58CCLQNjxeIyPt+F+qk/QMiXw+LyxGVkV/XcGQT9X03jSDP6beJ5QG1JW
9Q3qLv/YixWI7gPV9Mrhf2oRYTc/9KLFRhkE3SjKOTKuSSBKQ24fI+hEznamH71D
66Hwez8/0et7AtTv9zvamv2OD5He6fMV4k+ePl6+qPfO5CdHtK+eCDZL5+4f5yrl
gTcRFiq8fXbc5IaI5fbbc1KMM/2T0Mr7+Hwaco6FtXm0fmhCgTZRqY4pKiEIfmaz
QwHNOOCrtMJ2VwsyMumt7xsOolGnizRev6lILH43qPcczQM7Gc5zRin80YvFt1Qm
h/57Z0auR2h0fuX50MBO4XQ+26y5l6v4j902R66c0j3z2KHstKQ04J/h6LbuNQE4
D6cu/lyfK69DxxX8wb8XaQkMUcJdo1LzqUGDAb3Kfn/A3P/JYc99MO9qv67+SxWb
wYTyqKdWTd+1KbR/Rcn0Io5zI/QquX7FA1bxfMytjQ/X+l0fh0Pf+Hx97meH4fQL
7/T8/sdTm9Tn8nELvedyhydLlPPTScINdXyLIq9wgIJr4fWPbp9ZhFh/56fdSgOG
HDXg+gkXsN2Rddr4HQ5P3u+RhLzmSjhzoqY5EsPC4QvRlX9JXjB84rPV5USR66qa
/kjw4156GJnzoXtydKJE53t6PHfZWO+3ujsfI6iAdshc7OFzGXiZB9PtItKodhYq
nABkTKdcpu4+TOpf9h5piX5slsaBjkeTnj/Ba02ilboQfcDVigxrYn/iTH5ySWUW
/lHtg78s5UZM8sErwhNe3N3w+6ZOMnU+5i86/xFNtqZfDdXTGy1H3PzGbdtZXYT+
Ixx2vpwBYzbPVYHxKosM5rPiVmcTllI9nuoSfeh9ib4foFWauOpvdmhBDqpTpKTX
u8EO2l2Z195G2RIV7TlKSxGWjR5sl/nALu1uzBeLd9zpSujzMTd1uTX9Qk/Q1S+r
vaW6bm8qqPO4jb6Wx6XIkm321nrIF6Ae25d1+Dpv/P5G4NoLd2j6/EtENC3FeR5z
oo7bA+tI8yEQRhiF0z1FlJXLD5ZbhNNWQm/j/IbzRfh8JtOFZU7ruShLvHXysW9S
9V909tr9jn8/E/Hb5N/1NVNHnZu2HIUvJvHJiHd2ucmeI9PWUMnppmE65GQ5E9xV
ZRlGEH0X85EvmHyEupkMrCC0oMv9RCq+/H8gcfpe00Hs/S+regT5p58cyYomh93v
qvuw/A06BE/wzJESuYbN9pqYpoXqXFemW1NksHEJ2w+PYMJ27WJyD5FpaXB85VaW
qMOhDfO8E3QdH8ybyKt/UgI8/tDGpFbyOlaVdIv1FXJhoLp8soAA4Djg6/KZ066N
ZFYuS8WdjpSZGP4/Lw+1yaXlzNznc/k2uHe2uXP3uFuPcHx+Dm44utxldoO1uBPy
+jzOs14+MIgOjOHMVNqAbMd8fUedLlhJMCfMtm4uz01enLNKcMrtLlPIR37Yukh1
YEMXYpm7eU4XU+j+Jj3pDyaXtXs+p1fWfTN/cy9/Oxs4umUXQ4uHh1kObtayDJ56
/QMxiHobjHNKuKfMxsrYEwN+QVIyVjAwMDYuMjQ1AAA9IwJniiBLRkZDAAAXt0Ja
aDQxQVkmU1lZtwytAACLf///////////////////+//////v//////////bv78//
/+AXO133uwO2xB2UxIvbKXrCqCoURUBL2ytFI82AFdcOwMhVTHtk5rD3szEVNYD4
aIQINCaMRoTaSn7SbSMJiYmEwieTEp+psqbMCp+VNPaFNpqbBNR7UmanlPUeKfqm
j1PU0/VPU08o9Q9EeKHlPJtKbYqeTCYhN6U9T1NH6mp+lPyoGNTI/Knkyg1MggAg
CaMEyQnqZoaaRtRtJpppppoDaTR6hpphGh6mmgHpMQBpkGTTEAAaAAAA00AZDag0
ADIBkGgABqemiRNTI0k8aU0PRGRoAZlP0UAAAGgAAAyAADQaAAAaAAAAAAAAAAAA
AaAAAAM0kgRBJ5MlPFP1Gj0jTTTUaekxNAbUGjTQMgaZANNAAAAaAADTQAAAAAAA
ANAA0AAANADQ0QAAAAAAAAAaGgAAAAAAABoA0AAA0AAAAAAAAAAAAANAAAAAkSEI
aTRpomp5DUxNNDTJPTKaep6T09Kemmo2JG0aTQ9ENogaaGhkABo0NHqaBoDTI0DC
Gj0gNAMhoDQ9QMQNAGQAaDDwyMPIMlbG1vhRBTFo6JksSupgpAjPbY0ec02IGXjb
eS+FBsh01+O4ZOaD+srUZCFaT4DRjVDLx7uKIsFtESIDUg1ZkhyCSYov05C00MtR
BdNNa/AYPGOQZWcs+VegXOPrkushFbZ3mBoRD6WamClkpBaHZrUhUl02bIfRXX4w
b3/9cW9nHDVxh2qFBxqgRKfmq7/Jc/tdJk05nVrGbckGVy2PnIy30CDhpWmqrSot
K2bOnX0NbP1iy2cd0Na0ZmbRstm4MzMzbbMySTd35F7f+zPP8DC+NJLYcakkkkRd
NZlupJt3OMFoDAD2g+N3FAMCydhIpoRHRQAdFI5nNg4ugEXHCYxkMyGCwtaJmial
y0IMlpSYYM/weXNJAhFqS0GNmvaPEtYGjbvaucMdklOTmBX1vfVAkTYB1uXCSK64
UNIixOqRKLuRCFtqIQtgwqaFrCkIYbbewErWABa+VGADWsJXJjfx5SJViLuwiGXq
Ru6vCuwmU5CJiJz3UiBpmLv0r2wskxUhY4tzPVGQ9RMXJl65eLSNwZVwaSyGZ9Cm
A3jztQUUpFeUryBTskW95iVwRMFrhBCwZBAFJBZvhMEMNoDJJlUoIhQkAkjbExp2
YZio+ZYeAZUwmH1qUbdQixmxf0+61+aVgJ1hwxsO1yG3hFx4pfjc09ITVht0pG8u
FtVFhPa1KE0gTRUSVXywkITucqk0Waz5Fs6qJpVHYdNrbYRFxnFsQGY1qmsTLjK6
4QX5Rddo6krM/Bx9CqIAKq4CzVQYHrmIAd2EBhYmwVYwLvhzKIUrc2EirnGIvyuD
O4YZDSwsVTA0BpVvUOjDErkCraBoSutcKwUSSLGhVvNYHLz3klgZD++wWsa/swLw
gvNDY2De+sncOv8X2lq4HD95ZdwPuTIMXCwSbg4RrIqv+L0y6F17pqDecyQYPEj3
iN/0BBeWZlJAyBMi5U3Q1zAlsK8IlDhaXGmvZrgISq5CfNjmUgxDeMggOKqxu4sI
OrilS49Lkl1J3u3GjXTuH+rX+4ccyFAQnizCpPClcY77F59j63S6fr5vr+y99tuO
7Ox7Wg/ljwhdyaK4xMmXczeJbx7x07htJNtC4xcQfAtvzeznLrN6MN/ILIBOI65I
qIA2D5fHHj1XN4aN6TvOjWDaSbSWqxCSCvXUpzkNJAkWXAuTwF8k5uSJvQj/rVo0
hAhEMEIYkCRGx9AX+byIuXWlLMbbVeliHNUL5AQYmNwLFu4SkmGD+UWtBMyVHQOQ
ss0ggoVKSKOBUgnVS6ljt7WE1qXqJJ4QA1pEwYNLEaguEE1LtPNoVr5WzjbSbWPk
V9OW3y9IneUDLoIV5pAkEFTEFGFVjeTFxtpzBBfGgycBxVCdz8eESBIzsamRchAa
TQunQH8DHnpfod9QuAuRvc7JBlKUCYmCjMvynLcxIFohxCaYrDvGw4QbXZB7oWQ7
hpoGlz23ayDfB8NrRRzdilsEQyQniu9ASLQg7RrGZnoTr1ai12IbCEUCGdFq03P5
nBnRFAGmisQGcyykV9gKtcVMWLhCuVmXg86dndn7slUpRNSSEAU20oaWIm1maFTu
E0DT4gTbg0nuhjtz3kNOz+i7sBm0bkXjxQWuLqlZEmp60ZTyRZJDUqKSEKg6hqcy
ERxdU22CSNOO10RYUUiDVpKhPNdKTOIE1thp02sBNoNTFSht8WJtaBQ09qN3jd5r
dOLX4IA5fevRyCCzDgRXfV4wzik4KROjmxmTMglBySlIMEzcXehnDXCRiZSlvwA2
0YsIOROcm4UrIRFxJHctJH7OdN5u1aHVHb5UaLHpv48NgmFRE56KTSoaWunqm2st
S0mrAdOiqcR12PWVbdVRJKcQ0DQuhwlAPcRtpxN3D4kbXJjToSYJIFw406G2CSaK
jQMIJPZGlQmgyFhoCSzeGS1VSq5SKKQQxs5RqKUcVUNY57YUETb4mXzV84SPngKi
nsce0mXByZq5BKUA9puHZWLNwQIYuDaJUNgG+E01E3pDYVNLKYQ0hsVesgV5gZY0
htDsRdGtm0+iGnkN6+Ea9YJtUZNAkx2GgSoix12nTW0avTUfxR3oYcpvZ7IdtABE
UhBcjG4qZtDZsS1JQHys243vhLaDTSvvTeBiJA2tmokqECTBcSOCAGkAxMKlVAva
4IsLRaBBqhxDbcGtgdw03mFcLUaFuhtKuuEIEkUleJQwby/zwu9uvvZK4xTV+ECM
a8lmzxKmqkBggYK1+xPdbmJclm6tSZhE/OSJtCEjs+unJIQkT9hCWgBJqGMS07Eh
AJNmBiuVEVdTyjkIJkavuZmx2sJF13htgEZUCC23lZFOE6gWbM9WyYNJTM8yCQrb
0Sx3OQvBML5cRATAQkSQkAJOAhoxpQkNi4ZiEVDbdtJAME0RXNDXGHA3M3Q0mm1o
IEwbWpaM1DQCSMbGRCAu3iRIQiT6RlBpT1n3tfwvUXz3gIVlx3mEximY/kZW1kNG
sgEJIrBisaEoGYPJ+1CQUYFBw+eGEHJQBpNHjErXUJY2iWHQ30hXwFBuMSxQ2lB5
bg+/LX3euG6HsHUB1lFvBvaiaBrITVwkCTa1d0s9CHZCiDZjbWReKyrpPE2oSa7o
LPrR4BJvys9ttjUpzETSSMxh8vsr9dXTwKBtK+1xCTGDQmNIaE29HmHdS5GSxpya
MismcAUSEgSxHBrKtgsZzduG7vHZn16l3kFkVITtENIzS2JsiBwFTDlhgexsjBHv
5HXOYxHBzoSDCcPZ0ctvkY9aS5XpoQuFYkGJgCsqjJZeUMNUEpDSbKcnUc1PifIA
CbR2UoXawBlspkEBr9HBfvUi/MUakZVOf1WKYrqSaIXce62JOyhJLq3qJBloTA0F
VbILEtM+heFmNRCFt70GJrExVJri0ArYbCRbADSGDBpBXxxb/6fo+s3C7uaL7RjM
LV2IQBNrAJrKFeJwTsPnxbAsemirUx2lk1kaxschzdK4TQNJN5wQnolIFg401OZ4
2na11LnT3lR+1k1TMJhiAjXMk0F1ooHnYlt9LKfJ3ZIOmeY+2l9bUQHWFNGyEyfj
EAcu3kpGLq0Ez7XOS+EpAASRQTAYMATfVQibHLTT30zG732+pNe9za1JNt8sNJYn
RjWuJ6jL5ILV0rcd9vT7X9fObvcXitpvJ2XBJE+PhX2HaTkyWeF9pwnlQNrTe9hV
tzhA+ihZrDrHNmLcQjZbnv/IMubqq8egxY80t5n6vZ6U5TR6U9uZJvai1xtqAyCR
NWkW52m00rDTEuO6BA4q2RHDWwbETF55rRsWLIgNW9qJCyMHPbTM/dMBmWMQSMxz
4M2pRzt47SICxA327UqSCEERqMFybmYi3nUxePtLgHYplqRiw4ynMbXd/kiQ0LE0
PKJSSCXA42ymziCpAxNWflzpzQdJZusahRFr6t6m+4p273/Taj7k+hZyNgBAgXAY
8F7pTts6orLb8IA6o4TOwkwQYmKvKu9VwMrE7+GUhVIAgY9a8DyQMiDBkEAwh7S1
KgCBfao8DK1CwSS8Z3WjL5MEgt93z2koUQCD/YxMBppiCMp7SDVSmkkIHptfGpeh
t+M13Ccv1tavIASFiaQl6rBz3K4N3DSGwNkCibrvEAC0fQirOWnc4NVbcLKpFG1l
NQXF/eqdT79wq1Mvlap3QSCLhcD2D3fCkKVWid4aSjtp9FOX1Uaf7P9eT93zd9Sv
mj2yNLRUGzyI/0oONNSzmmkvJ5Cq2X2CdldIWMGZO57RJ8oyATAWTQmRmNkfh0Sx
uuR/J9oUsomVy1AEntc0dlPivkqBkBqrxU3j5PnWkaI3ZRGc0gg9spCQEISh4xEU
pMhVrnmDQLfLP8Ouqpx917MAw7hkjQk6BJFTAbXDsz3LSHIxo/gB8qrA1vbvdZZh
LtR0frJdfdppX8nAQX/TAxOQ8+H6yw8a9i7/zJEfSYIhop59N/fhcWW2F14cj2Xc
fyHaZ04lTO4uPnly91jwuFPaREuZVp8AxImIhlkxkAN61tWdWG7tEbaCgszh6VIz
ThFnHo2Vi8SQXPrXCN7J9Tc9ZYiAYqoThV/u6SYsea5aZL8deOvKBQCgZZuIxX1z
4EnfcqG176vY4VqMBIC4pMJz0WcHJYqN+j7BiwGoMBwExrIdTB7q4XIFLotcIpS0
1MqyVsesvoQq7WObmGQXdMliMirSLcDuSx8Qy+4pIBgGDIyMp1qbonnGdcHYvU8S
O0A8s/iua5oFdNZTWvbVI4FUH9sKcLiB3/fIAF+sB4n8q6L+UCfmbPcAo/crQ6b3
HqhDBMY9J0q/jdz9GNYZ/1fbXdkUqAQKFePhtzJDRBZba27+LPQNMCcrHMq06F1T
4QmLmkHt7LxB2pAczUO+T2O9bHEw/HWw+dYf2MoRDUw=
`

var outOfRangeSelector = []byte{
	0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26,
	0x53, 0x59, 0x4e, 0xec, 0xe8, 0x36, 0x00, 0x00,
	0x02, 0x51, 0x80, 0x00, 0x10, 0x40, 0x00, 0x06,
	0x44, 0x90, 0x80, 0x20, 0x00, 0x31, 0x06, 0x4c,
	0x41, 0x01, 0xa7, 0xa9, 0xa5, 0x80, 0xbb, 0x94,
	0x31, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x00,
	0x00, 0x00, 0x00,
}

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// forwardCopy is like the built-in copy function except that it always goes
// forward from the start, even if the dst and src overlap.
// It is equivalent to:
//   for i := 0; i < n; i++ {
//     mem[dst+i] = mem[src+i]
//   }
func forwardCopy(mem []byte, dst, src, n int) {
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	for {
		if dst >= src+n {
			copy(mem[dst:dst+n], mem[src:src+n])
			return
		}
		// There is some forward overlap. The destination
		// will be filled with a repeated pattern of mem[src:src+k].
		// We copy one instance of the pattern here, then repeat.
		// Each time around this loop k will double.
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
}
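This change deletes forwardCopy. For reference, a self-contained demonstration of the overlapping-copy semantics it implemented, using the naive loop from its doc comment; the expected output matches the {3, 8, 1, 6, "12121"} case in the test file removed below:

package main

import "fmt"

func main() {
	// forwardCopy(mem, dst=3, src=1, n=5) on "0123456789": an overlapping
	// forward copy fills the destination with a repeat of mem[src:dst].
	mem := []byte("0123456789")
	dst, src, n := 3, 1, 5
	for i := 0; i < n; i++ { // the semantics forwardCopy implemented
		mem[dst+i] = mem[src+i]
	}
	fmt.Println(string(mem)) // 0121212189 — b[3:8] is "12121"
}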
@ -1,54 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"testing"
)

func TestForwardCopy(t *testing.T) {
	testCases := []struct {
		dst0, dst1 int
		src0, src1 int
		want       string
	}{
		{0, 9, 0, 9, "012345678"},
		{0, 5, 4, 9, "45678"},
		{4, 9, 0, 5, "01230"},
		{1, 6, 3, 8, "34567"},
		{3, 8, 1, 6, "12121"},
		{0, 9, 3, 6, "345"},
		{3, 6, 0, 9, "012"},
		{1, 6, 0, 9, "00000"},
		{0, 4, 7, 8, "7"},
		{0, 1, 6, 8, "6"},
		{4, 4, 6, 9, ""},
		{2, 8, 6, 6, ""},
		{0, 0, 0, 0, ""},
	}
	for _, tc := range testCases {
		b := []byte("0123456789")
		n := tc.dst1 - tc.dst0
		if tc.src1-tc.src0 < n {
			n = tc.src1 - tc.src0
		}
		forwardCopy(b, tc.dst0, tc.src0, n)
		got := string(b[tc.dst0 : tc.dst0+n])
		if got != tc.want {
			t.Errorf("dst=b[%d:%d], src=b[%d:%d]: got %q, want %q",
				tc.dst0, tc.dst1, tc.src0, tc.src1, got, tc.want)
		}
		// Check that the bytes outside of dst[:n] were not modified.
		for i, x := range b {
			if i >= tc.dst0 && i < tc.dst0+n {
				continue
			}
			if int(x) != '0'+i {
				t.Errorf("dst=b[%d:%d], src=b[%d:%d]: copy overrun at b[%d]: got '%c', want '%c'",
					tc.dst0, tc.dst1, tc.src0, tc.src1, i, x, '0'+i)
			}
		}
	}
}
@ -13,54 +13,65 @@ import (
const (
	NoCompression      = 0
	BestSpeed          = 1
	fastCompression    = 3
	BestCompression    = 9
	DefaultCompression = -1
	HuffmanOnly        = -2 // Disables match search and only does Huffman entropy reduction.
)

const (
	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 3   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we put into a single flat block, just to
	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The offset is also known as distance. The underlying wire
	// format limits the range of lengths and offsets. For example, there are
	// 256 legitimate lengths: those in the range [3, 258]. This package's
	// compressor uses a higher minimum match length, enabling optimizations
	// such as finding matches via 32-bit loads and compares.
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4       // The smallest match length that the compressor actually emits
	maxMatchLength  = 258     // The largest match length
	baseMatchOffset = 1       // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32
)

type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing int
	level, good, lazy, nice, chain, fastSkipHashing int
}

var levels = []compressionLevel{
	{}, // 0
	// For levels 1-3 we don't bother trying with lazy matches
	{3, 0, 8, 4, 4},
	{3, 0, 16, 8, 5},
	{3, 0, 32, 32, 6},
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we don't bother trying with lazy matches.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 4, 16, 16, skipNever},
	{8, 16, 32, 32, skipNever},
	{8, 16, 128, 128, skipNever},
	{8, 32, 128, 256, skipNever},
	{32, 128, 258, 1024, skipNever},
	{32, 258, 258, 4096, skipNever},
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}

type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
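A minimal sketch of how the levels table above is reached through the public compress/flate API; the specific input string is illustrative:

package main

import (
	"compress/flate"
	"io/ioutil"
	"log"
)

func main() {
	// The level argument indexes the table above: BestSpeed (1) now
	// selects the {1, 0, 0, 0, 0, 0} entry, i.e. the separate fast path.
	w, err := flate.NewWriter(ioutil.Discard, flate.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("hello, deflate"))
	w.Close()
}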
@ -73,8 +84,8 @@ type compressor struct {
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   []int
	hashPrev   []int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]

@ -90,9 +101,12 @@ type compressor struct {
	// deflate state
	length int
	offset int
	hash   int
	hash   uint32
	maxInsertIndex int
	err            error

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}

func (d *compressor) fillDeflate(b []byte) int {
@ -112,15 +126,15 @@ func (d *compressor) fillDeflate(b []byte) int {
		d.hashOffset -= delta
		d.chainHead -= delta
		for i, v := range d.hashPrev {
			if v > delta {
				d.hashPrev[i] -= delta
			if int(v) > delta {
				d.hashPrev[i] = uint32(int(v) - delta)
			} else {
				d.hashPrev[i] = 0
			}
		}
		for i, v := range d.hashHead {
			if v > delta {
				d.hashHead[i] -= delta
			if int(v) > delta {
				d.hashHead[i] = uint32(int(v) - delta)
			} else {
				d.hashHead[i] = 0
			}
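The int conversions above matter because the table entries are now uint32: a plain v - delta would wrap around instead of clamping. A small demonstration:

package main

import "fmt"

func main() {
	var v uint32 = 5
	delta := 10
	fmt.Println(v - uint32(delta)) // 4294967291: unsigned wraparound
	if int(v) > delta {            // the guard used in fillDeflate above
		fmt.Println(uint32(int(v) - delta))
	} else {
		fmt.Println(0) // clamped to zero, as the tables are reset above
	}
}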
@ -132,19 +146,74 @@ func (d *compressor) fillDeflate(b []byte) int {
	return n
}

func (d *compressor) writeBlock(tokens []token, index int, eof bool) error {
	if index > 0 || eof {
func (d *compressor) writeBlock(tokens []token, index int) error {
	if index > 0 {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		d.w.writeBlock(tokens, eof, window)
		d.w.writeBlock(tokens, false, window)
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode.
	if d.compressionLevel.level < 2 {
		return
	}
	if d.index != 0 || d.windowEnd != 0 {
		panic("internal error: fillWindow called with stale data")
	}

	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window, b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		index := j * 256
		end := index + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		toCheck := d.window[index:end]
		dstSize := len(toCheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := d.hashMatch[:dstSize]
		d.bulkHasher(toCheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + index
			newH = val
			hh := &d.hashHead[newH&hashMask]
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = *hh
			// Set the head of the hash chain to us.
			*hh = uint32(di + d.hashOffset)
		}
		d.hash = newH
	}
	// Update window information.
	d.windowEnd = n
	d.index = n
}

// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
@ -168,20 +237,15 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
|
|||
tries >>= 2
|
||||
}
|
||||
|
||||
w0 := win[pos]
|
||||
w1 := win[pos+1]
|
||||
wEnd := win[pos+length]
|
||||
wPos := win[pos:]
|
||||
minIndex := pos - windowSize
|
||||
|
||||
for i := prevHead; tries > 0; tries-- {
|
||||
if w0 == win[i] && w1 == win[i+1] && wEnd == win[i+length] {
|
||||
// The hash function ensures that if win[i] and win[i+1] match, win[i+2] matches
|
||||
if wEnd == win[i+length] {
|
||||
n := matchLen(win[i:], wPos, minMatchLook)
|
||||
|
||||
n := 3
|
||||
for pos+n < len(win) && win[i+n] == win[pos+n] {
|
||||
n++
|
||||
}
|
||||
if n > length && (n > 3 || pos-i <= 4096) {
|
||||
if n > length && (n > minMatchLength || pos-i <= 4096) {
|
||||
length = n
|
||||
offset = pos - i
|
||||
ok = true
|
||||
|
|
@ -196,7 +260,8 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
|
|||
// hashPrev[i & windowMask] has already been overwritten, so stop now.
|
||||
break
|
||||
}
|
||||
if i = d.hashPrev[i&windowMask] - d.hashOffset; i < minIndex || i < 0 {
|
||||
i = int(d.hashPrev[i&windowMask]) - d.hashOffset
|
||||
if i < minIndex || i < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
|
@ -211,9 +276,84 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
|
|||
return d.w.err
|
||||
}
|
||||
|
||||
const hashmul = 0x1e35a7bd
|
||||
|
||||
// hash4 returns a hash representation of the first 4 bytes
|
||||
// of the supplied slice.
|
||||
// The caller must ensure that len(b) >= 4.
|
||||
func hash4(b []byte) uint32 {
|
||||
return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
|
||||
}
|
||||
|
||||
// bulkHash4 will compute hashes using the same
|
||||
// algorithm as hash4
|
||||
func bulkHash4(b []byte, dst []uint32) {
|
||||
if len(b) < minMatchLength {
|
||||
return
|
||||
}
|
||||
hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
|
||||
dst[0] = (hb * hashmul) >> (32 - hashBits)
|
||||
end := len(b) - minMatchLength + 1
|
||||
for i := 1; i < end; i++ {
|
||||
hb = (hb << 8) | uint32(b[i+3])
|
||||
dst[i] = (hb * hashmul) >> (32 - hashBits)
|
||||
}
|
||||
}
|
||||
|
||||
// matchLen returns the number of matching bytes in a and b
|
||||
// up to length 'max'. Both slices must be at least 'max'
|
||||
// bytes in size.
|
||||
func matchLen(a, b []byte, max int) int {
|
||||
a = a[:max]
|
||||
b = b[:len(a)]
|
||||
for i, av := range a {
|
||||
if b[i] != av {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
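
As a minimal sketch of the invariant shared by hash4 and bulkHash4 (the TestBulkHash4 added to deflate_test.go further down checks the same property), using the package-internal names introduced above:

	// Sketch: bulkHash4 must agree with hash4 at every offset.
	func checkBulkHash4(b []byte) bool {
		if len(b) < minMatchLength {
			return true // bulkHash4 would write nothing
		}
		dst := make([]uint32, len(b)-minMatchLength+1)
		bulkHash4(b, dst)
		for i, got := range dst {
			if got != hash4(b[i:]) {
				return false
			}
		}
		return true
	}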

+// encSpeed will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) encSpeed() {
+	// We only compress if we have maxStoreBlockSize.
+	if d.windowEnd < maxStoreBlockSize {
+		if !d.sync {
+			return
+		}
+
+		// Handle small sizes.
+		if d.windowEnd < 128 {
+			switch {
+			case d.windowEnd == 0:
+				return
+			case d.windowEnd <= 16:
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+			default:
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+				d.err = d.w.err
+			}
+			d.windowEnd = 0
+			return
+		}
+
+	}
+	// Encode the block.
+	d.tokens = encodeBestSpeed(d.tokens[:0], d.window[:d.windowEnd])
+
+	// If we removed less than 1/16th, Huffman compress the block.
+	if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
+		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+	} else {
+		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
+	}
+	d.err = d.w.err
+	d.windowEnd = 0
+}

 func (d *compressor) initDeflate() {
-	d.hashHead = make([]int, hashSize)
-	d.hashPrev = make([]int, windowSize)
 	d.window = make([]byte, 2*windowSize)
 	d.hashOffset = 1
 	d.tokens = make([]token, 0, maxFlateBlockTokens+1)

@@ -223,6 +363,7 @@ func (d *compressor) initDeflate() {
 	d.index = 0
 	d.hash = 0
 	d.chainHead = -1
+	d.bulkHasher = bulkHash4
 }

 func (d *compressor) deflate() {

@@ -232,7 +373,7 @@ func (d *compressor) deflate() {

 	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
 	if d.index < d.maxInsertIndex {
-		d.hash = int(d.window[d.index])<<hashShift + int(d.window[d.index+1])
+		d.hash = hash4(d.window[d.index : d.index+minMatchLength])
 	}

 Loop:

@@ -256,7 +397,7 @@ Loop:
 				d.byteAvailable = false
 			}
 			if len(d.tokens) > 0 {
-				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
 					return
 				}
 				d.tokens = d.tokens[:0]

@@ -266,10 +407,11 @@ Loop:
 		}
 		if d.index < d.maxInsertIndex {
 			// Update the hash
-			d.hash = (d.hash<<hashShift + int(d.window[d.index+2])) & hashMask
-			d.chainHead = d.hashHead[d.hash]
-			d.hashPrev[d.index&windowMask] = d.chainHead
-			d.hashHead[d.hash] = d.index + d.hashOffset
+			d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+			hh := &d.hashHead[d.hash&hashMask]
+			d.chainHead = int(*hh)
+			d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
+			*hh = uint32(d.index + d.hashOffset)
 		}
 		prevLength := d.length
 		prevOffset := d.offset

@@ -293,9 +435,9 @@ Loop:
 			// There was a match at the previous step, and the current match is
 			// not better. Output the previous match.
 			if d.fastSkipHashing != skipNever {
-				d.tokens = append(d.tokens, matchToken(uint32(d.length-minMatchLength), uint32(d.offset-minOffsetSize)))
+				d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
 			} else {
-				d.tokens = append(d.tokens, matchToken(uint32(prevLength-minMatchLength), uint32(prevOffset-minOffsetSize)))
+				d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
 			}
 			// Insert in the hash table all strings up to the end of the match.
 			// index and index-1 are already inserted. If there is not enough

@@ -310,12 +452,13 @@ Loop:
 			}
 			for d.index++; d.index < newIndex; d.index++ {
 				if d.index < d.maxInsertIndex {
-					d.hash = (d.hash<<hashShift + int(d.window[d.index+2])) & hashMask
-					d.hashPrev[d.index&windowMask] = d.hashHead[d.hash]
-					d.hashHead[d.hash] = d.index + d.hashOffset
+					d.hash = hash4(d.window[d.index : d.index+minMatchLength])
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					hh := &d.hashHead[d.hash&hashMask]
+					d.hashPrev[d.index&windowMask] = *hh
+					// Set the head of the hash chain to us.
+					*hh = uint32(d.index + d.hashOffset)
 				}
 			}
 			if d.fastSkipHashing == skipNever {

@@ -327,12 +470,12 @@ Loop:
 				// item into the table.
 				d.index += d.length
 				if d.index < d.maxInsertIndex {
-					d.hash = (int(d.window[d.index])<<hashShift + int(d.window[d.index+1]))
+					d.hash = hash4(d.window[d.index : d.index+minMatchLength])
 				}
 			}
 			if len(d.tokens) == maxFlateBlockTokens {
 				// The block includes the current character
-				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
 					return
 				}
 				d.tokens = d.tokens[:0]

@@ -345,7 +488,7 @@ Loop:
 			}
 			d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
 			if len(d.tokens) == maxFlateBlockTokens {
-				if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil {
+				if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
 					return
 				}
 				d.tokens = d.tokens[:0]

@@ -372,17 +515,37 @@ func (d *compressor) store() {
 	d.windowEnd = 0
 }

+// storeHuff compresses and stores the currently added data
+// when the d.window is full or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+	d.err = d.w.err
+	d.windowEnd = 0
+}

 func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
 	n = len(b)
 	b = b[d.fill(d, b):]
 	for len(b) > 0 {
 		d.step(d)
 		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
 	}
-	return n, d.err
+	return n, nil
 }

 func (d *compressor) syncFlush() error {
+	if d.err != nil {
+		return d.err
+	}
 	d.sync = true
 	d.step(d)
 	if d.err == nil {

@@ -402,56 +565,51 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		d.window = make([]byte, maxStoreBlockSize)
 		d.fill = (*compressor).fillStore
 		d.step = (*compressor).store
+	case level == HuffmanOnly:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillStore
+		d.step = (*compressor).storeHuff
+	case level == BestSpeed:
+		d.compressionLevel = levels[level]
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillStore
+		d.step = (*compressor).encSpeed
+		d.tokens = make([]token, maxStoreBlockSize)
 	case level == DefaultCompression:
 		level = 6
 		fallthrough
-	case 1 <= level && level <= 9:
+	case 2 <= level && level <= 9:
 		d.compressionLevel = levels[level]
 		d.initDeflate()
 		d.fill = (*compressor).fillDeflate
 		d.step = (*compressor).deflate
 	default:
-		return fmt.Errorf("flate: invalid compression level %d: want value in range [-1, 9]", level)
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
 	}
 	return nil
 }

-var zeroes [32]int
-var bzeroes [256]byte
-
 func (d *compressor) reset(w io.Writer) {
 	d.w.reset(w)
 	d.sync = false
 	d.err = nil
-	switch d.compressionLevel.chain {
-	case 0:
-		// level was NoCompression.
-		for i := range d.window {
-			d.window[i] = 0
-		}
+	switch d.compressionLevel.level {
+	case NoCompression:
 		d.windowEnd = 0
+	case BestSpeed:
+		d.windowEnd = 0
+		d.tokens = d.tokens[:0]
 	default:
 		d.chainHead = -1
-		for s := d.hashHead; len(s) > 0; {
-			n := copy(s, zeroes[:])
-			s = s[n:]
+		for i := range d.hashHead {
+			d.hashHead[i] = 0
 		}
-		for s := d.hashPrev; len(s) > 0; s = s[len(zeroes):] {
-			copy(s, zeroes[:])
+		for i := range d.hashPrev {
+			d.hashPrev[i] = 0
 		}
 		d.hashOffset = 1
-
 		d.index, d.windowEnd = 0, 0
-		for s := d.window; len(s) > 0; {
-			n := copy(s, bzeroes[:])
-			s = s[n:]
-		}
 		d.blockStart, d.byteAvailable = 0, false
-
-		d.tokens = d.tokens[:maxFlateBlockTokens+1]
-		for i := 0; i <= maxFlateBlockTokens; i++ {
-			d.tokens[i] = 0
-		}
 		d.tokens = d.tokens[:0]
 		d.length = minMatchLength - 1
 		d.offset = 0

@@ -461,6 +619,9 @@ func (d *compressor) reset(w io.Writer) {
 }

 func (d *compressor) close() error {
+	if d.err != nil {
+		return d.err
+	}
 	d.sync = true
 	d.step(d)
 	if d.err != nil {

@@ -477,10 +638,14 @@ func (d *compressor) close() error {
 // Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
 // higher levels typically run slower but compress more. Level 0
 // (NoCompression) does not attempt any compression; it only adds the
-// necessary DEFLATE framing. Level -1 (DefaultCompression) uses the default
-// compression level.
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (HuffmanOnly) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
 //
-// If level is in the range [-1, 9] then the error returned will be nil.
+// If level is in the range [-2, 9] then the error returned will be nil.
 // Otherwise the error returned will be non-nil.
 func NewWriter(w io.Writer, level int) (*Writer, error) {
 	var dw Writer
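
A minimal usage sketch of the extended level range (standard compress/flate API; the compressed bytes depend on the implementation and are not shown):

	package main

	import (
		"bytes"
		"compress/flate"
		"io"
		"os"
	)

	func main() {
		var buf bytes.Buffer
		// HuffmanOnly (-2) skips LZ77 matching entirely and only
		// entropy-codes the input: very fast, but larger output.
		w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
		if err != nil {
			panic(err)
		}
		io.WriteString(w, "hello, hello, hello")
		w.Close()
		os.Stdout.Write(buf.Bytes())
	}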

@@ -497,28 +662,22 @@ func NewWriter(w io.Writer, level int) (*Writer, error) {
 // can only be decompressed by a Reader initialized with the
 // same dictionary.
 func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
-	dw := &dictWriter{w, false}
+	dw := &dictWriter{w}
 	zw, err := NewWriter(dw, level)
 	if err != nil {
 		return nil, err
 	}
-	zw.Write(dict)
-	zw.Flush()
-	dw.enabled = true
+	zw.d.fillWindow(dict)
 	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
 	return zw, err
 }

 type dictWriter struct {
 	w io.Writer
-	enabled bool
 }

 func (w *dictWriter) Write(b []byte) (n int, err error) {
-	if w.enabled {
-		return w.w.Write(b)
-	}
-	return len(b), nil
+	return w.w.Write(b)
 }
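
Since fillWindow now primes the window directly instead of compressing the dictionary through the stream, here is a hedged round-trip sketch of the dictionary API this change feeds into:

	package main

	import (
		"bytes"
		"compress/flate"
		"fmt"
		"io/ioutil"
	)

	func main() {
		dict := []byte("hello world")
		var buf bytes.Buffer
		w, _ := flate.NewWriterDict(&buf, flate.BestCompression, dict)
		w.Write([]byte("hello world, hello world"))
		w.Close()

		// The stream can only be decoded with the same preset dictionary.
		r := flate.NewReaderDict(&buf, dict)
		out, _ := ioutil.ReadAll(r)
		fmt.Printf("%s\n", out) // hello world, hello world
	}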

 // A Writer takes data written to it and writes the compressed

@@ -560,10 +719,7 @@ func (w *Writer) Reset(dst io.Writer) {
 		// w was created with NewWriterDict
 		dw.w = dst
 		w.d.reset(dw)
-		dw.enabled = false
-		w.Write(w.dict)
-		w.Flush()
-		dw.enabled = true
+		w.d.fillWindow(w.dict)
 	} else {
 		// w was created with NewWriter
 		w.d.reset(dst)

@@ -42,10 +42,10 @@ var deflateTests = []*deflateTest{
 	{[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0,
 		[]byte{0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255},
 	},
-	{[]byte{}, 1, []byte{1, 0, 0, 255, 255}},
-	{[]byte{0x11}, 1, []byte{18, 4, 4, 0, 0, 255, 255}},
-	{[]byte{0x11, 0x12}, 1, []byte{18, 20, 2, 4, 0, 0, 255, 255}},
-	{[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 1, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}},
+	{[]byte{}, 2, []byte{1, 0, 0, 255, 255}},
+	{[]byte{0x11}, 2, []byte{18, 4, 4, 0, 0, 255, 255}},
+	{[]byte{0x11, 0x12}, 2, []byte{18, 20, 2, 4, 0, 0, 255, 255}},
+	{[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 2, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}},
 	{[]byte{}, 9, []byte{1, 0, 0, 255, 255}},
 	{[]byte{0x11}, 9, []byte{18, 4, 4, 0, 0, 255, 255}},
 	{[]byte{0x11, 0x12}, 9, []byte{18, 20, 2, 4, 0, 0, 255, 255}},

@@ -80,6 +80,32 @@ func largeDataChunk() []byte {
 	return result
 }

+func TestBulkHash4(t *testing.T) {
+	for _, x := range deflateTests {
+		y := x.out
+		if len(y) < minMatchLength {
+			continue
+		}
+		y = append(y, y...)
+		for j := 4; j < len(y); j++ {
+			y := y[:j]
+			dst := make([]uint32, len(y)-minMatchLength+1)
+			for i := range dst {
+				dst[i] = uint32(i + 100)
+			}
+			bulkHash4(y, dst)
+			for i, got := range dst {
+				want := hash4(y[i:])
+				if got != want && got == uint32(i)+100 {
+					t.Errorf("Len:%d Index:%d, want 0x%08x but not modified", len(y), i, want)
+				} else if got != want {
+					t.Errorf("Len:%d Index:%d, got 0x%08x want:0x%08x", len(y), i, got, want)
+				}
+			}
+		}
+	}
+}
+
 func TestDeflate(t *testing.T) {
 	for _, h := range deflateTests {
 		var buf bytes.Buffer

@@ -91,7 +117,7 @@ func TestDeflate(t *testing.T) {
 		w.Write(h.in)
 		w.Close()
 		if !bytes.Equal(buf.Bytes(), h.out) {
-			t.Errorf("Deflate(%d, %x) = %x, want %x", h.level, h.in, buf.Bytes(), h.out)
+			t.Errorf("Deflate(%d, %x) = \n%#v, want \n%#v", h.level, h.in, buf.Bytes(), h.out)
 		}
 	}
 }

@@ -289,6 +315,9 @@ func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name str
 		t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
 		return
 	}
+	if limit > 0 {
+		t.Logf("level: %d, size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len())
+	}
 	r := NewReader(&buffer)
 	out, err := ioutil.ReadAll(r)
 	if err != nil {

@@ -303,15 +332,17 @@ func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name str
 	testSync(t, level, input, name)
 }

-func testToFromWithLimit(t *testing.T, input []byte, name string, limit [10]int) {
+func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) {
 	for i := 0; i < 10; i++ {
 		testToFromWithLevelAndLimit(t, i, input, name, limit[i])
 	}
+	// Test HuffmanCompression
+	testToFromWithLevelAndLimit(t, -2, input, name, limit[10])
 }

 func TestDeflateInflate(t *testing.T) {
 	for i, h := range deflateInflateTests {
-		testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [10]int{})
+		testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{})
 	}
 }

@@ -327,19 +358,19 @@ func TestReverseBits(t *testing.T) {
 type deflateInflateStringTest struct {
 	filename string
 	label    string
-	limit    [10]int
+	limit    [11]int
 }

 var deflateInflateStringTests = []deflateInflateStringTest{
 	{
 		"../testdata/e.txt",
 		"2.718281828...",
-		[...]int{100018, 50650, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790},
+		[...]int{100018, 50650, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683},
 	},
 	{
 		"../testdata/Mark.Twain-Tom.Sawyer.txt",
 		"Mark.Twain-Tom.Sawyer",
-		[...]int{407330, 187598, 180361, 172974, 169160, 163476, 160936, 160506, 160295, 160295},
+		[...]int{407330, 187598, 180361, 172974, 169160, 163476, 160936, 160506, 160295, 160295, 233460},
 	},
 }

@@ -457,6 +488,17 @@ func TestWriterReset(t *testing.T) {
 	// DeepEqual doesn't compare functions.
 	w.d.fill, wref.d.fill = nil, nil
 	w.d.step, wref.d.step = nil, nil
+	w.d.bulkHasher, wref.d.bulkHasher = nil, nil
+	// hashMatch is always overwritten when used.
+	copy(w.d.hashMatch[:], wref.d.hashMatch[:])
+	if len(w.d.tokens) != 0 {
+		t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, len(w.d.tokens))
+	}
+	// As long as the length is 0, we don't care about the content.
+	w.d.tokens = wref.d.tokens
+
+	// We don't care if there are values in the window, as long as d.index is 0.
+	w.d.window = wref.d.window
 	if !reflect.DeepEqual(w, wref) {
 		t.Errorf("level %d Writer not reset after Reset", level)
 	}

@@ -481,7 +523,7 @@ func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error))
 		w.Write(b)
 	}
 	w.Close()
-	out1 := buf.String()
+	out1 := buf.Bytes()

 	buf2 := new(bytes.Buffer)
 	w.Reset(buf2)

@@ -489,10 +531,103 @@ func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error))
 		w.Write(b)
 	}
 	w.Close()
-	out2 := buf2.String()
+	out2 := buf2.Bytes()

-	if out1 != out2 {
-		t.Errorf("got %q, expected %q", out2, out1)
+	if len(out1) != len(out2) {
+		t.Errorf("got %d, expected %d bytes", len(out2), len(out1))
+		return
 	}
+	if !bytes.Equal(out1, out2) {
+		mm := 0
+		for i, b := range out1[:len(out2)] {
+			if b != out2[i] {
+				t.Errorf("mismatch index %d: %#02x, expected %#02x", i, out2[i], b)
+			}
+			mm++
+			if mm == 10 {
+				t.Fatal("Stopping")
+			}
+		}
+	}
+	t.Logf("got %d bytes", len(out1))
 }

+// TestBestSpeed tests that round-tripping through deflate and then inflate
+// recovers the original input. The Write sizes are near the thresholds in the
+// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize
+// (65535).
+func TestBestSpeed(t *testing.T) {
+	abc := make([]byte, 128)
+	for i := range abc {
+		abc[i] = byte(i)
+	}
+	abcabc := bytes.Repeat(abc, 131072/len(abc))
+	var want []byte
+
+	testCases := [][]int{
+		{65536, 0},
+		{65536, 1},
+		{65536, 1, 256},
+		{65536, 1, 65536},
+		{65536, 14},
+		{65536, 15},
+		{65536, 16},
+		{65536, 16, 256},
+		{65536, 16, 65536},
+		{65536, 127},
+		{65536, 128},
+		{65536, 128, 256},
+		{65536, 128, 65536},
+		{65536, 129},
+		{65536, 65536, 256},
+		{65536, 65536, 65536},
+	}
+
+	for i, tc := range testCases {
+		for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} {
+			tc[0] = firstN
+		outer:
+			for _, flush := range []bool{false, true} {
+				buf := new(bytes.Buffer)
+				want = want[:0]
+
+				w, err := NewWriter(buf, BestSpeed)
+				if err != nil {
+					t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err)
+					continue
+				}
+				for _, n := range tc {
+					want = append(want, abcabc[:n]...)
+					if _, err := w.Write(abcabc[:n]); err != nil {
+						t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err)
+						continue outer
+					}
+					if !flush {
+						continue
+					}
+					if err := w.Flush(); err != nil {
+						t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err)
+						continue outer
+					}
+				}
+				if err := w.Close(); err != nil {
+					t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err)
+					continue
+				}
+
+				r := NewReader(buf)
+				got, err := ioutil.ReadAll(r)
+				if err != nil {
+					t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err)
+					continue
+				}
+				r.Close()
+
+				if !bytes.Equal(got, want) {
+					t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush)
+					continue
+				}
+			}
+		}
+	}
+}
@@ -0,0 +1,174 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// This encoding algorithm, which prioritizes speed over output size, is
+// based on Snappy's LZ77-style encoder: github.com/golang/snappy
+
+const (
+	tableBits  = 14             // Bits used in the table.
+	tableSize  = 1 << tableBits // Size of the table.
+	tableMask  = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+)
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func hash(u uint32) uint32 {
+	return (u * 0x1e35a7bd) >> tableShift
+}
+
+// These constants are defined by the Snappy implementation so that its
+// assembly implementation can fast-path some 16-bytes-at-a-time copies. They
+// aren't necessary in the pure Go implementation, as we don't use those same
+// optimizations, but using the same thresholds doesn't really hurt.
+const (
+	inputMargin            = 16 - 1
+	minNonLiteralBlockSize = 1 + 1 + inputMargin
+)
+
+func encodeBestSpeed(dst []token, src []byte) []token {
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		return emitLiteral(dst, src)
+	}
+
+	// Initialize the hash table.
+	//
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
+	var table [tableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s))
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS))
+			// TODO: < should be <=, and add a test for that.
+			if s-candidate < maxMatchOffset && load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		dst = emitLiteral(dst, src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of Snappy's:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			s1 := base + maxMatchLength
+			if s1 > len(src) {
+				s1 = len(src)
+			}
+			for i := candidate + 4; s < s1 && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			// matchToken is flate's equivalent of Snappy's emitCopy.
+			dst = append(dst, matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)))
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x >> 0))
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x >> 8))
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			// TODO: >= should be >, and add a test for that.
+			if s-candidate >= maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x >> 16))
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		dst = emitLiteral(dst, src[nextEmit:])
+	}
+	return dst
+}
+
+func emitLiteral(dst []token, lit []byte) []token {
+	for _, v := range lit {
+		dst = append(dst, token(v))
+	}
+	return dst
+}
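
To make the skip heuristic concrete, a standalone sketch of the stride schedule produced by skip >> 5 in the inner loop above (the positions are illustrative; they assume no match is ever found):

	package main

	import "fmt"

	func main() {
		skip, s, last := 32, 1, 0
		for s < 4096 {
			stride := skip >> 5 // bytesBetweenHashLookups in the encoder
			if stride != last {
				fmt.Printf("from position %d the scan advances %d byte(s) at a time\n", s, stride)
				last = stride
			}
			s += stride
			skip += stride
		}
	}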
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+//	* Literal insertions: Runs of one or more symbols are inserted into the data
+//	stream as is. This is accomplished through the writeByte method for a
+//	single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+//	Any valid stream must start with a literal insertion if no preset dictionary
+//	is used.
+//
+//	* Backward copies: Runs of one or more symbols are copied from previously
+//	emitted data. Backward copies come as the tuple (dist, length) where dist
+//	determines how far back in the stream to copy from and length determines how
+//	many bytes to copy. Note that it is valid for the length to be greater than
+//	the distance. Since LZ77 uses forward copies, that situation is used to
+//	perform a form of run-length encoding on repeated runs of symbols.
+//	The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checks about the arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+	hist []byte // Sliding window history
+
+	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
+	wrPos int  // Current output position in buffer
+	rdPos int  // Have emitted hist[:rdPos] already
+	full  bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+	*dd = dictDecoder{hist: dd.hist}
+
+	if cap(dd.hist) < size {
+		dd.hist = make([]byte, size)
+	}
+	dd.hist = dd.hist[:size]
+
+	if len(dict) > len(dd.hist) {
+		dict = dict[len(dict)-len(dd.hist):]
+	}
+	dd.wrPos = copy(dd.hist, dict)
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos = 0
+		dd.full = true
+	}
+	dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+	if dd.full {
+		return len(dd.hist)
+	}
+	return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+	return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+	return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+	return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+	dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+	dd.hist[dd.wrPos] = c
+	dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+	dstBase := dd.wrPos
+	dstPos := dstBase
+	srcPos := dstPos - dist
+	endPos := dstPos + length
+	if endPos > len(dd.hist) {
+		endPos = len(dd.hist)
+	}
+
+	// Copy non-overlapping section after destination position.
+	//
+	// This section is non-overlapping in that the copy length for this section
+	// is always less than or equal to the backwards distance. This can occur
+	// if a distance refers to data that wraps-around in the buffer.
+	// Thus, a backwards copy is performed here; that is, the exact bytes in
+	// the source prior to the copy are placed in the destination.
+	if srcPos < 0 {
+		srcPos += len(dd.hist)
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+		srcPos = 0
+	}
+
+	// Copy possibly overlapping section before destination position.
+	//
+	// This section can overlap if the copy length for this section is larger
+	// than the backwards distance. This is allowed by LZ77 so that repeated
+	// strings can be succinctly represented using (dist, length) pairs.
+	// Thus, a forwards copy is performed here; that is, the bytes copied are
+	// possibly dependent on the resulting bytes in the destination as the copy
+	// progresses along. This is functionally equivalent to the following:
+	//
+	//	for i := 0; i < endPos-dstPos; i++ {
+	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
+	//	}
+	//	dstPos = endPos
+	//
+	for dstPos < endPos {
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
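
A self-contained illustration of the forwards-copy trick writeCopy relies on: when dist is smaller than length, each pass re-reads bytes produced by the previous pass, which performs run-length expansion:

	package main

	import "fmt"

	func main() {
		// The buffer already holds "ab"; a copy of (dist=2, length=6)
		// must extend it to "abababab".
		hist := []byte("ab")
		dist, length := 2, 6
		srcPos, dstPos := len(hist)-dist, len(hist)
		hist = append(hist, make([]byte, length)...)
		endPos := dstPos + length
		for dstPos < endPos {
			// Each copy may consume bytes written by the previous pass.
			dstPos += copy(hist[dstPos:endPos], hist[srcPos:dstPos])
		}
		fmt.Println(string(hist)) // abababab
	}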
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+	dstPos := dd.wrPos
+	endPos := dstPos + length
+	if dstPos < dist || endPos > len(dd.hist) {
+		return 0
+	}
+	dstBase := dstPos
+	srcPos := dstPos - dist
+
+	// Copy possibly overlapping section before destination position.
+loop:
+	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	if dstPos < endPos {
+		goto loop // Avoid for-loop so that this function can be inlined
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+	toRead := dd.hist[dd.rdPos:dd.wrPos]
+	dd.rdPos = dd.wrPos
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos, dd.rdPos = 0, 0
+		dd.full = true
+	}
+	return toRead
+}

@@ -0,0 +1,139 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestDictDecoder(t *testing.T) {
+	const (
+		abc  = "ABC\n"
+		fox  = "The quick brown fox jumped over the lazy dog!\n"
+		poem = "The Road Not Taken\nRobert Frost\n" +
+			"\n" +
+			"Two roads diverged in a yellow wood,\n" +
+			"And sorry I could not travel both\n" +
+			"And be one traveler, long I stood\n" +
+			"And looked down one as far as I could\n" +
+			"To where it bent in the undergrowth;\n" +
+			"\n" +
+			"Then took the other, as just as fair,\n" +
+			"And having perhaps the better claim,\n" +
+			"Because it was grassy and wanted wear;\n" +
+			"Though as for that the passing there\n" +
+			"Had worn them really about the same,\n" +
+			"\n" +
+			"And both that morning equally lay\n" +
+			"In leaves no step had trodden black.\n" +
+			"Oh, I kept the first for another day!\n" +
+			"Yet knowing how way leads on to way,\n" +
+			"I doubted if I should ever come back.\n" +
+			"\n" +
+			"I shall be telling this with a sigh\n" +
+			"Somewhere ages and ages hence:\n" +
+			"Two roads diverged in a wood, and I-\n" +
+			"I took the one less traveled by,\n" +
+			"And that has made all the difference.\n"
+	)
+
+	var poemRefs = []struct {
+		dist   int // Backward distance (0 if this is an insertion)
+		length int // Length of copy or insertion
+	}{
+		{0, 38}, {33, 3}, {0, 48}, {79, 3}, {0, 11}, {34, 5}, {0, 6}, {23, 7},
+		{0, 8}, {50, 3}, {0, 2}, {69, 3}, {34, 5}, {0, 4}, {97, 3}, {0, 4},
+		{43, 5}, {0, 6}, {7, 4}, {88, 7}, {0, 12}, {80, 3}, {0, 2}, {141, 4},
+		{0, 1}, {196, 3}, {0, 3}, {157, 3}, {0, 6}, {181, 3}, {0, 2}, {23, 3},
+		{77, 3}, {28, 5}, {128, 3}, {110, 4}, {70, 3}, {0, 4}, {85, 6}, {0, 2},
+		{182, 6}, {0, 4}, {133, 3}, {0, 7}, {47, 5}, {0, 20}, {112, 5}, {0, 1},
+		{58, 3}, {0, 8}, {59, 3}, {0, 4}, {173, 3}, {0, 5}, {114, 3}, {0, 4},
+		{92, 5}, {0, 2}, {71, 3}, {0, 2}, {76, 5}, {0, 1}, {46, 3}, {96, 4},
+		{130, 4}, {0, 3}, {360, 3}, {0, 3}, {178, 5}, {0, 7}, {75, 3}, {0, 3},
+		{45, 6}, {0, 6}, {299, 6}, {180, 3}, {70, 6}, {0, 1}, {48, 3}, {66, 4},
+		{0, 3}, {47, 5}, {0, 9}, {325, 3}, {0, 1}, {359, 3}, {318, 3}, {0, 2},
+		{199, 3}, {0, 1}, {344, 3}, {0, 3}, {248, 3}, {0, 10}, {310, 3}, {0, 3},
+		{93, 6}, {0, 3}, {252, 3}, {157, 4}, {0, 2}, {273, 5}, {0, 14}, {99, 4},
+		{0, 1}, {464, 4}, {0, 2}, {92, 4}, {495, 3}, {0, 1}, {322, 4}, {16, 4},
+		{0, 3}, {402, 3}, {0, 2}, {237, 4}, {0, 2}, {432, 4}, {0, 1}, {483, 5},
+		{0, 2}, {294, 4}, {0, 2}, {306, 3}, {113, 5}, {0, 1}, {26, 4}, {164, 3},
+		{488, 4}, {0, 1}, {542, 3}, {248, 6}, {0, 5}, {205, 3}, {0, 8}, {48, 3},
+		{449, 6}, {0, 2}, {192, 3}, {328, 4}, {9, 5}, {433, 3}, {0, 3}, {622, 25},
+		{615, 5}, {46, 5}, {0, 2}, {104, 3}, {475, 10}, {549, 3}, {0, 4}, {597, 8},
+		{314, 3}, {0, 1}, {473, 6}, {317, 5}, {0, 1}, {400, 3}, {0, 3}, {109, 3},
+		{151, 3}, {48, 4}, {0, 4}, {125, 3}, {108, 3}, {0, 2},
+	}
+
+	var got, want bytes.Buffer
+	var dd dictDecoder
+	dd.init(1<<11, nil)
+
+	var writeCopy = func(dist, length int) {
+		for length > 0 {
+			cnt := dd.tryWriteCopy(dist, length)
+			if cnt == 0 {
+				cnt = dd.writeCopy(dist, length)
+			}
+
+			length -= cnt
+			if dd.availWrite() == 0 {
+				got.Write(dd.readFlush())
+			}
+		}
+	}
+	var writeString = func(str string) {
+		for len(str) > 0 {
+			cnt := copy(dd.writeSlice(), str)
+			str = str[cnt:]
+			dd.writeMark(cnt)
+			if dd.availWrite() == 0 {
+				got.Write(dd.readFlush())
+			}
+		}
+	}
+
+	writeString(".")
+	want.WriteByte('.')
+
+	str := poem
+	for _, ref := range poemRefs {
+		if ref.dist == 0 {
+			writeString(str[:ref.length])
+		} else {
+			writeCopy(ref.dist, ref.length)
+		}
+		str = str[ref.length:]
+	}
+	want.WriteString(poem)
+
+	writeCopy(dd.histSize(), 33)
+	want.Write(want.Bytes()[:33])
+
+	writeString(abc)
+	writeCopy(len(abc), 59*len(abc))
+	want.WriteString(strings.Repeat(abc, 60))
+
+	writeString(fox)
+	writeCopy(len(fox), 9*len(fox))
+	want.WriteString(strings.Repeat(fox, 10))
+
+	writeString(".")
+	writeCopy(1, 9)
+	want.WriteString(strings.Repeat(".", 10))
+
+	writeString(strings.ToUpper(poem))
+	writeCopy(len(poem), 7*len(poem))
+	want.WriteString(strings.Repeat(strings.ToUpper(poem), 8))
+
+	writeCopy(dd.histSize(), 10)
+	want.Write(want.Bytes()[want.Len()-dd.histSize():][:10])
+
+	got.Write(dd.readFlush())
+	if got.String() != want.String() {
+		t.Errorf("final string mismatch:\ngot %q\nwant %q", got.String(), want.String())
+	}
+}
@@ -272,3 +272,81 @@ func TestTruncatedStreams(t *testing.T) {
 		}
 	}
 }
+
+// Verify that flate.Reader.Read returns (n, io.EOF) instead
+// of (n, nil) + (0, io.EOF) when possible.
+//
+// This helps net/http.Transport reuse HTTP/1 connections more
+// aggressively.
+//
+// See https://github.com/google/go-github/pull/317 for background.
+func TestReaderEarlyEOF(t *testing.T) {
+	testSizes := []int{
+		1, 2, 3, 4, 5, 6, 7, 8,
+		100, 1000, 10000, 100000,
+		128, 1024, 16384, 131072,
+
+		// Testing multiples of windowSize triggers the case
+		// where Read will fail to return an early io.EOF.
+		windowSize * 1, windowSize * 2, windowSize * 3,
+	}
+
+	var maxSize int
+	for _, n := range testSizes {
+		if maxSize < n {
+			maxSize = n
+		}
+	}
+
+	readBuf := make([]byte, 40)
+	data := make([]byte, maxSize)
+	for i := range data {
+		data[i] = byte(i)
+	}
+
+	for _, sz := range testSizes {
+		if testing.Short() && sz > windowSize {
+			continue
+		}
+		for _, flush := range []bool{true, false} {
+			earlyEOF := true // Do we expect early io.EOF?
+
+			var buf bytes.Buffer
+			w, _ := NewWriter(&buf, 5)
+			w.Write(data[:sz])
+			if flush {
+				// If a Flush occurs after all the actual data, the flushing
+				// semantics dictate that we will observe a (0, io.EOF) since
+				// Read must return data before it knows that the stream ended.
+				w.Flush()
+				earlyEOF = false
+			}
+			w.Close()
+
+			r := NewReader(&buf)
+			for {
+				n, err := r.Read(readBuf)
+				if err == io.EOF {
+					// If the availWrite == windowSize, then that means that the
+					// previous Read returned because the write buffer was full
+					// and it just so happened that the stream had no more data.
+					// This situation is rare, but unavoidable.
+					if r.(*decompressor).dict.availWrite() == windowSize {
+						earlyEOF = false
+					}
+
+					if n == 0 && earlyEOF {
+						t.Errorf("On size:%d flush:%v, Read() = (0, io.EOF), want (n, io.EOF)", sz, flush)
+					}
+					if n != 0 && !earlyEOF {
+						t.Errorf("On size:%d flush:%v, Read() = (%d, io.EOF), want (0, io.EOF)", sz, flush, n)
+					}
+					break
+				}
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+	}
+}
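
A hedged sketch of the consumer-side pattern this enables: a read loop that accepts data and io.EOF from the same call:

	package main

	import (
		"bytes"
		"compress/flate"
		"fmt"
		"io"
	)

	func main() {
		var buf bytes.Buffer
		w, _ := flate.NewWriter(&buf, flate.DefaultCompression)
		w.Write([]byte("example"))
		w.Close()

		r := flate.NewReader(&buf)
		p := make([]byte, 64)
		for {
			n, err := r.Read(p)
			if n > 0 {
				fmt.Printf("read %d bytes\n", n)
			}
			if err == io.EOF {
				break // may arrive together with the final bytes
			}
			if err != nil {
				panic(err)
			}
		}
		r.Close()
	}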

@@ -6,7 +6,6 @@ package flate

 import (
 	"io"
-	"math"
 )

 const (

@@ -22,6 +21,17 @@ const (
 	// The number of codegen codes.
 	codegenCodeCount = 19
 	badCode          = 255
+
+	// bufferFlushSize indicates the buffer size
+	// after which bytes are flushed to the writer.
+	// Should preferably be a multiple of 6, since
+	// we accumulate 6 bytes between writes to the buffer.
+	bufferFlushSize = 240
+
+	// bufferSize is the actual output byte buffer size.
+	// It must have additional headroom for a flush
+	// which can contain up to 8 bytes.
+	bufferSize = bufferFlushSize + 8
 )

 // The number of extra bits needed by length code X - LENGTH_CODES_START.

@@ -70,14 +80,14 @@ type huffmanBitWriter struct {
 	w io.Writer
 	// Data waiting to be written is bytes[0:nbytes]
 	// and then the low nbits of bits.
-	bits        uint32
-	nbits       uint32
-	bytes       [64]byte
-	codegenFreq []int32
+	bits            uint64
+	nbits           uint
+	bytes           [bufferSize]byte
+	codegenFreq     [codegenCodeCount]int32
 	nbytes          int
 	literalFreq     []int32
 	offsetFreq      []int32
 	codegen         []uint8
 	literalEncoding *huffmanEncoder
 	offsetEncoding  *huffmanEncoder
 	codegenEncoding *huffmanEncoder

@@ -90,54 +100,16 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
 		literalFreq:     make([]int32, maxNumLit),
 		offsetFreq:      make([]int32, offsetCodeCount),
 		codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
-		codegenFreq:     make([]int32, codegenCodeCount),
 		literalEncoding: newHuffmanEncoder(maxNumLit),
-		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
 		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
 	}
 }

 func (w *huffmanBitWriter) reset(writer io.Writer) {
 	w.w = writer
 	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
-	w.bytes = [64]byte{}
-	for i := range w.codegen {
-		w.codegen[i] = 0
-	}
-	for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} {
-		for i := range s {
-			s[i] = 0
-		}
-	}
-	for _, enc := range [...]*huffmanEncoder{
-		w.literalEncoding,
-		w.offsetEncoding,
-		w.codegenEncoding} {
-		for i := range enc.code {
-			enc.code[i] = 0
-		}
-		for i := range enc.codeBits {
-			enc.codeBits[i] = 0
-		}
-	}
-}
-
-func (w *huffmanBitWriter) flushBits() {
-	if w.err != nil {
-		w.nbits = 0
-		return
-	}
-	bits := w.bits
-	w.bits >>= 16
-	w.nbits -= 16
-	n := w.nbytes
-	w.bytes[n] = byte(bits)
-	w.bytes[n+1] = byte(bits >> 8)
-	if n += 2; n >= len(w.bytes) {
-		_, w.err = w.w.Write(w.bytes[0:])
-		n = 0
-	}
-	w.nbytes = n
+	w.bytes = [bufferSize]byte{}
 }

 func (w *huffmanBitWriter) flush() {

@@ -146,26 +118,42 @@ func (w *huffmanBitWriter) flush() {
 		return
 	}
 	n := w.nbytes
-	if w.nbits > 8 {
-		w.bytes[n] = byte(w.bits)
-		w.bits >>= 8
-		w.nbits -= 8
-		n++
-	}
-	if w.nbits > 0 {
-		w.bytes[n] = byte(w.bits)
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
 		n++
 	}
 	w.bits = 0
-	_, w.err = w.w.Write(w.bytes[0:n])
+	_, w.err = w.w.Write(w.bytes[:n])
 	w.nbytes = 0
 }

-func (w *huffmanBitWriter) writeBits(b, nb int32) {
-	w.bits |= uint32(b) << w.nbits
-	if w.nbits += uint32(nb); w.nbits >= 16 {
-		w.flushBits()
+func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
+	w.bits |= uint64(b) << w.nbits
+	w.nbits += nb
+	if w.nbits >= 48 {
+		bits := w.bits
+		w.bits >>= 48
+		w.nbits -= 48
+		n := w.nbytes
+		bytes := w.bytes[n : n+6]
+		bytes[0] = byte(bits)
+		bytes[1] = byte(bits >> 8)
+		bytes[2] = byte(bits >> 16)
+		bytes[3] = byte(bits >> 24)
+		bytes[4] = byte(bits >> 32)
+		bytes[5] = byte(bits >> 40)
+		n += 6
+		if n >= bufferFlushSize {
+			_, w.err = w.w.Write(w.bytes[:n])
+			n = 0
+		}
+		w.nbytes = n
 	}
 }
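
A standalone sketch of the LSB-first packing that writeBits implements: codes accumulate in a 64-bit buffer from the least significant bit upward, and whole bytes are drained from the bottom (the real code drains six at a time once 48 bits are pending):

	package main

	import "fmt"

	func main() {
		var bits uint64
		var nbits uint
		// Queue three codes: 5 bits of 0b10111, 3 bits of 0b101, 4 bits of 0b0011.
		for _, c := range []struct {
			v uint64
			n uint
		}{{0x17, 5}, {0x5, 3}, {0x3, 4}} {
			bits |= c.v << nbits // DEFLATE packs codes starting at the low bit
			nbits += c.n
		}
		for nbits >= 8 {
			fmt.Printf("byte out: %08b\n", byte(bits))
			bits >>= 8
			nbits -= 8
		}
		fmt.Printf("still pending: %d bits\n", nbits)
	}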

@@ -174,17 +162,18 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
 		return
 	}
 	n := w.nbytes
-	if w.nbits == 8 {
-		w.bytes[n] = byte(w.bits)
-		w.nbits = 0
-		n++
-	}
-	if w.nbits != 0 {
+	if w.nbits&7 != 0 {
 		w.err = InternalError("writeBytes with unfinished bits")
 		return
 	}
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		w.nbits -= 8
+		n++
+	}
 	if n != 0 {
-		_, w.err = w.w.Write(w.bytes[0:n])
+		_, w.err = w.w.Write(w.bytes[:n])
 		if w.err != nil {
 			return
 		}

@@ -204,7 +193,8 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
 //
 //	numLiterals  The number of literals in literalEncoding
 //	numOffsets   The number of offsets in offsetEncoding
-func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
+//	litenc, offenc  The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
 	for i := range w.codegenFreq {
 		w.codegenFreq[i] = 0
 	}

@@ -214,8 +204,15 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
 	// so far.
 	codegen := w.codegen // cache
 	// Copy the concatenated code sizes to codegen. Put a marker at the end.
-	copy(codegen[0:numLiterals], w.literalEncoding.codeBits)
-	copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
+	cgnl := codegen[:numLiterals]
+	for i := range cgnl {
+		cgnl[i] = uint8(litEnc.codes[i].len)
+	}
+
+	cgnl = codegen[numLiterals : numLiterals+numOffsets]
+	for i := range cgnl {
+		cgnl[i] = uint8(offEnc.codes[i].len)
+	}
 	codegen[numLiterals+numOffsets] = badCode

 	size := codegen[0]

@@ -284,11 +281,71 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
 		codegen[outIndex] = badCode
 }

-func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
+		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+		int(w.codegenFreq[16])*2 +
+		int(w.codegenFreq[17])*3 +
+		int(w.codegenFreq[18])*7
+	size = header +
+		litEnc.bitLength(w.literalFreq) +
+		offEnc.bitLength(w.offsetFreq) +
+		extraBits
+
+	return size, numCodegens
+}
+
+// fixedSize returns the size of the fixed Huffman encoded data in bits.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+	return 3 +
+		fixedLiteralEncoding.bitLength(w.literalFreq) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq) +
+		extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the block
+// fits inside a single block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+	if in == nil {
+		return 0, false
+	}
+	if len(in) <= maxStoreBlockSize {
+		return (len(in) + 5) * 8, true
+	}
+	return 0, false
+}
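
The three estimators above feed the per-block choice of encoding; a simplified, hedged sketch of that comparison (the actual decision lives in writeBlock, further down):

	// Illustrative only: pick the cheapest of the three encodings,
	// mirroring (in simplified form) what writeBlock does with the results.
	func smallestEncoding(stored int, storable bool, fixed, dynamic int) string {
		switch {
		case storable && stored <= fixed && stored <= dynamic:
			return "stored" // raw bytes plus a 5-byte header per block
		case fixed <= dynamic:
			return "fixed Huffman"
		default:
			return "dynamic Huffman"
		}
	}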
|
||||
|
||||
func (w *huffmanBitWriter) writeCode(c hcode) {
|
||||
if w.err != nil {
|
||||
return
|
||||
}
|
||||
w.writeBits(int32(code.code[literal]), int32(code.codeBits[literal]))
|
||||
w.bits |= uint64(c.code) << w.nbits
|
||||
w.nbits += uint(c.len)
|
||||
if w.nbits >= 48 {
|
||||
bits := w.bits
|
||||
w.bits >>= 48
|
||||
w.nbits -= 48
|
||||
n := w.nbytes
|
||||
bytes := w.bytes[n : n+6]
|
||||
bytes[0] = byte(bits)
|
||||
bytes[1] = byte(bits >> 8)
|
||||
bytes[2] = byte(bits >> 16)
|
||||
bytes[3] = byte(bits >> 24)
|
||||
bytes[4] = byte(bits >> 32)
|
||||
bytes[5] = byte(bits >> 40)
|
||||
n += 6
|
||||
if n >= bufferFlushSize {
|
||||
_, w.err = w.w.Write(w.bytes[:n])
|
||||
n = 0
|
||||
}
|
||||
w.nbytes = n
|
||||
}
|
||||
}
|
||||
|
||||
// Write the header of a dynamic Huffman block to the output stream.
|
||||
|
|
@ -310,7 +367,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
|
|||
w.writeBits(int32(numCodegens-4), 4)
|
||||
|
||||
for i := 0; i < numCodegens; i++ {
|
||||
value := w.codegenEncoding.codeBits[codegenOrder[i]]
|
||||
value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
|
||||
w.writeBits(int32(value), 3)
|
||||
}
|
||||
|
||||
|
|
@ -321,8 +378,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
|
|||
if codeWord == badCode {
|
||||
break
|
||||
}
|
||||
// The low byte contains the actual code to generate.
|
||||
w.writeCode(w.codegenEncoding, uint32(codeWord))
|
||||
w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
|
||||
|
||||
switch codeWord {
|
||||
case 16:
|
||||
|
|
@@ -367,104 +423,50 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	w.writeBits(value, 3)
}

// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}
-	for i := range w.literalFreq {
-		w.literalFreq[i] = 0
-	}
-	for i := range w.offsetFreq {
-		w.offsetFreq[i] = 0
-	}
-
-	n := len(tokens)
-	tokens = tokens[0 : n+1]
-	tokens[n] = endBlockMarker
+	tokens = append(tokens, endBlockMarker)
+	numLiterals, numOffsets := w.indexTokens(tokens)

-	for _, t := range tokens {
-		switch t.typ() {
-		case literalType:
-			w.literalFreq[t.literal()]++
-		case matchType:
-			length := t.length()
-			offset := t.offset()
-			w.literalFreq[lengthCodesStart+lengthCode(length)]++
-			w.offsetFreq[offsetCode(offset)]++
-		}
-	}
-
-	// get the number of literals
-	numLiterals := len(w.literalFreq)
-	for w.literalFreq[numLiterals-1] == 0 {
-		numLiterals--
-	}
-	// get the number of offsets
-	numOffsets := len(w.offsetFreq)
-	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
-		numOffsets--
-	}
-	if numOffsets == 0 {
-		// We haven't found a single match. If we want to go with the dynamic encoding,
-		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
-		w.offsetFreq[0] = 1
-		numOffsets = 1
-	}
-
-	w.literalEncoding.generate(w.literalFreq, 15)
-	w.offsetEncoding.generate(w.offsetFreq, 15)
-
-	storedBytes := 0
-	if input != nil {
-		storedBytes = len(input)
-	}
-	var extraBits int64
-	var storedSize int64 = math.MaxInt64
-	if storedBytes <= maxStoreBlockSize && input != nil {
-		storedSize = int64((storedBytes + 5) * 8)
+	var extraBits int
+	storedSize, storable := w.storedSize(input)
+	if storable {
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
-			extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
+			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
-			extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
+			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
-	var size = int64(3) +
-		fixedLiteralEncoding.bitLength(w.literalFreq) +
-		fixedOffsetEncoding.bitLength(w.offsetFreq) +
-		extraBits
+	var literalEncoding = fixedLiteralEncoding
+	var offsetEncoding = fixedOffsetEncoding
+	var size = w.fixedSize(extraBits)

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
-	w.generateCodegen(numLiterals, numOffsets)
-	w.codegenEncoding.generate(w.codegenFreq, 7)
-	numCodegens = len(w.codegenFreq)
-	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
-		numCodegens--
-	}
-	dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
-		w.codegenEncoding.bitLength(w.codegenFreq) +
-		int64(extraBits) +
-		int64(w.codegenFreq[16]*2) +
-		int64(w.codegenFreq[17]*3) +
-		int64(w.codegenFreq[18]*7)
-	dynamicSize := dynamicHeader +
-		w.literalEncoding.bitLength(w.literalFreq) +
-		w.offsetEncoding.bitLength(w.offsetFreq)
+	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+	w.codegenEncoding.generate(w.codegenFreq[:], 7)
+	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
@@ -473,9 +475,9 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	}

	// Stored bytes?
-	if storedSize < size {
-		w.writeStoredHeader(storedBytes, eof)
-		w.writeBytes(input[0:storedBytes])
+	if storable && storedSize < size {
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
		return
	}

@@ -485,17 +487,101 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}

// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Write Huffman table.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)

	// Write the tokens.
	w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
}
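The guard ssize < (size+size>>4) falls back to a stored block unless the Huffman encoding saves more than a sixteenth (~6%) of its own size, matching the 1/16th in the comment above. As a hedged one-liner (hypothetical helper):

// preferStored reports whether storing wins, requiring Huffman output
// to beat the stored size by more than 1/16 before compressing.
func preferStored(storedBits, huffBits int) bool {
	return storedBits < huffBits+huffBits>>4 // >> binds tighter than +
}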
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}
	for i := range w.offsetFreq {
		w.offsetFreq[i] = 0
	}

	for _, t := range tokens {
-		switch t.typ() {
-		case literalType:
-			w.writeCode(literalEncoding, t.literal())
-			break
-		case matchType:
+		if t < matchType {
+			w.literalFreq[t.literal()]++
+			continue
+		}
		length := t.length()
		offset := t.offset()
		w.literalFreq[lengthCodesStart+lengthCode(length)]++
		w.offsetFreq[offsetCode(offset)]++
	}

	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)
	return
}
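The t < matchType test works because a token packs its kind into the top bits of one integer, so all literal tokens sort below all match tokens. A toy version of that layout — the exact shifts are assumptions for illustration, not the package's token.go definitions:

type token uint32

const (
	literalType token = 0 << 30 // tag in the top two bits
	matchType   token = 1 << 30
)

func literalToken(b byte) token { return literalType + token(b) }

func isLiteral(t token) bool { return t < matchType }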
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	for _, t := range tokens {
		if t < matchType {
			w.writeCode(leCodes[t.literal()])
			continue
		}
		// Write the length
		length := t.length()
		lengthCode := lengthCode(length)
-		w.writeCode(literalEncoding, lengthCode+lengthCodesStart)
-		extraLengthBits := int32(lengthExtraBits[lengthCode])
+		w.writeCode(leCodes[lengthCode+lengthCodesStart])
+		extraLengthBits := uint(lengthExtraBits[lengthCode])
		if extraLengthBits > 0 {
			extraLength := int32(length - lengthBase[lengthCode])
			w.writeBits(extraLength, extraLengthBits)
@@ -503,15 +589,109 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
		// Write the offset
		offset := t.offset()
		offsetCode := offsetCode(offset)
-		w.writeCode(offsetEncoding, offsetCode)
-		extraOffsetBits := int32(offsetExtraBits[offsetCode])
+		w.writeCode(oeCodes[offsetCode])
+		extraOffsetBits := uint(offsetExtraBits[offsetCode])
		if extraOffsetBits > 0 {
			extraOffset := int32(offset - offsetBase[offsetCode])
			w.writeBits(extraOffset, extraOffsetBits)
		}
-			break
-		default:
-			panic("unknown token type: " + string(t))
-		}
	}
}
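After each length or offset code, writeTokens appends the raw extra bits that pick out one value from the bucket the code denotes, per RFC 1951 section 3.2.5; the base tables hold each bucket's smallest value. Schematically (hypothetical helper, assuming a sorted base table):

// encodeWithExtra splits value into (bucket code, raw extra bits); the
// extra value must then be written with the bucket's extra-bit width.
func encodeWithExtra(value int, base []int) (code, extra int) {
	code = len(base) - 1
	for base[code] > value { // find the bucket whose base is <= value
		code--
	}
	return code, value - base[code]
}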
// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

func init() {
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq, 15)
}
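huffOffset works because a frequency table whose only nonzero entry is offset code 0 still generates a complete, well-formed tree, and a dynamic block must declare at least one distance code even when no matches (and hence no offsets) are ever written. The shape of that trick in isolation, reusing the package's own constructor:

// Sketch: a one-symbol offset tree; every other code length stays 0.
var freq [offsetCodeCount]int32
freq[0] = 1
enc := newHuffmanEncoder(offsetCodeCount)
enc.generate(freq[:], 15)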
// writeBlockHuff encodes a block of bytes as either Huffman
// encoded literals or uncompressed bytes if the result gains
// very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}

	// Add everything as literals
	histogram(input, w.literalFreq)

	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		_, w.err = w.w.Write(w.bytes[:n])
		if w.err != nil {
			return
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}

// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
	h = h[:256]
	for _, t := range b {
		h[t]++
	}
}
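writeBlockHuff needs only this single counting pass plus one literal tree, which is what makes it suitable for the new HuffmanOnly level. A sketch of feeding it by hand (sizes follow the package's constants: maxNumLit is 286 and endBlockMarker is 256, stated here as assumptions):

var freq [286]int32 // literal frequency table, maxNumLit entries
histogram([]byte("abracadabra"), freq[:])
freq[256] = 1 // the end-of-block symbol occurs exactly once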
@@ -9,9 +9,17 @@ import (
	"sort"
)

// hcode is a huffman code with a bit code and bit length.
type hcode struct {
	code, len uint16
}

type huffmanEncoder struct {
-	codeBits []uint8
-	code     []uint16
+	codes     []hcode
+	freqcache []literalNode
+	bitCount  [17]int32
+	lns       byLiteral // stored to avoid repeated allocation in generate
+	lfs       byFreq    // stored to avoid repeated allocation in generate
}

type literalNode struct {
@@ -39,21 +47,26 @@ type levelInfo struct {
	needed int32
}

// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint16) {
	h.len = length
	h.code = code
}

func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }

func newHuffmanEncoder(size int) *huffmanEncoder {
-	return &huffmanEncoder{make([]uint8, size), make([]uint16, size)}
+	return &huffmanEncoder{codes: make([]hcode, size)}
}

// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(maxNumLit)
-	codeBits := h.codeBits
-	code := h.code
+	codes := h.codes
	var ch uint16
	for ch = 0; ch < maxNumLit; ch++ {
		var bits uint16
-		var size uint8
+		var size uint16
		switch {
		case ch < 144:
			// size 8, 00110000 .. 10111111
@@ -75,19 +88,16 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
			bits = ch + 192 - 280
			size = 8
		}
-		codeBits[ch] = size
-		code[ch] = reverseBits(bits, size)
+		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
	}
	return h
}

func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
-	codeBits := h.codeBits
-	code := h.code
-	for ch := uint16(0); ch < 30; ch++ {
-		codeBits[ch] = 5
-		code[ch] = reverseBits(ch, 5)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
	}
	return h
}
@@ -95,11 +105,11 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()

-func (h *huffmanEncoder) bitLength(freq []int32) int64 {
-	var total int64
+func (h *huffmanEncoder) bitLength(freq []int32) int {
+	var total int
	for i, f := range freq {
		if f != 0 {
-			total += int64(f) * int64(h.codeBits[i])
+			total += int(f) * int(h.codes[i].len)
		}
	}
	return total
@@ -220,7 +230,7 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
		panic("leafCounts[maxBits][maxBits] != n")
	}

-	bitCount := make([]int32, maxBits+1)
+	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
@@ -246,10 +256,10 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]
-		sortByLiteral(chunk)
+
+		h.lns.sort(chunk)
		for _, node := range chunk {
-			h.codeBits[node.literal] = uint8(n)
-			h.code[node.literal] = reverseBits(code, uint8(n))
+			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
@@ -261,7 +271,13 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
-	list := make([]literalNode, len(freq)+1)
+	if h.freqcache == nil {
+		// Allocate a reusable buffer with the longest possible frequency table.
+		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
+		// The largest of these is maxNumLit, so we allocate for that case.
+		h.freqcache = make([]literalNode, maxNumLit+1)
+	}
+	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
@@ -270,23 +286,23 @@ func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
-			h.codeBits[i] = 0
+			list[count] = literalNode{}
+			h.codes[i].len = 0
		}
	}
-	// If freq[] is shorter than codeBits[], fill rest of codeBits[] with zeros
-	h.codeBits = h.codeBits[0:len(freq)]
-	list = list[0:count]
+	list[len(freq)] = literalNode{}

+	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
-			h.codeBits[node.literal] = 1
-			h.code[node.literal] = uint16(i)
+			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
-	sortByFreq(list)
+	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
@@ -294,30 +310,35 @@ func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	h.assignEncodingAndSize(bitCount, list)
}

-type literalNodeSorter struct {
-	a    []literalNode
-	less func(i, j int) bool
+type byLiteral []literalNode
+
+func (s *byLiteral) sort(a []literalNode) {
+	*s = byLiteral(a)
+	sort.Sort(s)
}

-func (s literalNodeSorter) Len() int { return len(s.a) }
+func (s byLiteral) Len() int { return len(s) }

-func (s literalNodeSorter) Less(i, j int) bool {
-	return s.less(i, j)
+func (s byLiteral) Less(i, j int) bool {
+	return s[i].literal < s[j].literal
}

-func (s literalNodeSorter) Swap(i, j int) { s.a[i], s.a[j] = s.a[j], s.a[i] }
+func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

-func sortByFreq(a []literalNode) {
-	s := &literalNodeSorter{a, func(i, j int) bool {
-		if a[i].freq == a[j].freq {
-			return a[i].literal < a[j].literal
-		}
-		return a[i].freq < a[j].freq
-	}}
-	sort.Sort(s)
-}
+type byFreq []literalNode

-func sortByLiteral(a []literalNode) {
-	s := &literalNodeSorter{a, func(i, j int) bool { return a[i].literal < a[j].literal }}
-	sort.Sort(s)
-}
+func (s *byFreq) sort(a []literalNode) {
+	*s = byFreq(a)
+	sort.Sort(s)
+}
+
+func (s byFreq) Len() int { return len(s) }
+
+func (s byFreq) Less(i, j int) bool {
+	if s[i].freq == s[j].freq {
+		return s[i].literal < s[j].literal
+	}
+	return s[i].freq < s[j].freq
+}
+
+func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
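byLiteral and byFreq replace the closure-carrying literalNodeSorter so that re-sorting allocates nothing: sort just repoints the slice header held in the receiver. The pattern in isolation (a sketch, not the package's code):

package main

import (
	"fmt"
	"sort"
)

type byValue []int

// sort reuses the receiver as the sort.Interface value, so repeated
// sorts do not allocate a fresh sorter or closure each time.
func (s *byValue) sort(a []int) { *s = byValue(a); sort.Sort(s) }

func (s byValue) Len() int           { return len(s) }
func (s byValue) Less(i, j int) bool { return s[i] < s[j] }
func (s byValue) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	var sorter byValue
	data := []int{3, 1, 2}
	sorter.sort(data)
	fmt.Println(data) // [1 2 3]
}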
@@ -16,7 +16,6 @@ import (

const (
	maxCodeLen = 16 // max length of Huffman code
-	maxHist    = 32768 // max history required
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
@@ -268,7 +267,6 @@ type decompressor struct {
	// Input source.
	r       Reader
	roffset int64
-	woffset int64

	// Input bits, in top of b.
	b uint32
@@ -282,10 +280,7 @@ type decompressor struct {
	codebits *[numCodes]int

	// Output history, buffer.
-	hist  *[maxHist]byte
-	hp    int  // current output position in buffer
-	hw    int  // have written hist[0:hw] already
-	hfull bool // buffer has filled at least once
+	dict dictDecoder

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte
@@ -293,6 +288,7 @@ type decompressor struct {
	// Next step in the decompression,
	// and decompression state.
	step      func(*decompressor)
+	stepState int
	final     bool
	err       error
	toRead    []byte
@@ -302,14 +298,6 @@ type decompressor struct {
}

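The hist/hp/hw/hfull ring buffer collapses into a single dictDecoder that owns the sliding window and the not-yet-flushed region. A rough sketch of the role implied by the calls in this diff (the real type lives in dict_decoder.go and also handles backward copies; the names mirror the diff, the bodies are assumptions):

type miniDict struct {
	hist []byte // the window; its length is the maximum match offset
	wr   int    // next write position
	rd   int    // start of data not yet handed to the reader
}

func (d *miniDict) init(size int)    { d.hist = make([]byte, size) }
func (d *miniDict) availWrite() int  { return len(d.hist) - d.wr }
func (d *miniDict) writeByte(c byte) { d.hist[d.wr] = c; d.wr++ }

// readFlush returns everything written since the previous flush; the
// caller parks the slice in toRead until Read drains it.
func (d *miniDict) readFlush() []byte {
	p := d.hist[d.rd:d.wr]
	d.rd = d.wr
	return p
}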
func (f *decompressor) nextBlock() {
-	if f.final {
-		if f.hw != f.hp {
-			f.flush((*decompressor).nextBlock)
-			return
-		}
-		f.err = io.EOF
-		return
-	}
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
@@ -347,6 +335,9 @@ func (f *decompressor) Read(b []byte) (int, error) {
	if len(f.toRead) > 0 {
		n := copy(b, f.toRead)
		f.toRead = f.toRead[n:]
+		if len(f.toRead) == 0 {
+			return n, f.err
+		}
		return n, nil
	}
	if f.err != nil {
@@ -481,7 +472,21 @@ func (f *decompressor) readHuffman() error {
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanBlock() {
-	for {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
		v, err := f.huffSym(f.hl)
		if err != nil {
			f.err = err
@@ -491,17 +496,16 @@ func (f *decompressor) huffmanBlock() {
		var length int
		switch {
		case v < 256:
-			f.hist[f.hp] = byte(v)
-			f.hp++
-			if f.hp == len(f.hist) {
-				// After the flush, continue this loop.
-				f.flush((*decompressor).huffmanBlock)
+			f.dict.writeByte(byte(v))
+			if f.dict.availWrite() == 0 {
+				f.toRead = f.dict.readFlush()
+				f.step = (*decompressor).huffmanBlock
+				f.stepState = stateInit
				return
			}
-			continue
+			goto readLiteral
		case v == 256:
			// Done with huffman block; read next block.
-			f.step = (*decompressor).nextBlock
+			f.finishBlock()
			return
		// otherwise, reference to older data
		case v < 265:
@@ -581,63 +585,35 @@ func (f *decompressor) huffmanBlock() {
			return
		}

-		// Copy history[-dist:-dist+length] into output.
-		if dist > len(f.hist) {
-			f.err = InternalError("bad history distance")
-			return
-		}
-
		// No check on length; encoding can be prescient.
-		if !f.hfull && dist > f.hp {
+		if dist > f.dict.histSize() {
			f.err = CorruptInputError(f.roffset)
			return
		}

		f.copyLen, f.copyDist = length, dist
-		if f.copyHist() {
+		goto copyHistory
	}

+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if f.dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = f.dict.readFlush()
+			f.step = (*decompressor).huffmanBlock // We need to continue this work
+			f.stepState = stateDict
+			return
+		}
+		goto readLiteral
+	}
}
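huffmanBlock is now re-entrant: it no longer copies into a caller buffer inside one big loop, but parks its position in step/stepState and returns whenever the window fills; Read later re-invokes it, and the switch at the top jumps straight back to readLiteral or copyHistory. A stripped-down sketch of the pattern (illustrative, not the decompressor):

type machine struct {
	step  func(*machine)
	state int
}

func (m *machine) run() {
	const (
		stateStart = iota // zero value must be the start state
		stateResume
	)
	switch m.state {
	case stateResume:
		goto resume
	}
	// ... produce output until the buffer fills, then pause:
	m.step, m.state = (*machine).run, stateResume
	return
resume:
	// ... pick up exactly where the last call left off ...
}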
-// copyHist copies f.copyLen bytes from f.hist (f.copyDist bytes ago) to itself.
-// It reports whether the f.hist buffer is full.
-func (f *decompressor) copyHist() bool {
-	p := f.hp - f.copyDist
-	if p < 0 {
-		p += len(f.hist)
-	}
-	for f.copyLen > 0 {
-		n := f.copyLen
-		if x := len(f.hist) - f.hp; n > x {
-			n = x
-		}
-		if x := len(f.hist) - p; n > x {
-			n = x
-		}
-		forwardCopy(f.hist[:], f.hp, p, n)
-		p += n
-		f.hp += n
-		f.copyLen -= n
-		if f.hp == len(f.hist) {
-			// After flush continue copying out of history.
-			f.flush((*decompressor).copyHuff)
-			return true
-		}
-		if p == len(f.hist) {
-			p = 0
-		}
-	}
-	return false
-}
-
-func (f *decompressor) copyHuff() {
-	if f.copyHist() {
-		return
-	}
-	f.huffmanBlock()
-}

// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
	// Uncompressed.
@@ -663,8 +639,8 @@ func (f *decompressor) dataBlock() {
	}

	if n == 0 {
-		// 0-length block means sync
-		f.flush((*decompressor).nextBlock)
+		f.toRead = f.dict.readFlush()
+		f.finishBlock()
		return
	}

@@ -675,14 +651,15 @@ func (f *decompressor) dataBlock() {
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full.
func (f *decompressor) copyData() {
-	n := f.copyLen
-	for n > 0 {
-		m := len(f.hist) - f.hp
-		if m > n {
-			m = n
+	buf := f.dict.writeSlice()
+	if len(buf) > f.copyLen {
+		buf = buf[:f.copyLen]
	}
-	m, err := io.ReadFull(f.r, f.hist[f.hp:f.hp+m])
-	f.roffset += int64(m)
+
+	cnt, err := io.ReadFull(f.r, buf)
+	f.roffset += int64(cnt)
+	f.copyLen -= cnt
+	f.dict.writeMark(cnt)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
@@ -690,29 +667,23 @@ func (f *decompressor) copyData() {
		f.err = err
		return
	}
-	n -= m
-	f.hp += m
-	if f.hp == len(f.hist) {
-		f.copyLen = n
-		f.flush((*decompressor).copyData)
+
+	if f.dict.availWrite() == 0 || f.copyLen > 0 {
+		f.toRead = f.dict.readFlush()
+		f.step = (*decompressor).copyData
		return
	}
-	}
-	f.step = (*decompressor).nextBlock
+	f.finishBlock()
}

-func (f *decompressor) setDict(dict []byte) {
-	if len(dict) > len(f.hist) {
-		// Will only remember the tail.
-		dict = dict[len(dict)-len(f.hist):]
+func (f *decompressor) finishBlock() {
+	if f.final {
+		if f.dict.availRead() > 0 {
+			f.toRead = f.dict.readFlush()
+		}
+		f.err = io.EOF
	}
-
-	f.hp = copy(f.hist[:], dict)
-	if f.hp == len(f.hist) {
-		f.hp = 0
-		f.hfull = true
-	}
-	f.hw = f.hp
+	f.step = (*decompressor).nextBlock
}

func (f *decompressor) moreBits() error {
@@ -760,19 +731,6 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	}
}

-// Flush any buffered output to the underlying writer.
-func (f *decompressor) flush(step func(*decompressor)) {
-	f.toRead = f.hist[f.hw:f.hp]
-	f.woffset += int64(f.hp - f.hw)
-	f.hw = f.hp
-	if f.hp == len(f.hist) {
-		f.hp = 0
-		f.hw = 0
-		f.hfull = true
-	}
-	f.step = step
-}
-
func makeReader(r io.Reader) Reader {
	if rr, ok := r.(Reader); ok {
		return rr
@@ -805,12 +763,10 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
-		hist:     f.hist,
+		dict:     f.dict,
		step:     (*decompressor).nextBlock,
	}
-	if dict != nil {
-		f.setDict(dict)
-	}
+	f.dict.init(maxMatchOffset, dict)
	return nil
}

@@ -827,10 +783,10 @@ func NewReader(r io.Reader) io.ReadCloser {

	var f decompressor
	f.r = makeReader(r)
-	f.hist = new([maxHist]byte)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
+	f.dict.init(maxMatchOffset, nil)
	return &f
}

@@ -846,10 +802,9 @@ func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {

	var f decompressor
	f.r = makeReader(r)
-	f.hist = new([maxHist]byte)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
-	f.setDict(dict)
+	f.dict.init(maxMatchOffset, dict)
	return &f
}

@@ -37,3 +37,33 @@ func TestReset(t *testing.T) {
		}
	}
}

func TestResetDict(t *testing.T) {
	dict := []byte("the lorem fox")
	ss := []string{
		"lorem ipsum izzle fo rizzle",
		"the quick brown fox jumped over",
	}

	deflated := make([]bytes.Buffer, len(ss))
	for i, s := range ss {
		w, _ := NewWriterDict(&deflated[i], DefaultCompression, dict)
		w.Write([]byte(s))
		w.Close()
	}

	inflated := make([]bytes.Buffer, len(ss))

	f := NewReader(nil)
	for i := range inflated {
		f.(Resetter).Reset(&deflated[i], dict)
		io.Copy(&inflated[i], f)
	}
	f.Close()

	for i, s := range ss {
		if s != inflated[i].String() {
			t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s)
		}
	}
}
@@ -22,31 +22,21 @@ func TestNlitOutOfRange(t *testing.T) {
		"\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c")))
}

-const (
-	digits = iota
-	twain
-)
-
-var testfiles = []string{
+var suites = []struct{ name, file string }{
	// Digits is the digits of the irrational number e. Its decimal representation
	// does not repeat, but there are only 10 possible digits, so it should be
	// reasonably compressible.
-	digits: "../testdata/e.txt",
+	{"Digits", "../testdata/e.txt"},
	// Twain is Mark Twain's classic English novel.
-	twain: "../testdata/Mark.Twain-Tom.Sawyer.txt",
+	{"Twain", "../testdata/Mark.Twain-Tom.Sawyer.txt"},
}

-func benchmarkDecode(b *testing.B, testfile, level, n int) {
+func BenchmarkDecode(b *testing.B) {
+	doBench(b, func(b *testing.B, buf0 []byte, level, n int) {
+		b.ReportAllocs()
		b.StopTimer()
		b.SetBytes(int64(n))
-	buf0, err := ioutil.ReadFile(testfiles[testfile])
-	if err != nil {
-		b.Fatal(err)
-	}
-	if len(buf0) == 0 {
-		b.Fatalf("test file %q has no data", testfiles[testfile])
-	}

		compressed := new(bytes.Buffer)
		w, err := NewWriter(compressed, level)
		if err != nil {
@@ -66,31 +56,43 @@ func benchmarkDecode(b *testing.B, testfile, level, n int) {
		for i := 0; i < b.N; i++ {
			io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1)))
		}
+	})
}

-// These short names are so that gofmt doesn't break the BenchmarkXxx function
-// bodies below over multiple lines.
-const (
-	speed    = BestSpeed
-	default_ = DefaultCompression
-	compress = BestCompression
-)
+var levelTests = []struct {
+	name  string
+	level int
+}{
+	{"Huffman", HuffmanOnly},
+	{"Speed", BestSpeed},
+	{"Default", DefaultCompression},
+	{"Compression", BestCompression},
+}

-func BenchmarkDecodeDigitsSpeed1e4(b *testing.B)    { benchmarkDecode(b, digits, speed, 1e4) }
-func BenchmarkDecodeDigitsSpeed1e5(b *testing.B)    { benchmarkDecode(b, digits, speed, 1e5) }
-func BenchmarkDecodeDigitsSpeed1e6(b *testing.B)    { benchmarkDecode(b, digits, speed, 1e6) }
-func BenchmarkDecodeDigitsDefault1e4(b *testing.B)  { benchmarkDecode(b, digits, default_, 1e4) }
-func BenchmarkDecodeDigitsDefault1e5(b *testing.B)  { benchmarkDecode(b, digits, default_, 1e5) }
-func BenchmarkDecodeDigitsDefault1e6(b *testing.B)  { benchmarkDecode(b, digits, default_, 1e6) }
-func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) }
-func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) }
-func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) }
-func BenchmarkDecodeTwainSpeed1e4(b *testing.B)     { benchmarkDecode(b, twain, speed, 1e4) }
-func BenchmarkDecodeTwainSpeed1e5(b *testing.B)     { benchmarkDecode(b, twain, speed, 1e5) }
-func BenchmarkDecodeTwainSpeed1e6(b *testing.B)     { benchmarkDecode(b, twain, speed, 1e6) }
-func BenchmarkDecodeTwainDefault1e4(b *testing.B)   { benchmarkDecode(b, twain, default_, 1e4) }
-func BenchmarkDecodeTwainDefault1e5(b *testing.B)   { benchmarkDecode(b, twain, default_, 1e5) }
-func BenchmarkDecodeTwainDefault1e6(b *testing.B)   { benchmarkDecode(b, twain, default_, 1e6) }
-func BenchmarkDecodeTwainCompress1e4(b *testing.B)  { benchmarkDecode(b, twain, compress, 1e4) }
-func BenchmarkDecodeTwainCompress1e5(b *testing.B)  { benchmarkDecode(b, twain, compress, 1e5) }
-func BenchmarkDecodeTwainCompress1e6(b *testing.B)  { benchmarkDecode(b, twain, compress, 1e6) }
+var sizes = []struct {
+	name string
+	n    int
+}{
+	{"1e4", 1e4},
+	{"1e5", 1e5},
+	{"1e6", 1e6},
+}
+
+func doBench(b *testing.B, f func(b *testing.B, buf []byte, level, n int)) {
+	for _, suite := range suites {
+		buf, err := ioutil.ReadFile(suite.file)
+		if err != nil {
+			b.Fatal(err)
+		}
+		if len(buf) == 0 {
+			b.Fatalf("test file %q has no data", suite.file)
+		}
+		for _, l := range levelTests {
+			for _, s := range sizes {
+				b.Run(suite.name+"/"+l.name+"/"+s.name, func(b *testing.B) {
+					f(b, buf, l.level, s.n)
+				})
+			}
+		}
+	}
+}
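doBench composes corpus, level, and size and names each case with b.Run, the sub-benchmark support that arrived in go1.7 and replaces the eighteen generated BenchmarkDecode* bodies above. The pattern on its own (assumes the standard fmt and testing imports):

func BenchmarkThing(b *testing.B) {
	for _, n := range []int{1e4, 1e5, 1e6} {
		b.Run(fmt.Sprint(n), func(b *testing.B) {
			b.SetBytes(int64(n))
			for i := 0; i < b.N; i++ {
				// exercise the code under test on n bytes
			}
		})
	}
}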
@@ -44,5 +44,5 @@ func reverseUint16(v uint16) uint16 {
}

func reverseBits(number uint16, bitLength byte) uint16 {
-	return reverseUint16(number << uint8(16-bitLength))
+	return reverseUint16(number << (16 - bitLength))
}
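reverseBits exists because DEFLATE transmits Huffman codes most-significant-bit first while this writer shifts bits out least-significant-bit first, so every code is stored pre-reversed; the change simply drops a redundant uint8 conversion (bitLength is already a byte, and any unsigned value may serve as a shift count). A quick check of the identity (reverse16 is a hypothetical stand-in for the package's table-driven reverseUint16):

func reverse16(v uint16) uint16 {
	var r uint16
	for i := 0; i < 16; i++ {
		r = r<<1 | v&1 // pull bits off the bottom, push onto r
		v >>= 1
	}
	return r
}

// reverse16(0x0006<<(16-5)) == 0x000c: 00110 read backwards is 01100.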
@@ -0,0 +1 @@
3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275900994657640789512694683983525957098258226205224894077267194782684826014769909026401363944374553050682034962524517493996514314298091906592509372216964615157098583874105978859597729754989301617539284681382686838689427741559918559252459539594310499725246808459872736446958486538367362226260991246080512438843904512441365497627807977156914359977001296160894416948685558484063534220722258284886481584560285060168427394522674676788952521385225499546667278239864565961163548862305774564980355936345681743241125150760694794510965960940252288797108931456691368672287489405601015033086179286809208747609178249385890097149096759852613655497818931297848216829989487226588048575640142704775551323796414515237462343645428584447952658678210511413547357395231134271661021359695362314429524849371871101457654035902799344037420073105785390621983874478084784896833214457138687519435064302184531910484810053706146806749192781911979399520614196634287544406437451237181921799983910159195618146751426912397489409071864942319615679452080951465502252316038819301420937621378559566389377870830390697920773467221825625996615014215030680384477345492026054146659252014974428507325186660021324340881907104863317346496514539057962685610055081066587969981635747363840525714591028970641401109712062804390397595156771577004203378699360072305587631763594218731251471205329281918261861258673215791984148488291644706095752706957220917567116722910981690915280173506712748583222871835209353965725121083579151369882091444210067510334671103141267111369908658516398315019701651511685171437657618351
55650884909989859982387345528331635507647918535893226185489632132933089857064204675259070915481416549859461637180