Merge pull request #45158 from thaJeztah/containerd_1.7_deps

vendor: assorted vendor updates for containerd 1.7.0
Sebastiaan van Stijn 2023-03-16 01:18:39 +01:00 committed by GitHub
commit 90e82a2c05
153 changed files with 2306 additions and 1655 deletions


@ -29,7 +29,7 @@ require (
github.com/containerd/continuity v0.3.0
github.com/containerd/fifo v1.1.0
github.com/containerd/typeurl/v2 v2.1.0
github.com/coreos/go-systemd/v22 v22.4.0
github.com/coreos/go-systemd/v22 v22.5.0
github.com/creack/pty v1.1.11
github.com/deckarep/golang-set v0.0.0-20141123011944-ef32fa3046d9
github.com/docker/distribution v2.8.1+incompatible
@ -40,7 +40,7 @@ require (
github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
github.com/fluent/fluent-logger-golang v1.9.0
github.com/godbus/dbus/v5 v5.0.6
github.com/godbus/dbus/v5 v5.1.0
github.com/gogo/protobuf v1.3.2
github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2
github.com/google/go-cmp v0.5.9
@ -51,9 +51,9 @@ require (
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/memberlist v0.4.0
github.com/hashicorp/serf v0.8.5
github.com/imdario/mergo v0.3.12
github.com/imdario/mergo v0.3.13
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
github.com/klauspost/compress v1.15.12
github.com/klauspost/compress v1.16.3
github.com/miekg/dns v1.1.43
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/buildkit v0.11.4
@ -71,9 +71,9 @@ require (
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
github.com/opencontainers/runc v1.1.3
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.2
github.com/opencontainers/runc v1.1.4
github.com/opencontainers/runtime-spec v1.1.0-rc.1
github.com/opencontainers/selinux v1.11.0
github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.14.0
@ -89,7 +89,7 @@ require (
go.etcd.io/bbolt v1.3.7
golang.org/x/net v0.7.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.5.0
golang.org/x/sys v0.6.0
golang.org/x/text v0.7.0
golang.org/x/time v0.3.0
google.golang.org/genproto v0.0.0-20220706185917-7780775163c4


@ -460,8 +460,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@ -638,8 +638,9 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
@ -903,8 +904,9 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@ -955,8 +957,9 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -1146,23 +1149,24 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@ -1839,8 +1843,8 @@ golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -2221,6 +2225,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=


@ -69,6 +69,58 @@ func Enabled() bool {
return true
}
// StderrIsJournalStream returns whether the process stderr is connected
// to the Journal's stream transport.
//
// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
//
// Returns true if JOURNAL_STREAM environment variable is present,
// and stderr's device and inode numbers match it.
//
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
// is present, but malformed, fstat syscall fails, etc.
//
// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
func StderrIsJournalStream() (bool, error) {
return fdIsJournalStream(syscall.Stderr)
}
// StdoutIsJournalStream returns whether the process stdout is connected
// to the Journal's stream transport.
//
// Returns true if JOURNAL_STREAM environment variable is present,
// and stdout's device and inode numbers match it.
//
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
// is present, but malformed, fstat syscall fails, etc.
//
// Most users should probably use [StderrIsJournalStream].
func StdoutIsJournalStream() (bool, error) {
return fdIsJournalStream(syscall.Stdout)
}
func fdIsJournalStream(fd int) (bool, error) {
journalStream := os.Getenv("JOURNAL_STREAM")
if journalStream == "" {
return false, nil
}
var expectedStat syscall.Stat_t
_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
if err != nil {
return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
}
var stat syscall.Stat_t
err = syscall.Fstat(fd, &stat)
if err != nil {
return false, err
}
match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
return match, nil
}
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these

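As a usage note (not part of this diff): the new StderrIsJournalStream helper from go-systemd v22.5.0 enables the automatic protocol upgrading described above. A minimal sketch, assuming a systemd host; the message text and fallback behaviour are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	// Upgrade to the journal's native protocol only when stderr is already
	// connected to the journal's stream transport (JOURNAL_STREAM matches).
	onJournal, err := journal.StderrIsJournalStream()
	if err != nil {
		fmt.Fprintln(os.Stderr, "journal detection failed:", err)
		return
	}
	if onJournal {
		if err := journal.Send("daemon started", journal.PriInfo, nil); err != nil {
			fmt.Fprintln(os.Stderr, "journal send failed:", err)
		}
	} else {
		fmt.Fprintln(os.Stderr, "daemon started (plain stderr)")
	}
}
```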

@ -33,3 +33,11 @@ func Enabled() bool {
func Send(message string, priority Priority, vars map[string]string) error {
return errors.New("could not initialize socket to journald")
}
func StderrIsJournalStream() (bool, error) {
return false, nil
}
func StdoutIsJournalStream() (bool, error) {
return false, nil
}


@ -176,9 +176,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
} else {
conn.uuid = string(s[1])
return nil, true
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForData:
err = authWriteLine(conn.transport, []byte("ERROR"))
if err != nil {
@ -191,9 +192,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo
return err, false
}
state = waitingForReject
} else {
conn.uuid = string(s[1])
return nil, true
}
conn.uuid = string(s[1])
return nil, true
case state == waitingForOk && string(s[0]) == "DATA":
err = authWriteLine(conn.transport, []byte("DATA"))
if err != nil {


@ -169,7 +169,7 @@ func Connect(address string, opts ...ConnOption) (*Conn, error) {
// SystemBusPrivate returns a new private connection to the system bus.
// Note: this connection is not ready to use. One must perform Auth and Hello
// on the connection before it is useable.
// on the connection before it is usable.
func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
return Dial(getSystemBusPlatformAddress(), opts...)
}
@ -284,10 +284,6 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.ctx = context.Background()
}
conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx)
go func() {
<-conn.ctx.Done()
conn.Close()
}()
conn.calls = newCallTracker()
if conn.handler == nil {
@ -302,6 +298,11 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
conn.outHandler = &outputHandler{conn: conn}
conn.names = newNameTracker()
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
go func() {
<-conn.ctx.Done()
conn.Close()
}()
return conn, nil
}
@ -550,6 +551,11 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call {
call.ctx = ctx
call.ctxCanceler = canceler
conn.calls.track(msg.serial, call)
if ctx.Err() != nil {
// short path: don't even send the message if context already cancelled
conn.calls.handleSendError(msg, ctx.Err())
return call
}
go func() {
<-ctx.Done()
conn.calls.handleSendError(msg, ctx.Err())
@ -649,7 +655,9 @@ func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...Match
// Signal registers the given channel to be passed all received signal messages.
//
// Multiple of these channels can be registered at the same time.
// Multiple of these channels can be registered at the same time. The channel is
// closed if the Conn is closed; it should not be closed by the caller before
// RemoveSignal was called on it.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
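A minimal sketch (not part of this diff) of the ownership rule spelled out in the Signal documentation above: the caller registers the channel and lets the connection close it rather than closing it itself.

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	conn, err := dbus.ConnectSessionBus()
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer conn.Close()

	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch) // register; the Conn closes ch when the connection closes

	// Drain until the connection, and therefore ch, is closed.
	for sig := range ch {
		fmt.Println("signal:", sig.Name)
	}
	// To stop receiving earlier, call conn.RemoveSignal(ch) instead of close(ch).
}
```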
@ -765,7 +773,12 @@ func getKey(s, key string) string {
for _, keyEqualsValue := range strings.Split(s, ",") {
keyValue := strings.SplitN(keyEqualsValue, "=", 2)
if len(keyValue) == 2 && keyValue[0] == key {
return keyValue[1]
val, err := UnescapeBusAddressValue(keyValue[1])
if err != nil {
// No way to return an error.
return ""
}
return val
}
}
return ""


@ -54,7 +54,7 @@ func tryDiscoverDbusSessionBusAddress() string {
if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
// if /run/user/<uid>/bus exists, that file itself
// *is* the unix socket, so return its path
return fmt.Sprintf("unix:path=%s", runUserBusFile)
return fmt.Sprintf("unix:path=%s", EscapeBusAddressValue(runUserBusFile))
}
if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
// if /run/user/<uid>/dbus-session exists, it's a
@ -85,9 +85,6 @@ func getRuntimeDirectory() (string, error) {
}
func fileExists(filename string) bool {
if _, err := os.Stat(filename); !os.IsNotExist(err) {
return true
} else {
return false
}
_, err := os.Stat(filename)
return !os.IsNotExist(err)
}


@ -122,8 +122,11 @@ func isConvertibleTo(dest, src reflect.Type) bool {
case dest.Kind() == reflect.Slice:
return src.Kind() == reflect.Slice &&
isConvertibleTo(dest.Elem(), src.Elem())
case dest.Kind() == reflect.Ptr:
dest = dest.Elem()
return isConvertibleTo(dest, src)
case dest.Kind() == reflect.Struct:
return src == interfacesType
return src == interfacesType || dest.Kind() == src.Kind()
default:
return src.ConvertibleTo(dest)
}
@ -274,13 +277,8 @@ func storeSliceIntoInterface(dest, src reflect.Value) error {
func storeSliceIntoSlice(dest, src reflect.Value) error {
if dest.IsNil() || dest.Len() < src.Len() {
dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
}
if dest.Len() != src.Len() {
return fmt.Errorf(
"dbus.Store: type mismatch: "+
"slices are different lengths "+
"need: %d have: %d",
src.Len(), dest.Len())
} else if dest.Len() > src.Len() {
dest.Set(dest.Slice(0, src.Len()))
}
for i := 0; i < src.Len(); i++ {
err := store(dest.Index(i), getVariantValue(src.Index(i)))


@ -10,8 +10,10 @@ value.
Conversion Rules
For outgoing messages, Go types are automatically converted to the
corresponding D-Bus types. The following types are directly encoded as their
respective D-Bus equivalents:
corresponding D-Bus types. See the official specification at
https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more
information on the D-Bus type system. The following types are directly encoded
as their respective D-Bus equivalents:
Go type | D-Bus type
------------+-----------
@ -39,8 +41,8 @@ Maps encode as DICTs, provided that their key type can be used as a key for
a DICT.
Structs other than Variant and Signature encode as a STRUCT containing their
exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
be skipped.
exported fields in order. Fields whose tags contain `dbus:"-"` and unexported
fields will be skipped.
Pointers encode as the value they're pointed to.
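A minimal sketch (not part of this diff) of the struct-encoding rule restated above; the Job type and its fields are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

// Job marshals as the D-Bus STRUCT "(us)": exported fields in order,
// while unexported fields and fields tagged dbus:"-" are skipped.
type Job struct {
	ID       uint32
	Name     string
	internal bool   // unexported: not encoded
	Note     string `dbus:"-"` // explicitly skipped
}

func main() {
	fmt.Println(dbus.SignatureOf(Job{ID: 7, Name: "backup"})) // (us)
}
```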

vendor/github.com/godbus/dbus/v5/escape.go (generated, vendored; new file, 84 lines)

@ -0,0 +1,84 @@
package dbus
import "net/url"
// EscapeBusAddressValue implements a requirement to escape the values
// in D-Bus server addresses, as defined by the D-Bus specification at
// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
func EscapeBusAddressValue(val string) string {
toEsc := strNeedsEscape(val)
if toEsc == 0 {
// Avoid unneeded allocation/copying.
return val
}
// Avoid allocation for short paths.
var buf [64]byte
var out []byte
// Every to-be-escaped byte needs 2 extra bytes.
required := len(val) + 2*toEsc
if required <= len(buf) {
out = buf[:required]
} else {
out = make([]byte, required)
}
j := 0
for i := 0; i < len(val); i++ {
if ch := val[i]; needsEscape(ch) {
// Convert ch to %xx, where xx is hex value.
out[j] = '%'
out[j+1] = hexchar(ch >> 4)
out[j+2] = hexchar(ch & 0x0F)
j += 3
} else {
out[j] = ch
j++
}
}
return string(out)
}
// UnescapeBusAddressValue unescapes values in D-Bus server addresses,
// as defined by the D-Bus specification at
// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses.
func UnescapeBusAddressValue(val string) (string, error) {
// Looks like url.PathUnescape does exactly what is required.
return url.PathUnescape(val)
}
// hexchar returns the hexadecimal digit for n, where n < 16.
// For invalid values of n, the function panics.
func hexchar(n byte) byte {
const hex = "0123456789abcdef"
// For n >= len(hex), runtime will panic.
return hex[n]
}
// needsEscape tells if a byte is NOT one of optionally-escaped bytes.
func needsEscape(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '/', '\\', '.', '*':
return false
}
return true
}
// strNeedsEscape tells how many bytes in the string need escaping.
func strNeedsEscape(val string) int {
count := 0
for i := 0; i < len(val); i++ {
if needsEscape(val[i]) {
count++
}
}
return count
}

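A short usage sketch (not part of this diff) for the new escaping helpers; the socket path is made up.

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus/v5"
)

func main() {
	// Bytes outside [A-Za-z0-9-_/\.*] must be percent-escaped in bus addresses.
	path := "/run/user/1000/bus dir/bus"
	fmt.Println("unix:path=" + dbus.EscapeBusAddressValue(path))
	// unix:path=/run/user/1000/bus%20dir/bus

	val, err := dbus.UnescapeBusAddressValue("unix%3apath")
	if err != nil {
		fmt.Println("unescape failed:", err)
		return
	}
	fmt.Println(val) // unix:path
}
```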

@ -3,6 +3,7 @@ package dbus
import (
"errors"
"fmt"
"os"
"reflect"
"strings"
)
@ -209,28 +210,23 @@ func (conn *Conn) handleCall(msg *Message) {
}
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
conn.sendMessageAndIfClosed(reply, nil)
if err := reply.IsValid(); err != nil {
fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err)
} else {
conn.sendMessageAndIfClosed(reply, nil)
}
}
}
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
if !path.IsValid() {
return errors.New("dbus: invalid object path")
}
i := strings.LastIndex(name, ".")
if i == -1 {
return errors.New("dbus: invalid method name")
}
iface := name[:i]
member := name[i+1:]
if !isValidMember(member) {
return errors.New("dbus: invalid method name")
}
if !isValidInterface(iface) {
return errors.New("dbus: invalid interface name")
}
msg := new(Message)
msg.Type = TypeSignal
msg.Headers = make(map[HeaderField]Variant)
@ -241,6 +237,9 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
if err := msg.IsValid(); err != nil {
return err
}
var closed bool
conn.sendMessageAndIfClosed(msg, func() {


@ -2,27 +2,24 @@ package dbus
import (
"os"
"sync"
)
var (
homeDir string
homeDirLock sync.Mutex
"os/user"
)
// Get returns the home directory of the current user, which is usually the
// value of HOME environment variable. In case it is not set or empty, os/user
// package is used.
//
// If linking statically with cgo enabled against glibc, make sure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func getHomeDir() string {
homeDirLock.Lock()
defer homeDirLock.Unlock()
homeDir := os.Getenv("HOME")
if homeDir != "" {
return homeDir
}
homeDir = os.Getenv("HOME")
if homeDir != "" {
return homeDir
if u, err := user.Current(); err == nil {
return u.HomeDir
}
homeDir = lookupHomeDir()
return homeDir
return "/"
}


@ -1,15 +0,0 @@
// +build !static_build
package dbus
import (
"os/user"
)
func lookupHomeDir() string {
u, err := user.Current()
if err != nil {
return "/"
}
return u.HomeDir
}


@ -1,45 +0,0 @@
// +build static_build
package dbus
import (
"bufio"
"os"
"strconv"
"strings"
)
func lookupHomeDir() string {
myUid := os.Getuid()
f, err := os.Open("/etc/passwd")
if err != nil {
return "/"
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
break
}
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
parts := strings.Split(line, ":")
if len(parts) >= 6 {
uid, err := strconv.Atoi(parts[2])
if err == nil && uid == myUid {
return parts[5]
}
}
}
// Default to / if we can't get a better value
return "/"
}


@ -208,7 +208,7 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) {
// The possibly returned error can be an error of the underlying reader, an
// InvalidMessageError or a FormatError.
func DecodeMessage(rd io.Reader) (msg *Message, err error) {
return DecodeMessageWithFDs(rd, make([]int, 0));
return DecodeMessageWithFDs(rd, make([]int, 0))
}
type nullwriter struct{}
@ -227,8 +227,8 @@ func (msg *Message) CountFds() (int, error) {
}
func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) {
if err := msg.IsValid(); err != nil {
return make([]int, 0), err
if err := msg.validateHeader(); err != nil {
return nil, err
}
var vs [7]interface{}
switch order {
@ -237,7 +237,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds
case binary.BigEndian:
vs[0] = byte('B')
default:
return make([]int, 0), errors.New("dbus: invalid byte order")
return nil, errors.New("dbus: invalid byte order")
}
body := new(bytes.Buffer)
fds = make([]int, 0)
@ -284,8 +284,13 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error)
}
// IsValid checks whether msg is a valid message and returns an
// InvalidMessageError if it is not.
// InvalidMessageError or FormatError if it is not.
func (msg *Message) IsValid() error {
var b bytes.Buffer
return msg.EncodeTo(&b, nativeEndian)
}
func (msg *Message) validateHeader() error {
if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
return InvalidMessageError("invalid flags")
}
@ -330,6 +335,7 @@ func (msg *Message) IsValid() error {
return InvalidMessageError("missing signature")
}
}
return nil
}


@ -63,7 +63,7 @@ type Method interface {
// any other decoding scheme.
type ArgumentDecoder interface {
// To decode the arguments of a method the sender and message are
// provided incase the semantics of the implementer provides access
// provided in case the semantics of the implementer provides access
// to these as part of the method invocation.
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
}


@ -102,7 +102,7 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) {
}
}
if len(s) == 0 {
panic("empty struct")
panic(InvalidTypeError{t})
}
return "(" + s + ")"
case reflect.Array, reflect.Slice:


@ -154,17 +154,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) {
// substitute the values in the message body (which are indices for the
// array receiver via OOB) with the actual values
for i, v := range msg.Body {
switch v.(type) {
switch index := v.(type) {
case UnixFDIndex:
j := v.(UnixFDIndex)
if uint32(j) >= unixfds {
if uint32(index) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}
msg.Body[i] = UnixFD(fds[j])
msg.Body[i] = UnixFD(fds[index])
case []UnixFDIndex:
idxArray := v.([]UnixFDIndex)
fdArray := make([]UnixFD, len(idxArray))
for k, j := range idxArray {
fdArray := make([]UnixFD, len(index))
for k, j := range index {
if uint32(j) >= unixfds {
return nil, InvalidMessageError("invalid index for unix fd")
}

vendor/github.com/godbus/dbus/v5/transport_zos.go (generated, vendored; new file, 6 lines)

@ -0,0 +1,6 @@
package dbus
func (t *unixTransport) SendNullByte() error {
_, err := t.Write([]byte{0})
return err
}


@ -49,7 +49,7 @@ func ParseVariant(s string, sig Signature) (Variant, error) {
}
// format returns a formatted version of v and whether this string can be parsed
// unambigously.
// unambiguously.
func (v Variant) format() (string, bool) {
switch v.sig.str[0] {
case 'b', 'i':


@ -8,8 +8,7 @@
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![GoCenter Kudos][15]][16]
[![Become my sponsor][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@ -25,8 +24,8 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo
[15]: https://img.shields.io/github/sponsors/imdario
[16]: https://github.com/sponsors/imdario
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
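A minimal sketch (not part of the vendored README) of the default-filling use case described above; the Config type is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Host string
	Port int
}

func main() {
	cfg := Config{Host: "localhost"} // user-supplied; Port left at its zero value
	defaults := Config{Host: "0.0.0.0", Port: 8080}

	// Merge fills only the zero-valued fields of cfg from defaults.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:8080}
}
```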
@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild
- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
@ -168,7 +169,7 @@ func main() {
Note: if test are failing due missing package, please execute:
go get gopkg.in/yaml.v2
go get gopkg.in/yaml.v3
### Transformers
@ -218,7 +219,6 @@ func main() {
}
```
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
## Top Contributors
[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).


@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}
if config.Transformers != nil && !isEmptyValue(dst) {
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return


@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs and maps are supported")
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}


@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- go install mvdan.cc/garble@latest
- go install mvdan.cc/garble@v0.9.3
builds:
-


@ -9,7 +9,6 @@ This package provides various compression algorithms.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
@ -17,6 +16,43 @@ This package provides various compression algorithms.
# changelog
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
* Jan 3rd, 2023 (v1.15.14)
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
* Dec 11, 2022 (v1.15.13)
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
* Oct 26, 2022 (v1.15.12)
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
* Sept 26, 2022 (v1.15.11)
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678


@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
c1.encodeZero(tt[src[ip-2]])
ip -= 2
}
src = src[:ip]
// Main compression loop.
switch {
case !s.zeroBits && s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush.
// We do not need to check if any output is 0 bits.
for ip >= 4 {
for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
c2.encode(tt[v2])
c1.encode(tt[v3])
ip -= 4
}
case !s.zeroBits:
// We do not need to check if any output is 0 bits.
for ip >= 4 {
for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
s.bw.flush32()
c2.encode(tt[v2])
c1.encode(tt[v3])
ip -= 4
}
case s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush
for ip >= 4 {
for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
ip -= 4
}
default:
for ip >= 4 {
for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
s.bw.flush32()
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
ip -= 4
}
}
@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
for _, v := range in {
s.count[v]++
}
m := uint32(0)
m, symlen := uint32(0), s.symbolLen
for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m {
m = v
}
if v > 0 {
s.symbolLen = uint16(i) + 1
}
symlen = uint16(i) + 1
}
s.symbolLen = symlen
return int(m)
}


@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
br := &s.bits
br.init(s.br.unread())
if err := br.init(s.br.unread()); err != nil {
return err
}
var s1, s2 decoder
// Initialize and decode first state and symbol.


@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
return
}
if b.off > 4 {
v := b.in[b.off-4:]
v = v[:4]
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
v := b.in[b.off-4:]
v = v[:4]
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32


@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
bitsA := encA.nBits
bitsB := bitsA + encB.nBits
bitsC := bitsB + encC.nBits
bitsD := bitsC + encD.nBits
combined := uint64(encA.val) |
(uint64(encB.val) << (bitsA & 63)) |
(uint64(encC.val) << (bitsB & 63)) |
(uint64(encD.val) << (bitsC & 63))
b.bitContainer |= combined << (b.nBits & 63)
b.nBits += bitsD
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {


@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
bw.encTwoSymbols(cTable, tmp[3], tmp[2])
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
}
} else {
for ; n >= 0; n -= 4 {
@ -365,29 +364,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
m := uint32(0)
if len(s.prevTable) > 0 {
for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m {
m = v
}
if v > 0 {
s.symbolLen = uint16(i) + 1
if i >= len(s.prevTable) {
reuse = false
} else {
if s.prevTable[i].nBits == 0 {
reuse = false
}
}
s.symbolLen = uint16(i) + 1
if i >= len(s.prevTable) {
reuse = false
} else if s.prevTable[i].nBits == 0 {
reuse = false
}
}
return int(m), reuse
}
for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m {
m = v
}
if v > 0 {
s.symbolLen = uint16(i) + 1
}
s.symbolLen = uint16(i) + 1
}
return int(m), false
}
@ -484,34 +483,35 @@ func (s *Scratch) buildCTable() error {
// Different from reference implementation.
huffNode0 := s.nodes[0 : huffNodesLen+1]
for huffNode[nonNullRank].count == 0 {
for huffNode[nonNullRank].count() == 0 {
nonNullRank--
}
lowS := int16(nonNullRank)
nodeRoot := nodeNb + lowS - 1
lowN := nodeNb
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
huffNode[lowS].setParent(nodeNb)
huffNode[lowS-1].setParent(nodeNb)
nodeNb++
lowS -= 2
for n := nodeNb; n <= nodeRoot; n++ {
huffNode[n].count = 1 << 30
huffNode[n].setCount(1 << 30)
}
// fake entry, strong barrier
huffNode0[0].count = 1 << 31
huffNode0[0].setCount(1 << 31)
// create parents
for nodeNb <= nodeRoot {
var n1, n2 int16
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n1 = lowS
lowS--
} else {
n1 = lowN
lowN++
}
if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n2 = lowS
lowS--
} else {
@ -519,18 +519,19 @@ func (s *Scratch) buildCTable() error {
lowN++
}
huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
huffNode0[n1+1].setParent(nodeNb)
huffNode0[n2+1].setParent(nodeNb)
nodeNb++
}
// distribute weights (unlimited tree height)
huffNode[nodeRoot].nbBits = 0
huffNode[nodeRoot].setNbBits(0)
for n := nodeRoot - 1; n >= startNode; n-- {
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
for n := uint16(0); n <= nonNullRank; n++ {
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
maxNbBits := s.actualTableLog
@ -542,7 +543,7 @@ func (s *Scratch) buildCTable() error {
var nbPerRank [tableLogMax + 1]uint16
var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] {
nbPerRank[v.nbBits]++
nbPerRank[v.nbBits()]++
}
// determine stating value per rank
{
@ -557,7 +558,7 @@ func (s *Scratch) buildCTable() error {
// push nbBits per symbol, symbol order
for _, v := range huffNode[:nonNullRank+1] {
s.cTable[v.symbol].nBits = v.nbBits
s.cTable[v.symbol()].nBits = v.nbBits()
}
// assign value within rank, symbol order
@ -603,12 +604,12 @@ func (s *Scratch) huffSort() {
pos := rank[r].current
rank[r].current++
prev := nodes[(pos-1)&huffNodesMask]
for pos > rank[r].base && c > prev.count {
for pos > rank[r].base && c > prev.count() {
nodes[pos&huffNodesMask] = prev
pos--
prev = nodes[(pos-1)&huffNodesMask]
}
nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
}
}
@ -617,7 +618,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen]
largestBits := huffNode[lastNonNull].nbBits
largestBits := huffNode[lastNonNull].nbBits()
// early exit : no elt > maxNbBits
if largestBits <= maxNbBits {
@ -627,14 +628,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
baseCost := int(1) << (largestBits - maxNbBits)
n := uint32(lastNonNull)
for huffNode[n].nbBits > maxNbBits {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
huffNode[n].nbBits = maxNbBits
for huffNode[n].nbBits() > maxNbBits {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
huffNode[n].setNbBits(maxNbBits)
n--
}
// n stops at huffNode[n].nbBits <= maxNbBits
for huffNode[n].nbBits == maxNbBits {
for huffNode[n].nbBits() == maxNbBits {
n--
}
// n end at index of smallest symbol using < maxNbBits
@ -655,10 +656,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
{
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits {
if huffNode[pos].nbBits() >= currentNbBits {
continue
}
currentNbBits = huffNode[pos].nbBits // < maxNbBits
currentNbBits = huffNode[pos].nbBits() // < maxNbBits
rankLast[maxNbBits-currentNbBits] = uint32(pos)
}
}
@ -675,8 +676,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
if lowPos == noSymbol {
break
}
highTotal := huffNode[highPos].count
lowTotal := 2 * huffNode[lowPos].count
highTotal := huffNode[highPos].count()
lowTotal := 2 * huffNode[lowPos].count()
if highTotal <= lowTotal {
break
}
@ -692,13 +693,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
// this rank is no longer empty
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
}
huffNode[rankLast[nBitsToDecrease]].nbBits++
huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
huffNode[rankLast[nBitsToDecrease]].nbBits())
if rankLast[nBitsToDecrease] == 0 {
/* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol
} else {
rankLast[nBitsToDecrease]--
if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
}
}
@ -706,15 +708,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
for totalCost < 0 { /* Sometimes, cost correction overshoot */
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
for huffNode[n].nbBits == maxNbBits {
for huffNode[n].nbBits() == maxNbBits {
n--
}
huffNode[n+1].nbBits--
huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
rankLast[1] = n + 1
totalCost++
continue
}
huffNode[rankLast[1]+1].nbBits--
huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
rankLast[1]++
totalCost++
}
@ -722,9 +724,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
return maxNbBits
}
type nodeElt struct {
count uint32
parent uint16
symbol byte
nbBits uint8
// A nodeElt is the fields
//
// count uint32
// parent uint16
// symbol byte
// nbBits uint8
//
// in some order, all squashed into an integer so that the compiler
// always loads and stores entire nodeElts instead of separate fields.
type nodeElt uint64
func makeNodeElt(count uint32, symbol byte) nodeElt {
return nodeElt(count) | nodeElt(symbol)<<48
}
func (e *nodeElt) count() uint32 { return uint32(*e) }
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }


@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
return s, nil, err
return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")


@ -4,360 +4,349 @@
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values
MOVQ ctx+0(FP), AX
MOVBQZX 8(AX), DI
MOVQ 16(AX), SI
MOVQ 48(AX), BX
MOVQ 24(AX), R9
MOVQ 32(AX), R10
MOVQ (AX), R11
MOVQ 16(AX), BX
MOVQ 48(AX), SI
MOVQ 24(AX), R8
MOVQ 32(AX), R9
MOVQ (AX), R10
// Main loop
main_loop:
MOVQ SI, R8
CMPQ R8, BX
XORL DX, DX
CMPQ BX, SI
SETGE DL
// br0.fillFast32()
MOVQ 32(R11), R12
MOVBQZX 40(R11), R13
CMPQ R13, $0x20
MOVQ 32(R10), R11
MOVBQZX 40(R10), R12
CMPQ R12, $0x20
JBE skip_fill0
MOVQ 24(R11), AX
SUBQ $0x20, R13
MOVQ 24(R10), AX
SUBQ $0x20, R12
SUBQ $0x04, AX
MOVQ (R11), R14
MOVQ (R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 24(R11)
ORQ R14, R12
MOVL (AX)(R13*1), R13
MOVQ R12, CX
SHLQ CL, R13
MOVQ AX, 24(R10)
ORQ R13, R11
// exhausted = exhausted || (br0.off < 4)
CMPQ AX, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br0.off < 4)
CMPQ AX, $0x04
ADCB $+0, DL
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br0.peekTopBits(peekBits)
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14
MOVQ R11, R13
SHRQ CL, R13
// v1 := table[val1&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8)
MOVW AX, (BX)
// update the bitreader structure
MOVQ R12, 32(R11)
MOVB R13, 40(R11)
ADDQ R9, R8
MOVQ R11, 32(R10)
MOVB R12, 40(R10)
// br1.fillFast32()
MOVQ 80(R11), R12
MOVBQZX 88(R11), R13
CMPQ R13, $0x20
MOVQ 80(R10), R11
MOVBQZX 88(R10), R12
CMPQ R12, $0x20
JBE skip_fill1
MOVQ 72(R11), AX
SUBQ $0x20, R13
MOVQ 72(R10), AX
SUBQ $0x20, R12
SUBQ $0x04, AX
MOVQ 48(R11), R14
MOVQ 48(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 72(R11)
ORQ R14, R12
MOVL (AX)(R13*1), R13
MOVQ R12, CX
SHLQ CL, R13
MOVQ AX, 72(R10)
ORQ R13, R11
// exhausted = exhausted || (br1.off < 4)
CMPQ AX, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br1.off < 4)
CMPQ AX, $0x04
ADCB $+0, DL
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br1.peekTopBits(peekBits)
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14
MOVQ R11, R13
SHRQ CL, R13
// v1 := table[val1&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8)
MOVW AX, (BX)(R8*1)
// update the bitreader structure
MOVQ R12, 80(R11)
MOVB R13, 88(R11)
ADDQ R9, R8
MOVQ R11, 80(R10)
MOVB R12, 88(R10)
// br2.fillFast32()
MOVQ 128(R11), R12
MOVBQZX 136(R11), R13
CMPQ R13, $0x20
MOVQ 128(R10), R11
MOVBQZX 136(R10), R12
CMPQ R12, $0x20
JBE skip_fill2
MOVQ 120(R11), AX
SUBQ $0x20, R13
MOVQ 120(R10), AX
SUBQ $0x20, R12
SUBQ $0x04, AX
MOVQ 96(R11), R14
MOVQ 96(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 120(R11)
ORQ R14, R12
MOVL (AX)(R13*1), R13
MOVQ R12, CX
SHLQ CL, R13
MOVQ AX, 120(R10)
ORQ R13, R11
// exhausted = exhausted || (br2.off < 4)
CMPQ AX, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br2.off < 4)
CMPQ AX, $0x04
ADCB $+0, DL
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br2.peekTopBits(peekBits)
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14
MOVQ R11, R13
SHRQ CL, R13
// v1 := table[val1&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8)
MOVW AX, (BX)(R8*2)
// update the bitreader structure
MOVQ R12, 128(R11)
MOVB R13, 136(R11)
ADDQ R9, R8
MOVQ R11, 128(R10)
MOVB R12, 136(R10)
// br3.fillFast32()
MOVQ 176(R11), R12
MOVBQZX 184(R11), R13
CMPQ R13, $0x20
MOVQ 176(R10), R11
MOVBQZX 184(R10), R12
CMPQ R12, $0x20
JBE skip_fill3
MOVQ 168(R11), AX
SUBQ $0x20, R13
MOVQ 168(R10), AX
SUBQ $0x20, R12
SUBQ $0x04, AX
MOVQ 144(R11), R14
MOVQ 144(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 168(R11)
ORQ R14, R12
MOVL (AX)(R13*1), R13
MOVQ R12, CX
SHLQ CL, R13
MOVQ AX, 168(R10)
ORQ R13, R11
// exhausted = exhausted || (br3.off < 4)
CMPQ AX, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br3.off < 4)
CMPQ AX, $0x04
ADCB $+0, DL
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br3.peekTopBits(peekBits)
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14
MOVQ R11, R13
SHRQ CL, R13
// v1 := table[val1&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8)
LEAQ (R8)(R8*2), CX
MOVW AX, (BX)(CX*1)
// update the bitreader structure
MOVQ R12, 176(R11)
MOVB R13, 184(R11)
ADDQ $0x02, SI
MOVQ R11, 176(R10)
MOVB R12, 184(R10)
ADDQ $0x02, BX
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
SUBQ 16(AX), SI
SHLQ $0x02, SI
MOVQ SI, 40(AX)
SUBQ 16(AX), BX
SHLQ $0x02, BX
MOVQ BX, 40(AX)
RET
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values
MOVQ ctx+0(FP), CX
MOVBQZX 8(CX), DI
MOVQ 16(CX), BX
MOVQ 48(CX), SI
MOVQ 24(CX), R9
MOVQ 32(CX), R10
MOVQ (CX), R11
MOVQ 24(CX), R8
MOVQ 32(CX), R9
MOVQ (CX), R10
// Main loop
main_loop:
MOVQ BX, R8
CMPQ R8, SI
XORL DX, DX
CMPQ BX, SI
SETGE DL
// br0.fillFast32()
MOVQ 32(R11), R12
MOVBQZX 40(R11), R13
CMPQ R13, $0x20
MOVQ 32(R10), R11
MOVBQZX 40(R10), R12
CMPQ R12, $0x20
JBE skip_fill0
MOVQ 24(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ (R11), R15
MOVQ 24(R10), R13
SUBQ $0x20, R12
SUBQ $0x04, R13
MOVQ (R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 24(R11)
ORQ R15, R12
MOVL (R13)(R14*1), R14
MOVQ R12, CX
SHLQ CL, R14
MOVQ R13, 24(R10)
ORQ R14, R11
// exhausted = exhausted || (br0.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br0.off < 4)
CMPQ R13, $0x04
ADCB $+0, DL
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br0.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v1 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// val2 := br0.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v2 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val3 := br0.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v3 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br0.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@ -365,88 +354,86 @@ skip_fill0:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8)
MOVL AX, (BX)
// update the bitreader structure
MOVQ R12, 32(R11)
MOVB R13, 40(R11)
ADDQ R9, R8
MOVQ R11, 32(R10)
MOVB R12, 40(R10)
// br1.fillFast32()
MOVQ 80(R11), R12
MOVBQZX 88(R11), R13
CMPQ R13, $0x20
MOVQ 80(R10), R11
MOVBQZX 88(R10), R12
CMPQ R12, $0x20
JBE skip_fill1
MOVQ 72(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 48(R11), R15
MOVQ 72(R10), R13
SUBQ $0x20, R12
SUBQ $0x04, R13
MOVQ 48(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 72(R11)
ORQ R15, R12
MOVL (R13)(R14*1), R14
MOVQ R12, CX
SHLQ CL, R14
MOVQ R13, 72(R10)
ORQ R14, R11
// exhausted = exhausted || (br1.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br1.off < 4)
CMPQ R13, $0x04
ADCB $+0, DL
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br1.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v1 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// val2 := br1.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v2 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val3 := br1.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v3 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br1.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@ -454,88 +441,86 @@ skip_fill1:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8)
MOVL AX, (BX)(R8*1)
// update the bitreader structure
MOVQ R12, 80(R11)
MOVB R13, 88(R11)
ADDQ R9, R8
MOVQ R11, 80(R10)
MOVB R12, 88(R10)
// br2.fillFast32()
MOVQ 128(R11), R12
MOVBQZX 136(R11), R13
CMPQ R13, $0x20
MOVQ 128(R10), R11
MOVBQZX 136(R10), R12
CMPQ R12, $0x20
JBE skip_fill2
MOVQ 120(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 96(R11), R15
MOVQ 120(R10), R13
SUBQ $0x20, R12
SUBQ $0x04, R13
MOVQ 96(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 120(R11)
ORQ R15, R12
MOVL (R13)(R14*1), R14
MOVQ R12, CX
SHLQ CL, R14
MOVQ R13, 120(R10)
ORQ R14, R11
// exhausted = exhausted || (br2.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br2.off < 4)
CMPQ R13, $0x04
ADCB $+0, DL
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br2.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v1 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// val2 := br2.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v2 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val3 := br2.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v3 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br2.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@ -543,88 +528,86 @@ skip_fill2:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8)
MOVL AX, (BX)(R8*2)
// update the bitreader structure
MOVQ R12, 128(R11)
MOVB R13, 136(R11)
ADDQ R9, R8
MOVQ R11, 128(R10)
MOVB R12, 136(R10)
// br3.fillFast32()
MOVQ 176(R11), R12
MOVBQZX 184(R11), R13
CMPQ R13, $0x20
MOVQ 176(R10), R11
MOVBQZX 184(R10), R12
CMPQ R12, $0x20
JBE skip_fill3
MOVQ 168(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 144(R11), R15
MOVQ 168(R10), R13
SUBQ $0x20, R12
SUBQ $0x04, R13
MOVQ 144(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 168(R11)
ORQ R15, R12
MOVL (R13)(R14*1), R14
MOVQ R12, CX
SHLQ CL, R14
MOVQ R13, 168(R10)
ORQ R14, R11
// exhausted = exhausted || (br3.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL
// exhausted += (br3.off < 4)
CMPQ R13, $0x04
ADCB $+0, DL
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v0 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val1 := br3.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v1 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// val2 := br3.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v2 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
// val3 := br3.peekTopBits(peekBits)
MOVQ R12, R14
MOVQ R11, R13
MOVQ DI, CX
SHRQ CL, R14
SHRQ CL, R13
// v3 := table[val0&mask]
MOVW (R10)(R14*2), CX
MOVW (R9)(R13*2), CX
// br3.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R12
ADDB CL, R13
SHLQ CL, R11
ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@ -632,11 +615,12 @@ skip_fill3:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8)
LEAQ (R8)(R8*2), CX
MOVL AX, (BX)(CX*1)
// update the bitreader structure
MOVQ R12, 176(R11)
MOVB R13, 184(R11)
MOVQ R11, 176(R10)
MOVB R12, 184(R10)
ADDQ $0x04, BX
TESTB DL, DL
JZ main_loop
@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
JB error_max_decoded_size_exeeded
JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
@ -667,7 +651,7 @@ main_loop:
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
JGE error_max_decoded_size_exeeded
JGE error_max_decoded_size_exceeded
// Decode 4 values
CMPQ R11, $0x20
@ -744,7 +728,7 @@ loop_condition:
RET
// Report error
error_max_decoded_size_exeeded:
error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)
@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
JB error_max_decoded_size_exeeded
JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
@ -772,7 +756,7 @@ main_loop:
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
JGE error_max_decoded_size_exeeded
JGE error_max_decoded_size_exceeded
// Decode 4 values
CMPQ R11, $0x20
@ -839,7 +823,7 @@ loop_condition:
RET
// Report error
error_max_decoded_size_exeeded:
error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)

View file

@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// EncodeBlockInto exposes encodeBlock but checks dst size.
func EncodeBlockInto(dst, src []byte) (d int) {
if MaxEncodedLen(len(src)) > len(dst) {
return 0
}
// encodeBlock cannot handle blocks larger than maxBlockSize, so split the input.
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return d
}
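// Caller-side sketch (not part of the package): size dst with MaxEncodedLen so
// the guard above passes. Note that EncodeBlockInto emits only block data; when
// a full snappy block is wanted, writing the varint-encoded decompressed length
// remains the caller's job.
func encodeBlockToBuffer(src []byte) []byte {
	n := MaxEncodedLen(len(src))
	if n < 0 {
		return nil // defensively handle oversized src (a negative value may signal it)
	}
	dst := make([]byte, n)
	return dst[:EncodeBlockInto(dst, src)]
}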
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.

View file

@ -9,6 +9,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
@ -82,8 +83,9 @@ type blockDec struct {
err error
// Check against this crc
checkCRC []byte
// Check against this crc, if hasCRC is true.
checkCRC uint32
hasCRC bool
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
@ -191,16 +193,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
// Read block data.
if cap(b.dataStorage) < cSize {
if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
// byteBuf doesn't need a destination buffer.
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
b.dst = make([]byte, 0, maxSize+1)
}
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debugDecoder {
@ -209,6 +209,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return err
}
if cap(b.dst) <= maxSize {
b.dst = make([]byte, 0, maxSize+1)
}
return nil
}
@ -440,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
}
var err error
if debugDecoder {
println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
}
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)

View file

@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
return 0, nil
return 0, io.ErrUnexpectedEOF
}
r := bb[0]
*b = bb[1:]

View file

@ -4,7 +4,6 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"io"
@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
if string(b) != frameMagic {
if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
if len(in) < 4 {
@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
}
b, in = in[:size], in[size:]
h.HeaderSize += int(size)
switch size {
switch len(b) {
case 1:
h.DictionaryID = uint32(b[0])
case 2:
@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
}
b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize)
switch fcsSize {
switch len(b) {
case 1:
h.FrameContentSize = uint64(b[0])
case 2:

View file

@ -5,7 +5,6 @@
package zstd
import (
"bytes"
"context"
"encoding/binary"
"io"
@ -41,8 +40,7 @@ type Decoder struct {
frame *frameDec
// Custom dictionaries.
// Always uses copies.
dicts map[uint32]dict
dicts map[uint32]*dict
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Transfer option dicts.
d.dicts = make(map[uint32]dict, len(d.o.dicts))
d.dicts = make(map[uint32]*dict, len(d.o.dicts))
for _, dc := range d.o.dicts {
d.dicts[dc.id] = dc
}
@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
return dst, err
}
if frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
return nil, ErrUnknownDictionary
}
if debugDecoder {
println("setting dict", frame.DictionaryID)
}
frame.history.setDict(&dict)
if err = d.setDict(frame); err != nil {
return nil, err
}
if frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
if !d.o.ignoreChecksum && len(next.b) > 0 {
if d.o.ignoreChecksum {
return true
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
}
}
}
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if next.err == nil && next.d != nil && next.d.hasCRC {
got := uint32(d.current.crc.Sum64())
if got != next.d.checkCRC {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
}
d.current.err = ErrCRCMismatch
} else {
if debugDecoder {
println("CRC ok", tmp[:])
printf("CRC ok %08x\n", got)
}
}
}
@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
if !d.syncStream.inFrame {
d.frame.history.reset()
d.current.err = d.frame.reset(&d.syncStream.br)
if d.current.err == nil {
d.current.err = d.setDict(d.frame)
}
if d.current.err != nil {
return false
}
if d.frame.DictionaryID != nil {
dict, ok := d.dicts[*d.frame.DictionaryID]
if !ok {
d.current.err = ErrUnknownDictionary
return false
} else {
d.frame.history.setDict(&dict)
}
}
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
d.current.err = ErrDecoderSizeExceeded
return false
@ -864,13 +851,8 @@ decodeStream:
if debugDecoder && err != nil {
println("Frame decoder returned", err)
}
if err == nil && frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
err = ErrUnknownDictionary
} else {
frame.history.setDict(&dict)
}
if err == nil {
err = d.setDict(frame)
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
@ -918,18 +900,22 @@ decodeStream:
println("next block returned error:", err)
}
dec.err = err
dec.checkCRC = nil
dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4)
if err != nil {
if len(crc) < 4 {
if err == nil {
err = io.ErrUnexpectedEOF
}
println("CRC missing?", err)
dec.err = err
}
var tmp [4]byte
copy(tmp[:], crc)
dec.checkCRC = tmp[:]
if debugDecoder {
println("found crc to check:", dec.checkCRC)
} else {
dec.checkCRC = binary.LittleEndian.Uint32(crc)
dec.hasCRC = true
if debugDecoder {
printf("found crc to check: %08x\n", dec.checkCRC)
}
}
}
err = dec.err
@ -948,3 +934,20 @@ decodeStream:
hist.reset()
d.frame.history.b = frameHistCache
}
func (d *Decoder) setDict(frame *frameDec) (err error) {
dict, ok := d.dicts[frame.DictionaryID]
if ok {
if debugDecoder {
println("setting dict", frame.DictionaryID)
}
frame.history.setDict(dict)
} else if frame.DictionaryID != 0 {
// A zero or missing dictionary id is ambiguous:
// either dictionary zero, or no dictionary. In particular,
// zstd --patch-from uses this id for the source file,
// so only return an error if the dictionary id is not zero.
err = ErrUnknownDictionary
}
return err
}
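// Caller-side sketch of the lookup above (not part of the package): a
// dictionary registered with WithDecoderDicts is picked up automatically when
// a frame carries its ID, and frames with dictionary ID 0 decode without one.
func decodeAllWithDict(dictData, compressed []byte) ([]byte, error) {
	dec, err := NewReader(nil, WithDecoderDicts(dictData))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}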

View file

@ -6,6 +6,8 @@ package zstd
import (
"errors"
"fmt"
"math/bits"
"runtime"
)
@ -18,7 +20,7 @@ type decoderOptions struct {
concurrent int
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
dicts []*dict
ignoreChecksum bool
limitToCap bool
decodeBufsBelow int
@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
}
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
// If several dictionaries with the same ID is provided the last one will be used.
//
// Each slice in dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// If several dictionaries with the same ID are provided, the last one will be used.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption {
return func(o *decoderOptions) error {
for _, b := range dicts {
@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
if err != nil {
return err
}
o.dicts = append(o.dicts, *d)
o.dicts = append(o.dicts, d)
}
return nil
}
}
// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
return nil
}
}
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.

View file

@ -1,7 +1,6 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
@ -20,7 +19,10 @@ type dict struct {
content []byte
}
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
const dictMagic = "\x37\xa4\x30\xec"
// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
const dictMaxLength = 1 << 31
// ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 {
@ -30,14 +32,38 @@ func (d *dict) ID() uint32 {
return d.id
}
// DictContentSize returns the dictionary content size or 0 if d is nil.
func (d *dict) DictContentSize() int {
// ContentSize returns the dictionary content size or 0 if d is nil.
func (d *dict) ContentSize() int {
if d == nil {
return 0
}
return len(d.content)
}
// Content returns the dictionary content.
func (d *dict) Content() []byte {
if d == nil {
return nil
}
return d.content
}
// Offsets returns the initial offsets.
func (d *dict) Offsets() [3]int {
if d == nil {
return [3]int{}
}
return d.offsets
}
// LitEncoder returns the literal encoder.
func (d *dict) LitEncoder() *huff0.Scratch {
if d == nil {
return nil
}
return d.litEnc
}
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) {
ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}},
}
if !bytes.Equal(b[:4], dictMagic[:]) {
if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch
}
d.id = binary.LittleEndian.Uint32(b[4:8])
@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
var err error
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("loading literal table: %w", err)
}
d.litEnc.Reuse = huff0.ReusePolicyMust
@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
return &d, nil
}
// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
func InspectDictionary(b []byte) (interface {
ID() uint32
ContentSize() int
Content() []byte
Offsets() [3]int
LitEncoder() *huff0.Scratch
}, error) {
initPredefined()
d, err := loadDict(b)
return d, err
}
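// Usage sketch (hypothetical helper, not part of the package): report a
// dictionary's ID and content size through the inspection interface above.
func describeDict(raw []byte) (id uint32, contentSize int, err error) {
	info, err := InspectDictionary(raw)
	if err != nil {
		return 0, 0, err
	}
	return info.ID(), info.ContentSize(), nil
}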

View file

@ -16,6 +16,7 @@ type fastBase struct {
cur int32
// maximum offset. Should be at least 2x block size.
maxMatchOff int32
bufferReset int32
hist []byte
crc *xxhash.Digest
tmp [8]byte
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
}
func (e *fastBase) addBlock(src []byte) int32 {
if debugAsserts && e.cur > bufferReset {
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
if debugAsserts && e.cur > e.bufferReset {
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
}
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
a := src[s:]
b := src[t:]
b = b[:len(a)]
end := int32((len(a) >> 3) << 3)
for i := int32(0); i < end; i += 8 {
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
return i + int32(bits.TrailingZeros64(diff)>>3)
}
}
a = a[end:]
b = b[end:]
for i := range a {
if a[i] != b[i] {
return int32(i) + end
}
}
return int32(len(a)) + end
return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table.
@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
if singleBlock {
e.lowMem = true
}
e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
e.lowMem = low
}
// We offset current position so everything will be out of reach.
// If above reset line, history will be purged.
if e.cur < bufferReset {
if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist))
}
e.hist = e.hist[:0]

View file

@ -32,7 +32,6 @@ type match struct {
length int32
rep int32
est int32
_ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}
const highScore = 25000
@ -85,14 +84,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.table = [bestShortTableSize]prevEntry{}
e.longTable = [bestLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@ -193,12 +188,6 @@ encodeLoop:
panic("offset0 was 0")
}
bestOf := func(a, b match) match {
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
return a
}
return b
}
const goodEnough = 100
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
@ -206,36 +195,41 @@ encodeLoop:
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
// Set m to a match at offset if it looks like that will improve compression.
improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{s: s, est: highScore}
return
}
if debugAsserts {
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
m.estBits(bitsPerByte)
return m
cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
*m = cand
}
}
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
best := match{s: s, est: highScore}
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8)
spp := s + 1
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.length > 0 {
cv32 = uint32(cv >> 24)
spp += 2
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
}
}
// Load next and check...
@ -262,28 +256,30 @@ encodeLoop:
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
// Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
}
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
const skipBeginning = 2
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
}
best = bestEnd
}
}
}

View file

@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.table = [betterShortTableSize]tableEntry{}
e.longTable = [betterLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}

View file

@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = tableEntry{}
}
e.table = [dFastShortTableSize]tableEntry{}
e.longTable = [dFastLongTableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
if e.cur >= bufferReset {
if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@ -685,7 +681,7 @@ encodeLoop:
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset {
if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}

View file

@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
}
// Protect against e.cur wraparound.
if e.cur >= bufferReset {
if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@ -538,7 +538,7 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset {
if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
return
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.table = [tableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}

View file

@ -8,6 +8,7 @@ import (
"crypto/rand"
"fmt"
"io"
"math"
rdebug "runtime/debug"
"sync"
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
frameHeader := 4 + 2 // magic + frame header & window descriptor
if e.o.dict != nil {
frameHeader += 4
}
// Frame content size:
if size < 256 {
frameHeader++
} else if size < 65536+256 {
frameHeader += 2
} else if size < math.MaxInt32 {
frameHeader += 4
} else {
frameHeader += 8
}
// Final crc
if e.o.crc {
frameHeader += 4
}
// Max overhead is 3 bytes/block.
// There cannot be 0 blocks.
blocks := (size + e.o.blockSize) / e.o.blockSize
// Combine, add padding.
maxSz := frameHeader + 3*blocks + size
if e.o.pad > 1 {
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
}
return maxSz
}
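// Caller-side sketch (not part of the package): pre-allocate the destination
// with MaxEncodedSize so EncodeAll never has to grow the buffer.
func encodeAllPreAlloc(e *Encoder, src []byte) []byte {
	dst := make([]byte, 0, e.MaxEncodedSize(len(src)))
	return e.EncodeAll(src, dst)
}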

View file

@ -3,6 +3,8 @@ package zstd
import (
"errors"
"fmt"
"math"
"math/bits"
"runtime"
"strings"
)
@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
switch o.level {
case SpeedFastest:
if o.dict != nil {
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault:
if o.dict != nil {
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
}
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression:
if o.dict != nil {
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
}
// WithEncoderDict allows to register a dictionary that will be used for the encode.
//
// The slice dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// The encoder *may* choose to use no dictionary instead for certain payloads.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error {
d, err := loadDict(dict)
@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
return nil
}
}
// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
//
// The slice content may contain arbitrary data. It will be used as an initial
// history.
func WithEncoderDictRaw(id uint32, content []byte) EOption {
return func(o *encoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
return nil
}
}
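// Round-trip sketch for the raw-dictionary options (the ID 42 is an arbitrary
// placeholder): the same (id, content) pair must be registered on both the
// encoder and the decoder.
func rawDictRoundTrip(content, payload []byte) ([]byte, error) {
	enc, err := NewWriter(nil, WithEncoderDictRaw(42, content))
	if err != nil {
		return nil, err
	}
	compressed := enc.EncodeAll(payload, nil)
	_ = enc.Close()
	dec, err := NewReader(nil, WithDecoderDictRaw(42, content))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}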

View file

@ -5,7 +5,7 @@
package zstd
import (
"bytes"
"encoding/binary"
"encoding/hex"
"errors"
"io"
@ -29,7 +29,7 @@ type frameDec struct {
FrameContentSize uint64
DictionaryID *uint32
DictionaryID uint32
HasCheckSum bool
SingleSegment bool
}
@ -43,9 +43,9 @@ const (
MaxWindowSize = 1 << 29
)
var (
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
const (
frameMagic = "\x28\xb5\x2f\xfd"
skippableFrameMagic = "\x2a\x4d\x18"
)
func newFrameDec(o decoderOptions) *frameDec {
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
copy(signature[1:], b)
}
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
}
// Break if not skippable frame.
break
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
return err
}
}
if !bytes.Equal(signature[:], frameMagic) {
if string(signature[:]) != frameMagic {
if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic)
println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
}
return ErrMagicMismatch
}
@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
d.DictionaryID = nil
d.DictionaryID = 0
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
return err
}
var id uint32
switch size {
switch len(b) {
case 1:
id = uint32(b[0])
case 2:
@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
if debugDecoder {
println("Dict size", size, "ID:", id)
}
if id > 0 {
// ID 0 means "sorry, no dictionary anyway".
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
d.DictionaryID = &id
}
d.DictionaryID = id
}
// Read Frame_Content_Size
@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
println("Reading Frame content", err)
return err
}
switch fcsSize {
switch len(b) {
case 1:
d.FrameContentSize = uint64(b[0])
case 2:
@ -305,7 +301,7 @@ func (d *frameDec) checkCRC() error {
}
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
buf, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
@ -315,22 +311,17 @@ func (d *frameDec) checkCRC() error {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
want := binary.LittleEndian.Uint32(buf[:4])
got := uint32(d.crc.Sum64())
if !bytes.Equal(tmp[:], want) {
if got != want {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
printf("CRC check failed: got %08x, want %08x\n", got, want)
}
return ErrCRCMismatch
}
if debugDecoder {
println("CRC ok", tmp[:])
printf("CRC ok %08x\n", got)
}
return nil
}
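// Standalone sketch of the same check (not used by the decoder): a zstd
// content checksum is the low 32 bits of the frame's XXH64 (seed 0), stored
// little endian, which is why both values above are truncated to uint32.
func contentChecksumMatches(decoded []byte, stored [4]byte) bool {
	want := binary.LittleEndian.Uint32(stored[:])
	got := uint32(xxhash.Sum64(decoded))
	return got == want
}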

View file

@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
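For orientation, a minimal use of the one-shot and streaming entry points
(assuming the upstream module path; this vendored copy lives under the zstd
internals and is not imported directly):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64String("xxhash"))

	// Streaming via Digest; hashes the same bytes, so prints the same value.
	d := xxhash.New()
	d.WriteString("xx")
	d.WriteString("hash")
	fmt.Printf("%016x\n", d.Sum64())
}
```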
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.
[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
| input size | purego | asm |
| ---------- | --------- | --------- |
| 4 B | 1.3 GB/s | 1.2 GB/s |
| 16 B | 2.9 GB/s | 3.5 GB/s |
| 100 B | 6.9 GB/s | 8.1 GB/s |
| 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)

View file

@ -18,19 +18,11 @@ const (
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@ -52,10 +44,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
memleft := d.mem[d.n&(len(d.mem)-1):]
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
b = b[c:]
d.n = 0
}
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
b := d.mem[:d.n&(len(d.mem)-1)]
for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
b = b[4:]
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33

View file

@ -1,3 +1,4 @@
//go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
@ -5,212 +6,205 @@
#include "textflag.h"
// Register allocation:
// AX h
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// DI prime4v
// Registers:
#define h AX
#define d AX
#define p SI // pointer to advance through b
#define n DX
#define end BX // loop end
#define v1 R8
#define v2 R9
#define v3 R10
#define v4 R11
#define x R12
#define prime1 R13
#define prime2 R14
#define prime4 DI
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
#define round(acc, x) \
IMULQ prime2, x \
ADDQ x, acc \
ROLQ $31, acc \
IMULQ prime1, acc
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ DI, acc
// round0 performs the operation x = round(0, x).
#define round0(x) \
IMULQ prime2, x \
ROLQ $31, x \
IMULQ prime1, x
// mergeRound applies a merge round on the two registers acc and x.
// It assumes that prime1, prime2, and prime4 have been loaded.
#define mergeRound(acc, x) \
round0(x) \
XORQ x, acc \
IMULQ prime1, acc \
ADDQ prime4, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
MOVQ +0(p), x \
round(v1, x) \
MOVQ +8(p), x \
round(v2, x) \
MOVQ +16(p), x \
round(v3, x) \
MOVQ +24(p), x \
round(v4, x) \
ADDQ $32, p \
CMPQ p, end \
JLE loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), DI
MOVQ ·primes+0(SB), prime1
MOVQ ·primes+8(SB), prime2
MOVQ ·primes+24(SB), prime4
// Load slice.
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (SI)(DX*1), BX
MOVQ b_base+0(FP), p
MOVQ b_len+8(FP), n
LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
SUBQ $32, BX
SUBQ $32, end
// Check whether we have at least one block.
CMPQ DX, $32
CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
MOVQ prime1, v1
ADDQ prime2, v1
MOVQ prime2, v2
XORQ v3, v3
XORQ v4, v4
SUBQ prime1, v4
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
blockLoop()
CMPQ SI, BX
JLE blockLoop
MOVQ v1, h
ROLQ $1, h
MOVQ v2, x
ROLQ $7, x
ADDQ x, h
MOVQ v3, x
ROLQ $12, x
ADDQ x, h
MOVQ v4, x
ROLQ $18, x
ADDQ x, h
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
mergeRound(h, v1)
mergeRound(h, v2)
mergeRound(h, v3)
mergeRound(h, v4)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
MOVQ ·primes+32(SB), h
afterBlocks:
ADDQ DX, AX
ADDQ n, h
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
ADDQ $24, end
CMPQ p, end
JG try4
CMPQ SI, BX
JG fourByte
loop8:
MOVQ (p), x
ADDQ $8, p
round0(x)
XORQ x, h
ROLQ $27, h
IMULQ prime1, h
ADDQ prime4, h
wordLoop:
// Calculate k1.
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
CMPQ p, end
JLE loop8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ DI, AX
try4:
ADDQ $4, end
CMPQ p, end
JG try1
CMPQ SI, BX
JLE wordLoop
MOVL (p), x
ADDQ $4, p
IMULQ prime1, x
XORQ x, h
fourByte:
ADDQ $4, BX
CMPQ SI, BX
JG singles
ROLQ $23, h
IMULQ prime2, h
ADDQ ·primes+16(SB), h
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
try1:
ADDQ $4, end
CMPQ p, end
JGE finalize
singlesLoop:
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
loop1:
MOVBQZX (p), x
ADDQ $1, p
IMULQ ·primes+32(SB), x
XORQ x, h
ROLQ $11, h
IMULQ prime1, h
ROLQ $11, AX
IMULQ R13, AX
CMPQ SI, BX
JL singlesLoop
CMPQ p, end
JL loop1
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ h, x
SHRQ $33, x
XORQ x, h
IMULQ prime2, h
MOVQ h, x
SHRQ $29, x
XORQ x, h
IMULQ ·primes+16(SB), h
MOVQ h, x
SHRQ $32, x
XORQ x, h
MOVQ AX, ret+24(FP)
MOVQ h, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·primes+0(SB), prime1
MOVQ ·primes+8(SB), prime2
// Load slice.
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
MOVQ b_base+8(FP), p
MOVQ b_len+16(FP), n
LEAQ (p)(n*1), end
SUBQ $32, end
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
MOVQ s+0(FP), d
MOVQ 0(d), v1
MOVQ 8(d), v2
MOVQ 16(d), v3
MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
blockLoop()
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
MOVQ v1, 0(d)
MOVQ v2, 8(d)
MOVQ v3, 16(d)
MOVQ v4, 24(d)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
// The number of bytes written is p minus the old base pointer.
SUBQ b_base+8(FP), p
MOVQ p, ret+32(FP)
RET

View file

@ -1,13 +1,17 @@
// +build gc,!purego,!noasm
//go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
// +build !noasm
#include "textflag.h"
// Register allocation.
// Registers:
#define digest R1
#define h R2 // Return value.
#define p R3 // Input pointer.
#define len R4
#define nblocks R5 // len / 32.
#define h R2 // return value
#define p R3 // input pointer
#define n R4 // input length
#define nblocks R5 // n / 32
#define prime1 R7
#define prime2 R8
#define prime3 R9
@ -25,60 +29,52 @@
#define round(acc, x) \
MADD prime2, acc, x, acc \
ROR $64-31, acc \
MUL prime1, acc \
MUL prime1, acc
// x = round(0, x).
// round0 performs the operation x = round(0, x).
#define round0(x) \
MUL prime2, x \
ROR $64-31, x \
MUL prime1, x \
MUL prime1, x
#define mergeRound(x) \
round0(x) \
EOR x, h \
MADD h, prime4, prime1, h \
#define mergeRound(acc, x) \
round0(x) \
EOR x, acc \
MADD acc, prime4, prime1, acc
// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
#define blocksLoop() \
LSR $5, len, nblocks \
PCALIGN $16 \
loop: \
LDP.P 32(p), (x1, x2) \
round(v1, x1) \
LDP -16(p), (x3, x4) \
round(v2, x2) \
SUB $1, nblocks \
round(v3, x3) \
round(v4, x4) \
CBNZ nblocks, loop \
// The primes are repeated here to ensure that they're stored
// in a contiguous array, so we can load them with LDP.
DATA primes<> +0(SB)/8, $11400714785074694791
DATA primes<> +8(SB)/8, $14029467366897019727
DATA primes<>+16(SB)/8, $1609587929392839161
DATA primes<>+24(SB)/8, $9650029242287828579
DATA primes<>+32(SB)/8, $2870177450012600261
GLOBL primes<>(SB), NOPTR+RODATA, $40
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that n >= 32.
#define blockLoop() \
LSR $5, n, nblocks \
PCALIGN $16 \
loop: \
LDP.P 16(p), (x1, x2) \
LDP.P 16(p), (x3, x4) \
round(v1, x1) \
round(v2, x2) \
round(v3, x3) \
round(v4, x4) \
SUB $1, nblocks \
CBNZ nblocks, loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
LDP b_base+0(FP), (p, len)
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
LDP b_base+0(FP), (p, n)
LDP primes<> +0(SB), (prime1, prime2)
LDP primes<>+16(SB), (prime3, prime4)
MOVD primes<>+32(SB), prime5
LDP ·primes+0(SB), (prime1, prime2)
LDP ·primes+16(SB), (prime3, prime4)
MOVD ·primes+32(SB), prime5
CMP $32, len
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
BLO afterLoop
CMP $32, n
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
BLT afterLoop
ADD prime1, prime2, v1
MOVD prime2, v2
MOVD $0, v3
NEG prime1, v4
blocksLoop()
blockLoop()
ROR $64-1, v1, x1
ROR $64-7, v2, x2
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
ADD x3, x4
ADD x2, x4, h
mergeRound(v1)
mergeRound(v2)
mergeRound(v3)
mergeRound(v4)
mergeRound(h, v1)
mergeRound(h, v2)
mergeRound(h, v3)
mergeRound(h, v4)
afterLoop:
ADD len, h
ADD n, h
TBZ $4, len, try8
TBZ $4, n, try8
LDP.P 16(p), (x1, x2)
round0(x1)
// NOTE: here and below, sequencing the EOR after the ROR (using a
// rotated register) is worth a small but measurable speedup for small
// inputs.
ROR $64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
round0(x2)
ROR $64-27, h
EOR x2 @> 64-27, h
EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h
try8:
TBZ $3, len, try4
TBZ $3, n, try4
MOVD.P 8(p), x1
round0(x1)
ROR $64-27, h
EOR x1 @> 64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
try4:
TBZ $2, len, try2
TBZ $2, n, try2
MOVWU.P 4(p), x2
MUL prime1, x2
ROR $64-23, h
EOR x2 @> 64-23, h
EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h
try2:
TBZ $1, len, try1
TBZ $1, n, try1
MOVHU.P 2(p), x3
AND $255, x3, x1
LSR $8, x3, x2
MUL prime5, x1
ROR $64-11, h
EOR x1 @> 64-11, h
EOR x1 @> 64-11, h, h
MUL prime1, h
MUL prime5, x2
ROR $64-11, h
EOR x2 @> 64-11, h
EOR x2 @> 64-11, h, h
MUL prime1, h
try1:
TBZ $0, len, end
TBZ $0, n, finalize
MOVBU (p), x4
MUL prime5, x4
ROR $64-11, h
EOR x4 @> 64-11, h
EOR x4 @> 64-11, h, h
MUL prime1, h
end:
finalize:
EOR h >> 33, h
MUL prime2, h
EOR h >> 29, h
@ -163,24 +163,22 @@ end:
RET
// func writeBlocks(d *Digest, b []byte) int
//
// Assumes len(b) >= 32.
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
LDP primes<>(SB), (prime1, prime2)
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, len)
LDP b_base+8(FP), (p, n)
blocksLoop()
blockLoop()
// Store updated state.
STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest)
BIC $31, len
MOVD len, ret+32(FP)
BIC $31, n
MOVD n, ret+32(FP)
RET
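
For reference, the assembly changes above only swap how Sum64 and writeBlocks are implemented (the blockLoop macro, the ·primes symbol, and the renamed finalize label); the exported API is unchanged. A minimal, hedged usage sketch follows, assuming this is the cespare/xxhash/v2 package (the hunks do not show the vendored file path, so the import path is an assumption):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hash; on amd64/arm64 this dispatches to the assembly Sum64 above.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming hash; writeBlocks consumes full 32-byte blocks of buffered input.
	d := xxhash.New()
	_, _ = d.Write(data)
	fmt.Printf("%016x\n", d.Sum64())
}
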

View file

@ -13,4 +13,4 @@ package xxhash
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int
func writeBlocks(s *Digest, b []byte) int

View file

@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
b = b[4:]
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}

View file

@ -314,9 +314,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
if size-startSize == 424242 {
panic("here")
}
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if size > cap(out) {
@ -427,8 +424,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
}
}
// Check if space for literals
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}

View file

@ -148,7 +148,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {

View file

@ -320,10 +320,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -617,10 +613,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -897,10 +889,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -1152,10 +1140,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1389,8 +1373,7 @@ loop_finished:
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@ -1402,8 +1385,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@ -1747,8 +1729,7 @@ loop_finished:
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@ -1760,8 +1741,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET

View file

@ -36,9 +36,6 @@ const forcePreDef = false
// zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
@ -75,7 +72,6 @@ var (
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
// For the time being dictionaries are not supported.
ErrUnknownDictionary = errors.New("unknown dictionary")
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) {
}
}
// matchLen returns the maximum length.
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
func matchLen(a, b []byte) int {
b = b[:len(a)]
for i := 0; i < len(a)-7; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
return i + (bits.TrailingZeros64(diff) >> 3)
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
checked := (len(a) >> 3) << 3
a = a[checked:]
b = b[checked:]
for i := range a {
if a[i] != b[i] {
return i + checked
break
}
n++
}
return len(a) + checked
return n
}
func load3232(b []byte, i int32) uint32 {
@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type byter interface {
Bytes() []byte
Len() int
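
The rewritten matchLen above drops the load64 helper in favor of encoding/binary plus a slice-advancing loop. A self-contained sketch of the same technique (a standalone illustration, not the vendored file; it adds a length check that the vendored version avoids by requiring a to be the shorter input):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b,
// comparing eight bytes at a time and finishing byte by byte.
func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// The lowest set bit marks the first differing byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("zstandard"), []byte("zstdlib"))) // 3
}
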

View file

@ -12,10 +12,12 @@ type Spec struct {
Root *Root `json:"root,omitempty"`
// Hostname configures the container's hostname.
Hostname string `json:"hostname,omitempty"`
// Domainname configures the container's domainname.
Domainname string `json:"domainname,omitempty"`
// Mounts configures additional mounts (on top of Root).
Mounts []Mount `json:"mounts,omitempty"`
// Hooks configures callbacks for container lifecycle events.
Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"`
Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"`
// Annotations contains arbitrary metadata for the container.
Annotations map[string]string `json:"annotations,omitempty"`
@ -27,6 +29,8 @@ type Spec struct {
Windows *Windows `json:"windows,omitempty" platform:"windows"`
// VM specifies configuration for virtual-machine-based containers.
VM *VM `json:"vm,omitempty" platform:"vm"`
// ZOS is platform-specific configuration for z/OS based containers.
ZOS *ZOS `json:"zos,omitempty" platform:"zos"`
}
// Process contains information to start a specific application inside the container.
@ -49,7 +53,7 @@ type Process struct {
// Capabilities are Linux capabilities that are kept for the process.
Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"`
// Rlimits specifies rlimit options to apply to the process.
Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"`
Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"`
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"`
// ApparmorProfile specifies the apparmor profile for the container.
@ -86,11 +90,11 @@ type Box struct {
// User specifies specific user (and group) information for the container process.
type User struct {
// UID is the user id.
UID uint32 `json:"uid" platform:"linux,solaris"`
UID uint32 `json:"uid" platform:"linux,solaris,zos"`
// GID is the group id.
GID uint32 `json:"gid" platform:"linux,solaris"`
GID uint32 `json:"gid" platform:"linux,solaris,zos"`
// Umask is the umask for the init process.
Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"`
Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"`
// AdditionalGids are additional group ids set for the container's process.
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
// Username is the user name.
@ -110,11 +114,16 @@ type Mount struct {
// Destination is the absolute path where the mount will be placed in the container.
Destination string `json:"destination"`
// Type specifies the mount kind.
Type string `json:"type,omitempty" platform:"linux,solaris"`
Type string `json:"type,omitempty" platform:"linux,solaris,zos"`
// Source specifies the source path of the mount.
Source string `json:"source,omitempty"`
// Options are fstab style mount options.
Options []string `json:"options,omitempty"`
// UID/GID mappings used for changing file owners w/o calling chown, fs should support it.
// Every mount point could have its own mapping.
UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"`
GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"`
}
// Hook specifies a command that is run at a particular event in the lifecycle of a container
@ -178,7 +187,7 @@ type Linux struct {
// MountLabel specifies the selinux context for the mounts in the container.
MountLabel string `json:"mountLabel,omitempty"`
// IntelRdt contains Intel Resource Director Technology (RDT) information for
// handling resource constraints (e.g., L3 cache, memory bandwidth) for the container
// handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
// Personality contains configuration for the Linux personality syscall
Personality *LinuxPersonality `json:"personality,omitempty"`
@ -250,8 +259,8 @@ type LinuxInterfacePriority struct {
Priority uint32 `json:"priority"`
}
// linuxBlockIODevice holds major:minor format supported in blkio cgroup
type linuxBlockIODevice struct {
// LinuxBlockIODevice holds major:minor format supported in blkio cgroup
type LinuxBlockIODevice struct {
// Major is the device's major number.
Major int64 `json:"major"`
// Minor is the device's minor number.
@ -260,7 +269,7 @@ type linuxBlockIODevice struct {
// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice
type LinuxWeightDevice struct {
linuxBlockIODevice
LinuxBlockIODevice
// Weight is the bandwidth rate for the device.
Weight *uint16 `json:"weight,omitempty"`
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
@ -269,7 +278,7 @@ type LinuxWeightDevice struct {
// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair
type LinuxThrottleDevice struct {
linuxBlockIODevice
LinuxBlockIODevice
// Rate is the IO rate limit per cgroup per device
Rate uint64 `json:"rate"`
}
@ -310,6 +319,10 @@ type LinuxMemory struct {
DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
// Enables hierarchical memory accounting
UseHierarchy *bool `json:"useHierarchy,omitempty"`
// CheckBeforeUpdate enables checking if a new memory limit is lower
// than the current usage during update, and if so, rejecting the new
// limit.
CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"`
}
// LinuxCPU for Linux cgroup 'cpu' resource management
@ -318,6 +331,9 @@ type LinuxCPU struct {
Shares *uint64 `json:"shares,omitempty"`
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
Quota *int64 `json:"quota,omitempty"`
// CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a
// given period.
Burst *uint64 `json:"burst,omitempty"`
// CPU period to be used for hardcapping (in usecs).
Period *uint64 `json:"period,omitempty"`
// How much time realtime scheduling may use (in usecs).
@ -328,6 +344,8 @@ type LinuxCPU struct {
Cpus string `json:"cpus,omitempty"`
// List of memory nodes in the cpuset. Default is to use any available memory node.
Mems string `json:"mems,omitempty"`
// cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE.
Idle *int64 `json:"idle,omitempty"`
}
// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
@ -522,11 +540,21 @@ type WindowsMemoryResources struct {
// WindowsCPUResources contains CPU resource management settings.
type WindowsCPUResources struct {
// Number of CPUs available to the container.
// Count is the number of CPUs available to the container. It represents the
// fraction of the configured processor `count` in a container in relation
// to the processors available in the host. The fraction ultimately
// determines the portion of processor cycles that the threads in a
// container can use during each scheduling interval, as the number of
// cycles per 10,000 cycles.
Count *uint64 `json:"count,omitempty"`
// CPU shares (relative weight to other containers with cpu shares).
// Shares limits the share of processor time given to the container relative
// to other workloads on the processor. The processor `shares` (`weight` at
// the platform level) is a value between 0 and 10000.
Shares *uint16 `json:"shares,omitempty"`
// Specifies the portion of processor cycles that this container can use as a percentage times 100.
// Maximum determines the portion of processor cycles that the threads in a
// container can use during each scheduling interval, as the number of
// cycles per 10,000 cycles. Set processor `maximum` to a percentage times
// 100.
Maximum *uint16 `json:"maximum,omitempty"`
}
@ -613,6 +641,23 @@ type Arch string
// LinuxSeccompFlag is a flag to pass to seccomp(2).
type LinuxSeccompFlag string
const (
// LinuxSeccompFlagLog is a seccomp flag to request all returned
// actions except SECCOMP_RET_ALLOW to be logged. An administrator may
// override this filter flag by preventing specific actions from being
// logged via the /proc/sys/kernel/seccomp/actions_logged file. (since
// Linux 4.14)
LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG"
// LinuxSeccompFlagSpecAllow can be used to disable Speculative Store
// Bypass mitigation. (since Linux 4.17)
LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW"
// LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait
// killable semantics. (since Linux 5.19)
LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV"
)
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
@ -683,8 +728,9 @@ type LinuxSyscall struct {
Args []LinuxSeccompArg `json:"args,omitempty"`
}
// LinuxIntelRdt has container runtime resource constraints for Intel RDT
// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel
// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA
// features and flags enabling Intel RDT CMT and MBM features.
// Intel RDT features are available in Linux 4.14 and newer kernel versions.
type LinuxIntelRdt struct {
// The identity for RDT Class of Service
ClosID string `json:"closID,omitempty"`
@ -697,4 +743,36 @@ type LinuxIntelRdt struct {
// The unit of memory bandwidth is specified in "percentages" by
// default, and in "MBps" if MBA Software Controller is enabled.
MemBwSchema string `json:"memBwSchema,omitempty"`
// EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of
// the last-level cache (LLC) occupancy for the container.
EnableCMT bool `json:"enableCMT,omitempty"`
// EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of
// total and local memory bandwidth for the container.
EnableMBM bool `json:"enableMBM,omitempty"`
}
// ZOS contains platform-specific configuration for z/OS based containers.
type ZOS struct {
// Devices are a list of device nodes that are created for the container
Devices []ZOSDevice `json:"devices,omitempty"`
}
// ZOSDevice represents the mknod information for a z/OS special device file
type ZOSDevice struct {
// Path to the device.
Path string `json:"path"`
// Device type, block, char, etc.
Type string `json:"type"`
// Major is the device's major number.
Major int64 `json:"major"`
// Minor is the device's minor number.
Minor int64 `json:"minor"`
// FileMode permission bits for the device.
FileMode *os.FileMode `json:"fileMode,omitempty"`
// UID of the device.
UID *uint32 `json:"uid,omitempty"`
// Gid of the device.
GID *uint32 `json:"gid,omitempty"`
}
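
The runtime-spec bump (to 1.1.0-rc.1, per the version.go hunk that follows) adds the fields touched above. A hedged sketch exercising a few of them, with purely illustrative values for the hostname, domainname, and CPU limits:

package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	burst := uint64(20000) // CPU hardcap burst limit in usecs (new in 1.1)
	idle := int64(0)       // 0: default behavior, 1: SCHED_IDLE (new in 1.1)

	spec := specs.Spec{
		Version:    specs.Version,
		Hostname:   "demo",
		Domainname: "example.internal", // new in 1.1: container domainname
		Linux: &specs.Linux{
			Resources: &specs.LinuxResources{
				CPU: &specs.LinuxCPU{
					Burst: &burst,
					Idle:  &idle,
				},
			},
		},
	}

	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
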

View file

@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 1
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 2
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = "-rc.1"
)
// Version is the specification version that the package types support.

View file

@ -78,6 +78,9 @@ func ReleaseLabel(label string) error {
// Deprecated: use selinux.DupSecOpt
var DupSecOpt = selinux.DupSecOpt
// FormatMountLabel returns a string to be used by the mount command. Using
// the SELinux `context` mount option. Changing labels of files on mount
// points with this option can never be changed.
// FormatMountLabel returns a string to be used by the mount command.
// The format of this string will be used to alter the labeling of the mountpoint.
// The string returned is suitable to be used as the options field of the mount command.
@ -85,12 +88,27 @@ var DupSecOpt = selinux.DupSecOpt
// the first parameter. Second parameter is the label that you wish to apply
// to all content in the mount point.
func FormatMountLabel(src, mountLabel string) string {
return FormatMountLabelByType(src, mountLabel, "context")
}
// FormatMountLabelByType returns a string to be used by the mount command.
// Allow caller to specify the mount options. For example using the SELinux
// `fscontext` mount option would allow certain container processes to change
// labels of files created on the mount points, where as `context` option does
// not.
// FormatMountLabelByType returns a string to be used by the mount command.
// The format of this string will be used to alter the labeling of the mountpoint.
// The string returned is suitable to be used as the options field of the mount command.
// If you need to have additional mount point options, you can pass them in as
// the first parameter. Second parameter is the label that you wish to apply
// to all content in the mount point.
func FormatMountLabelByType(src, mountLabel, contextType string) string {
if mountLabel != "" {
switch src {
case "":
src = fmt.Sprintf("context=%q", mountLabel)
src = fmt.Sprintf("%s=%q", contextType, mountLabel)
default:
src = fmt.Sprintf("%s,context=%q", src, mountLabel)
src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel)
}
}
return src
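
The exported FormatMountLabelByType above lets callers choose between the `context` and `fscontext` mount options, while FormatMountLabel keeps its old behaviour by delegating with "context". A hedged usage sketch (the label value is illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	mountLabel := "system_u:object_r:container_file_t:s0:c1,c2"

	// Same as before: files created on the mount cannot be relabelled.
	fmt.Println(label.FormatMountLabel("defaults", mountLabel))
	// defaults,context="system_u:object_r:container_file_t:s0:c1,c2"

	// fscontext allows suitably privileged processes in the container
	// to relabel files created on the mount point.
	fmt.Println(label.FormatMountLabelByType("defaults", mountLabel, "fscontext"))
	// defaults,fscontext="system_u:object_r:container_file_t:s0:c1,c2"
}
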

View file

@ -1,34 +0,0 @@
//go:build linux && go1.16
// +build linux,go1.16
package selinux
import (
"errors"
"io/fs"
"os"
"github.com/opencontainers/selinux/pkg/pwalkdir"
)
func rchcon(fpath, label string) error {
fastMode := false
// If the current label matches the new label, assume
// other labels are correct.
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
fastMode = true
}
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
if fastMode {
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
return nil
}
}
e := lSetFileLabel(p, label)
// Walk a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil
}
return e
})
}

View file

@ -1,22 +0,0 @@
//go:build linux && !go1.16
// +build linux,!go1.16
package selinux
import (
"errors"
"os"
"github.com/opencontainers/selinux/pkg/pwalk"
)
func rchcon(fpath, label string) error {
return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
e := lSetFileLabel(p, label)
// Walk a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil
}
return e
})
}

View file

@ -23,8 +23,13 @@ var (
// ErrEmptyPath is returned when an empty path has been specified.
ErrEmptyPath = errors.New("empty path")
// ErrInvalidLabel is returned when an invalid label is specified.
ErrInvalidLabel = errors.New("invalid Label")
// InvalidLabel is returned when an invalid label is specified.
InvalidLabel = errors.New("Invalid Label")
//
// Deprecated: use [ErrInvalidLabel].
InvalidLabel = ErrInvalidLabel
// ErrIncomparable is returned when two levels are not comparable
ErrIncomparable = errors.New("incomparable levels")
@ -144,7 +149,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) {
// of the program is finished to guarantee another goroutine does not migrate to the current
// thread before execution is complete.
func SetExecLabel(label string) error {
return setExecLabel(label)
return writeCon(attrPath("exec"), label)
}
// SetTaskLabel sets the SELinux label for the current thread, or an error.
@ -152,21 +157,21 @@ func SetExecLabel(label string) error {
// be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee
// the current thread does not run in a new mislabeled thread.
func SetTaskLabel(label string) error {
return setTaskLabel(label)
return writeCon(attrPath("current"), label)
}
// SetSocketLabel takes a process label and tells the kernel to assign the
// label to the next socket that gets created. Calls to SetSocketLabel
// should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until
// the the socket is created to guarantee another goroutine does not migrate
// the socket is created to guarantee another goroutine does not migrate
// to the current thread before execution is complete.
func SetSocketLabel(label string) error {
return setSocketLabel(label)
return writeCon(attrPath("sockcreate"), label)
}
// SocketLabel retrieves the current socket label setting
func SocketLabel() (string, error) {
return socketLabel()
return readCon(attrPath("sockcreate"))
}
// PeerLabel retrieves the label of the client on the other side of a socket
@ -185,7 +190,7 @@ func SetKeyLabel(label string) error {
// KeyLabel retrieves the current kernel keyring label setting
func KeyLabel() (string, error) {
return keyLabel()
return readCon("/proc/self/attr/keycreate")
}
// Get returns the Context as a string
@ -208,6 +213,11 @@ func ReserveLabel(label string) {
reserveLabel(label)
}
// MLSEnabled checks if MLS is enabled.
func MLSEnabled() bool {
return isMLSEnabled()
}
// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
func EnforceMode() int {
return enforceMode()
@ -220,7 +230,7 @@ func SetEnforceMode(mode int) error {
}
// DefaultEnforceMode returns the systems default SELinux mode Enforcing,
// Permissive or Disabled. Note this is is just the default at boot time.
// Permissive or Disabled. Note this is just the default at boot time.
// EnforceMode tells you the systems current mode.
func DefaultEnforceMode() int {
return defaultEnforceMode()
@ -266,7 +276,7 @@ func CopyLevel(src, dest string) (string, error) {
return copyLevel(src, dest)
}
// Chcon changes the fpath file object to the SELinux label label.
// Chcon changes the fpath file object to the SELinux label.
// If fpath is a directory and recurse is true, then Chcon walks the
// directory tree setting the label.
//
@ -284,7 +294,7 @@ func DupSecOpt(src string) ([]string, error) {
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
return disableSecOpt()
return []string{"disable"}
}
// GetDefaultContextWithLevel gets a single context for the specified SELinux user
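
These wrappers now write the thread attribute files directly and gain the MLSEnabled accessor. A hedged sketch of typical use on Linux (the non-Linux stubs further down make these calls no-ops; the label string is illustrative):

package main

import (
	"fmt"
	"runtime"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	fmt.Println("SELinux enforcing:", selinux.EnforceMode() == selinux.Enforcing)
	fmt.Println("MLS enabled:", selinux.MLSEnabled()) // accessor added above

	// Per the doc comment above, pin the goroutine to its OS thread while
	// the per-thread exec label is in effect.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	if err := selinux.SetExecLabel("system_u:system_r:container_t:s0:c1,c2"); err != nil {
		fmt.Println("SetExecLabel:", err)
	}
}
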

View file

@ -8,16 +8,16 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"io/fs"
"math/big"
"os"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/opencontainers/selinux/pkg/pwalkdir"
"golang.org/x/sys/unix"
)
@ -35,17 +35,17 @@ const (
)
type selinuxState struct {
mcsList map[string]bool
selinuxfs string
selinuxfsOnce sync.Once
enabledSet bool
enabled bool
selinuxfsOnce sync.Once
selinuxfs string
mcsList map[string]bool
sync.Mutex
}
type level struct {
sens uint
cats *big.Int
sens uint
}
type mlsRange struct {
@ -54,10 +54,10 @@ type mlsRange struct {
}
type defaultSECtx struct {
user, level, scon string
userRdr, defaultRdr io.Reader
verifier func(string) error
userRdr io.Reader
verifier func(string) error
defaultRdr io.Reader
user, level, scon string
}
type levelItem byte
@ -155,7 +155,7 @@ func findSELinuxfs() string {
}
// check if selinuxfs is available before going the slow path
fs, err := ioutil.ReadFile("/proc/filesystems")
fs, err := os.ReadFile("/proc/filesystems")
if err != nil {
return ""
}
@ -292,7 +292,7 @@ func readCon(fpath string) (string, error) {
}
func readConFd(in *os.File) (string, error) {
data, err := ioutil.ReadAll(in)
data, err := io.ReadAll(in)
if err != nil {
return "", err
}
@ -305,7 +305,7 @@ func classIndex(class string) (int, error) {
permpath := fmt.Sprintf("class/%s/index", class)
indexpath := filepath.Join(getSelinuxMountPoint(), permpath)
indexB, err := ioutil.ReadFile(indexpath)
indexB, err := os.ReadFile(indexpath)
if err != nil {
return -1, err
}
@ -391,21 +391,19 @@ func lFileLabel(fpath string) (string, error) {
return string(label), nil
}
// setFSCreateLabel tells kernel the label to create all file system objects
// created by this task. Setting label="" to return to default.
func setFSCreateLabel(label string) error {
return writeAttr("fscreate", label)
return writeCon(attrPath("fscreate"), label)
}
// fsCreateLabel returns the default label which the kernel is using
// for file system objects created by this task. "" indicates default.
func fsCreateLabel() (string, error) {
return readAttr("fscreate")
return readCon(attrPath("fscreate"))
}
// currentLabel returns the SELinux label of the current process thread, or an error.
func currentLabel() (string, error) {
return readAttr("current")
return readCon(attrPath("current"))
}
// pidLabel returns the SELinux label of the given pid, or an error.
@ -416,7 +414,7 @@ func pidLabel(pid int) (string, error) {
// ExecLabel returns the SELinux label that the kernel will use for any programs
// that are executed by the current process thread, or an error.
func execLabel() (string, error) {
return readAttr("exec")
return readCon(attrPath("exec"))
}
func writeCon(fpath, val string) error {
@ -462,18 +460,10 @@ func attrPath(attr string) string {
})
if haveThreadSelf {
return path.Join(threadSelfPrefix, attr)
return filepath.Join(threadSelfPrefix, attr)
}
return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr)
}
func readAttr(attr string) (string, error) {
return readCon(attrPath(attr))
}
func writeAttr(attr, val string) error {
return writeCon(attrPath(attr), val)
return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr)
}
// canonicalizeContext takes a context string and writes it to the kernel
@ -560,30 +550,30 @@ func (l *level) parseLevel(levelStr string) error {
// rangeStrToMLSRange marshals a string representation of a range.
func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {
mlsRange := &mlsRange{}
levelSlice := strings.SplitN(rangeStr, "-", 2)
r := &mlsRange{}
l := strings.SplitN(rangeStr, "-", 2)
switch len(levelSlice) {
switch len(l) {
// rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023
case 2:
mlsRange.high = &level{}
if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil {
return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err)
r.high = &level{}
if err := r.high.parseLevel(l[1]); err != nil {
return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err)
}
fallthrough
// rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023
case 1:
mlsRange.low = &level{}
if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil {
return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err)
r.low = &level{}
if err := r.low.parseLevel(l[0]); err != nil {
return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err)
}
}
if mlsRange.high == nil {
mlsRange.high = mlsRange.low
if r.high == nil {
r.high = r.low
}
return mlsRange, nil
return r, nil
}
// bitsetToStr takes a category bitset and returns it in the
@ -617,17 +607,17 @@ func bitsetToStr(c *big.Int) string {
return str
}
func (l1 *level) equal(l2 *level) bool {
if l2 == nil || l1 == nil {
return l1 == l2
func (l *level) equal(l2 *level) bool {
if l2 == nil || l == nil {
return l == l2
}
if l1.sens != l2.sens {
if l2.sens != l.sens {
return false
}
if l2.cats == nil || l1.cats == nil {
return l2.cats == l1.cats
if l2.cats == nil || l.cats == nil {
return l2.cats == l.cats
}
return l1.cats.Cmp(l2.cats) == 0
return l.cats.Cmp(l2.cats) == 0
}
// String returns an mlsRange as a string.
@ -721,36 +711,13 @@ func readWriteCon(fpath string, val string) (string, error) {
return readConFd(f)
}
// setExecLabel sets the SELinux label that the kernel will use for any programs
// that are executed by the current process thread, or an error.
func setExecLabel(label string) error {
return writeAttr("exec", label)
}
// setTaskLabel sets the SELinux label for the current thread, or an error.
// This requires the dyntransition permission.
func setTaskLabel(label string) error {
return writeAttr("current", label)
}
// setSocketLabel takes a process label and tells the kernel to assign the
// label to the next socket that gets created
func setSocketLabel(label string) error {
return writeAttr("sockcreate", label)
}
// socketLabel retrieves the current socket label setting
func socketLabel() (string, error) {
return readAttr("sockcreate")
}
// peerLabel retrieves the label of the client on the other side of a socket
func peerLabel(fd uintptr) (string, error) {
label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC)
if err != nil {
return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err}
}
return label, nil
return l, nil
}
// setKeyLabel takes a process label and tells the kernel to assign the
@ -766,15 +733,10 @@ func setKeyLabel(label string) error {
return err
}
// keyLabel retrieves the current kernel keyring label setting
func keyLabel() (string, error) {
return readCon("/proc/self/attr/keycreate")
}
// get returns the Context as a string
func (c Context) get() string {
if level := c["level"]; level != "" {
return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level
if l := c["level"]; l != "" {
return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l
}
return c["user"] + ":" + c["role"] + ":" + c["type"]
}
@ -786,7 +748,7 @@ func newContext(label string) (Context, error) {
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
if len(con) < 3 {
return c, InvalidLabel
return c, ErrInvalidLabel
}
c["user"] = con[0]
c["role"] = con[1]
@ -816,14 +778,23 @@ func reserveLabel(label string) {
}
func selinuxEnforcePath() string {
return path.Join(getSelinuxMountPoint(), "enforce")
return filepath.Join(getSelinuxMountPoint(), "enforce")
}
// isMLSEnabled checks if MLS is enabled.
func isMLSEnabled() bool {
enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls"))
if err != nil {
return false
}
return bytes.Equal(enabledB, []byte{'1'})
}
// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
func enforceMode() int {
var enforce int
enforceB, err := ioutil.ReadFile(selinuxEnforcePath())
enforceB, err := os.ReadFile(selinuxEnforcePath())
if err != nil {
return -1
}
@ -837,11 +808,12 @@ func enforceMode() int {
// setEnforceMode sets the current SELinux mode Enforcing, Permissive.
// Disabled is not valid, since this needs to be set at boot time.
func setEnforceMode(mode int) error {
return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
//nolint:gosec // ignore G306: permissions to be 0600 or less.
return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644)
}
// defaultEnforceMode returns the systems default SELinux mode Enforcing,
// Permissive or Disabled. Note this is is just the default at boot time.
// Permissive or Disabled. Note this is just the default at boot time.
// EnforceMode tells you the systems current mode.
func defaultEnforceMode() int {
switch readConfig(selinuxTag) {
@ -941,7 +913,7 @@ func openContextFile() (*os.File, error) {
if f, err := os.Open(contextFile); err == nil {
return f, nil
}
return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts"))
return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts"))
}
func loadLabels() {
@ -1044,7 +1016,8 @@ func addMcs(processLabel, fileLabel string) (string, string) {
// securityCheckContext validates that the SELinux label is understood by the kernel
func securityCheckContext(val string) error {
return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
//nolint:gosec // ignore G306: permissions to be 0600 or less.
return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644)
}
// copyLevel returns a label with the MLS/MCS level from src label replaced on
@ -1073,7 +1046,7 @@ func copyLevel(src, dest string) (string, error) {
return tcon.Get(), nil
}
// chcon changes the fpath file object to the SELinux label label.
// chcon changes the fpath file object to the SELinux label.
// If fpath is a directory and recurse is true, then chcon walks the
// directory tree setting the label.
func chcon(fpath string, label string, recurse bool) error {
@ -1084,7 +1057,7 @@ func chcon(fpath string, label string, recurse bool) error {
return nil
}
exclude_paths := map[string]bool{
excludePaths := map[string]bool{
"/": true,
"/bin": true,
"/boot": true,
@ -1112,19 +1085,19 @@ func chcon(fpath string, label string, recurse bool) error {
}
if home := os.Getenv("HOME"); home != "" {
exclude_paths[home] = true
excludePaths[home] = true
}
if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
if usr, err := user.Lookup(sudoUser); err == nil {
exclude_paths[usr.HomeDir] = true
excludePaths[usr.HomeDir] = true
}
}
if fpath != "/" {
fpath = strings.TrimSuffix(fpath, "/")
}
if exclude_paths[fpath] {
if excludePaths[fpath] {
return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath)
}
@ -1152,6 +1125,28 @@ func chcon(fpath string, label string, recurse bool) error {
return rchcon(fpath, label)
}
func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity
fastMode := false
// If the current label matches the new label, assume
// other labels are correct.
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
fastMode = true
}
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
if fastMode {
if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
return nil
}
}
err := lSetFileLabel(p, label)
// Walking a file tree can race with removal, so ignore ENOENT.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
})
}
// dupSecOpt takes an SELinux process label and returns security options that
// can be used to set the SELinux Type and Level for future container processes.
func dupSecOpt(src string) ([]string, error) {
@ -1180,12 +1175,6 @@ func dupSecOpt(src string) ([]string, error) {
return dup, nil
}
// disableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func disableSecOpt() []string {
return []string{"disable"}
}
// findUserInContext scans the reader for a valid SELinux context
// match that is verified with the verifier. Invalid contexts are
// skipped. It returns a matched context or an empty string if no

View file

@ -3,9 +3,20 @@
package selinux
func setDisabled() {
func attrPath(string) string {
return ""
}
func readCon(fpath string) (string, error) {
return "", nil
}
func writeCon(string, string) error {
return nil
}
func setDisabled() {}
func getEnabled() bool {
return false
}
@ -62,22 +73,6 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) {
return "", nil
}
func setExecLabel(label string) error {
return nil
}
func setTaskLabel(label string) error {
return nil
}
func setSocketLabel(label string) error {
return nil
}
func socketLabel() (string, error) {
return "", nil
}
func peerLabel(fd uintptr) (string, error) {
return "", nil
}
@ -86,17 +81,12 @@ func setKeyLabel(label string) error {
return nil
}
func keyLabel() (string, error) {
return "", nil
}
func (c Context) get() string {
return ""
}
func newContext(label string) (Context, error) {
c := make(Context)
return c, nil
return Context{}, nil
}
func clearLabels() {
@ -105,6 +95,10 @@ func clearLabels() {
func reserveLabel(label string) {
}
func isMLSEnabled() bool {
return false
}
func enforceMode() int {
return Disabled
}
@ -152,10 +146,6 @@ func dupSecOpt(src string) ([]string, error) {
return nil, nil
}
func disableSecOpt() []string {
return []string{"disable"}
}
func getDefaultContextWithLevel(user, level, scon string) (string, error) {
return "", nil
}

View file

@ -1,48 +0,0 @@
## pwalk: parallel implementation of filepath.Walk
This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk)
which may speed it up by calling multiple callback functions (WalkFunc) in parallel,
utilizing goroutines.
By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
This can be changed by using WalkN function which has the additional
parameter, specifying the number of goroutines (concurrency).
### pwalk vs pwalkdir
This package is deprecated in favor of
[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
which is faster, but requires at least Go 1.16.
### Caveats
Please note the following limitations of this code:
* Unlike filepath.Walk, the order of calls is non-deterministic;
* Only primitive error handling is supported:
* filepath.SkipDir is not supported;
* no errors are ever passed to WalkFunc;
* once any error is returned from any WalkFunc instance, no more new calls
to WalkFunc are made, and the error is returned to the caller of Walk;
* if more than one walkFunc instance will return an error, only one
of such errors will be propagated and returned by Walk, others
will be silently discarded.
### Documentation
For the official documentation, see
https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc
### Benchmarks
For a WalkFunc that consists solely of the return statement, this
implementation is about 10% slower than the standard library's
filepath.Walk.
Otherwise (if a WalkFunc is doing something) this is usually faster,
except when the WalkN(..., 1) is used.

View file

@ -1,115 +0,0 @@
package pwalk
import (
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
)
type WalkFunc = filepath.WalkFunc
// Walk is a wrapper for filepath.Walk which can call multiple walkFn
// in parallel, allowing to handle each item concurrently. A maximum of
// twice the runtime.NumCPU() walkFn will be called at any one time.
// If you want to change the maximum, use WalkN instead.
//
// The order of calls is non-deterministic.
//
// Note that this implementation only supports primitive error handling:
//
// - no errors are ever passed to walkFn;
//
// - once a walkFn returns any error, all further processing stops
// and the error is returned to the caller of Walk;
//
// - filepath.SkipDir is not supported;
//
// - if more than one walkFn instance will return an error, only one
// of such errors will be propagated and returned by Walk, others
// will be silently discarded.
func Walk(root string, walkFn WalkFunc) error {
return WalkN(root, walkFn, runtime.NumCPU()*2)
}
// WalkN is a wrapper for filepath.Walk which can call multiple walkFn
// in parallel, allowing to handle each item concurrently. A maximum of
// num walkFn will be called at any one time.
//
// Please see Walk documentation for caveats of using this function.
func WalkN(root string, walkFn WalkFunc, num int) error {
// make sure limit is sensible
if num < 1 {
return fmt.Errorf("walk(%q): num must be > 0", root)
}
files := make(chan *walkArgs, 2*num)
errCh := make(chan error, 1) // get the first error, ignore others
// Start walking a tree asap
var (
err error
wg sync.WaitGroup
rootLen = len(root)
rootEntry *walkArgs
)
wg.Add(1)
go func() {
err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
if err != nil {
close(files)
return err
}
if len(p) == rootLen {
// Root entry is processed separately below.
rootEntry = &walkArgs{path: p, info: &info}
return nil
}
// add a file to the queue unless a callback sent an error
select {
case e := <-errCh:
close(files)
return e
default:
files <- &walkArgs{path: p, info: &info}
return nil
}
})
if err == nil {
close(files)
}
wg.Done()
}()
wg.Add(num)
for i := 0; i < num; i++ {
go func() {
for file := range files {
if e := walkFn(file.path, *file.info, nil); e != nil {
select {
case errCh <- e: // sent ok
default: // buffer full
}
}
}
wg.Done()
}()
}
wg.Wait()
if err == nil {
err = walkFn(rootEntry.path, *rootEntry.info, nil)
}
return err
}
// walkArgs holds the arguments that were passed to the Walk or WalkN
// functions.
type walkArgs struct {
path string
info *os.FileInfo
}

View file

@ -111,6 +111,6 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
// walkArgs holds the arguments that were passed to the Walk or WalkN
// functions.
type walkArgs struct {
path string
entry fs.DirEntry
path string
}
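
With the pwalk package removed above, pwalkdir is the remaining parallel walker; it keeps the same caveats (non-deterministic call order, no fs.SkipDir support, first error wins). A hedged usage sketch, with an illustrative root path:

package main

import (
	"fmt"
	"io/fs"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	// Walk runs the callback from multiple goroutines (2*NumCPU by default);
	// use WalkN to pick the concurrency explicitly.
	err := pwalkdir.Walk("/tmp", func(path string, d fs.DirEntry, _ error) error {
		fmt.Println(path, d.IsDir())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
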

View file

@ -24,6 +24,21 @@ var hwCap uint
var hwCap2 uint
func readHWCAP() error {
// For Go 1.21+, get auxv from the Go runtime.
if a := getAuxv(); len(a) > 0 {
for len(a) >= 2 {
tag, val := a[0], uint(a[1])
a = a[2:]
switch tag {
case _AT_HWCAP:
hwCap = val
case _AT_HWCAP2:
hwCap2 = val
}
}
return nil
}
buf, err := ioutil.ReadFile(procAuxv)
if err != nil {
// e.g. on android /proc/self/auxv is not accessible, so silently

16 vendor/golang.org/x/sys/cpu/runtime_auxv.go generated vendored Normal file
View file

@ -0,0 +1,16 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init)
// on platforms that use auxv.
var getAuxvFn func() []uintptr
func getAuxv() []uintptr {
if getAuxvFn == nil {
return nil
}
return getAuxvFn()
}

19 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
// +build go1.21
package cpu
import (
_ "unsafe" // for linkname
)
//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr
func init() {
getAuxvFn = runtime_getAuxv
}
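
The HWCAP bits read here (from the runtime's auxv on Go 1.21+, from /proc/self/auxv otherwise) feed the package's exported feature flags. A hedged example of consuming them:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Populated from HWCAP/HWCAP2 on linux/arm64 by the code above.
	fmt.Println("AES:  ", cpu.ARM64.HasAES)
	fmt.Println("SHA2: ", cpu.ARM64.HasSHA2)
	fmt.Println("ASIMD:", cpu.ARM64.HasASIMD)
}
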

View file

@ -63,7 +63,7 @@ func LookPath(file string) (string, error) {
}
func fixCmd(name string, cmd *exec.Cmd) {
if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) {
if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) {
// exec.Command was called with a bare binary name and
// exec.LookPath returned a path which is not absolute.
// Set cmd.lookPathErr and clear cmd.Path so that it

View file

@ -7,6 +7,12 @@
package execabs
import "os/exec"
func isGo119ErrDot(err error) bool {
return false
}
func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
return false
}

View file

@ -15,3 +15,7 @@ import (
func isGo119ErrDot(err error) bool {
return errors.Is(err, exec.ErrDot)
}
func isGo119ErrFieldSet(cmd *exec.Cmd) bool {
return cmd.Err != nil
}
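
isGo119ErrFieldSet lets fixCmd skip commands whose Err field was already populated by exec.Command on Go 1.19+. The package's purpose is unchanged; a hedged usage sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/execabs"
)

func main() {
	// Unlike os/exec on older Go versions, execabs never resolves a bare
	// command name to a binary in the current directory.
	cmd := execabs.Command("ls", "-l")
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println("run failed:", err)
		return
	}
	fmt.Print(string(out))
}
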

View file

@ -8,7 +8,6 @@
package unix
import (
"runtime"
"unsafe"
)
@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
// passing the integer value directly.
func IoctlSetPointerInt(fd int, req uint, value int) error {
v := int32(value)
return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
return ioctlPtr(fd, req, unsafe.Pointer(&v))
}
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error {
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}
// IoctlSetTermios performs an ioctl on fd with a *Termios.
@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// The req value will usually be TCSETA or TIOCSETA.
func IoctlSetTermios(fd int, req uint, value *Termios) error {
// TODO: if we get the chance, remove the req parameter.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}
// IoctlGetInt performs an ioctl operation which gets an integer value
@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
var value Termios
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
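
The change above is mechanical: the helpers now call ioctlPtr with an unsafe.Pointer argument instead of smuggling the pointer through a uintptr, avoiding the unsafe round-trip and the runtime.KeepAlive bookkeeping. Callers are unaffected; a hedged example:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetWinsize now goes through ioctlPtr internally; the public
	// signature is unchanged.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		fmt.Println("not a terminal:", err)
		return
	}
	fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
}
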

View file

@ -27,9 +27,7 @@ func IoctlSetInt(fd int, req uint, value int) error {
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
return err
return ioctlPtr(fd, req, unsafe.Pointer(value))
}
// IoctlSetTermios performs an ioctl on fd with a *Termios.
@ -51,13 +49,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}

View file

@ -7,6 +7,12 @@
package unix
import "unsafe"
func ptrace(request int, pid int, addr uintptr, data uintptr) error {
return ptrace1(request, pid, addr, data)
}
func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error {
return ptrace1Ptr(request, pid, addr, data)
}

View file

@ -7,6 +7,12 @@
package unix
import "unsafe"
func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
return ENOTSUP
}
func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
return ENOTSUP
}

View file

@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
break
}
}
bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
sa.Name = string(bytes)
sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil
case AF_INET:
@ -411,6 +409,7 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 }
func (w WaitStatus) TrapCause() int { return -1 }
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl
// fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX
// There is no way to create a custom fcntl and to keep //sys fcntl easily,
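
The sockaddr conversion above now uses unsafe.Slice (Go 1.17+) instead of casting through a fixed-size array pointer. A hedged, standalone sketch of the idiom, with an illustrative socket path:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// A C-style, NUL-padded path buffer as found in RawSockaddrUnix.Path.
	var path [108]int8
	copy((*[108]byte)(unsafe.Pointer(&path))[:], "/run/demo.sock")

	// Length of the name up to the first NUL byte.
	n := 0
	for n < len(path) && path[n] != 0 {
		n++
	}

	// unsafe.Slice builds a []byte view over the first n bytes in place.
	name := string(unsafe.Slice((*byte)(unsafe.Pointer(&path[0])), n))
	fmt.Println(name) // /run/demo.sock
}
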

View file

@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
break
}
}
bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
sa.Name = string(bytes)
sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil
case AF_INET:

View file

@ -14,7 +14,6 @@ package unix
import (
"fmt"
"runtime"
"syscall"
"unsafe"
)
@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) {
func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error {
err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo)))
runtime.KeepAlive(ctlInfo)
return err
return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo))
}
// IfreqMTU is struct ifreq used to get or set a network device's MTU.
@ -394,16 +392,14 @@ type IfreqMTU struct {
func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) {
var ifreq IfreqMTU
copy(ifreq.Name[:], ifname)
err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq)))
err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq))
return &ifreq, err
}
// IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU
// of the network device specified by ifreq.Name.
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq)))
runtime.KeepAlive(ifreq)
return err
return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
}
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL

View file

@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64

View file

@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT
//sys Lstat(path string, stat *Stat_t) (err error)
//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace
//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace
//sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, stat *Statfs_t) (err error)

View file

@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
}
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL

View file

@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
return
}
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
//sys ptrace(request int, pid int, addr uintptr, data int) (err error)
//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE
func PtraceAttach(pid int) (err error) {
return ptrace(PT_ATTACH, pid, 0, 0)
@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) {
}
func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) {
return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0)
return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0)
}
func PtraceGetRegs(pid int, regsout *Reg) (err error) {
return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0)
}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
}
if countin > 0 {
_ = out[:countin] // check bounds
ioDesc.Addr = &out[0]
} else if out != nil {
ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero))
}
ioDesc.SetLen(countin)
err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0)
return int(ioDesc.Len), err
}
func PtraceLwpEvents(pid int, enable int) (err error) {
return ptrace(PT_LWP_EVENTS, pid, 0, enable)
}
func PtraceLwpInfo(pid int, info uintptr) (err error) {
return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{})))
func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) {
return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info)))
}
func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
}
func PtraceSetRegs(pid int, regs *Reg) (err error) {
return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0)
return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0)
}
func PtraceSingleStep(pid int) (err error) {
return ptrace(PT_STEP, pid, 1, 0)
}
func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}
/*
* Exposed directly
*/
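
Dup3 is emulated above via fcntl with F_DUP2FD or F_DUP2FD_CLOEXEC, matching the Linux semantics (EINVAL when oldfd equals newfd or when flags other than O_CLOEXEC are set). A hedged usage sketch with an illustrative file and target descriptor:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Duplicate onto fd 10 and mark the copy close-on-exec in one step.
	if err := unix.Dup3(int(f.Fd()), 10, unix.O_CLOEXEC); err != nil {
		fmt.Println("dup3:", err)
		return
	}
	defer unix.Close(10)
	fmt.Println("duplicated", f.Fd(), "->", 10)
}
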

View file

@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func (d *PtraceIoDesc) SetLen(length int) {
d.Len = uint32(length)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint32(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0)
}

View file

@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func (d *PtraceIoDesc) SetLen(length int) {
d.Len = uint64(length)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
}
func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{
Op: int32(req),
Offs: offs,
Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
Len: uint64(countin),
}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0)
}

View file

@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func (d *PtraceIoDesc) SetLen(length int) {
d.Len = uint32(length)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint32(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}


@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint32(length)
}
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint64(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}


@@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint32(length)
}
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
@@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{
-		Op:   int32(req),
-		Offs: offs,
-		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
-		Len:  uint64(countin),
-	}
-	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
-	return int(ioDesc.Len), err
-}


@@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
	}
	return
}
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg)))
+	if r0 == -1 && er != nil {
+		err = er
+	}
+	return
+}
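ioctl gets the same treatment as ptrace: each port grows an ioctlPtr twin that takes unsafe.Pointer (via cgo here, via //sys stubs bound to SYS_IOCTL or libc.ioctl elsewhere in this diff), and the pointer-based helpers switch over to it, which is also what lets the runtime.KeepAlive calls disappear further down. For callers nothing changes; a typical pointer-backed helper still looks like this (Linux example; FIONREAD reports the bytes queued on a descriptor, and IoctlGetInt passes the address of its result buffer to the kernel, which is exactly the kind of call the ioctlPtr path is meant for):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Ask the kernel how many bytes are ready to read on stdin.
	n, err := unix.IoctlGetInt(int(os.Stdin.Fd()), unix.FIONREAD)
	if err != nil {
		fmt.Println("ioctl:", err)
		return
	}
	fmt.Println("bytes readable:", n)
}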


@@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
	for n < len(pp.Path) && pp.Path[n] != 0 {
		n++
	}
-	bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-	sa.Name = string(bytes)
+	sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil
case AF_INET:
@@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) {
	return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o)))
}
+func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error {
+	return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s))
+}
// Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html)
// KeyctlInt calls keyctl commands in which each argument is an int.
@@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) {
}
//sys	ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+//sys	ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE
func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
// The peek requests are machine-size oriented, so we wrap it
@@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
	// boundary.
	n := 0
	if addr%SizeofPtr != 0 {
-		err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0]))
if err != nil {
return 0, err
}
@@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro
	for len(out) > 0 {
		// We use an internal buffer to guarantee alignment.
		// It's not documented if this is necessary, but we're paranoid.
-		err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0]))
if err != nil {
return n, err
}
@@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
	n := 0
	if addr%SizeofPtr != 0 {
		var buf [SizeofPtr]byte
-		err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0]))
if err != nil {
return 0, err
}
@@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c
	// Trailing edge.
	if len(data) > 0 {
		var buf [SizeofPtr]byte
-		err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+		err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0]))
if err != nil {
return n, err
}
@@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) {
}
func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
}
func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
}
func PtraceSetOptions(pid int, options int) (err error) {
@@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) {
func PtraceGetEventMsg(pid int) (msg uint, err error) {
	var data _C_long
-	err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+	err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data))
msg = uint(data)
return
}
@@ -2154,6 +2158,14 @@ func isGroupMember(gid int) bool {
	return false
}
+func isCapDacOverrideSet() bool {
+	hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3}
+	data := [2]CapUserData{}
+	err := Capget(&hdr, &data[0])
+	return err == nil && data[0].Effective&(1<<CAP_DAC_OVERRIDE) != 0
+}
//sys faccessat(dirfd int, path string, mode uint32) (err error)
//sys Faccessat2(dirfd int, path string, mode uint32, flags int) (err error)
@@ -2189,6 +2201,12 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	var uid int
	if flags&AT_EACCESS != 0 {
		uid = Geteuid()
+		if uid != 0 && isCapDacOverrideSet() {
+			// If CAP_DAC_OVERRIDE is set, file access check is
+			// done by the kernel in the same way as for root
+			// (see generic_permission() in the Linux sources).
+			uid = 0
+		}
} else {
uid = Getuid()
}
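The added branch tightens the userspace emulation of AT_EACCESS in Faccessat: when the faccessat2 syscall is unavailable, the access check is redone here with the effective IDs, and a non-root caller holding CAP_DAC_OVERRIDE is now treated like root, mirroring generic_permission() in the kernel. For reference, a typical effective-credentials check looks like this (path and mode are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// AT_EACCESS asks for the check against the effective UID/GID;
	// with flags == 0 the real IDs would be used instead.
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/shadow", unix.R_OK, unix.AT_EACCESS)
	switch err {
	case nil:
		fmt.Println("readable with effective credentials")
	case unix.EACCES:
		fmt.Println("permission denied")
	default:
		fmt.Println("faccessat:", err)
	}
}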


@@ -13,7 +13,6 @@
package unix
import (
-	"runtime"
"syscall"
"unsafe"
)
@@ -178,13 +177,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
}
//sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) {
var value Ptmget
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	runtime.KeepAlive(value)
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}


@@ -152,6 +152,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
}
//sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL


@@ -408,8 +408,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
	for n < len(pp.Path) && pp.Path[n] != 0 {
		n++
	}
-	bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-	sa.Name = string(bytes)
+	sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil
case AF_INET:
@@ -547,21 +546,25 @@
*/
//sys	ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl
+//sys	ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl
func ioctl(fd int, req uint, arg uintptr) (err error) {
_, err = ioctlRet(fd, req, arg)
return err
}
-func IoctlSetTermio(fd int, req uint, value *Termio) error {
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-	runtime.KeepAlive(value)
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	_, err = ioctlPtrRet(fd, req, arg)
	return err
}
+func IoctlSetTermio(fd int, req uint, value *Termio) error {
+	return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
func IoctlGetTermio(fd int, req uint) (*Termio, error) {
var value Termio
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
@@ -1084,7 +1087,7 @@ func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) {
func IoctlSetString(fd int, req uint, val string) error {
	bs := make([]byte, len(val)+1)
	copy(bs[:len(bs)-1], val)
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0])))
+	err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0]))
runtime.KeepAlive(&bs[0])
return err
}
@@ -1118,7 +1121,7 @@ func (l *Lifreq) GetLifruUint() uint {
}
func IoctlLifreq(fd int, req uint, l *Lifreq) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(l)))
+	return ioctlPtr(fd, req, unsafe.Pointer(l))
}
// Strioctl Helpers
@@ -1129,5 +1132,5 @@ func (s *Strioctl) SetInt(i int) {
}
func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) {
-	return ioctlRet(fd, req, uintptr(unsafe.Pointer(s)))
+	return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}


@@ -139,8 +139,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) {
	for n < int(pp.Len) && pp.Path[n] != 0 {
		n++
	}
-	bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
-	sa.Name = string(bytes)
+	sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
return sa, nil
case AF_INET:
@@ -214,6 +213,7 @@ func (cmsg *Cmsghdr) SetLen(length int) {
//sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP
//sys	munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP
//sys	ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL
+//sys	ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL
//sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A
//sys Chdir(path string) (err error) = SYS___CHDIR_A


@@ -70,6 +70,7 @@ const (
	ALG_SET_DRBG_ENTROPY = 0x6
	ALG_SET_IV = 0x2
	ALG_SET_KEY = 0x1
+	ALG_SET_KEY_BY_KEY_SERIAL = 0x7
ALG_SET_OP = 0x3
ANON_INODE_FS_MAGIC = 0x9041934
ARPHRD_6LOWPAN = 0x339
@@ -774,6 +775,8 @@ const (
	DEVLINK_GENL_MCGRP_CONFIG_NAME = "config"
	DEVLINK_GENL_NAME = "devlink"
	DEVLINK_GENL_VERSION = 0x1
+	DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2
+	DEVLINK_PORT_FN_CAP_ROCE = 0x1
DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14
DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS = 0x3
DEVMEM_MAGIC = 0x454d444d
@@ -1262,6 +1265,8 @@ const (
	FSCRYPT_MODE_AES_256_CTS = 0x4
	FSCRYPT_MODE_AES_256_HCTR2 = 0xa
	FSCRYPT_MODE_AES_256_XTS = 0x1
+	FSCRYPT_MODE_SM4_CTS = 0x8
+	FSCRYPT_MODE_SM4_XTS = 0x7
FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2
FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3
FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0
@@ -1280,8 +1285,6 @@ const (
	FS_ENCRYPTION_MODE_AES_256_GCM = 0x2
	FS_ENCRYPTION_MODE_AES_256_XTS = 0x1
	FS_ENCRYPTION_MODE_INVALID = 0x0
-	FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8
-	FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7
FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617
FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a
FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616
@@ -1770,6 +1773,7 @@ const (
	LANDLOCK_ACCESS_FS_REFER = 0x2000
	LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10
	LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20
+	LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
LINUX_REBOOT_CMD_CAD_OFF = 0x0
@@ -1809,6 +1813,7 @@ const (
	LWTUNNEL_IP_OPT_GENEVE_MAX = 0x3
	LWTUNNEL_IP_OPT_VXLAN_MAX = 0x1
	MADV_COLD = 0x14
+	MADV_COLLAPSE = 0x19
MADV_DODUMP = 0x11
MADV_DOFORK = 0xb
MADV_DONTDUMP = 0x10
@@ -2163,6 +2168,7 @@ const (
	PACKET_FANOUT_DATA = 0x16
	PACKET_FANOUT_EBPF = 0x7
	PACKET_FANOUT_FLAG_DEFRAG = 0x8000
+	PACKET_FANOUT_FLAG_IGNORE_OUTGOING = 0x4000
PACKET_FANOUT_FLAG_ROLLOVER = 0x1000
PACKET_FANOUT_FLAG_UNIQUEID = 0x2000
PACKET_FANOUT_HASH = 0x0


@@ -15,12 +15,12 @@ type PtraceRegsArm struct {
// PtraceGetRegsArm fetches the registers used by arm binaries.
func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
}
// PtraceSetRegsArm sets the registers used by arm binaries.
func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
}
// PtraceRegsArm64 is the registers used by arm64 binaries.
@@ -33,10 +33,10 @@ type PtraceRegsArm64 struct {
// PtraceGetRegsArm64 fetches the registers used by arm64 binaries.
func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
}
// PtraceSetRegsArm64 sets the registers used by arm64 binaries.
func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
}


@@ -7,11 +7,11 @@ import "unsafe"
// PtraceGetRegSetArm64 fetches the registers used by arm64 binaries.
func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error {
	iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))}
-	return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+	return ptracePtr(PTRACE_GETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec))
}
// PtraceSetRegSetArm64 sets the registers used by arm64 binaries.
func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error {
	iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))}
-	return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+	return ptracePtr(PTRACE_SETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec))
}
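Unlike the fixed PTRACE_GETREGS/PTRACE_SETREGS wrappers, these regset helpers take an addr argument selecting the register set (an NT_* note type) and hand the kernel an Iovec describing the buffer; it is that iovec pointer which now flows through ptracePtr. A hedged usage sketch for reading the general-purpose registers of an already-attached, stopped arm64 tracee (pid is assumed to come from elsewhere):

package tracesketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func dumpRegs(pid int) error {
	var regs unix.PtraceRegsArm64
	// NT_PRSTATUS selects the general-purpose register set.
	if err := unix.PtraceGetRegSetArm64(pid, unix.NT_PRSTATUS, &regs); err != nil {
		return err
	}
	fmt.Printf("pc=%#x sp=%#x\n", regs.Pc, regs.Sp)
	return nil
}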


@@ -21,12 +21,12 @@ type PtraceRegsMips struct {
// PtraceGetRegsMips fetches the registers used by mips binaries.
func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
}
// PtraceSetRegsMips sets the registers used by mips binaries.
func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
}
// PtraceRegsMips64 is the registers used by mips64 binaries.
@@ -42,10 +42,10 @@ type PtraceRegsMips64 struct {
// PtraceGetRegsMips64 fetches the registers used by mips64 binaries.
func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error {
-	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+	return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout))
}
// PtraceSetRegsMips64 sets the registers used by mips64 binaries.
func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error {
-	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+	return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs))
}

Some files were not shown because too many files have changed in this diff.