diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e444053 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/libpod-5cc9284.tar.gz diff --git a/.podman.metadata b/.podman.metadata new file mode 100644 index 0000000..d1ef31e --- /dev/null +++ b/.podman.metadata @@ -0,0 +1 @@ +dd35f1a00ac7860feeaa77dd5a92bc7bb310b821 SOURCES/libpod-5cc9284.tar.gz diff --git a/SOURCES/497.patch b/SOURCES/497.patch new file mode 100644 index 0000000..e79307a --- /dev/null +++ b/SOURCES/497.patch @@ -0,0 +1,60 @@ +From a6fec757c8a17f3a5b92fb766b0f2eeb3b1a208a Mon Sep 17 00:00:00 2001 +From: Giuseppe Scrivano +Date: Thu, 19 Dec 2019 19:06:00 +0100 +Subject: [PATCH] store: keep graph lock during Mount + +This solves a race condition where a mountpoint is created without the +home mount being present. + +The cause is that another process could be calling the graph driver +cleanup as part of store.Shutdown() causing the unmount of the +driver home directory. + +The unmount could happen between the time the rlstore is retrieved and +the actual mount, causing the driver mount to be done without a home +mount below it. + +A third process then would re-create again the home mount, shadowing +the previous mount. + +Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1757845 + +Signed-off-by: Giuseppe Scrivano +--- + store.go | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/store.go b/store.go +index 65808b8a0..272153e51 100644 +--- a/vendor/github.com/containers/storage/store.go ++++ b/vendor/github.com/containers/storage/store.go +@@ -2479,6 +2479,10 @@ func (s *store) Mount(id, mountLabel string) (string, error) { + if err != nil { + return "", err + } ++ ++ s.graphLock.Lock() ++ defer s.graphLock.Unlock() ++ + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { +@@ -2486,6 +2490,18 @@ func (s *store) Mount(id, mountLabel string) (string, error) { + return "", err + } + } ++ ++ /* We need to make sure the home mount is present when the Mount is done. */ ++ if s.graphLock.TouchedSince(s.lastLoaded) { ++ s.graphDriver = nil ++ s.layerStore = nil ++ s.graphDriver, err = s.getGraphDriver() ++ if err != nil { ++ return "", err ++ } ++ s.lastLoaded = time.Now() ++ } ++ + if rlstore.Exists(id) { + options := drivers.MountOpts{ + MountLabel: mountLabel, diff --git a/SOURCES/CVE-2020-1702-1801929.patch b/SOURCES/CVE-2020-1702-1801929.patch new file mode 100644 index 0000000..01e1aca --- /dev/null +++ b/SOURCES/CVE-2020-1702-1801929.patch @@ -0,0 +1,13539 @@ +From 23d7b2d5c4281f54ffe351293f68fb5136013bcc Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Wed, 5 Feb 2020 14:55:48 +0100 +Subject: [PATCH 1/3] [v1.6] update containers/image + +Note that this includes fixes for +https://access.redhat.com/security/cve/CVE-2020-1702. 
+ +Signed-off-by: Valentin Rothberg +--- + go.mod | 2 +- + go.sum | 2 + + .../Microsoft/hcsshim/mksyscall_windows.go | 943 ---------- + .../image/v5/docker/docker_client.go | 6 +- + .../image/v5/docker/docker_image_dest.go | 3 +- + .../image/v5/docker/docker_image_src.go | 10 +- + .../image/v5/docker/tarfile/dest.go | 3 +- + .../containers/image/v5/docker/tarfile/src.go | 9 +- + .../image/v5/image/docker_schema2.go | 4 +- + .../containers/image/v5/image/oci.go | 4 +- + .../image/v5/internal/iolimits/iolimits.go | 60 + + .../image/v5/openshift/openshift.go | 4 +- + .../storage/pkg/archive/example_changes.go | 97 -- + .../docker/pkg/archive/example_changes.go | 97 -- + .../klauspost/compress/flate/gen.go | 265 --- + .../github.com/klauspost/cpuid/private-gen.go | 476 ------ + vendor/github.com/ulikunitz/xz/example.go | 40 - + vendor/golang.org/x/net/html/atom/gen.go | 712 -------- + vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - + vendor/golang.org/x/sys/unix/mkpost.go | 122 -- + vendor/golang.org/x/sys/unix/mksyscall.go | 407 ----- + .../x/sys/unix/mksyscall_aix_ppc.go | 415 ----- + .../x/sys/unix/mksyscall_aix_ppc64.go | 614 ------- + .../x/sys/unix/mksyscall_solaris.go | 335 ---- + .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 ---- + vendor/golang.org/x/sys/unix/mksysnum.go | 190 --- + vendor/golang.org/x/sys/unix/types_aix.go | 237 --- + vendor/golang.org/x/sys/unix/types_darwin.go | 283 --- + .../golang.org/x/sys/unix/types_dragonfly.go | 263 --- + vendor/golang.org/x/sys/unix/types_freebsd.go | 400 ----- + vendor/golang.org/x/sys/unix/types_netbsd.go | 290 ---- + vendor/golang.org/x/sys/unix/types_openbsd.go | 283 --- + vendor/golang.org/x/sys/unix/types_solaris.go | 266 --- + .../x/text/encoding/charmap/maketables.go | 556 ------ + .../x/text/encoding/htmlindex/gen.go | 173 -- + .../text/encoding/internal/identifier/gen.go | 142 -- + .../x/text/encoding/japanese/maketables.go | 161 -- + .../x/text/encoding/korean/maketables.go | 143 -- + .../encoding/simplifiedchinese/maketables.go | 161 -- + .../encoding/traditionalchinese/maketables.go | 140 -- + .../x/text/internal/language/compact/gen.go | 64 - + .../internal/language/compact/gen_index.go | 113 -- + .../internal/language/compact/gen_parents.go | 54 - + .../x/text/internal/language/gen.go | 1520 ----------------- + .../x/text/internal/language/gen_common.go | 20 - + vendor/golang.org/x/text/language/gen.go | 305 ---- + vendor/golang.org/x/text/unicode/bidi/gen.go | 133 -- + .../x/text/unicode/bidi/gen_ranges.go | 57 - + .../x/text/unicode/bidi/gen_trieval.go | 64 - + .../x/text/unicode/norm/maketables.go | 986 ----------- + .../golang.org/x/text/unicode/norm/triegen.go | 117 -- + vendor/modules.txt | 407 ++--- + 52 files changed, 291 insertions(+), 12283 deletions(-) + delete mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go + create mode 100644 vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go + delete mode 100644 vendor/github.com/containers/storage/pkg/archive/example_changes.go + delete mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go + delete mode 100644 vendor/github.com/klauspost/compress/flate/gen.go + delete mode 100644 vendor/github.com/klauspost/cpuid/private-gen.go + delete mode 100644 vendor/github.com/ulikunitz/xz/example.go + delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go + delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go + delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go + delete mode 100644 
vendor/golang.org/x/sys/unix/mksyscall.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go + delete mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go + delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go + delete mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go + delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go + delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go + delete mode 100644 vendor/golang.org/x/text/language/gen.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go + delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go + delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go + +diff --git a/go.mod b/go.mod +index 6ee27ae6e2..064089b76d 100644 +--- a/go.mod ++++ b/go.mod +@@ -12,7 +12,7 @@ require ( + github.com/containernetworking/cni v0.7.1 + github.com/containernetworking/plugins v0.8.2 + github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 +- github.com/containers/image/v5 v5.0.0 ++ github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 + github.com/containers/psgo v1.3.2 + github.com/containers/storage v1.13.6 + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f +diff --git a/go.sum b/go.sum +index 0d73288fb4..2dfb33e942 100644 +--- a/go.sum ++++ b/go.sum +@@ -61,6 +61,8 @@ github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2s + github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc= + github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4= + github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= ++github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 h1:iV4aHKRoPcHp5BISsuiPMyaCjGJfLKp/FUMAG1NeqvE= ++github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= + github.com/containers/libtrust 
v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= + github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= + github.com/containers/psgo v1.3.2 h1:jYfppPih3S/j2Yi5O14AXjd8GfCx1ph9L3YsoK3adko= +diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go +deleted file mode 100644 +index 7647734de9..0000000000 +--- a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go ++++ /dev/null +@@ -1,943 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-mksyscall_windows generates windows system call bodies +- +-It parses all files specified on command line containing function +-prototypes (like syscall_windows.go) and prints system call bodies +-to standard output. +- +-The prototypes are marked by lines beginning with "//sys" and read +-like func declarations if //sys is replaced by func, but: +- +-* The parameter lists must give a name for each argument. This +- includes return parameters. +- +-* The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- +-* If the return parameter is an error number, it must be named err. +- +-* If go func name needs to be different from it's winapi dll name, +- the winapi name could be specified at the end, after "=" sign, like +- //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA +- +-* Each function that returns err needs to supply a condition, that +- return value of winapi will be tested against to detect failure. +- This would set err to windows "last-error", otherwise it will be nil. +- The value can be provided at end of //sys declaration, like +- //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA +- and is [failretval==0] by default. +- +-Usage: +- mksyscall_windows [flags] [path ...] +- +-The flags are: +- -output +- Specify output file name (outputs to console if blank). +- -trace +- Generate print statement after every syscall. +-*/ +-package main +- +-import ( +- "bufio" +- "bytes" +- "errors" +- "flag" +- "fmt" +- "go/format" +- "go/parser" +- "go/token" +- "io" +- "io/ioutil" +- "log" +- "os" +- "path/filepath" +- "runtime" +- "sort" +- "strconv" +- "strings" +- "text/template" +-) +- +-var ( +- filename = flag.String("output", "", "output file name (standard output if omitted)") +- printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") +- systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") +- winio = flag.Bool("winio", false, "import go-winio") +-) +- +-func trim(s string) string { +- return strings.Trim(s, " \t") +-} +- +-var packageName string +- +-func packagename() string { +- return packageName +-} +- +-func syscalldot() string { +- if packageName == "syscall" { +- return "" +- } +- return "syscall." +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +- fn *Fn +- tmpVarIdx int +-} +- +-// tmpVar returns temp variable name that will be used to represent p during syscall. 
+-func (p *Param) tmpVar() string { +- if p.tmpVarIdx < 0 { +- p.tmpVarIdx = p.fn.curTmpVarIdx +- p.fn.curTmpVarIdx++ +- } +- return fmt.Sprintf("_p%d", p.tmpVarIdx) +-} +- +-// BoolTmpVarCode returns source code for bool temp variable. +-func (p *Param) BoolTmpVarCode() string { +- const code = `var %s uint32 +- if %s { +- %s = 1 +- } else { +- %s = 0 +- }` +- tmp := p.tmpVar() +- return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) +-} +- +-// SliceTmpVarCode returns source code for slice temp variable. +-func (p *Param) SliceTmpVarCode() string { +- const code = `var %s *%s +- if len(%s) > 0 { +- %s = &%s[0] +- }` +- tmp := p.tmpVar() +- return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +-} +- +-// StringTmpVarCode returns source code for string temp variable. +-func (p *Param) StringTmpVarCode() string { +- errvar := p.fn.Rets.ErrorVarName() +- if errvar == "" { +- errvar = "_" +- } +- tmp := p.tmpVar() +- const code = `var %s %s +- %s, %s = %s(%s)` +- s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) +- if errvar == "-" { +- return s +- } +- const morecode = ` +- if %s != nil { +- return +- }` +- return s + fmt.Sprintf(morecode, errvar) +-} +- +-// TmpVarCode returns source code for temp variable. +-func (p *Param) TmpVarCode() string { +- switch { +- case p.Type == "bool": +- return p.BoolTmpVarCode() +- case strings.HasPrefix(p.Type, "[]"): +- return p.SliceTmpVarCode() +- default: +- return "" +- } +-} +- +-// TmpVarHelperCode returns source code for helper's temp variable. +-func (p *Param) TmpVarHelperCode() string { +- if p.Type != "string" { +- return "" +- } +- return p.StringTmpVarCode() +-} +- +-// SyscallArgList returns source code fragments representing p parameter +-// in syscall. Slices are translated into 2 syscall parameters: pointer to +-// the first element and length. +-func (p *Param) SyscallArgList() []string { +- t := p.HelperType() +- var s string +- switch { +- case t[0] == '*': +- s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) +- case t == "bool": +- s = p.tmpVar() +- case strings.HasPrefix(t, "[]"): +- return []string{ +- fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), +- fmt.Sprintf("uintptr(len(%s))", p.Name), +- } +- default: +- s = p.Name +- } +- return []string{fmt.Sprintf("uintptr(%s)", s)} +-} +- +-// IsError determines if p parameter is used to return error. +-func (p *Param) IsError() bool { +- return p.Name == "err" && p.Type == "error" +-} +- +-// HelperType returns type of parameter p used in helper function. +-func (p *Param) HelperType() string { +- if p.Type == "string" { +- return p.fn.StrconvType() +- } +- return p.Type +-} +- +-// join concatenates parameters ps into a string with sep separator. +-// Each parameter is converted into string by applying fn to it +-// before conversion. +-func join(ps []*Param, fn func(*Param) string, sep string) string { +- if len(ps) == 0 { +- return "" +- } +- a := make([]string, 0) +- for _, p := range ps { +- a = append(a, fn(p)) +- } +- return strings.Join(a, sep) +-} +- +-// Rets describes function return parameters. +-type Rets struct { +- Name string +- Type string +- ReturnsError bool +- FailCond string +-} +- +-// ErrorVarName returns error variable name for r. +-func (r *Rets) ErrorVarName() string { +- if r.ReturnsError { +- return "err" +- } +- if r.Type == "error" { +- return r.Name +- } +- return "" +-} +- +-// ToParams converts r into slice of *Param. 
+-func (r *Rets) ToParams() []*Param { +- ps := make([]*Param, 0) +- if len(r.Name) > 0 { +- ps = append(ps, &Param{Name: r.Name, Type: r.Type}) +- } +- if r.ReturnsError { +- ps = append(ps, &Param{Name: "err", Type: "error"}) +- } +- return ps +-} +- +-// List returns source code of syscall return parameters. +-func (r *Rets) List() string { +- s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") +- if len(s) > 0 { +- s = "(" + s + ")" +- } +- return s +-} +- +-// PrintList returns source code of trace printing part correspondent +-// to syscall return values. +-func (r *Rets) PrintList() string { +- return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +-} +- +-// SetReturnValuesCode returns source code that accepts syscall return values. +-func (r *Rets) SetReturnValuesCode() string { +- if r.Name == "" && !r.ReturnsError { +- return "" +- } +- retvar := "r0" +- if r.Name == "" { +- retvar = "r1" +- } +- errvar := "_" +- if r.ReturnsError { +- errvar = "e1" +- } +- return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +-} +- +-func (r *Rets) useLongHandleErrorCode(retvar string) string { +- const code = `if %s { +- if e1 != 0 { +- err = errnoErr(e1) +- } else { +- err = %sEINVAL +- } +- }` +- cond := retvar + " == 0" +- if r.FailCond != "" { +- cond = strings.Replace(r.FailCond, "failretval", retvar, 1) +- } +- return fmt.Sprintf(code, cond, syscalldot()) +-} +- +-// SetErrorCode returns source code that sets return parameters. +-func (r *Rets) SetErrorCode() string { +- const code = `if r0 != 0 { +- %s = %sErrno(r0) +- }` +- const hrCode = `if int32(r0) < 0 { +- if r0&0x1fff0000 == 0x00070000 { +- r0 &= 0xffff +- } +- %s = %sErrno(r0) +- }` +- if r.Name == "" && !r.ReturnsError { +- return "" +- } +- if r.Name == "" { +- return r.useLongHandleErrorCode("r1") +- } +- if r.Type == "error" { +- if r.Name == "hr" { +- return fmt.Sprintf(hrCode, r.Name, syscalldot()) +- } else { +- return fmt.Sprintf(code, r.Name, syscalldot()) +- } +- } +- s := "" +- switch { +- case r.Type[0] == '*': +- s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) +- case r.Type == "bool": +- s = fmt.Sprintf("%s = r0 != 0", r.Name) +- default: +- s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) +- } +- if !r.ReturnsError { +- return s +- } +- return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +-} +- +-// Fn describes syscall function. +-type Fn struct { +- Name string +- Params []*Param +- Rets *Rets +- PrintTrace bool +- confirmproc bool +- dllname string +- dllfuncname string +- src string +- // TODO: get rid of this field and just use parameter index instead +- curTmpVarIdx int // insure tmp variables have uniq names +-} +- +-// extractParams parses s to extract function parameters. +-func extractParams(s string, f *Fn) ([]*Param, error) { +- s = trim(s) +- if s == "" { +- return nil, nil +- } +- a := strings.Split(s, ",") +- ps := make([]*Param, len(a)) +- for i := range ps { +- s2 := trim(a[i]) +- b := strings.Split(s2, " ") +- if len(b) != 2 { +- b = strings.Split(s2, "\t") +- if len(b) != 2 { +- return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") +- } +- } +- ps[i] = &Param{ +- Name: trim(b[0]), +- Type: trim(b[1]), +- fn: f, +- tmpVarIdx: -1, +- } +- } +- return ps, nil +-} +- +-// extractSection extracts text out of string s starting after start +-// and ending just before end. 
found return value will indicate success, +-// and prefix, body and suffix will contain correspondent parts of string s. +-func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { +- s = trim(s) +- if strings.HasPrefix(s, string(start)) { +- // no prefix +- body = s[1:] +- } else { +- a := strings.SplitN(s, string(start), 2) +- if len(a) != 2 { +- return "", "", s, false +- } +- prefix = a[0] +- body = a[1] +- } +- a := strings.SplitN(body, string(end), 2) +- if len(a) != 2 { +- return "", "", "", false +- } +- return prefix, a[0], a[1], true +-} +- +-// newFn parses string s and return created function Fn. +-func newFn(s string) (*Fn, error) { +- s = trim(s) +- f := &Fn{ +- Rets: &Rets{}, +- src: s, +- PrintTrace: *printTraceFlag, +- } +- // function name and args +- prefix, body, s, found := extractSection(s, '(', ')') +- if !found || prefix == "" { +- return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") +- } +- f.Name = prefix +- var err error +- f.Params, err = extractParams(body, f) +- if err != nil { +- return nil, err +- } +- // return values +- _, body, s, found = extractSection(s, '(', ')') +- if found { +- r, err := extractParams(body, f) +- if err != nil { +- return nil, err +- } +- switch len(r) { +- case 0: +- case 1: +- if r[0].IsError() { +- f.Rets.ReturnsError = true +- } else { +- f.Rets.Name = r[0].Name +- f.Rets.Type = r[0].Type +- } +- case 2: +- if !r[1].IsError() { +- return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") +- } +- f.Rets.ReturnsError = true +- f.Rets.Name = r[0].Name +- f.Rets.Type = r[0].Type +- default: +- return nil, errors.New("Too many return values in \"" + f.src + "\"") +- } +- } +- // fail condition +- _, body, s, found = extractSection(s, '[', ']') +- if found { +- f.Rets.FailCond = body +- } +- // dll and dll function names +- s = trim(s) +- if s == "" { +- return f, nil +- } +- if !strings.HasPrefix(s, "=") { +- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") +- } +- s = trim(s[1:]) +- a := strings.Split(s, ".") +- switch len(a) { +- case 1: +- f.dllfuncname = a[0] +- case 2: +- f.dllname = a[0] +- f.dllfuncname = a[1] +- default: +- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") +- } +- if f.dllfuncname[len(f.dllfuncname)-1] == '?' { +- f.confirmproc = true +- f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1] +- } +- return f, nil +-} +- +-// DLLName returns DLL name for function f. +-func (f *Fn) DLLName() string { +- if f.dllname == "" { +- return "kernel32" +- } +- return f.dllname +-} +- +-// DLLName returns DLL function name for function f. +-func (f *Fn) DLLFuncName() string { +- if f.dllfuncname == "" { +- return f.Name +- } +- return f.dllfuncname +-} +- +-func (f *Fn) ConfirmProc() bool { +- return f.confirmproc +-} +- +-// ParamList returns source code for function f parameters. +-func (f *Fn) ParamList() string { +- return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +-} +- +-// HelperParamList returns source code for helper function f parameters. +-func (f *Fn) HelperParamList() string { +- return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +-} +- +-// ParamPrintList returns source code of trace printing part correspondent +-// to syscall input parameters. 
+-func (f *Fn) ParamPrintList() string { +- return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +-} +- +-// ParamCount return number of syscall parameters for function f. +-func (f *Fn) ParamCount() int { +- n := 0 +- for _, p := range f.Params { +- n += len(p.SyscallArgList()) +- } +- return n +-} +- +-// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +-// to use. It returns parameter count for correspondent SyscallX function. +-func (f *Fn) SyscallParamCount() int { +- n := f.ParamCount() +- switch { +- case n <= 3: +- return 3 +- case n <= 6: +- return 6 +- case n <= 9: +- return 9 +- case n <= 12: +- return 12 +- case n <= 15: +- return 15 +- default: +- panic("too many arguments to system call") +- } +-} +- +-// Syscall determines which SyscallX function to use for function f. +-func (f *Fn) Syscall() string { +- c := f.SyscallParamCount() +- if c == 3 { +- return syscalldot() + "Syscall" +- } +- return syscalldot() + "Syscall" + strconv.Itoa(c) +-} +- +-// SyscallParamList returns source code for SyscallX parameters for function f. +-func (f *Fn) SyscallParamList() string { +- a := make([]string, 0) +- for _, p := range f.Params { +- a = append(a, p.SyscallArgList()...) +- } +- for len(a) < f.SyscallParamCount() { +- a = append(a, "0") +- } +- return strings.Join(a, ", ") +-} +- +-// HelperCallParamList returns source code of call into function f helper. +-func (f *Fn) HelperCallParamList() string { +- a := make([]string, 0, len(f.Params)) +- for _, p := range f.Params { +- s := p.Name +- if p.Type == "string" { +- s = p.tmpVar() +- } +- a = append(a, s) +- } +- return strings.Join(a, ", ") +-} +- +-// IsUTF16 is true, if f is W (utf16) function. It is false +-// for all A (ascii) functions. +-func (_ *Fn) IsUTF16() bool { +- return true +-} +- +-// StrconvFunc returns name of Go string to OS string function for f. +-func (f *Fn) StrconvFunc() string { +- if f.IsUTF16() { +- return syscalldot() + "UTF16PtrFromString" +- } +- return syscalldot() + "BytePtrFromString" +-} +- +-// StrconvType returns Go type name used for OS string for f. +-func (f *Fn) StrconvType() string { +- if f.IsUTF16() { +- return "*uint16" +- } +- return "*byte" +-} +- +-// HasStringParam is true, if f has at least one string parameter. +-// Otherwise it is false. +-func (f *Fn) HasStringParam() bool { +- for _, p := range f.Params { +- if p.Type == "string" { +- return true +- } +- } +- return false +-} +- +-var uniqDllFuncName = make(map[string]bool) +- +-// IsNotDuplicate is true if f is not a duplicated function +-func (f *Fn) IsNotDuplicate() bool { +- funcName := f.DLLFuncName() +- if uniqDllFuncName[funcName] == false { +- uniqDllFuncName[funcName] = true +- return true +- } +- return false +-} +- +-// HelperName returns name of function f helper. +-func (f *Fn) HelperName() string { +- if !f.HasStringParam() { +- return f.Name +- } +- return "_" + f.Name +-} +- +-// Source files and functions. +-type Source struct { +- Funcs []*Fn +- Files []string +- StdLibImports []string +- ExternalImports []string +-} +- +-func (src *Source) Import(pkg string) { +- src.StdLibImports = append(src.StdLibImports, pkg) +- sort.Strings(src.StdLibImports) +-} +- +-func (src *Source) ExternalImport(pkg string) { +- src.ExternalImports = append(src.ExternalImports, pkg) +- sort.Strings(src.ExternalImports) +-} +- +-// ParseFiles parses files listed in fs and extracts all syscall +-// functions listed in sys comments. 
It returns source files +-// and functions collection *Source if successful. +-func ParseFiles(fs []string) (*Source, error) { +- src := &Source{ +- Funcs: make([]*Fn, 0), +- Files: make([]string, 0), +- StdLibImports: []string{ +- "unsafe", +- }, +- ExternalImports: make([]string, 0), +- } +- for _, file := range fs { +- if err := src.ParseFile(file); err != nil { +- return nil, err +- } +- } +- return src, nil +-} +- +-// DLLs return dll names for a source set src. +-func (src *Source) DLLs() []string { +- uniq := make(map[string]bool) +- r := make([]string, 0) +- for _, f := range src.Funcs { +- name := f.DLLName() +- if _, found := uniq[name]; !found { +- uniq[name] = true +- r = append(r, name) +- } +- } +- return r +-} +- +-// ParseFile adds additional file path to a source set src. +-func (src *Source) ParseFile(path string) error { +- file, err := os.Open(path) +- if err != nil { +- return err +- } +- defer file.Close() +- +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := trim(s.Text()) +- if len(t) < 7 { +- continue +- } +- if !strings.HasPrefix(t, "//sys") { +- continue +- } +- t = t[5:] +- if !(t[0] == ' ' || t[0] == '\t') { +- continue +- } +- f, err := newFn(t[1:]) +- if err != nil { +- return err +- } +- src.Funcs = append(src.Funcs, f) +- } +- if err := s.Err(); err != nil { +- return err +- } +- src.Files = append(src.Files, path) +- +- // get package name +- fset := token.NewFileSet() +- _, err = file.Seek(0, 0) +- if err != nil { +- return err +- } +- pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) +- if err != nil { +- return err +- } +- packageName = pkg.Name.Name +- +- return nil +-} +- +-// IsStdRepo returns true if src is part of standard library. +-func (src *Source) IsStdRepo() (bool, error) { +- if len(src.Files) == 0 { +- return false, errors.New("no input files provided") +- } +- abspath, err := filepath.Abs(src.Files[0]) +- if err != nil { +- return false, err +- } +- goroot := runtime.GOROOT() +- if runtime.GOOS == "windows" { +- abspath = strings.ToLower(abspath) +- goroot = strings.ToLower(goroot) +- } +- sep := string(os.PathSeparator) +- if !strings.HasSuffix(goroot, sep) { +- goroot += sep +- } +- return strings.HasPrefix(abspath, goroot), nil +-} +- +-// Generate output source file from a source set src. 
+-func (src *Source) Generate(w io.Writer) error { +- const ( +- pkgStd = iota // any package in std library +- pkgXSysWindows // x/sys/windows package +- pkgOther +- ) +- isStdRepo, err := src.IsStdRepo() +- if err != nil { +- return err +- } +- var pkgtype int +- switch { +- case isStdRepo: +- pkgtype = pkgStd +- case packageName == "windows": +- // TODO: this needs better logic than just using package name +- pkgtype = pkgXSysWindows +- default: +- pkgtype = pkgOther +- } +- if *systemDLL { +- switch pkgtype { +- case pkgStd: +- src.Import("internal/syscall/windows/sysdll") +- case pkgXSysWindows: +- default: +- src.ExternalImport("golang.org/x/sys/windows") +- } +- } +- if *winio { +- src.ExternalImport("github.com/Microsoft/go-winio") +- } +- if packageName != "syscall" { +- src.Import("syscall") +- } +- funcMap := template.FuncMap{ +- "packagename": packagename, +- "syscalldot": syscalldot, +- "newlazydll": func(dll string) string { +- arg := "\"" + dll + ".dll\"" +- if !*systemDLL { +- return syscalldot() + "NewLazyDLL(" + arg + ")" +- } +- if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { +- arg = strings.Replace(arg, "_", "-", -1) +- } +- switch pkgtype { +- case pkgStd: +- return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" +- case pkgXSysWindows: +- return "NewLazySystemDLL(" + arg + ")" +- default: +- return "windows.NewLazySystemDLL(" + arg + ")" +- } +- }, +- } +- t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) +- err = t.Execute(w, src) +- if err != nil { +- return errors.New("Failed to execute template: " + err.Error()) +- } +- return nil +-} +- +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") +- flag.PrintDefaults() +- os.Exit(1) +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- src, err := ParseFiles(flag.Args()) +- if err != nil { +- log.Fatal(err) +- } +- +- var buf bytes.Buffer +- if err := src.Generate(&buf); err != nil { +- log.Fatal(err) +- } +- +- data, err := format.Source(buf.Bytes()) +- if err != nil { +- log.Fatal(err) +- } +- if *filename == "" { +- _, err = os.Stdout.Write(data) +- } else { +- err = ioutil.WriteFile(*filename, data, 0644) +- } +- if err != nil { +- log.Fatal(err) +- } +-} +- +-// TODO: use println instead to print in the following template +-const srcTemplate = ` +- +-{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT +- +-package {{packagename}} +- +-import ( +-{{range .StdLibImports}}"{{.}}" +-{{end}} +- +-{{range .ExternalImports}}"{{.}}" +-{{end}} +-) +- +-var _ unsafe.Pointer +- +-// Do the interface allocations only once for common +-// Errno values. +-const ( +- errnoERROR_IO_PENDING = 997 +-) +- +-var ( +- errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) +-) +- +-// errnoErr returns common boxed Errno values, to prevent +-// allocations at runtime. +-func errnoErr(e {{syscalldot}}Errno) error { +- switch e { +- case 0: +- return nil +- case errnoERROR_IO_PENDING: +- return errERROR_IO_PENDING +- } +- // TODO: add more here, after collecting data on the common +- // error values see on Windows. (perhaps when running +- // all.bat?) 
+- return e +-} +- +-var ( +-{{template "dlls" .}} +-{{template "funcnames" .}}) +-{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +-{{end}} +- +-{{/* help functions */}} +- +-{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +-{{end}}{{end}} +- +-{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}} +-{{end}}{{end}} +- +-{{define "helperbody"}} +-func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +-{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +-} +-{{end}} +- +-{{define "funcbody"}} +-func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +-{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}} +-{{template "seterror" .}}{{template "printtrace" .}} return +-} +-{{end}} +- +-{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +-{{end}}{{end}}{{end}} +- +-{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +-{{end}}{{end}}{{end}} +- +-{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} +- +-{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} +- +-{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil { +- return +-} +-{{end}}{{end}} +- +- +-{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +-{{end}}{{end}} +- +-{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +-{{end}}{{end}} +- +-` +diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go +index 0b012c703b..bff077a40a 100644 +--- a/vendor/github.com/containers/image/v5/docker/docker_client.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_client.go +@@ -6,7 +6,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "os" +@@ -17,6 +16,7 @@ import ( + "time" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" +@@ -597,7 +597,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, + default: + return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) + } +- tokenBlob, err := ioutil.ReadAll(res.Body) ++ tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } +@@ -690,7 +690,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe + return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) + } + +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +index 417d97aec9..ce8a1f357e 100644 +--- 
a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +@@ -15,6 +15,7 @@ import ( + "strings" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -620,7 +621,7 @@ sigExists: + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize) + if err == nil { + logrus.Debugf("Error body %s", string(body)) + } +diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go +index 35beb30e54..5436d9b7d9 100644 +--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go +@@ -12,6 +12,7 @@ import ( + "strconv" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" +@@ -156,7 +157,8 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin + if res.StatusCode != http.StatusOK { + return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) + } +- manblob, err := ioutil.ReadAll(res.Body) ++ ++ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) + if err != nil { + return nil, "", err + } +@@ -342,7 +344,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } +- sig, err := ioutil.ReadAll(res.Body) ++ sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) + if err != nil { + return nil, false, err + } +@@ -401,7 +403,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere + return err + } + defer get.Body.Close() +- manifestBody, err := ioutil.ReadAll(get.Body) ++ manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) + if err != nil { + return err + } +@@ -424,7 +426,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere + } + defer delete.Body.Close() + +- body, err := ioutil.ReadAll(delete.Body) ++ body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize) + if err != nil { + return err + } +diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +index b02c60bb3d..9748ca1121 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +@@ -13,6 +13,7 @@ import ( + "time" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" +@@ -135,7 +136,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t + } + + if 
isConfig { +- buf, err := ioutil.ReadAll(stream) ++ buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream") + } +diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go +index ad0a3d2cb4..bbf604da6e 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go +@@ -11,6 +11,7 @@ import ( + "path" + "sync" + ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" +@@ -187,13 +188,13 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade + } + + // readTarComponent returns full contents of componentPath. +-func (s *Source) readTarComponent(path string) ([]byte, error) { ++func (s *Source) readTarComponent(path string, limit int) ([]byte, error) { + file, err := s.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "Error loading tar component %s", path) + } + defer file.Close() +- bytes, err := ioutil.ReadAll(file) ++ bytes, err := iolimits.ReadAtMost(file, limit) + if err != nil { + return nil, err + } +@@ -224,7 +225,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { + } + + // Read and parse config. +- configBytes, err := s.readTarComponent(tarManifest[0].Config) ++ configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize) + if err != nil { + return err + } +@@ -250,7 +251,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { + // loadTarManifest loads and decodes the manifest.json. + func (s *Source) loadTarManifest() ([]ManifestItem, error) { + // FIXME? Do we need to deal with the legacy format? 
+- bytes, err := s.readTarComponent(manifestFileName) ++ bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go +index 254c13f789..29c5047d73 100644 +--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go ++++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go +@@ -7,10 +7,10 @@ import ( + "encoding/hex" + "encoding/json" + "fmt" +- "io/ioutil" + "strings" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -102,7 +102,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + return nil, err + } + defer stream.Close() +- blob, err := ioutil.ReadAll(stream) ++ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go +index 18a38d463e..406da262f3 100644 +--- a/vendor/github.com/containers/image/v5/image/oci.go ++++ b/vendor/github.com/containers/image/v5/image/oci.go +@@ -4,9 +4,9 @@ import ( + "context" + "encoding/json" + "fmt" +- "io/ioutil" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -67,7 +67,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { + return nil, err + } + defer stream.Close() +- blob, err := ioutil.ReadAll(stream) ++ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +new file mode 100644 +index 0000000000..3fed1995cb +--- /dev/null ++++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +@@ -0,0 +1,60 @@ ++package iolimits ++ ++import ( ++ "io" ++ "io/ioutil" ++ ++ "github.com/pkg/errors" ++) ++ ++// All constants below are intended to be used as limits for `ReadAtMost`. The ++// immediate use-case for limiting the size of in-memory copied data is to ++// protect against OOM DOS attacks as described inCVE-2020-1702. Instead of ++// copying data until running out of memory, we error out after hitting the ++// specified limit. ++const ( ++ // megaByte denotes one megabyte and is intended to be used as a limit in ++ // `ReadAtMost`. ++ megaByte = 1 << 20 ++ // MaxManifestBodySize is the maximum allowed size of a manifest. The limit ++ // of 4 MB aligns with the one of a Docker registry: ++ // https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30 ++ MaxManifestBodySize = 4 * megaByte ++ // MaxAuthTokenBodySize is the maximum allowed size of an auth token. ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxAuthTokenBodySize = megaByte ++ // MaxSignatureListBodySize is the maximum allowed size of a signature list. ++ // The limit of 4 MB is considered to be greatly sufficient. 
++ MaxSignatureListBodySize = 4 * megaByte ++ // MaxSignatureBodySize is the maximum allowed size of a signature. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxSignatureBodySize = 4 * megaByte ++ // MaxErrorBodySize is the maximum allowed size of an error-response body. ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxErrorBodySize = megaByte ++ // MaxConfigBodySize is the maximum allowed size of a config blob. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxConfigBodySize = 4 * megaByte ++ // MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxOpenShiftStatusBody = 4 * megaByte ++ // MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images) ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxTarFileManifestSize = megaByte ++) ++ ++// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded. ++func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { ++ limitedReader := io.LimitReader(reader, int64(limit+1)) ++ ++ res, err := ioutil.ReadAll(limitedReader) ++ if err != nil { ++ return nil, err ++ } ++ ++ if len(res) > limit { ++ return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit) ++ } ++ ++ return res, nil ++} +diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go +index 016de48034..c37e1b7510 100644 +--- a/vendor/github.com/containers/image/v5/openshift/openshift.go ++++ b/vendor/github.com/containers/image/v5/openshift/openshift.go +@@ -7,13 +7,13 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" +@@ -102,7 +102,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re + return nil, err + } + defer res.Body.Close() +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go +deleted file mode 100644 +index 70f9c5564a..0000000000 +--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go ++++ /dev/null +@@ -1,97 +0,0 @@ +-// +build ignore +- +-// Simple tool to create an archive stream from an old and new directory +-// +-// By default it will stream the comparison of two temporary directories with junk files +-package main +- +-import ( +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "os" +- "path" +- +- "github.com/containers/storage/pkg/archive" +- "github.com/sirupsen/logrus" +-) +- +-var ( +- flDebug = flag.Bool("D", false, "debugging output") +- flNewDir = flag.String("newdir", "", "") +- flOldDir = flag.String("olddir", "", "") +- log = logrus.New() +-) +- +-func main() { +- flag.Usage = func() { +- fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") +- fmt.Printf("%s [OPTIONS]\n", os.Args[0]) +- flag.PrintDefaults() +- } +- flag.Parse() +- log.Out = os.Stderr +- if (len(os.Getenv("DEBUG")) > 0) || *flDebug { +- logrus.SetLevel(logrus.DebugLevel) +- } +- var newDir, oldDir string +- +- if len(*flNewDir) == 0 { +- var err error +- newDir, err = ioutil.TempDir("", "storage-test-newDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(newDir) +- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { +- log.Fatal(err) +- } +- } else { +- newDir = *flNewDir +- } +- +- if len(*flOldDir) == 0 { +- oldDir, err := ioutil.TempDir("", "storage-test-oldDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(oldDir) +- } else { +- oldDir = *flOldDir +- } +- +- changes, err := archive.ChangesDirs(newDir, oldDir) +- if err != nil { +- log.Fatal(err) +- } +- +- a, err := archive.ExportChanges(newDir, changes) +- if err != nil { +- log.Fatal(err) +- } +- defer a.Close() +- +- i, err := io.Copy(os.Stdout, a) +- if err != nil && err != io.EOF { +- log.Fatal(err) +- } +- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +-} +- +-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { +- fileData := []byte("fooo") +- for n := 0; n < numberOfFiles; n++ { +- fileName := fmt.Sprintf("file-%d", n) +- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { +- return 0, err +- } +- if makeLinks { +- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { +- return 0, err +- } +- } +- } +- totalSize := numberOfFiles * len(fileData) +- return totalSize, nil +-} +diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go +deleted file mode 100644 +index 495db809e9..0000000000 +--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go ++++ /dev/null +@@ -1,97 +0,0 @@ +-// +build ignore +- +-// Simple tool to create an archive stream from an old and new directory +-// +-// By default it will stream the comparison of two temporary directories with junk files +-package main +- +-import ( +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "os" +- "path" +- +- "github.com/docker/docker/pkg/archive" +- "github.com/sirupsen/logrus" +-) +- +-var ( +- flDebug = flag.Bool("D", false, "debugging output") +- flNewDir = flag.String("newdir", "", "") +- flOldDir = flag.String("olddir", "", "") +- log = logrus.New() +-) +- +-func main() { +- flag.Usage = func() { +- fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") +- fmt.Printf("%s [OPTIONS]\n", os.Args[0]) +- flag.PrintDefaults() +- } +- flag.Parse() +- log.Out = os.Stderr +- if (len(os.Getenv("DEBUG")) > 0) || *flDebug { +- logrus.SetLevel(logrus.DebugLevel) +- } +- var newDir, oldDir string +- +- if len(*flNewDir) == 0 { +- var err error +- newDir, err = ioutil.TempDir("", "docker-test-newDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(newDir) +- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { +- log.Fatal(err) +- } +- } else { +- newDir = *flNewDir +- } +- +- if len(*flOldDir) == 0 { +- oldDir, err := ioutil.TempDir("", "docker-test-oldDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(oldDir) +- } else { +- oldDir = *flOldDir +- } +- +- changes, err := archive.ChangesDirs(newDir, oldDir) +- if err != nil { +- log.Fatal(err) +- } +- +- a, err := archive.ExportChanges(newDir, changes) +- if err != nil { +- log.Fatal(err) +- } +- defer a.Close() +- +- i, err := io.Copy(os.Stdout, a) +- if err != nil && err != io.EOF { +- log.Fatal(err) +- } +- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +-} +- +-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { +- fileData := []byte("fooo") +- for n := 0; n < numberOfFiles; n++ { +- fileName := fmt.Sprintf("file-%d", n) +- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { +- return 0, err +- } +- if makeLinks { +- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { +- return 0, err +- } +- } +- } +- totalSize := numberOfFiles * len(fileData) +- return totalSize, nil +-} +diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go +deleted file mode 100644 +index 154c89a488..0000000000 +--- a/vendor/github.com/klauspost/compress/flate/gen.go ++++ /dev/null +@@ -1,265 +0,0 @@ +-// Copyright 2012 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// This program generates fixedhuff.go +-// Invoke as +-// +-// go run gen.go -output fixedhuff.go +- +-package main +- +-import ( +- "bytes" +- "flag" +- "fmt" +- "go/format" +- "io/ioutil" +- "log" +-) +- +-var filename = flag.String("output", "fixedhuff.go", "output file name") +- +-const maxCodeLen = 16 +- +-// Note: the definition of the huffmanDecoder struct is copied from +-// inflate.go, as it is private to the implementation. +- +-// chunk & 15 is number of bits +-// chunk >> 4 is value, including table link +- +-const ( +- huffmanChunkBits = 9 +- huffmanNumChunks = 1 << huffmanChunkBits +- huffmanCountMask = 15 +- huffmanValueShift = 4 +-) +- +-type huffmanDecoder struct { +- min int // the minimum code length +- chunks [huffmanNumChunks]uint32 // chunks as described above +- links [][]uint32 // overflow links +- linkMask uint32 // mask the width of the link table +-} +- +-// Initialize Huffman decoding tables from array of code lengths. +-// Following this function, h is guaranteed to be initialized into a complete +-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +-// degenerate case where the tree has only a single symbol with length 1. Empty +-// trees are permitted. 
+-func (h *huffmanDecoder) init(bits []int) bool { +- // Sanity enables additional runtime tests during Huffman +- // table construction. It's intended to be used during +- // development to supplement the currently ad-hoc unit tests. +- const sanity = false +- +- if h.min != 0 { +- *h = huffmanDecoder{} +- } +- +- // Count number of codes of each length, +- // compute min and max length. +- var count [maxCodeLen]int +- var min, max int +- for _, n := range bits { +- if n == 0 { +- continue +- } +- if min == 0 || n < min { +- min = n +- } +- if n > max { +- max = n +- } +- count[n]++ +- } +- +- // Empty tree. The decompressor.huffSym function will fail later if the tree +- // is used. Technically, an empty tree is only valid for the HDIST tree and +- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree +- // is guaranteed to fail since it will attempt to use the tree to decode the +- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is +- // guaranteed to fail later since the compressed data section must be +- // composed of at least one symbol (the end-of-block marker). +- if max == 0 { +- return true +- } +- +- code := 0 +- var nextcode [maxCodeLen]int +- for i := min; i <= max; i++ { +- code <<= 1 +- nextcode[i] = code +- code += count[i] +- } +- +- // Check that the coding is complete (i.e., that we've +- // assigned all 2-to-the-max possible bit sequences). +- // Exception: To be compatible with zlib, we also need to +- // accept degenerate single-code codings. See also +- // TestDegenerateHuffmanCoding. +- if code != 1< huffmanChunkBits { +- numLinks := 1 << (uint(max) - huffmanChunkBits) +- h.linkMask = uint32(numLinks - 1) +- +- // create link tables +- link := nextcode[huffmanChunkBits+1] >> 1 +- h.links = make([][]uint32, huffmanNumChunks-link) +- for j := uint(link); j < huffmanNumChunks; j++ { +- reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 +- reverse >>= uint(16 - huffmanChunkBits) +- off := j - uint(link) +- if sanity && h.chunks[reverse] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 +- reverse >>= uint(16 - n) +- if n <= huffmanChunkBits { +- for off := reverse; off < len(h.chunks); off += 1 << uint(n) { +- // We should never need to overwrite +- // an existing chunk. Also, 0 is +- // never a valid chunk, because the +- // lower 4 "count" bits should be +- // between 1 and 15. +- if sanity && h.chunks[off] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- h.chunks[off] = chunk +- } +- } else { +- j := reverse & (huffmanNumChunks - 1) +- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { +- // Longer codes should have been +- // associated with a link table above. +- panic("impossible: not an indirect chunk") +- } +- value := h.chunks[j] >> huffmanValueShift +- linktab := h.links[value] +- reverse >>= huffmanChunkBits +- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { +- if sanity && linktab[off] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- linktab[off] = chunk +- } +- } +- } +- +- if sanity { +- // Above we've sanity checked that we never overwrote +- // an existing entry. Here we additionally check that +- // we filled the tables completely. +- for i, chunk := range h.chunks { +- if chunk == 0 { +- // As an exception, in the degenerate +- // single-code case, we allow odd +- // chunks to be missing. 
+- if code == 1 && i%2 == 1 { +- continue +- } +- panic("impossible: missing chunk") +- } +- } +- for _, linktab := range h.links { +- for _, chunk := range linktab { +- if chunk == 0 { +- panic("impossible: missing chunk") +- } +- } +- } +- } +- +- return true +-} +- +-func main() { +- flag.Parse() +- +- var h huffmanDecoder +- var bits [288]int +- initReverseByte() +- for i := 0; i < 144; i++ { +- bits[i] = 8 +- } +- for i := 144; i < 256; i++ { +- bits[i] = 9 +- } +- for i := 256; i < 280; i++ { +- bits[i] = 7 +- } +- for i := 280; i < 288; i++ { +- bits[i] = 8 +- } +- h.init(bits[:]) +- if h.links != nil { +- log.Fatal("Unexpected links table in fixed Huffman decoder") +- } +- +- var buf bytes.Buffer +- +- fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file.`+"\n\n") +- +- fmt.Fprintln(&buf, "package flate") +- fmt.Fprintln(&buf) +- fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") +- fmt.Fprintln(&buf) +- fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") +- fmt.Fprintf(&buf, "\t%d,\n", h.min) +- fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") +- for i := 0; i < huffmanNumChunks; i++ { +- if i&7 == 0 { +- fmt.Fprintf(&buf, "\t\t") +- } else { +- fmt.Fprintf(&buf, " ") +- } +- fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) +- if i&7 == 7 { +- fmt.Fprintln(&buf) +- } +- } +- fmt.Fprintln(&buf, "\t},") +- fmt.Fprintln(&buf, "\tnil, 0,") +- fmt.Fprintln(&buf, "}") +- +- data, err := format.Source(buf.Bytes()) +- if err != nil { +- log.Fatal(err) +- } +- err = ioutil.WriteFile(*filename, data, 0644) +- if err != nil { +- log.Fatal(err) +- } +-} +- +-var reverseByte [256]byte +- +-func initReverseByte() { +- for x := 0; x < 256; x++ { +- var result byte +- for i := uint(0); i < 8; i++ { +- result |= byte(((x >> i) & 1) << (7 - i)) +- } +- reverseByte[x] = result +- } +-} +diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go +deleted file mode 100644 +index 437333d292..0000000000 +--- a/vendor/github.com/klauspost/cpuid/private-gen.go ++++ /dev/null +@@ -1,476 +0,0 @@ +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "fmt" +- "go/ast" +- "go/parser" +- "go/printer" +- "go/token" +- "io" +- "io/ioutil" +- "log" +- "os" +- "reflect" +- "strings" +- "unicode" +- "unicode/utf8" +-) +- +-var inFiles = []string{"cpuid.go", "cpuid_test.go"} +-var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +-var fileSet = token.NewFileSet() +-var reWrites = []rewrite{ +- initRewrite("CPUInfo -> cpuInfo"), +- initRewrite("Vendor -> vendor"), +- initRewrite("Flags -> flags"), +- initRewrite("Detect -> detect"), +- initRewrite("CPU -> cpu"), +-} +-var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, +- // cpuid_test.go +- "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +-} +- +-var excludePrefixes = []string{"test", "benchmark"} +- +-func main() { +- Package := "private" +- parserMode := parser.ParseComments +- exported := make(map[string]rewrite) +- for _, file := range inFiles { +- in, err := os.Open(file) +- if err != nil { +- log.Fatalf("opening input", err) +- } +- +- src, err := ioutil.ReadAll(in) +- if err != nil { +- log.Fatalf("reading input", err) +- } +- +- astfile, err := parser.ParseFile(fileSet, file, src, parserMode) +- if err != nil { +- 
log.Fatalf("parsing input", err) +- } +- +- for _, rw := range reWrites { +- astfile = rw(astfile) +- } +- +- // Inspect the AST and print all identifiers and literals. +- var startDecl token.Pos +- var endDecl token.Pos +- ast.Inspect(astfile, func(n ast.Node) bool { +- var s string +- switch x := n.(type) { +- case *ast.Ident: +- if x.IsExported() { +- t := strings.ToLower(x.Name) +- for _, pre := range excludePrefixes { +- if strings.HasPrefix(t, pre) { +- return true +- } +- } +- if excludeNames[t] != true { +- //if x.Pos() > startDecl && x.Pos() < endDecl { +- exported[x.Name] = initRewrite(x.Name + " -> " + t) +- } +- } +- +- case *ast.GenDecl: +- if x.Tok == token.CONST && x.Lparen > 0 { +- startDecl = x.Lparen +- endDecl = x.Rparen +- // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) +- } +- } +- if s != "" { +- fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) +- } +- return true +- }) +- +- for _, rw := range exported { +- astfile = rw(astfile) +- } +- +- var buf bytes.Buffer +- +- printer.Fprint(&buf, fileSet, astfile) +- +- // Remove package documentation and insert information +- s := buf.String() +- ind := strings.Index(buf.String(), "\npackage cpuid") +- s = s[ind:] +- s = "// Generated, DO NOT EDIT,\n" + +- "// but copy it to your own project and rename the package.\n" + +- "// See more at http://github.com/klauspost/cpuid\n" + +- s +- +- outputName := Package + string(os.PathSeparator) + file +- +- err = ioutil.WriteFile(outputName, []byte(s), 0644) +- if err != nil { +- log.Fatalf("writing output: %s", err) +- } +- log.Println("Generated", outputName) +- } +- +- for _, file := range copyFiles { +- dst := "" +- if strings.HasPrefix(file, "cpuid") { +- dst = Package + string(os.PathSeparator) + file +- } else { +- dst = Package + string(os.PathSeparator) + "cpuid_" + file +- } +- err := copyFile(file, dst) +- if err != nil { +- log.Fatalf("copying file: %s", err) +- } +- log.Println("Copied", dst) +- } +-} +- +-// CopyFile copies a file from src to dst. If src and dst files exist, and are +-// the same, then return success. Copy the file contents from src to dst. +-func copyFile(src, dst string) (err error) { +- sfi, err := os.Stat(src) +- if err != nil { +- return +- } +- if !sfi.Mode().IsRegular() { +- // cannot copy non-regular files (e.g., directories, +- // symlinks, devices, etc.) +- return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) +- } +- dfi, err := os.Stat(dst) +- if err != nil { +- if !os.IsNotExist(err) { +- return +- } +- } else { +- if !(dfi.Mode().IsRegular()) { +- return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) +- } +- if os.SameFile(sfi, dfi) { +- return +- } +- } +- err = copyFileContents(src, dst) +- return +-} +- +-// copyFileContents copies the contents of the file named src to the file named +-// by dst. The file will be created if it does not already exist. If the +-// destination file exists, all it's contents will be replaced by the contents +-// of the source file. 
+-func copyFileContents(src, dst string) (err error) { +- in, err := os.Open(src) +- if err != nil { +- return +- } +- defer in.Close() +- out, err := os.Create(dst) +- if err != nil { +- return +- } +- defer func() { +- cerr := out.Close() +- if err == nil { +- err = cerr +- } +- }() +- if _, err = io.Copy(out, in); err != nil { +- return +- } +- err = out.Sync() +- return +-} +- +-type rewrite func(*ast.File) *ast.File +- +-// Mostly copied from gofmt +-func initRewrite(rewriteRule string) rewrite { +- f := strings.Split(rewriteRule, "->") +- if len(f) != 2 { +- fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") +- os.Exit(2) +- } +- pattern := parseExpr(f[0], "pattern") +- replace := parseExpr(f[1], "replacement") +- return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +-} +- +-// parseExpr parses s as an expression. +-// It might make sense to expand this to allow statement patterns, +-// but there are problems with preserving formatting and also +-// with what a wildcard for a statement looks like. +-func parseExpr(s, what string) ast.Expr { +- x, err := parser.ParseExpr(s) +- if err != nil { +- fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) +- os.Exit(2) +- } +- return x +-} +- +-// Keep this function for debugging. +-/* +-func dump(msg string, val reflect.Value) { +- fmt.Printf("%s:\n", msg) +- ast.Print(fileSet, val.Interface()) +- fmt.Println() +-} +-*/ +- +-// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { +- cmap := ast.NewCommentMap(fileSet, p, p.Comments) +- m := make(map[string]reflect.Value) +- pat := reflect.ValueOf(pattern) +- repl := reflect.ValueOf(replace) +- +- var rewriteVal func(val reflect.Value) reflect.Value +- rewriteVal = func(val reflect.Value) reflect.Value { +- // don't bother if val is invalid to start with +- if !val.IsValid() { +- return reflect.Value{} +- } +- for k := range m { +- delete(m, k) +- } +- val = apply(rewriteVal, val) +- if match(m, pat, val) { +- val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) +- } +- return val +- } +- +- r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) +- r.Comments = cmap.Filter(r).Comments() // recreate comments list +- return r +-} +- +-// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +-func set(x, y reflect.Value) { +- // don't bother if x cannot be set or y is invalid +- if !x.CanSet() || !y.IsValid() { +- return +- } +- defer func() { +- if x := recover(); x != nil { +- if s, ok := x.(string); ok && +- (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { +- // x cannot be set to y - ignore this rewrite +- return +- } +- panic(x) +- } +- }() +- x.Set(y) +-} +- +-// Values/types for special cases. +-var ( +- objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) +- scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) +- +- identType = reflect.TypeOf((*ast.Ident)(nil)) +- objectPtrType = reflect.TypeOf((*ast.Object)(nil)) +- positionType = reflect.TypeOf(token.NoPos) +- callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) +- scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +-) +- +-// apply replaces each AST field x in val with f(x), returning val. +-// To avoid extra conversions, f operates on the reflect.Value form. 
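// Aside: the helpers below (apply/match/subst) implement a gofmt-style
// "pattern -> replacement" rewrite over reflect.Values. For the plain
// identifier renames this generator needs, the effect is the same as this
// small standalone sketch (the file contents are illustrative only).
package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	fset := token.NewFileSet()
	src := "package cpuid\n\nfunc Detect() {}\n"
	f, err := parser.ParseFile(fset, "cpuid.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Equivalent of the generator's "Detect -> detect" rewrite rule.
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok && id.Name == "Detect" {
			id.Name = "detect"
		}
		return true
	})
	printer.Fprint(os.Stdout, fset, f) // prints the file with detect() unexported
}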
+-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { +- if !val.IsValid() { +- return reflect.Value{} +- } +- +- // *ast.Objects introduce cycles and are likely incorrect after +- // rewrite; don't follow them but replace with nil instead +- if val.Type() == objectPtrType { +- return objectPtrNil +- } +- +- // similarly for scopes: they are likely incorrect after a rewrite; +- // replace them with nil +- if val.Type() == scopePtrType { +- return scopePtrNil +- } +- +- switch v := reflect.Indirect(val); v.Kind() { +- case reflect.Slice: +- for i := 0; i < v.Len(); i++ { +- e := v.Index(i) +- set(e, f(e)) +- } +- case reflect.Struct: +- for i := 0; i < v.NumField(); i++ { +- e := v.Field(i) +- set(e, f(e)) +- } +- case reflect.Interface: +- e := v.Elem() +- set(v, f(e)) +- } +- return val +-} +- +-func isWildcard(s string) bool { +- rune, size := utf8.DecodeRuneInString(s) +- return size == len(s) && unicode.IsLower(rune) +-} +- +-// match returns true if pattern matches val, +-// recording wildcard submatches in m. +-// If m == nil, match checks whether pattern == val. +-func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { +- // Wildcard matches any expression. If it appears multiple +- // times in the pattern, it must match the same expression +- // each time. +- if m != nil && pattern.IsValid() && pattern.Type() == identType { +- name := pattern.Interface().(*ast.Ident).Name +- if isWildcard(name) && val.IsValid() { +- // wildcards only match valid (non-nil) expressions. +- if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { +- if old, ok := m[name]; ok { +- return match(nil, old, val) +- } +- m[name] = val +- return true +- } +- } +- } +- +- // Otherwise, pattern and val must match recursively. +- if !pattern.IsValid() || !val.IsValid() { +- return !pattern.IsValid() && !val.IsValid() +- } +- if pattern.Type() != val.Type() { +- return false +- } +- +- // Special cases. +- switch pattern.Type() { +- case identType: +- // For identifiers, only the names need to match +- // (and none of the other *ast.Object information). +- // This is a common case, handle it all here instead +- // of recursing down any further via reflection. +- p := pattern.Interface().(*ast.Ident) +- v := val.Interface().(*ast.Ident) +- return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name +- case objectPtrType, positionType: +- // object pointers and token positions always match +- return true +- case callExprType: +- // For calls, the Ellipsis fields (token.Position) must +- // match since that is how f(x) and f(x...) are different. +- // Check them here but fall through for the remaining fields. +- p := pattern.Interface().(*ast.CallExpr) +- v := val.Interface().(*ast.CallExpr) +- if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { +- return false +- } +- } +- +- p := reflect.Indirect(pattern) +- v := reflect.Indirect(val) +- if !p.IsValid() || !v.IsValid() { +- return !p.IsValid() && !v.IsValid() +- } +- +- switch p.Kind() { +- case reflect.Slice: +- if p.Len() != v.Len() { +- return false +- } +- for i := 0; i < p.Len(); i++ { +- if !match(m, p.Index(i), v.Index(i)) { +- return false +- } +- } +- return true +- +- case reflect.Struct: +- for i := 0; i < p.NumField(); i++ { +- if !match(m, p.Field(i), v.Field(i)) { +- return false +- } +- } +- return true +- +- case reflect.Interface: +- return match(m, p.Elem(), v.Elem()) +- } +- +- // Handle token integers, etc. 
+- return p.Interface() == v.Interface() +-} +- +-// subst returns a copy of pattern with values from m substituted in place +-// of wildcards and pos used as the position of tokens from the pattern. +-// if m == nil, subst returns a copy of pattern and doesn't change the line +-// number information. +-func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { +- if !pattern.IsValid() { +- return reflect.Value{} +- } +- +- // Wildcard gets replaced with map value. +- if m != nil && pattern.Type() == identType { +- name := pattern.Interface().(*ast.Ident).Name +- if isWildcard(name) { +- if old, ok := m[name]; ok { +- return subst(nil, old, reflect.Value{}) +- } +- } +- } +- +- if pos.IsValid() && pattern.Type() == positionType { +- // use new position only if old position was valid in the first place +- if old := pattern.Interface().(token.Pos); !old.IsValid() { +- return pattern +- } +- return pos +- } +- +- // Otherwise copy. +- switch p := pattern; p.Kind() { +- case reflect.Slice: +- v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) +- for i := 0; i < p.Len(); i++ { +- v.Index(i).Set(subst(m, p.Index(i), pos)) +- } +- return v +- +- case reflect.Struct: +- v := reflect.New(p.Type()).Elem() +- for i := 0; i < p.NumField(); i++ { +- v.Field(i).Set(subst(m, p.Field(i), pos)) +- } +- return v +- +- case reflect.Ptr: +- v := reflect.New(p.Type()).Elem() +- if elem := p.Elem(); elem.IsValid() { +- v.Set(subst(m, elem, pos).Addr()) +- } +- return v +- +- case reflect.Interface: +- v := reflect.New(p.Type()).Elem() +- if elem := p.Elem(); elem.IsValid() { +- v.Set(subst(m, elem, pos)) +- } +- return v +- } +- +- return pattern +-} +diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go +deleted file mode 100644 +index 855e60aee5..0000000000 +--- a/vendor/github.com/ulikunitz/xz/example.go ++++ /dev/null +@@ -1,40 +0,0 @@ +-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "io" +- "log" +- "os" +- +- "github.com/ulikunitz/xz" +-) +- +-func main() { +- const text = "The quick brown fox jumps over the lazy dog.\n" +- var buf bytes.Buffer +- // compress text +- w, err := xz.NewWriter(&buf) +- if err != nil { +- log.Fatalf("xz.NewWriter error %s", err) +- } +- if _, err := io.WriteString(w, text); err != nil { +- log.Fatalf("WriteString error %s", err) +- } +- if err := w.Close(); err != nil { +- log.Fatalf("w.Close error %s", err) +- } +- // decompress buffer and write output to stdout +- r, err := xz.NewReader(&buf) +- if err != nil { +- log.Fatalf("NewReader error %s", err) +- } +- if _, err = io.Copy(os.Stdout, r); err != nil { +- log.Fatalf("io.Copy error %s", err) +- } +-} +diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go +deleted file mode 100644 +index 5d052781bc..0000000000 +--- a/vendor/golang.org/x/net/html/atom/gen.go ++++ /dev/null +@@ -1,712 +0,0 @@ +-// Copyright 2012 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-//go:generate go run gen.go +-//go:generate go run gen.go -test +- +-package main +- +-import ( +- "bytes" +- "flag" +- "fmt" +- "go/format" +- "io/ioutil" +- "math/rand" +- "os" +- "sort" +- "strings" +-) +- +-// identifier converts s to a Go exported identifier. +-// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +-func identifier(s string) string { +- b := make([]byte, 0, len(s)) +- cap := true +- for _, c := range s { +- if c == '-' { +- cap = true +- continue +- } +- if cap && 'a' <= c && c <= 'z' { +- c -= 'a' - 'A' +- } +- cap = false +- b = append(b, byte(c)) +- } +- return string(b) +-} +- +-var test = flag.Bool("test", false, "generate table_test.go") +- +-func genFile(name string, buf *bytes.Buffer) { +- b, err := format.Source(buf.Bytes()) +- if err != nil { +- fmt.Fprintln(os.Stderr, err) +- os.Exit(1) +- } +- if err := ioutil.WriteFile(name, b, 0644); err != nil { +- fmt.Fprintln(os.Stderr, err) +- os.Exit(1) +- } +-} +- +-func main() { +- flag.Parse() +- +- var all []string +- all = append(all, elements...) +- all = append(all, attributes...) +- all = append(all, eventHandlers...) +- all = append(all, extra...) +- sort.Strings(all) +- +- // uniq - lists have dups +- w := 0 +- for _, s := range all { +- if w == 0 || all[w-1] != s { +- all[w] = s +- w++ +- } +- } +- all = all[:w] +- +- if *test { +- var buf bytes.Buffer +- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") +- fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") +- fmt.Fprintln(&buf, "package atom\n") +- fmt.Fprintln(&buf, "var testAtomList = []string{") +- for _, s := range all { +- fmt.Fprintf(&buf, "\t%q,\n", s) +- } +- fmt.Fprintln(&buf, "}") +- +- genFile("table_test.go", &buf) +- return +- } +- +- // Find hash that minimizes table size. +- var best *table +- for i := 0; i < 1000000; i++ { +- if best != nil && 1<<(best.k-1) < len(all) { +- break +- } +- h := rand.Uint32() +- for k := uint(0); k <= 16; k++ { +- if best != nil && k >= best.k { +- break +- } +- var t table +- if t.init(h, k, all) { +- best = &t +- break +- } +- } +- } +- if best == nil { +- fmt.Fprintf(os.Stderr, "failed to construct string table\n") +- os.Exit(1) +- } +- +- // Lay out strings, using overlaps when possible. +- layout := append([]string{}, all...) +- +- // Remove strings that are substrings of other strings +- for changed := true; changed; { +- changed = false +- for i, s := range layout { +- if s == "" { +- continue +- } +- for j, t := range layout { +- if i != j && t != "" && strings.Contains(s, t) { +- changed = true +- layout[j] = "" +- } +- } +- } +- } +- +- // Join strings where one suffix matches another prefix. +- for { +- // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], +- // maximizing overlap length k. +- besti := -1 +- bestj := -1 +- bestk := 0 +- for i, s := range layout { +- if s == "" { +- continue +- } +- for j, t := range layout { +- if i == j { +- continue +- } +- for k := bestk + 1; k <= len(s) && k <= len(t); k++ { +- if s[len(s)-k:] == t[:k] { +- besti = i +- bestj = j +- bestk = k +- } +- } +- } +- } +- if bestk > 0 { +- layout[besti] += layout[bestj][bestk:] +- layout[bestj] = "" +- continue +- } +- break +- } +- +- text := strings.Join(layout, "") +- +- atom := map[string]uint32{} +- for _, s := range all { +- off := strings.Index(text, s) +- if off < 0 { +- panic("lost string " + s) +- } +- atom[s] = uint32(off<<8 | len(s)) +- } +- +- var buf bytes.Buffer +- // Generate the Go code. 
+- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") +- fmt.Fprintln(&buf, "//go:generate go run gen.go\n") +- fmt.Fprintln(&buf, "package atom\n\nconst (") +- +- // compute max len +- maxLen := 0 +- for _, s := range all { +- if maxLen < len(s) { +- maxLen = len(s) +- } +- fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) +- } +- fmt.Fprintln(&buf, ")\n") +- +- fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) +- fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) +- +- fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) +- for i, s := range best.tab { +- if s == "" { +- continue +- } +- fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) +- } +- fmt.Fprintf(&buf, "}\n") +- datasize := (1 << best.k) * 4 +- +- fmt.Fprintln(&buf, "const atomText =") +- textsize := len(text) +- for len(text) > 60 { +- fmt.Fprintf(&buf, "\t%q +\n", text[:60]) +- text = text[60:] +- } +- fmt.Fprintf(&buf, "\t%q\n\n", text) +- +- genFile("table.go", &buf) +- +- fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) +-} +- +-type byLen []string +- +-func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } +-func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +-func (x byLen) Len() int { return len(x) } +- +-// fnv computes the FNV hash with an arbitrary starting value h. +-func fnv(h uint32, s string) uint32 { +- for i := 0; i < len(s); i++ { +- h ^= uint32(s[i]) +- h *= 16777619 +- } +- return h +-} +- +-// A table represents an attempt at constructing the lookup table. +-// The lookup table uses cuckoo hashing, meaning that each string +-// can be found in one of two positions. +-type table struct { +- h0 uint32 +- k uint +- mask uint32 +- tab []string +-} +- +-// hash returns the two hashes for s. +-func (t *table) hash(s string) (h1, h2 uint32) { +- h := fnv(t.h0, s) +- h1 = h & t.mask +- h2 = (h >> 16) & t.mask +- return +-} +- +-// init initializes the table with the given parameters. +-// h0 is the initial hash value, +-// k is the number of bits of hash value to use, and +-// x is the list of strings to store in the table. +-// init returns false if the table cannot be constructed. +-func (t *table) init(h0 uint32, k uint, x []string) bool { +- t.h0 = h0 +- t.k = k +- t.tab = make([]string, 1< len(t.tab) { +- return false +- } +- s := t.tab[i] +- h1, h2 := t.hash(s) +- j := h1 + h2 - i +- if t.tab[j] != "" && !t.push(j, depth+1) { +- return false +- } +- t.tab[j] = s +- return true +-} +- +-// The lists of element names and attribute keys were taken from +-// https://html.spec.whatwg.org/multipage/indices.html#index +-// as of the "HTML Living Standard - Last Updated 16 April 2018" version. +- +-// "command", "keygen" and "menuitem" have been removed from the spec, +-// but are kept here for backwards compatibility. 
+-var elements = []string{ +- "a", +- "abbr", +- "address", +- "area", +- "article", +- "aside", +- "audio", +- "b", +- "base", +- "bdi", +- "bdo", +- "blockquote", +- "body", +- "br", +- "button", +- "canvas", +- "caption", +- "cite", +- "code", +- "col", +- "colgroup", +- "command", +- "data", +- "datalist", +- "dd", +- "del", +- "details", +- "dfn", +- "dialog", +- "div", +- "dl", +- "dt", +- "em", +- "embed", +- "fieldset", +- "figcaption", +- "figure", +- "footer", +- "form", +- "h1", +- "h2", +- "h3", +- "h4", +- "h5", +- "h6", +- "head", +- "header", +- "hgroup", +- "hr", +- "html", +- "i", +- "iframe", +- "img", +- "input", +- "ins", +- "kbd", +- "keygen", +- "label", +- "legend", +- "li", +- "link", +- "main", +- "map", +- "mark", +- "menu", +- "menuitem", +- "meta", +- "meter", +- "nav", +- "noscript", +- "object", +- "ol", +- "optgroup", +- "option", +- "output", +- "p", +- "param", +- "picture", +- "pre", +- "progress", +- "q", +- "rp", +- "rt", +- "ruby", +- "s", +- "samp", +- "script", +- "section", +- "select", +- "slot", +- "small", +- "source", +- "span", +- "strong", +- "style", +- "sub", +- "summary", +- "sup", +- "table", +- "tbody", +- "td", +- "template", +- "textarea", +- "tfoot", +- "th", +- "thead", +- "time", +- "title", +- "tr", +- "track", +- "u", +- "ul", +- "var", +- "video", +- "wbr", +-} +- +-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +-// +-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +-// but are kept here for backwards compatibility. +-var attributes = []string{ +- "abbr", +- "accept", +- "accept-charset", +- "accesskey", +- "action", +- "allowfullscreen", +- "allowpaymentrequest", +- "allowusermedia", +- "alt", +- "as", +- "async", +- "autocomplete", +- "autofocus", +- "autoplay", +- "challenge", +- "charset", +- "checked", +- "cite", +- "class", +- "color", +- "cols", +- "colspan", +- "command", +- "content", +- "contenteditable", +- "contextmenu", +- "controls", +- "coords", +- "crossorigin", +- "data", +- "datetime", +- "default", +- "defer", +- "dir", +- "dirname", +- "disabled", +- "download", +- "draggable", +- "dropzone", +- "enctype", +- "for", +- "form", +- "formaction", +- "formenctype", +- "formmethod", +- "formnovalidate", +- "formtarget", +- "headers", +- "height", +- "hidden", +- "high", +- "href", +- "hreflang", +- "http-equiv", +- "icon", +- "id", +- "inputmode", +- "integrity", +- "is", +- "ismap", +- "itemid", +- "itemprop", +- "itemref", +- "itemscope", +- "itemtype", +- "keytype", +- "kind", +- "label", +- "lang", +- "list", +- "loop", +- "low", +- "manifest", +- "max", +- "maxlength", +- "media", +- "mediagroup", +- "method", +- "min", +- "minlength", +- "multiple", +- "muted", +- "name", +- "nomodule", +- "nonce", +- "novalidate", +- "open", +- "optimum", +- "pattern", +- "ping", +- "placeholder", +- "playsinline", +- "poster", +- "preload", +- "radiogroup", +- "readonly", +- "referrerpolicy", +- "rel", +- "required", +- "reversed", +- "rows", +- "rowspan", +- "sandbox", +- "spellcheck", +- "scope", +- "scoped", +- "seamless", +- "selected", +- "shape", +- "size", +- "sizes", +- "sortable", +- "sorted", +- "slot", +- "span", +- "spellcheck", +- "src", +- "srcdoc", +- "srclang", +- "srcset", +- "start", +- "step", +- "style", +- "tabindex", +- "target", +- "title", +- "translate", +- "type", +- "typemustmatch", +- "updateviacache", +- "usemap", +- "value", 
+- "width", +- "workertype", +- "wrap", +-} +- +-// "onautocomplete", "onautocompleteerror", "onmousewheel", +-// "onshow" and "onsort" have been removed from the spec, +-// but are kept here for backwards compatibility. +-var eventHandlers = []string{ +- "onabort", +- "onautocomplete", +- "onautocompleteerror", +- "onauxclick", +- "onafterprint", +- "onbeforeprint", +- "onbeforeunload", +- "onblur", +- "oncancel", +- "oncanplay", +- "oncanplaythrough", +- "onchange", +- "onclick", +- "onclose", +- "oncontextmenu", +- "oncopy", +- "oncuechange", +- "oncut", +- "ondblclick", +- "ondrag", +- "ondragend", +- "ondragenter", +- "ondragexit", +- "ondragleave", +- "ondragover", +- "ondragstart", +- "ondrop", +- "ondurationchange", +- "onemptied", +- "onended", +- "onerror", +- "onfocus", +- "onhashchange", +- "oninput", +- "oninvalid", +- "onkeydown", +- "onkeypress", +- "onkeyup", +- "onlanguagechange", +- "onload", +- "onloadeddata", +- "onloadedmetadata", +- "onloadend", +- "onloadstart", +- "onmessage", +- "onmessageerror", +- "onmousedown", +- "onmouseenter", +- "onmouseleave", +- "onmousemove", +- "onmouseout", +- "onmouseover", +- "onmouseup", +- "onmousewheel", +- "onwheel", +- "onoffline", +- "ononline", +- "onpagehide", +- "onpageshow", +- "onpaste", +- "onpause", +- "onplay", +- "onplaying", +- "onpopstate", +- "onprogress", +- "onratechange", +- "onreset", +- "onresize", +- "onrejectionhandled", +- "onscroll", +- "onsecuritypolicyviolation", +- "onseeked", +- "onseeking", +- "onselect", +- "onshow", +- "onsort", +- "onstalled", +- "onstorage", +- "onsubmit", +- "onsuspend", +- "ontimeupdate", +- "ontoggle", +- "onunhandledrejection", +- "onunload", +- "onvolumechange", +- "onwaiting", +-} +- +-// extra are ad-hoc values not covered by any of the lists above. +-var extra = []string{ +- "acronym", +- "align", +- "annotation", +- "annotation-xml", +- "applet", +- "basefont", +- "bgsound", +- "big", +- "blink", +- "center", +- "color", +- "desc", +- "face", +- "font", +- "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. +- "foreignobject", +- "frame", +- "frameset", +- "image", +- "isindex", +- "listing", +- "malignmark", +- "marquee", +- "math", +- "mglyph", +- "mi", +- "mn", +- "mo", +- "ms", +- "mtext", +- "nobr", +- "noembed", +- "noframes", +- "plaintext", +- "prompt", +- "public", +- "rb", +- "rtc", +- "spacer", +- "strike", +- "svg", +- "system", +- "tt", +- "xmp", +-} +diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go +deleted file mode 100644 +index 4548b993db..0000000000 +--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go ++++ /dev/null +@@ -1,61 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. +-//This program must be run after mksyscall.go. 
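// Aside: a runnable sketch of the trampoline extraction described above. The
// input string stands in for the concatenated syscall_darwin*.go sources; the
// function name is made up for the example.
package main

import (
	"fmt"
	"strings"
)

func main() {
	in := "func libc_getpid_trampoline()\nfunc Getpid() (pid int)\n"
	for _, line := range strings.Split(in, "\n") {
		if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
			continue
		}
		fn := line[5 : len(line)-13] // strip "func " and "_trampoline()"
		fmt.Printf("TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n\tJMP\t%s(SB)\n", fn, fn)
	}
}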
+-package main +- +-import ( +- "bytes" +- "fmt" +- "io/ioutil" +- "log" +- "os" +- "strings" +-) +- +-func main() { +- in1, err := ioutil.ReadFile("syscall_darwin.go") +- if err != nil { +- log.Fatalf("can't open syscall_darwin.go: %s", err) +- } +- arch := os.Args[1] +- in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) +- if err != nil { +- log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) +- } +- in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) +- if err != nil { +- log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) +- } +- in := string(in1) + string(in2) + string(in3) +- +- trampolines := map[string]bool{} +- +- var out bytes.Buffer +- +- fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) +- fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") +- fmt.Fprintf(&out, "\n") +- fmt.Fprintf(&out, "// +build go1.12\n") +- fmt.Fprintf(&out, "\n") +- fmt.Fprintf(&out, "#include \"textflag.h\"\n") +- for _, line := range strings.Split(in, "\n") { +- if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { +- continue +- } +- fn := line[5 : len(line)-13] +- if !trampolines[fn] { +- trampolines[fn] = true +- fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) +- fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) +- } +- } +- err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) +- if err != nil { +- log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) +- } +-} +diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go +deleted file mode 100644 +index eb4332059a..0000000000 +--- a/vendor/golang.org/x/sys/unix/mkpost.go ++++ /dev/null +@@ -1,122 +0,0 @@ +-// Copyright 2016 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// mkpost processes the output of cgo -godefs to +-// modify the generated types. It is used to clean up +-// the sys API in an architecture specific manner. +-// +-// mkpost is run after cgo -godefs; see README.md. +-package main +- +-import ( +- "bytes" +- "fmt" +- "go/format" +- "io/ioutil" +- "log" +- "os" +- "regexp" +-) +- +-func main() { +- // Get the OS and architecture (using GOARCH_TARGET if it exists) +- goos := os.Getenv("GOOS") +- goarch := os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check that we are using the Docker-based build system if we should be. +- if goos == "linux" { +- if os.Getenv("GOLANG_SYS_BUILD") != "docker" { +- os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") +- os.Stderr.WriteString("See README.md\n") +- os.Exit(1) +- } +- } +- +- b, err := ioutil.ReadAll(os.Stdin) +- if err != nil { +- log.Fatal(err) +- } +- +- if goos == "aix" { +- // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t +- // to avoid having both StTimespec and Timespec. 
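// Aside: each cleanup in this file is a plain regexp rewrite over the
// cgo -godefs output. A self-contained example of the AIX rename performed
// just below (the struct literal is an illustrative stand-in):
package main

import (
	"fmt"
	"regexp"
)

func main() {
	b := []byte("type Stat_t struct {\n\tAtim _Ctype_struct_st_timespec\n}\n")
	sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
	b = sttimespec.ReplaceAll(b, []byte("Timespec"))
	fmt.Print(string(b)) // Atim is now declared as Timespec
}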
+- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) +- b = sttimespec.ReplaceAll(b, []byte("Timespec")) +- } +- +- // Intentionally export __val fields in Fsid and Sigset_t +- valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) +- b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) +- +- // Intentionally export __fds_bits field in FdSet +- fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) +- b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) +- +- // If we have empty Ptrace structs, we should delete them. Only s390x emits +- // nonempty Ptrace structs. +- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) +- b = ptraceRexexp.ReplaceAll(b, nil) +- +- // Replace the control_regs union with a blank identifier for now. +- controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) +- b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) +- +- // Remove fields that are added by glibc +- // Note that this is unstable as the identifers are private. +- removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) +- b = removeFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Convert [65]int8 to [65]byte in Utsname members to simplify +- // conversion to string; see golang.org/issue/20753 +- convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) +- b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) +- +- // Convert [1024]int8 to [1024]byte in Ptmget members +- convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) +- b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) +- +- // Remove spare fields (e.g. in Statx_t) +- spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) +- b = spareFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove cgo padding fields +- removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) +- b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove padding, hidden, or unused fields +- removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) +- b = removeFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove the first line of warning from cgo +- b = b[bytes.IndexByte(b, '\n')+1:] +- // Modify the command in the header to include: +- // mkpost, our own warning, and a build tag. +- replacement := fmt.Sprintf(`$1 | go run mkpost.go +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s,%s`, goarch, goos) +- cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) +- b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) +- +- // Rename Stat_t time fields +- if goos == "freebsd" && goarch == "386" { +- // Hide Stat_t.[AMCB]tim_ext fields +- renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) +- b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) +- } +- renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) +- b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) +- +- // gofmt +- b, err = format.Source(b) +- if err != nil { +- log.Fatal(err) +- } +- +- os.Stdout.Write(b) +-} +diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go +deleted file mode 100644 +index e4af9424e9..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall.go ++++ /dev/null +@@ -1,407 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_darwin.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named errno. +- +-A line beginning with //sysnb is like //sys, except that the +-goroutine will not be suspended during the execution of the system +-call. This must only be used for system calls which can never +-block, as otherwise the system call could cause all goroutines to +-hang. +-*/ +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- plan9 = flag.Bool("plan9", false, "plan9") +- openbsd = flag.Bool("openbsd", false, "openbsd") +- netbsd = flag.Bool("netbsd", false, "netbsd") +- dragonfly = flag.Bool("dragonfly", false, "dragonfly") +- arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair +- tags = flag.String("tags", "", "build tags") +- filename = flag.String("output", "", "output file name (standard output if omitted)") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- // Get the OS and architecture (using GOARCH_TARGET if it exists) +- goos := os.Getenv("GOOS") +- if goos == "" { +- fmt.Fprintln(os.Stderr, "GOOS not defined in environment") +- os.Exit(1) +- } +- goarch := os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- +- // Check that we are using the Docker-based build system if we should +- if goos == "linux" { +- if os.Getenv("GOLANG_SYS_BUILD") != "docker" { +- fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") +- fmt.Fprintf(os.Stderr, "See README.md\n") +- os.Exit(1) +- } +- } +- +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } 
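// Aside: what the prototype scanner below accepts. After whitespace is
// collapsed, a //sys line is split into name, inputs, outputs and an optional
// explicit syscall number by one regular expression (the sample line here is
// illustrative):
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`)
	t := "//sys Open(path string, mode int, perm int) (fd int, err error)"
	f := re.FindStringSubmatch(t)
	if f == nil {
		panic("malformed //sys declaration")
	}
	funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
	fmt.Println(funct, "|", inps, "|", outps, "|", sysname) // sysname empty -> SYS_OPEN is derived
}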
+- +- libc := false +- if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { +- libc = true +- } +- trampolines := map[string]bool{} +- +- text := "" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, errno error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, sysname := f[2], f[3], f[4], f[5] +- +- // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. +- if goos == "darwin" && !libc && funct == "ClockGettime" { +- continue +- } +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // Go function header. +- outDecl := "" +- if len(out) > 0 { +- outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) +- } +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) +- +- // Check if err return available +- errvar := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- break +- } +- } +- +- // Prepare arguments to Syscall. +- var args []string +- n := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\tvar _p%d *byte\n", n) +- text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) +- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\tvar _p%d *byte\n", n) +- text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass dummy pointer in that case. +- // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
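// Aside: the never-pass-nil rule described just above, as a standalone helper.
// _zero stands in for the package-level dummy word the generated wrappers
// point at when a slice is empty.
package main

import (
	"fmt"
	"unsafe"
)

var _zero uintptr

func argPointer(b []byte) unsafe.Pointer {
	if len(b) > 0 {
		return unsafe.Pointer(&b[0])
	}
	return unsafe.Pointer(&_zero)
}

func main() {
	fmt.Println(argPointer(nil) != nil, argPointer([]byte("x")) != nil) // true true
}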
+- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) +- text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) +- args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) +- n++ +- } else if p.Type == "int64" && (*openbsd || *netbsd) { +- args = append(args, "0") +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else if endianness == "little-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } else if p.Type == "int64" && *dragonfly { +- if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { +- args = append(args, "0") +- } +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else if endianness == "little-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { +- if len(args)%2 == 1 && *arm { +- // arm abi specifies 64-bit argument uses +- // (even, odd) pair +- args = append(args, "0") +- } +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } +- +- // Determine which form to use; pad args with zeros. +- asm := "Syscall" +- if nonblock != nil { +- if errvar == "" && goos == "linux" { +- asm = "RawSyscallNoError" +- } else { +- asm = "RawSyscall" +- } +- } else { +- if errvar == "" && goos == "linux" { +- asm = "SyscallNoError" +- } +- } +- if len(args) <= 3 { +- for len(args) < 3 { +- args = append(args, "0") +- } +- } else if len(args) <= 6 { +- asm += "6" +- for len(args) < 6 { +- args = append(args, "0") +- } +- } else if len(args) <= 9 { +- asm += "9" +- for len(args) < 9 { +- args = append(args, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) +- } +- +- // System call number. +- if sysname == "" { +- sysname = "SYS_" + funct +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToUpper(sysname) +- } +- +- var libcFn string +- if libc { +- asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call +- sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ +- sysname = strings.ToLower(sysname) // lowercase +- if sysname == "getdirentries64" { +- // Special case - libSystem name and +- // raw syscall name don't match. +- sysname = "__getdirentries64" +- } +- libcFn = sysname +- sysname = "funcPC(libc_" + sysname + "_trampoline)" +- } +- +- // Actual call. +- arglist := strings.Join(args, ", ") +- call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) +- +- // Assign return values. 
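// Aside: the syscall-number derivation shown just above, condensed into a
// standalone helper: SYS_ plus the Go name, with an underscore inserted at
// each lower/upper boundary, then upper-cased.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func sysNumber(funct string) string {
	sysname := "SYS_" + funct
	sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
	return strings.ToUpper(sysname)
}

func main() {
	fmt.Println(sysNumber("Open"))         // SYS_OPEN
	fmt.Println(sysNumber("EpollCreate1")) // SYS_EPOLL_CREATE1
}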
+- body := "" +- ret := []string{"_", "_", "_"} +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" && !*plan9 { +- reg = "e1" +- ret[2] = reg +- doErrno = true +- } else if p.Name == "err" && *plan9 { +- ret[0] = "r0" +- ret[2] = "e1" +- break +- } else { +- reg = fmt.Sprintf("r%d", i) +- ret[i] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%s != 0", reg) +- } +- if p.Type == "int64" && endianness != "" { +- // 64-bit number in r1:r0 or r0:r1. +- if i+2 > len(out) { +- fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) +- } +- if endianness == "big-endian" { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) +- } else { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) +- } +- ret[i] = fmt.Sprintf("r%d", i) +- ret[i+1] = fmt.Sprintf("r%d", i+1) +- } +- if reg != "e1" || *plan9 { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { +- text += fmt.Sprintf("\t%s\n", call) +- } else { +- if errvar == "" && goos == "linux" { +- // raw syscall without error on Linux, see golang.org/issue/22924 +- text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) +- } else { +- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) +- } +- } +- text += body +- +- if *plan9 && ret[2] == "e1" { +- text += "\tif int32(r0) == -1 {\n" +- text += "\t\terr = e1\n" +- text += "\t}\n" +- } else if doErrno { +- text += "\tif e1 != 0 {\n" +- text += "\t\terr = errnoErr(e1)\n" +- text += "\t}\n" +- } +- text += "\treturn\n" +- text += "}\n\n" +- +- if libc && !trampolines[libcFn] { +- // some system calls share a trampoline, like read and readlen. +- trampolines[libcFn] = true +- // Declare assembly trampoline. +- text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) +- // Assembly trampoline calls the libc_* function, which this magic +- // redirects to use the function from libSystem. +- text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) +- text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) +- text += "\n" +- } +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-import ( +- "syscall" +- "unsafe" +-) +- +-var _ syscall.Errno +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +deleted file mode 100644 +index 3be3cdfc3b..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go ++++ /dev/null +@@ -1,415 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_aix.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. 
+- * If the return parameter is an error number, it must be named err. +- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +-*/ +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- aix = flag.Bool("aix", false, "aix") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- text := "" +- cExtern := "/*\n#include \n#include \n" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. 
+- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // Check if value return, err return available +- errvar := "" +- retvar := "" +- rettype := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- } else { +- retvar = p.Name +- rettype = p.Type +- } +- } +- +- // System call name. +- if sysname == "" { +- sysname = funct +- } +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. +- +- cRettype := "" +- if rettype == "unsafe.Pointer" { +- cRettype = "uintptr_t" +- } else if rettype == "uintptr" { +- cRettype = "uintptr_t" +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { +- cRettype = "uintptr_t" +- } else if rettype == "int" { +- cRettype = "int" +- } else if rettype == "int32" { +- cRettype = "int" +- } else if rettype == "int64" { +- cRettype = "long long" +- } else if rettype == "uint32" { +- cRettype = "unsigned int" +- } else if rettype == "uint64" { +- cRettype = "unsigned long long" +- } else { +- cRettype = "int" +- } +- if sysname == "exit" { +- cRettype = "void" +- } +- +- // Change p.Types to c +- var cIn []string +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "string" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t", "size_t") +- } else if p.Type == "unsafe.Pointer" { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "uintptr" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "int" { +- cIn = append(cIn, "int") +- } else if p.Type == "int32" { +- cIn = append(cIn, "int") +- } else if p.Type == "int64" { +- cIn = append(cIn, "long long") +- } else if p.Type == "uint32" { +- cIn = append(cIn, "unsigned int") +- } else if p.Type == "uint64" { +- cIn = append(cIn, "unsigned long long") +- } else { +- cIn = append(cIn, "int") +- } +- } +- +- if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- cExtern += "#define c_select select\n" +- } +- // Imports of system calls from libc +- cExtern += fmt.Sprintf("%s %s", cRettype, sysname) +- cIn := strings.Join(cIn, ", ") +- cExtern += fmt.Sprintf("(%s);\n", cIn) +- } +- +- // So file name. +- if *aix { +- if modname == "" { +- modname = "libc.a/shr_64.o" +- } else { +- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) +- os.Exit(1) +- } +- } +- +- strconvfunc := "C.CString" +- +- // Go function header. +- if outps != "" { +- outps = fmt.Sprintf(" (%s)", outps) +- } +- if text != "" { +- text += "\n" +- } +- +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) +- +- // Prepare arguments to Syscall. 
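// Aside: the Go-to-C argument mapping applied above, condensed into one
// helper (pointers, strings, uintptrs and private types all travel as
// uintptr_t; slices become a pointer plus a length).
package main

import (
	"fmt"
	"strings"
)

func cType(goType string) string {
	switch {
	case strings.HasPrefix(goType, "*"), goType == "string", goType == "unsafe.Pointer",
		goType == "uintptr", strings.HasPrefix(goType, "_"):
		return "uintptr_t"
	case strings.HasPrefix(goType, "[]"):
		return "uintptr_t, size_t"
	case goType == "int64":
		return "long long"
	case goType == "uint32":
		return "unsigned int"
	case goType == "uint64":
		return "unsigned long long"
	default:
		return "int" // int, int32 and anything unrecognised
	}
}

func main() {
	fmt.Println(cType("*byte"), "|", cType("[]byte"), "|", cType("uint64"), "|", cType("int"))
}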
+- var args []string +- n := 0 +- argN := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. +- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) +- n++ +- text += fmt.Sprintf("\tvar _p%d int\n", n) +- text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) +- args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- n++ +- } else if p.Type == "bool" { +- text += fmt.Sprintf("\tvar _p%d uint32\n", n) +- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) +- args = append(args, fmt.Sprintf("_p%d", n)) +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) +- } else if p.Type == "unsafe.Pointer" { +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) +- } else if p.Type == "int" { +- if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { +- args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) +- } else if argN == 0 && funct == "fcntl" { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- } else if p.Type == "int32" { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } else if p.Type == "int64" { +- args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) +- } else if p.Type == "uint32" { +- args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) +- } else if p.Type == "uint64" { +- args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) +- } else if p.Type == "uintptr" { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- argN++ +- } +- +- // Actual call. 
+- arglist := strings.Join(args, ", ") +- call := "" +- if sysname == "exit" { +- if errvar != "" { +- call += "er :=" +- } else { +- call += "" +- } +- } else if errvar != "" { +- call += "r0,er :=" +- } else if retvar != "" { +- call += "r0,_ :=" +- } else { +- call += "" +- } +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) +- } else { +- call += fmt.Sprintf("C.%s(%s)", sysname, arglist) +- } +- +- // Assign return values. +- body := "" +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- } else { +- reg = "r0" +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- +- // verify return +- if sysname != "exit" && errvar != "" { +- if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { +- body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } else { +- body += "\tif (r0 ==-1 && er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } +- } else if errvar != "" { +- body += "\tif (er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } +- +- text += fmt.Sprintf("\t%s\n", call) +- text += body +- +- text += "\treturn\n" +- text += "}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package %s +- +- +-%s +-*/ +-import "C" +-import ( +- "unsafe" +-) +- +- +-%s +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +deleted file mode 100644 +index c960099517..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go ++++ /dev/null +@@ -1,614 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_aix.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named err. 
+- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +- +- +-This program will generate three files and handle both gc and gccgo implementation: +- - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) +- - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 +- - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. +- +- The generated code looks like this +- +-zsyscall_aix_ppc64.go +-func asyscall(...) (n int, err error) { +- // Pointer Creation +- r1, e1 := callasyscall(...) +- // Type Conversion +- // Error Handler +- return +-} +- +-zsyscall_aix_ppc64_gc.go +-//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" +-//go:linkname libc_asyscall libc_asyscall +-var asyscall syscallFunc +- +-func callasyscall(...) (r1 uintptr, e1 Errno) { +- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) +- return +-} +- +-zsyscall_aix_ppc64_ggcgo.go +- +-// int asyscall(...) +- +-import "C" +- +-func callasyscall(...) (r1 uintptr, e1 Errno) { +- r1 = uintptr(C.asyscall(...)) +- e1 = syscall.GetErrno() +- return +-} +-*/ +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "io/ioutil" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- aix = flag.Bool("aix", false, "aix") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- // GCCGO +- textgccgo := "" +- cExtern := "/*\n#include \n" +- // GC +- textgc := "" +- dynimports := "" +- linknames := "" +- var vars []string +- // COMMON +- textcommon := "" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = 
regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- if sysname == "" { +- sysname = funct +- } +- +- onlyCommon := false +- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { +- // This function call another syscall which is already implemented. +- // Therefore, the gc and gccgo part must not be generated. +- onlyCommon = true +- } +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- +- textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- if !onlyCommon { +- textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- } +- +- // Check if value return, err return available +- errvar := "" +- rettype := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- } else { +- rettype = p.Type +- } +- } +- +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. 
+- +- // GCCGO Prototype return type +- cRettype := "" +- if rettype == "unsafe.Pointer" { +- cRettype = "uintptr_t" +- } else if rettype == "uintptr" { +- cRettype = "uintptr_t" +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { +- cRettype = "uintptr_t" +- } else if rettype == "int" { +- cRettype = "int" +- } else if rettype == "int32" { +- cRettype = "int" +- } else if rettype == "int64" { +- cRettype = "long long" +- } else if rettype == "uint32" { +- cRettype = "unsigned int" +- } else if rettype == "uint64" { +- cRettype = "unsigned long long" +- } else { +- cRettype = "int" +- } +- if sysname == "exit" { +- cRettype = "void" +- } +- +- // GCCGO Prototype arguments type +- var cIn []string +- for i, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "string" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t", "size_t") +- } else if p.Type == "unsafe.Pointer" { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "uintptr" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "int" { +- if (i == 0 || i == 2) && funct == "fcntl" { +- // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock +- cIn = append(cIn, "uintptr_t") +- } else { +- cIn = append(cIn, "int") +- } +- +- } else if p.Type == "int32" { +- cIn = append(cIn, "int") +- } else if p.Type == "int64" { +- cIn = append(cIn, "long long") +- } else if p.Type == "uint32" { +- cIn = append(cIn, "unsigned int") +- } else if p.Type == "uint64" { +- cIn = append(cIn, "unsigned long long") +- } else { +- cIn = append(cIn, "int") +- } +- } +- +- if !onlyCommon { +- // GCCGO Prototype Generation +- // Imports of system calls from libc +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- cExtern += "#define c_select select\n" +- } +- cExtern += fmt.Sprintf("%s %s", cRettype, sysname) +- cIn := strings.Join(cIn, ", ") +- cExtern += fmt.Sprintf("(%s);\n", cIn) +- } +- // GC Library name +- if modname == "" { +- modname = "libc.a/shr_64.o" +- } else { +- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) +- os.Exit(1) +- } +- sysvarname := fmt.Sprintf("libc_%s", sysname) +- +- if !onlyCommon { +- // GC Runtime import of function to allow cross-platform builds. +- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) +- // GC Link symbol to proc address variable. +- linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) +- // GC Library proc address variable. +- vars = append(vars, sysvarname) +- } +- +- strconvfunc := "BytePtrFromString" +- strconvtype := "*byte" +- +- // Go function header. +- if outps != "" { +- outps = fmt.Sprintf(" (%s)", outps) +- } +- if textcommon != "" { +- textcommon += "\n" +- } +- +- textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) +- +- // Prepare arguments tocall. 
+- var argscommon []string // Arguments in the common part +- var argscall []string // Arguments for call prototype +- var argsgc []string // Arguments for gc call (with syscall6) +- var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) +- n := 0 +- argN := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if p.Type == "string" && errvar != "" { +- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. +- textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) +- textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") +- } else if p.Type == "bool" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { +- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if p.Type == "int" { +- if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { +- // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock +- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- +- } else { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- } else if p.Type == "int32" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } else if p.Type == "int64" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) +- } else if p.Type == "uint32" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) +- } else if p.Type == "uint64" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) +- } else if p.Type == "uintptr" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- argN++ +- } +- nargs := len(argsgc) +- +- // COMMON function generation +- argscommonlist := strings.Join(argscommon, ", ") +- callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) +- ret := []string{"_", "_"} +- body := "" +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- ret[1] = reg +- doErrno = true +- } else { +- reg = "r0" +- ret[0] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%s != 0", reg) +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" { +- textcommon += fmt.Sprintf("\t%s\n", callcommon) +- } else { +- textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) +- } +- textcommon += body +- 
+- if doErrno { +- textcommon += "\tif e1 != 0 {\n" +- textcommon += "\t\terr = errnoErr(e1)\n" +- textcommon += "\t}\n" +- } +- textcommon += "\treturn\n" +- textcommon += "}\n" +- +- if onlyCommon { +- continue +- } +- +- // CALL Prototype +- callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) +- +- // GC function generation +- asm := "syscall6" +- if nonblock != nil { +- asm = "rawSyscall6" +- } +- +- if len(argsgc) <= 6 { +- for len(argsgc) < 6 { +- argsgc = append(argsgc, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) +- os.Exit(1) +- } +- argsgclist := strings.Join(argsgc, ", ") +- callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) +- +- textgc += callProto +- textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) +- textgc += "\treturn\n}\n" +- +- // GCCGO function generation +- argsgccgolist := strings.Join(argsgccgo, ", ") +- var callgccgo string +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) +- } else { +- callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) +- } +- textgccgo += callProto +- textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) +- textgccgo += "\te1 = syscall.GetErrno()\n" +- textgccgo += "\treturn\n}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- +- // Print zsyscall_aix_ppc64.go +- err := ioutil.WriteFile("zsyscall_aix_ppc64.go", +- []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- +- // Print zsyscall_aix_ppc64_gc.go +- vardecls := "\t" + strings.Join(vars, ",\n\t") +- vardecls += " syscallFunc" +- err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", +- []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- +- // Print zsyscall_aix_ppc64_gccgo.go +- err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", +- []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +-} +- +-const srcTemplate1 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package %s +- +-import ( +- "unsafe" +-) +- +- +-%s +- +-%s +-` +-const srcTemplate2 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +-// +build !gccgo +- +-package %s +- +-import ( +- "unsafe" +-) +-%s +-%s +-%s +-type syscallFunc uintptr +- +-var ( +-%s +-) +- +-// Implemented in runtime/syscall_aix.go. +-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +- +-%s +-` +-const srcTemplate3 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. 
+- +-// +build %s +-// +build gccgo +- +-package %s +- +-%s +-*/ +-import "C" +-import ( +- "syscall" +-) +- +- +-%s +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +deleted file mode 100644 +index 3d864738b6..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go ++++ /dev/null +@@ -1,335 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +- This program reads a file containing function prototypes +- (like syscall_solaris.go) and generates system call bodies. +- The prototypes are marked by lines beginning with "//sys" +- and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named err. +- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +-*/ +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- text := "" +- dynimports := "" +- linknames := "" +- var vars []string +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := 
regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // So file name. +- if modname == "" { +- modname = "libc" +- } +- +- // System call name. +- if sysname == "" { +- sysname = funct +- } +- +- // System call pointer variable name. +- sysvarname := fmt.Sprintf("proc%s", sysname) +- +- strconvfunc := "BytePtrFromString" +- strconvtype := "*byte" +- +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. +- +- // Runtime import of function to allow cross-platform builds. +- dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) +- // Link symbol to proc address variable. +- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) +- // Library proc address variable. +- vars = append(vars, sysvarname) +- +- // Go function header. +- outlist := strings.Join(out, ", ") +- if outlist != "" { +- outlist = fmt.Sprintf(" (%s)", outlist) +- } +- if text != "" { +- text += "\n" +- } +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) +- +- // Check if err return available +- errvar := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- continue +- } +- } +- +- // Prepare arguments to Syscall. +- var args []string +- n := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. 
+- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- } else if p.Type == "bool" { +- text += fmt.Sprintf("\tvar _p%d uint32\n", n) +- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) +- args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) +- n++ +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } +- nargs := len(args) +- +- // Determine which form to use; pad args with zeros. +- asm := "sysvicall6" +- if nonblock != nil { +- asm = "rawSysvicall6" +- } +- if len(args) <= 6 { +- for len(args) < 6 { +- args = append(args, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) +- os.Exit(1) +- } +- +- // Actual call. +- arglist := strings.Join(args, ", ") +- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) +- +- // Assign return values. +- body := "" +- ret := []string{"_", "_", "_"} +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- ret[2] = reg +- doErrno = true +- } else { +- reg = fmt.Sprintf("r%d", i) +- ret[i] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%d != 0", reg) +- } +- if p.Type == "int64" && endianness != "" { +- // 64-bit number in r1:r0 or r0:r1. +- if i+2 > len(out) { +- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) +- os.Exit(1) +- } +- if endianness == "big-endian" { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) +- } else { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) +- } +- ret[i] = fmt.Sprintf("r%d", i) +- ret[i+1] = fmt.Sprintf("r%d", i+1) +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { +- text += fmt.Sprintf("\t%s\n", call) +- } else { +- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) +- } +- text += body +- +- if doErrno { +- text += "\tif e1 != 0 {\n" +- text += "\t\terr = e1\n" +- text += "\t}\n" +- } +- text += "\treturn\n" +- text += "}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- vardecls := "\t" + strings.Join(vars, ",\n\t") +- vardecls += " syscallFunc" +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. 
+- +-// +build %s +- +-package %s +- +-import ( +- "syscall" +- "unsafe" +-) +-%s +-%s +-%s +-var ( +-%s +-) +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +deleted file mode 100644 +index b6b409909c..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go ++++ /dev/null +@@ -1,355 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. +-// +-// Build a MIB with each entry being an array containing the level, type and +-// a hash that will contain additional entries if the current entry is a node. +-// We then walk this MIB and create a flattened sysctl name to OID hash. +- +-package main +- +-import ( +- "bufio" +- "fmt" +- "os" +- "path/filepath" +- "regexp" +- "sort" +- "strings" +-) +- +-var ( +- goos, goarch string +-) +- +-// cmdLine returns this programs's commandline arguments. +-func cmdLine() string { +- return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags. +-func buildTags() string { +- return fmt.Sprintf("%s,%s", goarch, goos) +-} +- +-// reMatch performs regular expression match and stores the substring slice to value pointed by m. +-func reMatch(re *regexp.Regexp, str string, m *[]string) bool { +- *m = re.FindStringSubmatch(str) +- if *m != nil { +- return true +- } +- return false +-} +- +-type nodeElement struct { +- n int +- t string +- pE *map[string]nodeElement +-} +- +-var ( +- debugEnabled bool +- mib map[string]nodeElement +- node *map[string]nodeElement +- nodeMap map[string]string +- sysCtl []string +-) +- +-var ( +- ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) +- ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) +- ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) +- netInetRE = regexp.MustCompile(`^netinet/`) +- netInet6RE = regexp.MustCompile(`^netinet6/`) +- netRE = regexp.MustCompile(`^net/`) +- bracesRE = regexp.MustCompile(`{.*}`) +- ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) +- fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) +-) +- +-func debug(s string) { +- if debugEnabled { +- fmt.Fprintln(os.Stderr, s) +- } +-} +- +-// Walk the MIB and build a sysctl name to OID mapping. +-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { +- lNode := pNode // local copy of pointer to node +- var keys []string +- for k := range *lNode { +- keys = append(keys, k) +- } +- sort.Strings(keys) +- +- for _, key := range keys { +- nodename := name +- if name != "" { +- nodename += "." 
+- } +- nodename += key +- +- nodeoid := append(oid, (*pNode)[key].n) +- +- if (*pNode)[key].t == `CTLTYPE_NODE` { +- if _, ok := nodeMap[nodename]; ok { +- lNode = &mib +- ctlName := nodeMap[nodename] +- for _, part := range strings.Split(ctlName, ".") { +- lNode = ((*lNode)[part]).pE +- } +- } else { +- lNode = (*pNode)[key].pE +- } +- buildSysctl(lNode, nodename, nodeoid) +- } else if (*pNode)[key].t != "" { +- oidStr := []string{} +- for j := range nodeoid { +- oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) +- } +- text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" +- sysCtl = append(sysCtl, text) +- } +- } +-} +- +-func main() { +- // Get the OS (using GOOS_TARGET if it exist) +- goos = os.Getenv("GOOS_TARGET") +- if goos == "" { +- goos = os.Getenv("GOOS") +- } +- // Get the architecture (using GOARCH_TARGET if it exists) +- goarch = os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check if GOOS and GOARCH environment variables are defined +- if goarch == "" || goos == "" { +- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") +- os.Exit(1) +- } +- +- mib = make(map[string]nodeElement) +- headers := [...]string{ +- `sys/sysctl.h`, +- `sys/socket.h`, +- `sys/tty.h`, +- `sys/malloc.h`, +- `sys/mount.h`, +- `sys/namei.h`, +- `sys/sem.h`, +- `sys/shm.h`, +- `sys/vmmeter.h`, +- `uvm/uvmexp.h`, +- `uvm/uvm_param.h`, +- `uvm/uvm_swap_encrypt.h`, +- `ddb/db_var.h`, +- `net/if.h`, +- `net/if_pfsync.h`, +- `net/pipex.h`, +- `netinet/in.h`, +- `netinet/icmp_var.h`, +- `netinet/igmp_var.h`, +- `netinet/ip_ah.h`, +- `netinet/ip_carp.h`, +- `netinet/ip_divert.h`, +- `netinet/ip_esp.h`, +- `netinet/ip_ether.h`, +- `netinet/ip_gre.h`, +- `netinet/ip_ipcomp.h`, +- `netinet/ip_ipip.h`, +- `netinet/pim_var.h`, +- `netinet/tcp_var.h`, +- `netinet/udp_var.h`, +- `netinet6/in6.h`, +- `netinet6/ip6_divert.h`, +- `netinet6/pim6_var.h`, +- `netinet/icmp6.h`, +- `netmpls/mpls.h`, +- } +- +- ctls := [...]string{ +- `kern`, +- `vm`, +- `fs`, +- `net`, +- //debug /* Special handling required */ +- `hw`, +- //machdep /* Arch specific */ +- `user`, +- `ddb`, +- //vfs /* Special handling required */ +- `fs.posix`, +- `kern.forkstat`, +- `kern.intrcnt`, +- `kern.malloc`, +- `kern.nchstats`, +- `kern.seminfo`, +- `kern.shminfo`, +- `kern.timecounter`, +- `kern.tty`, +- `kern.watchdog`, +- `net.bpf`, +- `net.ifq`, +- `net.inet`, +- `net.inet.ah`, +- `net.inet.carp`, +- `net.inet.divert`, +- `net.inet.esp`, +- `net.inet.etherip`, +- `net.inet.gre`, +- `net.inet.icmp`, +- `net.inet.igmp`, +- `net.inet.ip`, +- `net.inet.ip.ifq`, +- `net.inet.ipcomp`, +- `net.inet.ipip`, +- `net.inet.mobileip`, +- `net.inet.pfsync`, +- `net.inet.pim`, +- `net.inet.tcp`, +- `net.inet.udp`, +- `net.inet6`, +- `net.inet6.divert`, +- `net.inet6.ip6`, +- `net.inet6.icmp6`, +- `net.inet6.pim6`, +- `net.inet6.tcp6`, +- `net.inet6.udp6`, +- `net.mpls`, +- `net.mpls.ifq`, +- `net.key`, +- `net.pflow`, +- `net.pfsync`, +- `net.pipex`, +- `net.rt`, +- `vm.swapencrypt`, +- //vfsgenctl /* Special handling required */ +- } +- +- // Node name "fixups" +- ctlMap := map[string]string{ +- "ipproto": "net.inet", +- "net.inet.ipproto": "net.inet", +- "net.inet6.ipv6proto": "net.inet6", +- "net.inet6.ipv6": "net.inet6.ip6", +- "net.inet.icmpv6": "net.inet6.icmp6", +- "net.inet6.divert6": "net.inet6.divert", +- "net.inet6.tcp6": "net.inet.tcp", +- "net.inet6.udp6": "net.inet.udp", +- "mpls": "net.mpls", +- "swpenc": "vm.swapencrypt", +- } +- +- // Node 
mappings +- nodeMap = map[string]string{ +- "net.inet.ip.ifq": "net.ifq", +- "net.inet.pfsync": "net.pfsync", +- "net.mpls.ifq": "net.ifq", +- } +- +- mCtls := make(map[string]bool) +- for _, ctl := range ctls { +- mCtls[ctl] = true +- } +- +- for _, header := range headers { +- debug("Processing " + header) +- file, err := os.Open(filepath.Join("/usr/include", header)) +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- var sub []string +- if reMatch(ctlNames1RE, s.Text(), &sub) || +- reMatch(ctlNames2RE, s.Text(), &sub) || +- reMatch(ctlNames3RE, s.Text(), &sub) { +- if sub[1] == `CTL_NAMES` { +- // Top level. +- node = &mib +- } else { +- // Node. +- nodename := strings.ToLower(sub[2]) +- ctlName := "" +- if reMatch(netInetRE, header, &sub) { +- ctlName = "net.inet." + nodename +- } else if reMatch(netInet6RE, header, &sub) { +- ctlName = "net.inet6." + nodename +- } else if reMatch(netRE, header, &sub) { +- ctlName = "net." + nodename +- } else { +- ctlName = nodename +- ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) +- } +- +- if val, ok := ctlMap[ctlName]; ok { +- ctlName = val +- } +- if _, ok := mCtls[ctlName]; !ok { +- debug("Ignoring " + ctlName + "...") +- continue +- } +- +- // Walk down from the top of the MIB. +- node = &mib +- for _, part := range strings.Split(ctlName, ".") { +- if _, ok := (*node)[part]; !ok { +- debug("Missing node " + part) +- (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} +- } +- node = (*node)[part].pE +- } +- } +- +- // Populate current node with entries. +- i := -1 +- for !strings.HasPrefix(s.Text(), "}") { +- s.Scan() +- if reMatch(bracesRE, s.Text(), &sub) { +- i++ +- } +- if !reMatch(ctlTypeRE, s.Text(), &sub) { +- continue +- } +- (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} +- } +- } +- } +- err = s.Err() +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +- file.Close() +- } +- buildSysctl(&mib, "", []int{}) +- +- sort.Strings(sysCtl) +- text := strings.Join(sysCtl, "") +- +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-type mibentry struct { +- ctlname string +- ctloid []_C_int +-} +- +-var sysctlMib = []mibentry { +-%s +-} +-` +diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go +deleted file mode 100644 +index baa6ecd850..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksysnum.go ++++ /dev/null +@@ -1,190 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Generate system call table for DragonFly, NetBSD, +-// FreeBSD, OpenBSD or Darwin from master list +-// (for example, /usr/src/sys/kern/syscalls.master or +-// sys/syscall.h). 
+-package main +- +-import ( +- "bufio" +- "fmt" +- "io" +- "io/ioutil" +- "net/http" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- goos, goarch string +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return fmt.Sprintf("%s,%s", goarch, goos) +-} +- +-func checkErr(err error) { +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +-} +- +-// source string and substring slice for regexp +-type re struct { +- str string // source string +- sub []string // matched sub-string +-} +- +-// Match performs regular expression match +-func (r *re) Match(exp string) bool { +- r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) +- if r.sub != nil { +- return true +- } +- return false +-} +- +-// fetchFile fetches a text file from URL +-func fetchFile(URL string) io.Reader { +- resp, err := http.Get(URL) +- checkErr(err) +- defer resp.Body.Close() +- body, err := ioutil.ReadAll(resp.Body) +- checkErr(err) +- return strings.NewReader(string(body)) +-} +- +-// readFile reads a text file from path +-func readFile(path string) io.Reader { +- file, err := os.Open(os.Args[1]) +- checkErr(err) +- return file +-} +- +-func format(name, num, proto string) string { +- name = strings.ToUpper(name) +- // There are multiple entries for enosys and nosys, so comment them out. +- nm := re{str: name} +- if nm.Match(`^SYS_E?NOSYS$`) { +- name = fmt.Sprintf("// %s", name) +- } +- if name == `SYS_SYS_EXIT` { +- name = `SYS_EXIT` +- } +- return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +-} +- +-func main() { +- // Get the OS (using GOOS_TARGET if it exist) +- goos = os.Getenv("GOOS_TARGET") +- if goos == "" { +- goos = os.Getenv("GOOS") +- } +- // Get the architecture (using GOARCH_TARGET if it exists) +- goarch = os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check if GOOS and GOARCH environment variables are defined +- if goarch == "" || goos == "" { +- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") +- os.Exit(1) +- } +- +- file := strings.TrimSpace(os.Args[1]) +- var syscalls io.Reader +- if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { +- // Download syscalls.master file +- syscalls = fetchFile(file) +- } else { +- syscalls = readFile(file) +- } +- +- var text, line string +- s := bufio.NewScanner(syscalls) +- for s.Scan() { +- t := re{str: line} +- if t.Match(`^(.*)\\$`) { +- // Handle continuation +- line = t.sub[1] +- line += strings.TrimLeft(s.Text(), " \t") +- } else { +- // New line +- line = s.Text() +- } +- t = re{str: line} +- if t.Match(`\\$`) { +- continue +- } +- t = re{str: line} +- +- switch goos { +- case "dragonfly": +- if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { +- num, proto := t.sub[1], t.sub[2] +- name := fmt.Sprintf("SYS_%s", t.sub[3]) +- text += format(name, num, proto) +- } +- case "freebsd": +- if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { +- num, proto := t.sub[1], t.sub[2] +- name := fmt.Sprintf("SYS_%s", t.sub[3]) +- text += format(name, num, proto) +- } +- case "openbsd": +- if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { +- num, proto, name := t.sub[1], t.sub[3], t.sub[4] +- text += format(name, num, proto) +- } +- case "netbsd": +- if 
t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { +- num, proto, compat := t.sub[1], t.sub[6], t.sub[8] +- name := t.sub[7] + "_" + t.sub[9] +- if t.sub[11] != "" { +- name = t.sub[7] + "_" + t.sub[11] +- } +- name = strings.ToUpper(name) +- if compat == "" || compat == "13" || compat == "30" || compat == "50" { +- text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +- } +- } +- case "darwin": +- if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { +- name, num := t.sub[1], t.sub[2] +- name = strings.ToUpper(name) +- text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) +- } +- default: +- fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) +- os.Exit(1) +- +- } +- } +- err := s.Err() +- checkErr(err) +- +- fmt.Printf(template, cmdLine(), buildTags(), text) +-} +- +-const template = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-const( +-%s)` +diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go +deleted file mode 100644 +index 40d2beede5..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_aix.go ++++ /dev/null +@@ -1,237 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +-// +build aix +- +-/* +-Input to cgo -godefs. See also mkerrors.sh and mkall.sh +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include +- +-#include +-#include +-#include +-#include +- +- +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +- PathMax = C.PATH_MAX +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-type off64 C.off64_t +-type off C.off_t +-type Mode_t C.mode_t +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-type Timex C.struct_timex +- +-type Time_t C.time_t +- +-type Tms C.struct_tms +- +-type Utimbuf C.struct_utimbuf +- +-type Timezone C.struct_timezone +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit64 +- +-type Pid_t C.pid_t +- +-type _Gid_t C.gid_t +- +-type dev_t C.dev_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type StatxTimestamp C.struct_statx_timestamp +- +-type Statx_t C.struct_statx +- +-type Dirent C.struct_dirent +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny 
C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Cmsghdr C.struct_cmsghdr +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type Linger C.struct_linger +- +-type Msghdr C.struct_msghdr +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +-) +- +-type IfMsgHdr C.struct_if_msghdr +- +-// Misc +- +-type FdSet C.fd_set +- +-type Utsname C.struct_utsname +- +-type Ustat_t C.struct_ustat +- +-type Sigset_t C.sigset_t +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Termio C.struct_termio +- +-type Winsize C.struct_winsize +- +-//poll +- +-type PollFd struct { +- Fd int32 +- Events uint16 +- Revents uint16 +-} +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-//flock_t +- +-type Flock_t C.struct_flock64 +- +-// Statfs +- +-type Fsid_t C.struct_fsid_t +-type Fsid64_t C.struct_fsid64_t +- +-type Statfs_t C.struct_statfs +- +-const RNDGETENTCNT = 0x80045200 +diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go +deleted file mode 100644 +index 155c2e692b..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_darwin.go ++++ /dev/null +@@ -1,283 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define __DARWIN_UNIX03 0 +-#define KERNEL +-#define _DARWIN_USE_64_BIT_INODE +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat64 +- +-type Statfs_t C.struct_statfs64 +- +-type Flock_t C.struct_flock +- +-type Fstore_t C.struct_fstore +- +-type Radvisory_t C.struct_radvisory +- +-type Fbootstraptransfer_t C.struct_fbootstraptransfer +- +-type Log2phys_t C.struct_log2phys +- +-type Fsid C.struct_fsid +- +-type Dirent C.struct_dirent +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet4Pktinfo C.struct_in_pktinfo +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set 
+- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfmaMsghdr2 C.struct_ifma_msghdr2 +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// uname +- +-type Utsname C.struct_utsname +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go +deleted file mode 100644 +index 3365dd79d0..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_dragonfly.go ++++ /dev/null +@@ -1,263 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.struct_fsid +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = 
C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Uname +- +-type Utsname C.struct_utsname +diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go +deleted file mode 100644 +index a121dc3368..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_freebsd.go ++++ /dev/null +@@ -1,400 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define _WANT_FREEBSD11_STAT 1 +-#define _WANT_FREEBSD11_STATFS 1 +-#define _WANT_FREEBSD11_DIRENT 1 +-#define _WANT_FREEBSD11_KEVENT 1 +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +-// See /usr/include/net/if.h. 
+-struct if_data8 { +- u_char ifi_type; +- u_char ifi_physical; +- u_char ifi_addrlen; +- u_char ifi_hdrlen; +- u_char ifi_link_state; +- u_char ifi_spare_char1; +- u_char ifi_spare_char2; +- u_char ifi_datalen; +- u_long ifi_mtu; +- u_long ifi_metric; +- u_long ifi_baudrate; +- u_long ifi_ipackets; +- u_long ifi_ierrors; +- u_long ifi_opackets; +- u_long ifi_oerrors; +- u_long ifi_collisions; +- u_long ifi_ibytes; +- u_long ifi_obytes; +- u_long ifi_imcasts; +- u_long ifi_omcasts; +- u_long ifi_iqdrops; +- u_long ifi_noproto; +- u_long ifi_hwassist; +-// FIXME: these are now unions, so maybe need to change definitions? +-#undef ifi_epoch +- time_t ifi_epoch; +-#undef ifi_lastchange +- struct timeval ifi_lastchange; +-}; +- +-// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +-// See /usr/include/net/if.h. +-struct if_msghdr8 { +- u_short ifm_msglen; +- u_char ifm_version; +- u_char ifm_type; +- int ifm_addrs; +- int ifm_flags; +- u_short ifm_index; +- struct if_data8 ifm_data; +-}; +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-const ( +- _statfsVersion = C.STATFS_VERSION +- _dirblksiz = C.DIRBLKSIZ +-) +- +-type Stat_t C.struct_stat +- +-type stat_freebsd11_t C.struct_freebsd11_stat +- +-type Statfs_t C.struct_statfs +- +-type statfs_freebsd11_t C.struct_freebsd11_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type dirent_freebsd11 C.struct_freebsd11_dirent +- +-type Fsid C.struct_fsid +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Advice to Fadvise +- +-const ( +- FADV_NORMAL = C.POSIX_FADV_NORMAL +- FADV_RANDOM = C.POSIX_FADV_RANDOM +- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL +- FADV_WILLNEED = C.POSIX_FADV_WILLNEED +- FADV_DONTNEED = C.POSIX_FADV_DONTNEED +- FADV_NOREUSE = C.POSIX_FADV_NOREUSE +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPMreqn C.struct_ip_mreqn +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPMreqn = C.sizeof_struct_ip_mreqn +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr 
+- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_ATTACH = C.PT_ATTACH +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_DETACH = C.PT_DETACH +- PTRACE_GETFPREGS = C.PT_GETFPREGS +- PTRACE_GETFSBASE = C.PT_GETFSBASE +- PTRACE_GETLWPLIST = C.PT_GETLWPLIST +- PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS +- PTRACE_GETREGS = C.PT_GETREGS +- PTRACE_GETXSTATE = C.PT_GETXSTATE +- PTRACE_IO = C.PT_IO +- PTRACE_KILL = C.PT_KILL +- PTRACE_LWPEVENTS = C.PT_LWP_EVENTS +- PTRACE_LWPINFO = C.PT_LWPINFO +- PTRACE_SETFPREGS = C.PT_SETFPREGS +- PTRACE_SETREGS = C.PT_SETREGS +- PTRACE_SINGLESTEP = C.PT_STEP +- PTRACE_TRACEME = C.PT_TRACE_ME +-) +- +-const ( +- PIOD_READ_D = C.PIOD_READ_D +- PIOD_WRITE_D = C.PIOD_WRITE_D +- PIOD_READ_I = C.PIOD_READ_I +- PIOD_WRITE_I = C.PIOD_WRITE_I +-) +- +-const ( +- PL_FLAG_BORN = C.PL_FLAG_BORN +- PL_FLAG_EXITED = C.PL_FLAG_EXITED +- PL_FLAG_SI = C.PL_FLAG_SI +-) +- +-const ( +- TRAP_BRKPT = C.TRAP_BRKPT +- TRAP_TRACE = C.TRAP_TRACE +-) +- +-type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo +- +-type __Siginfo C.struct___siginfo +- +-type Sigset_t C.sigset_t +- +-type Reg C.struct_reg +- +-type FpReg C.struct_fpreg +- +-type PtraceIoDesc C.struct_ptrace_io_desc +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent_freebsd11 +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- sizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 +- sizeofIfData = C.sizeof_struct_if_data +- SizeofIfData = C.sizeof_struct_if_data8 +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type ifMsghdr C.struct_if_msghdr +- +-type IfMsghdr C.struct_if_msghdr8 +- +-type ifData C.struct_if_data +- +-type IfData C.struct_if_data8 +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +- SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfZbuf C.struct_bpf_zbuf +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfZbufHeader C.struct_bpf_zbuf_header +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. 
+- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLINIGNEOF = C.POLLINIGNEOF +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Capabilities +- +-type CapRights C.struct_cap_rights +- +-// Uname +- +-type Utsname C.struct_utsname +diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go +deleted file mode 100644 +index 4a96d72c37..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_netbsd.go ++++ /dev/null +@@ -1,290 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.fsid_t +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Advice to Fadvise +- +-const ( +- FADV_NORMAL = C.POSIX_FADV_NORMAL +- FADV_RANDOM = C.POSIX_FADV_RANDOM +- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL +- FADV_WILLNEED = C.POSIX_FADV_WILLNEED +- FADV_DONTNEED = C.POSIX_FADV_DONTNEED +- FADV_NOREUSE = C.POSIX_FADV_NOREUSE +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- 
+-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-type Mclpool C.struct_mclpool +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfTimeval C.struct_bpf_timeval +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-type Ptmget C.struct_ptmget +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Sysctl +- +-type Sysctlnode C.struct_sysctlnode +- +-// Uname +- +-type Utsname C.struct_utsname +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go +deleted file mode 100644 +index 775cb57dc8..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_openbsd.go ++++ /dev/null +@@ -1,283 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.fsid_t +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = 
C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-type Mclpool C.struct_mclpool +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfTimeval C.struct_bpf_timeval +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Signal Sets +- +-type Sigset_t C.sigset_t +- +-// Uname +- +-type Utsname C.struct_utsname +- +-// Uvmexp +- +-const SizeofUvmexp = C.sizeof_struct_uvmexp +- +-type Uvmexp C.struct_uvmexp +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go +deleted file mode 100644 +index 2b716f9348..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_solaris.go ++++ /dev/null +@@ -1,266 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-// These defines ensure that builds done on newer versions of Solaris are +-// backwards-compatible with older versions of Solaris and +-// OpenSolaris-based derivatives. 
+-#define __USE_SUNOS_SOCKETS__ // msghdr +-#define __USE_LEGACY_PROTOTYPES__ // iovec +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +- PathMax = C.PATH_MAX +- MaxHostNameLen = C.MAXHOSTNAMELEN +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-type Tms C.struct_tms +- +-type Utimbuf C.struct_utimbuf +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-// Filesystems +- +-type _Fsblkcnt_t C.fsblkcnt_t +- +-type Statvfs_t C.struct_statvfs +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Select +- +-type FdSet C.fd_set +- +-// Misc +- +-type Utsname C.struct_utsname +- +-type Ustat_t C.struct_ustat +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_EACCESS = C.AT_EACCESS +-) +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = 
C.sizeof_struct_ifa_msghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfTimeval C.struct_bpf_timeval +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Termio C.struct_termio +- +-type Winsize C.struct_winsize +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go +deleted file mode 100644 +index f7941701e8..0000000000 +--- a/vendor/golang.org/x/text/encoding/charmap/maketables.go ++++ /dev/null +@@ -1,556 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +- "unicode/utf8" +- +- "golang.org/x/text/encoding" +- "golang.org/x/text/internal/gen" +-) +- +-const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + +- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + +- ` !"#$%&'()*+,-./0123456789:;<=>?` + +- `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + +- "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" +- +-var encodings = []struct { +- name string +- mib string +- comment string +- varName string +- replacement byte +- mapping string +-}{ +- { +- "IBM Code Page 037", +- "IBM037", +- "", +- "CodePage037", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", +- }, +- { +- "IBM Code Page 437", +- "PC8CodePage437", +- "", +- "CodePage437", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", +- }, +- { +- "IBM Code Page 850", +- "PC850Multilingual", +- "", +- "CodePage850", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", +- }, +- { +- "IBM Code Page 852", +- "PCp852", +- "", +- "CodePage852", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", +- }, +- { +- "IBM Code Page 855", +- "IBM855", +- "", +- "CodePage855", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", +- }, +- { +- "Windows Code Page 858", // PC latin1 with Euro +- "IBM00858", +- "", +- "CodePage858", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", +- }, +- { +- "IBM 
Code Page 860", +- "IBM860", +- "", +- "CodePage860", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", +- }, +- { +- "IBM Code Page 862", +- "PC862LatinHebrew", +- "", +- "CodePage862", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", +- }, +- { +- "IBM Code Page 863", +- "IBM863", +- "", +- "CodePage863", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", +- }, +- { +- "IBM Code Page 865", +- "IBM865", +- "", +- "CodePage865", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", +- }, +- { +- "IBM Code Page 866", +- "IBM866", +- "", +- "CodePage866", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-ibm866.txt", +- }, +- { +- "IBM Code Page 1047", +- "IBM1047", +- "", +- "CodePage1047", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", +- }, +- { +- "IBM Code Page 1140", +- "IBM01140", +- "", +- "CodePage1140", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", +- }, +- { +- "ISO 8859-1", +- "ISOLatin1", +- "", +- "ISO8859_1", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", +- }, +- { +- "ISO 8859-2", +- "ISOLatin2", +- "", +- "ISO8859_2", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", +- }, +- { +- "ISO 8859-3", +- "ISOLatin3", +- "", +- "ISO8859_3", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", +- }, +- { +- "ISO 8859-4", +- "ISOLatin4", +- "", +- "ISO8859_4", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", +- }, +- { +- "ISO 8859-5", +- "ISOLatinCyrillic", +- "", +- "ISO8859_5", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", +- }, +- { +- "ISO 8859-6", +- "ISOLatinArabic", +- "", +- "ISO8859_6,ISO8859_6E,ISO8859_6I", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", +- }, +- { +- "ISO 8859-7", +- "ISOLatinGreek", +- "", +- "ISO8859_7", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", +- }, +- { +- "ISO 8859-8", +- "ISOLatinHebrew", +- "", +- "ISO8859_8,ISO8859_8E,ISO8859_8I", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", +- }, +- { +- "ISO 8859-9", +- "ISOLatin5", +- "", +- "ISO8859_9", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", +- }, +- { +- "ISO 8859-10", +- "ISOLatin6", +- "", +- "ISO8859_10", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", +- }, +- { +- "ISO 8859-13", +- "ISO885913", +- "", +- "ISO8859_13", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", +- }, +- { +- "ISO 8859-14", +- "ISO885914", +- "", +- "ISO8859_14", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", +- }, +- { +- "ISO 8859-15", +- "ISO885915", +- "", +- "ISO8859_15", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", +- }, +- { +- "ISO 8859-16", +- "ISO885916", +- "", +- "ISO8859_16", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", +- }, +- { +- "KOI8-R", +- "KOI8R", +- "", +- "KOI8R", +- 
encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-koi8-r.txt", +- }, +- { +- "KOI8-U", +- "KOI8U", +- "", +- "KOI8U", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-koi8-u.txt", +- }, +- { +- "Macintosh", +- "Macintosh", +- "", +- "Macintosh", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-macintosh.txt", +- }, +- { +- "Macintosh Cyrillic", +- "MacintoshCyrillic", +- "", +- "MacintoshCyrillic", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", +- }, +- { +- "Windows 874", +- "Windows874", +- "", +- "Windows874", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-874.txt", +- }, +- { +- "Windows 1250", +- "Windows1250", +- "", +- "Windows1250", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1250.txt", +- }, +- { +- "Windows 1251", +- "Windows1251", +- "", +- "Windows1251", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1251.txt", +- }, +- { +- "Windows 1252", +- "Windows1252", +- "", +- "Windows1252", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1252.txt", +- }, +- { +- "Windows 1253", +- "Windows1253", +- "", +- "Windows1253", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1253.txt", +- }, +- { +- "Windows 1254", +- "Windows1254", +- "", +- "Windows1254", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1254.txt", +- }, +- { +- "Windows 1255", +- "Windows1255", +- "", +- "Windows1255", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1255.txt", +- }, +- { +- "Windows 1256", +- "Windows1256", +- "", +- "Windows1256", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1256.txt", +- }, +- { +- "Windows 1257", +- "Windows1257", +- "", +- "Windows1257", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1257.txt", +- }, +- { +- "Windows 1258", +- "Windows1258", +- "", +- "Windows1258", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1258.txt", +- }, +- { +- "X-User-Defined", +- "XUserDefined", +- "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", +- "XUserDefined", +- encoding.ASCIISub, +- ascii + +- "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + +- "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + +- "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + +- "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + +- "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + +- "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + +- "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + +- "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + +- "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + +- "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + +- "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + +- "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + +- "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + +- "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + +- "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + +- "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", +- }, +-} +- +-func getWHATWG(url string) string { +- res, err := http.Get(url) +- if err != nil { +- log.Fatalf("%q: Get: %v", url, err) +- } +- defer res.Body.Close() +- +- mapping := make([]rune, 128) +- for i := range mapping { +- mapping[i] = '\ufffd' +- } +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' 
{ +- continue +- } +- x, y := 0, 0 +- if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 128 <= x { +- log.Fatalf("code %d is out of range", x) +- } +- if 0x80 <= y && y < 0xa0 { +- // We diverge from the WHATWG spec by mapping control characters +- // in the range [0x80, 0xa0) to U+FFFD. +- continue +- } +- mapping[x] = rune(y) +- } +- return ascii + string(mapping) +-} +- +-func getUCM(url string) string { +- res, err := http.Get(url) +- if err != nil { +- log.Fatalf("%q: Get: %v", url, err) +- } +- defer res.Body.Close() +- +- mapping := make([]rune, 256) +- for i := range mapping { +- mapping[i] = '\ufffd' +- } +- +- charsFound := 0 +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- var c byte +- var r rune +- if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { +- continue +- } +- mapping[c] = r +- charsFound++ +- } +- +- if charsFound < 200 { +- log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) +- } +- +- return string(mapping) +-} +- +-func main() { +- mibs := map[string]bool{} +- all := []string{} +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("tables.go", "charmap") +- +- printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } +- +- printf("import (\n") +- printf("\t\"golang.org/x/text/encoding\"\n") +- printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") +- printf(")\n\n") +- for _, e := range encodings { +- varNames := strings.Split(e.varName, ",") +- all = append(all, varNames...) +- varName := varNames[0] +- switch { +- case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): +- e.mapping = getWHATWG(e.mapping) +- case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): +- e.mapping = getUCM(e.mapping) +- } +- +- asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 +- if asciiSuperset { +- low = 0x80 +- } +- lvn := 1 +- if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { +- lvn = 3 +- } +- lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] +- printf("// %s is the %s encoding.\n", varName, e.name) +- if e.comment != "" { +- printf("//\n// %s\n", e.comment) +- } +- printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n", +- varName, lowerVarName, lowerVarName, e.name) +- if mibs[e.mib] { +- log.Fatalf("MIB type %q declared multiple times.", e.mib) +- } +- printf("mib: identifier.%s,\n", e.mib) +- printf("asciiSuperset: %t,\n", asciiSuperset) +- printf("low: 0x%02x,\n", low) +- printf("replacement: 0x%02x,\n", e.replacement) +- +- printf("decode: [256]utf8Enc{\n") +- i, backMapping := 0, map[rune]byte{} +- for _, c := range e.mapping { +- if _, ok := backMapping[c]; !ok && c != utf8.RuneError { +- backMapping[c] = byte(i) +- } +- var buf [8]byte +- n := utf8.EncodeRune(buf[:], c) +- if n > 3 { +- panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) +- } +- printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) +- if i%2 == 1 { +- printf("\n") +- } +- i++ +- } +- printf("},\n") +- +- printf("encode: [256]uint32{\n") +- encode := make([]uint32, 0, 256) +- for c, i := range backMapping { +- encode = append(encode, uint32(i)<<24|uint32(c)) +- } +- sort.Sort(byRune(encode)) +- for len(encode) < cap(encode) { +- encode = append(encode, encode[len(encode)-1]) +- } +- for i, enc := range encode { +- 
printf("0x%08x,", enc) +- if i%8 == 7 { +- printf("\n") +- } +- } +- printf("},\n}\n") +- +- // Add an estimate of the size of a single Charmap{} struct value, which +- // includes two 256 elem arrays of 4 bytes and some extra fields, which +- // align to 3 uint64s on 64-bit architectures. +- w.Size += 2*4*256 + 3*8 +- } +- // TODO: add proper line breaking. +- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) +-} +- +-type byRune []uint32 +- +-func (b byRune) Len() int { return len(b) } +-func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } +-func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go +deleted file mode 100644 +index ac6b4a77fd..0000000000 +--- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go ++++ /dev/null +@@ -1,173 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "encoding/json" +- "fmt" +- "log" +- "strings" +- +- "golang.org/x/text/internal/gen" +-) +- +-type group struct { +- Encodings []struct { +- Labels []string +- Name string +- } +-} +- +-func main() { +- gen.Init() +- +- r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json") +- var groups []group +- if err := json.NewDecoder(r).Decode(&groups); err != nil { +- log.Fatalf("Error reading encodings.json: %v", err) +- } +- +- w := &bytes.Buffer{} +- fmt.Fprintln(w, "type htmlEncoding byte") +- fmt.Fprintln(w, "const (") +- for i, g := range groups { +- for _, e := range g.Encodings { +- key := strings.ToLower(e.Name) +- name := consts[key] +- if name == "" { +- log.Fatalf("No const defined for %s.", key) +- } +- if i == 0 { +- fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) +- } else { +- fmt.Fprintf(w, "%s\n", name) +- } +- } +- } +- fmt.Fprintln(w, "numEncodings") +- fmt.Fprint(w, ")\n\n") +- +- fmt.Fprintln(w, "var canonical = [numEncodings]string{") +- for _, g := range groups { +- for _, e := range g.Encodings { +- fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name)) +- } +- } +- fmt.Fprint(w, "}\n\n") +- +- fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") +- for _, g := range groups { +- for _, e := range g.Encodings { +- for _, l := range e.Labels { +- key := strings.ToLower(e.Name) +- name := consts[key] +- fmt.Fprintf(w, "%q: %s,\n", l, name) +- } +- } +- } +- fmt.Fprint(w, "}\n\n") +- +- var tags []string +- fmt.Fprintln(w, "var localeMap = []htmlEncoding{") +- for _, loc := range locales { +- tags = append(tags, loc.tag) +- fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) +- } +- fmt.Fprint(w, "}\n\n") +- +- fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) +- +- gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) +-} +- +-// consts maps canonical encoding name to internal constant. 
+-var consts = map[string]string{ +- "utf-8": "utf8", +- "ibm866": "ibm866", +- "iso-8859-2": "iso8859_2", +- "iso-8859-3": "iso8859_3", +- "iso-8859-4": "iso8859_4", +- "iso-8859-5": "iso8859_5", +- "iso-8859-6": "iso8859_6", +- "iso-8859-7": "iso8859_7", +- "iso-8859-8": "iso8859_8", +- "iso-8859-8-i": "iso8859_8I", +- "iso-8859-10": "iso8859_10", +- "iso-8859-13": "iso8859_13", +- "iso-8859-14": "iso8859_14", +- "iso-8859-15": "iso8859_15", +- "iso-8859-16": "iso8859_16", +- "koi8-r": "koi8r", +- "koi8-u": "koi8u", +- "macintosh": "macintosh", +- "windows-874": "windows874", +- "windows-1250": "windows1250", +- "windows-1251": "windows1251", +- "windows-1252": "windows1252", +- "windows-1253": "windows1253", +- "windows-1254": "windows1254", +- "windows-1255": "windows1255", +- "windows-1256": "windows1256", +- "windows-1257": "windows1257", +- "windows-1258": "windows1258", +- "x-mac-cyrillic": "macintoshCyrillic", +- "gbk": "gbk", +- "gb18030": "gb18030", +- // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG +- "big5": "big5", +- "euc-jp": "eucjp", +- "iso-2022-jp": "iso2022jp", +- "shift_jis": "shiftJIS", +- "euc-kr": "euckr", +- "replacement": "replacement", +- "utf-16be": "utf16be", +- "utf-16le": "utf16le", +- "x-user-defined": "xUserDefined", +-} +- +-// locales is taken from +-// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. +-var locales = []struct{ tag, name string }{ +- // The default value. Explicitly state latin to benefit from the exact +- // script option, while still making 1252 the default encoding for languages +- // written in Latin script. +- {"und_Latn", "windows-1252"}, +- {"ar", "windows-1256"}, +- {"ba", "windows-1251"}, +- {"be", "windows-1251"}, +- {"bg", "windows-1251"}, +- {"cs", "windows-1250"}, +- {"el", "iso-8859-7"}, +- {"et", "windows-1257"}, +- {"fa", "windows-1256"}, +- {"he", "windows-1255"}, +- {"hr", "windows-1250"}, +- {"hu", "iso-8859-2"}, +- {"ja", "shift_jis"}, +- {"kk", "windows-1251"}, +- {"ko", "euc-kr"}, +- {"ku", "windows-1254"}, +- {"ky", "windows-1251"}, +- {"lt", "windows-1257"}, +- {"lv", "windows-1257"}, +- {"mk", "windows-1251"}, +- {"pl", "iso-8859-2"}, +- {"ru", "windows-1251"}, +- {"sah", "windows-1251"}, +- {"sk", "windows-1250"}, +- {"sl", "iso-8859-2"}, +- {"sr", "windows-1251"}, +- {"tg", "windows-1251"}, +- {"th", "windows-874"}, +- {"tr", "windows-1254"}, +- {"tt", "windows-1251"}, +- {"uk", "windows-1251"}, +- {"vi", "windows-1258"}, +- {"zh-hans", "gb18030"}, +- {"zh-hant", "big5"}, +-} +diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +deleted file mode 100644 +index 26cfef9c6b..0000000000 +--- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go ++++ /dev/null +@@ -1,142 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "encoding/xml" +- "fmt" +- "io" +- "log" +- "strings" +- +- "golang.org/x/text/internal/gen" +-) +- +-type registry struct { +- XMLName xml.Name `xml:"registry"` +- Updated string `xml:"updated"` +- Registry []struct { +- ID string `xml:"id,attr"` +- Record []struct { +- Name string `xml:"name"` +- Xref []struct { +- Type string `xml:"type,attr"` +- Data string `xml:"data,attr"` +- } `xml:"xref"` +- Desc struct { +- Data string `xml:",innerxml"` +- // Any []struct { +- // Data string `xml:",chardata"` +- // } `xml:",any"` +- // Data string `xml:",chardata"` +- } `xml:"description,"` +- MIB string `xml:"value"` +- Alias []string `xml:"alias"` +- MIME string `xml:"preferred_alias"` +- } `xml:"record"` +- } `xml:"registry"` +-} +- +-func main() { +- r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") +- reg := ®istry{} +- if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { +- log.Fatalf("Error decoding charset registry: %v", err) +- } +- if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { +- log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) +- } +- +- w := &bytes.Buffer{} +- fmt.Fprintf(w, "const (\n") +- for _, rec := range reg.Registry[0].Record { +- constName := "" +- for _, a := range rec.Alias { +- if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { +- // Some of the constant definitions have comments in them. Strip those. +- constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) +- } +- } +- if constName == "" { +- switch rec.MIB { +- case "2085": +- constName = "HZGB2312" // Not listed as alias for some reason. +- default: +- log.Fatalf("No cs alias defined for %s.", rec.MIB) +- } +- } +- if rec.MIME != "" { +- rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) +- } +- fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) +- if len(rec.Desc.Data) > 0 { +- fmt.Fprint(w, "// ") +- d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) +- inElem := true +- attr := "" +- for { +- t, err := d.Token() +- if err != nil { +- if err != io.EOF { +- log.Fatal(err) +- } +- break +- } +- switch x := t.(type) { +- case xml.CharData: +- attr = "" // Don't need attribute info. +- a := bytes.Split([]byte(x), []byte("\n")) +- for i, b := range a { +- if b = bytes.TrimSpace(b); len(b) != 0 { +- if !inElem && i > 0 { +- fmt.Fprint(w, "\n// ") +- } +- inElem = false +- fmt.Fprintf(w, "%s ", string(b)) +- } +- } +- case xml.StartElement: +- if x.Name.Local == "xref" { +- inElem = true +- use := false +- for _, a := range x.Attr { +- if a.Name.Local == "type" { +- use = use || a.Value != "person" +- } +- if a.Name.Local == "data" && use { +- // Patch up URLs to use https. From some links, the +- // https version is different from the http one. 
+- s := a.Value +- s = strings.Replace(s, "http://", "https://", -1) +- s = strings.Replace(s, "/unicode/", "/", -1) +- attr = s + " " +- } +- } +- } +- case xml.EndElement: +- inElem = false +- fmt.Fprint(w, attr) +- } +- } +- fmt.Fprint(w, "\n") +- } +- for _, x := range rec.Xref { +- switch x.Type { +- case "rfc": +- fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) +- case "uri": +- fmt.Fprintf(w, "// Reference: %s\n", x.Data) +- } +- } +- fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) +- fmt.Fprintln(w) +- } +- fmt.Fprintln(w, ")") +- +- gen.WriteGoFile("mib.go", "identifier", w.Bytes()) +-} +diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go +deleted file mode 100644 +index 023957a672..0000000000 +--- a/vendor/golang.org/x/text/encoding/japanese/maketables.go ++++ /dev/null +@@ -1,161 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-// TODO: Emoji extensions? +-// https://www.unicode.org/faq/emoji_dingbats.html +-// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-type entry struct { +- jisCode, table int +-} +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") +- fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") +- +- reverse := [65536]entry{} +- for i := range reverse { +- reverse[i].table = -1 +- } +- +- tables := []struct { +- url string +- name string +- }{ +- {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, +- {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, +- } +- for i, table := range tables { +- res, err := http.Get(table.url) +- if err != nil { +- log.Fatalf("%q: Get: %v", table.url, err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := 0, uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("%q: could not parse %q", table.url, s) +- } +- if x < 0 || 120*94 <= x { +- log.Fatalf("%q: JIS code %d is out of range", table.url, x) +- } +- mapping[x] = y +- if reverse[y].table == -1 { +- reverse[y] = entry{jisCode: x, table: i} +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("%q: scanner error: %v", table.url, err) +- } +- +- fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", +- table.name, table.name, table.url) +- fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) +- for i, m := range mapping { +- if m != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, m) +- } +- } +- fmt.Printf("}\n\n") +- } +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v.table == -1 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const (\n") +- fmt.Printf("\tjis0208 = 1\n") +- fmt.Printf("\tjis0212 = 2\n") +- fmt.Printf("\tcodeMask = 0x7f\n") +- fmt.Printf("\tcodeShift = 7\n") +- fmt.Printf("\ttableShift = 14\n") +- fmt.Printf(")\n\n") +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("//\n") +- fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") +- fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") +- fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") +- fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x.table == -1 { +- continue +- } +- fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", +- j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go +deleted file mode 100644 +index c84034fb67..0000000000 +--- a/vendor/golang.org/x/text/encoding/korean/maketables.go ++++ /dev/null +@@ -1,143 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") +- fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") +- +- res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- reverse := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { +- log.Fatalf("EUC-KR code %d is out of range", x) +- } +- mapping[x] = y +- if reverse[y] == 0 { +- c0, c1 := uint16(0), uint16(0) +- if x < 178*(0xc7-0x81) { +- c0 = uint16(x/178) + 0x81 +- c1 = uint16(x % 178) +- switch { +- case c1 < 1*26: +- c1 += 0x41 +- case c1 < 2*26: +- c1 += 0x47 +- default: +- c1 += 0x4d +- } +- } else { +- x -= 178 * (0xc7 - 0x81) +- c0 = uint16(x/94) + 0xc7 +- c1 = uint16(x%94) + 0xa1 +- } +- reverse[y] = c0<<8 | c1 +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") +- fmt.Printf("var decode = [...]uint16{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. +- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. 
+-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +deleted file mode 100644 +index 55016c7862..0000000000 +--- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go ++++ /dev/null +@@ -1,161 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") +- fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") +- +- printGB18030() +- printGBK() +-} +- +-func printGB18030() { +- res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") +- fmt.Printf("var gb18030 = [...][2]uint16{\n") +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint32(0), uint32(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0x10000 && y < 0x10000 { +- fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) +- } +- } +- fmt.Printf("}\n\n") +-} +- +-func printGBK() { +- res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- reverse := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 126*190 <= x { +- log.Fatalf("GBK code %d is out of range", x) +- } +- mapping[x] = y +- if reverse[y] == 0 { +- c0, c1 := x/190, x%190 +- if c1 >= 0x3f { +- c1++ +- } +- reverse[y] = (0x81+c0)<<8 | (0x40 + c1) +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") +- fmt.Printf("var decode = [...]uint16{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +deleted file mode 100644 +index cf7fdb31a5..0000000000 +--- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go ++++ /dev/null +@@ -1,140 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") +- fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") +- +- res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint32{} +- reverse := [65536 * 4]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint32(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 126*157 <= x { +- log.Fatalf("Big5 code %d is out of range", x) +- } +- mapping[x] = y +- +- // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that +- // "The index pointer for code point in index is the first pointer +- // corresponding to code point in index", which would normally mean +- // that the code below should be guarded by "if reverse[y] == 0", but +- // last instead of first seems to match the behavior of +- // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in +- // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 +- // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") +- // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". +- c0, c1 := x/157, x%157 +- if c1 < 0x3f { +- c1 += 0x40 +- } else { +- c1 += 0x62 +- } +- reverse[y] = (0x81+c0)<<8 | c1 +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") +- fmt.Printf("var decode = [...]uint32{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%08X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go +deleted file mode 100644 +index 0c36a052f6..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Language tag table generator. +-// Data read from the web. +- +-package main +- +-import ( +- "flag" +- "fmt" +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/unicode/cldr" +-) +- +-var ( +- test = flag.Bool("test", +- false, +- "test existing tables; can be used to compare web data with package data.") +- outputFile = flag.String("output", +- "tables.go", +- "output file for generated tables") +-) +- +-func main() { +- gen.Init() +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("tables.go", "compact") +- +- fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) +- +- b := newBuilder(w) +- gen.WriteCLDRVersion(w) +- +- b.writeCompactIndex() +-} +- +-type builder struct { +- w *gen.CodeWriter +- data *cldr.CLDR +- supp *cldr.SupplementalData +-} +- +-func newBuilder(w *gen.CodeWriter) *builder { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- if err != nil { +- log.Fatal(err) +- } +- b := builder{ +- w: w, +- data: data, +- supp: data.Supplemental(), +- } +- return &b +-} +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go +deleted file mode 100644 +index 136cefaf08..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go ++++ /dev/null +@@ -1,113 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This file generates derivative tables based on the language package itself. +- +-import ( +- "fmt" +- "log" +- "sort" +- "strings" +- +- "golang.org/x/text/internal/language" +-) +- +-// Compact indices: +-// Note -va-X variants only apply to localization variants. +-// BCP variants only ever apply to language. +-// The only ambiguity between tags is with regions. +- +-func (b *builder) writeCompactIndex() { +- // Collect all language tags for which we have any data in CLDR. +- m := map[language.Tag]bool{} +- for _, lang := range b.data.Locales() { +- // We include all locales unconditionally to be consistent with en_US. +- // We want en_US, even though it has no data associated with it. +- +- // TODO: put any of the languages for which no data exists at the end +- // of the index. This allows all components based on ICU to use that +- // as the cutoff point. +- // if x := data.RawLDML(lang); false || +- // x.LocaleDisplayNames != nil || +- // x.Characters != nil || +- // x.Delimiters != nil || +- // x.Measurement != nil || +- // x.Dates != nil || +- // x.Numbers != nil || +- // x.Units != nil || +- // x.ListPatterns != nil || +- // x.Collations != nil || +- // x.Segmentations != nil || +- // x.Rbnf != nil || +- // x.Annotations != nil || +- // x.Metadata != nil { +- +- // TODO: support POSIX natively, albeit non-standard. +- tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) +- m[tag] = true +- // } +- } +- +- // TODO: plural rules are also defined for the deprecated tags: +- // iw mo sh tl +- // Consider removing these as compact tags. +- +- // Include locales for plural rules, which uses a different structure. +- for _, plurals := range b.supp.Plurals { +- for _, rules := range plurals.PluralRules { +- for _, lang := range strings.Split(rules.Locales, " ") { +- m[language.Make(lang)] = true +- } +- } +- } +- +- var coreTags []language.CompactCoreInfo +- var special []string +- +- for t := range m { +- if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { +- log.Fatalf("Unexpected extension %v in %v", x, t) +- } +- if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { +- cci, ok := language.GetCompactCore(t) +- if !ok { +- log.Fatalf("Locale for non-basic language %q", t) +- } +- coreTags = append(coreTags, cci) +- } else { +- special = append(special, t.String()) +- } +- } +- +- w := b.w +- +- sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) +- sort.Strings(special) +- +- w.WriteComment(` +- NumCompactTags is the number of common tags. 
The maximum tag is +- NumCompactTags-1.`) +- w.WriteConst("NumCompactTags", len(m)) +- +- fmt.Fprintln(w, "const (") +- for i, t := range coreTags { +- fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) +- } +- for i, t := range special { +- fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) +- } +- fmt.Fprintln(w, ")") +- +- w.WriteVar("coreTags", coreTags) +- +- w.WriteConst("specialTagsStr", strings.Join(special, " ")) +-} +- +-func ident(s string) string { +- return strings.Replace(s, "-", "", -1) + "Index" +-} +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +deleted file mode 100644 +index 9543d58323..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go ++++ /dev/null +@@ -1,54 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/language" +- "golang.org/x/text/internal/language/compact" +- "golang.org/x/text/unicode/cldr" +-) +- +-func main() { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- if err != nil { +- log.Fatalf("DecodeZip: %v", err) +- } +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("parents.go", "compact") +- +- // Create parents table. +- type ID uint16 +- parents := make([]ID, compact.NumCompactTags) +- for _, loc := range data.Locales() { +- tag := language.MustParse(loc) +- index, ok := compact.FromTag(tag) +- if !ok { +- continue +- } +- parentIndex := compact.ID(0) // und +- for p := tag.Parent(); p != language.Und; p = p.Parent() { +- if x, ok := compact.FromTag(p); ok { +- parentIndex = x +- break +- } +- } +- parents[index] = ID(parentIndex) +- } +- +- w.WriteComment(` +- parents maps a compact index of a tag to the compact index of the parent of +- this tag.`) +- w.WriteVar("parents", parents) +-} +diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go +deleted file mode 100644 +index cdcc7febcb..0000000000 +--- a/vendor/golang.org/x/text/internal/language/gen.go ++++ /dev/null +@@ -1,1520 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Language tag table generator. +-// Data read from the web. +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "log" +- "math" +- "reflect" +- "regexp" +- "sort" +- "strconv" +- "strings" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/tag" +- "golang.org/x/text/unicode/cldr" +-) +- +-var ( +- test = flag.Bool("test", +- false, +- "test existing tables; can be used to compare web data with package data.") +- outputFile = flag.String("output", +- "tables.go", +- "output file for generated tables") +-) +- +-var comment = []string{ +- ` +-lang holds an alphabetically sorted list of ISO-639 language identifiers. +-All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. 
+-For 2-byte language identifiers, the two successive bytes have the following meaning: +- - if the first letter of the 2- and 3-letter ISO codes are the same: +- the second and third letter of the 3-letter ISO code. +- - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +-For 3-byte language identifiers the 4th byte is 0.`, +- ` +-langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +-in lookup tables. The language ids for these language codes are derived directly +-from the letters and are not consecutive.`, +- ` +-altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +-to 2-letter language codes that cannot be derived using the method described above. +-Each 3-letter code is followed by its 1-byte langID.`, +- ` +-altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, +- ` +-AliasMap maps langIDs to their suggested replacements.`, +- ` +-script is an alphabetically sorted list of ISO 15924 codes. The index +-of the script in the string, divided by 4, is the internal scriptID.`, +- ` +-isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +-for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +-the UN.M49 codes used for groups.)`, +- ` +-regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +-Each 2-letter codes is followed by two bytes with the following meaning: +- - [A-Z}{2}: the first letter of the 2-letter code plus these two +- letters form the 3-letter ISO code. +- - 0, n: index into altRegionISO3.`, +- ` +-regionTypes defines the status of a region for various standards.`, +- ` +-m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +-codes indicating collections of regions.`, +- ` +-m49Index gives indexes into fromM49 based on the three most significant bits +-of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in +- fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +-for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +-The region code is stored in the 9 lsb of the indexed value.`, +- ` +-fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, +- ` +-altRegionISO3 holds a list of 3-letter region codes that cannot be +-mapped to 2-letter codes using the default algorithm. This is a short list.`, +- ` +-altRegionIDs holds a list of regionIDs the positions of which match those +-of the 3-letter ISO codes in altRegionISO3.`, +- ` +-variantNumSpecialized is the number of specialized variants in variants.`, +- ` +-suppressScript is an index from langID to the dominant script for that language, +-if it exists. If a script is given, it should be suppressed from the language tag.`, +- ` +-likelyLang is a lookup table, indexed by langID, for the most likely +-scripts and regions given incomplete information. If more entries exist for a +-given language, region and script are the index and size respectively +-of the list in likelyLangList.`, +- ` +-likelyLangList holds lists info associated with likelyLang.`, +- ` +-likelyRegion is a lookup table, indexed by regionID, for the most likely +-languages and scripts given incomplete information. If more entries exist +-for a given regionID, lang and script are the index and size respectively +-of the list in likelyRegionList. 
+-TODO: exclude containers and user-definable regions from the list.`, +- ` +-likelyRegionList holds lists info associated with likelyRegion.`, +- ` +-likelyScript is a lookup table, indexed by scriptID, for the most likely +-languages and regions given a script.`, +- ` +-nRegionGroups is the number of region groups.`, +- ` +-regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +-where each set holds all groupings that are directly connected in a region +-containment graph.`, +- ` +-regionInclusionBits is an array of bit vectors where every vector represents +-a set of region groupings. These sets are used to compute the distance +-between two regions for the purpose of language matching.`, +- ` +-regionInclusionNext marks, for each entry in regionInclusionBits, the set of +-all groups that are reachable from the groups set in the respective entry.`, +-} +- +-// TODO: consider changing some of these structures to tries. This can reduce +-// memory, but may increase the need for memory allocations. This could be +-// mitigated if we can piggyback on language tags for common cases. +- +-func failOnError(e error) { +- if e != nil { +- log.Panic(e) +- } +-} +- +-type setType int +- +-const ( +- Indexed setType = 1 + iota // all elements must be of same size +- Linear +-) +- +-type stringSet struct { +- s []string +- sorted, frozen bool +- +- // We often need to update values after the creation of an index is completed. +- // We include a convenience map for keeping track of this. +- update map[string]string +- typ setType // used for checking. +-} +- +-func (ss *stringSet) clone() stringSet { +- c := *ss +- c.s = append([]string(nil), c.s...) +- return c +-} +- +-func (ss *stringSet) setType(t setType) { +- if ss.typ != t && ss.typ != 0 { +- log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) +- } +-} +- +-// parse parses a whitespace-separated string and initializes ss with its +-// components. +-func (ss *stringSet) parse(s string) { +- scan := bufio.NewScanner(strings.NewReader(s)) +- scan.Split(bufio.ScanWords) +- for scan.Scan() { +- ss.add(scan.Text()) +- } +-} +- +-func (ss *stringSet) assertChangeable() { +- if ss.frozen { +- log.Panic("attempt to modify a frozen stringSet") +- } +-} +- +-func (ss *stringSet) add(s string) { +- ss.assertChangeable() +- ss.s = append(ss.s, s) +- ss.sorted = ss.frozen +-} +- +-func (ss *stringSet) freeze() { +- ss.compact() +- ss.frozen = true +-} +- +-func (ss *stringSet) compact() { +- if ss.sorted { +- return +- } +- a := ss.s +- sort.Strings(a) +- k := 0 +- for i := 1; i < len(a); i++ { +- if a[k] != a[i] { +- a[k+1] = a[i] +- k++ +- } +- } +- ss.s = a[:k+1] +- ss.sorted = ss.frozen +-} +- +-type funcSorter struct { +- fn func(a, b string) bool +- sort.StringSlice +-} +- +-func (s funcSorter) Less(i, j int) bool { +- return s.fn(s.StringSlice[i], s.StringSlice[j]) +-} +- +-func (ss *stringSet) sortFunc(f func(a, b string) bool) { +- ss.compact() +- sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) +-} +- +-func (ss *stringSet) remove(s string) { +- ss.assertChangeable() +- if i, ok := ss.find(s); ok { +- copy(ss.s[i:], ss.s[i+1:]) +- ss.s = ss.s[:len(ss.s)-1] +- } +-} +- +-func (ss *stringSet) replace(ol, nu string) { +- ss.s[ss.index(ol)] = nu +- ss.sorted = ss.frozen +-} +- +-func (ss *stringSet) index(s string) int { +- ss.setType(Indexed) +- i, ok := ss.find(s) +- if !ok { +- if i < len(ss.s) { +- log.Panicf("find: item %q is not in list. 
Closest match is %q.", s, ss.s[i]) +- } +- log.Panicf("find: item %q is not in list", s) +- +- } +- return i +-} +- +-func (ss *stringSet) find(s string) (int, bool) { +- ss.compact() +- i := sort.SearchStrings(ss.s, s) +- return i, i != len(ss.s) && ss.s[i] == s +-} +- +-func (ss *stringSet) slice() []string { +- ss.compact() +- return ss.s +-} +- +-func (ss *stringSet) updateLater(v, key string) { +- if ss.update == nil { +- ss.update = map[string]string{} +- } +- ss.update[v] = key +-} +- +-// join joins the string and ensures that all entries are of the same length. +-func (ss *stringSet) join() string { +- ss.setType(Indexed) +- n := len(ss.s[0]) +- for _, s := range ss.s { +- if len(s) != n { +- log.Panicf("join: not all entries are of the same length: %q", s) +- } +- } +- ss.s = append(ss.s, strings.Repeat("\xff", n)) +- return strings.Join(ss.s, "") +-} +- +-// ianaEntry holds information for an entry in the IANA Language Subtag Repository. +-// All types use the same entry. +-// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various +-// fields. +-type ianaEntry struct { +- typ string +- description []string +- scope string +- added string +- preferred string +- deprecated string +- suppressScript string +- macro string +- prefix []string +-} +- +-type builder struct { +- w *gen.CodeWriter +- hw io.Writer // MultiWriter for w and w.Hash +- data *cldr.CLDR +- supp *cldr.SupplementalData +- +- // indices +- locale stringSet // common locales +- lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data +- langNoIndex stringSet // 3-letter ISO codes with no associated data +- script stringSet // 4-letter ISO codes +- region stringSet // 2-letter ISO or 3-digit UN M49 codes +- variant stringSet // 4-8-alphanumeric variant code. +- +- // Region codes that are groups with their corresponding group IDs. +- groups map[int]index +- +- // langInfo +- registry map[string]*ianaEntry +-} +- +-type index uint +- +-func newBuilder(w *gen.CodeWriter) *builder { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- failOnError(err) +- b := builder{ +- w: w, +- hw: io.MultiWriter(w, w.Hash), +- data: data, +- supp: data.Supplemental(), +- } +- b.parseRegistry() +- return &b +-} +- +-func (b *builder) parseRegistry() { +- r := gen.OpenIANAFile("assignments/language-subtag-registry") +- defer r.Close() +- b.registry = make(map[string]*ianaEntry) +- +- scan := bufio.NewScanner(r) +- scan.Split(bufio.ScanWords) +- var record *ianaEntry +- for more := scan.Scan(); more; { +- key := scan.Text() +- more = scan.Scan() +- value := scan.Text() +- switch key { +- case "Type:": +- record = &ianaEntry{typ: value} +- case "Subtag:", "Tag:": +- if s := strings.SplitN(value, "..", 2); len(s) > 1 { +- for a := s[0]; a <= s[1]; a = inc(a) { +- b.addToRegistry(a, record) +- } +- } else { +- b.addToRegistry(value, record) +- } +- case "Suppress-Script:": +- record.suppressScript = value +- case "Added:": +- record.added = value +- case "Deprecated:": +- record.deprecated = value +- case "Macrolanguage:": +- record.macro = value +- case "Preferred-Value:": +- record.preferred = value +- case "Prefix:": +- record.prefix = append(record.prefix, value) +- case "Scope:": +- record.scope = value +- case "Description:": +- buf := []byte(value) +- for more = scan.Scan(); more; more = scan.Scan() { +- b := scan.Bytes() +- if b[0] == '%' || b[len(b)-1] == ':' { +- break +- } +- buf = append(buf, ' ') +- buf = append(buf, b...) 
+- } +- record.description = append(record.description, string(buf)) +- continue +- default: +- continue +- } +- more = scan.Scan() +- } +- if scan.Err() != nil { +- log.Panic(scan.Err()) +- } +-} +- +-func (b *builder) addToRegistry(key string, entry *ianaEntry) { +- if info, ok := b.registry[key]; ok { +- if info.typ != "language" || entry.typ != "extlang" { +- log.Fatalf("parseRegistry: tag %q already exists", key) +- } +- } else { +- b.registry[key] = entry +- } +-} +- +-var commentIndex = make(map[string]string) +- +-func init() { +- for _, s := range comment { +- key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) +- commentIndex[key] = s +- } +-} +- +-func (b *builder) comment(name string) { +- if s := commentIndex[name]; len(s) > 0 { +- b.w.WriteComment(s) +- } else { +- fmt.Fprintln(b.w) +- } +-} +- +-func (b *builder) pf(f string, x ...interface{}) { +- fmt.Fprintf(b.hw, f, x...) +- fmt.Fprint(b.hw, "\n") +-} +- +-func (b *builder) p(x ...interface{}) { +- fmt.Fprintln(b.hw, x...) +-} +- +-func (b *builder) addSize(s int) { +- b.w.Size += s +- b.pf("// Size: %d bytes", s) +-} +- +-func (b *builder) writeConst(name string, x interface{}) { +- b.comment(name) +- b.w.WriteConst(name, x) +-} +- +-// writeConsts computes f(v) for all v in values and writes the results +-// as constants named _v to a single constant block. +-func (b *builder) writeConsts(f func(string) int, values ...string) { +- b.pf("const (") +- for _, v := range values { +- b.pf("\t_%s = %v", v, f(v)) +- } +- b.pf(")") +-} +- +-// writeType writes the type of the given value, which must be a struct. +-func (b *builder) writeType(value interface{}) { +- b.comment(reflect.TypeOf(value).Name()) +- b.w.WriteType(value) +-} +- +-func (b *builder) writeSlice(name string, ss interface{}) { +- b.writeSliceAddSize(name, 0, ss) +-} +- +-func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { +- b.comment(name) +- b.w.Size += extraSize +- v := reflect.ValueOf(ss) +- t := v.Type().Elem() +- b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) +- +- fmt.Fprintf(b.w, "var %s = ", name) +- b.w.WriteArray(ss) +- b.p() +-} +- +-type FromTo struct { +- From, To uint16 +-} +- +-func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { +- ss.sortFunc(func(a, b string) bool { +- return index(a) < index(b) +- }) +- m := []FromTo{} +- for _, s := range ss.s { +- m = append(m, FromTo{index(s), index(ss.update[s])}) +- } +- b.writeSlice(name, m) +-} +- +-const base = 'z' - 'a' + 1 +- +-func strToInt(s string) uint { +- v := uint(0) +- for i := 0; i < len(s); i++ { +- v *= base +- v += uint(s[i] - 'a') +- } +- return v +-} +- +-// converts the given integer to the original ASCII string passed to strToInt. +-// len(s) must match the number of characters obtained. +-func intToStr(v uint, s []byte) { +- for i := len(s) - 1; i >= 0; i-- { +- s[i] = byte(v%base) + 'a' +- v /= base +- } +-} +- +-func (b *builder) writeBitVector(name string, ss []string) { +- vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) +- for _, s := range ss { +- v := strToInt(s) +- vec[v/8] |= 1 << (v % 8) +- } +- b.writeSlice(name, vec) +-} +- +-// TODO: convert this type into a list or two-stage trie. 
+-func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { +- b.comment(name) +- v := reflect.ValueOf(m) +- sz := v.Len() * (2 + int(v.Type().Key().Size())) +- for _, k := range m { +- sz += len(k) +- } +- b.addSize(sz) +- keys := []string{} +- b.pf(`var %s = map[string]uint16{`, name) +- for k := range m { +- keys = append(keys, k) +- } +- sort.Strings(keys) +- for _, k := range keys { +- b.pf("\t%q: %v,", k, f(m[k])) +- } +- b.p("}") +-} +- +-func (b *builder) writeMap(name string, m interface{}) { +- b.comment(name) +- v := reflect.ValueOf(m) +- sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) +- b.addSize(sz) +- f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { +- return strings.IndexRune("{}, ", r) != -1 +- }) +- sort.Strings(f[1:]) +- b.pf(`var %s = %s{`, name, f[0]) +- for _, kv := range f[1:] { +- b.pf("\t%s,", kv) +- } +- b.p("}") +-} +- +-func (b *builder) langIndex(s string) uint16 { +- if s == "und" { +- return 0 +- } +- if i, ok := b.lang.find(s); ok { +- return uint16(i) +- } +- return uint16(strToInt(s)) + uint16(len(b.lang.s)) +-} +- +-// inc advances the string to its lexicographical successor. +-func inc(s string) string { +- const maxTagLength = 4 +- var buf [maxTagLength]byte +- intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) +- for i := 0; i < len(s); i++ { +- if s[i] <= 'Z' { +- buf[i] -= 'a' - 'A' +- } +- } +- return string(buf[:len(s)]) +-} +- +-func (b *builder) parseIndices() { +- meta := b.supp.Metadata +- +- for k, v := range b.registry { +- var ss *stringSet +- switch v.typ { +- case "language": +- if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { +- b.lang.add(k) +- continue +- } else { +- ss = &b.langNoIndex +- } +- case "region": +- ss = &b.region +- case "script": +- ss = &b.script +- case "variant": +- ss = &b.variant +- default: +- continue +- } +- ss.add(k) +- } +- // Include any language for which there is data. +- for _, lang := range b.data.Locales() { +- if x := b.data.RawLDML(lang); false || +- x.LocaleDisplayNames != nil || +- x.Characters != nil || +- x.Delimiters != nil || +- x.Measurement != nil || +- x.Dates != nil || +- x.Numbers != nil || +- x.Units != nil || +- x.ListPatterns != nil || +- x.Collations != nil || +- x.Segmentations != nil || +- x.Rbnf != nil || +- x.Annotations != nil || +- x.Metadata != nil { +- +- from := strings.Split(lang, "_") +- if lang := from[0]; lang != "root" { +- b.lang.add(lang) +- } +- } +- } +- // Include locales for plural rules, which uses a different structure. +- for _, plurals := range b.data.Supplemental().Plurals { +- for _, rules := range plurals.PluralRules { +- for _, lang := range strings.Split(rules.Locales, " ") { +- if lang = strings.Split(lang, "_")[0]; lang != "root" { +- b.lang.add(lang) +- } +- } +- } +- } +- // Include languages in likely subtags. +- for _, m := range b.supp.LikelySubtags.LikelySubtag { +- from := strings.Split(m.From, "_") +- b.lang.add(from[0]) +- } +- // Include ISO-639 alpha-3 bibliographic entries. +- for _, a := range meta.Alias.LanguageAlias { +- if a.Reason == "bibliographic" { +- b.langNoIndex.add(a.Type) +- } +- } +- // Include regions in territoryAlias (not all are in the IANA registry!) 
+- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(reg.Type) == 2 { +- b.region.add(reg.Type) +- } +- } +- +- for _, s := range b.lang.s { +- if len(s) == 3 { +- b.langNoIndex.remove(s) +- } +- } +- b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) +- b.writeConst("NumScripts", len(b.script.slice())) +- b.writeConst("NumRegions", len(b.region.slice())) +- +- // Add dummy codes at the start of each list to represent "unspecified". +- b.lang.add("---") +- b.script.add("----") +- b.region.add("---") +- +- // common locales +- b.locale.parse(meta.DefaultContent.Locales) +-} +- +-// TODO: region inclusion data will probably not be use used in future matchers. +- +-func (b *builder) computeRegionGroups() { +- b.groups = make(map[int]index) +- +- // Create group indices. +- for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. +- b.groups[i] = index(len(b.groups)) +- } +- for _, g := range b.supp.TerritoryContainment.Group { +- // Skip UN and EURO zone as they are flattening the containment +- // relationship. +- if g.Type == "EZ" || g.Type == "UN" { +- continue +- } +- group := b.region.index(g.Type) +- if _, ok := b.groups[group]; !ok { +- b.groups[group] = index(len(b.groups)) +- } +- } +- if len(b.groups) > 64 { +- log.Fatalf("only 64 groups supported, found %d", len(b.groups)) +- } +- b.writeConst("nRegionGroups", len(b.groups)) +-} +- +-var langConsts = []string{ +- "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", +- "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", +- "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", +- "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", +- "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", +- "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", +- +- // constants for grandfathered tags (if not already defined) +- "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", +- "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", +-} +- +-// writeLanguage generates all tables needed for language canonicalization. +-func (b *builder) writeLanguage() { +- meta := b.supp.Metadata +- +- b.writeConst("nonCanonicalUnd", b.lang.index("und")) +- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) +- b.writeConst("langPrivateStart", b.langIndex("qaa")) +- b.writeConst("langPrivateEnd", b.langIndex("qtz")) +- +- // Get language codes that need to be mapped (overlong 3-letter codes, +- // deprecated 2-letter codes, legacy and grandfathered tags.) +- langAliasMap := stringSet{} +- aliasTypeMap := map[string]AliasType{} +- +- // altLangISO3 get the alternative ISO3 names that need to be mapped. +- altLangISO3 := stringSet{} +- // Add dummy start to avoid the use of index 0. 
+- altLangISO3.add("---") +- altLangISO3.updateLater("---", "aa") +- +- lang := b.lang.clone() +- for _, a := range meta.Alias.LanguageAlias { +- if a.Replacement == "" { +- a.Replacement = "und" +- } +- // TODO: support mapping to tags +- repl := strings.SplitN(a.Replacement, "_", 2)[0] +- if a.Reason == "overlong" { +- if len(a.Replacement) == 2 && len(a.Type) == 3 { +- lang.updateLater(a.Replacement, a.Type) +- } +- } else if len(a.Type) <= 3 { +- switch a.Reason { +- case "macrolanguage": +- aliasTypeMap[a.Type] = Macro +- case "deprecated": +- // handled elsewhere +- continue +- case "bibliographic", "legacy": +- if a.Type == "no" { +- continue +- } +- aliasTypeMap[a.Type] = Legacy +- default: +- log.Fatalf("new %s alias: %s", a.Reason, a.Type) +- } +- langAliasMap.add(a.Type) +- langAliasMap.updateLater(a.Type, repl) +- } +- } +- // Manually add the mapping of "nb" (Norwegian) to its macro language. +- // This can be removed if CLDR adopts this change. +- langAliasMap.add("nb") +- langAliasMap.updateLater("nb", "no") +- aliasTypeMap["nb"] = Macro +- +- for k, v := range b.registry { +- // Also add deprecated values for 3-letter ISO codes, which CLDR omits. +- if v.typ == "language" && v.deprecated != "" && v.preferred != "" { +- langAliasMap.add(k) +- langAliasMap.updateLater(k, v.preferred) +- aliasTypeMap[k] = Deprecated +- } +- } +- // Fix CLDR mappings. +- lang.updateLater("tl", "tgl") +- lang.updateLater("sh", "hbs") +- lang.updateLater("mo", "mol") +- lang.updateLater("no", "nor") +- lang.updateLater("tw", "twi") +- lang.updateLater("nb", "nob") +- lang.updateLater("ak", "aka") +- lang.updateLater("bh", "bih") +- +- // Ensure that each 2-letter code is matched with a 3-letter code. +- for _, v := range lang.s[1:] { +- s, ok := lang.update[v] +- if !ok { +- if s, ok = lang.update[langAliasMap.update[v]]; !ok { +- continue +- } +- lang.update[v] = s +- } +- if v[0] != s[0] { +- altLangISO3.add(s) +- altLangISO3.updateLater(s, v) +- } +- } +- +- // Complete canonicalized language tags. +- lang.freeze() +- for i, v := range lang.s { +- // We can avoid these manual entries by using the IANA registry directly. +- // Seems easier to update the list manually, as changes are rare. +- // The panic in this loop will trigger if we miss an entry. +- add := "" +- if s, ok := lang.update[v]; ok { +- if s[0] == v[0] { +- add = s[1:] +- } else { +- add = string([]byte{0, byte(altLangISO3.index(s))}) +- } +- } else if len(v) == 3 { +- add = "\x00" +- } else { +- log.Panicf("no data for long form of %q", v) +- } +- lang.s[i] += add +- } +- b.writeConst("lang", tag.Index(lang.join())) +- +- b.writeConst("langNoIndexOffset", len(b.lang.s)) +- +- // space of all valid 3-letter language identifiers. 
+- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) +- +- altLangIndex := []uint16{} +- for i, s := range altLangISO3.slice() { +- altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) +- if i > 0 { +- idx := b.lang.index(altLangISO3.update[s]) +- altLangIndex = append(altLangIndex, uint16(idx)) +- } +- } +- b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) +- b.writeSlice("altLangIndex", altLangIndex) +- +- b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) +- types := make([]AliasType, len(langAliasMap.s)) +- for i, s := range langAliasMap.s { +- types[i] = aliasTypeMap[s] +- } +- b.writeSlice("AliasTypes", types) +-} +- +-var scriptConsts = []string{ +- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", +- "Zzzz", +-} +- +-func (b *builder) writeScript() { +- b.writeConsts(b.script.index, scriptConsts...) +- b.writeConst("script", tag.Index(b.script.join())) +- +- supp := make([]uint8, len(b.lang.slice())) +- for i, v := range b.lang.slice()[1:] { +- if sc := b.registry[v].suppressScript; sc != "" { +- supp[i+1] = uint8(b.script.index(sc)) +- } +- } +- b.writeSlice("suppressScript", supp) +- +- // There is only one deprecated script in CLDR. This value is hard-coded. +- // We check here if the code must be updated. +- for _, a := range b.supp.Metadata.Alias.ScriptAlias { +- if a.Type != "Qaai" { +- log.Panicf("unexpected deprecated stript %q", a.Type) +- } +- } +-} +- +-func parseM49(s string) int16 { +- if len(s) == 0 { +- return 0 +- } +- v, err := strconv.ParseUint(s, 10, 10) +- failOnError(err) +- return int16(v) +-} +- +-var regionConsts = []string{ +- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", +- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. +-} +- +-func (b *builder) writeRegion() { +- b.writeConsts(b.region.index, regionConsts...) +- +- isoOffset := b.region.index("AA") +- m49map := make([]int16, len(b.region.slice())) +- fromM49map := make(map[int16]int) +- altRegionISO3 := "" +- altRegionIDs := []uint16{} +- +- b.writeConst("isoRegionOffset", isoOffset) +- +- // 2-letter region lookup and mapping to numeric codes. +- regionISO := b.region.clone() +- regionISO.s = regionISO.s[isoOffset:] +- regionISO.sorted = false +- +- regionTypes := make([]byte, len(b.region.s)) +- +- // Is the region valid BCP 47? +- for s, e := range b.registry { +- if len(s) == 2 && s == strings.ToUpper(s) { +- i := b.region.index(s) +- for _, d := range e.description { +- if strings.Contains(d, "Private use") { +- regionTypes[i] = iso3166UserAssigned +- } +- } +- regionTypes[i] |= bcp47Region +- } +- } +- +- // Is the region a valid ccTLD? 
+- r := gen.OpenIANAFile("domains/root/db") +- defer r.Close() +- +- buf, err := ioutil.ReadAll(r) +- failOnError(err) +- re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) +- for _, m := range re.FindAllSubmatch(buf, -1) { +- i := b.region.index(strings.ToUpper(string(m[1]))) +- regionTypes[i] |= ccTLD +- } +- +- b.writeSlice("regionTypes", regionTypes) +- +- iso3Set := make(map[string]int) +- update := func(iso2, iso3 string) { +- i := regionISO.index(iso2) +- if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { +- regionISO.s[i] += iso3[1:] +- iso3Set[iso3] = -1 +- } else { +- if ok && j >= 0 { +- regionISO.s[i] += string([]byte{0, byte(j)}) +- } else { +- iso3Set[iso3] = len(altRegionISO3) +- regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) +- altRegionISO3 += iso3 +- altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) +- } +- } +- } +- for _, tc := range b.supp.CodeMappings.TerritoryCodes { +- i := regionISO.index(tc.Type) + isoOffset +- if d := m49map[i]; d != 0 { +- log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) +- } +- m49 := parseM49(tc.Numeric) +- m49map[i] = m49 +- if r := fromM49map[m49]; r == 0 { +- fromM49map[m49] = i +- } else if r != i { +- dep := b.registry[regionISO.s[r-isoOffset]].deprecated +- if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { +- fromM49map[m49] = i +- } +- } +- } +- for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { +- from := parseM49(ta.Type) +- if r := fromM49map[from]; r == 0 { +- fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset +- } +- } +- } +- for _, tc := range b.supp.CodeMappings.TerritoryCodes { +- if len(tc.Alpha3) == 3 { +- update(tc.Type, tc.Alpha3) +- } +- } +- // This entries are not included in territoryCodes. Mostly 3-letter variants +- // of deleted codes and an entry for QU. +- for _, m := range []struct{ iso2, iso3 string }{ +- {"CT", "CTE"}, +- {"DY", "DHY"}, +- {"HV", "HVO"}, +- {"JT", "JTN"}, +- {"MI", "MID"}, +- {"NH", "NHB"}, +- {"NQ", "ATN"}, +- {"PC", "PCI"}, +- {"PU", "PUS"}, +- {"PZ", "PCZ"}, +- {"RH", "RHO"}, +- {"VD", "VDR"}, +- {"WK", "WAK"}, +- // These three-letter codes are used for others as well. +- {"FQ", "ATF"}, +- } { +- update(m.iso2, m.iso3) +- } +- for i, s := range regionISO.s { +- if len(s) != 4 { +- regionISO.s[i] = s + " " +- } +- } +- b.writeConst("regionISO", tag.Index(regionISO.join())) +- b.writeConst("altRegionISO3", altRegionISO3) +- b.writeSlice("altRegionIDs", altRegionIDs) +- +- // Create list of deprecated regions. +- // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only +- // Transitionally-reserved mapping not included. +- regionOldMap := stringSet{} +- // Include regions in territoryAlias (not all are in the IANA registry!) +- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { +- regionOldMap.add(reg.Type) +- regionOldMap.updateLater(reg.Type, reg.Replacement) +- i, _ := regionISO.find(reg.Type) +- j, _ := regionISO.find(reg.Replacement) +- if k := m49map[i+isoOffset]; k == 0 { +- m49map[i+isoOffset] = m49map[j+isoOffset] +- } +- } +- } +- b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { +- return uint16(b.region.index(s)) +- }) +- // 3-digit region lookup, groupings. 
+- for i := 1; i < isoOffset; i++ { +- m := parseM49(b.region.s[i]) +- m49map[i] = m +- fromM49map[m] = i +- } +- b.writeSlice("m49", m49map) +- +- const ( +- searchBits = 7 +- regionBits = 9 +- ) +- if len(m49map) >= 1<<regionBits { +- log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits) +- } +- m49Index := [9]int16{} +- fromM49 := []uint16{} +- m49 := []int{} +- for k := range fromM49map { +- m49 = append(m49, int(k)) +- } +- sort.Ints(m49) +- for _, k := range m49 { +- val := (k & (1<<searchBits - 1)) << regionBits +- val |= fromM49map[int16(k)] +- fromM49 = append(fromM49, uint16(val)) +- m49Index[k>>searchBits] = int16(len(fromM49)) +- } +- b.writeSlice("m49Index", m49Index) +- b.writeSlice("fromM49", fromM49) +-} +- +-const ( +- // TODO: put these lists in regionTypes as user data? Could be used for +- // various optimizations and refinements and could be exposed in the API. +- iso3166Except = "AC CP DG EA EU FX IC SU TA UK" +- iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. +- // DY and RH are actually not deleted, but indeterminately reserved. +- iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" +-) +- +-const ( +- iso3166UserAssigned = 1 << iota +- ccTLD +- bcp47Region +-) +- +-func find(list []string, s string) int { +- for i, t := range list { +- if t == s { +- return i +- } +- } +- return -1 +-} +- +-// writeVariants generates per-variant information and creates a map from variant +-// name to index value. We assign index values such that sorting multiple +-// variants by index value will result in the correct order. +-// There are two types of variants: specialized and general. Specialized variants +-// are only applicable to certain language or language-script pairs. Generalized +-// variants apply to any language. Generalized variants always sort after +-// specialized variants. We will therefore always assign a higher index value +-// to a generalized variant than any other variant. Generalized variants are +-// sorted alphabetically among themselves. +-// Specialized variants may also sort after other specialized variants. Such +-// variants will be ordered after any of the variants they may follow. +-// We assume that if a variant x is followed by a variant y, then for any prefix +-// p of x, p-x is a prefix of y. This allows us to order tags based on the +-// maximum of the length of any of its prefixes. +-// TODO: it is possible to define a set of Prefix values on variants such that +-// a total order cannot be defined to the point that this algorithm breaks. +-// In other words, we cannot guarantee the same order of variants for the +-// future using the same algorithm or for non-compliant combinations of +-// variants. For this reason, consider using simple alphabetic sorting +-// of variants and ignore Prefix restrictions altogether. +-func (b *builder) writeVariant() { +- generalized := stringSet{} +- specialized := stringSet{} +- specializedExtend := stringSet{} +- // Collate the variants by type and check assumptions. +- for _, v := range b.variant.slice() { +- e := b.registry[v] +- if len(e.prefix) == 0 { +- generalized.add(v) +- continue +- } +- c := strings.Split(e.prefix[0], "-") +- hasScriptOrRegion := false +- if len(c) > 1 { +- _, hasScriptOrRegion = b.script.find(c[1]) +- if !hasScriptOrRegion { +- _, hasScriptOrRegion = b.region.find(c[1]) +- +- } +- } +- if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { +- // Variant is preceded by a language. +- specialized.add(v) +- continue +- } +- // Variant is preceded by another variant. +- specializedExtend.add(v) +- prefix := c[0] + "-" +- if hasScriptOrRegion { +- prefix += c[1] +- } +- for _, p := range e.prefix { +- // Verify that the prefix minus the last element is a prefix of the +- // predecessor element.
+- i := strings.LastIndex(p, "-") +- pred := b.registry[p[i+1:]] +- if find(pred.prefix, p[:i]) < 0 { +- log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) +- } +- // The sorting used below does not work in the general case. It works +- // if we assume that variants that may be followed by others only have +- // prefixes of the same length. Verify this. +- count := strings.Count(p[:i], "-") +- for _, q := range pred.prefix { +- if c := strings.Count(q, "-"); c != count { +- log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) +- } +- } +- if !strings.HasPrefix(p, prefix) { +- log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) +- } +- } +- } +- +- // Sort extended variants. +- a := specializedExtend.s +- less := func(v, w string) bool { +- // Sort by the maximum number of elements. +- maxCount := func(s string) (max int) { +- for _, p := range b.registry[s].prefix { +- if c := strings.Count(p, "-"); c > max { +- max = c +- } +- } +- return +- } +- if cv, cw := maxCount(v), maxCount(w); cv != cw { +- return cv < cw +- } +- // Sort by name as tie breaker. +- return v < w +- } +- sort.Sort(funcSorter{less, sort.StringSlice(a)}) +- specializedExtend.frozen = true +- +- // Create index from variant name to index. +- variantIndex := make(map[string]uint8) +- add := func(s []string) { +- for _, v := range s { +- variantIndex[v] = uint8(len(variantIndex)) +- } +- } +- add(specialized.slice()) +- add(specializedExtend.s) +- numSpecialized := len(variantIndex) +- add(generalized.slice()) +- if n := len(variantIndex); n > 255 { +- log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) +- } +- b.writeMap("variantIndex", variantIndex) +- b.writeConst("variantNumSpecialized", numSpecialized) +-} +- +-func (b *builder) writeLanguageInfo() { +-} +- +-// writeLikelyData writes tables that are used both for finding parent relations and for +-// language matching. Each entry contains additional bits to indicate the status of the +-// data to know when it cannot be used for parent relations. +-func (b *builder) writeLikelyData() { +- const ( +- isList = 1 << iota +- scriptInFrom +- regionInFrom +- ) +- type ( // generated types +- likelyScriptRegion struct { +- region uint16 +- script uint8 +- flags uint8 +- } +- likelyLangScript struct { +- lang uint16 +- script uint8 +- flags uint8 +- } +- likelyLangRegion struct { +- lang uint16 +- region uint16 +- } +- // likelyTag is used for getting likely tags for group regions, where +- // the likely region might be a region contained in the group. 
+- likelyTag struct { +- lang uint16 +- region uint16 +- script uint8 +- } +- ) +- var ( // generated variables +- likelyRegionGroup = make([]likelyTag, len(b.groups)) +- likelyLang = make([]likelyScriptRegion, len(b.lang.s)) +- likelyRegion = make([]likelyLangScript, len(b.region.s)) +- likelyScript = make([]likelyLangRegion, len(b.script.s)) +- likelyLangList = []likelyScriptRegion{} +- likelyRegionList = []likelyLangScript{} +- ) +- type fromTo struct { +- from, to []string +- } +- langToOther := map[int][]fromTo{} +- regionToOther := map[int][]fromTo{} +- for _, m := range b.supp.LikelySubtags.LikelySubtag { +- from := strings.Split(m.From, "_") +- to := strings.Split(m.To, "_") +- if len(to) != 3 { +- log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) +- } +- if len(from) > 3 { +- log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) +- } +- if from[0] != to[0] && from[0] != "und" { +- log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) +- } +- if len(from) == 3 { +- if from[2] != to[2] { +- log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) +- } +- if from[0] != "und" { +- log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) +- } +- } +- if len(from) == 1 || from[0] != "und" { +- id := 0 +- if from[0] != "und" { +- id = b.lang.index(from[0]) +- } +- langToOther[id] = append(langToOther[id], fromTo{from, to}) +- } else if len(from) == 2 && len(from[1]) == 4 { +- sid := b.script.index(from[1]) +- likelyScript[sid].lang = uint16(b.langIndex(to[0])) +- likelyScript[sid].region = uint16(b.region.index(to[2])) +- } else { +- r := b.region.index(from[len(from)-1]) +- if id, ok := b.groups[r]; ok { +- if from[0] != "und" { +- log.Fatalf("region changed unexpectedly: %s -> %s", from, to) +- } +- likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) +- likelyRegionGroup[id].script = uint8(b.script.index(to[1])) +- likelyRegionGroup[id].region = uint16(b.region.index(to[2])) +- } else { +- regionToOther[r] = append(regionToOther[r], fromTo{from, to}) +- } +- } +- } +- b.writeType(likelyLangRegion{}) +- b.writeSlice("likelyScript", likelyScript) +- +- for id := range b.lang.s { +- list := langToOther[id] +- if len(list) == 1 { +- likelyLang[id].region = uint16(b.region.index(list[0].to[2])) +- likelyLang[id].script = uint8(b.script.index(list[0].to[1])) +- } else if len(list) > 1 { +- likelyLang[id].flags = isList +- likelyLang[id].region = uint16(len(likelyLangList)) +- likelyLang[id].script = uint8(len(list)) +- for _, x := range list { +- flags := uint8(0) +- if len(x.from) > 1 { +- if x.from[1] == x.to[2] { +- flags = regionInFrom +- } else { +- flags = scriptInFrom +- } +- } +- likelyLangList = append(likelyLangList, likelyScriptRegion{ +- region: uint16(b.region.index(x.to[2])), +- script: uint8(b.script.index(x.to[1])), +- flags: flags, +- }) +- } +- } +- } +- // TODO: merge suppressScript data with this table. 
+- b.writeType(likelyScriptRegion{}) +- b.writeSlice("likelyLang", likelyLang) +- b.writeSlice("likelyLangList", likelyLangList) +- +- for id := range b.region.s { +- list := regionToOther[id] +- if len(list) == 1 { +- likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) +- likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) +- if len(list[0].from) > 2 { +- likelyRegion[id].flags = scriptInFrom +- } +- } else if len(list) > 1 { +- likelyRegion[id].flags = isList +- likelyRegion[id].lang = uint16(len(likelyRegionList)) +- likelyRegion[id].script = uint8(len(list)) +- for i, x := range list { +- if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { +- log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) +- } +- x := likelyLangScript{ +- lang: uint16(b.langIndex(x.to[0])), +- script: uint8(b.script.index(x.to[1])), +- } +- if len(list[0].from) > 2 { +- x.flags = scriptInFrom +- } +- likelyRegionList = append(likelyRegionList, x) +- } +- } +- } +- b.writeType(likelyLangScript{}) +- b.writeSlice("likelyRegion", likelyRegion) +- b.writeSlice("likelyRegionList", likelyRegionList) +- +- b.writeType(likelyTag{}) +- b.writeSlice("likelyRegionGroup", likelyRegionGroup) +-} +- +-func (b *builder) writeRegionInclusionData() { +- var ( +- // mm holds for each group the set of groups with a distance of 1. +- mm = make(map[int][]index) +- +- // containment holds for each group the transitive closure of +- // containment of other groups. +- containment = make(map[index][]index) +- ) +- for _, g := range b.supp.TerritoryContainment.Group { +- // Skip UN and EURO zone as they are flattening the containment +- // relationship. +- if g.Type == "EZ" || g.Type == "UN" { +- continue +- } +- group := b.region.index(g.Type) +- groupIdx := b.groups[group] +- for _, mem := range strings.Split(g.Contains, " ") { +- r := b.region.index(mem) +- mm[r] = append(mm[r], groupIdx) +- if g, ok := b.groups[r]; ok { +- mm[group] = append(mm[group], g) +- containment[groupIdx] = append(containment[groupIdx], g) +- } +- } +- } +- +- regionContainment := make([]uint64, len(b.groups)) +- for _, g := range b.groups { +- l := containment[g] +- +- // Compute the transitive closure of containment. +- for i := 0; i < len(l); i++ { +- l = append(l, containment[l[i]]...) +- } +- +- // Compute the bitmask. +- regionContainment[g] = 1 << g +- for _, v := range l { +- regionContainment[g] |= 1 << v +- } +- } +- b.writeSlice("regionContainment", regionContainment) +- +- regionInclusion := make([]uint8, len(b.region.s)) +- bvs := make(map[uint64]index) +- // Make the first bitvector positions correspond with the groups. +- for r, i := range b.groups { +- bv := uint64(1 << i) +- for _, g := range mm[r] { +- bv |= 1 << g +- } +- bvs[bv] = i +- regionInclusion[r] = uint8(bvs[bv]) +- } +- for r := 1; r < len(b.region.s); r++ { +- if _, ok := b.groups[r]; !ok { +- bv := uint64(0) +- for _, g := range mm[r] { +- bv |= 1 << g +- } +- if bv == 0 { +- // Pick the world for unspecified regions. +- bv = 1 << b.groups[b.region.index("001")] +- } +- if _, ok := bvs[bv]; !ok { +- bvs[bv] = index(len(bvs)) +- } +- regionInclusion[r] = uint8(bvs[bv]) +- } +- } +- b.writeSlice("regionInclusion", regionInclusion) +- regionInclusionBits := make([]uint64, len(bvs)) +- for k, v := range bvs { +- regionInclusionBits[v] = uint64(k) +- } +- // Add bit vectors for increasingly large distances until a fixed point is reached. 
+- regionInclusionNext := []uint8{} +- for i := 0; i < len(regionInclusionBits); i++ { +- bits := regionInclusionBits[i] +- next := bits +- for i := uint(0); i < uint(len(b.groups)); i++ { +- if bits&(1< 6 { +- log.Fatalf("Too many groups: %d", i) +- } +- idToIndex[mv.Id] = uint8(i + 1) +- // TODO: also handle '-' +- for _, r := range strings.Split(mv.Value, "+") { +- todo := []string{r} +- for k := 0; k < len(todo); k++ { +- r := todo[k] +- regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) +- todo = append(todo, regionHierarchy[r]...) +- } +- } +- } +- b.w.WriteVar("regionToGroups", regionToGroups) +- +- // maps language id to in- and out-of-group region. +- paradigmLocales := [][3]uint16{} +- locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") +- for i := 0; i < len(locales); i += 2 { +- x := [3]uint16{} +- for j := 0; j < 2; j++ { +- pc := strings.SplitN(locales[i+j], "-", 2) +- x[0] = b.langIndex(pc[0]) +- if len(pc) == 2 { +- x[1+j] = uint16(b.regionIndex(pc[1])) +- } +- } +- paradigmLocales = append(paradigmLocales, x) +- } +- b.w.WriteVar("paradigmLocales", paradigmLocales) +- +- b.w.WriteType(mutualIntelligibility{}) +- b.w.WriteType(scriptIntelligibility{}) +- b.w.WriteType(regionIntelligibility{}) +- +- matchLang := []mutualIntelligibility{} +- matchScript := []scriptIntelligibility{} +- matchRegion := []regionIntelligibility{} +- // Convert the languageMatch entries in lists keyed by desired language. +- for _, m := range lm[0].LanguageMatch { +- // Different versions of CLDR use different separators. +- desired := strings.Replace(m.Desired, "-", "_", -1) +- supported := strings.Replace(m.Supported, "-", "_", -1) +- d := strings.Split(desired, "_") +- s := strings.Split(supported, "_") +- if len(d) != len(s) { +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- continue +- } +- distance, _ := strconv.ParseInt(m.Distance, 10, 8) +- switch len(d) { +- case 2: +- if desired == supported && desired == "*_*" { +- continue +- } +- // language-script pair. +- matchScript = append(matchScript, scriptIntelligibility{ +- wantLang: uint16(b.langIndex(d[0])), +- haveLang: uint16(b.langIndex(s[0])), +- wantScript: uint8(b.scriptIndex(d[1])), +- haveScript: uint8(b.scriptIndex(s[1])), +- distance: uint8(distance), +- }) +- if m.Oneway != "true" { +- matchScript = append(matchScript, scriptIntelligibility{ +- wantLang: uint16(b.langIndex(s[0])), +- haveLang: uint16(b.langIndex(d[0])), +- wantScript: uint8(b.scriptIndex(s[1])), +- haveScript: uint8(b.scriptIndex(d[1])), +- distance: uint8(distance), +- }) +- } +- case 1: +- if desired == supported && desired == "*" { +- continue +- } +- if distance == 1 { +- // nb == no is already handled by macro mapping. Check there +- // really is only this case. +- if d[0] != "no" || s[0] != "nb" { +- log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) +- } +- continue +- } +- // TODO: consider dropping oneway field and just doubling the entry. +- matchLang = append(matchLang, mutualIntelligibility{ +- want: uint16(b.langIndex(d[0])), +- have: uint16(b.langIndex(s[0])), +- distance: uint8(distance), +- oneway: m.Oneway == "true", +- }) +- case 3: +- if desired == supported && desired == "*_*_*" { +- continue +- } +- if desired != supported { +- // This is now supported by CLDR, but only one case, which +- // should already be covered by paradigm locales. For instance, +- // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in +- // testdata/CLDRLocaleMatcherTest.txt tests this. 
+- if supported != "en_*_GB" { +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- } +- continue +- } +- ri := regionIntelligibility{ +- lang: b.langIndex(d[0]), +- distance: uint8(distance), +- } +- if d[1] != "*" { +- ri.script = uint8(b.scriptIndex(d[1])) +- } +- switch { +- case d[2] == "*": +- ri.group = 0x80 // not contained in anything +- case strings.HasPrefix(d[2], "$!"): +- ri.group = 0x80 +- d[2] = "$" + d[2][len("$!"):] +- fallthrough +- case strings.HasPrefix(d[2], "$"): +- ri.group |= idToIndex[d[2]] +- } +- matchRegion = append(matchRegion, ri) +- default: +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- } +- } +- sort.SliceStable(matchLang, func(i, j int) bool { +- return matchLang[i].distance < matchLang[j].distance +- }) +- b.w.WriteComment(` +- matchLang holds pairs of langIDs of base languages that are typically +- mutually intelligible. Each pair is associated with a confidence and +- whether the intelligibility goes one or both ways.`) +- b.w.WriteVar("matchLang", matchLang) +- +- b.w.WriteComment(` +- matchScript holds pairs of scriptIDs where readers of one script +- can typically also read the other. Each is associated with a confidence.`) +- sort.SliceStable(matchScript, func(i, j int) bool { +- return matchScript[i].distance < matchScript[j].distance +- }) +- b.w.WriteVar("matchScript", matchScript) +- +- sort.SliceStable(matchRegion, func(i, j int) bool { +- return matchRegion[i].distance < matchRegion[j].distance +- }) +- b.w.WriteVar("matchRegion", matchRegion) +-} +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go +deleted file mode 100644 +index 987fc169cc..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen.go ++++ /dev/null +@@ -1,133 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "flag" +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/triegen" +- "golang.org/x/text/internal/ucd" +-) +- +-var outputFile = flag.String("out", "tables.go", "output file") +- +-func main() { +- gen.Init() +- gen.Repackage("gen_trieval.go", "trieval.go", "bidi") +- gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") +- +- genTables() +-} +- +-// bidiClass names and codes taken from class "bc" in +-// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +-var bidiClass = map[string]Class{ +- "AL": AL, // ArabicLetter +- "AN": AN, // ArabicNumber +- "B": B, // ParagraphSeparator +- "BN": BN, // BoundaryNeutral +- "CS": CS, // CommonSeparator +- "EN": EN, // EuropeanNumber +- "ES": ES, // EuropeanSeparator +- "ET": ET, // EuropeanTerminator +- "L": L, // LeftToRight +- "NSM": NSM, // NonspacingMark +- "ON": ON, // OtherNeutral +- "R": R, // RightToLeft +- "S": S, // SegmentSeparator +- "WS": WS, // WhiteSpace +- +- "FSI": Control, +- "PDF": Control, +- "PDI": Control, +- "LRE": Control, +- "LRI": Control, +- "LRO": Control, +- "RLE": Control, +- "RLI": Control, +- "RLO": Control, +-} +- +-func genTables() { +- if numClass > 0x0F { +- log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) +- } +- w := gen.NewCodeWriter() +- defer w.WriteVersionedGoFile(*outputFile, "bidi") +- +- gen.WriteUnicodeVersion(w) +- +- t := triegen.NewTrie("bidi") +- +- // Build data about bracket mapping. 
These bits need to be or-ed with +- // any other bits. +- orMask := map[rune]uint64{} +- +- xorMap := map[rune]int{} +- xorMasks := []rune{0} // First value is no-op. +- +- ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { +- r1 := p.Rune(0) +- r2 := p.Rune(1) +- xor := r1 ^ r2 +- if _, ok := xorMap[xor]; !ok { +- xorMap[xor] = len(xorMasks) +- xorMasks = append(xorMasks, xor) +- } +- entry := uint64(xorMap[xor]) << xorMaskShift +- switch p.String(2) { +- case "o": +- entry |= openMask +- case "c", "n": +- default: +- log.Fatalf("Unknown bracket class %q.", p.String(2)) +- } +- orMask[r1] = entry +- }) +- +- w.WriteComment(` +- xorMasks contains masks to be xor-ed with brackets to get the reverse +- version.`) +- w.WriteVar("xorMasks", xorMasks) +- +- done := map[rune]bool{} +- +- insert := func(r rune, c Class) { +- if !done[r] { +- t.Insert(r, orMask[r]|uint64(c)) +- done[r] = true +- } +- } +- +- // Insert the derived BiDi properties. +- ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { +- r := p.Rune(0) +- class, ok := bidiClass[p.String(1)] +- if !ok { +- log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) +- } +- insert(r, class) +- }) +- visitDefaults(insert) +- +- // TODO: use sparse blocks. This would reduce table size considerably +- // from the looks of it. +- +- sz, err := t.Gen(w) +- if err != nil { +- log.Fatal(err) +- } +- w.Size += sz +-} +- +-// dummy values to make methods in gen_common compile. The real versions +-// will be generated by this file to tables.go. +-var ( +- xorMasks []rune +-) +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +deleted file mode 100644 +index 02c3b505d6..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go ++++ /dev/null +@@ -1,57 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "unicode" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/ucd" +- "golang.org/x/text/unicode/rangetable" +-) +- +-// These tables are hand-extracted from: +-// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +-func visitDefaults(fn func(r rune, c Class)) { +- // first write default values for ranges listed above. +- visitRunes(fn, AL, []rune{ +- 0x0600, 0x07BF, // Arabic +- 0x08A0, 0x08FF, // Arabic Extended-A +- 0xFB50, 0xFDCF, // Arabic Presentation Forms +- 0xFDF0, 0xFDFF, +- 0xFE70, 0xFEFF, +- 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols +- }) +- visitRunes(fn, R, []rune{ +- 0x0590, 0x05FF, // Hebrew +- 0x07C0, 0x089F, // Nko et al. +- 0xFB1D, 0xFB4F, +- 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. 
+- 0x0001E800, 0x0001EDFF, +- 0x0001EF00, 0x0001EFFF, +- }) +- visitRunes(fn, ET, []rune{ // European Terminator +- 0x20A0, 0x20Cf, // Currency symbols +- }) +- rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { +- fn(r, BN) // Boundary Neutral +- }) +- ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { +- if p.String(1) == "Default_Ignorable_Code_Point" { +- fn(p.Rune(0), BN) // Boundary Neutral +- } +- }) +-} +- +-func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { +- for i := 0; i < len(runes); i += 2 { +- lo, hi := runes[i], runes[i+1] +- for j := lo; j <= hi; j++ { +- fn(j, c) +- } +- } +-} +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +deleted file mode 100644 +index 9cb9942894..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// Class is the Unicode BiDi class. Each rune has a single class. +-type Class uint +- +-const ( +- L Class = iota // LeftToRight +- R // RightToLeft +- EN // EuropeanNumber +- ES // EuropeanSeparator +- ET // EuropeanTerminator +- AN // ArabicNumber +- CS // CommonSeparator +- B // ParagraphSeparator +- S // SegmentSeparator +- WS // WhiteSpace +- ON // OtherNeutral +- BN // BoundaryNeutral +- NSM // NonspacingMark +- AL // ArabicLetter +- Control // Control LRO - PDI +- +- numClass +- +- LRO // LeftToRightOverride +- RLO // RightToLeftOverride +- LRE // LeftToRightEmbedding +- RLE // RightToLeftEmbedding +- PDF // PopDirectionalFormat +- LRI // LeftToRightIsolate +- RLI // RightToLeftIsolate +- FSI // FirstStrongIsolate +- PDI // PopDirectionalIsolate +- +- unknownClass = ^Class(0) +-) +- +-var controlToClass = map[rune]Class{ +- 0x202D: LRO, // LeftToRightOverride, +- 0x202E: RLO, // RightToLeftOverride, +- 0x202A: LRE, // LeftToRightEmbedding, +- 0x202B: RLE, // RightToLeftEmbedding, +- 0x202C: PDF, // PopDirectionalFormat, +- 0x2066: LRI, // LeftToRightIsolate, +- 0x2067: RLI, // RightToLeftIsolate, +- 0x2068: FSI, // FirstStrongIsolate, +- 0x2069: PDI, // PopDirectionalIsolate, +-} +- +-// A trie entry has the following bits: +-// 7..5 XOR mask for brackets +-// 4 1: Bracket open, 0: Bracket close +-// 3..0 Class type +- +-const ( +- openMask = 0x10 +- xorMaskShift = 5 +-) +diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go +deleted file mode 100644 +index 30a3aa9334..0000000000 +--- a/vendor/golang.org/x/text/unicode/norm/maketables.go ++++ /dev/null +@@ -1,986 +0,0 @@ +-// Copyright 2011 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Normalization table generator. +-// Data read from the web. +-// See forminfo.go for a description of the trie values associated with each rune. 
+- +-package main +- +-import ( +- "bytes" +- "encoding/binary" +- "flag" +- "fmt" +- "io" +- "log" +- "sort" +- "strconv" +- "strings" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/triegen" +- "golang.org/x/text/internal/ucd" +-) +- +-func main() { +- gen.Init() +- loadUnicodeData() +- compactCCC() +- loadCompositionExclusions() +- completeCharFields(FCanonical) +- completeCharFields(FCompatibility) +- computeNonStarterCounts() +- verifyComputed() +- printChars() +- testDerived() +- printTestdata() +- makeTables() +-} +- +-var ( +- tablelist = flag.String("tables", +- "all", +- "comma-separated list of which tables to generate; "+ +- "can be 'decomp', 'recomp', 'info' and 'all'") +- test = flag.Bool("test", +- false, +- "test existing tables against DerivedNormalizationProps and generate test data for regression testing") +- verbose = flag.Bool("verbose", +- false, +- "write data to stdout as it is parsed") +-) +- +-const MaxChar = 0x10FFFF // anything above this shouldn't exist +- +-// Quick Check properties of runes allow us to quickly +-// determine whether a rune may occur in a normal form. +-// For a given normal form, a rune may be guaranteed to occur +-// verbatim (QC=Yes), may or may not combine with another +-// rune (QC=Maybe), or may not occur (QC=No). +-type QCResult int +- +-const ( +- QCUnknown QCResult = iota +- QCYes +- QCNo +- QCMaybe +-) +- +-func (r QCResult) String() string { +- switch r { +- case QCYes: +- return "Yes" +- case QCNo: +- return "No" +- case QCMaybe: +- return "Maybe" +- } +- return "***UNKNOWN***" +-} +- +-const ( +- FCanonical = iota // NFC or NFD +- FCompatibility // NFKC or NFKD +- FNumberOfFormTypes +-) +- +-const ( +- MComposed = iota // NFC or NFKC +- MDecomposed // NFD or NFKD +- MNumberOfModes +-) +- +-// This contains only the properties we're interested in. +-type Char struct { +- name string +- codePoint rune // if zero, this index is not a valid code point. +- ccc uint8 // canonical combining class +- origCCC uint8 +- excludeInComp bool // from CompositionExclusions.txt +- compatDecomp bool // it has a compatibility expansion +- +- nTrailingNonStarters uint8 +- nLeadingNonStarters uint8 // must be equal to trailing if non-zero +- +- forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility +- +- state State +-} +- +-var chars = make([]Char, MaxChar+1) +-var cccMap = make(map[uint8]uint8) +- +-func (c Char) String() string { +- buf := new(bytes.Buffer) +- +- fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) +- fmt.Fprintf(buf, " ccc: %v\n", c.ccc) +- fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) +- fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) +- fmt.Fprintf(buf, " state: %v\n", c.state) +- fmt.Fprintf(buf, " NFC:\n") +- fmt.Fprint(buf, c.forms[FCanonical]) +- fmt.Fprintf(buf, " NFKC:\n") +- fmt.Fprint(buf, c.forms[FCompatibility]) +- +- return buf.String() +-} +- +-// In UnicodeData.txt, some ranges are marked like this: +-// 3400;;Lo;0;L;;;;;N;;;;; +-// 4DB5;;Lo;0;L;;;;;N;;;;; +-// parseCharacter keeps a state variable indicating the weirdness. 
+-type State int +- +-const ( +- SNormal State = iota // known to be zero for the type +- SFirst +- SLast +- SMissing +-) +- +-var lastChar = rune('\u0000') +- +-func (c Char) isValid() bool { +- return c.codePoint != 0 && c.state != SMissing +-} +- +-type FormInfo struct { +- quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed +- verified [MNumberOfModes]bool // index: MComposed or MDecomposed +- +- combinesForward bool // May combine with rune on the right +- combinesBackward bool // May combine with rune on the left +- isOneWay bool // Never appears in result +- inDecomp bool // Some decompositions result in this char. +- decomp Decomposition +- expandedDecomp Decomposition +-} +- +-func (f FormInfo) String() string { +- buf := bytes.NewBuffer(make([]byte, 0)) +- +- fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) +- fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) +- fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) +- fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) +- fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) +- fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) +- fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) +- fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) +- +- return buf.String() +-} +- +-type Decomposition []rune +- +-func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { +- decomp := strings.Split(s, " ") +- if len(decomp) > 0 && skipfirst { +- decomp = decomp[1:] +- } +- for _, d := range decomp { +- point, err := strconv.ParseUint(d, 16, 64) +- if err != nil { +- return a, err +- } +- a = append(a, rune(point)) +- } +- return a, nil +-} +- +-func loadUnicodeData() { +- f := gen.OpenUCDFile("UnicodeData.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- r := p.Rune(ucd.CodePoint) +- char := &chars[r] +- +- char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) +- decmap := p.String(ucd.DecompMapping) +- +- exp, err := parseDecomposition(decmap, false) +- isCompat := false +- if err != nil { +- if len(decmap) > 0 { +- exp, err = parseDecomposition(decmap, true) +- if err != nil { +- log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) +- } +- isCompat = true +- } +- } +- +- char.name = p.String(ucd.Name) +- char.codePoint = r +- char.forms[FCompatibility].decomp = exp +- if !isCompat { +- char.forms[FCanonical].decomp = exp +- } else { +- char.compatDecomp = true +- } +- if len(decmap) > 0 { +- char.forms[FCompatibility].decomp = exp +- } +- } +- if err := p.Err(); err != nil { +- log.Fatal(err) +- } +-} +- +-// compactCCC converts the sparse set of CCC values to a continguous one, +-// reducing the number of bits needed from 8 to 6. +-func compactCCC() { +- m := make(map[uint8]uint8) +- for i := range chars { +- c := &chars[i] +- m[c.ccc] = 0 +- } +- cccs := []int{} +- for v, _ := range m { +- cccs = append(cccs, int(v)) +- } +- sort.Ints(cccs) +- for i, c := range cccs { +- cccMap[uint8(i)] = uint8(c) +- m[uint8(c)] = uint8(i) +- } +- for i := range chars { +- c := &chars[i] +- c.origCCC = c.ccc +- c.ccc = m[c.ccc] +- } +- if len(m) >= 1<<6 { +- log.Fatalf("too many difference CCC values: %d >= 64", len(m)) +- } +-} +- +-// CompositionExclusions.txt has form: +-// 0958 # ... 
+-// See https://unicode.org/reports/tr44/ for full explanation +-func loadCompositionExclusions() { +- f := gen.OpenUCDFile("CompositionExclusions.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- c := &chars[p.Rune(0)] +- if c.excludeInComp { +- log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) +- } +- c.excludeInComp = true +- } +- if e := p.Err(); e != nil { +- log.Fatal(e) +- } +-} +- +-// hasCompatDecomp returns true if any of the recursive +-// decompositions contains a compatibility expansion. +-// In this case, the character may not occur in NFK*. +-func hasCompatDecomp(r rune) bool { +- c := &chars[r] +- if c.compatDecomp { +- return true +- } +- for _, d := range c.forms[FCompatibility].decomp { +- if hasCompatDecomp(d) { +- return true +- } +- } +- return false +-} +- +-// Hangul related constants. +-const ( +- HangulBase = 0xAC00 +- HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) +- +- JamoLBase = 0x1100 +- JamoLEnd = 0x1113 +- JamoVBase = 0x1161 +- JamoVEnd = 0x1176 +- JamoTBase = 0x11A8 +- JamoTEnd = 0x11C3 +- +- JamoLVTCount = 19 * 21 * 28 +- JamoTCount = 28 +-) +- +-func isHangul(r rune) bool { +- return HangulBase <= r && r < HangulEnd +-} +- +-func isHangulWithoutJamoT(r rune) bool { +- if !isHangul(r) { +- return false +- } +- r -= HangulBase +- return r < JamoLVTCount && r%JamoTCount == 0 +-} +- +-func ccc(r rune) uint8 { +- return chars[r].ccc +-} +- +-// Insert a rune in a buffer, ordered by Canonical Combining Class. +-func insertOrdered(b Decomposition, r rune) Decomposition { +- n := len(b) +- b = append(b, 0) +- cc := ccc(r) +- if cc > 0 { +- // Use bubble sort. +- for ; n > 0; n-- { +- if ccc(b[n-1]) <= cc { +- break +- } +- b[n] = b[n-1] +- } +- } +- b[n] = r +- return b +-} +- +-// Recursively decompose. +-func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { +- dcomp := chars[r].forms[form].decomp +- if len(dcomp) == 0 { +- return insertOrdered(d, r) +- } +- for _, c := range dcomp { +- d = decomposeRecursive(form, c, d) +- } +- return d +-} +- +-func completeCharFields(form int) { +- // Phase 0: pre-expand decomposition. +- for i := range chars { +- f := &chars[i].forms[form] +- if len(f.decomp) == 0 { +- continue +- } +- exp := make(Decomposition, 0) +- for _, c := range f.decomp { +- exp = decomposeRecursive(form, c, exp) +- } +- f.expandedDecomp = exp +- } +- +- // Phase 1: composition exclusion, mark decomposition. +- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- // Marks script-specific exclusions and version restricted. +- f.isOneWay = c.excludeInComp +- +- // Singletons +- f.isOneWay = f.isOneWay || len(f.decomp) == 1 +- +- // Non-starter decompositions +- if len(f.decomp) > 1 { +- chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 +- f.isOneWay = f.isOneWay || chk +- } +- +- // Runes that decompose into more than two runes. +- f.isOneWay = f.isOneWay || len(f.decomp) > 2 +- +- if form == FCompatibility { +- f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) +- } +- +- for _, r := range f.decomp { +- chars[r].forms[form].inDecomp = true +- } +- } +- +- // Phase 2: forward and backward combining. 
+- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- if !f.isOneWay && len(f.decomp) == 2 { +- f0 := &chars[f.decomp[0]].forms[form] +- f1 := &chars[f.decomp[1]].forms[form] +- if !f0.isOneWay { +- f0.combinesForward = true +- } +- if !f1.isOneWay { +- f1.combinesBackward = true +- } +- } +- if isHangulWithoutJamoT(rune(i)) { +- f.combinesForward = true +- } +- } +- +- // Phase 3: quick check values. +- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- switch { +- case len(f.decomp) > 0: +- f.quickCheck[MDecomposed] = QCNo +- case isHangul(rune(i)): +- f.quickCheck[MDecomposed] = QCNo +- default: +- f.quickCheck[MDecomposed] = QCYes +- } +- switch { +- case f.isOneWay: +- f.quickCheck[MComposed] = QCNo +- case (i & 0xffff00) == JamoLBase: +- f.quickCheck[MComposed] = QCYes +- if JamoLBase <= i && i < JamoLEnd { +- f.combinesForward = true +- } +- if JamoVBase <= i && i < JamoVEnd { +- f.quickCheck[MComposed] = QCMaybe +- f.combinesBackward = true +- f.combinesForward = true +- } +- if JamoTBase <= i && i < JamoTEnd { +- f.quickCheck[MComposed] = QCMaybe +- f.combinesBackward = true +- } +- case !f.combinesBackward: +- f.quickCheck[MComposed] = QCYes +- default: +- f.quickCheck[MComposed] = QCMaybe +- } +- } +-} +- +-func computeNonStarterCounts() { +- // Phase 4: leading and trailing non-starter count +- for i := range chars { +- c := &chars[i] +- +- runes := []rune{rune(i)} +- // We always use FCompatibility so that the CGJ insertion points do not +- // change for repeated normalizations with different forms. +- if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { +- runes = exp +- } +- // We consider runes that combine backwards to be non-starters for the +- // purpose of Stream-Safe Text Processing. +- for _, r := range runes { +- if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { +- break +- } +- c.nLeadingNonStarters++ +- } +- for i := len(runes) - 1; i >= 0; i-- { +- if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { +- break +- } +- c.nTrailingNonStarters++ +- } +- if c.nTrailingNonStarters > 3 { +- log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) +- } +- +- if isHangul(rune(i)) { +- c.nTrailingNonStarters = 2 +- if isHangulWithoutJamoT(rune(i)) { +- c.nTrailingNonStarters = 1 +- } +- } +- +- if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { +- log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) +- } +- if t := c.nTrailingNonStarters; t > 3 { +- log.Fatalf("%U: number of trailing non-starters is %d > 3", t) +- } +- } +-} +- +-func printBytes(w io.Writer, b []byte, name string) { +- fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) +- fmt.Fprintf(w, "var %s = [...]byte {", name) +- for i, c := range b { +- switch { +- case i%64 == 0: +- fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) +- case i%8 == 0: +- fmt.Fprintf(w, "\n") +- } +- fmt.Fprintf(w, "0x%.2X, ", c) +- } +- fmt.Fprint(w, "\n}\n\n") +-} +- +-// See forminfo.go for format. 
+-func makeEntry(f *FormInfo, c *Char) uint16 { +- e := uint16(0) +- if r := c.codePoint; HangulBase <= r && r < HangulEnd { +- e |= 0x40 +- } +- if f.combinesForward { +- e |= 0x20 +- } +- if f.quickCheck[MDecomposed] == QCNo { +- e |= 0x4 +- } +- switch f.quickCheck[MComposed] { +- case QCYes: +- case QCNo: +- e |= 0x10 +- case QCMaybe: +- e |= 0x18 +- default: +- log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) +- } +- e |= uint16(c.nTrailingNonStarters) +- return e +-} +- +-// decompSet keeps track of unique decompositions, grouped by whether +-// the decomposition is followed by a trailing and/or leading CCC. +-type decompSet [7]map[string]bool +- +-const ( +- normalDecomp = iota +- firstMulti +- firstCCC +- endMulti +- firstLeadingCCC +- firstCCCZeroExcept +- firstStarterWithNLead +- lastDecomp +-) +- +-var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} +- +-func makeDecompSet() decompSet { +- m := decompSet{} +- for i := range m { +- m[i] = make(map[string]bool) +- } +- return m +-} +-func (m *decompSet) insert(key int, s string) { +- m[key][s] = true +-} +- +-func printCharInfoTables(w io.Writer) int { +- mkstr := func(r rune, f *FormInfo) (int, string) { +- d := f.expandedDecomp +- s := string([]rune(d)) +- if max := 1 << 6; len(s) >= max { +- const msg = "%U: too many bytes in decomposition: %d >= %d" +- log.Fatalf(msg, r, len(s), max) +- } +- head := uint8(len(s)) +- if f.quickCheck[MComposed] != QCYes { +- head |= 0x40 +- } +- if f.combinesForward { +- head |= 0x80 +- } +- s = string([]byte{head}) + s +- +- lccc := ccc(d[0]) +- tccc := ccc(d[len(d)-1]) +- cc := ccc(r) +- if cc != 0 && lccc == 0 && tccc == 0 { +- log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) +- } +- if tccc < lccc && lccc != 0 { +- const msg = "%U: lccc (%d) must be <= tcc (%d)" +- log.Fatalf(msg, r, lccc, tccc) +- } +- index := normalDecomp +- nTrail := chars[r].nTrailingNonStarters +- nLead := chars[r].nLeadingNonStarters +- if tccc > 0 || lccc > 0 || nTrail > 0 { +- tccc <<= 2 +- tccc |= nTrail +- s += string([]byte{tccc}) +- index = endMulti +- for _, r := range d[1:] { +- if ccc(r) == 0 { +- index = firstCCC +- } +- } +- if lccc > 0 || nLead > 0 { +- s += string([]byte{lccc}) +- if index == firstCCC { +- log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) +- } +- index = firstLeadingCCC +- } +- if cc != lccc { +- if cc != 0 { +- log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) +- } +- index = firstCCCZeroExcept +- } +- } else if len(d) > 1 { +- index = firstMulti +- } +- return index, s +- } +- +- decompSet := makeDecompSet() +- const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. +- decompSet.insert(firstStarterWithNLead, nLeadStr) +- +- // Store the uniqued decompositions in a byte buffer, +- // preceded by their byte length. 
+- for _, c := range chars { +- for _, f := range c.forms { +- if len(f.expandedDecomp) == 0 { +- continue +- } +- if f.combinesBackward { +- log.Fatalf("%U: combinesBackward and decompose", c.codePoint) +- } +- index, s := mkstr(c.codePoint, &f) +- decompSet.insert(index, s) +- } +- } +- +- decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) +- size := 0 +- positionMap := make(map[string]uint16) +- decompositions.WriteString("\000") +- fmt.Fprintln(w, "const (") +- for i, m := range decompSet { +- sa := []string{} +- for s := range m { +- sa = append(sa, s) +- } +- sort.Strings(sa) +- for _, s := range sa { +- p := decompositions.Len() +- decompositions.WriteString(s) +- positionMap[s] = uint16(p) +- } +- if cname[i] != "" { +- fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) +- } +- } +- fmt.Fprintln(w, "maxDecomp = 0x8000") +- fmt.Fprintln(w, ")") +- b := decompositions.Bytes() +- printBytes(w, b, "decomps") +- size += len(b) +- +- varnames := []string{"nfc", "nfkc"} +- for i := 0; i < FNumberOfFormTypes; i++ { +- trie := triegen.NewTrie(varnames[i]) +- +- for r, c := range chars { +- f := c.forms[i] +- d := f.expandedDecomp +- if len(d) != 0 { +- _, key := mkstr(c.codePoint, &f) +- trie.Insert(rune(r), uint64(positionMap[key])) +- if c.ccc != ccc(d[0]) { +- // We assume the lead ccc of a decomposition !=0 in this case. +- if ccc(d[0]) == 0 { +- log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) +- } +- } +- } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { +- // Handle cases where it can't be detected that the nLead should be equal +- // to nTrail. +- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) +- } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { +- trie.Insert(c.codePoint, uint64(0x8000|v)) +- } +- } +- sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) +- if err != nil { +- log.Fatal(err) +- } +- size += sz +- } +- return size +-} +- +-func contains(sa []string, s string) bool { +- for _, a := range sa { +- if a == s { +- return true +- } +- } +- return false +-} +- +-func makeTables() { +- w := &bytes.Buffer{} +- +- size := 0 +- if *tablelist == "" { +- return +- } +- list := strings.Split(*tablelist, ",") +- if *tablelist == "all" { +- list = []string{"recomp", "info"} +- } +- +- // Compute maximum decomposition size. +- max := 0 +- for _, c := range chars { +- if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { +- max = n +- } +- } +- fmt.Fprintln(w, `import "sync"`) +- fmt.Fprintln(w) +- +- fmt.Fprintln(w, "const (") +- fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") +- fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) +- fmt.Fprintln(w) +- fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") +- fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") +- fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") +- fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") +- fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) +- fmt.Fprintln(w, ")\n") +- +- // Print the CCC remap table. 
+- size += len(cccMap) +- fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) +- for i := 0; i < len(cccMap); i++ { +- if i%8 == 0 { +- fmt.Fprintln(w) +- } +- fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) +- } +- fmt.Fprintln(w, "\n}\n") +- +- if contains(list, "info") { +- size += printCharInfoTables(w) +- } +- +- if contains(list, "recomp") { +- // Note that we use 32 bit keys, instead of 64 bit. +- // This clips the bits of three entries, but we know +- // this won't cause a collision. The compiler will catch +- // any changes made to UnicodeData.txt that introduces +- // a collision. +- // Note that the recomposition map for NFC and NFKC +- // are identical. +- +- // Recomposition map +- nrentries := 0 +- for _, c := range chars { +- f := c.forms[FCanonical] +- if !f.isOneWay && len(f.decomp) > 0 { +- nrentries++ +- } +- } +- sz := nrentries * 8 +- size += sz +- fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) +- fmt.Fprintln(w, "var recompMap map[uint32]rune") +- fmt.Fprintln(w, "var recompMapOnce sync.Once\n") +- fmt.Fprintln(w, `const recompMapPacked = "" +`) +- var buf [8]byte +- for i, c := range chars { +- f := c.forms[FCanonical] +- d := f.decomp +- if !f.isOneWay && len(d) > 0 { +- key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) +- binary.BigEndian.PutUint32(buf[:4], key) +- binary.BigEndian.PutUint32(buf[4:], uint32(i)) +- fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) +- } +- } +- // hack so we don't have to special case the trailing plus sign +- fmt.Fprintf(w, ` ""`) +- fmt.Fprintln(w) +- } +- +- fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) +- gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) +-} +- +-func printChars() { +- if *verbose { +- for _, c := range chars { +- if !c.isValid() || c.state == SMissing { +- continue +- } +- fmt.Println(c) +- } +- } +-} +- +-// verifyComputed does various consistency tests. +-func verifyComputed() { +- for i, c := range chars { +- for _, f := range c.forms { +- isNo := (f.quickCheck[MDecomposed] == QCNo) +- if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { +- log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) +- } +- +- isMaybe := f.quickCheck[MComposed] == QCMaybe +- if f.combinesBackward != isMaybe { +- log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) +- } +- if len(f.decomp) > 0 && f.combinesForward && isMaybe { +- log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) +- } +- +- if len(f.expandedDecomp) != 0 { +- continue +- } +- if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { +- // We accept these runes to be treated differently (it only affects +- // segment breaking in iteration, most likely on improper use), but +- // reconsider if more characters are added. 
+- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; +- // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; +- // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; +- // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; +- // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; +- // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; +- if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { +- log.Fatalf("%U: nLead was %v; want %v", i, a, b) +- } +- } +- } +- nfc := c.forms[FCanonical] +- nfkc := c.forms[FCompatibility] +- if nfc.combinesBackward != nfkc.combinesBackward { +- log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) +- } +- } +-} +- +-// Use values in DerivedNormalizationProps.txt to compare against the +-// values we computed. +-// DerivedNormalizationProps.txt has form: +-// 00C0..00C5 ; NFD_QC; N # ... +-// 0374 ; NFD_QC; N # ... +-// See https://unicode.org/reports/tr44/ for full explanation +-func testDerived() { +- f := gen.OpenUCDFile("DerivedNormalizationProps.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- r := p.Rune(0) +- c := &chars[r] +- +- var ftype, mode int +- qt := p.String(1) +- switch qt { +- case "NFC_QC": +- ftype, mode = FCanonical, MComposed +- case "NFD_QC": +- ftype, mode = FCanonical, MDecomposed +- case "NFKC_QC": +- ftype, mode = FCompatibility, MComposed +- case "NFKD_QC": +- ftype, mode = FCompatibility, MDecomposed +- default: +- continue +- } +- var qr QCResult +- switch p.String(2) { +- case "Y": +- qr = QCYes +- case "N": +- qr = QCNo +- case "M": +- qr = QCMaybe +- default: +- log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) +- } +- if got := c.forms[ftype].quickCheck[mode]; got != qr { +- log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) +- } +- c.forms[ftype].verified[mode] = true +- } +- if err := p.Err(); err != nil { +- log.Fatal(err) +- } +- // Any unspecified value must be QCYes. Verify this. 
+- for i, c := range chars { +- for j, fd := range c.forms { +- for k, qr := range fd.quickCheck { +- if !fd.verified[k] && qr != QCYes { +- m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" +- log.Printf(m, i, j, k, qr, c.name) +- } +- } +- } +- } +-} +- +-var testHeader = `const ( +- Yes = iota +- No +- Maybe +-) +- +-type formData struct { +- qc uint8 +- combinesForward bool +- decomposition string +-} +- +-type runeData struct { +- r rune +- ccc uint8 +- nLead uint8 +- nTrail uint8 +- f [2]formData // 0: canonical; 1: compatibility +-} +- +-func f(qc uint8, cf bool, dec string) [2]formData { +- return [2]formData{{qc, cf, dec}, {qc, cf, dec}} +-} +- +-func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { +- return [2]formData{{qc, cf, d}, {qck, cfk, dk}} +-} +- +-var testData = []runeData{ +-` +- +-func printTestdata() { +- type lastInfo struct { +- ccc uint8 +- nLead uint8 +- nTrail uint8 +- f string +- } +- +- last := lastInfo{} +- w := &bytes.Buffer{} +- fmt.Fprintf(w, testHeader) +- for r, c := range chars { +- f := c.forms[FCanonical] +- qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) +- f = c.forms[FCompatibility] +- qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) +- s := "" +- if d == dk && qc == qck && cf == cfk { +- s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) +- } else { +- s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) +- } +- current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} +- if last != current { +- fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) +- last = current +- } +- } +- fmt.Fprintln(w, "}") +- gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) +-} +diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go +deleted file mode 100644 +index 45d711900d..0000000000 +--- a/vendor/golang.org/x/text/unicode/norm/triegen.go ++++ /dev/null +@@ -1,117 +0,0 @@ +-// Copyright 2011 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Trie table generator. +-// Used by make*tables tools to generate a go file with trie data structures +-// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte +-// sequence are used to lookup offsets in the index table to be used for the +-// next byte. The last byte is used to index into a table with 16-bit values. 
+- +-package main +- +-import ( +- "fmt" +- "io" +-) +- +-const maxSparseEntries = 16 +- +-type normCompacter struct { +- sparseBlocks [][]uint64 +- sparseOffset []uint16 +- sparseCount int +- name string +-} +- +-func mostFrequentStride(a []uint64) int { +- counts := make(map[int]int) +- var v int +- for _, x := range a { +- if stride := int(x) - v; v != 0 && stride >= 0 { +- counts[stride]++ +- } +- v = int(x) +- } +- var maxs, maxc int +- for stride, cnt := range counts { +- if cnt > maxc || (cnt == maxc && stride < maxs) { +- maxs, maxc = stride, cnt +- } +- } +- return maxs +-} +- +-func countSparseEntries(a []uint64) int { +- stride := mostFrequentStride(a) +- var v, count int +- for _, tv := range a { +- if int(tv)-v != stride { +- if tv != 0 { +- count++ +- } +- } +- v = int(tv) +- } +- return count +-} +- +-func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { +- if n := countSparseEntries(v); n <= maxSparseEntries { +- return (n+1)*4 + 2, true +- } +- return 0, false +-} +- +-func (c *normCompacter) Store(v []uint64) uint32 { +- h := uint32(len(c.sparseOffset)) +- c.sparseBlocks = append(c.sparseBlocks, v) +- c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) +- c.sparseCount += countSparseEntries(v) + 1 +- return h +-} +- +-func (c *normCompacter) Handler() string { +- return c.name + "Sparse.lookup" +-} +- +-func (c *normCompacter) Print(w io.Writer) (retErr error) { +- p := func(f string, x ...interface{}) { +- if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { +- retErr = err +- } +- } +- +- ls := len(c.sparseBlocks) +- p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) +- p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) +- +- ns := c.sparseCount +- p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) +- p("var %sSparseValues = [%d]valueRange {", c.name, ns) +- for i, b := range c.sparseBlocks { +- p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) +- var v int +- stride := mostFrequentStride(b) +- n := countSparseEntries(b) +- p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) +- for i, nv := range b { +- if int(nv)-v != stride { +- if v != 0 { +- p(",hi:%#02x},", 0x80+i-1) +- } +- if nv != 0 { +- p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) +- } +- } +- v = int(nv) +- } +- if v != 0 { +- p(",hi:%#02x},", 0x80+len(b)-1) +- } +- } +- p("\n}\n\n") +- return +-} +diff --git a/vendor/modules.txt b/vendor/modules.txt +index 91f7df534b..685585c07c 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -1,30 +1,30 @@ + # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 +-github.com/Azure/go-ansiterm/winterm + github.com/Azure/go-ansiterm ++github.com/Azure/go-ansiterm/winterm + # github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml + # github.com/Microsoft/go-winio v0.4.14 + github.com/Microsoft/go-winio +-github.com/Microsoft/go-winio/pkg/guid + github.com/Microsoft/go-winio/archive/tar + github.com/Microsoft/go-winio/backuptar ++github.com/Microsoft/go-winio/pkg/guid + # github.com/Microsoft/hcsshim v0.8.6 +-github.com/Microsoft/hcsshim/osversion + github.com/Microsoft/hcsshim ++github.com/Microsoft/hcsshim/internal/guestrequest + github.com/Microsoft/hcsshim/internal/guid + github.com/Microsoft/hcsshim/internal/hcs + github.com/Microsoft/hcsshim/internal/hcserror + github.com/Microsoft/hcsshim/internal/hns +-github.com/Microsoft/hcsshim/internal/mergemaps +-github.com/Microsoft/hcsshim/internal/schema1 +-github.com/Microsoft/hcsshim/internal/wclayer 
+-github.com/Microsoft/hcsshim/internal/guestrequest + github.com/Microsoft/hcsshim/internal/interop + github.com/Microsoft/hcsshim/internal/logfields +-github.com/Microsoft/hcsshim/internal/timeout +-github.com/Microsoft/hcsshim/internal/schema2 + github.com/Microsoft/hcsshim/internal/longpath ++github.com/Microsoft/hcsshim/internal/mergemaps + github.com/Microsoft/hcsshim/internal/safefile ++github.com/Microsoft/hcsshim/internal/schema1 ++github.com/Microsoft/hcsshim/internal/schema2 ++github.com/Microsoft/hcsshim/internal/timeout ++github.com/Microsoft/hcsshim/internal/wclayer ++github.com/Microsoft/hcsshim/osversion + # github.com/VividCortex/ewma v1.1.1 + github.com/VividCortex/ewma + # github.com/beorn7/perks v1.0.1 +@@ -40,133 +40,134 @@ github.com/checkpoint-restore/go-criu/rpc + github.com/containerd/containerd/errdefs + # github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc + github.com/containerd/continuity/fs +-github.com/containerd/continuity/sysx + github.com/containerd/continuity/syscallx ++github.com/containerd/continuity/sysx + # github.com/containernetworking/cni v0.7.1 +-github.com/containernetworking/cni/pkg/types +-github.com/containernetworking/cni/pkg/types/current +-github.com/containernetworking/cni/pkg/version + github.com/containernetworking/cni/libcni + github.com/containernetworking/cni/pkg/invoke ++github.com/containernetworking/cni/pkg/types + github.com/containernetworking/cni/pkg/types/020 ++github.com/containernetworking/cni/pkg/types/current ++github.com/containernetworking/cni/pkg/version + # github.com/containernetworking/plugins v0.8.2 +-github.com/containernetworking/plugins/pkg/ns + github.com/containernetworking/plugins/pkg/ip +-github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator ++github.com/containernetworking/plugins/pkg/ns + github.com/containernetworking/plugins/pkg/utils/hwaddr + github.com/containernetworking/plugins/plugins/ipam/host-local/backend ++github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator + # github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 + github.com/containers/buildah +-github.com/containers/buildah/imagebuildah +-github.com/containers/buildah/pkg/chrootuser +-github.com/containers/buildah/pkg/cli +-github.com/containers/buildah/pkg/formats +-github.com/containers/buildah/util +-github.com/containers/buildah/pkg/secrets +-github.com/containers/buildah/pkg/parse + github.com/containers/buildah/bind + github.com/containers/buildah/chroot + github.com/containers/buildah/docker ++github.com/containers/buildah/imagebuildah + github.com/containers/buildah/pkg/blobcache + github.com/containers/buildah/pkg/cgroups ++github.com/containers/buildah/pkg/chrootuser ++github.com/containers/buildah/pkg/cli ++github.com/containers/buildah/pkg/formats + github.com/containers/buildah/pkg/overlay +-github.com/containers/buildah/pkg/unshare ++github.com/containers/buildah/pkg/parse ++github.com/containers/buildah/pkg/secrets + github.com/containers/buildah/pkg/umask +-# github.com/containers/image/v5 v5.0.0 ++github.com/containers/buildah/pkg/unshare ++github.com/containers/buildah/util ++# github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 ++github.com/containers/image/v5/copy + github.com/containers/image/v5/directory ++github.com/containers/image/v5/directory/explicitfilepath + github.com/containers/image/v5/docker + github.com/containers/image/v5/docker/archive +-github.com/containers/image/v5/manifest 
+-github.com/containers/image/v5/pkg/docker/config +-github.com/containers/image/v5/signature +-github.com/containers/image/v5/transports +-github.com/containers/image/v5/transports/alltransports +-github.com/containers/image/v5/types +-github.com/containers/image/v5/oci/archive +-github.com/containers/image/v5/storage +-github.com/containers/image/v5/copy ++github.com/containers/image/v5/docker/daemon ++github.com/containers/image/v5/docker/policyconfiguration + github.com/containers/image/v5/docker/reference + github.com/containers/image/v5/docker/tarfile + github.com/containers/image/v5/image +-github.com/containers/image/v5/oci/layout +-github.com/containers/image/v5/tarball +-github.com/containers/image/v5/pkg/sysregistriesv2 +-github.com/containers/image/v5/directory/explicitfilepath +-github.com/containers/image/v5/docker/policyconfiguration +-github.com/containers/image/v5/pkg/blobinfocache/none +-github.com/containers/image/v5/pkg/tlsclientconfig +-github.com/containers/image/v5/pkg/compression +-github.com/containers/image/v5/pkg/strslice ++github.com/containers/image/v5/internal/iolimits + github.com/containers/image/v5/internal/pkg/keyctl +-github.com/containers/image/v5/version +-github.com/containers/image/v5/docker/daemon +-github.com/containers/image/v5/openshift +-github.com/containers/image/v5/ostree +-github.com/containers/image/v5/pkg/compression/types + github.com/containers/image/v5/internal/tmpdir ++github.com/containers/image/v5/manifest ++github.com/containers/image/v5/oci/archive + github.com/containers/image/v5/oci/internal ++github.com/containers/image/v5/oci/layout ++github.com/containers/image/v5/openshift ++github.com/containers/image/v5/ostree + github.com/containers/image/v5/pkg/blobinfocache +-github.com/containers/image/v5/pkg/compression/internal + github.com/containers/image/v5/pkg/blobinfocache/boltdb +-github.com/containers/image/v5/pkg/blobinfocache/memory + github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize ++github.com/containers/image/v5/pkg/blobinfocache/memory ++github.com/containers/image/v5/pkg/blobinfocache/none ++github.com/containers/image/v5/pkg/compression ++github.com/containers/image/v5/pkg/compression/internal ++github.com/containers/image/v5/pkg/compression/types ++github.com/containers/image/v5/pkg/docker/config ++github.com/containers/image/v5/pkg/strslice ++github.com/containers/image/v5/pkg/sysregistriesv2 ++github.com/containers/image/v5/pkg/tlsclientconfig ++github.com/containers/image/v5/signature ++github.com/containers/image/v5/storage ++github.com/containers/image/v5/tarball ++github.com/containers/image/v5/transports ++github.com/containers/image/v5/transports/alltransports ++github.com/containers/image/v5/types ++github.com/containers/image/v5/version + # github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b + github.com/containers/libtrust + # github.com/containers/psgo v1.3.2 + github.com/containers/psgo + github.com/containers/psgo/internal/capabilities ++github.com/containers/psgo/internal/cgroups + github.com/containers/psgo/internal/dev ++github.com/containers/psgo/internal/host + github.com/containers/psgo/internal/proc + github.com/containers/psgo/internal/process +-github.com/containers/psgo/internal/cgroups +-github.com/containers/psgo/internal/host + # github.com/containers/storage v1.13.6 + github.com/containers/storage +-github.com/containers/storage/pkg/archive +-github.com/containers/storage/pkg/chrootarchive +-github.com/containers/storage/pkg/idtools 
+-github.com/containers/storage/pkg/reexec +-github.com/containers/storage/pkg/mount +-github.com/containers/storage/pkg/stringid +-github.com/containers/storage/pkg/system +-github.com/containers/storage/pkg/truncindex +-github.com/containers/storage/pkg/parsers/kernel +-github.com/containers/storage/pkg/fileutils +-github.com/containers/storage/pkg/ioutils +-github.com/containers/storage/pkg/pools +-github.com/containers/storage/pkg/homedir + github.com/containers/storage/drivers +-github.com/containers/storage/drivers/register +-github.com/containers/storage/pkg/config +-github.com/containers/storage/pkg/directory +-github.com/containers/storage/pkg/lockfile +-github.com/containers/storage/pkg/parsers +-github.com/containers/storage/pkg/stringutils +-github.com/containers/storage/pkg/tarlog +-github.com/containers/storage/pkg/longpath +-github.com/containers/storage/pkg/promise + github.com/containers/storage/drivers/aufs + github.com/containers/storage/drivers/btrfs ++github.com/containers/storage/drivers/copy + github.com/containers/storage/drivers/devmapper + github.com/containers/storage/drivers/overlay ++github.com/containers/storage/drivers/overlayutils ++github.com/containers/storage/drivers/quota ++github.com/containers/storage/drivers/register + github.com/containers/storage/drivers/vfs + github.com/containers/storage/drivers/windows + github.com/containers/storage/drivers/zfs +-github.com/containers/storage/pkg/locker ++github.com/containers/storage/pkg/archive ++github.com/containers/storage/pkg/chrootarchive ++github.com/containers/storage/pkg/config + github.com/containers/storage/pkg/devicemapper ++github.com/containers/storage/pkg/directory + github.com/containers/storage/pkg/dmesg +-github.com/containers/storage/pkg/loopback +-github.com/containers/storage/drivers/overlayutils +-github.com/containers/storage/drivers/quota ++github.com/containers/storage/pkg/fileutils + github.com/containers/storage/pkg/fsutils +-github.com/containers/storage/drivers/copy ++github.com/containers/storage/pkg/homedir ++github.com/containers/storage/pkg/idtools ++github.com/containers/storage/pkg/ioutils ++github.com/containers/storage/pkg/locker ++github.com/containers/storage/pkg/lockfile ++github.com/containers/storage/pkg/longpath ++github.com/containers/storage/pkg/loopback ++github.com/containers/storage/pkg/mount ++github.com/containers/storage/pkg/parsers ++github.com/containers/storage/pkg/parsers/kernel ++github.com/containers/storage/pkg/pools ++github.com/containers/storage/pkg/promise ++github.com/containers/storage/pkg/reexec ++github.com/containers/storage/pkg/stringid ++github.com/containers/storage/pkg/stringutils ++github.com/containers/storage/pkg/system ++github.com/containers/storage/pkg/tarlog ++github.com/containers/storage/pkg/truncindex + # github.com/coreos/go-iptables v0.4.2 + github.com/coreos/go-iptables/iptables + # github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f + github.com/coreos/go-systemd/activation + github.com/coreos/go-systemd/dbus +-github.com/coreos/go-systemd/sdjournal + github.com/coreos/go-systemd/journal ++github.com/coreos/go-systemd/sdjournal + # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f + github.com/coreos/pkg/dlopen + # github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca +@@ -176,68 +177,68 @@ github.com/cyphar/filepath-securejoin + # github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew/spew + # github.com/docker/distribution v2.7.1+incompatible ++github.com/docker/distribution 
++github.com/docker/distribution/digestset ++github.com/docker/distribution/metrics + github.com/docker/distribution/reference + github.com/docker/distribution/registry/api/errcode + github.com/docker/distribution/registry/api/v2 + github.com/docker/distribution/registry/client +-github.com/docker/distribution/digestset +-github.com/docker/distribution + github.com/docker/distribution/registry/client/auth/challenge + github.com/docker/distribution/registry/client/transport + github.com/docker/distribution/registry/storage/cache + github.com/docker/distribution/registry/storage/cache/memory +-github.com/docker/distribution/metrics + # github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce +-github.com/docker/docker/pkg/signal +-github.com/docker/docker/pkg/homedir +-github.com/docker/docker/oci/caps +-github.com/docker/docker/pkg/namesgenerator +-github.com/docker/docker/pkg/term +-github.com/docker/docker/pkg/ioutils +-github.com/docker/docker/pkg/parsers +-github.com/docker/docker/api/types/versions +-github.com/docker/docker/errdefs +-github.com/docker/docker/pkg/term/windows +-github.com/docker/docker/pkg/longpath +-github.com/docker/docker/api/types/registry +-github.com/docker/docker/api/types/swarm +-github.com/docker/docker/pkg/archive +-github.com/docker/docker/pkg/fileutils +-github.com/docker/docker/pkg/jsonmessage +-github.com/docker/docker/pkg/stdcopy +-github.com/docker/docker/pkg/system +-github.com/docker/docker/client +-github.com/docker/docker/api/types/container +-github.com/docker/docker/api/types/mount +-github.com/docker/docker/api/types/network +-github.com/docker/docker/api/types/swarm/runtime +-github.com/docker/docker/pkg/idtools +-github.com/docker/docker/pkg/pools +-github.com/docker/docker/pkg/mount + github.com/docker/docker/api + github.com/docker/docker/api/types ++github.com/docker/docker/api/types/blkiodev ++github.com/docker/docker/api/types/container + github.com/docker/docker/api/types/events + github.com/docker/docker/api/types/filters + github.com/docker/docker/api/types/image ++github.com/docker/docker/api/types/mount ++github.com/docker/docker/api/types/network ++github.com/docker/docker/api/types/registry ++github.com/docker/docker/api/types/strslice ++github.com/docker/docker/api/types/swarm ++github.com/docker/docker/api/types/swarm/runtime + github.com/docker/docker/api/types/time ++github.com/docker/docker/api/types/versions + github.com/docker/docker/api/types/volume +-github.com/docker/docker/api/types/blkiodev +-github.com/docker/docker/api/types/strslice ++github.com/docker/docker/client ++github.com/docker/docker/errdefs ++github.com/docker/docker/oci/caps ++github.com/docker/docker/pkg/archive ++github.com/docker/docker/pkg/fileutils ++github.com/docker/docker/pkg/homedir ++github.com/docker/docker/pkg/idtools ++github.com/docker/docker/pkg/ioutils ++github.com/docker/docker/pkg/jsonmessage ++github.com/docker/docker/pkg/longpath ++github.com/docker/docker/pkg/mount ++github.com/docker/docker/pkg/namesgenerator ++github.com/docker/docker/pkg/parsers ++github.com/docker/docker/pkg/pools ++github.com/docker/docker/pkg/signal ++github.com/docker/docker/pkg/stdcopy ++github.com/docker/docker/pkg/system ++github.com/docker/docker/pkg/term ++github.com/docker/docker/pkg/term/windows + # github.com/docker/docker-credential-helpers v0.6.3 +-github.com/docker/docker-credential-helpers/credentials + github.com/docker/docker-credential-helpers/client ++github.com/docker/docker-credential-helpers/credentials + # github.com/docker/go-connections 
v0.4.0 + github.com/docker/go-connections/nat +-github.com/docker/go-connections/tlsconfig + github.com/docker/go-connections/sockets ++github.com/docker/go-connections/tlsconfig + # github.com/docker/go-metrics v0.0.1 + github.com/docker/go-metrics + # github.com/docker/go-units v0.4.0 + github.com/docker/go-units + # github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 + github.com/docker/libnetwork/resolvconf +-github.com/docker/libnetwork/types + github.com/docker/libnetwork/resolvconf/dns ++github.com/docker/libnetwork/types + # github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c + github.com/docker/spdystream + github.com/docker/spdystream/spdy +@@ -287,12 +288,12 @@ github.com/ishidawataru/sctp + # github.com/json-iterator/go v1.1.7 + github.com/json-iterator/go + # github.com/klauspost/compress v1.8.1 +-github.com/klauspost/compress/zstd + github.com/klauspost/compress/flate ++github.com/klauspost/compress/fse + github.com/klauspost/compress/huff0 + github.com/klauspost/compress/snappy ++github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd/internal/xxhash +-github.com/klauspost/compress/fse + # github.com/klauspost/cpuid v1.2.1 + github.com/klauspost/cpuid + # github.com/klauspost/pgzip v1.2.1 +@@ -318,83 +319,83 @@ github.com/mrunalp/fileutils + # github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c + github.com/mtrmac/gpgme + # github.com/onsi/ginkgo v1.10.2 +-github.com/onsi/ginkgo/ginkgo + github.com/onsi/ginkgo + github.com/onsi/ginkgo/config ++github.com/onsi/ginkgo/extensions/table ++github.com/onsi/ginkgo/ginkgo + github.com/onsi/ginkgo/ginkgo/convert + github.com/onsi/ginkgo/ginkgo/interrupthandler + github.com/onsi/ginkgo/ginkgo/nodot + github.com/onsi/ginkgo/ginkgo/testrunner + github.com/onsi/ginkgo/ginkgo/testsuite + github.com/onsi/ginkgo/ginkgo/watch +-github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable +-github.com/onsi/ginkgo/types + github.com/onsi/ginkgo/internal/codelocation ++github.com/onsi/ginkgo/internal/containernode + github.com/onsi/ginkgo/internal/failer ++github.com/onsi/ginkgo/internal/leafnodes + github.com/onsi/ginkgo/internal/remote ++github.com/onsi/ginkgo/internal/spec ++github.com/onsi/ginkgo/internal/spec_iterator ++github.com/onsi/ginkgo/internal/specrunner + github.com/onsi/ginkgo/internal/suite + github.com/onsi/ginkgo/internal/testingtproxy + github.com/onsi/ginkgo/internal/writer + github.com/onsi/ginkgo/reporters + github.com/onsi/ginkgo/reporters/stenographer +-github.com/onsi/ginkgo/extensions/table ++github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable + github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +-github.com/onsi/ginkgo/internal/spec_iterator +-github.com/onsi/ginkgo/internal/containernode +-github.com/onsi/ginkgo/internal/leafnodes +-github.com/onsi/ginkgo/internal/spec +-github.com/onsi/ginkgo/internal/specrunner ++github.com/onsi/ginkgo/types + # github.com/onsi/gomega v1.7.0 + github.com/onsi/gomega +-github.com/onsi/gomega/gexec + github.com/onsi/gomega/format ++github.com/onsi/gomega/gbytes ++github.com/onsi/gomega/gexec + github.com/onsi/gomega/internal/assertion + github.com/onsi/gomega/internal/asyncassertion ++github.com/onsi/gomega/internal/oraclematcher + github.com/onsi/gomega/internal/testingtsupport + github.com/onsi/gomega/matchers +-github.com/onsi/gomega/types +-github.com/onsi/gomega/gbytes +-github.com/onsi/gomega/internal/oraclematcher + github.com/onsi/gomega/matchers/support/goraph/bipartitegraph + 
github.com/onsi/gomega/matchers/support/goraph/edge + github.com/onsi/gomega/matchers/support/goraph/node + github.com/onsi/gomega/matchers/support/goraph/util ++github.com/onsi/gomega/types + # github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/opencontainers/go-digest + # github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 +-github.com/opencontainers/image-spec/specs-go/v1 + github.com/opencontainers/image-spec/specs-go ++github.com/opencontainers/image-spec/specs-go/v1 + # github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 +-github.com/opencontainers/runc/libcontainer/user + github.com/opencontainers/runc/libcontainer/apparmor ++github.com/opencontainers/runc/libcontainer/cgroups + github.com/opencontainers/runc/libcontainer/configs + github.com/opencontainers/runc/libcontainer/devices +-github.com/opencontainers/runc/libcontainer/cgroups + github.com/opencontainers/runc/libcontainer/system ++github.com/opencontainers/runc/libcontainer/user + # github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 + github.com/opencontainers/runtime-spec/specs-go + # github.com/opencontainers/runtime-tools v0.9.0 ++github.com/opencontainers/runtime-tools/error ++github.com/opencontainers/runtime-tools/filepath + github.com/opencontainers/runtime-tools/generate +-github.com/opencontainers/runtime-tools/validate + github.com/opencontainers/runtime-tools/generate/seccomp +-github.com/opencontainers/runtime-tools/filepath + github.com/opencontainers/runtime-tools/specerror +-github.com/opencontainers/runtime-tools/error ++github.com/opencontainers/runtime-tools/validate + # github.com/opencontainers/selinux v1.3.0 +-github.com/opencontainers/selinux/go-selinux/label + github.com/opencontainers/selinux/go-selinux ++github.com/opencontainers/selinux/go-selinux/label + # github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible + github.com/openshift/api/config/v1 + # github.com/openshift/imagebuilder v1.1.1 + github.com/openshift/imagebuilder +-github.com/openshift/imagebuilder/dockerfile/parser + github.com/openshift/imagebuilder/dockerfile/command ++github.com/openshift/imagebuilder/dockerfile/parser + github.com/openshift/imagebuilder/signal + github.com/openshift/imagebuilder/strslice + # github.com/opentracing/opentracing-go v1.1.0 + github.com/opentracing/opentracing-go +-github.com/opentracing/opentracing-go/log + github.com/opentracing/opentracing-go/ext ++github.com/opentracing/opentracing-go/log + # github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 + github.com/ostreedev/ostree-go/pkg/glibobject + github.com/ostreedev/ostree-go/pkg/otbuiltin +@@ -406,19 +407,19 @@ github.com/pkg/profile + github.com/pmezard/go-difflib/difflib + # github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 + github.com/pquerna/ffjson/fflib/v1 ++github.com/pquerna/ffjson/fflib/v1/internal + github.com/pquerna/ffjson/inception + github.com/pquerna/ffjson/shared +-github.com/pquerna/ffjson/fflib/v1/internal + # github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang/prometheus +-github.com/prometheus/client_golang/prometheus/promhttp + github.com/prometheus/client_golang/prometheus/internal ++github.com/prometheus/client_golang/prometheus/promhttp + # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/client_model/go + # github.com/prometheus/common v0.6.0 + github.com/prometheus/common/expfmt +-github.com/prometheus/common/model + 
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg ++github.com/prometheus/common/model + # github.com/prometheus/procfs v0.0.3 + github.com/prometheus/procfs + github.com/prometheus/procfs/internal/fs +@@ -446,30 +447,30 @@ github.com/tchap/go-patricia/patricia + github.com/uber/jaeger-client-go + github.com/uber/jaeger-client-go/config + github.com/uber/jaeger-client-go/internal/baggage ++github.com/uber/jaeger-client-go/internal/baggage/remote + github.com/uber/jaeger-client-go/internal/spanlog + github.com/uber/jaeger-client-go/internal/throttler ++github.com/uber/jaeger-client-go/internal/throttler/remote + github.com/uber/jaeger-client-go/log ++github.com/uber/jaeger-client-go/rpcmetrics + github.com/uber/jaeger-client-go/thrift ++github.com/uber/jaeger-client-go/thrift-gen/agent ++github.com/uber/jaeger-client-go/thrift-gen/baggage + github.com/uber/jaeger-client-go/thrift-gen/jaeger + github.com/uber/jaeger-client-go/thrift-gen/sampling + github.com/uber/jaeger-client-go/thrift-gen/zipkincore +-github.com/uber/jaeger-client-go/utils +-github.com/uber/jaeger-client-go/internal/baggage/remote +-github.com/uber/jaeger-client-go/internal/throttler/remote +-github.com/uber/jaeger-client-go/rpcmetrics + github.com/uber/jaeger-client-go/transport +-github.com/uber/jaeger-client-go/thrift-gen/agent +-github.com/uber/jaeger-client-go/thrift-gen/baggage ++github.com/uber/jaeger-client-go/utils + # github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 + github.com/uber/jaeger-lib/metrics + # github.com/ulikunitz/xz v0.5.6 + github.com/ulikunitz/xz ++github.com/ulikunitz/xz/internal/hash + github.com/ulikunitz/xz/internal/xlog + github.com/ulikunitz/xz/lzma +-github.com/ulikunitz/xz/internal/hash + # github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b +-github.com/varlink/go/varlink + github.com/varlink/go/cmd/varlink-go-interface-generator ++github.com/varlink/go/varlink + github.com/varlink/go/varlink/idl + # github.com/vbatts/tar-split v0.11.1 + github.com/vbatts/tar-split/archive/tar +@@ -477,8 +478,8 @@ github.com/vbatts/tar-split/tar/asm + github.com/vbatts/tar-split/tar/storage + # github.com/vbauerster/mpb v3.4.0+incompatible + github.com/vbauerster/mpb +-github.com/vbauerster/mpb/decor + github.com/vbauerster/mpb/cwriter ++github.com/vbauerster/mpb/decor + github.com/vbauerster/mpb/internal + # github.com/vishvananda/netlink v1.0.0 + github.com/vishvananda/netlink +@@ -492,32 +493,32 @@ github.com/xeipuuv/gojsonreference + # github.com/xeipuuv/gojsonschema v1.1.0 + github.com/xeipuuv/gojsonschema + # golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad +-golang.org/x/crypto/ssh/terminal ++golang.org/x/crypto/cast5 + golang.org/x/crypto/openpgp + golang.org/x/crypto/openpgp/armor ++golang.org/x/crypto/openpgp/elgamal + golang.org/x/crypto/openpgp/errors + golang.org/x/crypto/openpgp/packet + golang.org/x/crypto/openpgp/s2k +-golang.org/x/crypto/cast5 +-golang.org/x/crypto/openpgp/elgamal ++golang.org/x/crypto/ssh/terminal + # golang.org/x/net v0.0.0-20190628185345-da137c7871d7 + golang.org/x/net/context +-golang.org/x/net/http2 ++golang.org/x/net/context/ctxhttp ++golang.org/x/net/html ++golang.org/x/net/html/atom + golang.org/x/net/html/charset +-golang.org/x/net/proxy + golang.org/x/net/http/httpguts ++golang.org/x/net/http2 + golang.org/x/net/http2/hpack + golang.org/x/net/idna +-golang.org/x/net/html + golang.org/x/net/internal/socks +-golang.org/x/net/html/atom +-golang.org/x/net/context/ctxhttp ++golang.org/x/net/proxy + # golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/oauth2 + golang.org/x/oauth2/internal + # golang.org/x/sync v0.0.0-20190423024810-112230192c58 +-golang.org/x/sync/semaphore + golang.org/x/sync/errgroup ++golang.org/x/sync/semaphore + # golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 + golang.org/x/sys/unix + golang.org/x/sys/windows +@@ -525,41 +526,41 @@ golang.org/x/sys/windows + golang.org/x/text/encoding + golang.org/x/text/encoding/charmap + golang.org/x/text/encoding/htmlindex +-golang.org/x/text/transform +-golang.org/x/text/secure/bidirule +-golang.org/x/text/unicode/bidi +-golang.org/x/text/unicode/norm +-golang.org/x/text/encoding/internal/identifier + golang.org/x/text/encoding/internal ++golang.org/x/text/encoding/internal/identifier + golang.org/x/text/encoding/japanese + golang.org/x/text/encoding/korean + golang.org/x/text/encoding/simplifiedchinese + golang.org/x/text/encoding/traditionalchinese + golang.org/x/text/encoding/unicode +-golang.org/x/text/language +-golang.org/x/text/internal/utf8internal +-golang.org/x/text/runes + golang.org/x/text/internal/language + golang.org/x/text/internal/language/compact + golang.org/x/text/internal/tag ++golang.org/x/text/internal/utf8internal ++golang.org/x/text/language ++golang.org/x/text/runes ++golang.org/x/text/secure/bidirule ++golang.org/x/text/transform ++golang.org/x/text/unicode/bidi ++golang.org/x/text/unicode/norm + # golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 + golang.org/x/time/rate + # google.golang.org/appengine v1.6.1 +-google.golang.org/appengine/urlfetch + google.golang.org/appengine/internal +-google.golang.org/appengine/internal/urlfetch + google.golang.org/appengine/internal/base + google.golang.org/appengine/internal/datastore + google.golang.org/appengine/internal/log + google.golang.org/appengine/internal/remote_api ++google.golang.org/appengine/internal/urlfetch ++google.golang.org/appengine/urlfetch + # google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 + google.golang.org/genproto/googleapis/rpc/status + # google.golang.org/grpc v1.24.0 + google.golang.org/grpc/codes +-google.golang.org/grpc/status +-google.golang.org/grpc/internal + google.golang.org/grpc/connectivity + google.golang.org/grpc/grpclog ++google.golang.org/grpc/internal ++google.golang.org/grpc/status + # gopkg.in/fsnotify.v1 v1.4.7 + gopkg.in/fsnotify.v1 + # gopkg.in/inf.v0 v0.9.1 +@@ -571,61 +572,61 @@ gopkg.in/yaml.v2 + # k8s.io/api v0.0.0-20190813020757-36bff7324fb7 + k8s.io/api/core/v1 + # k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 +-k8s.io/apimachinery/pkg/apis/meta/v1 +-k8s.io/apimachinery/pkg/util/runtime ++k8s.io/apimachinery/pkg/api/errors + k8s.io/apimachinery/pkg/api/resource +-k8s.io/apimachinery/pkg/runtime +-k8s.io/apimachinery/pkg/runtime/schema +-k8s.io/apimachinery/pkg/types +-k8s.io/apimachinery/pkg/util/intstr ++k8s.io/apimachinery/pkg/apis/meta/v1 ++k8s.io/apimachinery/pkg/apis/meta/v1/unstructured + k8s.io/apimachinery/pkg/conversion ++k8s.io/apimachinery/pkg/conversion/queryparams + k8s.io/apimachinery/pkg/fields + k8s.io/apimachinery/pkg/labels ++k8s.io/apimachinery/pkg/runtime ++k8s.io/apimachinery/pkg/runtime/schema ++k8s.io/apimachinery/pkg/runtime/serializer ++k8s.io/apimachinery/pkg/runtime/serializer/json ++k8s.io/apimachinery/pkg/runtime/serializer/protobuf ++k8s.io/apimachinery/pkg/runtime/serializer/recognizer ++k8s.io/apimachinery/pkg/runtime/serializer/streaming ++k8s.io/apimachinery/pkg/runtime/serializer/versioning + k8s.io/apimachinery/pkg/selection 
+-k8s.io/apimachinery/pkg/watch +-k8s.io/apimachinery/pkg/util/httpstream +-k8s.io/apimachinery/pkg/util/remotecommand +-k8s.io/apimachinery/pkg/conversion/queryparams ++k8s.io/apimachinery/pkg/types ++k8s.io/apimachinery/pkg/util/clock + k8s.io/apimachinery/pkg/util/errors ++k8s.io/apimachinery/pkg/util/framer ++k8s.io/apimachinery/pkg/util/httpstream ++k8s.io/apimachinery/pkg/util/httpstream/spdy ++k8s.io/apimachinery/pkg/util/intstr + k8s.io/apimachinery/pkg/util/json + k8s.io/apimachinery/pkg/util/naming ++k8s.io/apimachinery/pkg/util/net ++k8s.io/apimachinery/pkg/util/remotecommand ++k8s.io/apimachinery/pkg/util/runtime + k8s.io/apimachinery/pkg/util/sets +-k8s.io/apimachinery/third_party/forked/golang/reflect + k8s.io/apimachinery/pkg/util/validation +-k8s.io/apimachinery/pkg/util/net +-k8s.io/apimachinery/pkg/api/errors +-k8s.io/apimachinery/pkg/runtime/serializer/streaming +-k8s.io/apimachinery/pkg/util/httpstream/spdy + k8s.io/apimachinery/pkg/util/validation/field ++k8s.io/apimachinery/pkg/util/yaml + k8s.io/apimachinery/pkg/version +-k8s.io/apimachinery/pkg/runtime/serializer +-k8s.io/apimachinery/pkg/util/clock ++k8s.io/apimachinery/pkg/watch + k8s.io/apimachinery/third_party/forked/golang/netutil +-k8s.io/apimachinery/pkg/runtime/serializer/json +-k8s.io/apimachinery/pkg/runtime/serializer/protobuf +-k8s.io/apimachinery/pkg/runtime/serializer/recognizer +-k8s.io/apimachinery/pkg/runtime/serializer/versioning +-k8s.io/apimachinery/pkg/util/framer +-k8s.io/apimachinery/pkg/util/yaml +-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured ++k8s.io/apimachinery/third_party/forked/golang/reflect + # k8s.io/client-go v0.0.0-20190620085101-78d2af792bab +-k8s.io/client-go/tools/remotecommand +-k8s.io/client-go/rest +-k8s.io/client-go/transport/spdy +-k8s.io/client-go/util/exec +-k8s.io/client-go/util/homedir ++k8s.io/client-go/pkg/apis/clientauthentication ++k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 ++k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 + k8s.io/client-go/pkg/version + k8s.io/client-go/plugin/pkg/client/auth/exec ++k8s.io/client-go/rest + k8s.io/client-go/rest/watch + k8s.io/client-go/tools/clientcmd/api + k8s.io/client-go/tools/metrics ++k8s.io/client-go/tools/remotecommand + k8s.io/client-go/transport ++k8s.io/client-go/transport/spdy + k8s.io/client-go/util/cert +-k8s.io/client-go/util/flowcontrol +-k8s.io/client-go/pkg/apis/clientauthentication +-k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +-k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 + k8s.io/client-go/util/connrotation ++k8s.io/client-go/util/exec ++k8s.io/client-go/util/flowcontrol ++k8s.io/client-go/util/homedir + k8s.io/client-go/util/keyutil + # k8s.io/klog v0.3.3 + k8s.io/klog + +From 54558fbe4b27a8e8bc81c3c6079ea7e89ac683ee Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Wed, 5 Feb 2020 15:19:56 +0100 +Subject: [PATCH 2/3] bump golangci-lint + +Fixes: https://github.com/golangci/golangci-lint/issues/658 +Signed-off-by: Valentin Rothberg +--- + Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index a5265653d1..eb67d2665a 100644 +--- a/Makefile ++++ b/Makefile +@@ -479,7 +479,7 @@ endef + + .install.golangci-lint: .gopathok + if [ ! 
-x "$(GOBIN)/golangci-lint" ]; then \ +- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOBIN)/ v1.17.1; \ ++ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOBIN)/ v1.18.0; \ + fi + + .install.md2man: .gopathok + +From b8c5d5612b90ebce453a1bbc4757fced4dc731dc Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Mon, 10 Feb 2020 09:42:17 +0100 +Subject: [PATCH 3/3] e2e pull test: use k8s pause instead of alpine + +When pulling with --all-tags, use the k8s pause image instead of alpine. +The pause repo has considerably less tags and is hence used in master to +prevent timeouts that we're hitting when running rootless, where we're +suffering additional performance regressions on VFS. + +Signed-off-by: Valentin Rothberg +--- + test/e2e/pull_test.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go +index 5152409aff..8a59794850 100644 +--- a/test/e2e/pull_test.go ++++ b/test/e2e/pull_test.go +@@ -339,7 +339,7 @@ var _ = Describe("Podman pull", func() { + }) + + It("podman pull check all tags", func() { +- session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "alpine"}) ++ session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "k8s.gcr.io/pause"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.LineInOuputStartsWith("Pulled Images:")).To(BeTrue()) diff --git a/SOURCES/podman-1784950.patch b/SOURCES/podman-1784950.patch new file mode 100644 index 0000000..a8a0aad --- /dev/null +++ b/SOURCES/podman-1784950.patch @@ -0,0 +1,145 @@ +From fb7d2b6bd6a16ffdbe4a69428e3ba5b487719e78 Mon Sep 17 00:00:00 2001 +From: Daniel J Walsh +Date: Tue, 17 Dec 2019 15:24:29 -0500 +Subject: [PATCH] Add support for FIPS-Mode backends + +If host is running in fips mode, then RHEL8.2 and beyond container images +will come with a directory /usr/share/crypto-policies/back-ends/FIPS. +This directory needs to be bind mounted over /etc/crypto-policies/back-ends in +order to make all tools in the container follow the FIPS Mode rules. 
+ +Signed-off-by: Daniel J Walsh +--- + pkg/secrets/secrets.go | 48 +++++++++++++++++++++++++++++++++--------- + run_linux.go | 2 +- + 2 files changed, 39 insertions(+), 11 deletions(-) + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go.1784950 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go.1784950 2020-02-19 14:58:22.049213896 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go 2020-02-19 14:58:22.052213937 +0100 +@@ -148,12 +148,21 @@ func getMountsMap(path string) (string, + } + + // SecretMounts copies, adds, and mounts the secrets to the container root filesystem ++// Deprecated, Please use SecretMountWithUIDGID + func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless, disableFips bool) []rspec.Mount { + return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless, disableFips) + } + +-// SecretMountsWithUIDGID specifies the uid/gid of the owner +-func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless, disableFips bool) []rspec.Mount { ++// SecretMountsWithUIDGID copies, adds, and mounts the secrets to the container root filesystem ++// mountLabel: MAC/SELinux label for container content ++// containerWorkingDir: Private data for storing secrets on the host mounted in container. ++// mountFile: Additional mount points required for the container. ++// mountPoint: Container image mountpoint ++// uid: to assign to content created for secrets ++// gid: to assign to content created for secrets ++// rootless: indicates whether container is running in rootless mode ++// disableFips: indicates whether system should ignore fips mode ++func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { + var ( + secretMounts []rspec.Mount + mountFiles []string +@@ -171,7 +180,7 @@ func SecretMountsWithUIDGID(mountLabel, + } + for _, file := range mountFiles { + if _, err := os.Stat(file); err == nil { +- mounts, err := addSecretsFromMountsFile(file, mountLabel, containerWorkingDir, mountPrefix, uid, gid) ++ mounts, err := addSecretsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid) + if err != nil { + logrus.Warnf("error mounting secrets, skipping entry in %s: %v", file, err) + } +@@ -187,7 +196,7 @@ func SecretMountsWithUIDGID(mountLabel, + // Add FIPS mode secret if /etc/system-fips exists on the host + _, err := os.Stat("/etc/system-fips") + if err == nil { +- if err := addFIPSModeSecret(&secretMounts, containerWorkingDir, mountPrefix, mountLabel, uid, gid); err != nil { ++ if err := addFIPSModeSecret(&secretMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil { + logrus.Errorf("error adding FIPS mode secret to container: %v", err) + } + } else if os.IsNotExist(err) { +@@ -206,7 +215,7 @@ func rchown(chowndir string, uid, gid in + + // addSecretsFromMountsFile copies the contents of host directory to container directory + // and returns a list of mounts +-func addSecretsFromMountsFile(filePath, mountLabel, containerWorkingDir, mountPrefix string, uid, gid int) ([]rspec.Mount, error) { ++func addSecretsFromMountsFile(filePath, mountLabel, 
containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) { + var mounts []rspec.Mount + defaultMountsPaths := getMounts(filePath) + for _, path := range defaultMountsPaths { +@@ -285,7 +294,7 @@ func addSecretsFromMountsFile(filePath, + } + + m := rspec.Mount{ +- Source: filepath.Join(mountPrefix, ctrDirOrFile), ++ Source: ctrDirOrFileOnHost, + Destination: ctrDirOrFile, + Type: "bind", + Options: []string{"bind", "rprivate"}, +@@ -300,15 +309,15 @@ func addSecretsFromMountsFile(filePath, + // root filesystem if /etc/system-fips exists on hosts. + // This enables the container to be FIPS compliant and run openssl in + // FIPS mode as the host is also in FIPS mode. +-func addFIPSModeSecret(mounts *[]rspec.Mount, containerWorkingDir, mountPrefix, mountLabel string, uid, gid int) error { ++func addFIPSModeSecret(mounts *[]rspec.Mount, containerWorkingDir, mountPoint, mountLabel string, uid, gid int) error { + secretsDir := "/run/secrets" + ctrDirOnHost := filepath.Join(containerWorkingDir, secretsDir) + if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) { + if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { +- return errors.Wrapf(err, "making container directory on host failed") ++ return errors.Wrapf(err, "making container directory %q on host failed", ctrDirOnHost) + } + if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { +- return errors.Wrap(err, "error applying correct labels") ++ return errors.Wrapf(err, "error applying correct labels on %q", ctrDirOnHost) + } + } + fipsFile := filepath.Join(ctrDirOnHost, "system-fips") +@@ -323,7 +332,7 @@ func addFIPSModeSecret(mounts *[]rspec.M + + if !mountExists(*mounts, secretsDir) { + m := rspec.Mount{ +- Source: filepath.Join(mountPrefix, secretsDir), ++ Source: ctrDirOnHost, + Destination: secretsDir, + Type: "bind", + Options: []string{"bind", "rprivate"}, +@@ -331,6 +340,25 @@ func addFIPSModeSecret(mounts *[]rspec.M + *mounts = append(*mounts, m) + } + ++ srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS" ++ destDir := "/etc/crypto-policies/back-ends" ++ srcOnHost := filepath.Join(mountPoint, srcBackendDir) ++ if _, err := os.Stat(srcOnHost); err != nil { ++ if os.IsNotExist(err) { ++ return nil ++ } ++ return errors.Wrapf(err, "failed to stat FIPS Backend directory %q", ctrDirOnHost) ++ } ++ ++ if !mountExists(*mounts, destDir) { ++ m := rspec.Mount{ ++ Source: srcOnHost, ++ Destination: destDir, ++ Type: "bind", ++ Options: []string{"bind", "rprivate"}, ++ } ++ *mounts = append(*mounts, m) ++ } + return nil + } + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go.1784950 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go.1784950 2020-02-19 14:58:22.021213507 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go 2020-02-19 14:58:22.024213549 +0100 +@@ -460,7 +460,7 @@ func (b *Builder) setupMounts(mountPoint + } + + // Get the list of secrets mounts. 
+- secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless(), false) ++ secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) + + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. diff --git a/SOURCES/podman-1805212.patch b/SOURCES/podman-1805212.patch new file mode 100644 index 0000000..4617599 --- /dev/null +++ b/SOURCES/podman-1805212.patch @@ -0,0 +1,51 @@ +From 6c97e0d5c140d587e5477d478159e91b8adcfd15 Mon Sep 17 00:00:00 2001 +From: Brent Baude +Date: Thu, 27 Feb 2020 14:39:31 -0600 +Subject: [PATCH 2/2] network create should use firewall plugin + +when creating a network, podman should add the firewall plugin to the config but not specify a backend. this will allow cni to determine whether it should use an iptables|firewalld backend. + +Signed-off-by: Brent Baude +--- + pkg/adapter/network.go | 1 + + pkg/network/netconflist.go | 1 - + 2 files changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pkg/network/netconflist.go b/pkg/network/netconflist.go +index a8217097ac..34ff000249 100644 +--- a/pkg/network/netconflist.go ++++ b/pkg/network/netconflist.go +@@ -110,7 +110,6 @@ func NewPortMapPlugin() PortMapConfig { + func NewFirewallPlugin() FirewallConfig { + return FirewallConfig{ + PluginType: "firewall", +- Backend: "iptables", + } + } + + +From cfd40608907b653a8b05f2e4f4243f8aa677b6e3 Mon Sep 17 00:00:00 2001 +From: Brent Baude +Date: Thu, 27 Feb 2020 14:35:48 -0600 +Subject: [PATCH 1/2] add firewall plugin (no backend) to default cni config + +in order for the fall back mechanisms to work in containernetworking-plugins, the firewall plugin must still be called via the cni configuration file. however, no backend w + +Signed-off-by: Brent Baude +--- + cni/87-podman-bridge.conflist | 3 +++ + 1 file changed, 3 insertions(+) + +diff -up a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist +--- a/cni/87-podman-bridge.conflist ++++ b/cni/87-podman-bridge.conflist +@@ -31,8 +31,7 @@ + } + }, + { +- "type": "firewall", +- "backend": "iptables" ++ "type": "firewall" + } + ] + } diff --git a/SOURCES/podman-1807310.patch b/SOURCES/podman-1807310.patch new file mode 100644 index 0000000..d182dab --- /dev/null +++ b/SOURCES/podman-1807310.patch @@ -0,0 +1,133 @@ +From b41c864d569357a102ee2335a4947e59e5e2b08a Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Thu, 27 Feb 2020 16:08:29 -0500 +Subject: [PATCH] Ensure that exec sessions inherit supplemental groups + +This corrects a regression from Podman 1.4.x where container exec +sessions inherited supplemental groups from the container, iff +the exec session did not specify a user. 
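The rule the patch restores is small: only when the exec session is created without an explicit user does it inherit the container's user and supplemental groups. A compact sketch of that decision, with illustrative names (ctrUser and ctrGroups standing in for the container's stored configuration):

    // execIdentity picks the user and supplemental groups for an exec session.
    func execIdentity(requestedUser, ctrUser string, ctrGroups []string) (string, []string) {
    	if requestedUser == "" {
    		// No user given to exec: inherit both the user and the
    		// supplemental groups the container was created with.
    		return ctrUser, ctrGroups
    	}
    	// An explicit user was requested: use it, and do not inherit the
    	// container's supplemental groups.
    	return requestedUser, nil
    }
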
+ +Signed-off-by: Matthew Heon +--- + libpod/container_api.go | 5 ----- + libpod/container_internal_linux.go | 5 ++++- + libpod/oci_conmon_linux.go | 25 +++++++++++++++++++++---- + test/e2e/exec_test.go | 24 ++++++++++++++++++++++++ + 4 files changed, 49 insertions(+), 10 deletions(-) + +diff --git a/libpod/container_api.go b/libpod/container_api.go +index d612341bce..dabbe27dcd 100644 +--- a/libpod/container_api.go ++++ b/libpod/container_api.go +@@ -270,11 +270,6 @@ func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []stri + } + }() + +- // if the user is empty, we should inherit the user that the container is currently running with +- if user == "" { +- user = c.config.User +- } +- + opts := new(ExecOptions) + opts.Cmd = cmd + opts.CapAdd = capList +diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go +index 7390262647..63968918cb 100644 +--- a/libpod/container_internal_linux.go ++++ b/libpod/container_internal_linux.go +@@ -330,7 +330,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { + + // Add addition groups if c.config.GroupAdd is not empty + if len(c.config.Groups) > 0 { +- gids, _ := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, nil) ++ gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID()) ++ } + for _, gid := range gids { + g.AddProcessAdditionalGid(gid) + } +diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go +index 07d38693f0..800f896036 100644 +--- a/libpod/oci_conmon_linux.go ++++ b/libpod/oci_conmon_linux.go +@@ -1252,18 +1252,35 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se + + } + ++ var addGroups []string ++ var sgids []uint32 ++ ++ // if the user is empty, we should inherit the user that the container is currently running with ++ if user == "" { ++ user = c.config.User ++ addGroups = c.config.Groups ++ } ++ + overrides := c.getUserOverrides() + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, overrides) + if err != nil { + return nil, err + } + ++ if len(addGroups) > 0 { ++ sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID) ++ } ++ } ++ + // If user was set, look it up in the container to get a UID to use on + // the host +- if user != "" { +- sgids := make([]uint32, 0, len(execUser.Sgids)) +- for _, sgid := range execUser.Sgids { +- sgids = append(sgids, uint32(sgid)) ++ if user != "" || len(sgids) > 0 { ++ if user != "" { ++ for _, sgid := range execUser.Sgids { ++ sgids = append(sgids, uint32(sgid)) ++ } + } + processUser := spec.User{ + UID: uint32(execUser.Uid), +diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go +index ed4eb3335f..ab806f6831 100644 +--- a/test/e2e/exec_test.go ++++ b/test/e2e/exec_test.go +@@ -1,6 +1,7 @@ + package integration + + import ( ++ "fmt" + "os" + "strings" + +@@ -244,4 +245,27 @@ var _ = Describe("Podman exec", func() { + Expect(session.ExitCode()).To(Equal(0)) + }) + ++ It("podman exec preserves --group-add groups", func() { ++ groupName := "group1" ++ gid := "4444" ++ ctrName1 := "ctr1" ++ ctr1 := podmanTest.Podman([]string{"run", "-ti", "--name", ctrName1, fedoraMinimal, "groupadd", "-g", gid, groupName}) ++ 
ctr1.WaitWithDefaultTimeout() ++ Expect(ctr1.ExitCode()).To(Equal(0)) ++ ++ imgName := "img1" ++ commit := podmanTest.Podman([]string{"commit", ctrName1, imgName}) ++ commit.WaitWithDefaultTimeout() ++ Expect(commit.ExitCode()).To(Equal(0)) ++ ++ ctrName2 := "ctr2" ++ ctr2 := podmanTest.Podman([]string{"run", "-d", "--name", ctrName2, "--group-add", groupName, imgName, "sleep", "300"}) ++ ctr2.WaitWithDefaultTimeout() ++ Expect(ctr2.ExitCode()).To(Equal(0)) ++ ++ exec := podmanTest.Podman([]string{"exec", "-ti", ctrName2, "id"}) ++ exec.WaitWithDefaultTimeout() ++ Expect(exec.ExitCode()).To(Equal(0)) ++ Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue()) ++ }) + }) diff --git a/SOURCES/podman-1868603.patch b/SOURCES/podman-1868603.patch new file mode 100644 index 0000000..8787372 --- /dev/null +++ b/SOURCES/podman-1868603.patch @@ -0,0 +1,340 @@ +From 81308749f70d6c40c6b0fea39ffe767bfe50da38 Mon Sep 17 00:00:00 2001 +From: bpopovschi +Date: Wed, 6 Nov 2019 18:20:42 +0200 +Subject: [PATCH] Added possibility to overwrite default tmp dir for big files + +Signed-off-by: bpopovschi +--- + docker/archive/src.go | 5 +++-- + docker/archive/transport.go | 4 ++-- + docker/daemon/daemon_src.go | 2 +- + docker/tarfile/dest.go | 5 ++++- + docker/tarfile/src.go | 20 ++++++++++++++++++-- + internal/tmpdir/tmpdir.go | 7 ++++++- + oci/archive/oci_dest.go | 2 +- + oci/archive/oci_src.go | 4 ++-- + oci/archive/oci_transport.go | 9 +++++---- + storage/storage_image.go | 4 ++-- + storage/storage_reference.go | 2 +- + types/types.go | 3 ++- + 12 files changed, 47 insertions(+), 20 deletions(-) + +diff --git a/docker/archive/src.go b/docker/archive/src.go +index a90707437..6a628508d 100644 +--- a/vendor/github.com/containers/image/v5/docker/archive/src.go ++++ b/vendor/github.com/containers/image/v5/docker/archive/src.go +@@ -2,6 +2,7 @@ package archive + + import ( + "context" ++ + "github.com/containers/image/v5/docker/tarfile" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +@@ -14,11 +15,11 @@ type archiveImageSource struct { + + // newImageSource returns a types.ImageSource for the specified image reference. + // The caller must call .Close() on the returned ImageSource. +-func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { ++func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) { + if ref.destinationRef != nil { + logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") + } +- src, err := tarfile.NewSourceFromFile(ref.path) ++ src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path) + if err != nil { + return nil, err + } +diff --git a/docker/archive/transport.go b/docker/archive/transport.go +index 44213bb8d..46c01891f 100644 +--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go ++++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go +@@ -134,7 +134,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { +- src, err := newImageSource(ctx, ref) ++ src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } +@@ -144,7 +144,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. + func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { +- return newImageSource(ctx, ref) ++ return newImageSource(ctx, sys, ref) + } + + // NewImageDestination returns a types.ImageDestination for this reference. +diff --git a/docker/daemon/daemon_src.go b/docker/daemon/daemon_src.go +index 46fbcc4e0..2bca16866 100644 +--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go ++++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go +@@ -40,7 +40,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef + } + defer inputStream.Close() + +- src, err := tarfile.NewSourceFromStream(inputStream) ++ src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream) + if err != nil { + return nil, err + } +diff --git a/docker/tarfile/dest.go b/docker/tarfile/dest.go +index b02c60bb3..7b2f0e418 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +@@ -29,6 +29,7 @@ type Destination struct { + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + config []byte ++ sysCtx *types.SystemContext + } + + // NewDestination returns a tarfile.Destination for the specified io.Writer. +@@ -94,12 +95,14 @@ func (d *Destination) HasThreadSafePutBlob() bool { + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. ++// Deprecated: Please use PutBlobWithSystemContext which will allows you to configure temp directory ++// for big files through SystemContext.BigFilesTemporaryDir + func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Ouch, we need to stream the blob into a temporary file just to determine the size. + // When the layer is decompressed, we also have to generate the digest on uncompressed datas. 
+ if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { + logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") +- streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") ++ streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob") + if err != nil { + return types.BlobInfo{}, err + } +diff --git a/docker/tarfile/src.go b/docker/tarfile/src.go +index ad0a3d2cb..3ea5ce053 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go +@@ -46,7 +46,14 @@ type layerInfo struct { + // To do for both the NewSourceFromFile and NewSourceFromStream functions + + // NewSourceFromFile returns a tarfile.Source for the specified path. ++// Deprecated: Please use NewSourceFromFileWithContext which will allows you to configure temp directory ++// for big files through SystemContext.BigFilesTemporaryDir + func NewSourceFromFile(path string) (*Source, error) { ++ return NewSourceFromFileWithContext(nil, path) ++} ++ ++// NewSourceFromFileWithContext returns a tarfile.Source for the specified path. ++func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", path) +@@ -65,16 +72,25 @@ func NewSourceFromFile(path string) (*Source, error) { + tarPath: path, + }, nil + } +- return NewSourceFromStream(stream) ++ return NewSourceFromStreamWithSystemContext(sys, stream) + } + + // NewSourceFromStream returns a tarfile.Source for the specified inputStream, + // which can be either compressed or uncompressed. The caller can close the + // inputStream immediately after NewSourceFromFile returns. ++// Deprecated: Please use NewSourceFromStreamWithSystemContext which will allows you to configure ++// temp directory for big files through SystemContext.BigFilesTemporaryDir + func NewSourceFromStream(inputStream io.Reader) (*Source, error) { ++ return NewSourceFromStreamWithSystemContext(nil, inputStream) ++} ++ ++// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream, ++// which can be either compressed or uncompressed. The caller can close the ++// inputStream immediately after NewSourceFromFile returns. ++func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) { + // FIXME: use SystemContext here. + // Save inputStream to a temporary file +- tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar") ++ tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + if err != nil { + return nil, errors.Wrap(err, "error creating temporary file") + } +diff --git a/internal/tmpdir/tmpdir.go b/internal/tmpdir/tmpdir.go +index 8c776929c..a3081f4f2 100644 +--- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go ++++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +@@ -3,6 +3,8 @@ package tmpdir + import ( + "os" + "runtime" ++ ++ "github.com/containers/image/v5/types" + ) + + // unixTempDirForBigFiles is the directory path to store big files on non Windows systems. +@@ -18,7 +20,10 @@ const builtinUnixTempDirForBigFiles = "/var/tmp" + // TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. 
+ // On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp + // which on systemd based systems could be the unsuitable tmpfs filesystem. +-func TemporaryDirectoryForBigFiles() string { ++func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { ++ if sys != nil && sys.BigFilesTemporaryDir != "" { ++ return sys.BigFilesTemporaryDir ++ } + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() +diff --git a/oci/archive/oci_dest.go b/oci/archive/oci_dest.go +index 164d5522d..6918f7fb0 100644 +--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go ++++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +@@ -19,7 +19,7 @@ type ociArchiveImageDestination struct { + + // newImageDestination returns an ImageDestination for writing to an existing directory. + func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { +- tempDirRef, err := createOCIRef(ref.image) ++ tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return nil, errors.Wrapf(err, "error creating oci reference") + } +diff --git a/oci/archive/oci_src.go b/oci/archive/oci_src.go +index 33a41d44b..363c12b0b 100644 +--- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go ++++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +@@ -20,7 +20,7 @@ type ociArchiveImageSource struct { + // newImageSource returns an ImageSource for reading from an existing directory. + // newImageSource untars the file and saves it in a temp directory + func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { +- tempDirRef, err := createUntarTempDir(ref) ++ tempDirRef, err := createUntarTempDir(sys, ref) + if err != nil { + return nil, errors.Wrap(err, "error creating temp directory") + } +@@ -43,7 +43,7 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + } +- tempDirRef, err := createUntarTempDir(ociArchRef) ++ tempDirRef, err := createUntarTempDir(nil, ociArchRef) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") + } +diff --git a/oci/archive/oci_transport.go b/oci/archive/oci_transport.go +index 2d72a6fee..b7780abde 100644 +--- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go ++++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +@@ -159,8 +159,9 @@ func (t *tempDirOCIRef) deleteTempDir() error { + } + + // createOCIRef creates the oci reference of the image +-func createOCIRef(image string) (tempDirOCIRef, error) { +- dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") ++// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files ++func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { ++ dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + if err != nil { + return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory") + } +@@ -174,8 +175,8 @@ func createOCIRef(image string) (tempDirOCIRef, error) { + } + + // creates the temporary directory and copies the tarred content to it +-func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, 
error) { +- tempDirRef, err := createOCIRef(ref.image) ++func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { ++ tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference") + } +diff --git a/storage/storage_image.go b/storage/storage_image.go +index 2b89f329f..409619b21 100644 +--- a/vendor/github.com/containers/image/v5/storage/storage_image.go ++++ b/vendor/github.com/containers/image/v5/storage/storage_image.go +@@ -341,8 +341,8 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest * + + // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until + // it's time to Commit() the image +-func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { +- directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage") ++func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { ++ directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + if err != nil { + return nil, errors.Wrapf(err, "error creating a temporary directory") + } +diff --git a/storage/storage_reference.go b/storage/storage_reference.go +index 4e137ad1b..9eb0ae738 100644 +--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go ++++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go +@@ -295,5 +295,5 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC + } + + func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { +- return newImageDestination(s) ++ return newImageDestination(sys, s) + } +diff --git a/types/types.go b/types/types.go +index aaeb97da6..13a8ef78d 100644 +--- a/vendor/github.com/containers/image/v5/types/types.go ++++ b/vendor/github.com/containers/image/v5/types/types.go +@@ -490,9 +490,10 @@ type SystemContext struct { + OSChoice string + // If not "", overrides the system's default directory containing a blob info cache. + BlobInfoCacheDir string +- + // Additional tags when creating or copying a docker-archive. + DockerArchiveAdditionalTags []reference.NamedTagged ++ // If not "", overrides the temporary directory to use for storing big files ++ BigFilesTemporaryDir string + + // === OCI.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), +From b65de0f71c33ae1d3558132261f159e321c8edf1 Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Mon, 17 Aug 2020 09:24:41 -0400 +Subject: [PATCH] Add support for setting the large files tmpdir to v1.6 + +This is based on 2c328f94b61116bfa7d1d46525d854678f94c9f3 by Les +Aker, and e53fc16b9f470a137abf182b0561a16447bfd5b7 by Dan Walsh +(the latter from containers/buildah). They have been merge here +to allow this to compile on the older v1.6 branch of Podman. + +Unfortunately this does not fix Buildah, as the Buildah patches +are too new to apply on top of this old branch. 
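The behavior added by this backport is easiest to see in isolation: choose the big-files directory from $TMPDIR when it is set, otherwise fall back to /var/tmp, and pass that value into the image SystemContext via BigFilesTemporaryDir. A sketch of the lookup, mirroring the GetTempDir helper added in the hunk below:

    import "os"

    // bigFilesTempDir returns the directory used for large temporary files.
    // /var/tmp is preferred over os.TempDir() because /tmp is commonly a
    // size-limited tmpfs on systemd-based hosts.
    func bigFilesTempDir() string {
    	if dir, ok := os.LookupEnv("TMPDIR"); ok {
    		return dir
    	}
    	return "/var/tmp"
    }
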
+ +Signed-off-by: Matthew Heon +--- + libpod/image/docker_registry_options.go | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go +index 62a4af4653..b1eb31e2db 100644 +--- a/libpod/image/docker_registry_options.go ++++ b/libpod/image/docker_registry_options.go +@@ -2,10 +2,10 @@ package image + + import ( + "fmt" ++ "os" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +- + podmanVersion "github.com/containers/libpod/version" + ) + +@@ -41,6 +41,7 @@ func (o DockerRegistryOptions) GetSystemContext(parent *types.SystemContext, add + DockerArchiveAdditionalTags: additionalDockerArchiveTags, + OSChoice: o.OSChoice, + ArchitectureChoice: o.ArchitectureChoice, ++ BigFilesTemporaryDir: GetTempDir(), + } + if parent != nil { + sc.SignaturePolicyPath = parent.SignaturePolicyPath +@@ -65,3 +66,11 @@ func GetSystemContext(signaturePolicyPath, authFilePath string, forceCompress bo + + return sc + } ++ ++// Retrieve the temporary directory for storing large files. ++func GetTempDir() string { ++ if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { ++ return tmpdir ++ } ++ return "/var/tmp" ++} diff --git a/SOURCES/podman-CVE-2020-10696.patch b/SOURCES/podman-CVE-2020-10696.patch new file mode 100644 index 0000000..b2569a4 --- /dev/null +++ b/SOURCES/podman-CVE-2020-10696.patch @@ -0,0 +1,58 @@ +From 840e7dad513b86f454573ad415701c0199f78d30 Mon Sep 17 00:00:00 2001 +From: TomSweeneyRedHat +Date: Tue, 24 Mar 2020 20:10:22 -0400 +Subject: [PATCH] Fix potential CVE in tarfile w/ symlink + +Stealing @nalind 's workaround to avoid refetching +content after a file read failure. Under the right +circumstances that could be a symlink to a file meant +to overwrite a good file with bad data. 
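The fix below swaps ioutil.WriteFile for containers/storage's ioutils.AtomicWriteFile, so the Dockerfile path is replaced rather than written through if it already exists as a symlink. The general pattern behind such an atomic write is roughly the following sketch (illustrative only, not the containers/storage implementation): write to a fresh temporary file in the destination directory, then rename it over the target.

    package main

    import (
    	"io/ioutil"
    	"os"
    	"path/filepath"
    )

    // atomicWrite writes data to path without following a pre-existing
    // symlink at path: the content goes into a freshly created temporary
    // file, which is then renamed over the target, replacing whatever
    // was there before.
    func atomicWrite(path string, data []byte, perm os.FileMode) error {
    	tmp, err := ioutil.TempFile(filepath.Dir(path), ".tmp-write-")
    	if err != nil {
    		return err
    	}
    	defer os.Remove(tmp.Name()) // best-effort cleanup; harmless after a successful rename
    	if _, err := tmp.Write(data); err != nil {
    		tmp.Close()
    		return err
    	}
    	if err := tmp.Chmod(perm); err != nil {
    		tmp.Close()
    		return err
    	}
    	if err := tmp.Close(); err != nil {
    		return err
    	}
    	return os.Rename(tmp.Name(), path)
    }
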
+ +Testing: +``` +goodstuff + +[1] 14901 + +127.0.0.1 - - [24/Mar/2020 20:15:50] "GET / HTTP/1.1" 200 - +127.0.0.1 - - [24/Mar/2020 20:15:50] "GET / HTTP/1.1" 200 - +no FROM statement found + +goodstuff +``` + +Signed-off-by: TomSweeneyRedHat +--- + imagebuildah/util.go | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/imagebuildah/util.go b/imagebuildah/util.go +index 29ea60970..5f14c9883 100644 +--- a/vendor/github.com/containers/buildah/imagebuildah/util.go ++++ b/vendor/github.com/containers/buildah/imagebuildah/util.go +@@ -14,6 +14,7 @@ import ( + + "github.com/containers/buildah" + "github.com/containers/storage/pkg/chrootarchive" ++ "github.com/containers/storage/pkg/ioutils" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +@@ -57,7 +58,7 @@ func downloadToDirectory(url, dir string) error { + } + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile +- if err := ioutil.WriteFile(dockerfile, body, 0600); err != nil { ++ if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil { + return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile) + } + } +@@ -75,7 +76,7 @@ func stdinToDirectory(dir string) error { + if err := chrootarchive.Untar(reader, dir, nil); err != nil { + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile +- if err := ioutil.WriteFile(dockerfile, b, 0600); err != nil { ++ if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil { + return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + } + } diff --git a/SOURCES/podman-CVE-2020-1726.patch b/SOURCES/podman-CVE-2020-1726.patch new file mode 100644 index 0000000..47056cd --- /dev/null +++ b/SOURCES/podman-CVE-2020-1726.patch @@ -0,0 +1,100 @@ +From c140ecdc9b416ab4efd4d21d14acd63b6adbdd42 Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Mon, 10 Feb 2020 13:37:38 -0500 +Subject: [PATCH] Do not copy up when volume is not empty + +When Docker performs a copy up, it first verifies that the volume +being copied into is empty; thus, for volumes that have been +modified elsewhere (e.g. manually copying into then), the copy up +will not be performed at all. Duplicate this behavior in Podman +by checking if the volume is empty before copying. + +Furthermore, move setting copyup to false further up. This will +prevent a potential race where copy up could happen more than +once if Podman was killed after some files had been copied but +before the DB was updated. + +This resolves CVE-2020-1726. + +Signed-off-by: Matthew Heon +--- + libpod/container_internal.go | 28 ++++++++++++++++++++++------ + test/e2e/run_volume_test.go | 24 ++++++++++++++++++++++++ + 2 files changed, 46 insertions(+), 6 deletions(-) + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go.1801152 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go.1801152 2020-02-21 17:08:38.015363357 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go 2020-02-21 17:08:38.019363413 +0100 +@@ -1358,18 +1358,34 @@ func (c *Container) mountNamedVolume(v * + } + if vol.state.NeedsCopyUp { + logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name()) ++ ++ // Set NeedsCopyUp to false immediately, so we don't try this ++ // again when there are already files copied. 
++ vol.state.NeedsCopyUp = false ++ if err := vol.save(); err != nil { ++ return nil, err ++ } ++ ++ // If the volume is not empty, we should not copy up. ++ volMount := vol.MountPoint() ++ contents, err := ioutil.ReadDir(volMount) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID()) ++ } ++ if len(contents) > 0 { ++ // The volume is not empty. It was likely modified ++ // outside of Podman. For safety, let's not copy up into ++ // it. Fixes CVE-2020-1726. ++ return vol, nil ++ } ++ + srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest) + if err != nil { + return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name()) + } +- if err := c.copyWithTarFromImage(srcDir, vol.MountPoint()); err != nil && !os.IsNotExist(err) { ++ if err := c.copyWithTarFromImage(srcDir, volMount); err != nil && !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "error copying content from container %s into volume %s", c.ID(), vol.Name()) + } +- +- vol.state.NeedsCopyUp = false +- if err := vol.save(); err != nil { +- return nil, err +- } + } + return vol, nil + } +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go.1801152 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go.1801152 2020-02-21 17:08:38.042363735 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go 2020-02-21 17:08:38.046363791 +0100 +@@ -375,4 +375,28 @@ var _ = Describe("Podman run with volume + volMount.WaitWithDefaultTimeout() + Expect(volMount.ExitCode()).To(Not(Equal(0))) + }) ++ ++ It("Podman fix for CVE-2020-1726", func() { ++ volName := "testVol" ++ volCreate := podmanTest.Podman([]string{"volume", "create", volName}) ++ volCreate.WaitWithDefaultTimeout() ++ Expect(volCreate.ExitCode()).To(Equal(0)) ++ ++ volPath := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{.Mountpoint}}", volName}) ++ volPath.WaitWithDefaultTimeout() ++ Expect(volPath.ExitCode()).To(Equal(0)) ++ path := volPath.OutputToString() ++ ++ fileName := "thisIsATestFile" ++ file, err := os.Create(filepath.Join(path, fileName)) ++ Expect(err).To(BeNil()) ++ defer file.Close() ++ ++ runLs := podmanTest.Podman([]string{"run", "-t", "-i", "--rm", "-v", fmt.Sprintf("%v:/etc/ssl", volName), ALPINE, "ls", "-1", "/etc/ssl"}) ++ runLs.WaitWithDefaultTimeout() ++ Expect(runLs.ExitCode()).To(Equal(0)) ++ outputArr := runLs.OutputToStringArray() ++ Expect(len(outputArr)).To(Equal(1)) ++ Expect(strings.Contains(outputArr[0], fileName)).To(BeTrue()) ++ }) + }) diff --git a/SPECS/podman.spec b/SPECS/podman.spec new file mode 100644 index 0000000..92bf652 --- /dev/null +++ b/SPECS/podman.spec @@ -0,0 +1,754 @@ +%global with_debug 1 +%global with_check 0 + +%bcond_without varlink +%global gogenerate go generate + +%if 0%{?with_debug} +%global _find_debuginfo_dwz_opts %{nil} +%global _dwz_low_mem_die_limit 0 +%else +%global debug_package %{nil} +%endif + +%if 0%{?rhel} > 7 && ! 
0%{?fedora} +%define gobuild(o:) \ +go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -compressdwarf=false -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**}; +%endif + +%global provider github +%global provider_tld com +%global project containers +%global repo libpod +# https://github.com/containers/libpod +%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} +%global import_path %{provider_prefix} +%global git0 https://%{provider}.%{provider_tld}/%{project}/%{repo} +%global commit0 5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26 +%global shortcommit0 %(c=%{commit0}; echo ${c:0:7}) + +Name: podman +Version: 1.6.4 +Release: 14%{?dist} +Summary: Manage Pods, Containers and Container Images +License: ASL 2.0 +URL: https://%{name}.io/ +# Build fails with: No matching package to install: 'golang >= 1.12.12-4' on i686 +ExcludeArch: i686 +Source0: %{git0}/archive/%{commit0}/%{repo}-%{shortcommit0}.tar.gz +Patch0: https://patch-diff.githubusercontent.com/raw/containers/storage/pull/497.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2020-1702 +# https://github.com/containers/libpod/pull/5096.patch +Patch1: CVE-2020-1702-1801929.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1784950 +# backported: https://patch-diff.githubusercontent.com/raw/containers/buildah/pull/2031.patch +Patch2: podman-1784950.patch +# tracker bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2020-1726 +# backported: https://patch-diff.githubusercontent.com/raw/containers/libpod/pull/5168.patch +Patch3: podman-CVE-2020-1726.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1805212 +# backported: https://github.com/containers/libpod/pull/5348.patch +Patch4: podman-1805212.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1807310 +# patch: https://github.com/containers/libpod/pull/5349.patch +Patch5: podman-1807310.patch +# tracker bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2020-10696 +# backported: https://github.com/containers/buildah/commit/c61925b8936e93a5e900f91b653a846f7ea3a9ed.patch +Patch6: podman-CVE-2020-10696.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1868603 +# backported: https://github.com/containers/image/commit/81308749f70d6c40c6b0fea39ffe767bfe50da38.patch +# patch: https://github.com/mheon/libpod/commit/b65de0f71c33ae1d3558132261f159e321c8edf1.patch +Patch7: podman-1868603.patch +Provides: %{name}-manpages = %{version}-%{release} +Obsoletes: %{name}-manpages < %{version}-%{release} +BuildRequires: golang >= 1.12.12-4 +BuildRequires: glib2-devel +BuildRequires: glibc-devel +BuildRequires: glibc-static +BuildRequires: git +BuildRequires: go-md2man +BuildRequires: gpgme-devel +BuildRequires: libassuan-devel +BuildRequires: libgpg-error-devel +BuildRequires: libseccomp-devel +BuildRequires: libselinux-devel +BuildRequires: ostree-devel +BuildRequires: pkgconfig +BuildRequires: make +BuildRequires: systemd +BuildRequires: systemd-devel +Requires: containers-common >= 0.1.29-3 +Requires: containernetworking-plugins >= 0.8.1-1 +Requires: iptables +Requires: nftables +Requires: libseccomp >= 2.4.1 +Requires: conmon +Requires: container-selinux +Requires: slirp4netns >= 0.4.0-1 +Requires: runc >= 1.0.0-57 +Requires: fuse-overlayfs +Requires: libvarlink + +# vendored libraries +# awk '{print "Provides: bundled(golang("$1")) = "$2}' vendor.conf | sort +# [thanks to Carl George for 
containerd.spec] +Provides: bundled(golang(github.com/Azure/go-ansiterm)) = 19f72df4d05d31cbe1c56bfc8045c96babff6c7e +Provides: bundled(golang(github.com/blang/semver)) = v3.5.0 +Provides: bundled(golang(github.com/boltdb/bolt)) = master +Provides: bundled(golang(github.com/buger/goterm)) = 2f8dfbc7dbbff5dd1d391ed91482c24df243b2d3 +Provides: bundled(golang(github.com/BurntSushi/toml)) = v0.2.0 +Provides: bundled(golang(github.com/containerd/cgroups)) = 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0 +Provides: bundled(golang(github.com/containerd/continuity)) = master +#Provides: bundled(golang(github.com/containernetworking/cni)) = v0.7.0-alpha1 +Provides: bundled(golang(github.com/containernetworking/plugins)) = 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1 +Provides: bundled(golang(github.com/containers/image)) = 85d7559d44fd71f30e46e43d809bfbf88d11d916 +Provides: bundled(golang(github.com/containers/psgo)) = 5dde6da0bc8831b35243a847625bcf18183bd1ee +Provides: bundled(golang(github.com/containers/storage)) = 243c4cd616afdf06b4a975f18c4db083d26b1641 +Provides: bundled(golang(github.com/coreos/go-iptables)) = 25d087f3cffd9aedc0c2b7eff25f23cbf3c20fe1 +Provides: bundled(golang(github.com/coreos/go-systemd)) = v14 +Provides: bundled(golang(github.com/cri-o/ocicni)) = master +Provides: bundled(golang(github.com/cyphar/filepath-securejoin)) = v0.2.1 +Provides: bundled(golang(github.com/davecgh/go-spew)) = v1.1.0 +Provides: bundled(golang(github.com/docker/distribution)) = 7a8efe719e55bbfaff7bc5718cdf0ed51ca821df +Provides: bundled(golang(github.com/docker/docker)) = 86f080cff0914e9694068ed78d503701667c4c00 +Provides: bundled(golang(github.com/docker/docker-credential-helpers)) = d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 +Provides: bundled(golang(github.com/docker/go-connections)) = 3ede32e2033de7505e6500d6c868c2b9ed9f169d +Provides: bundled(golang(github.com/docker/go-units)) = v0.3.2 +Provides: bundled(golang(github.com/docker/libtrust)) = aabc10ec26b754e797f9028f4589c5b7bd90dc20 +Provides: bundled(golang(github.com/docker/spdystream)) = ed496381df8283605c435b86d4fdd6f4f20b8c6e +Provides: bundled(golang(github.com/fatih/camelcase)) = f6a740d52f961c60348ebb109adde9f4635d7540 +Provides: bundled(golang(github.com/fsnotify/fsnotify)) = 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1 +Provides: bundled(golang(github.com/fsouza/go-dockerclient)) = master +Provides: bundled(golang(github.com/ghodss/yaml)) = 04f313413ffd65ce25f2541bfd2b2ceec5c0908c +Provides: bundled(golang(github.com/godbus/dbus)) = a389bdde4dd695d414e47b755e95e72b7826432c +Provides: bundled(golang(github.com/gogo/protobuf)) = c0656edd0d9eab7c66d1eb0c568f9039345796f7 +Provides: bundled(golang(github.com/golang/glog)) = 23def4e6c14b4da8ac2ed8007337bc5eb5007998 +Provides: bundled(golang(github.com/golang/groupcache)) = b710c8433bd175204919eb38776e944233235d03 +Provides: bundled(golang(github.com/golang/protobuf)) = 4bd1920723d7b7c925de087aa32e2187708897f7 +Provides: bundled(golang(github.com/googleapis/gnostic)) = 0c5108395e2debce0d731cf0287ddf7242066aba +Provides: bundled(golang(github.com/google/gofuzz)) = 44d81051d367757e1c7c6a5a86423ece9afcf63c +Provides: bundled(golang(github.com/gorilla/context)) = v1.1 +Provides: bundled(golang(github.com/gorilla/mux)) = v1.3.0 +Provides: bundled(golang(github.com/hashicorp/errwrap)) = 7554cd9344cec97297fa6649b055a8c98c2a1e55 +Provides: bundled(golang(github.com/hashicorp/golang-lru)) = 0a025b7e63adc15a622f29b0b2c4c3848243bbf6 +Provides: bundled(golang(github.com/hashicorp/go-multierror)) = 
83588e72410abfbe4df460eeb6f30841ae47d4c4 +Provides: bundled(golang(github.com/imdario/mergo)) = 0.2.2 +Provides: bundled(golang(github.com/json-iterator/go)) = 1.0.0 +Provides: bundled(golang(github.com/kr/pty)) = v1.0.0 +Provides: bundled(golang(github.com/mailru/easyjson)) = 03f2033d19d5860aef995fe360ac7d395cd8ce65 +Provides: bundled(golang(github.com/mattn/go-runewidth)) = v0.0.1 +Provides: bundled(golang(github.com/Microsoft/go-winio)) = 78439966b38d69bf38227fbf57ac8a6fee70f69a +Provides: bundled(golang(github.com/Microsoft/hcsshim)) = 43f9725307998e09f2e3816c2c0c36dc98f0c982 +Provides: bundled(golang(github.com/mistifyio/go-zfs)) = v2.1.1 +Provides: bundled(golang(github.com/mrunalp/fileutils)) = master +Provides: bundled(golang(github.com/mtrmac/gpgme)) = b2432428689ca58c2b8e8dea9449d3295cf96fc9 +Provides: bundled(golang(github.com/Nvveen/Gotty)) = master +#Provides: bundled(golang(github.com/opencontainers/go-digest)) = v1.0.0-rc0 +Provides: bundled(golang(github.com/opencontainers/image-spec)) = v1.0.0 +Provides: bundled(golang(github.com/opencontainers/runc)) = b4e2ecb452d9ee4381137cc0a7e6715b96bed6de +Provides: bundled(golang(github.com/opencontainers/runtime-spec)) = d810dbc60d8c5aeeb3d054bd1132fab2121968ce +Provides: bundled(golang(github.com/opencontainers/runtime-tools)) = master +Provides: bundled(golang(github.com/opencontainers/selinux)) = b6fa367ed7f534f9ba25391cc2d467085dbb445a +Provides: bundled(golang(github.com/openshift/imagebuilder)) = master +Provides: bundled(golang(github.com/ostreedev/ostree-go)) = master +Provides: bundled(golang(github.com/pkg/errors)) = v0.8.0 +Provides: bundled(golang(github.com/pmezard/go-difflib)) = 792786c7400a136282c1664665ae0a8db921c6c2 +Provides: bundled(golang(github.com/pquerna/ffjson)) = d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +Provides: bundled(golang(github.com/projectatomic/buildah)) = af5bbde0180026ae87b7fc81c2dc124aa73ec959 +Provides: bundled(golang(github.com/seccomp/containers-golang)) = master +Provides: bundled(golang(github.com/seccomp/libseccomp-golang)) = v0.9.0 +Provides: bundled(golang(github.com/sirupsen/logrus)) = v1.0.0 +Provides: bundled(golang(github.com/spf13/pflag)) = 9ff6c6923cfffbcd502984b8e0c80539a94968b7 +Provides: bundled(golang(github.com/stretchr/testify)) = 4d4bfba8f1d1027c4fdbe371823030df51419987 +Provides: bundled(golang(github.com/syndtr/gocapability)) = e7cb7fa329f456b3855136a2642b197bad7366ba +Provides: bundled(golang(github.com/tchap/go-patricia)) = v2.2.6 +Provides: bundled(golang(github.com/ulikunitz/xz)) = v0.5.4 +Provides: bundled(golang(github.com/ulule/deepcopier)) = master +Provides: bundled(golang(github.com/urfave/cli)) = 934abfb2f102315b5794e15ebc7949e4ca253920 +Provides: bundled(golang(github.com/varlink/go)) = master +Provides: bundled(golang(github.com/vbatts/tar-split)) = v0.10.2 +Provides: bundled(golang(github.com/vishvananda/netlink)) = master +Provides: bundled(golang(github.com/vishvananda/netns)) = master +Provides: bundled(golang(github.com/xeipuuv/gojsonpointer)) = master +Provides: bundled(golang(github.com/xeipuuv/gojsonreference)) = master +Provides: bundled(golang(github.com/xeipuuv/gojsonschema)) = master +Provides: bundled(golang(golang.org/x/crypto)) = 81e90905daefcd6fd217b62423c0908922eadb30 +Provides: bundled(golang(golang.org/x/net)) = c427ad74c6d7a814201695e9ffde0c5d400a7674 +Provides: bundled(golang(golang.org/x/sys)) = master +Provides: bundled(golang(golang.org/x/text)) = f72d8390a633d5dfb0cc84043294db9f6c935756 +Provides: bundled(golang(golang.org/x/time)) = 
f51c12702a4d776e4c1fa9b0fabab841babae631 +Provides: bundled(golang(google.golang.org/grpc)) = v1.0.4 +Provides: bundled(golang(gopkg.in/cheggaaa/pb.v1)) = v1.0.7 +Provides: bundled(golang(gopkg.in/inf.v0)) = v0.9.0 +Provides: bundled(golang(gopkg.in/mgo.v2)) = v2 +Provides: bundled(golang(gopkg.in/square/go-jose.v2)) = v2.1.3 +Provides: bundled(golang(gopkg.in/yaml.v2)) = v2 +Provides: bundled(golang(k8s.io/api)) = 5ce4aa0bf2f097f6021127b3d879eeda82026be8 +Provides: bundled(golang(k8s.io/apiextensions-apiserver)) = 1b31e26d82f1ec2e945c560790e98f34bb5f2e63 +Provides: bundled(golang(k8s.io/apimachinery)) = 616b23029fa3dc3e0ccefd47963f5651a6543d94 +Provides: bundled(golang(k8s.io/apiserver)) = 4d1163080139f1f9094baf8a3a6099e85e1867f6 +Provides: bundled(golang(k8s.io/client-go)) = 7cd1d3291b7d9b1e2d54d4b69eb65995eaf8888e +Provides: bundled(golang(k8s.io/kube-openapi)) = 275e2ce91dec4c05a4094a7b1daee5560b555ac9 +Provides: bundled(golang(k8s.io/utils)) = 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e + +%description +%{name} (Pod Manager) is a fully featured container engine that is a simple daemonless tool. %{name} provides a Docker-CLI comparable command line that eases the transition from other container engines and allows the management of pods, containers and images. Simply put: alias docker=%{name}. Most %{name} commands can be run as a regular user, without requiring additional privileges. + +%{name} uses Buildah(1) internally to create container images. Both tools share image (not container) storage, hence each can use or manipulate images (but not containers) created by the other. + +%{summary} +%{repo} Simple management tool for pods, containers and images + +%package docker +Summary: Emulate Docker CLI using %{name} +BuildArch: noarch +Requires: %{name} = %{version}-%{release} +Conflicts: docker +Conflicts: docker-latest +Conflicts: docker-ce +Conflicts: docker-ee +Conflicts: moby-engine + +%description docker +This package installs a script named docker that emulates the Docker CLI by +executes %{name} commands, it also creates links between all Docker CLI man +pages and %{name}. + +%package remote +Summary: (Experimental) Remote client for managing %{name} containers + +%description remote +Remote client for managing %{name} containers. + +This experimental remote client is under heavy development. Please do not +run %{name}-remote in production. + +%{name}-remote uses the varlink connection to connect to a %{name} client to +manage pods, containers and container images. %{name}-remote supports ssh +connections as well. + +%package tests +Summary: Tests for %{name} +Requires: %{name} = %{version}-%{release} +#Requires: bats (which RHEL8 doesn't have. If it ever does, un-comment this) +Requires: jq + +%description tests +%{summary} + +This package contains system tests for %{name} + +%prep +%autosetup -Sgit -n %{repo}-%{commit0} + +sed -i 's/install.bin: podman/install.bin:/' Makefile +sed -i 's/install.man: docs/install.man:/' Makefile +sed -i 's/install.remote: podman-remote/install.remote:/' Makefile +mv pkg/hooks/README.md pkg/hooks/README-hooks.md + +%build +export GO111MODULE=off +export GOPATH=$(pwd):$(pwd)/_build + +mkdir -p $(pwd)/_build +pushd $(pwd)/_build +mkdir -p src/%{provider}.%{provider_tld}/%{project} +ln -s ../../../../ src/%{import_path} +popd +ln -s vendor src + +rm -rf vendor/github.com/containers/storage/drivers/register/register_btrfs.go +%gogenerate ./cmd/%{name}/varlink/... 
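+# Note: %%gogenerate above runs "go generate" to regenerate the varlink Go API
+# bindings under cmd/%%{name}/varlink/ (from the io.podman.varlink interface
+# definition) so they are current before the binaries are built.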
+ +# build %%{name} +export BUILDTAGS="varlink systemd selinux seccomp btrfs_noversion exclude_graphdriver_devicemapper $(hack/libdm_tag.sh)" +%gobuild -o bin/%{name} %{import_path}/cmd/%{name} + +# build %%{name}-remote +export BUILDTAGS="remoteclient $BUILDTAGS" +%gobuild -o bin/%{name}-remote %{import_path}/cmd/%{name} + +%{__make} docs +./docs/dckrman.sh ./docs/build/man/* + +%install +install -dp %{buildroot}%{_unitdir} +PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \ + install.bin \ + install.remote \ + install.man \ + install.cni \ + install.systemd \ + install.completions + +# install libpod.conf +install -dp %{buildroot}%{_datadir}/containers +install -m 644 %{repo}.conf %{buildroot}%{_datadir}/containers + +# install docker-docs +install -dp %{buildroot}%{_mandir}/man1 +install -m 644 docs/build/man/docker*.1 -t %{buildroot}%{_mandir}/man1 + +# install docker symlink +install -m 755 docker %{buildroot}%{_bindir} + +# install test stuff +ln -s ./ ./vendor/src # ./vendor/src -> ./vendor +install -d -p %{buildroot}/%{_datadir}/%{name}/test/system +cp -pav test/system %{buildroot}/%{_datadir}/%{name}/test/ + +# do not include docker and podman-remote man pages in main package +for file in `find %{buildroot}%{_mandir}/man[15] -type f | sed "s,%{buildroot},," | grep -v -e remote -e docker`; do + echo "$file*" >> podman.file-list +done + +%check +%if 0%{?with_check} +# Since we aren't packaging up the vendor directory we need to link +# back to it somehow. Hack it up so that we can add the vendor +# directory from BUILD dir as a gopath to be searched when executing +# tests from the BUILDROOT dir. +ln -s ./ ./vendor/src # ./vendor/src -> ./vendor + +export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath} + +%if ! 
0%{?gotest:1} +%global gotest go test +%endif + +%gotest %{import_path}/cmd/%{name} +%gotest %{import_path}/libkpod +%gotest %{import_path}/libpod +%gotest %{import_path}/pkg/registrar +%endif + +%triggerpostun -- %{name} < 1.1 +%{_bindir}/%{name} system renumber +exit 0 + +#define license tag if not already defined +%{!?_licensedir:%global license %doc} + +%files -f podman.file-list +%license LICENSE +%doc README.md CONTRIBUTING.md pkg/hooks/README-hooks.md install.md code-of-conduct.md transfer.md +%{_bindir}/%{name} +%{_datadir}/bash-completion/completions/* +# By "owning" the site-functions dir, we don't need to Require zsh +%{_datadir}/zsh/site-functions +%{_datadir}/zsh/site-functions/* +%config(noreplace) %{_sysconfdir}/cni/net.d/87-%{name}-bridge.conflist +%{_datadir}/containers/%{repo}.conf +%{_unitdir}/io.%{name}.service +%{_unitdir}/io.%{name}.socket +%{_userunitdir}/io.%{name}.service +%{_userunitdir}/io.%{name}.socket +%{_usr}/lib/tmpfiles.d/%{name}.conf + +%files docker +%{_bindir}/docker +%{_mandir}/man1/docker*.1* + +%files remote +%{_bindir}/%{name}-remote +%{_mandir}/man1/%{name}-remote*.1* + +%files tests +%license LICENSE +%{_datadir}/%{name}/test + +%changelog +* Mon Aug 17 2020 Jindrich Novy - 1.6.4-14 +- fix "[2.0-8.3.0] Podman does not use --tmpdir when pulling an image" +- Related: #1868603 + +* Thu Jul 16 2020 Jindrich Novy - 1.6.4-13 +- exclude i686 arch +- Related: #1821193 + +* Fri Jun 26 2020 Jindrich Novy - 1.6.4-12 +- bump release to preserve upgrade path +- Related: #1821193 + +* Wed Apr 01 2020 Jindrich Novy - 1.6.4-11 +- fix "CVE-2020-10696 buildah: crafted input tar file may lead to local file overwriting during image build process" +- Resolves: #1819391 + +* Thu Mar 19 2020 Jindrich Novy - 1.6.4-10 +- use the full PR 5348 to fix "no route to host from inside container" +- Resolves: #1806899 + +* Fri Mar 06 2020 Jindrich Novy - 1.6.4-9 +- update fix for "podman (1.6.4) rhel 8.1 no route to host from inside container" +- Resolves: #1806899 + +* Fri Mar 06 2020 Jindrich Novy - 1.6.4-8 +- fix "[FJ8.2 Bug]: [REG]The "--group-add" option of "podman create" doesn't function." 
+- Resolves: #1808705 + +* Thu Feb 27 2020 Jindrich Novy - 1.6.4-7 +- fix "podman (1.6.4) rhel 8.1 no route to host from inside container" +- Resolves: #1806899 + +* Fri Feb 21 2020 Jindrich Novy - 1.6.4-6 +- fix "CVE-2020-1726 podman: incorrectly allows existing files in volumes to be overwritten by a container when it is created" +- Resolves: #1801572 + +* Wed Feb 19 2020 Jindrich Novy - 1.6.4-5 +- fix "Podman support for FIPS Mode requires a bind mount inside the container" +- Resolves: #1804193 + +* Mon Feb 17 2020 Jindrich Novy - 1.6.4-4 +- fix CVE-2020-1702 +- Resolves: #1801929 + +* Wed Jan 08 2020 Jindrich Novy - 1.6.4-3 +- merge podman-manpages with podman package and put man pages for + podman-remote to its dedicated subpackage +Resolves: #1788539 + +* Fri Jan 03 2020 Jindrich Novy - 1.6.4-2 +- apply fix for #1757845 +- Related: RHELPLAN-25139 + +* Wed Dec 11 2019 Jindrich Novy - 1.6.4-1 +- update to 1.6.4 +- Related: RHELPLAN-25139 + +* Mon Dec 09 2019 Jindrich Novy - 1.6.3-6 +- remove BR: device-mapper-devel, minor spec file changes +- Related: RHELPLAN-25139 + +* Sat Dec 07 2019 Jindrich Novy - 1.6.3-5 +- Ensure volumes reacquire locks on state refresh (thanks Matt Heon) +- Related: RHELPLAN-25139 + +* Fri Nov 29 2019 Jindrich Novy - 1.6.3-4 +- use the file events logger backend if systemd isn't available + (thanks to Giuseppe Scrivano) +- Related: RHELPLAN-25139 + +* Thu Nov 21 2019 Jindrich Novy - 1.6.3-3 +- require slirp4netns >= 0.4.0-1 +- Resolves: #1766774 + +* Tue Nov 19 2019 Jindrich Novy - 1.6.3-2 +- apply fix to not to fail gating tests: + don't parse the config for cgroup-manager default +- don't hang while on podman run --rm - bug 1767663 +- Related: RHELPLAN-25139 + +* Mon Nov 18 2019 Jindrich Novy - 1.6.3-1 +- update to podman 1.6.3 +- addresses CVE-2019-18466 +- Related: RHELPLAN-25139 + +* Fri Nov 08 2019 Jindrich Novy - 1.6.2-6 +- fix %%gobuild macro to not to ignore BUILDTAGS +- Related: RHELPLAN-25139 + +* Tue Nov 05 2019 Jindrich Novy - 1.6.2-5 +- use btrfs_noversion to really disable BTRFS support +- amend/reuse BUILDTAGS +- still keep device-mapper-devel BR otherwise build fails + despite dm support being disabled (build scripting invokes + pkg-config for devmapper which is shipped by the dm-devel + package) +- Related: RHELPLAN-25139 + +* Mon Nov 04 2019 Jindrich Novy - 1.6.2-4 +- disable BTRFS support +- Related: RHELPLAN-25139 + +* Mon Nov 04 2019 Jindrich Novy - 1.6.2-3 +- split podman and conmon packages +- drop BR: device-mapper-devel and update BRs in general +- Related: RHELPLAN-25139 + +* Fri Nov 01 2019 Jindrich Novy - 1.6.2-2 +- drop oci-systemd-hook requirement +- drop upstreamed CVE-2019-10214 patch +- Related: RHELPLAN-25139 + +* Tue Oct 29 2019 Jindrich Novy - 1.6.2-1 +- update to podman 1.6.2 + +* Wed Oct 16 2019 Jindrich Novy - 1.4.2-6 +- fix build with --nocheck (#1721394) +- escape commented out macros + +* Thu Sep 12 2019 Jindrich Novy - 1.4.2-5 +- Fix CVE-2019-10214 (#1734649). 
+ +* Tue Sep 03 2019 Jindrich Novy - 1.4.2-4 +- update to latest conmon (Resolves: #1743685) + +* Wed Aug 28 2019 Jindrich Novy - 1.4.2-3 +- update to v1.4.2-stable1 +- Resolves: #1741157 + +* Wed Jun 19 2019 Lokesh Mandvekar - 1.4.2-2 +- Resolves: #1669197, #1705763, #1737077, #1671622, #1723879, #1730281, +- Resolves: #1731117 +- built libpod v1.4.2-stable1 + +* Wed Jun 19 2019 Lokesh Mandvekar - 1.4.2-1 +- Resolves: #1721638 +- bump to v1.4.2 + +* Mon Jun 17 2019 Lokesh Mandvekar - 1.4.1-4 +- Resolves: #1720654 - update dep on libvarlink +- Resolves: #1721247 - enable fips mode + +* Mon Jun 17 2019 Lokesh Mandvekar - 1.4.1-3 +- Resolves: #1720654 - podman requires podman-manpages +- update dep on cni plugins >= 0.8.1-1 + +* Sat Jun 15 2019 Lokesh Mandvekar - 1.4.1-2 +- Resolves: #1720654 - podman-manpages obsoletes podman < 1.4.1-2 + +* Sat Jun 15 2019 Lokesh Mandvekar - 1.4.1-1 +- Resolves: #1720654 - bump to v1.4.1 +- bump conmon to v0.3.0 + +* Fri Jun 14 2019 Lokesh Mandvekar - 1.4.0-1 +- Resolves: #1720654 - bump to v1.4.0 + +* Fri Jun 07 2019 Lokesh Mandvekar - 1.3.2-2 +- Resolves: #1683217 - tests subpackage requires slirp4netns + +* Fri May 31 2019 Lokesh Mandvekar - 1.3.2-1 +- Resolves: #1707220 - bump to v1.3.2 +- built conmon v0.2.0 + +* Wed Apr 3 2019 Eduardo Santiago - 1.2.0-1.git3bd528e5 +- package system tests, zsh completion. Update CI tests to use new -tests pkg + +* Thu Feb 28 2019 Lokesh Mandvekar - 1.1.0-1.git006206a +- bump to v1.1.0 + +* Fri Feb 22 2019 Lokesh Mandvekar - 1.0.1-1.git2c74edd +- bump to v1.0.1 + +* Mon Feb 11 2019 Frantisek Kluknavsky - 1.0.0-2.git921f98f +- rebase + +* Tue Jan 15 2019 Frantisek Kluknavsky - 1.0.0-1.git82e8011 +- rebase to v1, yay! +- rebase conmon to 9b1f0a08285a7f74b21cc9b6bfd98a48905a7ba2 +- Resolves:#1623282 +- python interface removed, moved to https://github.com/containers/python-podman/ + +* Tue Dec 18 2018 Frantisek Kluknavsky - 0.12.1.2-4.git9551f6b +- re-enable debuginfo + +* Mon Dec 17 2018 Frantisek Kluknavsky - 0.12.1.2-3.git9551f6b +- python libraries added +- resolves: #1657180 + +* Mon Dec 17 2018 Frantisek Kluknavsky - 0.12.1.2-2.git9551f6b +- rebase + +* Mon Dec 17 2018 Frantisek Kluknavsky - 0.11.1.1-3.git594495d +- go tools not in scl anymore + +* Mon Nov 19 2018 Frantisek Kluknavsky - 0.11.1.1-2.git594495d +- fedora-like buildrequires go toolset + +* Sat Nov 17 2018 Lokesh Mandvekar - 0.11.1.1-1.git594495d +- Resolves: #1636230 - build with FIPS enabled golang toolchain +- bump to v0.11.1.1 +- built commit 594495d + +* Fri Nov 16 2018 Frantisek Kluknavsky - 0.11.1-3.gita4adfe5 +- podman-docker provides docker +- Resolves: #1650355 + +* Thu Nov 15 2018 Lumír Balhar - 0.11.1-2.gita4adfe5 +- Require platform-python-setuptools instead of python3-setuptools +- Resolves: rhbz#1650144 + +* Tue Nov 13 2018 Lokesh Mandvekar - 0.11.1-1.gita4adfe5 +- bump to v0.11.1 +- built libpod commit a4adfe5 +- built conmon from cri-o commit 464dba6 + +* Fri Oct 19 2018 Lokesh Mandvekar - 0.10.1.3-5.gitdb08685 +- Resolves: #1625384 - keep BR: device-mapper-devel but don't build with it +- not having device-mapper-devel seems to have brew not recognize %%{_unitdir} + +* Thu Oct 18 2018 Lokesh Mandvekar - 0.10.1.3-4.gitdb08685 +- Resolves: #1625384 - correctly add buildtags to remove devmapper + +* Thu Oct 18 2018 Lokesh Mandvekar - 0.10.1.3-3.gitdb08685 +- Resolves: #1625384 - build without device-mapper-devel (no podman support) and lvm2 + +* Wed Oct 17 2018 Lokesh Mandvekar - 0.10.1.3-2.gitdb08685 +- Resolves: #1625384 - depend on lvm2 
+ +* Wed Oct 17 2018 Lokesh Mandvekar - 0.10.1.3-1.gitdb08685 +- Resolves: #1640298 - update vendored buildah to allow building when there are +running containers +- bump to v0.10.1.3 +- built podman commit db08685 + +* Wed Oct 17 2018 Lokesh Mandvekar - 0.10.1.2-1.git2b4f8d1 +- Resolves: #1625378 +- bump to v0.10.1.2 +- built podman commit 2b4f8d1 + +* Tue Oct 16 2018 Lokesh Mandvekar - 0.10.1.1-1.git4bea3e9 +- bump to v0.10.1.1 +- built podman commit 4bea3e9 + +* Thu Oct 11 2018 Lokesh Mandvekar - 0.10.1-1.gite4a1553 +- bump podman to v0.10.1 +- built podman commit e4a1553 +- built conmon from cri-o commit a30f93c + +* Tue Oct 09 2018 Frantisek Kluknavsky - 0.9.3.1-4.git1cd906d +- rebased cri-o to 1.11.6 + +* Wed Sep 26 2018 Frantisek Kluknavsky - 0.9.3.1-3.git1cd906d +- rebase + +* Tue Sep 18 2018 Frantisek Kluknavsky - 0.9.2-2.git37a2afe +- rebase to podman 0.9.2 +- rebase to cri-o 0.11.4 + +* Tue Sep 11 2018 Frantisek Kluknavsky - 0.9.1.1-2.git123de30 +- rebase + +* Mon Aug 27 2018 Lokesh Mandvekar - 0.8.4-1.git9f9b8cf +- bump to v0.8.4 +- built commit 9f9b8cf +- upstream username changed from projectatomic to containers +- use containernetworking-plugins >= 0.7.3-5 + +* Mon Aug 13 2018 Lokesh Mandvekar - 0.8.2.1-2.git7a526bb +- Resolves: #1615607 - rebuild with gobuild tag 'no_openssl' + +* Sun Aug 12 2018 Dan Walsh - 0.8.2.1-1.git7a526bb +- Upstream 0.8.2.1 release +- Add support for podman-docker +Resolves: rhbz#1615104 + +* Fri Aug 10 2018 Lokesh Mandvekar - 0.8.2-1.dev.git8b2d38e +- Resolves: #1614710 - podman search name includes registry +- bump to v0.8.2-dev +- built libpod commit 8b2d38e +- built conmon from cri-o commit acc0ee7 + +* Wed Aug 8 2018 Dan Walsh - 0.8.1-2.git6b4ab2a +- Add recommends for slirp4netns and container-selinux + +* Tue Aug 07 2018 Lokesh Mandvekar - 0.8.1-2.git6b4ab2a +- bump to v0.8.1 +- use %%go{build,generate} instead of go build and go generate +- update go deps to use scl-ized builds +- No need for Makefile patch for python installs + +* Sat Aug 4 2018 Dan Walsh - 0.8.1-1.git6b4ab2a +- Bump to v0.8.1 + +* Wed Aug 1 2018 Dan Walsh - 0.7.4-2.git079121 +- podman should not require atomic-registries + +* Tue Jul 24 2018 Lokesh Mandvekar - 0.7.4-1.dev.git9a18681 +- bump to v0.7.4-dev +- built commit 9a18681 + +* Sat Jul 21 2018 Dan Walsh - 0.7.3-2.git079121 +- Turn on ostree support +- Upstream 0.7.3 + +* Sat Jul 14 2018 Dan Walsh - 0.7.2-2.git4ca4c5f +- Upstream 0.7.2 release + +* Wed Jul 11 2018 Frantisek Kluknavsky - 0.7.1-3.git84cfdb2 +- rebuilt + +* Wed Jul 11 2018 Frantisek Kluknavsky - 0.7.1-2.git84cfdb2 +- rebase to 84cfdb2 + +* Sun Jul 08 2018 Dan Walsh - 0.7.1-1.git802d4f2 +- Upstream 0.7.1 release + +* Mon Jun 25 2018 Lokesh Mandvekar - 0.6.4-2.gitd5beb2f +- disable devel and unittest subpackages +- include conditionals for rhel-8.0 + +* Fri Jun 22 2018 Dan Walsh - 0.6.4-1.gitd5beb2f +- do not compress debuginfo with dwz to support delve debugger + +* Mon Jun 04 2018 Lokesh Mandvekar - 0.6.1-3.git3e0ff12 +- do not compress debuginfo with dwz to support delve debugger + +* Mon Jun 04 2018 Lokesh Mandvekar - 0.6.1-2.git3e0ff12 +- bash completion shouldn't have shebang + +* Mon Jun 04 2018 Lokesh Mandvekar - 0.6.1-1.git3e0ff12 +- Resolves: #1584429 - drop capabilities when running a container as non-root +- bump to v0.6.1 +- built podman commit 3e0ff12 +- built conmon from cri-o commit 1c0c3b0 +- drop containernetworking-plugins subpackage, it's now split out into a standalone +package + +* Fri Apr 27 2018 Lokesh Mandvekar - 0.4.1-4.gitb51d327 
+- Resolves: #1572538 - build host-device and portmap plugins + +* Thu Apr 12 2018 Lokesh Mandvekar - 0.4.1-3.gitb51d327 +- correct dep on containernetworking-plugins + +* Thu Apr 12 2018 Lokesh Mandvekar - 0.4.1-2.gitb51d327 +- add containernetworking-plugins v0.7.0 as a subpackage (podman dep) +- release tag for the containernetworking-plugins is actually gotten from +podman release tag. + +* Wed Apr 11 2018 Lokesh Mandvekar - 0.4.1-1.gitb51d327 +- bump to v0.4.1 +- built commit b51d327 + +* Wed Mar 14 2018 Lokesh Mandvekar - 0.3.3-1.dev.gitbc358eb +- built podman commit bc358eb +- built conmon from cri-o commit 712f3b8 + +* Fri Mar 09 2018 baude - 0.3.2-1.gitf79a39a +- Release 0.3.2-1 + +* Sun Mar 04 2018 baude - 0.3.1-2.git98b95ff +- Correct RPM version + +* Fri Mar 02 2018 baude - 0.3.1-1-gitc187538 +- Release 0.3.1-1 + +* Sun Feb 25 2018 Peter Robinson 0.2.2-2.git525e3b1 +- Build on ARMv7 too (Fedora supports containers on that arch too) + +* Fri Feb 23 2018 baude - 0.2.2-1.git525e3b1 +- Release 0.2.2 + +* Fri Feb 16 2018 baude - 0.2.1-1.git3d0100b +- Release 0.2.1 + +* Wed Feb 14 2018 baude - 0.2-3.git3d0100b +- Add dep for atomic-registries + +* Tue Feb 13 2018 baude - 0.2-2.git3d0100b +- Add more 64bit arches +- Add containernetworking-cni dependancy +- Add iptables dependancy + +* Mon Feb 12 2018 baude - 0-2.1.git3d0100 +- Release 0.2 + +* Tue Feb 06 2018 Lokesh Mandvekar - 0-0.3.git367213a +- Resolves: #1541554 - first official build +- built commit 367213a + +* Fri Feb 02 2018 Lokesh Mandvekar - 0-0.2.git0387f69 +- built commit 0387f69 + +* Wed Jan 10 2018 Frantisek Kluknavsky - 0-0.1.gitc1b2278 +- First package for Fedora