From 4c4c1bf6efe34e29f1317c62455a1ecbedabbfa9 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Mar 31 2020 23:32:53 +0000 Subject: import podman-1.6.4-16.el7_8 --- diff --git a/.gitignore b/.gitignore index 8307c16..e444053 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1 @@ -SOURCES/conmon-8455ce1.tar.gz -SOURCES/libpod-b3f10c8.tar.gz +SOURCES/libpod-5cc9284.tar.gz diff --git a/.podman.metadata b/.podman.metadata index 368c696..d1ef31e 100644 --- a/.podman.metadata +++ b/.podman.metadata @@ -1,2 +1 @@ -64e40f340f3f708ccf7d6815b136fa4265838524 SOURCES/conmon-8455ce1.tar.gz -fc4faf79c56b697db59b15eca020768b997f6ebc SOURCES/libpod-b3f10c8.tar.gz +dd35f1a00ac7860feeaa77dd5a92bc7bb310b821 SOURCES/libpod-5cc9284.tar.gz diff --git a/SOURCES/5085.patch b/SOURCES/5085.patch new file mode 100644 index 0000000..2b04368 --- /dev/null +++ b/SOURCES/5085.patch @@ -0,0 +1,24 @@ +From 03faf97aa07f92712e4c5ffac3194ee27cec5aea Mon Sep 17 00:00:00 2001 +From: Daniel J Walsh +Date: Tue, 4 Feb 2020 13:12:01 -0500 +Subject: [PATCH] Close tarSource when finished using it + +Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1797599 + +Signed-off-by: Daniel J Walsh +--- + libpod/image/pull.go | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/libpod/image/pull.go b/libpod/image/pull.go +index 76294ba06f..fd359d5931 100644 +--- a/libpod/image/pull.go ++++ b/libpod/image/pull.go +@@ -126,6 +126,7 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. + if err != nil { + return nil, err + } ++ defer tarSource.Close() + manifest, err := tarSource.LoadTarManifest() + + if err != nil { diff --git a/SOURCES/bz1728242-1.patch b/SOURCES/bz1728242-1.patch deleted file mode 100644 index eaebb8d..0000000 --- a/SOURCES/bz1728242-1.patch +++ /dev/null @@ -1,211 +0,0 @@ -From bbe03e61a375416180432fbd9d00d23a7c2a4714 Mon Sep 17 00:00:00 2001 -From: Giuseppe Scrivano -Date: Mon, 8 Jul 2019 10:16:13 +0200 -Subject: [PATCH] cgroups: support creating cgroupsv2 paths - -drop the limitation of not supporting creating new cgroups v2 paths. -Every controller enabled /sys/fs/cgroup will be propagated down to the -created path. This won't work for rootless cgroupsv2, but it is not -an issue for now, as this code is used only by CRI-O. 
- -Signed-off-by: Giuseppe Scrivano ---- - pkg/cgroups/blkio.go | 2 +- - pkg/cgroups/cgroups.go | 52 +++++++++++++++++++++++++++++++++++++++++- - pkg/cgroups/cpu.go | 2 +- - pkg/cgroups/cpuset.go | 19 +++++++++------ - pkg/cgroups/memory.go | 2 +- - pkg/cgroups/pids.go | 3 --- - 6 files changed, 66 insertions(+), 14 deletions(-) - -diff --git a/pkg/cgroups/blkio.go b/pkg/cgroups/blkio.go -index ca9107d977..9c2a811d9e 100644 ---- a/pkg/cgroups/blkio.go -+++ b/pkg/cgroups/blkio.go -@@ -30,7 +30,7 @@ func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error - // Create the cgroup - func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { -- return false, fmt.Errorf("io create not implemented for cgroup v2") -+ return false, nil - } - return ctr.createCgroupDirectory(Blkio) - } -diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go -index d6c19212bc..1dad45d7f7 100644 ---- a/pkg/cgroups/cgroups.go -+++ b/pkg/cgroups/cgroups.go -@@ -149,6 +149,51 @@ func (c *CgroupControl) getCgroupv1Path(name string) string { - return filepath.Join(cgroupRoot, name, c.path) - } - -+// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers -+func createCgroupv2Path(path string) (Err error) { -+ content, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers") -+ if err != nil { -+ return errors.Wrapf(err, "read /sys/fs/cgroup/cgroup.controllers") -+ } -+ if !filepath.HasPrefix(path, "/sys/fs/cgroup") { -+ return fmt.Errorf("invalid cgroup path %s", path) -+ } -+ -+ res := "" -+ for i, c := range strings.Split(strings.TrimSpace(string(content)), " ") { -+ if i == 0 { -+ res = fmt.Sprintf("+%s", c) -+ } else { -+ res = res + fmt.Sprintf(" +%s", c) -+ } -+ } -+ resByte := []byte(res) -+ -+ current := "/sys/fs" -+ elements := strings.Split(path, "/") -+ for i, e := range elements[3:] { -+ current = filepath.Join(current, e) -+ if i > 0 { -+ if err := os.Mkdir(current, 0755); err != nil { -+ if !os.IsExist(err) { -+ return errors.Wrapf(err, "mkdir %s", path) -+ } -+ } else { -+ // If the directory was created, be sure it is not left around on errors. 
-+ defer func() { -+ if Err != nil { -+ os.Remove(current) -+ } -+ }() -+ } -+ } -+ if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), resByte, 0755); err != nil { -+ return errors.Wrapf(err, "write %s", filepath.Join(current, "cgroup.subtree_control")) -+ } -+ } -+ return nil -+} -+ - // initialize initializes the specified hierarchy - func (c *CgroupControl) initialize() (err error) { - createdSoFar := map[string]controllerHandler{} -@@ -161,6 +206,11 @@ func (c *CgroupControl) initialize() (err error) { - } - } - }() -+ if c.cgroup2 { -+ if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { -+ return errors.Wrapf(err, "error creating cgroup path %s", c.path) -+ } -+ } - for name, handler := range handlers { - created, err := handler.Create(c) - if err != nil { -@@ -341,7 +391,7 @@ func (c *CgroupControl) AddPid(pid int) error { - pidString := []byte(fmt.Sprintf("%d\n", pid)) - - if c.cgroup2 { -- p := filepath.Join(cgroupRoot, c.path, "tasks") -+ p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") - if err := ioutil.WriteFile(p, pidString, 0644); err != nil { - return errors.Wrapf(err, "write %s", p) - } -diff --git a/pkg/cgroups/cpu.go b/pkg/cgroups/cpu.go -index 8640d490e6..c9325946b4 100644 ---- a/pkg/cgroups/cpu.go -+++ b/pkg/cgroups/cpu.go -@@ -61,7 +61,7 @@ func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - // Create the cgroup - func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { -- return false, fmt.Errorf("cpu create not implemented for cgroup v2") -+ return false, nil - } - return ctr.createCgroupDirectory(CPU) - } -diff --git a/pkg/cgroups/cpuset.go b/pkg/cgroups/cpuset.go -index 9aef493c9f..25d2f7f769 100644 ---- a/pkg/cgroups/cpuset.go -+++ b/pkg/cgroups/cpuset.go -@@ -14,19 +14,23 @@ import ( - type cpusetHandler struct { - } - --func cpusetCopyFileFromParent(dir, file string) ([]byte, error) { -+func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { - if dir == cgroupRoot { - return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) - } - path := filepath.Join(dir, file) -- data, err := ioutil.ReadFile(path) -+ parentPath := path -+ if cgroupv2 { -+ parentPath = fmt.Sprintf("%s.effective", parentPath) -+ } -+ data, err := ioutil.ReadFile(parentPath) - if err != nil { - return nil, errors.Wrapf(err, "open %s", path) - } - if len(strings.Trim(string(data), "\n")) != 0 { - return data, nil - } -- data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file) -+ data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) - if err != nil { - return nil, err - } -@@ -36,9 +40,9 @@ func cpusetCopyFileFromParent(dir, file string) ([]byte, error) { - return data, nil - } - --func cpusetCopyFromParent(path string) error { -+func cpusetCopyFromParent(path string, cgroupv2 bool) error { - for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { -- if _, err := cpusetCopyFileFromParent(path, file); err != nil { -+ if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { - return err - } - } -@@ -60,14 +64,15 @@ func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) erro - // Create the cgroup - func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { -- return false, fmt.Errorf("cpuset create not implemented for cgroup v2") -+ path := filepath.Join(cgroupRoot, ctr.path) -+ return true, cpusetCopyFromParent(path, true) - } - - created, err := 
ctr.createCgroupDirectory(CPUset) - if !created || err != nil { - return created, err - } -- return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset)) -+ return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) - } - - // Destroy the cgroup -diff --git a/pkg/cgroups/memory.go b/pkg/cgroups/memory.go -index 0505eac409..80e88d17c4 100644 ---- a/pkg/cgroups/memory.go -+++ b/pkg/cgroups/memory.go -@@ -26,7 +26,7 @@ func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - // Create the cgroup - func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { -- return false, fmt.Errorf("memory create not implemented for cgroup v2") -+ return false, nil - } - return ctr.createCgroupDirectory(Memory) - } -diff --git a/pkg/cgroups/pids.go b/pkg/cgroups/pids.go -index c90dc1c020..ffbde100dd 100644 ---- a/pkg/cgroups/pids.go -+++ b/pkg/cgroups/pids.go -@@ -35,9 +35,6 @@ func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - - // Create the cgroup - func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { -- if ctr.cgroup2 { -- return false, fmt.Errorf("pid create not implemented for cgroup v2") -- } - return ctr.createCgroupDirectory(Pids) - } - diff --git a/SOURCES/bz1728242-2.patch b/SOURCES/bz1728242-2.patch deleted file mode 100644 index 34eccfc..0000000 --- a/SOURCES/bz1728242-2.patch +++ /dev/null @@ -1,46 +0,0 @@ -From b0c2bb996276a706585d1a3eebcaa0b687715b5a Mon Sep 17 00:00:00 2001 -From: Giuseppe Scrivano -Date: Tue, 9 Jul 2019 18:42:35 +0200 -Subject: [PATCH] cgroups: skip not existing cpuacct files - -if the cpuacct file doesn't exist, ignore it instead of erroring out. - -Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1728242 - -Signed-off-by: Giuseppe Scrivano ---- - pkg/cgroups/cpu.go | 15 ++++++++++++--- - 1 file changed, 12 insertions(+), 3 deletions(-) - -diff --git a/pkg/cgroups/cpu.go b/pkg/cgroups/cpu.go -index c9325946b4..1c8610cc45 100644 ---- a/pkg/cgroups/cpu.go -+++ b/pkg/cgroups/cpu.go -@@ -98,15 +98,24 @@ func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { - } else { - usage.Total, err = readAcct(ctr, "cpuacct.usage") - if err != nil { -- return err -+ if !os.IsNotExist(errors.Cause(err)) { -+ return err -+ } -+ usage.Total = 0 - } - usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") - if err != nil { -- return err -+ if !os.IsNotExist(errors.Cause(err)) { -+ return err -+ } -+ usage.Kernel = 0 - } - usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") - if err != nil { -- return err -+ if !os.IsNotExist(errors.Cause(err)) { -+ return err -+ } -+ usage.PerCPU = nil - } - } - m.CPU = CPUMetrics{Usage: usage} diff --git a/SOURCES/podman-1775647.patch b/SOURCES/podman-1775647.patch new file mode 100644 index 0000000..36b3796 --- /dev/null +++ b/SOURCES/podman-1775647.patch @@ -0,0 +1,500 @@ +From 25cc43c376c5ddfa70a6009526f8f03b5235c2c6 Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Mon, 11 Nov 2019 09:52:13 -0500 +Subject: [PATCH 1/2] Add ContainerStateRemoving + +When Libpod removes a container, there is the possibility that +removal will not fully succeed. The most notable problems are +storage issues, where the container cannot be removed from +c/storage. + +When this occurs, we were faced with a choice. We can keep the +container in the state, appearing in `podman ps` and available for +other API operations, but likely unable to do any of them as it's +been partially removed. 
Or we can remove it very early and clean +up after it's already gone. We have, until now, used the second +approach. + +The problem that arises is intermittent problems removing +storage. We end up removing a container, failing to remove its +storage, and ending up with a container permanently stuck in +c/storage that we can't remove with the normal Podman CLI, can't +use the name of, and generally can't interact with. A notable +cause is when Podman is hit by a SIGKILL midway through removal, +which can consistently cause `podman rm` to fail to remove +storage. + +We now add a new state for containers that are in the process of +being removed, ContainerStateRemoving. We set this at the +beginning of the removal process. It notifies Podman that the +container cannot be used anymore, but preserves it in the DB +until it is fully removed. This will allow Remove to be run on +these containers again, which should successfully remove storage +if it fails. + +Fixes #3906 + +Signed-off-by: Matthew Heon +--- + cmd/podman/shared/container.go | 2 + + libpod/container_api.go | 17 +++++- + libpod/container_internal.go | 5 +- + libpod/container_internal_linux.go | 5 +- + libpod/define/containerstate.go | 7 +++ + libpod/options.go | 84 +++++------------------------- + libpod/runtime_ctr.go | 55 +++++++++++-------- + libpod/util.go | 25 +++++++++ + 8 files changed, 105 insertions(+), 95 deletions(-) + +diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go +index f49943477f..0a2e96cf7f 100644 +--- a/cmd/podman/shared/container.go ++++ b/cmd/podman/shared/container.go +@@ -195,6 +195,8 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput + status = "Paused" + case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String(): + status = "Created" ++ case define.ContainerStateRemoving.String(): ++ status = "Removing" + default: + status = "Error" + } +diff --git a/libpod/container_api.go b/libpod/container_api.go +index b8cfe02f6f..153a1d628e 100644 +--- a/libpod/container_api.go ++++ b/libpod/container_api.go +@@ -404,6 +404,11 @@ func (c *Container) Mount() (string, error) { + return "", err + } + } ++ ++ if c.state.State == define.ContainerStateRemoving { ++ return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) ++ } ++ + defer c.newContainerEvent(events.Mount) + return c.mount() + } +@@ -488,7 +493,12 @@ func (c *Container) Export(path string) error { + return err + } + } +- defer c.newContainerEvent(events.Export) ++ ++ if c.state.State == define.ContainerStateRemoving { ++ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) ++ } ++ ++ defer c.newContainerEvent(events.Mount) + return c.export(path) + } + +@@ -674,6 +684,10 @@ func (c *Container) Refresh(ctx context.Context) error { + } + } + ++ if c.state.State == define.ContainerStateRemoving { ++ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed") ++ } ++ + wasCreated := false + if c.state.State == define.ContainerStateCreated { + wasCreated = true +@@ -819,7 +833,6 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO + return err + } + } +- defer c.newContainerEvent(events.Checkpoint) + return c.checkpoint(ctx, options) + } + +diff --git a/libpod/container_internal.go b/libpod/container_internal.go +index 4ff1913b52..1e8a8a5808 100644 +--- a/libpod/container_internal.go ++++ 
b/libpod/container_internal.go +@@ -719,7 +719,8 @@ func (c *Container) isStopped() (bool, error) { + if err != nil { + return true, err + } +- return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil ++ ++ return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil + } + + // save container state to the database +@@ -1057,6 +1058,8 @@ func (c *Container) initAndStart(ctx context.Context) (err error) { + // If we are ContainerStateUnknown, throw an error + if c.state.State == define.ContainerStateUnknown { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID()) ++ } else if c.state.State == define.ContainerStateRemoving { ++ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID()) + } + + // If we are running, do nothing +diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go +index 26d6771b0f..aca7bdc67a 100644 +--- a/libpod/container_internal_linux.go ++++ b/libpod/container_internal_linux.go +@@ -21,6 +21,7 @@ import ( + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/buildah/pkg/secrets" + "github.com/containers/libpod/libpod/define" ++ "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/annotations" + "github.com/containers/libpod/pkg/apparmor" + "github.com/containers/libpod/pkg/cgroups" +@@ -695,6 +696,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO + return err + } + ++ defer c.newContainerEvent(events.Checkpoint) ++ + if options.TargetFile != "" { + if err = c.exportCheckpoint(options.TargetFile, options.IgnoreRootfs); err != nil { + return err +@@ -766,7 +769,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti + return err + } + +- if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) { ++ if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) + } + +diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go +index ab2527b3ee..e7d258e214 100644 +--- a/libpod/define/containerstate.go ++++ b/libpod/define/containerstate.go +@@ -25,6 +25,9 @@ const ( + // ContainerStateExited indicates the the container has stopped and been + // cleaned up + ContainerStateExited ContainerStatus = iota ++ // ContainerStateRemoving indicates the container is in the process of ++ // being removed. 
++ ContainerStateRemoving ContainerStatus = iota + ) + + // ContainerStatus returns a string representation for users +@@ -45,6 +48,8 @@ func (t ContainerStatus) String() string { + return "paused" + case ContainerStateExited: + return "exited" ++ case ContainerStateRemoving: ++ return "removing" + } + return "bad state" + } +@@ -67,6 +72,8 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { + return ContainerStatePaused, nil + case ContainerStateExited.String(): + return ContainerStateExited, nil ++ case ContainerStateRemoving.String(): ++ return ContainerStateRemoving, nil + default: + return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) + } +diff --git a/libpod/options.go b/libpod/options.go +index bfbbb9e2da..19c776cf06 100644 +--- a/libpod/options.go ++++ b/libpod/options.go +@@ -768,16 +768,8 @@ func WithIPCNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + ctr.config.IPCNsCtr = nsCtr.ID() +@@ -796,16 +788,8 @@ func WithMountNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + ctr.config.MountNsCtr = nsCtr.ID() +@@ -824,22 +808,14 @@ func WithNetNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + if ctr.config.CreateNetNS { + return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns") + } + +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) +- } +- + ctr.config.NetNsCtr = nsCtr.ID() + + return nil +@@ -856,16 +832,8 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + if 
ctr.config.NoCgroups { +@@ -888,16 +856,8 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + ctr.config.UserNsCtr = nsCtr.ID() +@@ -917,16 +877,8 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + ctr.config.UTSNsCtr = nsCtr.ID() +@@ -945,16 +897,8 @@ func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption { + return define.ErrCtrFinalized + } + +- if !nsCtr.valid { +- return define.ErrCtrRemoved +- } +- +- if nsCtr.ID() == ctr.ID() { +- return errors.Wrapf(define.ErrInvalidArg, "must specify another container") +- } +- +- if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { +- return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) ++ if err := checkDependencyContainer(nsCtr, ctr); err != nil { ++ return err + } + + ctr.config.CgroupNsCtr = nsCtr.ID() +diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go +index 7069d34940..ae401013c8 100644 +--- a/libpod/runtime_ctr.go ++++ b/libpod/runtime_ctr.go +@@ -489,32 +489,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, + } + } + +- var cleanupErr error +- // Remove the container from the state +- if c.config.Pod != "" { +- // If we're removing the pod, the container will be evicted +- // from the state elsewhere +- if !removePod { +- if err := r.state.RemoveContainerFromPod(pod, c); err != nil { +- cleanupErr = err +- } +- } +- } else { +- if err := r.state.RemoveContainer(c); err != nil { +- cleanupErr = err +- } ++ // Set ContainerStateRemoving and remove exec sessions ++ c.state.State = define.ContainerStateRemoving ++ c.state.ExecSessions = nil ++ ++ if err := c.save(); err != nil { ++ return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) + } + +- // Set container as invalid so it can no longer be used +- c.valid = false ++ var cleanupErr error + + // Clean up network namespace, cgroups, mounts + if err := c.cleanup(ctx); err != nil { +- if cleanupErr == nil { +- cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) +- } else { +- logrus.Errorf("cleanup network, cgroups, mounts: %v", err) +- } ++ cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) + } + + // Stop the container's storage +@@ -540,6 +527,29 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, + } + } + ++ // Remove the container from the state ++ if c.config.Pod != "" { 
++ // If we're removing the pod, the container will be evicted ++ // from the state elsewhere ++ if !removePod { ++ if err := r.state.RemoveContainerFromPod(pod, c); err != nil { ++ if cleanupErr == nil { ++ cleanupErr = err ++ } else { ++ logrus.Errorf("Error removing container %s from database: %v", c.ID(), err) ++ } ++ } ++ } ++ } else { ++ if err := r.state.RemoveContainer(c); err != nil { ++ if cleanupErr == nil { ++ cleanupErr = err ++ } else { ++ logrus.Errorf("Error removing container %s from database: %v", c.ID(), err) ++ } ++ } ++ } ++ + // Deallocate the container's lock + if err := c.lock.Free(); err != nil { + if cleanupErr == nil { +@@ -549,6 +559,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, + } + } + ++ // Set container as invalid so it can no longer be used ++ c.valid = false ++ + c.newContainerEvent(events.Remove) + + if !removeVolume { +diff --git a/libpod/util.go b/libpod/util.go +index bae2f4eb83..30e5cd4c33 100644 +--- a/libpod/util.go ++++ b/libpod/util.go +@@ -206,3 +206,28 @@ func DefaultSeccompPath() (string, error) { + } + return config.SeccompDefaultPath, nil + } ++ ++// CheckDependencyContainer verifies the given container can be used as a ++// dependency of another container. ++// Both the dependency to check and the container that will be using the ++// dependency must be passed in. ++// It is assumed that ctr is locked, and depCtr is unlocked. ++func checkDependencyContainer(depCtr, ctr *Container) error { ++ state, err := depCtr.State() ++ if err != nil { ++ return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID()) ++ } ++ if state == define.ContainerStateRemoving { ++ return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID()) ++ } ++ ++ if depCtr.ID() == ctr.ID() { ++ return errors.Wrapf(define.ErrInvalidArg, "must specify another container") ++ } ++ ++ if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod { ++ return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID()) ++ } ++ ++ return nil ++} + +From 6c405b5fbcc83ba49c187087eb4e1ccc1a7ff147 Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Mon, 11 Nov 2019 14:36:00 -0500 +Subject: [PATCH 2/2] Error on netns not exist only when ctr is running + +If the container is running and we need to get its netns and +can't, that is a serious bug deserving of errors. + +If it's not running, that's not really a big deal. Log an error +and continue. 
+ +Signed-off-by: Matthew Heon +--- + libpod/boltdb_state_linux.go | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go +index 09a9be6067..6ccda71bd5 100644 +--- a/libpod/boltdb_state_linux.go ++++ b/libpod/boltdb_state_linux.go +@@ -3,6 +3,8 @@ + package libpod + + import ( ++ "github.com/containers/libpod/libpod/define" ++ "github.com/pkg/errors" + "github.com/sirupsen/logrus" + ) + +@@ -25,8 +27,12 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er + if err == nil { + newState.NetNS = ns + } else { ++ if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { ++ return errors.Wrapf(err, "error joning network namespace of container %s", ctr.ID()) ++ } ++ + logrus.Errorf("error joining network namespace for container %s: %v", ctr.ID(), err) +- ctr.valid = false ++ ctr.state.NetNS = nil + } + } + } else { diff --git a/SOURCES/podman-1784950.patch b/SOURCES/podman-1784950.patch new file mode 100644 index 0000000..8d4ed08 --- /dev/null +++ b/SOURCES/podman-1784950.patch @@ -0,0 +1,145 @@ +From fb7d2b6bd6a16ffdbe4a69428e3ba5b487719e78 Mon Sep 17 00:00:00 2001 +From: Daniel J Walsh +Date: Tue, 17 Dec 2019 15:24:29 -0500 +Subject: [PATCH] Add support for FIPS-Mode backends + +If host is running in fips mode, then RHEL8.2 and beyond container images +will come with a directory /usr/share/crypto-policies/back-ends/FIPS. +This directory needs to be bind mounted over /etc/crypto-policies/back-ends in +order to make all tools in the container follow the FIPS Mode rules. + +Signed-off-by: Daniel J Walsh +--- + pkg/secrets/secrets.go | 48 +++++++++++++++++++++++++++++++++--------- + run_linux.go | 2 +- + 2 files changed, 39 insertions(+), 11 deletions(-) + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go.1784950 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go.1784950 2020-02-19 16:11:04.224932088 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/pkg/secrets/secrets.go 2020-02-19 16:11:04.226932116 +0100 +@@ -148,12 +148,21 @@ func getMountsMap(path string) (string, + } + + // SecretMounts copies, adds, and mounts the secrets to the container root filesystem ++// Deprecated, Please use SecretMountWithUIDGID + func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless, disableFips bool) []rspec.Mount { + return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless, disableFips) + } + +-// SecretMountsWithUIDGID specifies the uid/gid of the owner +-func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless, disableFips bool) []rspec.Mount { ++// SecretMountsWithUIDGID copies, adds, and mounts the secrets to the container root filesystem ++// mountLabel: MAC/SELinux label for container content ++// containerWorkingDir: Private data for storing secrets on the host mounted in container. ++// mountFile: Additional mount points required for the container. 
++// mountPoint: Container image mountpoint ++// uid: to assign to content created for secrets ++// gid: to assign to content created for secrets ++// rootless: indicates whether container is running in rootless mode ++// disableFips: indicates whether system should ignore fips mode ++func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { + var ( + secretMounts []rspec.Mount + mountFiles []string +@@ -171,7 +180,7 @@ func SecretMountsWithUIDGID(mountLabel, + } + for _, file := range mountFiles { + if _, err := os.Stat(file); err == nil { +- mounts, err := addSecretsFromMountsFile(file, mountLabel, containerWorkingDir, mountPrefix, uid, gid) ++ mounts, err := addSecretsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid) + if err != nil { + logrus.Warnf("error mounting secrets, skipping entry in %s: %v", file, err) + } +@@ -187,7 +196,7 @@ func SecretMountsWithUIDGID(mountLabel, + // Add FIPS mode secret if /etc/system-fips exists on the host + _, err := os.Stat("/etc/system-fips") + if err == nil { +- if err := addFIPSModeSecret(&secretMounts, containerWorkingDir, mountPrefix, mountLabel, uid, gid); err != nil { ++ if err := addFIPSModeSecret(&secretMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil { + logrus.Errorf("error adding FIPS mode secret to container: %v", err) + } + } else if os.IsNotExist(err) { +@@ -206,7 +215,7 @@ func rchown(chowndir string, uid, gid in + + // addSecretsFromMountsFile copies the contents of host directory to container directory + // and returns a list of mounts +-func addSecretsFromMountsFile(filePath, mountLabel, containerWorkingDir, mountPrefix string, uid, gid int) ([]rspec.Mount, error) { ++func addSecretsFromMountsFile(filePath, mountLabel, containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) { + var mounts []rspec.Mount + defaultMountsPaths := getMounts(filePath) + for _, path := range defaultMountsPaths { +@@ -285,7 +294,7 @@ func addSecretsFromMountsFile(filePath, + } + + m := rspec.Mount{ +- Source: filepath.Join(mountPrefix, ctrDirOrFile), ++ Source: ctrDirOrFileOnHost, + Destination: ctrDirOrFile, + Type: "bind", + Options: []string{"bind", "rprivate"}, +@@ -300,15 +309,15 @@ func addSecretsFromMountsFile(filePath, + // root filesystem if /etc/system-fips exists on hosts. + // This enables the container to be FIPS compliant and run openssl in + // FIPS mode as the host is also in FIPS mode. 
+-func addFIPSModeSecret(mounts *[]rspec.Mount, containerWorkingDir, mountPrefix, mountLabel string, uid, gid int) error { ++func addFIPSModeSecret(mounts *[]rspec.Mount, containerWorkingDir, mountPoint, mountLabel string, uid, gid int) error { + secretsDir := "/run/secrets" + ctrDirOnHost := filepath.Join(containerWorkingDir, secretsDir) + if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) { + if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { +- return errors.Wrapf(err, "making container directory on host failed") ++ return errors.Wrapf(err, "making container directory %q on host failed", ctrDirOnHost) + } + if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { +- return errors.Wrap(err, "error applying correct labels") ++ return errors.Wrapf(err, "error applying correct labels on %q", ctrDirOnHost) + } + } + fipsFile := filepath.Join(ctrDirOnHost, "system-fips") +@@ -323,7 +332,7 @@ func addFIPSModeSecret(mounts *[]rspec.M + + if !mountExists(*mounts, secretsDir) { + m := rspec.Mount{ +- Source: filepath.Join(mountPrefix, secretsDir), ++ Source: ctrDirOnHost, + Destination: secretsDir, + Type: "bind", + Options: []string{"bind", "rprivate"}, +@@ -331,6 +340,25 @@ func addFIPSModeSecret(mounts *[]rspec.M + *mounts = append(*mounts, m) + } + ++ srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS" ++ destDir := "/etc/crypto-policies/back-ends" ++ srcOnHost := filepath.Join(mountPoint, srcBackendDir) ++ if _, err := os.Stat(srcOnHost); err != nil { ++ if os.IsNotExist(err) { ++ return nil ++ } ++ return errors.Wrapf(err, "failed to stat FIPS Backend directory %q", ctrDirOnHost) ++ } ++ ++ if !mountExists(*mounts, destDir) { ++ m := rspec.Mount{ ++ Source: srcOnHost, ++ Destination: destDir, ++ Type: "bind", ++ Options: []string{"bind", "rprivate"}, ++ } ++ *mounts = append(*mounts, m) ++ } + return nil + } + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go.1784950 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go.1784950 2020-02-19 16:11:04.197931712 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/containers/buildah/run_linux.go 2020-02-19 16:11:04.200931754 +0100 +@@ -460,7 +460,7 @@ func (b *Builder) setupMounts(mountPoint + } + + // Get the list of secrets mounts. +- secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless(), false) ++ secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) + + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. 
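
Note on podman-1784950.patch above: on a FIPS-enabled host (where /etc/system-fips exists), the patch bind-mounts the image's /usr/share/crypto-policies/back-ends/FIPS directory over /etc/crypto-policies/back-ends inside the container, so crypto tools in the container follow the host's FIPS policy. A minimal standalone Go sketch of that host/image check follows; it is illustrative only — the mountPoint value and the printed output are hypothetical, and this is not the patched buildah code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fipsBackendMount reports the bind mount the FIPS patch would add:
// source = <image mount point>/usr/share/crypto-policies/back-ends/FIPS,
// destination = /etc/crypto-policies/back-ends, but only when the host is
// in FIPS mode and the image actually ships the FIPS back-ends directory.
func fipsBackendMount(mountPoint string) (src, dst string, ok bool, err error) {
	if _, statErr := os.Stat("/etc/system-fips"); statErr != nil {
		if os.IsNotExist(statErr) {
			return "", "", false, nil // host is not in FIPS mode
		}
		return "", "", false, statErr
	}
	src = filepath.Join(mountPoint, "usr/share/crypto-policies/back-ends/FIPS")
	if _, statErr := os.Stat(src); statErr != nil {
		if os.IsNotExist(statErr) {
			return "", "", false, nil // image does not ship FIPS back-ends
		}
		return "", "", false, statErr
	}
	return src, "/etc/crypto-policies/back-ends", true, nil
}

func main() {
	// Hypothetical mount point; a real caller would pass the container's
	// c/storage mount point for the image root filesystem.
	src, dst, ok, err := fipsBackendMount("/var/lib/containers/storage/overlay/ID/merged")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if ok {
		fmt.Printf("bind mount %s over %s (options: bind,rprivate)\n", src, dst)
	}
}

The actual patch performs the same two stat checks and then appends an OCI runtime-spec bind mount to the container's mount list, as shown in the addFIPSModeSecret hunk above.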
diff --git a/SOURCES/podman-1792243.patch b/SOURCES/podman-1792243.patch new file mode 100644 index 0000000..26b1268 --- /dev/null +++ b/SOURCES/podman-1792243.patch @@ -0,0 +1,12 @@ +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/mtrmac/gpgme/gpgme.go.1792243 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/mtrmac/gpgme/gpgme.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/mtrmac/gpgme/gpgme.go.1792243 2020-01-20 14:15:24.017928970 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/vendor/github.com/mtrmac/gpgme/gpgme.go 2020-01-20 14:15:24.019928990 +0100 +@@ -1,7 +1,7 @@ + // Package gpgme provides a Go wrapper for the GPGME library + package gpgme + +-// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error ++// #cgo LDFLAGS: -lgpgme-pthread -lassuan -lgpg-error + // #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64 + // #include + // #include diff --git a/SOURCES/podman-1805212.patch b/SOURCES/podman-1805212.patch new file mode 100644 index 0000000..4617599 --- /dev/null +++ b/SOURCES/podman-1805212.patch @@ -0,0 +1,51 @@ +From 6c97e0d5c140d587e5477d478159e91b8adcfd15 Mon Sep 17 00:00:00 2001 +From: Brent Baude +Date: Thu, 27 Feb 2020 14:39:31 -0600 +Subject: [PATCH 2/2] network create should use firewall plugin + +when creating a network, podman should add the firewall plugin to the config but not specify a backend. this will allow cni to determine whether it should use an iptables|firewalld backend. + +Signed-off-by: Brent Baude +--- + pkg/adapter/network.go | 1 + + pkg/network/netconflist.go | 1 - + 2 files changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pkg/network/netconflist.go b/pkg/network/netconflist.go +index a8217097ac..34ff000249 100644 +--- a/pkg/network/netconflist.go ++++ b/pkg/network/netconflist.go +@@ -110,7 +110,6 @@ func NewPortMapPlugin() PortMapConfig { + func NewFirewallPlugin() FirewallConfig { + return FirewallConfig{ + PluginType: "firewall", +- Backend: "iptables", + } + } + + +From cfd40608907b653a8b05f2e4f4243f8aa677b6e3 Mon Sep 17 00:00:00 2001 +From: Brent Baude +Date: Thu, 27 Feb 2020 14:35:48 -0600 +Subject: [PATCH 1/2] add firewall plugin (no backend) to default cni config + +in order for the fall back mechanisms to work in containernetworking-plugins, the firewall plugin must still be called via the cni configuration file. however, no backend w + +Signed-off-by: Brent Baude +--- + cni/87-podman-bridge.conflist | 3 +++ + 1 file changed, 3 insertions(+) + +diff -up a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist +--- a/cni/87-podman-bridge.conflist ++++ b/cni/87-podman-bridge.conflist +@@ -31,8 +31,7 @@ + } + }, + { +- "type": "firewall", +- "backend": "iptables" ++ "type": "firewall" + } + ] + } diff --git a/SOURCES/podman-1807310.patch b/SOURCES/podman-1807310.patch new file mode 100644 index 0000000..d182dab --- /dev/null +++ b/SOURCES/podman-1807310.patch @@ -0,0 +1,133 @@ +From b41c864d569357a102ee2335a4947e59e5e2b08a Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Thu, 27 Feb 2020 16:08:29 -0500 +Subject: [PATCH] Ensure that exec sessions inherit supplemental groups + +This corrects a regression from Podman 1.4.x where container exec +sessions inherited supplemental groups from the container, iff +the exec session did not specify a user. 
+ +Signed-off-by: Matthew Heon +--- + libpod/container_api.go | 5 ----- + libpod/container_internal_linux.go | 5 ++++- + libpod/oci_conmon_linux.go | 25 +++++++++++++++++++++---- + test/e2e/exec_test.go | 24 ++++++++++++++++++++++++ + 4 files changed, 49 insertions(+), 10 deletions(-) + +diff --git a/libpod/container_api.go b/libpod/container_api.go +index d612341bce..dabbe27dcd 100644 +--- a/libpod/container_api.go ++++ b/libpod/container_api.go +@@ -270,11 +270,6 @@ func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []stri + } + }() + +- // if the user is empty, we should inherit the user that the container is currently running with +- if user == "" { +- user = c.config.User +- } +- + opts := new(ExecOptions) + opts.Cmd = cmd + opts.CapAdd = capList +diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go +index 7390262647..63968918cb 100644 +--- a/libpod/container_internal_linux.go ++++ b/libpod/container_internal_linux.go +@@ -330,7 +330,10 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { + + // Add addition groups if c.config.GroupAdd is not empty + if len(c.config.Groups) > 0 { +- gids, _ := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, nil) ++ gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID()) ++ } + for _, gid := range gids { + g.AddProcessAdditionalGid(gid) + } +diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go +index 07d38693f0..800f896036 100644 +--- a/libpod/oci_conmon_linux.go ++++ b/libpod/oci_conmon_linux.go +@@ -1252,18 +1252,35 @@ func prepareProcessExec(c *Container, cmd, env []string, tty bool, cwd, user, se + + } + ++ var addGroups []string ++ var sgids []uint32 ++ ++ // if the user is empty, we should inherit the user that the container is currently running with ++ if user == "" { ++ user = c.config.User ++ addGroups = c.config.Groups ++ } ++ + overrides := c.getUserOverrides() + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, user, overrides) + if err != nil { + return nil, err + } + ++ if len(addGroups) > 0 { ++ sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID) ++ } ++ } ++ + // If user was set, look it up in the container to get a UID to use on + // the host +- if user != "" { +- sgids := make([]uint32, 0, len(execUser.Sgids)) +- for _, sgid := range execUser.Sgids { +- sgids = append(sgids, uint32(sgid)) ++ if user != "" || len(sgids) > 0 { ++ if user != "" { ++ for _, sgid := range execUser.Sgids { ++ sgids = append(sgids, uint32(sgid)) ++ } + } + processUser := spec.User{ + UID: uint32(execUser.Uid), +diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go +index ed4eb3335f..ab806f6831 100644 +--- a/test/e2e/exec_test.go ++++ b/test/e2e/exec_test.go +@@ -1,6 +1,7 @@ + package integration + + import ( ++ "fmt" + "os" + "strings" + +@@ -244,4 +245,27 @@ var _ = Describe("Podman exec", func() { + Expect(session.ExitCode()).To(Equal(0)) + }) + ++ It("podman exec preserves --group-add groups", func() { ++ groupName := "group1" ++ gid := "4444" ++ ctrName1 := "ctr1" ++ ctr1 := podmanTest.Podman([]string{"run", "-ti", "--name", ctrName1, fedoraMinimal, "groupadd", "-g", gid, groupName}) ++ 
ctr1.WaitWithDefaultTimeout() ++ Expect(ctr1.ExitCode()).To(Equal(0)) ++ ++ imgName := "img1" ++ commit := podmanTest.Podman([]string{"commit", ctrName1, imgName}) ++ commit.WaitWithDefaultTimeout() ++ Expect(commit.ExitCode()).To(Equal(0)) ++ ++ ctrName2 := "ctr2" ++ ctr2 := podmanTest.Podman([]string{"run", "-d", "--name", ctrName2, "--group-add", groupName, imgName, "sleep", "300"}) ++ ctr2.WaitWithDefaultTimeout() ++ Expect(ctr2.ExitCode()).To(Equal(0)) ++ ++ exec := podmanTest.Podman([]string{"exec", "-ti", ctrName2, "id"}) ++ exec.WaitWithDefaultTimeout() ++ Expect(exec.ExitCode()).To(Equal(0)) ++ Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue()) ++ }) + }) diff --git a/SOURCES/podman-1807379.patch b/SOURCES/podman-1807379.patch new file mode 100644 index 0000000..93a6c18 --- /dev/null +++ b/SOURCES/podman-1807379.patch @@ -0,0 +1,49 @@ +From 776eb64ab2cc07e0bd2879791780fa9b9fcd7ea1 Mon Sep 17 00:00:00 2001 +From: Peter Hunt +Date: Wed, 8 Jan 2020 11:09:07 -0500 +Subject: [PATCH] exec: fix pipes + +In a largely anticlimatic solution to the saga of piped input from conmon, we come to this solution. + +When we pass the Stdin stream to the exec.Command structure, it's immediately consumed and lost, instead of being consumed through CopyDetachable(). + +When we don't pass -i in, conmon is not told to create a masterfd_stdin, and won't pass anything to the container. + +With both, we can do + +echo hi | podman exec -til cat + +and get the expected hi + +Signed-off-by: Peter Hunt +--- + libpod/oci_conmon_linux.go | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go +index 37aa71cbba..78c8f41260 100644 +--- a/libpod/oci_conmon_linux.go ++++ b/libpod/oci_conmon_linux.go +@@ -546,6 +546,10 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options + args = append(args, "-t") + } + ++ if options.Streams.AttachInput { ++ args = append(args, "-i") ++ } ++ + // Append container ID and command + args = append(args, "-e") + // TODO make this optional when we can detach +@@ -558,9 +562,8 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options + execCmd := exec.Command(r.conmonPath, args...) + + if options.Streams != nil { +- if options.Streams.AttachInput { +- execCmd.Stdin = options.Streams.InputStream +- } ++ // Don't add the InputStream to the execCmd. Instead, the data should be passed ++ // through CopyDetachable + if options.Streams.AttachOutput { + execCmd.Stdout = options.Streams.OutputStream + } diff --git a/SOURCES/podman-CVE-2020-1702.patch b/SOURCES/podman-CVE-2020-1702.patch new file mode 100644 index 0000000..01e1aca --- /dev/null +++ b/SOURCES/podman-CVE-2020-1702.patch @@ -0,0 +1,13539 @@ +From 23d7b2d5c4281f54ffe351293f68fb5136013bcc Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Wed, 5 Feb 2020 14:55:48 +0100 +Subject: [PATCH 1/3] [v1.6] update containers/image + +Note that this includes fixes for +https://access.redhat.com/security/cve/CVE-2020-1702. 
+ +Signed-off-by: Valentin Rothberg +--- + go.mod | 2 +- + go.sum | 2 + + .../Microsoft/hcsshim/mksyscall_windows.go | 943 ---------- + .../image/v5/docker/docker_client.go | 6 +- + .../image/v5/docker/docker_image_dest.go | 3 +- + .../image/v5/docker/docker_image_src.go | 10 +- + .../image/v5/docker/tarfile/dest.go | 3 +- + .../containers/image/v5/docker/tarfile/src.go | 9 +- + .../image/v5/image/docker_schema2.go | 4 +- + .../containers/image/v5/image/oci.go | 4 +- + .../image/v5/internal/iolimits/iolimits.go | 60 + + .../image/v5/openshift/openshift.go | 4 +- + .../storage/pkg/archive/example_changes.go | 97 -- + .../docker/pkg/archive/example_changes.go | 97 -- + .../klauspost/compress/flate/gen.go | 265 --- + .../github.com/klauspost/cpuid/private-gen.go | 476 ------ + vendor/github.com/ulikunitz/xz/example.go | 40 - + vendor/golang.org/x/net/html/atom/gen.go | 712 -------- + vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - + vendor/golang.org/x/sys/unix/mkpost.go | 122 -- + vendor/golang.org/x/sys/unix/mksyscall.go | 407 ----- + .../x/sys/unix/mksyscall_aix_ppc.go | 415 ----- + .../x/sys/unix/mksyscall_aix_ppc64.go | 614 ------- + .../x/sys/unix/mksyscall_solaris.go | 335 ---- + .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 ---- + vendor/golang.org/x/sys/unix/mksysnum.go | 190 --- + vendor/golang.org/x/sys/unix/types_aix.go | 237 --- + vendor/golang.org/x/sys/unix/types_darwin.go | 283 --- + .../golang.org/x/sys/unix/types_dragonfly.go | 263 --- + vendor/golang.org/x/sys/unix/types_freebsd.go | 400 ----- + vendor/golang.org/x/sys/unix/types_netbsd.go | 290 ---- + vendor/golang.org/x/sys/unix/types_openbsd.go | 283 --- + vendor/golang.org/x/sys/unix/types_solaris.go | 266 --- + .../x/text/encoding/charmap/maketables.go | 556 ------ + .../x/text/encoding/htmlindex/gen.go | 173 -- + .../text/encoding/internal/identifier/gen.go | 142 -- + .../x/text/encoding/japanese/maketables.go | 161 -- + .../x/text/encoding/korean/maketables.go | 143 -- + .../encoding/simplifiedchinese/maketables.go | 161 -- + .../encoding/traditionalchinese/maketables.go | 140 -- + .../x/text/internal/language/compact/gen.go | 64 - + .../internal/language/compact/gen_index.go | 113 -- + .../internal/language/compact/gen_parents.go | 54 - + .../x/text/internal/language/gen.go | 1520 ----------------- + .../x/text/internal/language/gen_common.go | 20 - + vendor/golang.org/x/text/language/gen.go | 305 ---- + vendor/golang.org/x/text/unicode/bidi/gen.go | 133 -- + .../x/text/unicode/bidi/gen_ranges.go | 57 - + .../x/text/unicode/bidi/gen_trieval.go | 64 - + .../x/text/unicode/norm/maketables.go | 986 ----------- + .../golang.org/x/text/unicode/norm/triegen.go | 117 -- + vendor/modules.txt | 407 ++--- + 52 files changed, 291 insertions(+), 12283 deletions(-) + delete mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go + create mode 100644 vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go + delete mode 100644 vendor/github.com/containers/storage/pkg/archive/example_changes.go + delete mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go + delete mode 100644 vendor/github.com/klauspost/compress/flate/gen.go + delete mode 100644 vendor/github.com/klauspost/cpuid/private-gen.go + delete mode 100644 vendor/github.com/ulikunitz/xz/example.go + delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go + delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go + delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go + delete mode 100644 
vendor/golang.org/x/sys/unix/mksyscall.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go + delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go + delete mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go + delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go + delete mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go + delete mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go + delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go + delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go + delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go + delete mode 100644 vendor/golang.org/x/text/language/gen.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go + delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go + delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go + delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go + +diff --git a/go.mod b/go.mod +index 6ee27ae6e2..064089b76d 100644 +--- a/go.mod ++++ b/go.mod +@@ -12,7 +12,7 @@ require ( + github.com/containernetworking/cni v0.7.1 + github.com/containernetworking/plugins v0.8.2 + github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 +- github.com/containers/image/v5 v5.0.0 ++ github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 + github.com/containers/psgo v1.3.2 + github.com/containers/storage v1.13.6 + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f +diff --git a/go.sum b/go.sum +index 0d73288fb4..2dfb33e942 100644 +--- a/go.sum ++++ b/go.sum +@@ -61,6 +61,8 @@ github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2s + github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc= + github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4= + github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= ++github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 h1:iV4aHKRoPcHp5BISsuiPMyaCjGJfLKp/FUMAG1NeqvE= ++github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY= + github.com/containers/libtrust 
v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= + github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= + github.com/containers/psgo v1.3.2 h1:jYfppPih3S/j2Yi5O14AXjd8GfCx1ph9L3YsoK3adko= +diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go +deleted file mode 100644 +index 7647734de9..0000000000 +--- a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go ++++ /dev/null +@@ -1,943 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-mksyscall_windows generates windows system call bodies +- +-It parses all files specified on command line containing function +-prototypes (like syscall_windows.go) and prints system call bodies +-to standard output. +- +-The prototypes are marked by lines beginning with "//sys" and read +-like func declarations if //sys is replaced by func, but: +- +-* The parameter lists must give a name for each argument. This +- includes return parameters. +- +-* The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- +-* If the return parameter is an error number, it must be named err. +- +-* If go func name needs to be different from it's winapi dll name, +- the winapi name could be specified at the end, after "=" sign, like +- //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA +- +-* Each function that returns err needs to supply a condition, that +- return value of winapi will be tested against to detect failure. +- This would set err to windows "last-error", otherwise it will be nil. +- The value can be provided at end of //sys declaration, like +- //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA +- and is [failretval==0] by default. +- +-Usage: +- mksyscall_windows [flags] [path ...] +- +-The flags are: +- -output +- Specify output file name (outputs to console if blank). +- -trace +- Generate print statement after every syscall. +-*/ +-package main +- +-import ( +- "bufio" +- "bytes" +- "errors" +- "flag" +- "fmt" +- "go/format" +- "go/parser" +- "go/token" +- "io" +- "io/ioutil" +- "log" +- "os" +- "path/filepath" +- "runtime" +- "sort" +- "strconv" +- "strings" +- "text/template" +-) +- +-var ( +- filename = flag.String("output", "", "output file name (standard output if omitted)") +- printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") +- systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") +- winio = flag.Bool("winio", false, "import go-winio") +-) +- +-func trim(s string) string { +- return strings.Trim(s, " \t") +-} +- +-var packageName string +- +-func packagename() string { +- return packageName +-} +- +-func syscalldot() string { +- if packageName == "syscall" { +- return "" +- } +- return "syscall." +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +- fn *Fn +- tmpVarIdx int +-} +- +-// tmpVar returns temp variable name that will be used to represent p during syscall. 
+-func (p *Param) tmpVar() string { +- if p.tmpVarIdx < 0 { +- p.tmpVarIdx = p.fn.curTmpVarIdx +- p.fn.curTmpVarIdx++ +- } +- return fmt.Sprintf("_p%d", p.tmpVarIdx) +-} +- +-// BoolTmpVarCode returns source code for bool temp variable. +-func (p *Param) BoolTmpVarCode() string { +- const code = `var %s uint32 +- if %s { +- %s = 1 +- } else { +- %s = 0 +- }` +- tmp := p.tmpVar() +- return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) +-} +- +-// SliceTmpVarCode returns source code for slice temp variable. +-func (p *Param) SliceTmpVarCode() string { +- const code = `var %s *%s +- if len(%s) > 0 { +- %s = &%s[0] +- }` +- tmp := p.tmpVar() +- return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +-} +- +-// StringTmpVarCode returns source code for string temp variable. +-func (p *Param) StringTmpVarCode() string { +- errvar := p.fn.Rets.ErrorVarName() +- if errvar == "" { +- errvar = "_" +- } +- tmp := p.tmpVar() +- const code = `var %s %s +- %s, %s = %s(%s)` +- s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) +- if errvar == "-" { +- return s +- } +- const morecode = ` +- if %s != nil { +- return +- }` +- return s + fmt.Sprintf(morecode, errvar) +-} +- +-// TmpVarCode returns source code for temp variable. +-func (p *Param) TmpVarCode() string { +- switch { +- case p.Type == "bool": +- return p.BoolTmpVarCode() +- case strings.HasPrefix(p.Type, "[]"): +- return p.SliceTmpVarCode() +- default: +- return "" +- } +-} +- +-// TmpVarHelperCode returns source code for helper's temp variable. +-func (p *Param) TmpVarHelperCode() string { +- if p.Type != "string" { +- return "" +- } +- return p.StringTmpVarCode() +-} +- +-// SyscallArgList returns source code fragments representing p parameter +-// in syscall. Slices are translated into 2 syscall parameters: pointer to +-// the first element and length. +-func (p *Param) SyscallArgList() []string { +- t := p.HelperType() +- var s string +- switch { +- case t[0] == '*': +- s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) +- case t == "bool": +- s = p.tmpVar() +- case strings.HasPrefix(t, "[]"): +- return []string{ +- fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), +- fmt.Sprintf("uintptr(len(%s))", p.Name), +- } +- default: +- s = p.Name +- } +- return []string{fmt.Sprintf("uintptr(%s)", s)} +-} +- +-// IsError determines if p parameter is used to return error. +-func (p *Param) IsError() bool { +- return p.Name == "err" && p.Type == "error" +-} +- +-// HelperType returns type of parameter p used in helper function. +-func (p *Param) HelperType() string { +- if p.Type == "string" { +- return p.fn.StrconvType() +- } +- return p.Type +-} +- +-// join concatenates parameters ps into a string with sep separator. +-// Each parameter is converted into string by applying fn to it +-// before conversion. +-func join(ps []*Param, fn func(*Param) string, sep string) string { +- if len(ps) == 0 { +- return "" +- } +- a := make([]string, 0) +- for _, p := range ps { +- a = append(a, fn(p)) +- } +- return strings.Join(a, sep) +-} +- +-// Rets describes function return parameters. +-type Rets struct { +- Name string +- Type string +- ReturnsError bool +- FailCond string +-} +- +-// ErrorVarName returns error variable name for r. +-func (r *Rets) ErrorVarName() string { +- if r.ReturnsError { +- return "err" +- } +- if r.Type == "error" { +- return r.Name +- } +- return "" +-} +- +-// ToParams converts r into slice of *Param. 
+-func (r *Rets) ToParams() []*Param { +- ps := make([]*Param, 0) +- if len(r.Name) > 0 { +- ps = append(ps, &Param{Name: r.Name, Type: r.Type}) +- } +- if r.ReturnsError { +- ps = append(ps, &Param{Name: "err", Type: "error"}) +- } +- return ps +-} +- +-// List returns source code of syscall return parameters. +-func (r *Rets) List() string { +- s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") +- if len(s) > 0 { +- s = "(" + s + ")" +- } +- return s +-} +- +-// PrintList returns source code of trace printing part correspondent +-// to syscall return values. +-func (r *Rets) PrintList() string { +- return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +-} +- +-// SetReturnValuesCode returns source code that accepts syscall return values. +-func (r *Rets) SetReturnValuesCode() string { +- if r.Name == "" && !r.ReturnsError { +- return "" +- } +- retvar := "r0" +- if r.Name == "" { +- retvar = "r1" +- } +- errvar := "_" +- if r.ReturnsError { +- errvar = "e1" +- } +- return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +-} +- +-func (r *Rets) useLongHandleErrorCode(retvar string) string { +- const code = `if %s { +- if e1 != 0 { +- err = errnoErr(e1) +- } else { +- err = %sEINVAL +- } +- }` +- cond := retvar + " == 0" +- if r.FailCond != "" { +- cond = strings.Replace(r.FailCond, "failretval", retvar, 1) +- } +- return fmt.Sprintf(code, cond, syscalldot()) +-} +- +-// SetErrorCode returns source code that sets return parameters. +-func (r *Rets) SetErrorCode() string { +- const code = `if r0 != 0 { +- %s = %sErrno(r0) +- }` +- const hrCode = `if int32(r0) < 0 { +- if r0&0x1fff0000 == 0x00070000 { +- r0 &= 0xffff +- } +- %s = %sErrno(r0) +- }` +- if r.Name == "" && !r.ReturnsError { +- return "" +- } +- if r.Name == "" { +- return r.useLongHandleErrorCode("r1") +- } +- if r.Type == "error" { +- if r.Name == "hr" { +- return fmt.Sprintf(hrCode, r.Name, syscalldot()) +- } else { +- return fmt.Sprintf(code, r.Name, syscalldot()) +- } +- } +- s := "" +- switch { +- case r.Type[0] == '*': +- s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) +- case r.Type == "bool": +- s = fmt.Sprintf("%s = r0 != 0", r.Name) +- default: +- s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) +- } +- if !r.ReturnsError { +- return s +- } +- return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +-} +- +-// Fn describes syscall function. +-type Fn struct { +- Name string +- Params []*Param +- Rets *Rets +- PrintTrace bool +- confirmproc bool +- dllname string +- dllfuncname string +- src string +- // TODO: get rid of this field and just use parameter index instead +- curTmpVarIdx int // insure tmp variables have uniq names +-} +- +-// extractParams parses s to extract function parameters. +-func extractParams(s string, f *Fn) ([]*Param, error) { +- s = trim(s) +- if s == "" { +- return nil, nil +- } +- a := strings.Split(s, ",") +- ps := make([]*Param, len(a)) +- for i := range ps { +- s2 := trim(a[i]) +- b := strings.Split(s2, " ") +- if len(b) != 2 { +- b = strings.Split(s2, "\t") +- if len(b) != 2 { +- return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") +- } +- } +- ps[i] = &Param{ +- Name: trim(b[0]), +- Type: trim(b[1]), +- fn: f, +- tmpVarIdx: -1, +- } +- } +- return ps, nil +-} +- +-// extractSection extracts text out of string s starting after start +-// and ending just before end. 
found return value will indicate success, +-// and prefix, body and suffix will contain correspondent parts of string s. +-func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { +- s = trim(s) +- if strings.HasPrefix(s, string(start)) { +- // no prefix +- body = s[1:] +- } else { +- a := strings.SplitN(s, string(start), 2) +- if len(a) != 2 { +- return "", "", s, false +- } +- prefix = a[0] +- body = a[1] +- } +- a := strings.SplitN(body, string(end), 2) +- if len(a) != 2 { +- return "", "", "", false +- } +- return prefix, a[0], a[1], true +-} +- +-// newFn parses string s and return created function Fn. +-func newFn(s string) (*Fn, error) { +- s = trim(s) +- f := &Fn{ +- Rets: &Rets{}, +- src: s, +- PrintTrace: *printTraceFlag, +- } +- // function name and args +- prefix, body, s, found := extractSection(s, '(', ')') +- if !found || prefix == "" { +- return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") +- } +- f.Name = prefix +- var err error +- f.Params, err = extractParams(body, f) +- if err != nil { +- return nil, err +- } +- // return values +- _, body, s, found = extractSection(s, '(', ')') +- if found { +- r, err := extractParams(body, f) +- if err != nil { +- return nil, err +- } +- switch len(r) { +- case 0: +- case 1: +- if r[0].IsError() { +- f.Rets.ReturnsError = true +- } else { +- f.Rets.Name = r[0].Name +- f.Rets.Type = r[0].Type +- } +- case 2: +- if !r[1].IsError() { +- return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") +- } +- f.Rets.ReturnsError = true +- f.Rets.Name = r[0].Name +- f.Rets.Type = r[0].Type +- default: +- return nil, errors.New("Too many return values in \"" + f.src + "\"") +- } +- } +- // fail condition +- _, body, s, found = extractSection(s, '[', ']') +- if found { +- f.Rets.FailCond = body +- } +- // dll and dll function names +- s = trim(s) +- if s == "" { +- return f, nil +- } +- if !strings.HasPrefix(s, "=") { +- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") +- } +- s = trim(s[1:]) +- a := strings.Split(s, ".") +- switch len(a) { +- case 1: +- f.dllfuncname = a[0] +- case 2: +- f.dllname = a[0] +- f.dllfuncname = a[1] +- default: +- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") +- } +- if f.dllfuncname[len(f.dllfuncname)-1] == '?' { +- f.confirmproc = true +- f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1] +- } +- return f, nil +-} +- +-// DLLName returns DLL name for function f. +-func (f *Fn) DLLName() string { +- if f.dllname == "" { +- return "kernel32" +- } +- return f.dllname +-} +- +-// DLLName returns DLL function name for function f. +-func (f *Fn) DLLFuncName() string { +- if f.dllfuncname == "" { +- return f.Name +- } +- return f.dllfuncname +-} +- +-func (f *Fn) ConfirmProc() bool { +- return f.confirmproc +-} +- +-// ParamList returns source code for function f parameters. +-func (f *Fn) ParamList() string { +- return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +-} +- +-// HelperParamList returns source code for helper function f parameters. +-func (f *Fn) HelperParamList() string { +- return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +-} +- +-// ParamPrintList returns source code of trace printing part correspondent +-// to syscall input parameters. 
+-func (f *Fn) ParamPrintList() string { +- return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +-} +- +-// ParamCount return number of syscall parameters for function f. +-func (f *Fn) ParamCount() int { +- n := 0 +- for _, p := range f.Params { +- n += len(p.SyscallArgList()) +- } +- return n +-} +- +-// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +-// to use. It returns parameter count for correspondent SyscallX function. +-func (f *Fn) SyscallParamCount() int { +- n := f.ParamCount() +- switch { +- case n <= 3: +- return 3 +- case n <= 6: +- return 6 +- case n <= 9: +- return 9 +- case n <= 12: +- return 12 +- case n <= 15: +- return 15 +- default: +- panic("too many arguments to system call") +- } +-} +- +-// Syscall determines which SyscallX function to use for function f. +-func (f *Fn) Syscall() string { +- c := f.SyscallParamCount() +- if c == 3 { +- return syscalldot() + "Syscall" +- } +- return syscalldot() + "Syscall" + strconv.Itoa(c) +-} +- +-// SyscallParamList returns source code for SyscallX parameters for function f. +-func (f *Fn) SyscallParamList() string { +- a := make([]string, 0) +- for _, p := range f.Params { +- a = append(a, p.SyscallArgList()...) +- } +- for len(a) < f.SyscallParamCount() { +- a = append(a, "0") +- } +- return strings.Join(a, ", ") +-} +- +-// HelperCallParamList returns source code of call into function f helper. +-func (f *Fn) HelperCallParamList() string { +- a := make([]string, 0, len(f.Params)) +- for _, p := range f.Params { +- s := p.Name +- if p.Type == "string" { +- s = p.tmpVar() +- } +- a = append(a, s) +- } +- return strings.Join(a, ", ") +-} +- +-// IsUTF16 is true, if f is W (utf16) function. It is false +-// for all A (ascii) functions. +-func (_ *Fn) IsUTF16() bool { +- return true +-} +- +-// StrconvFunc returns name of Go string to OS string function for f. +-func (f *Fn) StrconvFunc() string { +- if f.IsUTF16() { +- return syscalldot() + "UTF16PtrFromString" +- } +- return syscalldot() + "BytePtrFromString" +-} +- +-// StrconvType returns Go type name used for OS string for f. +-func (f *Fn) StrconvType() string { +- if f.IsUTF16() { +- return "*uint16" +- } +- return "*byte" +-} +- +-// HasStringParam is true, if f has at least one string parameter. +-// Otherwise it is false. +-func (f *Fn) HasStringParam() bool { +- for _, p := range f.Params { +- if p.Type == "string" { +- return true +- } +- } +- return false +-} +- +-var uniqDllFuncName = make(map[string]bool) +- +-// IsNotDuplicate is true if f is not a duplicated function +-func (f *Fn) IsNotDuplicate() bool { +- funcName := f.DLLFuncName() +- if uniqDllFuncName[funcName] == false { +- uniqDllFuncName[funcName] = true +- return true +- } +- return false +-} +- +-// HelperName returns name of function f helper. +-func (f *Fn) HelperName() string { +- if !f.HasStringParam() { +- return f.Name +- } +- return "_" + f.Name +-} +- +-// Source files and functions. +-type Source struct { +- Funcs []*Fn +- Files []string +- StdLibImports []string +- ExternalImports []string +-} +- +-func (src *Source) Import(pkg string) { +- src.StdLibImports = append(src.StdLibImports, pkg) +- sort.Strings(src.StdLibImports) +-} +- +-func (src *Source) ExternalImport(pkg string) { +- src.ExternalImports = append(src.ExternalImports, pkg) +- sort.Strings(src.ExternalImports) +-} +- +-// ParseFiles parses files listed in fs and extracts all syscall +-// functions listed in sys comments. 
It returns source files +-// and functions collection *Source if successful. +-func ParseFiles(fs []string) (*Source, error) { +- src := &Source{ +- Funcs: make([]*Fn, 0), +- Files: make([]string, 0), +- StdLibImports: []string{ +- "unsafe", +- }, +- ExternalImports: make([]string, 0), +- } +- for _, file := range fs { +- if err := src.ParseFile(file); err != nil { +- return nil, err +- } +- } +- return src, nil +-} +- +-// DLLs return dll names for a source set src. +-func (src *Source) DLLs() []string { +- uniq := make(map[string]bool) +- r := make([]string, 0) +- for _, f := range src.Funcs { +- name := f.DLLName() +- if _, found := uniq[name]; !found { +- uniq[name] = true +- r = append(r, name) +- } +- } +- return r +-} +- +-// ParseFile adds additional file path to a source set src. +-func (src *Source) ParseFile(path string) error { +- file, err := os.Open(path) +- if err != nil { +- return err +- } +- defer file.Close() +- +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := trim(s.Text()) +- if len(t) < 7 { +- continue +- } +- if !strings.HasPrefix(t, "//sys") { +- continue +- } +- t = t[5:] +- if !(t[0] == ' ' || t[0] == '\t') { +- continue +- } +- f, err := newFn(t[1:]) +- if err != nil { +- return err +- } +- src.Funcs = append(src.Funcs, f) +- } +- if err := s.Err(); err != nil { +- return err +- } +- src.Files = append(src.Files, path) +- +- // get package name +- fset := token.NewFileSet() +- _, err = file.Seek(0, 0) +- if err != nil { +- return err +- } +- pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) +- if err != nil { +- return err +- } +- packageName = pkg.Name.Name +- +- return nil +-} +- +-// IsStdRepo returns true if src is part of standard library. +-func (src *Source) IsStdRepo() (bool, error) { +- if len(src.Files) == 0 { +- return false, errors.New("no input files provided") +- } +- abspath, err := filepath.Abs(src.Files[0]) +- if err != nil { +- return false, err +- } +- goroot := runtime.GOROOT() +- if runtime.GOOS == "windows" { +- abspath = strings.ToLower(abspath) +- goroot = strings.ToLower(goroot) +- } +- sep := string(os.PathSeparator) +- if !strings.HasSuffix(goroot, sep) { +- goroot += sep +- } +- return strings.HasPrefix(abspath, goroot), nil +-} +- +-// Generate output source file from a source set src. 
+-func (src *Source) Generate(w io.Writer) error { +- const ( +- pkgStd = iota // any package in std library +- pkgXSysWindows // x/sys/windows package +- pkgOther +- ) +- isStdRepo, err := src.IsStdRepo() +- if err != nil { +- return err +- } +- var pkgtype int +- switch { +- case isStdRepo: +- pkgtype = pkgStd +- case packageName == "windows": +- // TODO: this needs better logic than just using package name +- pkgtype = pkgXSysWindows +- default: +- pkgtype = pkgOther +- } +- if *systemDLL { +- switch pkgtype { +- case pkgStd: +- src.Import("internal/syscall/windows/sysdll") +- case pkgXSysWindows: +- default: +- src.ExternalImport("golang.org/x/sys/windows") +- } +- } +- if *winio { +- src.ExternalImport("github.com/Microsoft/go-winio") +- } +- if packageName != "syscall" { +- src.Import("syscall") +- } +- funcMap := template.FuncMap{ +- "packagename": packagename, +- "syscalldot": syscalldot, +- "newlazydll": func(dll string) string { +- arg := "\"" + dll + ".dll\"" +- if !*systemDLL { +- return syscalldot() + "NewLazyDLL(" + arg + ")" +- } +- if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { +- arg = strings.Replace(arg, "_", "-", -1) +- } +- switch pkgtype { +- case pkgStd: +- return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" +- case pkgXSysWindows: +- return "NewLazySystemDLL(" + arg + ")" +- default: +- return "windows.NewLazySystemDLL(" + arg + ")" +- } +- }, +- } +- t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) +- err = t.Execute(w, src) +- if err != nil { +- return errors.New("Failed to execute template: " + err.Error()) +- } +- return nil +-} +- +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") +- flag.PrintDefaults() +- os.Exit(1) +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- src, err := ParseFiles(flag.Args()) +- if err != nil { +- log.Fatal(err) +- } +- +- var buf bytes.Buffer +- if err := src.Generate(&buf); err != nil { +- log.Fatal(err) +- } +- +- data, err := format.Source(buf.Bytes()) +- if err != nil { +- log.Fatal(err) +- } +- if *filename == "" { +- _, err = os.Stdout.Write(data) +- } else { +- err = ioutil.WriteFile(*filename, data, 0644) +- } +- if err != nil { +- log.Fatal(err) +- } +-} +- +-// TODO: use println instead to print in the following template +-const srcTemplate = ` +- +-{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT +- +-package {{packagename}} +- +-import ( +-{{range .StdLibImports}}"{{.}}" +-{{end}} +- +-{{range .ExternalImports}}"{{.}}" +-{{end}} +-) +- +-var _ unsafe.Pointer +- +-// Do the interface allocations only once for common +-// Errno values. +-const ( +- errnoERROR_IO_PENDING = 997 +-) +- +-var ( +- errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) +-) +- +-// errnoErr returns common boxed Errno values, to prevent +-// allocations at runtime. +-func errnoErr(e {{syscalldot}}Errno) error { +- switch e { +- case 0: +- return nil +- case errnoERROR_IO_PENDING: +- return errERROR_IO_PENDING +- } +- // TODO: add more here, after collecting data on the common +- // error values see on Windows. (perhaps when running +- // all.bat?) 
+- return e +-} +- +-var ( +-{{template "dlls" .}} +-{{template "funcnames" .}}) +-{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +-{{end}} +- +-{{/* help functions */}} +- +-{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +-{{end}}{{end}} +- +-{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}} +-{{end}}{{end}} +- +-{{define "helperbody"}} +-func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +-{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +-} +-{{end}} +- +-{{define "funcbody"}} +-func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +-{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}} +-{{template "seterror" .}}{{template "printtrace" .}} return +-} +-{{end}} +- +-{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +-{{end}}{{end}}{{end}} +- +-{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +-{{end}}{{end}}{{end}} +- +-{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} +- +-{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} +- +-{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil { +- return +-} +-{{end}}{{end}} +- +- +-{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +-{{end}}{{end}} +- +-{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +-{{end}}{{end}} +- +-` +diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go +index 0b012c703b..bff077a40a 100644 +--- a/vendor/github.com/containers/image/v5/docker/docker_client.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_client.go +@@ -6,7 +6,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "os" +@@ -17,6 +16,7 @@ import ( + "time" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/pkg/tlsclientconfig" +@@ -597,7 +597,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, + default: + return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) + } +- tokenBlob, err := ioutil.ReadAll(res.Body) ++ tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) + if err != nil { + return nil, err + } +@@ -690,7 +690,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe + return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) + } + +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +index 417d97aec9..ce8a1f357e 100644 +--- 
a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +@@ -15,6 +15,7 @@ import ( + "strings" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -620,7 +621,7 @@ sigExists: + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize) + if err == nil { + logrus.Debugf("Error body %s", string(body)) + } +diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go +index 35beb30e54..5436d9b7d9 100644 +--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go ++++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go +@@ -12,6 +12,7 @@ import ( + "strconv" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" +@@ -156,7 +157,8 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin + if res.StatusCode != http.StatusOK { + return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) + } +- manblob, err := ioutil.ReadAll(res.Body) ++ ++ manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) + if err != nil { + return nil, "", err + } +@@ -342,7 +344,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } +- sig, err := ioutil.ReadAll(res.Body) ++ sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) + if err != nil { + return nil, false, err + } +@@ -401,7 +403,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere + return err + } + defer get.Body.Close() +- manifestBody, err := ioutil.ReadAll(get.Body) ++ manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) + if err != nil { + return err + } +@@ -424,7 +426,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere + } + defer delete.Body.Close() + +- body, err := ioutil.ReadAll(delete.Body) ++ body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize) + if err != nil { + return err + } +diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +index b02c60bb3d..9748ca1121 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +@@ -13,6 +13,7 @@ import ( + "time" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" +@@ -135,7 +136,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t + } + + if 
isConfig { +- buf, err := ioutil.ReadAll(stream) ++ buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream") + } +diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/tarfile/src.go +index ad0a3d2cb4..bbf604da6e 100644 +--- a/vendor/github.com/containers/image/v5/docker/tarfile/src.go ++++ b/vendor/github.com/containers/image/v5/docker/tarfile/src.go +@@ -11,6 +11,7 @@ import ( + "path" + "sync" + ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" +@@ -187,13 +188,13 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade + } + + // readTarComponent returns full contents of componentPath. +-func (s *Source) readTarComponent(path string) ([]byte, error) { ++func (s *Source) readTarComponent(path string, limit int) ([]byte, error) { + file, err := s.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "Error loading tar component %s", path) + } + defer file.Close() +- bytes, err := ioutil.ReadAll(file) ++ bytes, err := iolimits.ReadAtMost(file, limit) + if err != nil { + return nil, err + } +@@ -224,7 +225,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { + } + + // Read and parse config. +- configBytes, err := s.readTarComponent(tarManifest[0].Config) ++ configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize) + if err != nil { + return err + } +@@ -250,7 +251,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { + // loadTarManifest loads and decodes the manifest.json. + func (s *Source) loadTarManifest() ([]ManifestItem, error) { + // FIXME? Do we need to deal with the legacy format? 
+- bytes, err := s.readTarComponent(manifestFileName) ++ bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go +index 254c13f789..29c5047d73 100644 +--- a/vendor/github.com/containers/image/v5/image/docker_schema2.go ++++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go +@@ -7,10 +7,10 @@ import ( + "encoding/hex" + "encoding/json" + "fmt" +- "io/ioutil" + "strings" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -102,7 +102,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + return nil, err + } + defer stream.Close() +- blob, err := ioutil.ReadAll(stream) ++ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go +index 18a38d463e..406da262f3 100644 +--- a/vendor/github.com/containers/image/v5/image/oci.go ++++ b/vendor/github.com/containers/image/v5/image/oci.go +@@ -4,9 +4,9 @@ import ( + "context" + "encoding/json" + "fmt" +- "io/ioutil" + + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" +@@ -67,7 +67,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { + return nil, err + } + defer stream.Close() +- blob, err := ioutil.ReadAll(stream) ++ blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +new file mode 100644 +index 0000000000..3fed1995cb +--- /dev/null ++++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +@@ -0,0 +1,60 @@ ++package iolimits ++ ++import ( ++ "io" ++ "io/ioutil" ++ ++ "github.com/pkg/errors" ++) ++ ++// All constants below are intended to be used as limits for `ReadAtMost`. The ++// immediate use-case for limiting the size of in-memory copied data is to ++// protect against OOM DOS attacks as described inCVE-2020-1702. Instead of ++// copying data until running out of memory, we error out after hitting the ++// specified limit. ++const ( ++ // megaByte denotes one megabyte and is intended to be used as a limit in ++ // `ReadAtMost`. ++ megaByte = 1 << 20 ++ // MaxManifestBodySize is the maximum allowed size of a manifest. The limit ++ // of 4 MB aligns with the one of a Docker registry: ++ // https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30 ++ MaxManifestBodySize = 4 * megaByte ++ // MaxAuthTokenBodySize is the maximum allowed size of an auth token. ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxAuthTokenBodySize = megaByte ++ // MaxSignatureListBodySize is the maximum allowed size of a signature list. ++ // The limit of 4 MB is considered to be greatly sufficient. 
++ MaxSignatureListBodySize = 4 * megaByte ++ // MaxSignatureBodySize is the maximum allowed size of a signature. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxSignatureBodySize = 4 * megaByte ++ // MaxErrorBodySize is the maximum allowed size of an error-response body. ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxErrorBodySize = megaByte ++ // MaxConfigBodySize is the maximum allowed size of a config blob. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxConfigBodySize = 4 * megaByte ++ // MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body. ++ // The limit of 4 MB is considered to be greatly sufficient. ++ MaxOpenShiftStatusBody = 4 * megaByte ++ // MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images) ++ // The limit of 1 MB is considered to be greatly sufficient. ++ MaxTarFileManifestSize = megaByte ++) ++ ++// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded. ++func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { ++ limitedReader := io.LimitReader(reader, int64(limit+1)) ++ ++ res, err := ioutil.ReadAll(limitedReader) ++ if err != nil { ++ return nil, err ++ } ++ ++ if len(res) > limit { ++ return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit) ++ } ++ ++ return res, nil ++} +diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go +index 016de48034..c37e1b7510 100644 +--- a/vendor/github.com/containers/image/v5/openshift/openshift.go ++++ b/vendor/github.com/containers/image/v5/openshift/openshift.go +@@ -7,13 +7,13 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" ++ "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" +@@ -102,7 +102,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re + return nil, err + } + defer res.Body.Close() +- body, err := ioutil.ReadAll(res.Body) ++ body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go +deleted file mode 100644 +index 70f9c5564a..0000000000 +--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go ++++ /dev/null +@@ -1,97 +0,0 @@ +-// +build ignore +- +-// Simple tool to create an archive stream from an old and new directory +-// +-// By default it will stream the comparison of two temporary directories with junk files +-package main +- +-import ( +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "os" +- "path" +- +- "github.com/containers/storage/pkg/archive" +- "github.com/sirupsen/logrus" +-) +- +-var ( +- flDebug = flag.Bool("D", false, "debugging output") +- flNewDir = flag.String("newdir", "", "") +- flOldDir = flag.String("olddir", "", "") +- log = logrus.New() +-) +- +-func main() { +- flag.Usage = func() { +- fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") +- fmt.Printf("%s [OPTIONS]\n", os.Args[0]) +- flag.PrintDefaults() +- } +- flag.Parse() +- log.Out = os.Stderr +- if (len(os.Getenv("DEBUG")) > 0) || *flDebug { +- logrus.SetLevel(logrus.DebugLevel) +- } +- var newDir, oldDir string +- +- if len(*flNewDir) == 0 { +- var err error +- newDir, err = ioutil.TempDir("", "storage-test-newDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(newDir) +- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { +- log.Fatal(err) +- } +- } else { +- newDir = *flNewDir +- } +- +- if len(*flOldDir) == 0 { +- oldDir, err := ioutil.TempDir("", "storage-test-oldDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(oldDir) +- } else { +- oldDir = *flOldDir +- } +- +- changes, err := archive.ChangesDirs(newDir, oldDir) +- if err != nil { +- log.Fatal(err) +- } +- +- a, err := archive.ExportChanges(newDir, changes) +- if err != nil { +- log.Fatal(err) +- } +- defer a.Close() +- +- i, err := io.Copy(os.Stdout, a) +- if err != nil && err != io.EOF { +- log.Fatal(err) +- } +- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +-} +- +-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { +- fileData := []byte("fooo") +- for n := 0; n < numberOfFiles; n++ { +- fileName := fmt.Sprintf("file-%d", n) +- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { +- return 0, err +- } +- if makeLinks { +- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { +- return 0, err +- } +- } +- } +- totalSize := numberOfFiles * len(fileData) +- return totalSize, nil +-} +diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go +deleted file mode 100644 +index 495db809e9..0000000000 +--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go ++++ /dev/null +@@ -1,97 +0,0 @@ +-// +build ignore +- +-// Simple tool to create an archive stream from an old and new directory +-// +-// By default it will stream the comparison of two temporary directories with junk files +-package main +- +-import ( +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "os" +- "path" +- +- "github.com/docker/docker/pkg/archive" +- "github.com/sirupsen/logrus" +-) +- +-var ( +- flDebug = flag.Bool("D", false, "debugging output") +- flNewDir = flag.String("newdir", "", "") +- flOldDir = flag.String("olddir", "", "") +- log = logrus.New() +-) +- +-func main() { +- flag.Usage = func() { +- fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") +- fmt.Printf("%s [OPTIONS]\n", os.Args[0]) +- flag.PrintDefaults() +- } +- flag.Parse() +- log.Out = os.Stderr +- if (len(os.Getenv("DEBUG")) > 0) || *flDebug { +- logrus.SetLevel(logrus.DebugLevel) +- } +- var newDir, oldDir string +- +- if len(*flNewDir) == 0 { +- var err error +- newDir, err = ioutil.TempDir("", "docker-test-newDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(newDir) +- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { +- log.Fatal(err) +- } +- } else { +- newDir = *flNewDir +- } +- +- if len(*flOldDir) == 0 { +- oldDir, err := ioutil.TempDir("", "docker-test-oldDir") +- if err != nil { +- log.Fatal(err) +- } +- defer os.RemoveAll(oldDir) +- } else { +- oldDir = *flOldDir +- } +- +- changes, err := archive.ChangesDirs(newDir, oldDir) +- if err != nil { +- log.Fatal(err) +- } +- +- a, err := archive.ExportChanges(newDir, changes) +- if err != nil { +- log.Fatal(err) +- } +- defer a.Close() +- +- i, err := io.Copy(os.Stdout, a) +- if err != nil && err != io.EOF { +- log.Fatal(err) +- } +- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +-} +- +-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { +- fileData := []byte("fooo") +- for n := 0; n < numberOfFiles; n++ { +- fileName := fmt.Sprintf("file-%d", n) +- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { +- return 0, err +- } +- if makeLinks { +- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { +- return 0, err +- } +- } +- } +- totalSize := numberOfFiles * len(fileData) +- return totalSize, nil +-} +diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go +deleted file mode 100644 +index 154c89a488..0000000000 +--- a/vendor/github.com/klauspost/compress/flate/gen.go ++++ /dev/null +@@ -1,265 +0,0 @@ +-// Copyright 2012 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// This program generates fixedhuff.go +-// Invoke as +-// +-// go run gen.go -output fixedhuff.go +- +-package main +- +-import ( +- "bytes" +- "flag" +- "fmt" +- "go/format" +- "io/ioutil" +- "log" +-) +- +-var filename = flag.String("output", "fixedhuff.go", "output file name") +- +-const maxCodeLen = 16 +- +-// Note: the definition of the huffmanDecoder struct is copied from +-// inflate.go, as it is private to the implementation. +- +-// chunk & 15 is number of bits +-// chunk >> 4 is value, including table link +- +-const ( +- huffmanChunkBits = 9 +- huffmanNumChunks = 1 << huffmanChunkBits +- huffmanCountMask = 15 +- huffmanValueShift = 4 +-) +- +-type huffmanDecoder struct { +- min int // the minimum code length +- chunks [huffmanNumChunks]uint32 // chunks as described above +- links [][]uint32 // overflow links +- linkMask uint32 // mask the width of the link table +-} +- +-// Initialize Huffman decoding tables from array of code lengths. +-// Following this function, h is guaranteed to be initialized into a complete +-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +-// degenerate case where the tree has only a single symbol with length 1. Empty +-// trees are permitted. 
+-func (h *huffmanDecoder) init(bits []int) bool { +- // Sanity enables additional runtime tests during Huffman +- // table construction. It's intended to be used during +- // development to supplement the currently ad-hoc unit tests. +- const sanity = false +- +- if h.min != 0 { +- *h = huffmanDecoder{} +- } +- +- // Count number of codes of each length, +- // compute min and max length. +- var count [maxCodeLen]int +- var min, max int +- for _, n := range bits { +- if n == 0 { +- continue +- } +- if min == 0 || n < min { +- min = n +- } +- if n > max { +- max = n +- } +- count[n]++ +- } +- +- // Empty tree. The decompressor.huffSym function will fail later if the tree +- // is used. Technically, an empty tree is only valid for the HDIST tree and +- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree +- // is guaranteed to fail since it will attempt to use the tree to decode the +- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is +- // guaranteed to fail later since the compressed data section must be +- // composed of at least one symbol (the end-of-block marker). +- if max == 0 { +- return true +- } +- +- code := 0 +- var nextcode [maxCodeLen]int +- for i := min; i <= max; i++ { +- code <<= 1 +- nextcode[i] = code +- code += count[i] +- } +- +- // Check that the coding is complete (i.e., that we've +- // assigned all 2-to-the-max possible bit sequences). +- // Exception: To be compatible with zlib, we also need to +- // accept degenerate single-code codings. See also +- // TestDegenerateHuffmanCoding. +- if code != 1< huffmanChunkBits { +- numLinks := 1 << (uint(max) - huffmanChunkBits) +- h.linkMask = uint32(numLinks - 1) +- +- // create link tables +- link := nextcode[huffmanChunkBits+1] >> 1 +- h.links = make([][]uint32, huffmanNumChunks-link) +- for j := uint(link); j < huffmanNumChunks; j++ { +- reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 +- reverse >>= uint(16 - huffmanChunkBits) +- off := j - uint(link) +- if sanity && h.chunks[reverse] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 +- reverse >>= uint(16 - n) +- if n <= huffmanChunkBits { +- for off := reverse; off < len(h.chunks); off += 1 << uint(n) { +- // We should never need to overwrite +- // an existing chunk. Also, 0 is +- // never a valid chunk, because the +- // lower 4 "count" bits should be +- // between 1 and 15. +- if sanity && h.chunks[off] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- h.chunks[off] = chunk +- } +- } else { +- j := reverse & (huffmanNumChunks - 1) +- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { +- // Longer codes should have been +- // associated with a link table above. +- panic("impossible: not an indirect chunk") +- } +- value := h.chunks[j] >> huffmanValueShift +- linktab := h.links[value] +- reverse >>= huffmanChunkBits +- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { +- if sanity && linktab[off] != 0 { +- panic("impossible: overwriting existing chunk") +- } +- linktab[off] = chunk +- } +- } +- } +- +- if sanity { +- // Above we've sanity checked that we never overwrote +- // an existing entry. Here we additionally check that +- // we filled the tables completely. +- for i, chunk := range h.chunks { +- if chunk == 0 { +- // As an exception, in the degenerate +- // single-code case, we allow odd +- // chunks to be missing. 
+- if code == 1 && i%2 == 1 { +- continue +- } +- panic("impossible: missing chunk") +- } +- } +- for _, linktab := range h.links { +- for _, chunk := range linktab { +- if chunk == 0 { +- panic("impossible: missing chunk") +- } +- } +- } +- } +- +- return true +-} +- +-func main() { +- flag.Parse() +- +- var h huffmanDecoder +- var bits [288]int +- initReverseByte() +- for i := 0; i < 144; i++ { +- bits[i] = 8 +- } +- for i := 144; i < 256; i++ { +- bits[i] = 9 +- } +- for i := 256; i < 280; i++ { +- bits[i] = 7 +- } +- for i := 280; i < 288; i++ { +- bits[i] = 8 +- } +- h.init(bits[:]) +- if h.links != nil { +- log.Fatal("Unexpected links table in fixed Huffman decoder") +- } +- +- var buf bytes.Buffer +- +- fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file.`+"\n\n") +- +- fmt.Fprintln(&buf, "package flate") +- fmt.Fprintln(&buf) +- fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") +- fmt.Fprintln(&buf) +- fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") +- fmt.Fprintf(&buf, "\t%d,\n", h.min) +- fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") +- for i := 0; i < huffmanNumChunks; i++ { +- if i&7 == 0 { +- fmt.Fprintf(&buf, "\t\t") +- } else { +- fmt.Fprintf(&buf, " ") +- } +- fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) +- if i&7 == 7 { +- fmt.Fprintln(&buf) +- } +- } +- fmt.Fprintln(&buf, "\t},") +- fmt.Fprintln(&buf, "\tnil, 0,") +- fmt.Fprintln(&buf, "}") +- +- data, err := format.Source(buf.Bytes()) +- if err != nil { +- log.Fatal(err) +- } +- err = ioutil.WriteFile(*filename, data, 0644) +- if err != nil { +- log.Fatal(err) +- } +-} +- +-var reverseByte [256]byte +- +-func initReverseByte() { +- for x := 0; x < 256; x++ { +- var result byte +- for i := uint(0); i < 8; i++ { +- result |= byte(((x >> i) & 1) << (7 - i)) +- } +- reverseByte[x] = result +- } +-} +diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go +deleted file mode 100644 +index 437333d292..0000000000 +--- a/vendor/github.com/klauspost/cpuid/private-gen.go ++++ /dev/null +@@ -1,476 +0,0 @@ +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "fmt" +- "go/ast" +- "go/parser" +- "go/printer" +- "go/token" +- "io" +- "io/ioutil" +- "log" +- "os" +- "reflect" +- "strings" +- "unicode" +- "unicode/utf8" +-) +- +-var inFiles = []string{"cpuid.go", "cpuid_test.go"} +-var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +-var fileSet = token.NewFileSet() +-var reWrites = []rewrite{ +- initRewrite("CPUInfo -> cpuInfo"), +- initRewrite("Vendor -> vendor"), +- initRewrite("Flags -> flags"), +- initRewrite("Detect -> detect"), +- initRewrite("CPU -> cpu"), +-} +-var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, +- // cpuid_test.go +- "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +-} +- +-var excludePrefixes = []string{"test", "benchmark"} +- +-func main() { +- Package := "private" +- parserMode := parser.ParseComments +- exported := make(map[string]rewrite) +- for _, file := range inFiles { +- in, err := os.Open(file) +- if err != nil { +- log.Fatalf("opening input", err) +- } +- +- src, err := ioutil.ReadAll(in) +- if err != nil { +- log.Fatalf("reading input", err) +- } +- +- astfile, err := parser.ParseFile(fileSet, file, src, parserMode) +- if err != nil { +- 
log.Fatalf("parsing input", err) +- } +- +- for _, rw := range reWrites { +- astfile = rw(astfile) +- } +- +- // Inspect the AST and print all identifiers and literals. +- var startDecl token.Pos +- var endDecl token.Pos +- ast.Inspect(astfile, func(n ast.Node) bool { +- var s string +- switch x := n.(type) { +- case *ast.Ident: +- if x.IsExported() { +- t := strings.ToLower(x.Name) +- for _, pre := range excludePrefixes { +- if strings.HasPrefix(t, pre) { +- return true +- } +- } +- if excludeNames[t] != true { +- //if x.Pos() > startDecl && x.Pos() < endDecl { +- exported[x.Name] = initRewrite(x.Name + " -> " + t) +- } +- } +- +- case *ast.GenDecl: +- if x.Tok == token.CONST && x.Lparen > 0 { +- startDecl = x.Lparen +- endDecl = x.Rparen +- // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) +- } +- } +- if s != "" { +- fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) +- } +- return true +- }) +- +- for _, rw := range exported { +- astfile = rw(astfile) +- } +- +- var buf bytes.Buffer +- +- printer.Fprint(&buf, fileSet, astfile) +- +- // Remove package documentation and insert information +- s := buf.String() +- ind := strings.Index(buf.String(), "\npackage cpuid") +- s = s[ind:] +- s = "// Generated, DO NOT EDIT,\n" + +- "// but copy it to your own project and rename the package.\n" + +- "// See more at http://github.com/klauspost/cpuid\n" + +- s +- +- outputName := Package + string(os.PathSeparator) + file +- +- err = ioutil.WriteFile(outputName, []byte(s), 0644) +- if err != nil { +- log.Fatalf("writing output: %s", err) +- } +- log.Println("Generated", outputName) +- } +- +- for _, file := range copyFiles { +- dst := "" +- if strings.HasPrefix(file, "cpuid") { +- dst = Package + string(os.PathSeparator) + file +- } else { +- dst = Package + string(os.PathSeparator) + "cpuid_" + file +- } +- err := copyFile(file, dst) +- if err != nil { +- log.Fatalf("copying file: %s", err) +- } +- log.Println("Copied", dst) +- } +-} +- +-// CopyFile copies a file from src to dst. If src and dst files exist, and are +-// the same, then return success. Copy the file contents from src to dst. +-func copyFile(src, dst string) (err error) { +- sfi, err := os.Stat(src) +- if err != nil { +- return +- } +- if !sfi.Mode().IsRegular() { +- // cannot copy non-regular files (e.g., directories, +- // symlinks, devices, etc.) +- return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) +- } +- dfi, err := os.Stat(dst) +- if err != nil { +- if !os.IsNotExist(err) { +- return +- } +- } else { +- if !(dfi.Mode().IsRegular()) { +- return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) +- } +- if os.SameFile(sfi, dfi) { +- return +- } +- } +- err = copyFileContents(src, dst) +- return +-} +- +-// copyFileContents copies the contents of the file named src to the file named +-// by dst. The file will be created if it does not already exist. If the +-// destination file exists, all it's contents will be replaced by the contents +-// of the source file. 
+-func copyFileContents(src, dst string) (err error) { +- in, err := os.Open(src) +- if err != nil { +- return +- } +- defer in.Close() +- out, err := os.Create(dst) +- if err != nil { +- return +- } +- defer func() { +- cerr := out.Close() +- if err == nil { +- err = cerr +- } +- }() +- if _, err = io.Copy(out, in); err != nil { +- return +- } +- err = out.Sync() +- return +-} +- +-type rewrite func(*ast.File) *ast.File +- +-// Mostly copied from gofmt +-func initRewrite(rewriteRule string) rewrite { +- f := strings.Split(rewriteRule, "->") +- if len(f) != 2 { +- fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") +- os.Exit(2) +- } +- pattern := parseExpr(f[0], "pattern") +- replace := parseExpr(f[1], "replacement") +- return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +-} +- +-// parseExpr parses s as an expression. +-// It might make sense to expand this to allow statement patterns, +-// but there are problems with preserving formatting and also +-// with what a wildcard for a statement looks like. +-func parseExpr(s, what string) ast.Expr { +- x, err := parser.ParseExpr(s) +- if err != nil { +- fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) +- os.Exit(2) +- } +- return x +-} +- +-// Keep this function for debugging. +-/* +-func dump(msg string, val reflect.Value) { +- fmt.Printf("%s:\n", msg) +- ast.Print(fileSet, val.Interface()) +- fmt.Println() +-} +-*/ +- +-// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { +- cmap := ast.NewCommentMap(fileSet, p, p.Comments) +- m := make(map[string]reflect.Value) +- pat := reflect.ValueOf(pattern) +- repl := reflect.ValueOf(replace) +- +- var rewriteVal func(val reflect.Value) reflect.Value +- rewriteVal = func(val reflect.Value) reflect.Value { +- // don't bother if val is invalid to start with +- if !val.IsValid() { +- return reflect.Value{} +- } +- for k := range m { +- delete(m, k) +- } +- val = apply(rewriteVal, val) +- if match(m, pat, val) { +- val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) +- } +- return val +- } +- +- r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) +- r.Comments = cmap.Filter(r).Comments() // recreate comments list +- return r +-} +- +-// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +-func set(x, y reflect.Value) { +- // don't bother if x cannot be set or y is invalid +- if !x.CanSet() || !y.IsValid() { +- return +- } +- defer func() { +- if x := recover(); x != nil { +- if s, ok := x.(string); ok && +- (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { +- // x cannot be set to y - ignore this rewrite +- return +- } +- panic(x) +- } +- }() +- x.Set(y) +-} +- +-// Values/types for special cases. +-var ( +- objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) +- scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) +- +- identType = reflect.TypeOf((*ast.Ident)(nil)) +- objectPtrType = reflect.TypeOf((*ast.Object)(nil)) +- positionType = reflect.TypeOf(token.NoPos) +- callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) +- scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +-) +- +-// apply replaces each AST field x in val with f(x), returning val. +-// To avoid extra conversions, f operates on the reflect.Value form. 
+-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { +- if !val.IsValid() { +- return reflect.Value{} +- } +- +- // *ast.Objects introduce cycles and are likely incorrect after +- // rewrite; don't follow them but replace with nil instead +- if val.Type() == objectPtrType { +- return objectPtrNil +- } +- +- // similarly for scopes: they are likely incorrect after a rewrite; +- // replace them with nil +- if val.Type() == scopePtrType { +- return scopePtrNil +- } +- +- switch v := reflect.Indirect(val); v.Kind() { +- case reflect.Slice: +- for i := 0; i < v.Len(); i++ { +- e := v.Index(i) +- set(e, f(e)) +- } +- case reflect.Struct: +- for i := 0; i < v.NumField(); i++ { +- e := v.Field(i) +- set(e, f(e)) +- } +- case reflect.Interface: +- e := v.Elem() +- set(v, f(e)) +- } +- return val +-} +- +-func isWildcard(s string) bool { +- rune, size := utf8.DecodeRuneInString(s) +- return size == len(s) && unicode.IsLower(rune) +-} +- +-// match returns true if pattern matches val, +-// recording wildcard submatches in m. +-// If m == nil, match checks whether pattern == val. +-func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { +- // Wildcard matches any expression. If it appears multiple +- // times in the pattern, it must match the same expression +- // each time. +- if m != nil && pattern.IsValid() && pattern.Type() == identType { +- name := pattern.Interface().(*ast.Ident).Name +- if isWildcard(name) && val.IsValid() { +- // wildcards only match valid (non-nil) expressions. +- if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { +- if old, ok := m[name]; ok { +- return match(nil, old, val) +- } +- m[name] = val +- return true +- } +- } +- } +- +- // Otherwise, pattern and val must match recursively. +- if !pattern.IsValid() || !val.IsValid() { +- return !pattern.IsValid() && !val.IsValid() +- } +- if pattern.Type() != val.Type() { +- return false +- } +- +- // Special cases. +- switch pattern.Type() { +- case identType: +- // For identifiers, only the names need to match +- // (and none of the other *ast.Object information). +- // This is a common case, handle it all here instead +- // of recursing down any further via reflection. +- p := pattern.Interface().(*ast.Ident) +- v := val.Interface().(*ast.Ident) +- return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name +- case objectPtrType, positionType: +- // object pointers and token positions always match +- return true +- case callExprType: +- // For calls, the Ellipsis fields (token.Position) must +- // match since that is how f(x) and f(x...) are different. +- // Check them here but fall through for the remaining fields. +- p := pattern.Interface().(*ast.CallExpr) +- v := val.Interface().(*ast.CallExpr) +- if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { +- return false +- } +- } +- +- p := reflect.Indirect(pattern) +- v := reflect.Indirect(val) +- if !p.IsValid() || !v.IsValid() { +- return !p.IsValid() && !v.IsValid() +- } +- +- switch p.Kind() { +- case reflect.Slice: +- if p.Len() != v.Len() { +- return false +- } +- for i := 0; i < p.Len(); i++ { +- if !match(m, p.Index(i), v.Index(i)) { +- return false +- } +- } +- return true +- +- case reflect.Struct: +- for i := 0; i < p.NumField(); i++ { +- if !match(m, p.Field(i), v.Field(i)) { +- return false +- } +- } +- return true +- +- case reflect.Interface: +- return match(m, p.Elem(), v.Elem()) +- } +- +- // Handle token integers, etc. 
+- return p.Interface() == v.Interface() +-} +- +-// subst returns a copy of pattern with values from m substituted in place +-// of wildcards and pos used as the position of tokens from the pattern. +-// if m == nil, subst returns a copy of pattern and doesn't change the line +-// number information. +-func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { +- if !pattern.IsValid() { +- return reflect.Value{} +- } +- +- // Wildcard gets replaced with map value. +- if m != nil && pattern.Type() == identType { +- name := pattern.Interface().(*ast.Ident).Name +- if isWildcard(name) { +- if old, ok := m[name]; ok { +- return subst(nil, old, reflect.Value{}) +- } +- } +- } +- +- if pos.IsValid() && pattern.Type() == positionType { +- // use new position only if old position was valid in the first place +- if old := pattern.Interface().(token.Pos); !old.IsValid() { +- return pattern +- } +- return pos +- } +- +- // Otherwise copy. +- switch p := pattern; p.Kind() { +- case reflect.Slice: +- v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) +- for i := 0; i < p.Len(); i++ { +- v.Index(i).Set(subst(m, p.Index(i), pos)) +- } +- return v +- +- case reflect.Struct: +- v := reflect.New(p.Type()).Elem() +- for i := 0; i < p.NumField(); i++ { +- v.Field(i).Set(subst(m, p.Field(i), pos)) +- } +- return v +- +- case reflect.Ptr: +- v := reflect.New(p.Type()).Elem() +- if elem := p.Elem(); elem.IsValid() { +- v.Set(subst(m, elem, pos).Addr()) +- } +- return v +- +- case reflect.Interface: +- v := reflect.New(p.Type()).Elem() +- if elem := p.Elem(); elem.IsValid() { +- v.Set(subst(m, elem, pos)) +- } +- return v +- } +- +- return pattern +-} +diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go +deleted file mode 100644 +index 855e60aee5..0000000000 +--- a/vendor/github.com/ulikunitz/xz/example.go ++++ /dev/null +@@ -1,40 +0,0 @@ +-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "io" +- "log" +- "os" +- +- "github.com/ulikunitz/xz" +-) +- +-func main() { +- const text = "The quick brown fox jumps over the lazy dog.\n" +- var buf bytes.Buffer +- // compress text +- w, err := xz.NewWriter(&buf) +- if err != nil { +- log.Fatalf("xz.NewWriter error %s", err) +- } +- if _, err := io.WriteString(w, text); err != nil { +- log.Fatalf("WriteString error %s", err) +- } +- if err := w.Close(); err != nil { +- log.Fatalf("w.Close error %s", err) +- } +- // decompress buffer and write output to stdout +- r, err := xz.NewReader(&buf) +- if err != nil { +- log.Fatalf("NewReader error %s", err) +- } +- if _, err = io.Copy(os.Stdout, r); err != nil { +- log.Fatalf("io.Copy error %s", err) +- } +-} +diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go +deleted file mode 100644 +index 5d052781bc..0000000000 +--- a/vendor/golang.org/x/net/html/atom/gen.go ++++ /dev/null +@@ -1,712 +0,0 @@ +-// Copyright 2012 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-//go:generate go run gen.go +-//go:generate go run gen.go -test +- +-package main +- +-import ( +- "bytes" +- "flag" +- "fmt" +- "go/format" +- "io/ioutil" +- "math/rand" +- "os" +- "sort" +- "strings" +-) +- +-// identifier converts s to a Go exported identifier. +-// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +-func identifier(s string) string { +- b := make([]byte, 0, len(s)) +- cap := true +- for _, c := range s { +- if c == '-' { +- cap = true +- continue +- } +- if cap && 'a' <= c && c <= 'z' { +- c -= 'a' - 'A' +- } +- cap = false +- b = append(b, byte(c)) +- } +- return string(b) +-} +- +-var test = flag.Bool("test", false, "generate table_test.go") +- +-func genFile(name string, buf *bytes.Buffer) { +- b, err := format.Source(buf.Bytes()) +- if err != nil { +- fmt.Fprintln(os.Stderr, err) +- os.Exit(1) +- } +- if err := ioutil.WriteFile(name, b, 0644); err != nil { +- fmt.Fprintln(os.Stderr, err) +- os.Exit(1) +- } +-} +- +-func main() { +- flag.Parse() +- +- var all []string +- all = append(all, elements...) +- all = append(all, attributes...) +- all = append(all, eventHandlers...) +- all = append(all, extra...) +- sort.Strings(all) +- +- // uniq - lists have dups +- w := 0 +- for _, s := range all { +- if w == 0 || all[w-1] != s { +- all[w] = s +- w++ +- } +- } +- all = all[:w] +- +- if *test { +- var buf bytes.Buffer +- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") +- fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") +- fmt.Fprintln(&buf, "package atom\n") +- fmt.Fprintln(&buf, "var testAtomList = []string{") +- for _, s := range all { +- fmt.Fprintf(&buf, "\t%q,\n", s) +- } +- fmt.Fprintln(&buf, "}") +- +- genFile("table_test.go", &buf) +- return +- } +- +- // Find hash that minimizes table size. +- var best *table +- for i := 0; i < 1000000; i++ { +- if best != nil && 1<<(best.k-1) < len(all) { +- break +- } +- h := rand.Uint32() +- for k := uint(0); k <= 16; k++ { +- if best != nil && k >= best.k { +- break +- } +- var t table +- if t.init(h, k, all) { +- best = &t +- break +- } +- } +- } +- if best == nil { +- fmt.Fprintf(os.Stderr, "failed to construct string table\n") +- os.Exit(1) +- } +- +- // Lay out strings, using overlaps when possible. +- layout := append([]string{}, all...) +- +- // Remove strings that are substrings of other strings +- for changed := true; changed; { +- changed = false +- for i, s := range layout { +- if s == "" { +- continue +- } +- for j, t := range layout { +- if i != j && t != "" && strings.Contains(s, t) { +- changed = true +- layout[j] = "" +- } +- } +- } +- } +- +- // Join strings where one suffix matches another prefix. +- for { +- // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], +- // maximizing overlap length k. +- besti := -1 +- bestj := -1 +- bestk := 0 +- for i, s := range layout { +- if s == "" { +- continue +- } +- for j, t := range layout { +- if i == j { +- continue +- } +- for k := bestk + 1; k <= len(s) && k <= len(t); k++ { +- if s[len(s)-k:] == t[:k] { +- besti = i +- bestj = j +- bestk = k +- } +- } +- } +- } +- if bestk > 0 { +- layout[besti] += layout[bestj][bestk:] +- layout[bestj] = "" +- continue +- } +- break +- } +- +- text := strings.Join(layout, "") +- +- atom := map[string]uint32{} +- for _, s := range all { +- off := strings.Index(text, s) +- if off < 0 { +- panic("lost string " + s) +- } +- atom[s] = uint32(off<<8 | len(s)) +- } +- +- var buf bytes.Buffer +- // Generate the Go code. 
+- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") +- fmt.Fprintln(&buf, "//go:generate go run gen.go\n") +- fmt.Fprintln(&buf, "package atom\n\nconst (") +- +- // compute max len +- maxLen := 0 +- for _, s := range all { +- if maxLen < len(s) { +- maxLen = len(s) +- } +- fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) +- } +- fmt.Fprintln(&buf, ")\n") +- +- fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) +- fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) +- +- fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) +- for i, s := range best.tab { +- if s == "" { +- continue +- } +- fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) +- } +- fmt.Fprintf(&buf, "}\n") +- datasize := (1 << best.k) * 4 +- +- fmt.Fprintln(&buf, "const atomText =") +- textsize := len(text) +- for len(text) > 60 { +- fmt.Fprintf(&buf, "\t%q +\n", text[:60]) +- text = text[60:] +- } +- fmt.Fprintf(&buf, "\t%q\n\n", text) +- +- genFile("table.go", &buf) +- +- fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) +-} +- +-type byLen []string +- +-func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } +-func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +-func (x byLen) Len() int { return len(x) } +- +-// fnv computes the FNV hash with an arbitrary starting value h. +-func fnv(h uint32, s string) uint32 { +- for i := 0; i < len(s); i++ { +- h ^= uint32(s[i]) +- h *= 16777619 +- } +- return h +-} +- +-// A table represents an attempt at constructing the lookup table. +-// The lookup table uses cuckoo hashing, meaning that each string +-// can be found in one of two positions. +-type table struct { +- h0 uint32 +- k uint +- mask uint32 +- tab []string +-} +- +-// hash returns the two hashes for s. +-func (t *table) hash(s string) (h1, h2 uint32) { +- h := fnv(t.h0, s) +- h1 = h & t.mask +- h2 = (h >> 16) & t.mask +- return +-} +- +-// init initializes the table with the given parameters. +-// h0 is the initial hash value, +-// k is the number of bits of hash value to use, and +-// x is the list of strings to store in the table. +-// init returns false if the table cannot be constructed. +-func (t *table) init(h0 uint32, k uint, x []string) bool { +- t.h0 = h0 +- t.k = k +- t.tab = make([]string, 1< len(t.tab) { +- return false +- } +- s := t.tab[i] +- h1, h2 := t.hash(s) +- j := h1 + h2 - i +- if t.tab[j] != "" && !t.push(j, depth+1) { +- return false +- } +- t.tab[j] = s +- return true +-} +- +-// The lists of element names and attribute keys were taken from +-// https://html.spec.whatwg.org/multipage/indices.html#index +-// as of the "HTML Living Standard - Last Updated 16 April 2018" version. +- +-// "command", "keygen" and "menuitem" have been removed from the spec, +-// but are kept here for backwards compatibility. 
+-var elements = []string{ +- "a", +- "abbr", +- "address", +- "area", +- "article", +- "aside", +- "audio", +- "b", +- "base", +- "bdi", +- "bdo", +- "blockquote", +- "body", +- "br", +- "button", +- "canvas", +- "caption", +- "cite", +- "code", +- "col", +- "colgroup", +- "command", +- "data", +- "datalist", +- "dd", +- "del", +- "details", +- "dfn", +- "dialog", +- "div", +- "dl", +- "dt", +- "em", +- "embed", +- "fieldset", +- "figcaption", +- "figure", +- "footer", +- "form", +- "h1", +- "h2", +- "h3", +- "h4", +- "h5", +- "h6", +- "head", +- "header", +- "hgroup", +- "hr", +- "html", +- "i", +- "iframe", +- "img", +- "input", +- "ins", +- "kbd", +- "keygen", +- "label", +- "legend", +- "li", +- "link", +- "main", +- "map", +- "mark", +- "menu", +- "menuitem", +- "meta", +- "meter", +- "nav", +- "noscript", +- "object", +- "ol", +- "optgroup", +- "option", +- "output", +- "p", +- "param", +- "picture", +- "pre", +- "progress", +- "q", +- "rp", +- "rt", +- "ruby", +- "s", +- "samp", +- "script", +- "section", +- "select", +- "slot", +- "small", +- "source", +- "span", +- "strong", +- "style", +- "sub", +- "summary", +- "sup", +- "table", +- "tbody", +- "td", +- "template", +- "textarea", +- "tfoot", +- "th", +- "thead", +- "time", +- "title", +- "tr", +- "track", +- "u", +- "ul", +- "var", +- "video", +- "wbr", +-} +- +-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +-// +-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +-// but are kept here for backwards compatibility. +-var attributes = []string{ +- "abbr", +- "accept", +- "accept-charset", +- "accesskey", +- "action", +- "allowfullscreen", +- "allowpaymentrequest", +- "allowusermedia", +- "alt", +- "as", +- "async", +- "autocomplete", +- "autofocus", +- "autoplay", +- "challenge", +- "charset", +- "checked", +- "cite", +- "class", +- "color", +- "cols", +- "colspan", +- "command", +- "content", +- "contenteditable", +- "contextmenu", +- "controls", +- "coords", +- "crossorigin", +- "data", +- "datetime", +- "default", +- "defer", +- "dir", +- "dirname", +- "disabled", +- "download", +- "draggable", +- "dropzone", +- "enctype", +- "for", +- "form", +- "formaction", +- "formenctype", +- "formmethod", +- "formnovalidate", +- "formtarget", +- "headers", +- "height", +- "hidden", +- "high", +- "href", +- "hreflang", +- "http-equiv", +- "icon", +- "id", +- "inputmode", +- "integrity", +- "is", +- "ismap", +- "itemid", +- "itemprop", +- "itemref", +- "itemscope", +- "itemtype", +- "keytype", +- "kind", +- "label", +- "lang", +- "list", +- "loop", +- "low", +- "manifest", +- "max", +- "maxlength", +- "media", +- "mediagroup", +- "method", +- "min", +- "minlength", +- "multiple", +- "muted", +- "name", +- "nomodule", +- "nonce", +- "novalidate", +- "open", +- "optimum", +- "pattern", +- "ping", +- "placeholder", +- "playsinline", +- "poster", +- "preload", +- "radiogroup", +- "readonly", +- "referrerpolicy", +- "rel", +- "required", +- "reversed", +- "rows", +- "rowspan", +- "sandbox", +- "spellcheck", +- "scope", +- "scoped", +- "seamless", +- "selected", +- "shape", +- "size", +- "sizes", +- "sortable", +- "sorted", +- "slot", +- "span", +- "spellcheck", +- "src", +- "srcdoc", +- "srclang", +- "srcset", +- "start", +- "step", +- "style", +- "tabindex", +- "target", +- "title", +- "translate", +- "type", +- "typemustmatch", +- "updateviacache", +- "usemap", +- "value", 
+- "width", +- "workertype", +- "wrap", +-} +- +-// "onautocomplete", "onautocompleteerror", "onmousewheel", +-// "onshow" and "onsort" have been removed from the spec, +-// but are kept here for backwards compatibility. +-var eventHandlers = []string{ +- "onabort", +- "onautocomplete", +- "onautocompleteerror", +- "onauxclick", +- "onafterprint", +- "onbeforeprint", +- "onbeforeunload", +- "onblur", +- "oncancel", +- "oncanplay", +- "oncanplaythrough", +- "onchange", +- "onclick", +- "onclose", +- "oncontextmenu", +- "oncopy", +- "oncuechange", +- "oncut", +- "ondblclick", +- "ondrag", +- "ondragend", +- "ondragenter", +- "ondragexit", +- "ondragleave", +- "ondragover", +- "ondragstart", +- "ondrop", +- "ondurationchange", +- "onemptied", +- "onended", +- "onerror", +- "onfocus", +- "onhashchange", +- "oninput", +- "oninvalid", +- "onkeydown", +- "onkeypress", +- "onkeyup", +- "onlanguagechange", +- "onload", +- "onloadeddata", +- "onloadedmetadata", +- "onloadend", +- "onloadstart", +- "onmessage", +- "onmessageerror", +- "onmousedown", +- "onmouseenter", +- "onmouseleave", +- "onmousemove", +- "onmouseout", +- "onmouseover", +- "onmouseup", +- "onmousewheel", +- "onwheel", +- "onoffline", +- "ononline", +- "onpagehide", +- "onpageshow", +- "onpaste", +- "onpause", +- "onplay", +- "onplaying", +- "onpopstate", +- "onprogress", +- "onratechange", +- "onreset", +- "onresize", +- "onrejectionhandled", +- "onscroll", +- "onsecuritypolicyviolation", +- "onseeked", +- "onseeking", +- "onselect", +- "onshow", +- "onsort", +- "onstalled", +- "onstorage", +- "onsubmit", +- "onsuspend", +- "ontimeupdate", +- "ontoggle", +- "onunhandledrejection", +- "onunload", +- "onvolumechange", +- "onwaiting", +-} +- +-// extra are ad-hoc values not covered by any of the lists above. +-var extra = []string{ +- "acronym", +- "align", +- "annotation", +- "annotation-xml", +- "applet", +- "basefont", +- "bgsound", +- "big", +- "blink", +- "center", +- "color", +- "desc", +- "face", +- "font", +- "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. +- "foreignobject", +- "frame", +- "frameset", +- "image", +- "isindex", +- "listing", +- "malignmark", +- "marquee", +- "math", +- "mglyph", +- "mi", +- "mn", +- "mo", +- "ms", +- "mtext", +- "nobr", +- "noembed", +- "noframes", +- "plaintext", +- "prompt", +- "public", +- "rb", +- "rtc", +- "spacer", +- "strike", +- "svg", +- "system", +- "tt", +- "xmp", +-} +diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go +deleted file mode 100644 +index 4548b993db..0000000000 +--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go ++++ /dev/null +@@ -1,61 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. +-//This program must be run after mksyscall.go. 
+-package main +- +-import ( +- "bytes" +- "fmt" +- "io/ioutil" +- "log" +- "os" +- "strings" +-) +- +-func main() { +- in1, err := ioutil.ReadFile("syscall_darwin.go") +- if err != nil { +- log.Fatalf("can't open syscall_darwin.go: %s", err) +- } +- arch := os.Args[1] +- in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) +- if err != nil { +- log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) +- } +- in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) +- if err != nil { +- log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) +- } +- in := string(in1) + string(in2) + string(in3) +- +- trampolines := map[string]bool{} +- +- var out bytes.Buffer +- +- fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) +- fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") +- fmt.Fprintf(&out, "\n") +- fmt.Fprintf(&out, "// +build go1.12\n") +- fmt.Fprintf(&out, "\n") +- fmt.Fprintf(&out, "#include \"textflag.h\"\n") +- for _, line := range strings.Split(in, "\n") { +- if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { +- continue +- } +- fn := line[5 : len(line)-13] +- if !trampolines[fn] { +- trampolines[fn] = true +- fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) +- fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) +- } +- } +- err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) +- if err != nil { +- log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) +- } +-} +diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go +deleted file mode 100644 +index eb4332059a..0000000000 +--- a/vendor/golang.org/x/sys/unix/mkpost.go ++++ /dev/null +@@ -1,122 +0,0 @@ +-// Copyright 2016 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// mkpost processes the output of cgo -godefs to +-// modify the generated types. It is used to clean up +-// the sys API in an architecture specific manner. +-// +-// mkpost is run after cgo -godefs; see README.md. +-package main +- +-import ( +- "bytes" +- "fmt" +- "go/format" +- "io/ioutil" +- "log" +- "os" +- "regexp" +-) +- +-func main() { +- // Get the OS and architecture (using GOARCH_TARGET if it exists) +- goos := os.Getenv("GOOS") +- goarch := os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check that we are using the Docker-based build system if we should be. +- if goos == "linux" { +- if os.Getenv("GOLANG_SYS_BUILD") != "docker" { +- os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") +- os.Stderr.WriteString("See README.md\n") +- os.Exit(1) +- } +- } +- +- b, err := ioutil.ReadAll(os.Stdin) +- if err != nil { +- log.Fatal(err) +- } +- +- if goos == "aix" { +- // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t +- // to avoid having both StTimespec and Timespec. 
+- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) +- b = sttimespec.ReplaceAll(b, []byte("Timespec")) +- } +- +- // Intentionally export __val fields in Fsid and Sigset_t +- valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) +- b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) +- +- // Intentionally export __fds_bits field in FdSet +- fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) +- b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) +- +- // If we have empty Ptrace structs, we should delete them. Only s390x emits +- // nonempty Ptrace structs. +- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) +- b = ptraceRexexp.ReplaceAll(b, nil) +- +- // Replace the control_regs union with a blank identifier for now. +- controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) +- b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) +- +- // Remove fields that are added by glibc +- // Note that this is unstable as the identifers are private. +- removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) +- b = removeFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Convert [65]int8 to [65]byte in Utsname members to simplify +- // conversion to string; see golang.org/issue/20753 +- convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) +- b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) +- +- // Convert [1024]int8 to [1024]byte in Ptmget members +- convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) +- b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) +- +- // Remove spare fields (e.g. in Statx_t) +- spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) +- b = spareFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove cgo padding fields +- removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) +- b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove padding, hidden, or unused fields +- removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) +- b = removeFieldsRegex.ReplaceAll(b, []byte("_")) +- +- // Remove the first line of warning from cgo +- b = b[bytes.IndexByte(b, '\n')+1:] +- // Modify the command in the header to include: +- // mkpost, our own warning, and a build tag. +- replacement := fmt.Sprintf(`$1 | go run mkpost.go +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s,%s`, goarch, goos) +- cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) +- b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) +- +- // Rename Stat_t time fields +- if goos == "freebsd" && goarch == "386" { +- // Hide Stat_t.[AMCB]tim_ext fields +- renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) +- b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) +- } +- renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) +- b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) +- +- // gofmt +- b, err = format.Source(b) +- if err != nil { +- log.Fatal(err) +- } +- +- os.Stdout.Write(b) +-} +diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go +deleted file mode 100644 +index e4af9424e9..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall.go ++++ /dev/null +@@ -1,407 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_darwin.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named errno. +- +-A line beginning with //sysnb is like //sys, except that the +-goroutine will not be suspended during the execution of the system +-call. This must only be used for system calls which can never +-block, as otherwise the system call could cause all goroutines to +-hang. +-*/ +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- plan9 = flag.Bool("plan9", false, "plan9") +- openbsd = flag.Bool("openbsd", false, "openbsd") +- netbsd = flag.Bool("netbsd", false, "netbsd") +- dragonfly = flag.Bool("dragonfly", false, "dragonfly") +- arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair +- tags = flag.String("tags", "", "build tags") +- filename = flag.String("output", "", "output file name (standard output if omitted)") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- // Get the OS and architecture (using GOARCH_TARGET if it exists) +- goos := os.Getenv("GOOS") +- if goos == "" { +- fmt.Fprintln(os.Stderr, "GOOS not defined in environment") +- os.Exit(1) +- } +- goarch := os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- +- // Check that we are using the Docker-based build system if we should +- if goos == "linux" { +- if os.Getenv("GOLANG_SYS_BUILD") != "docker" { +- fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") +- fmt.Fprintf(os.Stderr, "See README.md\n") +- os.Exit(1) +- } +- } +- +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } 
+- +- libc := false +- if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { +- libc = true +- } +- trampolines := map[string]bool{} +- +- text := "" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, errno error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, sysname := f[2], f[3], f[4], f[5] +- +- // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. +- if goos == "darwin" && !libc && funct == "ClockGettime" { +- continue +- } +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // Go function header. +- outDecl := "" +- if len(out) > 0 { +- outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) +- } +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) +- +- // Check if err return available +- errvar := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- break +- } +- } +- +- // Prepare arguments to Syscall. +- var args []string +- n := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\tvar _p%d *byte\n", n) +- text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) +- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\tvar _p%d *byte\n", n) +- text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass dummy pointer in that case. +- // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
+- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) +- text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) +- args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) +- n++ +- } else if p.Type == "int64" && (*openbsd || *netbsd) { +- args = append(args, "0") +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else if endianness == "little-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } else if p.Type == "int64" && *dragonfly { +- if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { +- args = append(args, "0") +- } +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else if endianness == "little-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { +- if len(args)%2 == 1 && *arm { +- // arm abi specifies 64-bit argument uses +- // (even, odd) pair +- args = append(args, "0") +- } +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } +- +- // Determine which form to use; pad args with zeros. +- asm := "Syscall" +- if nonblock != nil { +- if errvar == "" && goos == "linux" { +- asm = "RawSyscallNoError" +- } else { +- asm = "RawSyscall" +- } +- } else { +- if errvar == "" && goos == "linux" { +- asm = "SyscallNoError" +- } +- } +- if len(args) <= 3 { +- for len(args) < 3 { +- args = append(args, "0") +- } +- } else if len(args) <= 6 { +- asm += "6" +- for len(args) < 6 { +- args = append(args, "0") +- } +- } else if len(args) <= 9 { +- asm += "9" +- for len(args) < 9 { +- args = append(args, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) +- } +- +- // System call number. +- if sysname == "" { +- sysname = "SYS_" + funct +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToUpper(sysname) +- } +- +- var libcFn string +- if libc { +- asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call +- sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ +- sysname = strings.ToLower(sysname) // lowercase +- if sysname == "getdirentries64" { +- // Special case - libSystem name and +- // raw syscall name don't match. +- sysname = "__getdirentries64" +- } +- libcFn = sysname +- sysname = "funcPC(libc_" + sysname + "_trampoline)" +- } +- +- // Actual call. +- arglist := strings.Join(args, ", ") +- call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) +- +- // Assign return values. 
+- body := "" +- ret := []string{"_", "_", "_"} +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" && !*plan9 { +- reg = "e1" +- ret[2] = reg +- doErrno = true +- } else if p.Name == "err" && *plan9 { +- ret[0] = "r0" +- ret[2] = "e1" +- break +- } else { +- reg = fmt.Sprintf("r%d", i) +- ret[i] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%s != 0", reg) +- } +- if p.Type == "int64" && endianness != "" { +- // 64-bit number in r1:r0 or r0:r1. +- if i+2 > len(out) { +- fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) +- } +- if endianness == "big-endian" { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) +- } else { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) +- } +- ret[i] = fmt.Sprintf("r%d", i) +- ret[i+1] = fmt.Sprintf("r%d", i+1) +- } +- if reg != "e1" || *plan9 { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { +- text += fmt.Sprintf("\t%s\n", call) +- } else { +- if errvar == "" && goos == "linux" { +- // raw syscall without error on Linux, see golang.org/issue/22924 +- text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) +- } else { +- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) +- } +- } +- text += body +- +- if *plan9 && ret[2] == "e1" { +- text += "\tif int32(r0) == -1 {\n" +- text += "\t\terr = e1\n" +- text += "\t}\n" +- } else if doErrno { +- text += "\tif e1 != 0 {\n" +- text += "\t\terr = errnoErr(e1)\n" +- text += "\t}\n" +- } +- text += "\treturn\n" +- text += "}\n\n" +- +- if libc && !trampolines[libcFn] { +- // some system calls share a trampoline, like read and readlen. +- trampolines[libcFn] = true +- // Declare assembly trampoline. +- text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) +- // Assembly trampoline calls the libc_* function, which this magic +- // redirects to use the function from libSystem. +- text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) +- text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) +- text += "\n" +- } +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-import ( +- "syscall" +- "unsafe" +-) +- +-var _ syscall.Errno +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +deleted file mode 100644 +index 3be3cdfc3b..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go ++++ /dev/null +@@ -1,415 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_aix.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. 
+- * If the return parameter is an error number, it must be named err. +- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +-*/ +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- aix = flag.Bool("aix", false, "aix") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- text := "" +- cExtern := "/*\n#include \n#include \n" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. 
+- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // Check if value return, err return available +- errvar := "" +- retvar := "" +- rettype := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- } else { +- retvar = p.Name +- rettype = p.Type +- } +- } +- +- // System call name. +- if sysname == "" { +- sysname = funct +- } +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. +- +- cRettype := "" +- if rettype == "unsafe.Pointer" { +- cRettype = "uintptr_t" +- } else if rettype == "uintptr" { +- cRettype = "uintptr_t" +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { +- cRettype = "uintptr_t" +- } else if rettype == "int" { +- cRettype = "int" +- } else if rettype == "int32" { +- cRettype = "int" +- } else if rettype == "int64" { +- cRettype = "long long" +- } else if rettype == "uint32" { +- cRettype = "unsigned int" +- } else if rettype == "uint64" { +- cRettype = "unsigned long long" +- } else { +- cRettype = "int" +- } +- if sysname == "exit" { +- cRettype = "void" +- } +- +- // Change p.Types to c +- var cIn []string +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "string" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t", "size_t") +- } else if p.Type == "unsafe.Pointer" { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "uintptr" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "int" { +- cIn = append(cIn, "int") +- } else if p.Type == "int32" { +- cIn = append(cIn, "int") +- } else if p.Type == "int64" { +- cIn = append(cIn, "long long") +- } else if p.Type == "uint32" { +- cIn = append(cIn, "unsigned int") +- } else if p.Type == "uint64" { +- cIn = append(cIn, "unsigned long long") +- } else { +- cIn = append(cIn, "int") +- } +- } +- +- if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- cExtern += "#define c_select select\n" +- } +- // Imports of system calls from libc +- cExtern += fmt.Sprintf("%s %s", cRettype, sysname) +- cIn := strings.Join(cIn, ", ") +- cExtern += fmt.Sprintf("(%s);\n", cIn) +- } +- +- // So file name. +- if *aix { +- if modname == "" { +- modname = "libc.a/shr_64.o" +- } else { +- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) +- os.Exit(1) +- } +- } +- +- strconvfunc := "C.CString" +- +- // Go function header. +- if outps != "" { +- outps = fmt.Sprintf(" (%s)", outps) +- } +- if text != "" { +- text += "\n" +- } +- +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) +- +- // Prepare arguments to Syscall. 
+- var args []string +- n := 0 +- argN := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. +- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) +- n++ +- text += fmt.Sprintf("\tvar _p%d int\n", n) +- text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) +- args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- n++ +- } else if p.Type == "bool" { +- text += fmt.Sprintf("\tvar _p%d uint32\n", n) +- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) +- args = append(args, fmt.Sprintf("_p%d", n)) +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) +- } else if p.Type == "unsafe.Pointer" { +- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) +- } else if p.Type == "int" { +- if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { +- args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) +- } else if argN == 0 && funct == "fcntl" { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- } else if p.Type == "int32" { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } else if p.Type == "int64" { +- args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) +- } else if p.Type == "uint32" { +- args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) +- } else if p.Type == "uint64" { +- args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) +- } else if p.Type == "uintptr" { +- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- argN++ +- } +- +- // Actual call. 
+- arglist := strings.Join(args, ", ") +- call := "" +- if sysname == "exit" { +- if errvar != "" { +- call += "er :=" +- } else { +- call += "" +- } +- } else if errvar != "" { +- call += "r0,er :=" +- } else if retvar != "" { +- call += "r0,_ :=" +- } else { +- call += "" +- } +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) +- } else { +- call += fmt.Sprintf("C.%s(%s)", sysname, arglist) +- } +- +- // Assign return values. +- body := "" +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- } else { +- reg = "r0" +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- +- // verify return +- if sysname != "exit" && errvar != "" { +- if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { +- body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } else { +- body += "\tif (r0 ==-1 && er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } +- } else if errvar != "" { +- body += "\tif (er != nil) {\n" +- body += fmt.Sprintf("\t\t%s = er\n", errvar) +- body += "\t}\n" +- } +- +- text += fmt.Sprintf("\t%s\n", call) +- text += body +- +- text += "\treturn\n" +- text += "}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package %s +- +- +-%s +-*/ +-import "C" +-import ( +- "unsafe" +-) +- +- +-%s +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +deleted file mode 100644 +index c960099517..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go ++++ /dev/null +@@ -1,614 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-This program reads a file containing function prototypes +-(like syscall_aix.go) and generates system call bodies. +-The prototypes are marked by lines beginning with "//sys" +-and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named err. 
+- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +- +- +-This program will generate three files and handle both gc and gccgo implementation: +- - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) +- - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 +- - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. +- +- The generated code looks like this +- +-zsyscall_aix_ppc64.go +-func asyscall(...) (n int, err error) { +- // Pointer Creation +- r1, e1 := callasyscall(...) +- // Type Conversion +- // Error Handler +- return +-} +- +-zsyscall_aix_ppc64_gc.go +-//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" +-//go:linkname libc_asyscall libc_asyscall +-var asyscall syscallFunc +- +-func callasyscall(...) (r1 uintptr, e1 Errno) { +- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) +- return +-} +- +-zsyscall_aix_ppc64_ggcgo.go +- +-// int asyscall(...) +- +-import "C" +- +-func callasyscall(...) (r1 uintptr, e1 Errno) { +- r1 = uintptr(C.asyscall(...)) +- e1 = syscall.GetErrno() +- return +-} +-*/ +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "io/ioutil" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- aix = flag.Bool("aix", false, "aix") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- // GCCGO +- textgccgo := "" +- cExtern := "/*\n#include \n" +- // GC +- textgc := "" +- dynimports := "" +- linknames := "" +- var vars []string +- // COMMON +- textcommon := "" +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = 
regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- if sysname == "" { +- sysname = funct +- } +- +- onlyCommon := false +- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { +- // This function call another syscall which is already implemented. +- // Therefore, the gc and gccgo part must not be generated. +- onlyCommon = true +- } +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- +- textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- if !onlyCommon { +- textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- } +- +- // Check if value return, err return available +- errvar := "" +- rettype := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- } else { +- rettype = p.Type +- } +- } +- +- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. 
+- +- // GCCGO Prototype return type +- cRettype := "" +- if rettype == "unsafe.Pointer" { +- cRettype = "uintptr_t" +- } else if rettype == "uintptr" { +- cRettype = "uintptr_t" +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { +- cRettype = "uintptr_t" +- } else if rettype == "int" { +- cRettype = "int" +- } else if rettype == "int32" { +- cRettype = "int" +- } else if rettype == "int64" { +- cRettype = "long long" +- } else if rettype == "uint32" { +- cRettype = "unsigned int" +- } else if rettype == "uint64" { +- cRettype = "unsigned long long" +- } else { +- cRettype = "int" +- } +- if sysname == "exit" { +- cRettype = "void" +- } +- +- // GCCGO Prototype arguments type +- var cIn []string +- for i, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "string" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t", "size_t") +- } else if p.Type == "unsafe.Pointer" { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "uintptr" { +- cIn = append(cIn, "uintptr_t") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { +- cIn = append(cIn, "uintptr_t") +- } else if p.Type == "int" { +- if (i == 0 || i == 2) && funct == "fcntl" { +- // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock +- cIn = append(cIn, "uintptr_t") +- } else { +- cIn = append(cIn, "int") +- } +- +- } else if p.Type == "int32" { +- cIn = append(cIn, "int") +- } else if p.Type == "int64" { +- cIn = append(cIn, "long long") +- } else if p.Type == "uint32" { +- cIn = append(cIn, "unsigned int") +- } else if p.Type == "uint64" { +- cIn = append(cIn, "unsigned long long") +- } else { +- cIn = append(cIn, "int") +- } +- } +- +- if !onlyCommon { +- // GCCGO Prototype Generation +- // Imports of system calls from libc +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- cExtern += "#define c_select select\n" +- } +- cExtern += fmt.Sprintf("%s %s", cRettype, sysname) +- cIn := strings.Join(cIn, ", ") +- cExtern += fmt.Sprintf("(%s);\n", cIn) +- } +- // GC Library name +- if modname == "" { +- modname = "libc.a/shr_64.o" +- } else { +- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) +- os.Exit(1) +- } +- sysvarname := fmt.Sprintf("libc_%s", sysname) +- +- if !onlyCommon { +- // GC Runtime import of function to allow cross-platform builds. +- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) +- // GC Link symbol to proc address variable. +- linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) +- // GC Library proc address variable. +- vars = append(vars, sysvarname) +- } +- +- strconvfunc := "BytePtrFromString" +- strconvtype := "*byte" +- +- // Go function header. +- if outps != "" { +- outps = fmt.Sprintf(" (%s)", outps) +- } +- if textcommon != "" { +- textcommon += "\n" +- } +- +- textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) +- +- // Prepare arguments tocall. 
+- var argscommon []string // Arguments in the common part +- var argscall []string // Arguments for call prototype +- var argsgc []string // Arguments for gc call (with syscall6) +- var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) +- n := 0 +- argN := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if p.Type == "string" && errvar != "" { +- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) +- n++ +- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. +- textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) +- textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) +- argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") +- } else if p.Type == "bool" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") +- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { +- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else if p.Type == "int" { +- if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { +- // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock +- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- +- } else { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- } else if p.Type == "int32" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } else if p.Type == "int64" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) +- } else if p.Type == "uint32" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) +- } else if p.Type == "uint64" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) +- } else if p.Type == "uintptr" { +- argscommon = append(argscommon, p.Name) +- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) +- argsgc = append(argsgc, p.Name) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) +- } else { +- argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) +- argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) +- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) +- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) +- } +- argN++ +- } +- nargs := len(argsgc) +- +- // COMMON function generation +- argscommonlist := strings.Join(argscommon, ", ") +- callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) +- ret := []string{"_", "_"} +- body := "" +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- ret[1] = reg +- doErrno = true +- } else { +- reg = "r0" +- ret[0] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%s != 0", reg) +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" { +- textcommon += fmt.Sprintf("\t%s\n", callcommon) +- } else { +- textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) +- } +- textcommon += body +- 
+- if doErrno { +- textcommon += "\tif e1 != 0 {\n" +- textcommon += "\t\terr = errnoErr(e1)\n" +- textcommon += "\t}\n" +- } +- textcommon += "\treturn\n" +- textcommon += "}\n" +- +- if onlyCommon { +- continue +- } +- +- // CALL Prototype +- callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) +- +- // GC function generation +- asm := "syscall6" +- if nonblock != nil { +- asm = "rawSyscall6" +- } +- +- if len(argsgc) <= 6 { +- for len(argsgc) < 6 { +- argsgc = append(argsgc, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) +- os.Exit(1) +- } +- argsgclist := strings.Join(argsgc, ", ") +- callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) +- +- textgc += callProto +- textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) +- textgc += "\treturn\n}\n" +- +- // GCCGO function generation +- argsgccgolist := strings.Join(argsgccgo, ", ") +- var callgccgo string +- if sysname == "select" { +- // select is a keyword of Go. Its name is +- // changed to c_select. +- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) +- } else { +- callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) +- } +- textgccgo += callProto +- textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) +- textgccgo += "\te1 = syscall.GetErrno()\n" +- textgccgo += "\treturn\n}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- +- // Print zsyscall_aix_ppc64.go +- err := ioutil.WriteFile("zsyscall_aix_ppc64.go", +- []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- +- // Print zsyscall_aix_ppc64_gc.go +- vardecls := "\t" + strings.Join(vars, ",\n\t") +- vardecls += " syscallFunc" +- err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", +- []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- +- // Print zsyscall_aix_ppc64_gccgo.go +- err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", +- []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), +- 0644) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +-} +- +-const srcTemplate1 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package %s +- +-import ( +- "unsafe" +-) +- +- +-%s +- +-%s +-` +-const srcTemplate2 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +-// +build !gccgo +- +-package %s +- +-import ( +- "unsafe" +-) +-%s +-%s +-%s +-type syscallFunc uintptr +- +-var ( +-%s +-) +- +-// Implemented in runtime/syscall_aix.go. +-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +- +-%s +-` +-const srcTemplate3 = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. 
+- +-// +build %s +-// +build gccgo +- +-package %s +- +-%s +-*/ +-import "C" +-import ( +- "syscall" +-) +- +- +-%s +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +deleted file mode 100644 +index 3d864738b6..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go ++++ /dev/null +@@ -1,335 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +- This program reads a file containing function prototypes +- (like syscall_solaris.go) and generates system call bodies. +- The prototypes are marked by lines beginning with "//sys" +- and read like func declarations if //sys is replaced by func, but: +- * The parameter lists must give a name for each argument. +- This includes return parameters. +- * The parameter lists must give a type for each argument: +- the (x, y, z int) shorthand is not allowed. +- * If the return parameter is an error number, it must be named err. +- * If go func name needs to be different than its libc name, +- * or the function is not in libc, name could be specified +- * at the end, after "=" sign, like +- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +-*/ +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- b32 = flag.Bool("b32", false, "32bit big-endian") +- l32 = flag.Bool("l32", false, "32bit little-endian") +- tags = flag.String("tags", "", "build tags") +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return *tags +-} +- +-// Param is function parameter +-type Param struct { +- Name string +- Type string +-} +- +-// usage prints the program usage +-func usage() { +- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") +- os.Exit(1) +-} +- +-// parseParamList parses parameter list and returns a slice of parameters +-func parseParamList(list string) []string { +- list = strings.TrimSpace(list) +- if list == "" { +- return []string{} +- } +- return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +-} +- +-// parseParam splits a parameter into name and type +-func parseParam(p string) Param { +- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) +- if ps == nil { +- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) +- os.Exit(1) +- } +- return Param{ps[1], ps[2]} +-} +- +-func main() { +- flag.Usage = usage +- flag.Parse() +- if len(flag.Args()) <= 0 { +- fmt.Fprintf(os.Stderr, "no files to parse provided\n") +- usage() +- } +- +- endianness := "" +- if *b32 { +- endianness = "big-endian" +- } else if *l32 { +- endianness = "little-endian" +- } +- +- pack := "" +- text := "" +- dynimports := "" +- linknames := "" +- var vars []string +- for _, path := range flag.Args() { +- file, err := os.Open(path) +- if err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- t := s.Text() +- t = strings.TrimSpace(t) +- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) +- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { +- pack = p[1] +- } +- nonblock := 
regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) +- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { +- continue +- } +- +- // Line must be of the form +- // func Open(path string, mode int, perm int) (fd int, err error) +- // Split into name, in params, out params. +- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) +- if f == nil { +- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) +- os.Exit(1) +- } +- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] +- +- // Split argument lists on comma. +- in := parseParamList(inps) +- out := parseParamList(outps) +- +- inps = strings.Join(in, ", ") +- outps = strings.Join(out, ", ") +- +- // Try in vain to keep people from editing this file. +- // The theory is that they jump into the middle of the file +- // without reading the header. +- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" +- +- // So file name. +- if modname == "" { +- modname = "libc" +- } +- +- // System call name. +- if sysname == "" { +- sysname = funct +- } +- +- // System call pointer variable name. +- sysvarname := fmt.Sprintf("proc%s", sysname) +- +- strconvfunc := "BytePtrFromString" +- strconvtype := "*byte" +- +- sysname = strings.ToLower(sysname) // All libc functions are lowercase. +- +- // Runtime import of function to allow cross-platform builds. +- dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) +- // Link symbol to proc address variable. +- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) +- // Library proc address variable. +- vars = append(vars, sysvarname) +- +- // Go function header. +- outlist := strings.Join(out, ", ") +- if outlist != "" { +- outlist = fmt.Sprintf(" (%s)", outlist) +- } +- if text != "" { +- text += "\n" +- } +- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) +- +- // Check if err return available +- errvar := "" +- for _, param := range out { +- p := parseParam(param) +- if p.Type == "error" { +- errvar = p.Name +- continue +- } +- } +- +- // Prepare arguments to Syscall. +- var args []string +- n := 0 +- for _, param := range in { +- p := parseParam(param) +- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { +- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") +- } else if p.Type == "string" && errvar != "" { +- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) +- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if p.Type == "string" { +- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") +- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) +- text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) +- n++ +- } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { +- // Convert slice into pointer, length. +- // Have to be careful not to take address of &a[0] if len == 0: +- // pass nil in that case. 
+- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) +- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) +- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) +- n++ +- } else if p.Type == "int64" && endianness != "" { +- if endianness == "big-endian" { +- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) +- } +- } else if p.Type == "bool" { +- text += fmt.Sprintf("\tvar _p%d uint32\n", n) +- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) +- args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) +- n++ +- } else { +- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) +- } +- } +- nargs := len(args) +- +- // Determine which form to use; pad args with zeros. +- asm := "sysvicall6" +- if nonblock != nil { +- asm = "rawSysvicall6" +- } +- if len(args) <= 6 { +- for len(args) < 6 { +- args = append(args, "0") +- } +- } else { +- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) +- os.Exit(1) +- } +- +- // Actual call. +- arglist := strings.Join(args, ", ") +- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) +- +- // Assign return values. +- body := "" +- ret := []string{"_", "_", "_"} +- doErrno := false +- for i := 0; i < len(out); i++ { +- p := parseParam(out[i]) +- reg := "" +- if p.Name == "err" { +- reg = "e1" +- ret[2] = reg +- doErrno = true +- } else { +- reg = fmt.Sprintf("r%d", i) +- ret[i] = reg +- } +- if p.Type == "bool" { +- reg = fmt.Sprintf("%d != 0", reg) +- } +- if p.Type == "int64" && endianness != "" { +- // 64-bit number in r1:r0 or r0:r1. +- if i+2 > len(out) { +- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) +- os.Exit(1) +- } +- if endianness == "big-endian" { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) +- } else { +- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) +- } +- ret[i] = fmt.Sprintf("r%d", i) +- ret[i+1] = fmt.Sprintf("r%d", i+1) +- } +- if reg != "e1" { +- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) +- } +- } +- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { +- text += fmt.Sprintf("\t%s\n", call) +- } else { +- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) +- } +- text += body +- +- if doErrno { +- text += "\tif e1 != 0 {\n" +- text += "\t\terr = e1\n" +- text += "\t}\n" +- } +- text += "\treturn\n" +- text += "}\n" +- } +- if err := s.Err(); err != nil { +- fmt.Fprintf(os.Stderr, err.Error()) +- os.Exit(1) +- } +- file.Close() +- } +- imp := "" +- if pack != "unix" { +- imp = "import \"golang.org/x/sys/unix\"\n" +- +- } +- vardecls := "\t" + strings.Join(vars, ",\n\t") +- vardecls += " syscallFunc" +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. 
+- +-// +build %s +- +-package %s +- +-import ( +- "syscall" +- "unsafe" +-) +-%s +-%s +-%s +-var ( +-%s +-) +- +-%s +-` +diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +deleted file mode 100644 +index b6b409909c..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go ++++ /dev/null +@@ -1,355 +0,0 @@ +-// Copyright 2019 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. +-// +-// Build a MIB with each entry being an array containing the level, type and +-// a hash that will contain additional entries if the current entry is a node. +-// We then walk this MIB and create a flattened sysctl name to OID hash. +- +-package main +- +-import ( +- "bufio" +- "fmt" +- "os" +- "path/filepath" +- "regexp" +- "sort" +- "strings" +-) +- +-var ( +- goos, goarch string +-) +- +-// cmdLine returns this programs's commandline arguments. +-func cmdLine() string { +- return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags. +-func buildTags() string { +- return fmt.Sprintf("%s,%s", goarch, goos) +-} +- +-// reMatch performs regular expression match and stores the substring slice to value pointed by m. +-func reMatch(re *regexp.Regexp, str string, m *[]string) bool { +- *m = re.FindStringSubmatch(str) +- if *m != nil { +- return true +- } +- return false +-} +- +-type nodeElement struct { +- n int +- t string +- pE *map[string]nodeElement +-} +- +-var ( +- debugEnabled bool +- mib map[string]nodeElement +- node *map[string]nodeElement +- nodeMap map[string]string +- sysCtl []string +-) +- +-var ( +- ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) +- ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) +- ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) +- netInetRE = regexp.MustCompile(`^netinet/`) +- netInet6RE = regexp.MustCompile(`^netinet6/`) +- netRE = regexp.MustCompile(`^net/`) +- bracesRE = regexp.MustCompile(`{.*}`) +- ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) +- fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) +-) +- +-func debug(s string) { +- if debugEnabled { +- fmt.Fprintln(os.Stderr, s) +- } +-} +- +-// Walk the MIB and build a sysctl name to OID mapping. +-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { +- lNode := pNode // local copy of pointer to node +- var keys []string +- for k := range *lNode { +- keys = append(keys, k) +- } +- sort.Strings(keys) +- +- for _, key := range keys { +- nodename := name +- if name != "" { +- nodename += "." 
+- } +- nodename += key +- +- nodeoid := append(oid, (*pNode)[key].n) +- +- if (*pNode)[key].t == `CTLTYPE_NODE` { +- if _, ok := nodeMap[nodename]; ok { +- lNode = &mib +- ctlName := nodeMap[nodename] +- for _, part := range strings.Split(ctlName, ".") { +- lNode = ((*lNode)[part]).pE +- } +- } else { +- lNode = (*pNode)[key].pE +- } +- buildSysctl(lNode, nodename, nodeoid) +- } else if (*pNode)[key].t != "" { +- oidStr := []string{} +- for j := range nodeoid { +- oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) +- } +- text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" +- sysCtl = append(sysCtl, text) +- } +- } +-} +- +-func main() { +- // Get the OS (using GOOS_TARGET if it exist) +- goos = os.Getenv("GOOS_TARGET") +- if goos == "" { +- goos = os.Getenv("GOOS") +- } +- // Get the architecture (using GOARCH_TARGET if it exists) +- goarch = os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check if GOOS and GOARCH environment variables are defined +- if goarch == "" || goos == "" { +- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") +- os.Exit(1) +- } +- +- mib = make(map[string]nodeElement) +- headers := [...]string{ +- `sys/sysctl.h`, +- `sys/socket.h`, +- `sys/tty.h`, +- `sys/malloc.h`, +- `sys/mount.h`, +- `sys/namei.h`, +- `sys/sem.h`, +- `sys/shm.h`, +- `sys/vmmeter.h`, +- `uvm/uvmexp.h`, +- `uvm/uvm_param.h`, +- `uvm/uvm_swap_encrypt.h`, +- `ddb/db_var.h`, +- `net/if.h`, +- `net/if_pfsync.h`, +- `net/pipex.h`, +- `netinet/in.h`, +- `netinet/icmp_var.h`, +- `netinet/igmp_var.h`, +- `netinet/ip_ah.h`, +- `netinet/ip_carp.h`, +- `netinet/ip_divert.h`, +- `netinet/ip_esp.h`, +- `netinet/ip_ether.h`, +- `netinet/ip_gre.h`, +- `netinet/ip_ipcomp.h`, +- `netinet/ip_ipip.h`, +- `netinet/pim_var.h`, +- `netinet/tcp_var.h`, +- `netinet/udp_var.h`, +- `netinet6/in6.h`, +- `netinet6/ip6_divert.h`, +- `netinet6/pim6_var.h`, +- `netinet/icmp6.h`, +- `netmpls/mpls.h`, +- } +- +- ctls := [...]string{ +- `kern`, +- `vm`, +- `fs`, +- `net`, +- //debug /* Special handling required */ +- `hw`, +- //machdep /* Arch specific */ +- `user`, +- `ddb`, +- //vfs /* Special handling required */ +- `fs.posix`, +- `kern.forkstat`, +- `kern.intrcnt`, +- `kern.malloc`, +- `kern.nchstats`, +- `kern.seminfo`, +- `kern.shminfo`, +- `kern.timecounter`, +- `kern.tty`, +- `kern.watchdog`, +- `net.bpf`, +- `net.ifq`, +- `net.inet`, +- `net.inet.ah`, +- `net.inet.carp`, +- `net.inet.divert`, +- `net.inet.esp`, +- `net.inet.etherip`, +- `net.inet.gre`, +- `net.inet.icmp`, +- `net.inet.igmp`, +- `net.inet.ip`, +- `net.inet.ip.ifq`, +- `net.inet.ipcomp`, +- `net.inet.ipip`, +- `net.inet.mobileip`, +- `net.inet.pfsync`, +- `net.inet.pim`, +- `net.inet.tcp`, +- `net.inet.udp`, +- `net.inet6`, +- `net.inet6.divert`, +- `net.inet6.ip6`, +- `net.inet6.icmp6`, +- `net.inet6.pim6`, +- `net.inet6.tcp6`, +- `net.inet6.udp6`, +- `net.mpls`, +- `net.mpls.ifq`, +- `net.key`, +- `net.pflow`, +- `net.pfsync`, +- `net.pipex`, +- `net.rt`, +- `vm.swapencrypt`, +- //vfsgenctl /* Special handling required */ +- } +- +- // Node name "fixups" +- ctlMap := map[string]string{ +- "ipproto": "net.inet", +- "net.inet.ipproto": "net.inet", +- "net.inet6.ipv6proto": "net.inet6", +- "net.inet6.ipv6": "net.inet6.ip6", +- "net.inet.icmpv6": "net.inet6.icmp6", +- "net.inet6.divert6": "net.inet6.divert", +- "net.inet6.tcp6": "net.inet.tcp", +- "net.inet6.udp6": "net.inet.udp", +- "mpls": "net.mpls", +- "swpenc": "vm.swapencrypt", +- } +- +- // Node 
mappings +- nodeMap = map[string]string{ +- "net.inet.ip.ifq": "net.ifq", +- "net.inet.pfsync": "net.pfsync", +- "net.mpls.ifq": "net.ifq", +- } +- +- mCtls := make(map[string]bool) +- for _, ctl := range ctls { +- mCtls[ctl] = true +- } +- +- for _, header := range headers { +- debug("Processing " + header) +- file, err := os.Open(filepath.Join("/usr/include", header)) +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +- s := bufio.NewScanner(file) +- for s.Scan() { +- var sub []string +- if reMatch(ctlNames1RE, s.Text(), &sub) || +- reMatch(ctlNames2RE, s.Text(), &sub) || +- reMatch(ctlNames3RE, s.Text(), &sub) { +- if sub[1] == `CTL_NAMES` { +- // Top level. +- node = &mib +- } else { +- // Node. +- nodename := strings.ToLower(sub[2]) +- ctlName := "" +- if reMatch(netInetRE, header, &sub) { +- ctlName = "net.inet." + nodename +- } else if reMatch(netInet6RE, header, &sub) { +- ctlName = "net.inet6." + nodename +- } else if reMatch(netRE, header, &sub) { +- ctlName = "net." + nodename +- } else { +- ctlName = nodename +- ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) +- } +- +- if val, ok := ctlMap[ctlName]; ok { +- ctlName = val +- } +- if _, ok := mCtls[ctlName]; !ok { +- debug("Ignoring " + ctlName + "...") +- continue +- } +- +- // Walk down from the top of the MIB. +- node = &mib +- for _, part := range strings.Split(ctlName, ".") { +- if _, ok := (*node)[part]; !ok { +- debug("Missing node " + part) +- (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} +- } +- node = (*node)[part].pE +- } +- } +- +- // Populate current node with entries. +- i := -1 +- for !strings.HasPrefix(s.Text(), "}") { +- s.Scan() +- if reMatch(bracesRE, s.Text(), &sub) { +- i++ +- } +- if !reMatch(ctlTypeRE, s.Text(), &sub) { +- continue +- } +- (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} +- } +- } +- } +- err = s.Err() +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +- file.Close() +- } +- buildSysctl(&mib, "", []int{}) +- +- sort.Strings(sysCtl) +- text := strings.Join(sysCtl, "") +- +- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +-} +- +-const srcTemplate = `// %s +-// Code generated by the command above; DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-type mibentry struct { +- ctlname string +- ctloid []_C_int +-} +- +-var sysctlMib = []mibentry { +-%s +-} +-` +diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go +deleted file mode 100644 +index baa6ecd850..0000000000 +--- a/vendor/golang.org/x/sys/unix/mksysnum.go ++++ /dev/null +@@ -1,190 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Generate system call table for DragonFly, NetBSD, +-// FreeBSD, OpenBSD or Darwin from master list +-// (for example, /usr/src/sys/kern/syscalls.master or +-// sys/syscall.h). 
+-package main +- +-import ( +- "bufio" +- "fmt" +- "io" +- "io/ioutil" +- "net/http" +- "os" +- "regexp" +- "strings" +-) +- +-var ( +- goos, goarch string +-) +- +-// cmdLine returns this programs's commandline arguments +-func cmdLine() string { +- return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") +-} +- +-// buildTags returns build tags +-func buildTags() string { +- return fmt.Sprintf("%s,%s", goarch, goos) +-} +- +-func checkErr(err error) { +- if err != nil { +- fmt.Fprintf(os.Stderr, "%v\n", err) +- os.Exit(1) +- } +-} +- +-// source string and substring slice for regexp +-type re struct { +- str string // source string +- sub []string // matched sub-string +-} +- +-// Match performs regular expression match +-func (r *re) Match(exp string) bool { +- r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) +- if r.sub != nil { +- return true +- } +- return false +-} +- +-// fetchFile fetches a text file from URL +-func fetchFile(URL string) io.Reader { +- resp, err := http.Get(URL) +- checkErr(err) +- defer resp.Body.Close() +- body, err := ioutil.ReadAll(resp.Body) +- checkErr(err) +- return strings.NewReader(string(body)) +-} +- +-// readFile reads a text file from path +-func readFile(path string) io.Reader { +- file, err := os.Open(os.Args[1]) +- checkErr(err) +- return file +-} +- +-func format(name, num, proto string) string { +- name = strings.ToUpper(name) +- // There are multiple entries for enosys and nosys, so comment them out. +- nm := re{str: name} +- if nm.Match(`^SYS_E?NOSYS$`) { +- name = fmt.Sprintf("// %s", name) +- } +- if name == `SYS_SYS_EXIT` { +- name = `SYS_EXIT` +- } +- return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +-} +- +-func main() { +- // Get the OS (using GOOS_TARGET if it exist) +- goos = os.Getenv("GOOS_TARGET") +- if goos == "" { +- goos = os.Getenv("GOOS") +- } +- // Get the architecture (using GOARCH_TARGET if it exists) +- goarch = os.Getenv("GOARCH_TARGET") +- if goarch == "" { +- goarch = os.Getenv("GOARCH") +- } +- // Check if GOOS and GOARCH environment variables are defined +- if goarch == "" || goos == "" { +- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") +- os.Exit(1) +- } +- +- file := strings.TrimSpace(os.Args[1]) +- var syscalls io.Reader +- if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { +- // Download syscalls.master file +- syscalls = fetchFile(file) +- } else { +- syscalls = readFile(file) +- } +- +- var text, line string +- s := bufio.NewScanner(syscalls) +- for s.Scan() { +- t := re{str: line} +- if t.Match(`^(.*)\\$`) { +- // Handle continuation +- line = t.sub[1] +- line += strings.TrimLeft(s.Text(), " \t") +- } else { +- // New line +- line = s.Text() +- } +- t = re{str: line} +- if t.Match(`\\$`) { +- continue +- } +- t = re{str: line} +- +- switch goos { +- case "dragonfly": +- if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { +- num, proto := t.sub[1], t.sub[2] +- name := fmt.Sprintf("SYS_%s", t.sub[3]) +- text += format(name, num, proto) +- } +- case "freebsd": +- if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { +- num, proto := t.sub[1], t.sub[2] +- name := fmt.Sprintf("SYS_%s", t.sub[3]) +- text += format(name, num, proto) +- } +- case "openbsd": +- if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { +- num, proto, name := t.sub[1], t.sub[3], t.sub[4] +- text += format(name, num, proto) +- } +- case "netbsd": +- if 
t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { +- num, proto, compat := t.sub[1], t.sub[6], t.sub[8] +- name := t.sub[7] + "_" + t.sub[9] +- if t.sub[11] != "" { +- name = t.sub[7] + "_" + t.sub[11] +- } +- name = strings.ToUpper(name) +- if compat == "" || compat == "13" || compat == "30" || compat == "50" { +- text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +- } +- } +- case "darwin": +- if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { +- name, num := t.sub[1], t.sub[2] +- name = strings.ToUpper(name) +- text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) +- } +- default: +- fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) +- os.Exit(1) +- +- } +- } +- err := s.Err() +- checkErr(err) +- +- fmt.Printf(template, cmdLine(), buildTags(), text) +-} +- +-const template = `// %s +-// Code generated by the command above; see README.md. DO NOT EDIT. +- +-// +build %s +- +-package unix +- +-const( +-%s)` +diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go +deleted file mode 100644 +index 40d2beede5..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_aix.go ++++ /dev/null +@@ -1,237 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +-// +build aix +- +-/* +-Input to cgo -godefs. See also mkerrors.sh and mkall.sh +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include +- +-#include +-#include +-#include +-#include +- +- +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +- PathMax = C.PATH_MAX +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-type off64 C.off64_t +-type off C.off_t +-type Mode_t C.mode_t +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-type Timex C.struct_timex +- +-type Time_t C.time_t +- +-type Tms C.struct_tms +- +-type Utimbuf C.struct_utimbuf +- +-type Timezone C.struct_timezone +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit64 +- +-type Pid_t C.pid_t +- +-type _Gid_t C.gid_t +- +-type dev_t C.dev_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type StatxTimestamp C.struct_statx_timestamp +- +-type Statx_t C.struct_statx +- +-type Dirent C.struct_dirent +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny 
C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Cmsghdr C.struct_cmsghdr +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type Linger C.struct_linger +- +-type Msghdr C.struct_msghdr +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +-) +- +-type IfMsgHdr C.struct_if_msghdr +- +-// Misc +- +-type FdSet C.fd_set +- +-type Utsname C.struct_utsname +- +-type Ustat_t C.struct_ustat +- +-type Sigset_t C.sigset_t +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Termio C.struct_termio +- +-type Winsize C.struct_winsize +- +-//poll +- +-type PollFd struct { +- Fd int32 +- Events uint16 +- Revents uint16 +-} +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-//flock_t +- +-type Flock_t C.struct_flock64 +- +-// Statfs +- +-type Fsid_t C.struct_fsid_t +-type Fsid64_t C.struct_fsid64_t +- +-type Statfs_t C.struct_statfs +- +-const RNDGETENTCNT = 0x80045200 +diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go +deleted file mode 100644 +index 155c2e692b..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_darwin.go ++++ /dev/null +@@ -1,283 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define __DARWIN_UNIX03 0 +-#define KERNEL +-#define _DARWIN_USE_64_BIT_INODE +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat64 +- +-type Statfs_t C.struct_statfs64 +- +-type Flock_t C.struct_flock +- +-type Fstore_t C.struct_fstore +- +-type Radvisory_t C.struct_radvisory +- +-type Fbootstraptransfer_t C.struct_fbootstraptransfer +- +-type Log2phys_t C.struct_log2phys +- +-type Fsid C.struct_fsid +- +-type Dirent C.struct_dirent +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet4Pktinfo C.struct_in_pktinfo +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set 
+- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfmaMsghdr2 C.struct_ifma_msghdr2 +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// uname +- +-type Utsname C.struct_utsname +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go +deleted file mode 100644 +index 3365dd79d0..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_dragonfly.go ++++ /dev/null +@@ -1,263 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.struct_fsid +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = 
C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Uname +- +-type Utsname C.struct_utsname +diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go +deleted file mode 100644 +index a121dc3368..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_freebsd.go ++++ /dev/null +@@ -1,400 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define _WANT_FREEBSD11_STAT 1 +-#define _WANT_FREEBSD11_STATFS 1 +-#define _WANT_FREEBSD11_DIRENT 1 +-#define _WANT_FREEBSD11_KEVENT 1 +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +-// See /usr/include/net/if.h. 
+-struct if_data8 { +- u_char ifi_type; +- u_char ifi_physical; +- u_char ifi_addrlen; +- u_char ifi_hdrlen; +- u_char ifi_link_state; +- u_char ifi_spare_char1; +- u_char ifi_spare_char2; +- u_char ifi_datalen; +- u_long ifi_mtu; +- u_long ifi_metric; +- u_long ifi_baudrate; +- u_long ifi_ipackets; +- u_long ifi_ierrors; +- u_long ifi_opackets; +- u_long ifi_oerrors; +- u_long ifi_collisions; +- u_long ifi_ibytes; +- u_long ifi_obytes; +- u_long ifi_imcasts; +- u_long ifi_omcasts; +- u_long ifi_iqdrops; +- u_long ifi_noproto; +- u_long ifi_hwassist; +-// FIXME: these are now unions, so maybe need to change definitions? +-#undef ifi_epoch +- time_t ifi_epoch; +-#undef ifi_lastchange +- struct timeval ifi_lastchange; +-}; +- +-// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +-// See /usr/include/net/if.h. +-struct if_msghdr8 { +- u_short ifm_msglen; +- u_char ifm_version; +- u_char ifm_type; +- int ifm_addrs; +- int ifm_flags; +- u_short ifm_index; +- struct if_data8 ifm_data; +-}; +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-const ( +- _statfsVersion = C.STATFS_VERSION +- _dirblksiz = C.DIRBLKSIZ +-) +- +-type Stat_t C.struct_stat +- +-type stat_freebsd11_t C.struct_freebsd11_stat +- +-type Statfs_t C.struct_statfs +- +-type statfs_freebsd11_t C.struct_freebsd11_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type dirent_freebsd11 C.struct_freebsd11_dirent +- +-type Fsid C.struct_fsid +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Advice to Fadvise +- +-const ( +- FADV_NORMAL = C.POSIX_FADV_NORMAL +- FADV_RANDOM = C.POSIX_FADV_RANDOM +- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL +- FADV_WILLNEED = C.POSIX_FADV_WILLNEED +- FADV_DONTNEED = C.POSIX_FADV_DONTNEED +- FADV_NOREUSE = C.POSIX_FADV_NOREUSE +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPMreqn C.struct_ip_mreqn +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPMreqn = C.sizeof_struct_ip_mreqn +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr 
+- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_ATTACH = C.PT_ATTACH +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_DETACH = C.PT_DETACH +- PTRACE_GETFPREGS = C.PT_GETFPREGS +- PTRACE_GETFSBASE = C.PT_GETFSBASE +- PTRACE_GETLWPLIST = C.PT_GETLWPLIST +- PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS +- PTRACE_GETREGS = C.PT_GETREGS +- PTRACE_GETXSTATE = C.PT_GETXSTATE +- PTRACE_IO = C.PT_IO +- PTRACE_KILL = C.PT_KILL +- PTRACE_LWPEVENTS = C.PT_LWP_EVENTS +- PTRACE_LWPINFO = C.PT_LWPINFO +- PTRACE_SETFPREGS = C.PT_SETFPREGS +- PTRACE_SETREGS = C.PT_SETREGS +- PTRACE_SINGLESTEP = C.PT_STEP +- PTRACE_TRACEME = C.PT_TRACE_ME +-) +- +-const ( +- PIOD_READ_D = C.PIOD_READ_D +- PIOD_WRITE_D = C.PIOD_WRITE_D +- PIOD_READ_I = C.PIOD_READ_I +- PIOD_WRITE_I = C.PIOD_WRITE_I +-) +- +-const ( +- PL_FLAG_BORN = C.PL_FLAG_BORN +- PL_FLAG_EXITED = C.PL_FLAG_EXITED +- PL_FLAG_SI = C.PL_FLAG_SI +-) +- +-const ( +- TRAP_BRKPT = C.TRAP_BRKPT +- TRAP_TRACE = C.TRAP_TRACE +-) +- +-type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo +- +-type __Siginfo C.struct___siginfo +- +-type Sigset_t C.sigset_t +- +-type Reg C.struct_reg +- +-type FpReg C.struct_fpreg +- +-type PtraceIoDesc C.struct_ptrace_io_desc +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent_freebsd11 +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- sizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 +- sizeofIfData = C.sizeof_struct_if_data +- SizeofIfData = C.sizeof_struct_if_data8 +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type ifMsghdr C.struct_if_msghdr +- +-type IfMsghdr C.struct_if_msghdr8 +- +-type ifData C.struct_if_data +- +-type IfData C.struct_if_data8 +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfmaMsghdr C.struct_ifma_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +- SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfZbuf C.struct_bpf_zbuf +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfZbufHeader C.struct_bpf_zbuf_header +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. 
+- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLINIGNEOF = C.POLLINIGNEOF +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Capabilities +- +-type CapRights C.struct_cap_rights +- +-// Uname +- +-type Utsname C.struct_utsname +diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go +deleted file mode 100644 +index 4a96d72c37..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_netbsd.go ++++ /dev/null +@@ -1,290 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.fsid_t +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Advice to Fadvise +- +-const ( +- FADV_NORMAL = C.POSIX_FADV_NORMAL +- FADV_RANDOM = C.POSIX_FADV_RANDOM +- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL +- FADV_WILLNEED = C.POSIX_FADV_WILLNEED +- FADV_DONTNEED = C.POSIX_FADV_DONTNEED +- FADV_NOREUSE = C.POSIX_FADV_NOREUSE +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- 
+-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-type Mclpool C.struct_mclpool +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfTimeval C.struct_bpf_timeval +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-type Ptmget C.struct_ptmget +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Sysctl +- +-type Sysctlnode C.struct_sysctlnode +- +-// Uname +- +-type Utsname C.struct_utsname +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go +deleted file mode 100644 +index 775cb57dc8..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_openbsd.go ++++ /dev/null +@@ -1,283 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. 
See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Statfs_t C.struct_statfs +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-type Fsid C.fsid_t +- +-// File system limits +- +-const ( +- PathMax = C.PATH_MAX +-) +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Ptrace requests +- +-const ( +- PTRACE_TRACEME = C.PT_TRACE_ME +- PTRACE_CONT = C.PT_CONTINUE +- PTRACE_KILL = C.PT_KILL +-) +- +-// Events (kqueue, kevent) +- +-type Kevent_t C.struct_kevent +- +-// Select +- +-type FdSet C.fd_set +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr +- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = 
C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type IfAnnounceMsghdr C.struct_if_announcemsghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-type Mclpool C.struct_mclpool +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfHdr C.struct_bpf_hdr +- +-type BpfTimeval C.struct_bpf_timeval +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Winsize C.struct_winsize +- +-// fchmodat-like syscalls. +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +-) +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +- +-// Signal Sets +- +-type Sigset_t C.sigset_t +- +-// Uname +- +-type Utsname C.struct_utsname +- +-// Uvmexp +- +-const SizeofUvmexp = C.sizeof_struct_uvmexp +- +-type Uvmexp C.struct_uvmexp +- +-// Clockinfo +- +-const SizeofClockinfo = C.sizeof_struct_clockinfo +- +-type Clockinfo C.struct_clockinfo +diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go +deleted file mode 100644 +index 2b716f9348..0000000000 +--- a/vendor/golang.org/x/sys/unix/types_solaris.go ++++ /dev/null +@@ -1,266 +0,0 @@ +-// Copyright 2009 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-/* +-Input to cgo -godefs. See README.md +-*/ +- +-// +godefs map struct_in_addr [4]byte /* in_addr */ +-// +godefs map struct_in6_addr [16]byte /* in6_addr */ +- +-package unix +- +-/* +-#define KERNEL +-// These defines ensure that builds done on newer versions of Solaris are +-// backwards-compatible with older versions of Solaris and +-// OpenSolaris-based derivatives. 
+-#define __USE_SUNOS_SOCKETS__ // msghdr +-#define __USE_LEGACY_PROTOTYPES__ // iovec +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-enum { +- sizeofPtr = sizeof(void*), +-}; +- +-union sockaddr_all { +- struct sockaddr s1; // this one gets used for fields +- struct sockaddr_in s2; // these pad it out +- struct sockaddr_in6 s3; +- struct sockaddr_un s4; +- struct sockaddr_dl s5; +-}; +- +-struct sockaddr_any { +- struct sockaddr addr; +- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +-}; +- +-*/ +-import "C" +- +-// Machine characteristics +- +-const ( +- SizeofPtr = C.sizeofPtr +- SizeofShort = C.sizeof_short +- SizeofInt = C.sizeof_int +- SizeofLong = C.sizeof_long +- SizeofLongLong = C.sizeof_longlong +- PathMax = C.PATH_MAX +- MaxHostNameLen = C.MAXHOSTNAMELEN +-) +- +-// Basic types +- +-type ( +- _C_short C.short +- _C_int C.int +- _C_long C.long +- _C_long_long C.longlong +-) +- +-// Time +- +-type Timespec C.struct_timespec +- +-type Timeval C.struct_timeval +- +-type Timeval32 C.struct_timeval32 +- +-type Tms C.struct_tms +- +-type Utimbuf C.struct_utimbuf +- +-// Processes +- +-type Rusage C.struct_rusage +- +-type Rlimit C.struct_rlimit +- +-type _Gid_t C.gid_t +- +-// Files +- +-type Stat_t C.struct_stat +- +-type Flock_t C.struct_flock +- +-type Dirent C.struct_dirent +- +-// Filesystems +- +-type _Fsblkcnt_t C.fsblkcnt_t +- +-type Statvfs_t C.struct_statvfs +- +-// Sockets +- +-type RawSockaddrInet4 C.struct_sockaddr_in +- +-type RawSockaddrInet6 C.struct_sockaddr_in6 +- +-type RawSockaddrUnix C.struct_sockaddr_un +- +-type RawSockaddrDatalink C.struct_sockaddr_dl +- +-type RawSockaddr C.struct_sockaddr +- +-type RawSockaddrAny C.struct_sockaddr_any +- +-type _Socklen C.socklen_t +- +-type Linger C.struct_linger +- +-type Iovec C.struct_iovec +- +-type IPMreq C.struct_ip_mreq +- +-type IPv6Mreq C.struct_ipv6_mreq +- +-type Msghdr C.struct_msghdr +- +-type Cmsghdr C.struct_cmsghdr +- +-type Inet6Pktinfo C.struct_in6_pktinfo +- +-type IPv6MTUInfo C.struct_ip6_mtuinfo +- +-type ICMPv6Filter C.struct_icmp6_filter +- +-const ( +- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in +- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any +- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un +- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl +- SizeofLinger = C.sizeof_struct_linger +- SizeofIPMreq = C.sizeof_struct_ip_mreq +- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq +- SizeofMsghdr = C.sizeof_struct_msghdr +- SizeofCmsghdr = C.sizeof_struct_cmsghdr +- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo +- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo +- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +-) +- +-// Select +- +-type FdSet C.fd_set +- +-// Misc +- +-type Utsname C.struct_utsname +- +-type Ustat_t C.struct_ustat +- +-const ( +- AT_FDCWD = C.AT_FDCWD +- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW +- AT_REMOVEDIR = C.AT_REMOVEDIR +- AT_EACCESS = C.AT_EACCESS +-) +- +-// Routing and interface messages +- +-const ( +- SizeofIfMsghdr = C.sizeof_struct_if_msghdr +- SizeofIfData = C.sizeof_struct_if_data +- SizeofIfaMsghdr = 
C.sizeof_struct_ifa_msghdr +- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr +- SizeofRtMetrics = C.sizeof_struct_rt_metrics +-) +- +-type IfMsghdr C.struct_if_msghdr +- +-type IfData C.struct_if_data +- +-type IfaMsghdr C.struct_ifa_msghdr +- +-type RtMsghdr C.struct_rt_msghdr +- +-type RtMetrics C.struct_rt_metrics +- +-// Berkeley packet filter +- +-const ( +- SizeofBpfVersion = C.sizeof_struct_bpf_version +- SizeofBpfStat = C.sizeof_struct_bpf_stat +- SizeofBpfProgram = C.sizeof_struct_bpf_program +- SizeofBpfInsn = C.sizeof_struct_bpf_insn +- SizeofBpfHdr = C.sizeof_struct_bpf_hdr +-) +- +-type BpfVersion C.struct_bpf_version +- +-type BpfStat C.struct_bpf_stat +- +-type BpfProgram C.struct_bpf_program +- +-type BpfInsn C.struct_bpf_insn +- +-type BpfTimeval C.struct_bpf_timeval +- +-type BpfHdr C.struct_bpf_hdr +- +-// Terminal handling +- +-type Termios C.struct_termios +- +-type Termio C.struct_termio +- +-type Winsize C.struct_winsize +- +-// poll +- +-type PollFd C.struct_pollfd +- +-const ( +- POLLERR = C.POLLERR +- POLLHUP = C.POLLHUP +- POLLIN = C.POLLIN +- POLLNVAL = C.POLLNVAL +- POLLOUT = C.POLLOUT +- POLLPRI = C.POLLPRI +- POLLRDBAND = C.POLLRDBAND +- POLLRDNORM = C.POLLRDNORM +- POLLWRBAND = C.POLLWRBAND +- POLLWRNORM = C.POLLWRNORM +-) +diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go +deleted file mode 100644 +index f7941701e8..0000000000 +--- a/vendor/golang.org/x/text/encoding/charmap/maketables.go ++++ /dev/null +@@ -1,556 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +- "unicode/utf8" +- +- "golang.org/x/text/encoding" +- "golang.org/x/text/internal/gen" +-) +- +-const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + +- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + +- ` !"#$%&'()*+,-./0123456789:;<=>?` + +- `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + +- "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" +- +-var encodings = []struct { +- name string +- mib string +- comment string +- varName string +- replacement byte +- mapping string +-}{ +- { +- "IBM Code Page 037", +- "IBM037", +- "", +- "CodePage037", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", +- }, +- { +- "IBM Code Page 437", +- "PC8CodePage437", +- "", +- "CodePage437", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", +- }, +- { +- "IBM Code Page 850", +- "PC850Multilingual", +- "", +- "CodePage850", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", +- }, +- { +- "IBM Code Page 852", +- "PCp852", +- "", +- "CodePage852", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", +- }, +- { +- "IBM Code Page 855", +- "IBM855", +- "", +- "CodePage855", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", +- }, +- { +- "Windows Code Page 858", // PC latin1 with Euro +- "IBM00858", +- "", +- "CodePage858", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", +- }, +- { +- "IBM 
Code Page 860", +- "IBM860", +- "", +- "CodePage860", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", +- }, +- { +- "IBM Code Page 862", +- "PC862LatinHebrew", +- "", +- "CodePage862", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", +- }, +- { +- "IBM Code Page 863", +- "IBM863", +- "", +- "CodePage863", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", +- }, +- { +- "IBM Code Page 865", +- "IBM865", +- "", +- "CodePage865", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", +- }, +- { +- "IBM Code Page 866", +- "IBM866", +- "", +- "CodePage866", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-ibm866.txt", +- }, +- { +- "IBM Code Page 1047", +- "IBM1047", +- "", +- "CodePage1047", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", +- }, +- { +- "IBM Code Page 1140", +- "IBM01140", +- "", +- "CodePage1140", +- 0x3f, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", +- }, +- { +- "ISO 8859-1", +- "ISOLatin1", +- "", +- "ISO8859_1", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", +- }, +- { +- "ISO 8859-2", +- "ISOLatin2", +- "", +- "ISO8859_2", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", +- }, +- { +- "ISO 8859-3", +- "ISOLatin3", +- "", +- "ISO8859_3", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", +- }, +- { +- "ISO 8859-4", +- "ISOLatin4", +- "", +- "ISO8859_4", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", +- }, +- { +- "ISO 8859-5", +- "ISOLatinCyrillic", +- "", +- "ISO8859_5", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", +- }, +- { +- "ISO 8859-6", +- "ISOLatinArabic", +- "", +- "ISO8859_6,ISO8859_6E,ISO8859_6I", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", +- }, +- { +- "ISO 8859-7", +- "ISOLatinGreek", +- "", +- "ISO8859_7", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", +- }, +- { +- "ISO 8859-8", +- "ISOLatinHebrew", +- "", +- "ISO8859_8,ISO8859_8E,ISO8859_8I", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", +- }, +- { +- "ISO 8859-9", +- "ISOLatin5", +- "", +- "ISO8859_9", +- encoding.ASCIISub, +- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", +- }, +- { +- "ISO 8859-10", +- "ISOLatin6", +- "", +- "ISO8859_10", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", +- }, +- { +- "ISO 8859-13", +- "ISO885913", +- "", +- "ISO8859_13", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", +- }, +- { +- "ISO 8859-14", +- "ISO885914", +- "", +- "ISO8859_14", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", +- }, +- { +- "ISO 8859-15", +- "ISO885915", +- "", +- "ISO8859_15", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", +- }, +- { +- "ISO 8859-16", +- "ISO885916", +- "", +- "ISO8859_16", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", +- }, +- { +- "KOI8-R", +- "KOI8R", +- "", +- "KOI8R", +- 
encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-koi8-r.txt", +- }, +- { +- "KOI8-U", +- "KOI8U", +- "", +- "KOI8U", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-koi8-u.txt", +- }, +- { +- "Macintosh", +- "Macintosh", +- "", +- "Macintosh", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-macintosh.txt", +- }, +- { +- "Macintosh Cyrillic", +- "MacintoshCyrillic", +- "", +- "MacintoshCyrillic", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", +- }, +- { +- "Windows 874", +- "Windows874", +- "", +- "Windows874", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-874.txt", +- }, +- { +- "Windows 1250", +- "Windows1250", +- "", +- "Windows1250", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1250.txt", +- }, +- { +- "Windows 1251", +- "Windows1251", +- "", +- "Windows1251", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1251.txt", +- }, +- { +- "Windows 1252", +- "Windows1252", +- "", +- "Windows1252", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1252.txt", +- }, +- { +- "Windows 1253", +- "Windows1253", +- "", +- "Windows1253", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1253.txt", +- }, +- { +- "Windows 1254", +- "Windows1254", +- "", +- "Windows1254", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1254.txt", +- }, +- { +- "Windows 1255", +- "Windows1255", +- "", +- "Windows1255", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1255.txt", +- }, +- { +- "Windows 1256", +- "Windows1256", +- "", +- "Windows1256", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1256.txt", +- }, +- { +- "Windows 1257", +- "Windows1257", +- "", +- "Windows1257", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1257.txt", +- }, +- { +- "Windows 1258", +- "Windows1258", +- "", +- "Windows1258", +- encoding.ASCIISub, +- "http://encoding.spec.whatwg.org/index-windows-1258.txt", +- }, +- { +- "X-User-Defined", +- "XUserDefined", +- "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", +- "XUserDefined", +- encoding.ASCIISub, +- ascii + +- "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + +- "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + +- "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + +- "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + +- "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + +- "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + +- "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + +- "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + +- "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + +- "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + +- "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + +- "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + +- "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + +- "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + +- "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + +- "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", +- }, +-} +- +-func getWHATWG(url string) string { +- res, err := http.Get(url) +- if err != nil { +- log.Fatalf("%q: Get: %v", url, err) +- } +- defer res.Body.Close() +- +- mapping := make([]rune, 128) +- for i := range mapping { +- mapping[i] = '\ufffd' +- } +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' 
{ +- continue +- } +- x, y := 0, 0 +- if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 128 <= x { +- log.Fatalf("code %d is out of range", x) +- } +- if 0x80 <= y && y < 0xa0 { +- // We diverge from the WHATWG spec by mapping control characters +- // in the range [0x80, 0xa0) to U+FFFD. +- continue +- } +- mapping[x] = rune(y) +- } +- return ascii + string(mapping) +-} +- +-func getUCM(url string) string { +- res, err := http.Get(url) +- if err != nil { +- log.Fatalf("%q: Get: %v", url, err) +- } +- defer res.Body.Close() +- +- mapping := make([]rune, 256) +- for i := range mapping { +- mapping[i] = '\ufffd' +- } +- +- charsFound := 0 +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- var c byte +- var r rune +- if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { +- continue +- } +- mapping[c] = r +- charsFound++ +- } +- +- if charsFound < 200 { +- log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) +- } +- +- return string(mapping) +-} +- +-func main() { +- mibs := map[string]bool{} +- all := []string{} +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("tables.go", "charmap") +- +- printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } +- +- printf("import (\n") +- printf("\t\"golang.org/x/text/encoding\"\n") +- printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") +- printf(")\n\n") +- for _, e := range encodings { +- varNames := strings.Split(e.varName, ",") +- all = append(all, varNames...) +- varName := varNames[0] +- switch { +- case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): +- e.mapping = getWHATWG(e.mapping) +- case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): +- e.mapping = getUCM(e.mapping) +- } +- +- asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 +- if asciiSuperset { +- low = 0x80 +- } +- lvn := 1 +- if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { +- lvn = 3 +- } +- lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] +- printf("// %s is the %s encoding.\n", varName, e.name) +- if e.comment != "" { +- printf("//\n// %s\n", e.comment) +- } +- printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n", +- varName, lowerVarName, lowerVarName, e.name) +- if mibs[e.mib] { +- log.Fatalf("MIB type %q declared multiple times.", e.mib) +- } +- printf("mib: identifier.%s,\n", e.mib) +- printf("asciiSuperset: %t,\n", asciiSuperset) +- printf("low: 0x%02x,\n", low) +- printf("replacement: 0x%02x,\n", e.replacement) +- +- printf("decode: [256]utf8Enc{\n") +- i, backMapping := 0, map[rune]byte{} +- for _, c := range e.mapping { +- if _, ok := backMapping[c]; !ok && c != utf8.RuneError { +- backMapping[c] = byte(i) +- } +- var buf [8]byte +- n := utf8.EncodeRune(buf[:], c) +- if n > 3 { +- panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) +- } +- printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) +- if i%2 == 1 { +- printf("\n") +- } +- i++ +- } +- printf("},\n") +- +- printf("encode: [256]uint32{\n") +- encode := make([]uint32, 0, 256) +- for c, i := range backMapping { +- encode = append(encode, uint32(i)<<24|uint32(c)) +- } +- sort.Sort(byRune(encode)) +- for len(encode) < cap(encode) { +- encode = append(encode, encode[len(encode)-1]) +- } +- for i, enc := range encode { +- 
printf("0x%08x,", enc) +- if i%8 == 7 { +- printf("\n") +- } +- } +- printf("},\n}\n") +- +- // Add an estimate of the size of a single Charmap{} struct value, which +- // includes two 256 elem arrays of 4 bytes and some extra fields, which +- // align to 3 uint64s on 64-bit architectures. +- w.Size += 2*4*256 + 3*8 +- } +- // TODO: add proper line breaking. +- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) +-} +- +-type byRune []uint32 +- +-func (b byRune) Len() int { return len(b) } +-func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } +-func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go +deleted file mode 100644 +index ac6b4a77fd..0000000000 +--- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go ++++ /dev/null +@@ -1,173 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "encoding/json" +- "fmt" +- "log" +- "strings" +- +- "golang.org/x/text/internal/gen" +-) +- +-type group struct { +- Encodings []struct { +- Labels []string +- Name string +- } +-} +- +-func main() { +- gen.Init() +- +- r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json") +- var groups []group +- if err := json.NewDecoder(r).Decode(&groups); err != nil { +- log.Fatalf("Error reading encodings.json: %v", err) +- } +- +- w := &bytes.Buffer{} +- fmt.Fprintln(w, "type htmlEncoding byte") +- fmt.Fprintln(w, "const (") +- for i, g := range groups { +- for _, e := range g.Encodings { +- key := strings.ToLower(e.Name) +- name := consts[key] +- if name == "" { +- log.Fatalf("No const defined for %s.", key) +- } +- if i == 0 { +- fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) +- } else { +- fmt.Fprintf(w, "%s\n", name) +- } +- } +- } +- fmt.Fprintln(w, "numEncodings") +- fmt.Fprint(w, ")\n\n") +- +- fmt.Fprintln(w, "var canonical = [numEncodings]string{") +- for _, g := range groups { +- for _, e := range g.Encodings { +- fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name)) +- } +- } +- fmt.Fprint(w, "}\n\n") +- +- fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") +- for _, g := range groups { +- for _, e := range g.Encodings { +- for _, l := range e.Labels { +- key := strings.ToLower(e.Name) +- name := consts[key] +- fmt.Fprintf(w, "%q: %s,\n", l, name) +- } +- } +- } +- fmt.Fprint(w, "}\n\n") +- +- var tags []string +- fmt.Fprintln(w, "var localeMap = []htmlEncoding{") +- for _, loc := range locales { +- tags = append(tags, loc.tag) +- fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) +- } +- fmt.Fprint(w, "}\n\n") +- +- fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) +- +- gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) +-} +- +-// consts maps canonical encoding name to internal constant. 
+-var consts = map[string]string{ +- "utf-8": "utf8", +- "ibm866": "ibm866", +- "iso-8859-2": "iso8859_2", +- "iso-8859-3": "iso8859_3", +- "iso-8859-4": "iso8859_4", +- "iso-8859-5": "iso8859_5", +- "iso-8859-6": "iso8859_6", +- "iso-8859-7": "iso8859_7", +- "iso-8859-8": "iso8859_8", +- "iso-8859-8-i": "iso8859_8I", +- "iso-8859-10": "iso8859_10", +- "iso-8859-13": "iso8859_13", +- "iso-8859-14": "iso8859_14", +- "iso-8859-15": "iso8859_15", +- "iso-8859-16": "iso8859_16", +- "koi8-r": "koi8r", +- "koi8-u": "koi8u", +- "macintosh": "macintosh", +- "windows-874": "windows874", +- "windows-1250": "windows1250", +- "windows-1251": "windows1251", +- "windows-1252": "windows1252", +- "windows-1253": "windows1253", +- "windows-1254": "windows1254", +- "windows-1255": "windows1255", +- "windows-1256": "windows1256", +- "windows-1257": "windows1257", +- "windows-1258": "windows1258", +- "x-mac-cyrillic": "macintoshCyrillic", +- "gbk": "gbk", +- "gb18030": "gb18030", +- // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG +- "big5": "big5", +- "euc-jp": "eucjp", +- "iso-2022-jp": "iso2022jp", +- "shift_jis": "shiftJIS", +- "euc-kr": "euckr", +- "replacement": "replacement", +- "utf-16be": "utf16be", +- "utf-16le": "utf16le", +- "x-user-defined": "xUserDefined", +-} +- +-// locales is taken from +-// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. +-var locales = []struct{ tag, name string }{ +- // The default value. Explicitly state latin to benefit from the exact +- // script option, while still making 1252 the default encoding for languages +- // written in Latin script. +- {"und_Latn", "windows-1252"}, +- {"ar", "windows-1256"}, +- {"ba", "windows-1251"}, +- {"be", "windows-1251"}, +- {"bg", "windows-1251"}, +- {"cs", "windows-1250"}, +- {"el", "iso-8859-7"}, +- {"et", "windows-1257"}, +- {"fa", "windows-1256"}, +- {"he", "windows-1255"}, +- {"hr", "windows-1250"}, +- {"hu", "iso-8859-2"}, +- {"ja", "shift_jis"}, +- {"kk", "windows-1251"}, +- {"ko", "euc-kr"}, +- {"ku", "windows-1254"}, +- {"ky", "windows-1251"}, +- {"lt", "windows-1257"}, +- {"lv", "windows-1257"}, +- {"mk", "windows-1251"}, +- {"pl", "iso-8859-2"}, +- {"ru", "windows-1251"}, +- {"sah", "windows-1251"}, +- {"sk", "windows-1250"}, +- {"sl", "iso-8859-2"}, +- {"sr", "windows-1251"}, +- {"tg", "windows-1251"}, +- {"th", "windows-874"}, +- {"tr", "windows-1254"}, +- {"tt", "windows-1251"}, +- {"uk", "windows-1251"}, +- {"vi", "windows-1258"}, +- {"zh-hans", "gb18030"}, +- {"zh-hant", "big5"}, +-} +diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +deleted file mode 100644 +index 26cfef9c6b..0000000000 +--- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go ++++ /dev/null +@@ -1,142 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-import ( +- "bytes" +- "encoding/xml" +- "fmt" +- "io" +- "log" +- "strings" +- +- "golang.org/x/text/internal/gen" +-) +- +-type registry struct { +- XMLName xml.Name `xml:"registry"` +- Updated string `xml:"updated"` +- Registry []struct { +- ID string `xml:"id,attr"` +- Record []struct { +- Name string `xml:"name"` +- Xref []struct { +- Type string `xml:"type,attr"` +- Data string `xml:"data,attr"` +- } `xml:"xref"` +- Desc struct { +- Data string `xml:",innerxml"` +- // Any []struct { +- // Data string `xml:",chardata"` +- // } `xml:",any"` +- // Data string `xml:",chardata"` +- } `xml:"description,"` +- MIB string `xml:"value"` +- Alias []string `xml:"alias"` +- MIME string `xml:"preferred_alias"` +- } `xml:"record"` +- } `xml:"registry"` +-} +- +-func main() { +- r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") +- reg := ®istry{} +- if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { +- log.Fatalf("Error decoding charset registry: %v", err) +- } +- if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { +- log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) +- } +- +- w := &bytes.Buffer{} +- fmt.Fprintf(w, "const (\n") +- for _, rec := range reg.Registry[0].Record { +- constName := "" +- for _, a := range rec.Alias { +- if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { +- // Some of the constant definitions have comments in them. Strip those. +- constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) +- } +- } +- if constName == "" { +- switch rec.MIB { +- case "2085": +- constName = "HZGB2312" // Not listed as alias for some reason. +- default: +- log.Fatalf("No cs alias defined for %s.", rec.MIB) +- } +- } +- if rec.MIME != "" { +- rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) +- } +- fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) +- if len(rec.Desc.Data) > 0 { +- fmt.Fprint(w, "// ") +- d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) +- inElem := true +- attr := "" +- for { +- t, err := d.Token() +- if err != nil { +- if err != io.EOF { +- log.Fatal(err) +- } +- break +- } +- switch x := t.(type) { +- case xml.CharData: +- attr = "" // Don't need attribute info. +- a := bytes.Split([]byte(x), []byte("\n")) +- for i, b := range a { +- if b = bytes.TrimSpace(b); len(b) != 0 { +- if !inElem && i > 0 { +- fmt.Fprint(w, "\n// ") +- } +- inElem = false +- fmt.Fprintf(w, "%s ", string(b)) +- } +- } +- case xml.StartElement: +- if x.Name.Local == "xref" { +- inElem = true +- use := false +- for _, a := range x.Attr { +- if a.Name.Local == "type" { +- use = use || a.Value != "person" +- } +- if a.Name.Local == "data" && use { +- // Patch up URLs to use https. From some links, the +- // https version is different from the http one. 
+- s := a.Value +- s = strings.Replace(s, "http://", "https://", -1) +- s = strings.Replace(s, "/unicode/", "/", -1) +- attr = s + " " +- } +- } +- } +- case xml.EndElement: +- inElem = false +- fmt.Fprint(w, attr) +- } +- } +- fmt.Fprint(w, "\n") +- } +- for _, x := range rec.Xref { +- switch x.Type { +- case "rfc": +- fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) +- case "uri": +- fmt.Fprintf(w, "// Reference: %s\n", x.Data) +- } +- } +- fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) +- fmt.Fprintln(w) +- } +- fmt.Fprintln(w, ")") +- +- gen.WriteGoFile("mib.go", "identifier", w.Bytes()) +-} +diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go +deleted file mode 100644 +index 023957a672..0000000000 +--- a/vendor/golang.org/x/text/encoding/japanese/maketables.go ++++ /dev/null +@@ -1,161 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-// TODO: Emoji extensions? +-// https://www.unicode.org/faq/emoji_dingbats.html +-// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-type entry struct { +- jisCode, table int +-} +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") +- fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") +- +- reverse := [65536]entry{} +- for i := range reverse { +- reverse[i].table = -1 +- } +- +- tables := []struct { +- url string +- name string +- }{ +- {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, +- {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, +- } +- for i, table := range tables { +- res, err := http.Get(table.url) +- if err != nil { +- log.Fatalf("%q: Get: %v", table.url, err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := 0, uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("%q: could not parse %q", table.url, s) +- } +- if x < 0 || 120*94 <= x { +- log.Fatalf("%q: JIS code %d is out of range", table.url, x) +- } +- mapping[x] = y +- if reverse[y].table == -1 { +- reverse[y] = entry{jisCode: x, table: i} +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("%q: scanner error: %v", table.url, err) +- } +- +- fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", +- table.name, table.name, table.url) +- fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) +- for i, m := range mapping { +- if m != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, m) +- } +- } +- fmt.Printf("}\n\n") +- } +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v.table == -1 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const (\n") +- fmt.Printf("\tjis0208 = 1\n") +- fmt.Printf("\tjis0212 = 2\n") +- fmt.Printf("\tcodeMask = 0x7f\n") +- fmt.Printf("\tcodeShift = 7\n") +- fmt.Printf("\ttableShift = 14\n") +- fmt.Printf(")\n\n") +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("//\n") +- fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") +- fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") +- fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") +- fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x.table == -1 { +- continue +- } +- fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", +- j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go +deleted file mode 100644 +index c84034fb67..0000000000 +--- a/vendor/golang.org/x/text/encoding/korean/maketables.go ++++ /dev/null +@@ -1,143 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") +- fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") +- +- res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- reverse := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { +- log.Fatalf("EUC-KR code %d is out of range", x) +- } +- mapping[x] = y +- if reverse[y] == 0 { +- c0, c1 := uint16(0), uint16(0) +- if x < 178*(0xc7-0x81) { +- c0 = uint16(x/178) + 0x81 +- c1 = uint16(x % 178) +- switch { +- case c1 < 1*26: +- c1 += 0x41 +- case c1 < 2*26: +- c1 += 0x47 +- default: +- c1 += 0x4d +- } +- } else { +- x -= 178 * (0xc7 - 0x81) +- c0 = uint16(x/94) + 0xc7 +- c1 = uint16(x%94) + 0xa1 +- } +- reverse[y] = c0<<8 | c1 +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") +- fmt.Printf("var decode = [...]uint16{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. +- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. 
+-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +deleted file mode 100644 +index 55016c7862..0000000000 +--- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go ++++ /dev/null +@@ -1,161 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") +- fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") +- +- printGB18030() +- printGBK() +-} +- +-func printGB18030() { +- res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") +- fmt.Printf("var gb18030 = [...][2]uint16{\n") +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint32(0), uint32(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0x10000 && y < 0x10000 { +- fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) +- } +- } +- fmt.Printf("}\n\n") +-} +- +-func printGBK() { +- res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint16{} +- reverse := [65536]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint16(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 126*190 <= x { +- log.Fatalf("GBK code %d is out of range", x) +- } +- mapping[x] = y +- if reverse[y] == 0 { +- c0, c1 := x/190, x%190 +- if c1 >= 0x3f { +- c1++ +- } +- reverse[y] = (0x81+c0)<<8 | (0x40 + c1) +- } +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") +- fmt.Printf("var decode = [...]uint16{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%04X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +deleted file mode 100644 +index cf7fdb31a5..0000000000 +--- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go ++++ /dev/null +@@ -1,140 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-// +build ignore +- +-package main +- +-// This program generates tables.go: +-// go run maketables.go | gofmt > tables.go +- +-import ( +- "bufio" +- "fmt" +- "log" +- "net/http" +- "sort" +- "strings" +-) +- +-func main() { +- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") +- fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") +- fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") +- +- res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") +- if err != nil { +- log.Fatalf("Get: %v", err) +- } +- defer res.Body.Close() +- +- mapping := [65536]uint32{} +- reverse := [65536 * 4]uint16{} +- +- scanner := bufio.NewScanner(res.Body) +- for scanner.Scan() { +- s := strings.TrimSpace(scanner.Text()) +- if s == "" || s[0] == '#' { +- continue +- } +- x, y := uint16(0), uint32(0) +- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { +- log.Fatalf("could not parse %q", s) +- } +- if x < 0 || 126*157 <= x { +- log.Fatalf("Big5 code %d is out of range", x) +- } +- mapping[x] = y +- +- // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that +- // "The index pointer for code point in index is the first pointer +- // corresponding to code point in index", which would normally mean +- // that the code below should be guarded by "if reverse[y] == 0", but +- // last instead of first seems to match the behavior of +- // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in +- // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 +- // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") +- // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". +- c0, c1 := x/157, x%157 +- if c1 < 0x3f { +- c1 += 0x40 +- } else { +- c1 += 0x62 +- } +- reverse[y] = (0x81+c0)<<8 | c1 +- } +- if err := scanner.Err(); err != nil { +- log.Fatalf("scanner error: %v", err) +- } +- +- fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") +- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") +- fmt.Printf("var decode = [...]uint32{\n") +- for i, v := range mapping { +- if v != 0 { +- fmt.Printf("\t%d: 0x%08X,\n", i, v) +- } +- } +- fmt.Printf("}\n\n") +- +- // Any run of at least separation continuous zero entries in the reverse map will +- // be a separate encode table. 
+- const separation = 1024 +- +- intervals := []interval(nil) +- low, high := -1, -1 +- for i, v := range reverse { +- if v == 0 { +- continue +- } +- if low < 0 { +- low = i +- } else if i-high >= separation { +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- low = i +- } +- high = i + 1 +- } +- if high >= 0 { +- intervals = append(intervals, interval{low, high}) +- } +- sort.Sort(byDecreasingLength(intervals)) +- +- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) +- fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") +- fmt.Printf("// sorted by decreasing length.\n") +- for i, v := range intervals { +- fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) +- } +- fmt.Printf("\n") +- +- for i, v := range intervals { +- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) +- fmt.Printf("var encode%d = [...]uint16{\n", i) +- for j := v.low; j < v.high; j++ { +- x := reverse[j] +- if x == 0 { +- continue +- } +- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) +- } +- fmt.Printf("}\n\n") +- } +-} +- +-// interval is a half-open interval [low, high). +-type interval struct { +- low, high int +-} +- +-func (i interval) len() int { return i.high - i.low } +- +-// byDecreasingLength sorts intervals by decreasing length. +-type byDecreasingLength []interval +- +-func (b byDecreasingLength) Len() int { return len(b) } +-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } +-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go +deleted file mode 100644 +index 0c36a052f6..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Language tag table generator. +-// Data read from the web. +- +-package main +- +-import ( +- "flag" +- "fmt" +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/unicode/cldr" +-) +- +-var ( +- test = flag.Bool("test", +- false, +- "test existing tables; can be used to compare web data with package data.") +- outputFile = flag.String("output", +- "tables.go", +- "output file for generated tables") +-) +- +-func main() { +- gen.Init() +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("tables.go", "compact") +- +- fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) +- +- b := newBuilder(w) +- gen.WriteCLDRVersion(w) +- +- b.writeCompactIndex() +-} +- +-type builder struct { +- w *gen.CodeWriter +- data *cldr.CLDR +- supp *cldr.SupplementalData +-} +- +-func newBuilder(w *gen.CodeWriter) *builder { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- if err != nil { +- log.Fatal(err) +- } +- b := builder{ +- w: w, +- data: data, +- supp: data.Supplemental(), +- } +- return &b +-} +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go +deleted file mode 100644 +index 136cefaf08..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go ++++ /dev/null +@@ -1,113 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// This file generates derivative tables based on the language package itself. +- +-import ( +- "fmt" +- "log" +- "sort" +- "strings" +- +- "golang.org/x/text/internal/language" +-) +- +-// Compact indices: +-// Note -va-X variants only apply to localization variants. +-// BCP variants only ever apply to language. +-// The only ambiguity between tags is with regions. +- +-func (b *builder) writeCompactIndex() { +- // Collect all language tags for which we have any data in CLDR. +- m := map[language.Tag]bool{} +- for _, lang := range b.data.Locales() { +- // We include all locales unconditionally to be consistent with en_US. +- // We want en_US, even though it has no data associated with it. +- +- // TODO: put any of the languages for which no data exists at the end +- // of the index. This allows all components based on ICU to use that +- // as the cutoff point. +- // if x := data.RawLDML(lang); false || +- // x.LocaleDisplayNames != nil || +- // x.Characters != nil || +- // x.Delimiters != nil || +- // x.Measurement != nil || +- // x.Dates != nil || +- // x.Numbers != nil || +- // x.Units != nil || +- // x.ListPatterns != nil || +- // x.Collations != nil || +- // x.Segmentations != nil || +- // x.Rbnf != nil || +- // x.Annotations != nil || +- // x.Metadata != nil { +- +- // TODO: support POSIX natively, albeit non-standard. +- tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) +- m[tag] = true +- // } +- } +- +- // TODO: plural rules are also defined for the deprecated tags: +- // iw mo sh tl +- // Consider removing these as compact tags. +- +- // Include locales for plural rules, which uses a different structure. +- for _, plurals := range b.supp.Plurals { +- for _, rules := range plurals.PluralRules { +- for _, lang := range strings.Split(rules.Locales, " ") { +- m[language.Make(lang)] = true +- } +- } +- } +- +- var coreTags []language.CompactCoreInfo +- var special []string +- +- for t := range m { +- if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { +- log.Fatalf("Unexpected extension %v in %v", x, t) +- } +- if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { +- cci, ok := language.GetCompactCore(t) +- if !ok { +- log.Fatalf("Locale for non-basic language %q", t) +- } +- coreTags = append(coreTags, cci) +- } else { +- special = append(special, t.String()) +- } +- } +- +- w := b.w +- +- sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) +- sort.Strings(special) +- +- w.WriteComment(` +- NumCompactTags is the number of common tags. 
The maximum tag is +- NumCompactTags-1.`) +- w.WriteConst("NumCompactTags", len(m)) +- +- fmt.Fprintln(w, "const (") +- for i, t := range coreTags { +- fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) +- } +- for i, t := range special { +- fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) +- } +- fmt.Fprintln(w, ")") +- +- w.WriteVar("coreTags", coreTags) +- +- w.WriteConst("specialTagsStr", strings.Join(special, " ")) +-} +- +-func ident(s string) string { +- return strings.Replace(s, "-", "", -1) + "Index" +-} +diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +deleted file mode 100644 +index 9543d58323..0000000000 +--- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go ++++ /dev/null +@@ -1,54 +0,0 @@ +-// Copyright 2018 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/language" +- "golang.org/x/text/internal/language/compact" +- "golang.org/x/text/unicode/cldr" +-) +- +-func main() { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- if err != nil { +- log.Fatalf("DecodeZip: %v", err) +- } +- +- w := gen.NewCodeWriter() +- defer w.WriteGoFile("parents.go", "compact") +- +- // Create parents table. +- type ID uint16 +- parents := make([]ID, compact.NumCompactTags) +- for _, loc := range data.Locales() { +- tag := language.MustParse(loc) +- index, ok := compact.FromTag(tag) +- if !ok { +- continue +- } +- parentIndex := compact.ID(0) // und +- for p := tag.Parent(); p != language.Und; p = p.Parent() { +- if x, ok := compact.FromTag(p); ok { +- parentIndex = x +- break +- } +- } +- parents[index] = ID(parentIndex) +- } +- +- w.WriteComment(` +- parents maps a compact index of a tag to the compact index of the parent of +- this tag.`) +- w.WriteVar("parents", parents) +-} +diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go +deleted file mode 100644 +index cdcc7febcb..0000000000 +--- a/vendor/golang.org/x/text/internal/language/gen.go ++++ /dev/null +@@ -1,1520 +0,0 @@ +-// Copyright 2013 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Language tag table generator. +-// Data read from the web. +- +-package main +- +-import ( +- "bufio" +- "flag" +- "fmt" +- "io" +- "io/ioutil" +- "log" +- "math" +- "reflect" +- "regexp" +- "sort" +- "strconv" +- "strings" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/tag" +- "golang.org/x/text/unicode/cldr" +-) +- +-var ( +- test = flag.Bool("test", +- false, +- "test existing tables; can be used to compare web data with package data.") +- outputFile = flag.String("output", +- "tables.go", +- "output file for generated tables") +-) +- +-var comment = []string{ +- ` +-lang holds an alphabetically sorted list of ISO-639 language identifiers. +-All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. 
+-For 2-byte language identifiers, the two successive bytes have the following meaning: +- - if the first letter of the 2- and 3-letter ISO codes are the same: +- the second and third letter of the 3-letter ISO code. +- - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +-For 3-byte language identifiers the 4th byte is 0.`, +- ` +-langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +-in lookup tables. The language ids for these language codes are derived directly +-from the letters and are not consecutive.`, +- ` +-altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +-to 2-letter language codes that cannot be derived using the method described above. +-Each 3-letter code is followed by its 1-byte langID.`, +- ` +-altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, +- ` +-AliasMap maps langIDs to their suggested replacements.`, +- ` +-script is an alphabetically sorted list of ISO 15924 codes. The index +-of the script in the string, divided by 4, is the internal scriptID.`, +- ` +-isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +-for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +-the UN.M49 codes used for groups.)`, +- ` +-regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +-Each 2-letter codes is followed by two bytes with the following meaning: +- - [A-Z}{2}: the first letter of the 2-letter code plus these two +- letters form the 3-letter ISO code. +- - 0, n: index into altRegionISO3.`, +- ` +-regionTypes defines the status of a region for various standards.`, +- ` +-m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +-codes indicating collections of regions.`, +- ` +-m49Index gives indexes into fromM49 based on the three most significant bits +-of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in +- fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +-for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +-The region code is stored in the 9 lsb of the indexed value.`, +- ` +-fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, +- ` +-altRegionISO3 holds a list of 3-letter region codes that cannot be +-mapped to 2-letter codes using the default algorithm. This is a short list.`, +- ` +-altRegionIDs holds a list of regionIDs the positions of which match those +-of the 3-letter ISO codes in altRegionISO3.`, +- ` +-variantNumSpecialized is the number of specialized variants in variants.`, +- ` +-suppressScript is an index from langID to the dominant script for that language, +-if it exists. If a script is given, it should be suppressed from the language tag.`, +- ` +-likelyLang is a lookup table, indexed by langID, for the most likely +-scripts and regions given incomplete information. If more entries exist for a +-given language, region and script are the index and size respectively +-of the list in likelyLangList.`, +- ` +-likelyLangList holds lists info associated with likelyLang.`, +- ` +-likelyRegion is a lookup table, indexed by regionID, for the most likely +-languages and scripts given incomplete information. If more entries exist +-for a given regionID, lang and script are the index and size respectively +-of the list in likelyRegionList. 
+-TODO: exclude containers and user-definable regions from the list.`, +- ` +-likelyRegionList holds lists info associated with likelyRegion.`, +- ` +-likelyScript is a lookup table, indexed by scriptID, for the most likely +-languages and regions given a script.`, +- ` +-nRegionGroups is the number of region groups.`, +- ` +-regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +-where each set holds all groupings that are directly connected in a region +-containment graph.`, +- ` +-regionInclusionBits is an array of bit vectors where every vector represents +-a set of region groupings. These sets are used to compute the distance +-between two regions for the purpose of language matching.`, +- ` +-regionInclusionNext marks, for each entry in regionInclusionBits, the set of +-all groups that are reachable from the groups set in the respective entry.`, +-} +- +-// TODO: consider changing some of these structures to tries. This can reduce +-// memory, but may increase the need for memory allocations. This could be +-// mitigated if we can piggyback on language tags for common cases. +- +-func failOnError(e error) { +- if e != nil { +- log.Panic(e) +- } +-} +- +-type setType int +- +-const ( +- Indexed setType = 1 + iota // all elements must be of same size +- Linear +-) +- +-type stringSet struct { +- s []string +- sorted, frozen bool +- +- // We often need to update values after the creation of an index is completed. +- // We include a convenience map for keeping track of this. +- update map[string]string +- typ setType // used for checking. +-} +- +-func (ss *stringSet) clone() stringSet { +- c := *ss +- c.s = append([]string(nil), c.s...) +- return c +-} +- +-func (ss *stringSet) setType(t setType) { +- if ss.typ != t && ss.typ != 0 { +- log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) +- } +-} +- +-// parse parses a whitespace-separated string and initializes ss with its +-// components. +-func (ss *stringSet) parse(s string) { +- scan := bufio.NewScanner(strings.NewReader(s)) +- scan.Split(bufio.ScanWords) +- for scan.Scan() { +- ss.add(scan.Text()) +- } +-} +- +-func (ss *stringSet) assertChangeable() { +- if ss.frozen { +- log.Panic("attempt to modify a frozen stringSet") +- } +-} +- +-func (ss *stringSet) add(s string) { +- ss.assertChangeable() +- ss.s = append(ss.s, s) +- ss.sorted = ss.frozen +-} +- +-func (ss *stringSet) freeze() { +- ss.compact() +- ss.frozen = true +-} +- +-func (ss *stringSet) compact() { +- if ss.sorted { +- return +- } +- a := ss.s +- sort.Strings(a) +- k := 0 +- for i := 1; i < len(a); i++ { +- if a[k] != a[i] { +- a[k+1] = a[i] +- k++ +- } +- } +- ss.s = a[:k+1] +- ss.sorted = ss.frozen +-} +- +-type funcSorter struct { +- fn func(a, b string) bool +- sort.StringSlice +-} +- +-func (s funcSorter) Less(i, j int) bool { +- return s.fn(s.StringSlice[i], s.StringSlice[j]) +-} +- +-func (ss *stringSet) sortFunc(f func(a, b string) bool) { +- ss.compact() +- sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) +-} +- +-func (ss *stringSet) remove(s string) { +- ss.assertChangeable() +- if i, ok := ss.find(s); ok { +- copy(ss.s[i:], ss.s[i+1:]) +- ss.s = ss.s[:len(ss.s)-1] +- } +-} +- +-func (ss *stringSet) replace(ol, nu string) { +- ss.s[ss.index(ol)] = nu +- ss.sorted = ss.frozen +-} +- +-func (ss *stringSet) index(s string) int { +- ss.setType(Indexed) +- i, ok := ss.find(s) +- if !ok { +- if i < len(ss.s) { +- log.Panicf("find: item %q is not in list. 
Closest match is %q.", s, ss.s[i]) +- } +- log.Panicf("find: item %q is not in list", s) +- +- } +- return i +-} +- +-func (ss *stringSet) find(s string) (int, bool) { +- ss.compact() +- i := sort.SearchStrings(ss.s, s) +- return i, i != len(ss.s) && ss.s[i] == s +-} +- +-func (ss *stringSet) slice() []string { +- ss.compact() +- return ss.s +-} +- +-func (ss *stringSet) updateLater(v, key string) { +- if ss.update == nil { +- ss.update = map[string]string{} +- } +- ss.update[v] = key +-} +- +-// join joins the string and ensures that all entries are of the same length. +-func (ss *stringSet) join() string { +- ss.setType(Indexed) +- n := len(ss.s[0]) +- for _, s := range ss.s { +- if len(s) != n { +- log.Panicf("join: not all entries are of the same length: %q", s) +- } +- } +- ss.s = append(ss.s, strings.Repeat("\xff", n)) +- return strings.Join(ss.s, "") +-} +- +-// ianaEntry holds information for an entry in the IANA Language Subtag Repository. +-// All types use the same entry. +-// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various +-// fields. +-type ianaEntry struct { +- typ string +- description []string +- scope string +- added string +- preferred string +- deprecated string +- suppressScript string +- macro string +- prefix []string +-} +- +-type builder struct { +- w *gen.CodeWriter +- hw io.Writer // MultiWriter for w and w.Hash +- data *cldr.CLDR +- supp *cldr.SupplementalData +- +- // indices +- locale stringSet // common locales +- lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data +- langNoIndex stringSet // 3-letter ISO codes with no associated data +- script stringSet // 4-letter ISO codes +- region stringSet // 2-letter ISO or 3-digit UN M49 codes +- variant stringSet // 4-8-alphanumeric variant code. +- +- // Region codes that are groups with their corresponding group IDs. +- groups map[int]index +- +- // langInfo +- registry map[string]*ianaEntry +-} +- +-type index uint +- +-func newBuilder(w *gen.CodeWriter) *builder { +- r := gen.OpenCLDRCoreZip() +- defer r.Close() +- d := &cldr.Decoder{} +- data, err := d.DecodeZip(r) +- failOnError(err) +- b := builder{ +- w: w, +- hw: io.MultiWriter(w, w.Hash), +- data: data, +- supp: data.Supplemental(), +- } +- b.parseRegistry() +- return &b +-} +- +-func (b *builder) parseRegistry() { +- r := gen.OpenIANAFile("assignments/language-subtag-registry") +- defer r.Close() +- b.registry = make(map[string]*ianaEntry) +- +- scan := bufio.NewScanner(r) +- scan.Split(bufio.ScanWords) +- var record *ianaEntry +- for more := scan.Scan(); more; { +- key := scan.Text() +- more = scan.Scan() +- value := scan.Text() +- switch key { +- case "Type:": +- record = &ianaEntry{typ: value} +- case "Subtag:", "Tag:": +- if s := strings.SplitN(value, "..", 2); len(s) > 1 { +- for a := s[0]; a <= s[1]; a = inc(a) { +- b.addToRegistry(a, record) +- } +- } else { +- b.addToRegistry(value, record) +- } +- case "Suppress-Script:": +- record.suppressScript = value +- case "Added:": +- record.added = value +- case "Deprecated:": +- record.deprecated = value +- case "Macrolanguage:": +- record.macro = value +- case "Preferred-Value:": +- record.preferred = value +- case "Prefix:": +- record.prefix = append(record.prefix, value) +- case "Scope:": +- record.scope = value +- case "Description:": +- buf := []byte(value) +- for more = scan.Scan(); more; more = scan.Scan() { +- b := scan.Bytes() +- if b[0] == '%' || b[len(b)-1] == ':' { +- break +- } +- buf = append(buf, ' ') +- buf = append(buf, b...) 
+- } +- record.description = append(record.description, string(buf)) +- continue +- default: +- continue +- } +- more = scan.Scan() +- } +- if scan.Err() != nil { +- log.Panic(scan.Err()) +- } +-} +- +-func (b *builder) addToRegistry(key string, entry *ianaEntry) { +- if info, ok := b.registry[key]; ok { +- if info.typ != "language" || entry.typ != "extlang" { +- log.Fatalf("parseRegistry: tag %q already exists", key) +- } +- } else { +- b.registry[key] = entry +- } +-} +- +-var commentIndex = make(map[string]string) +- +-func init() { +- for _, s := range comment { +- key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) +- commentIndex[key] = s +- } +-} +- +-func (b *builder) comment(name string) { +- if s := commentIndex[name]; len(s) > 0 { +- b.w.WriteComment(s) +- } else { +- fmt.Fprintln(b.w) +- } +-} +- +-func (b *builder) pf(f string, x ...interface{}) { +- fmt.Fprintf(b.hw, f, x...) +- fmt.Fprint(b.hw, "\n") +-} +- +-func (b *builder) p(x ...interface{}) { +- fmt.Fprintln(b.hw, x...) +-} +- +-func (b *builder) addSize(s int) { +- b.w.Size += s +- b.pf("// Size: %d bytes", s) +-} +- +-func (b *builder) writeConst(name string, x interface{}) { +- b.comment(name) +- b.w.WriteConst(name, x) +-} +- +-// writeConsts computes f(v) for all v in values and writes the results +-// as constants named _v to a single constant block. +-func (b *builder) writeConsts(f func(string) int, values ...string) { +- b.pf("const (") +- for _, v := range values { +- b.pf("\t_%s = %v", v, f(v)) +- } +- b.pf(")") +-} +- +-// writeType writes the type of the given value, which must be a struct. +-func (b *builder) writeType(value interface{}) { +- b.comment(reflect.TypeOf(value).Name()) +- b.w.WriteType(value) +-} +- +-func (b *builder) writeSlice(name string, ss interface{}) { +- b.writeSliceAddSize(name, 0, ss) +-} +- +-func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { +- b.comment(name) +- b.w.Size += extraSize +- v := reflect.ValueOf(ss) +- t := v.Type().Elem() +- b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) +- +- fmt.Fprintf(b.w, "var %s = ", name) +- b.w.WriteArray(ss) +- b.p() +-} +- +-type FromTo struct { +- From, To uint16 +-} +- +-func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { +- ss.sortFunc(func(a, b string) bool { +- return index(a) < index(b) +- }) +- m := []FromTo{} +- for _, s := range ss.s { +- m = append(m, FromTo{index(s), index(ss.update[s])}) +- } +- b.writeSlice(name, m) +-} +- +-const base = 'z' - 'a' + 1 +- +-func strToInt(s string) uint { +- v := uint(0) +- for i := 0; i < len(s); i++ { +- v *= base +- v += uint(s[i] - 'a') +- } +- return v +-} +- +-// converts the given integer to the original ASCII string passed to strToInt. +-// len(s) must match the number of characters obtained. +-func intToStr(v uint, s []byte) { +- for i := len(s) - 1; i >= 0; i-- { +- s[i] = byte(v%base) + 'a' +- v /= base +- } +-} +- +-func (b *builder) writeBitVector(name string, ss []string) { +- vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) +- for _, s := range ss { +- v := strToInt(s) +- vec[v/8] |= 1 << (v % 8) +- } +- b.writeSlice(name, vec) +-} +- +-// TODO: convert this type into a list or two-stage trie. 
+-func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { +- b.comment(name) +- v := reflect.ValueOf(m) +- sz := v.Len() * (2 + int(v.Type().Key().Size())) +- for _, k := range m { +- sz += len(k) +- } +- b.addSize(sz) +- keys := []string{} +- b.pf(`var %s = map[string]uint16{`, name) +- for k := range m { +- keys = append(keys, k) +- } +- sort.Strings(keys) +- for _, k := range keys { +- b.pf("\t%q: %v,", k, f(m[k])) +- } +- b.p("}") +-} +- +-func (b *builder) writeMap(name string, m interface{}) { +- b.comment(name) +- v := reflect.ValueOf(m) +- sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) +- b.addSize(sz) +- f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { +- return strings.IndexRune("{}, ", r) != -1 +- }) +- sort.Strings(f[1:]) +- b.pf(`var %s = %s{`, name, f[0]) +- for _, kv := range f[1:] { +- b.pf("\t%s,", kv) +- } +- b.p("}") +-} +- +-func (b *builder) langIndex(s string) uint16 { +- if s == "und" { +- return 0 +- } +- if i, ok := b.lang.find(s); ok { +- return uint16(i) +- } +- return uint16(strToInt(s)) + uint16(len(b.lang.s)) +-} +- +-// inc advances the string to its lexicographical successor. +-func inc(s string) string { +- const maxTagLength = 4 +- var buf [maxTagLength]byte +- intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) +- for i := 0; i < len(s); i++ { +- if s[i] <= 'Z' { +- buf[i] -= 'a' - 'A' +- } +- } +- return string(buf[:len(s)]) +-} +- +-func (b *builder) parseIndices() { +- meta := b.supp.Metadata +- +- for k, v := range b.registry { +- var ss *stringSet +- switch v.typ { +- case "language": +- if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { +- b.lang.add(k) +- continue +- } else { +- ss = &b.langNoIndex +- } +- case "region": +- ss = &b.region +- case "script": +- ss = &b.script +- case "variant": +- ss = &b.variant +- default: +- continue +- } +- ss.add(k) +- } +- // Include any language for which there is data. +- for _, lang := range b.data.Locales() { +- if x := b.data.RawLDML(lang); false || +- x.LocaleDisplayNames != nil || +- x.Characters != nil || +- x.Delimiters != nil || +- x.Measurement != nil || +- x.Dates != nil || +- x.Numbers != nil || +- x.Units != nil || +- x.ListPatterns != nil || +- x.Collations != nil || +- x.Segmentations != nil || +- x.Rbnf != nil || +- x.Annotations != nil || +- x.Metadata != nil { +- +- from := strings.Split(lang, "_") +- if lang := from[0]; lang != "root" { +- b.lang.add(lang) +- } +- } +- } +- // Include locales for plural rules, which uses a different structure. +- for _, plurals := range b.data.Supplemental().Plurals { +- for _, rules := range plurals.PluralRules { +- for _, lang := range strings.Split(rules.Locales, " ") { +- if lang = strings.Split(lang, "_")[0]; lang != "root" { +- b.lang.add(lang) +- } +- } +- } +- } +- // Include languages in likely subtags. +- for _, m := range b.supp.LikelySubtags.LikelySubtag { +- from := strings.Split(m.From, "_") +- b.lang.add(from[0]) +- } +- // Include ISO-639 alpha-3 bibliographic entries. +- for _, a := range meta.Alias.LanguageAlias { +- if a.Reason == "bibliographic" { +- b.langNoIndex.add(a.Type) +- } +- } +- // Include regions in territoryAlias (not all are in the IANA registry!) 
+- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(reg.Type) == 2 { +- b.region.add(reg.Type) +- } +- } +- +- for _, s := range b.lang.s { +- if len(s) == 3 { +- b.langNoIndex.remove(s) +- } +- } +- b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) +- b.writeConst("NumScripts", len(b.script.slice())) +- b.writeConst("NumRegions", len(b.region.slice())) +- +- // Add dummy codes at the start of each list to represent "unspecified". +- b.lang.add("---") +- b.script.add("----") +- b.region.add("---") +- +- // common locales +- b.locale.parse(meta.DefaultContent.Locales) +-} +- +-// TODO: region inclusion data will probably not be use used in future matchers. +- +-func (b *builder) computeRegionGroups() { +- b.groups = make(map[int]index) +- +- // Create group indices. +- for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. +- b.groups[i] = index(len(b.groups)) +- } +- for _, g := range b.supp.TerritoryContainment.Group { +- // Skip UN and EURO zone as they are flattening the containment +- // relationship. +- if g.Type == "EZ" || g.Type == "UN" { +- continue +- } +- group := b.region.index(g.Type) +- if _, ok := b.groups[group]; !ok { +- b.groups[group] = index(len(b.groups)) +- } +- } +- if len(b.groups) > 64 { +- log.Fatalf("only 64 groups supported, found %d", len(b.groups)) +- } +- b.writeConst("nRegionGroups", len(b.groups)) +-} +- +-var langConsts = []string{ +- "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", +- "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", +- "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", +- "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", +- "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", +- "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", +- +- // constants for grandfathered tags (if not already defined) +- "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", +- "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", +-} +- +-// writeLanguage generates all tables needed for language canonicalization. +-func (b *builder) writeLanguage() { +- meta := b.supp.Metadata +- +- b.writeConst("nonCanonicalUnd", b.lang.index("und")) +- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) +- b.writeConst("langPrivateStart", b.langIndex("qaa")) +- b.writeConst("langPrivateEnd", b.langIndex("qtz")) +- +- // Get language codes that need to be mapped (overlong 3-letter codes, +- // deprecated 2-letter codes, legacy and grandfathered tags.) +- langAliasMap := stringSet{} +- aliasTypeMap := map[string]AliasType{} +- +- // altLangISO3 get the alternative ISO3 names that need to be mapped. +- altLangISO3 := stringSet{} +- // Add dummy start to avoid the use of index 0. 
+- altLangISO3.add("---") +- altLangISO3.updateLater("---", "aa") +- +- lang := b.lang.clone() +- for _, a := range meta.Alias.LanguageAlias { +- if a.Replacement == "" { +- a.Replacement = "und" +- } +- // TODO: support mapping to tags +- repl := strings.SplitN(a.Replacement, "_", 2)[0] +- if a.Reason == "overlong" { +- if len(a.Replacement) == 2 && len(a.Type) == 3 { +- lang.updateLater(a.Replacement, a.Type) +- } +- } else if len(a.Type) <= 3 { +- switch a.Reason { +- case "macrolanguage": +- aliasTypeMap[a.Type] = Macro +- case "deprecated": +- // handled elsewhere +- continue +- case "bibliographic", "legacy": +- if a.Type == "no" { +- continue +- } +- aliasTypeMap[a.Type] = Legacy +- default: +- log.Fatalf("new %s alias: %s", a.Reason, a.Type) +- } +- langAliasMap.add(a.Type) +- langAliasMap.updateLater(a.Type, repl) +- } +- } +- // Manually add the mapping of "nb" (Norwegian) to its macro language. +- // This can be removed if CLDR adopts this change. +- langAliasMap.add("nb") +- langAliasMap.updateLater("nb", "no") +- aliasTypeMap["nb"] = Macro +- +- for k, v := range b.registry { +- // Also add deprecated values for 3-letter ISO codes, which CLDR omits. +- if v.typ == "language" && v.deprecated != "" && v.preferred != "" { +- langAliasMap.add(k) +- langAliasMap.updateLater(k, v.preferred) +- aliasTypeMap[k] = Deprecated +- } +- } +- // Fix CLDR mappings. +- lang.updateLater("tl", "tgl") +- lang.updateLater("sh", "hbs") +- lang.updateLater("mo", "mol") +- lang.updateLater("no", "nor") +- lang.updateLater("tw", "twi") +- lang.updateLater("nb", "nob") +- lang.updateLater("ak", "aka") +- lang.updateLater("bh", "bih") +- +- // Ensure that each 2-letter code is matched with a 3-letter code. +- for _, v := range lang.s[1:] { +- s, ok := lang.update[v] +- if !ok { +- if s, ok = lang.update[langAliasMap.update[v]]; !ok { +- continue +- } +- lang.update[v] = s +- } +- if v[0] != s[0] { +- altLangISO3.add(s) +- altLangISO3.updateLater(s, v) +- } +- } +- +- // Complete canonicalized language tags. +- lang.freeze() +- for i, v := range lang.s { +- // We can avoid these manual entries by using the IANA registry directly. +- // Seems easier to update the list manually, as changes are rare. +- // The panic in this loop will trigger if we miss an entry. +- add := "" +- if s, ok := lang.update[v]; ok { +- if s[0] == v[0] { +- add = s[1:] +- } else { +- add = string([]byte{0, byte(altLangISO3.index(s))}) +- } +- } else if len(v) == 3 { +- add = "\x00" +- } else { +- log.Panicf("no data for long form of %q", v) +- } +- lang.s[i] += add +- } +- b.writeConst("lang", tag.Index(lang.join())) +- +- b.writeConst("langNoIndexOffset", len(b.lang.s)) +- +- // space of all valid 3-letter language identifiers. 
+- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) +- +- altLangIndex := []uint16{} +- for i, s := range altLangISO3.slice() { +- altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) +- if i > 0 { +- idx := b.lang.index(altLangISO3.update[s]) +- altLangIndex = append(altLangIndex, uint16(idx)) +- } +- } +- b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) +- b.writeSlice("altLangIndex", altLangIndex) +- +- b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) +- types := make([]AliasType, len(langAliasMap.s)) +- for i, s := range langAliasMap.s { +- types[i] = aliasTypeMap[s] +- } +- b.writeSlice("AliasTypes", types) +-} +- +-var scriptConsts = []string{ +- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", +- "Zzzz", +-} +- +-func (b *builder) writeScript() { +- b.writeConsts(b.script.index, scriptConsts...) +- b.writeConst("script", tag.Index(b.script.join())) +- +- supp := make([]uint8, len(b.lang.slice())) +- for i, v := range b.lang.slice()[1:] { +- if sc := b.registry[v].suppressScript; sc != "" { +- supp[i+1] = uint8(b.script.index(sc)) +- } +- } +- b.writeSlice("suppressScript", supp) +- +- // There is only one deprecated script in CLDR. This value is hard-coded. +- // We check here if the code must be updated. +- for _, a := range b.supp.Metadata.Alias.ScriptAlias { +- if a.Type != "Qaai" { +- log.Panicf("unexpected deprecated stript %q", a.Type) +- } +- } +-} +- +-func parseM49(s string) int16 { +- if len(s) == 0 { +- return 0 +- } +- v, err := strconv.ParseUint(s, 10, 10) +- failOnError(err) +- return int16(v) +-} +- +-var regionConsts = []string{ +- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", +- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. +-} +- +-func (b *builder) writeRegion() { +- b.writeConsts(b.region.index, regionConsts...) +- +- isoOffset := b.region.index("AA") +- m49map := make([]int16, len(b.region.slice())) +- fromM49map := make(map[int16]int) +- altRegionISO3 := "" +- altRegionIDs := []uint16{} +- +- b.writeConst("isoRegionOffset", isoOffset) +- +- // 2-letter region lookup and mapping to numeric codes. +- regionISO := b.region.clone() +- regionISO.s = regionISO.s[isoOffset:] +- regionISO.sorted = false +- +- regionTypes := make([]byte, len(b.region.s)) +- +- // Is the region valid BCP 47? +- for s, e := range b.registry { +- if len(s) == 2 && s == strings.ToUpper(s) { +- i := b.region.index(s) +- for _, d := range e.description { +- if strings.Contains(d, "Private use") { +- regionTypes[i] = iso3166UserAssigned +- } +- } +- regionTypes[i] |= bcp47Region +- } +- } +- +- // Is the region a valid ccTLD? 
+- r := gen.OpenIANAFile("domains/root/db") +- defer r.Close() +- +- buf, err := ioutil.ReadAll(r) +- failOnError(err) +- re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) +- for _, m := range re.FindAllSubmatch(buf, -1) { +- i := b.region.index(strings.ToUpper(string(m[1]))) +- regionTypes[i] |= ccTLD +- } +- +- b.writeSlice("regionTypes", regionTypes) +- +- iso3Set := make(map[string]int) +- update := func(iso2, iso3 string) { +- i := regionISO.index(iso2) +- if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { +- regionISO.s[i] += iso3[1:] +- iso3Set[iso3] = -1 +- } else { +- if ok && j >= 0 { +- regionISO.s[i] += string([]byte{0, byte(j)}) +- } else { +- iso3Set[iso3] = len(altRegionISO3) +- regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) +- altRegionISO3 += iso3 +- altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) +- } +- } +- } +- for _, tc := range b.supp.CodeMappings.TerritoryCodes { +- i := regionISO.index(tc.Type) + isoOffset +- if d := m49map[i]; d != 0 { +- log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) +- } +- m49 := parseM49(tc.Numeric) +- m49map[i] = m49 +- if r := fromM49map[m49]; r == 0 { +- fromM49map[m49] = i +- } else if r != i { +- dep := b.registry[regionISO.s[r-isoOffset]].deprecated +- if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { +- fromM49map[m49] = i +- } +- } +- } +- for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { +- from := parseM49(ta.Type) +- if r := fromM49map[from]; r == 0 { +- fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset +- } +- } +- } +- for _, tc := range b.supp.CodeMappings.TerritoryCodes { +- if len(tc.Alpha3) == 3 { +- update(tc.Type, tc.Alpha3) +- } +- } +- // This entries are not included in territoryCodes. Mostly 3-letter variants +- // of deleted codes and an entry for QU. +- for _, m := range []struct{ iso2, iso3 string }{ +- {"CT", "CTE"}, +- {"DY", "DHY"}, +- {"HV", "HVO"}, +- {"JT", "JTN"}, +- {"MI", "MID"}, +- {"NH", "NHB"}, +- {"NQ", "ATN"}, +- {"PC", "PCI"}, +- {"PU", "PUS"}, +- {"PZ", "PCZ"}, +- {"RH", "RHO"}, +- {"VD", "VDR"}, +- {"WK", "WAK"}, +- // These three-letter codes are used for others as well. +- {"FQ", "ATF"}, +- } { +- update(m.iso2, m.iso3) +- } +- for i, s := range regionISO.s { +- if len(s) != 4 { +- regionISO.s[i] = s + " " +- } +- } +- b.writeConst("regionISO", tag.Index(regionISO.join())) +- b.writeConst("altRegionISO3", altRegionISO3) +- b.writeSlice("altRegionIDs", altRegionIDs) +- +- // Create list of deprecated regions. +- // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only +- // Transitionally-reserved mapping not included. +- regionOldMap := stringSet{} +- // Include regions in territoryAlias (not all are in the IANA registry!) +- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { +- if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { +- regionOldMap.add(reg.Type) +- regionOldMap.updateLater(reg.Type, reg.Replacement) +- i, _ := regionISO.find(reg.Type) +- j, _ := regionISO.find(reg.Replacement) +- if k := m49map[i+isoOffset]; k == 0 { +- m49map[i+isoOffset] = m49map[j+isoOffset] +- } +- } +- } +- b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { +- return uint16(b.region.index(s)) +- }) +- // 3-digit region lookup, groupings. 
+- for i := 1; i < isoOffset; i++ { +- m := parseM49(b.region.s[i]) +- m49map[i] = m +- fromM49map[m] = i +- } +- b.writeSlice("m49", m49map) +- +- const ( +- searchBits = 7 +- regionBits = 9 +- ) +- if len(m49map) >= 1< %d", len(m49map), 1<>searchBits] = int16(len(fromM49)) +- } +- b.writeSlice("m49Index", m49Index) +- b.writeSlice("fromM49", fromM49) +-} +- +-const ( +- // TODO: put these lists in regionTypes as user data? Could be used for +- // various optimizations and refinements and could be exposed in the API. +- iso3166Except = "AC CP DG EA EU FX IC SU TA UK" +- iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. +- // DY and RH are actually not deleted, but indeterminately reserved. +- iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" +-) +- +-const ( +- iso3166UserAssigned = 1 << iota +- ccTLD +- bcp47Region +-) +- +-func find(list []string, s string) int { +- for i, t := range list { +- if t == s { +- return i +- } +- } +- return -1 +-} +- +-// writeVariants generates per-variant information and creates a map from variant +-// name to index value. We assign index values such that sorting multiple +-// variants by index value will result in the correct order. +-// There are two types of variants: specialized and general. Specialized variants +-// are only applicable to certain language or language-script pairs. Generalized +-// variants apply to any language. Generalized variants always sort after +-// specialized variants. We will therefore always assign a higher index value +-// to a generalized variant than any other variant. Generalized variants are +-// sorted alphabetically among themselves. +-// Specialized variants may also sort after other specialized variants. Such +-// variants will be ordered after any of the variants they may follow. +-// We assume that if a variant x is followed by a variant y, then for any prefix +-// p of x, p-x is a prefix of y. This allows us to order tags based on the +-// maximum of the length of any of its prefixes. +-// TODO: it is possible to define a set of Prefix values on variants such that +-// a total order cannot be defined to the point that this algorithm breaks. +-// In other words, we cannot guarantee the same order of variants for the +-// future using the same algorithm or for non-compliant combinations of +-// variants. For this reason, consider using simple alphabetic sorting +-// of variants and ignore Prefix restrictions altogether. +-func (b *builder) writeVariant() { +- generalized := stringSet{} +- specialized := stringSet{} +- specializedExtend := stringSet{} +- // Collate the variants by type and check assumptions. +- for _, v := range b.variant.slice() { +- e := b.registry[v] +- if len(e.prefix) == 0 { +- generalized.add(v) +- continue +- } +- c := strings.Split(e.prefix[0], "-") +- hasScriptOrRegion := false +- if len(c) > 1 { +- _, hasScriptOrRegion = b.script.find(c[1]) +- if !hasScriptOrRegion { +- _, hasScriptOrRegion = b.region.find(c[1]) +- +- } +- } +- if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { +- // Variant is preceded by a language. +- specialized.add(v) +- continue +- } +- // Variant is preceded by another variant. +- specializedExtend.add(v) +- prefix := c[0] + "-" +- if hasScriptOrRegion { +- prefix += c[1] +- } +- for _, p := range e.prefix { +- // Verify that the prefix minus the last element is a prefix of the +- // predecessor element. 
+- i := strings.LastIndex(p, "-") +- pred := b.registry[p[i+1:]] +- if find(pred.prefix, p[:i]) < 0 { +- log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) +- } +- // The sorting used below does not work in the general case. It works +- // if we assume that variants that may be followed by others only have +- // prefixes of the same length. Verify this. +- count := strings.Count(p[:i], "-") +- for _, q := range pred.prefix { +- if c := strings.Count(q, "-"); c != count { +- log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) +- } +- } +- if !strings.HasPrefix(p, prefix) { +- log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) +- } +- } +- } +- +- // Sort extended variants. +- a := specializedExtend.s +- less := func(v, w string) bool { +- // Sort by the maximum number of elements. +- maxCount := func(s string) (max int) { +- for _, p := range b.registry[s].prefix { +- if c := strings.Count(p, "-"); c > max { +- max = c +- } +- } +- return +- } +- if cv, cw := maxCount(v), maxCount(w); cv != cw { +- return cv < cw +- } +- // Sort by name as tie breaker. +- return v < w +- } +- sort.Sort(funcSorter{less, sort.StringSlice(a)}) +- specializedExtend.frozen = true +- +- // Create index from variant name to index. +- variantIndex := make(map[string]uint8) +- add := func(s []string) { +- for _, v := range s { +- variantIndex[v] = uint8(len(variantIndex)) +- } +- } +- add(specialized.slice()) +- add(specializedExtend.s) +- numSpecialized := len(variantIndex) +- add(generalized.slice()) +- if n := len(variantIndex); n > 255 { +- log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) +- } +- b.writeMap("variantIndex", variantIndex) +- b.writeConst("variantNumSpecialized", numSpecialized) +-} +- +-func (b *builder) writeLanguageInfo() { +-} +- +-// writeLikelyData writes tables that are used both for finding parent relations and for +-// language matching. Each entry contains additional bits to indicate the status of the +-// data to know when it cannot be used for parent relations. +-func (b *builder) writeLikelyData() { +- const ( +- isList = 1 << iota +- scriptInFrom +- regionInFrom +- ) +- type ( // generated types +- likelyScriptRegion struct { +- region uint16 +- script uint8 +- flags uint8 +- } +- likelyLangScript struct { +- lang uint16 +- script uint8 +- flags uint8 +- } +- likelyLangRegion struct { +- lang uint16 +- region uint16 +- } +- // likelyTag is used for getting likely tags for group regions, where +- // the likely region might be a region contained in the group. 
+- likelyTag struct { +- lang uint16 +- region uint16 +- script uint8 +- } +- ) +- var ( // generated variables +- likelyRegionGroup = make([]likelyTag, len(b.groups)) +- likelyLang = make([]likelyScriptRegion, len(b.lang.s)) +- likelyRegion = make([]likelyLangScript, len(b.region.s)) +- likelyScript = make([]likelyLangRegion, len(b.script.s)) +- likelyLangList = []likelyScriptRegion{} +- likelyRegionList = []likelyLangScript{} +- ) +- type fromTo struct { +- from, to []string +- } +- langToOther := map[int][]fromTo{} +- regionToOther := map[int][]fromTo{} +- for _, m := range b.supp.LikelySubtags.LikelySubtag { +- from := strings.Split(m.From, "_") +- to := strings.Split(m.To, "_") +- if len(to) != 3 { +- log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) +- } +- if len(from) > 3 { +- log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) +- } +- if from[0] != to[0] && from[0] != "und" { +- log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) +- } +- if len(from) == 3 { +- if from[2] != to[2] { +- log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) +- } +- if from[0] != "und" { +- log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) +- } +- } +- if len(from) == 1 || from[0] != "und" { +- id := 0 +- if from[0] != "und" { +- id = b.lang.index(from[0]) +- } +- langToOther[id] = append(langToOther[id], fromTo{from, to}) +- } else if len(from) == 2 && len(from[1]) == 4 { +- sid := b.script.index(from[1]) +- likelyScript[sid].lang = uint16(b.langIndex(to[0])) +- likelyScript[sid].region = uint16(b.region.index(to[2])) +- } else { +- r := b.region.index(from[len(from)-1]) +- if id, ok := b.groups[r]; ok { +- if from[0] != "und" { +- log.Fatalf("region changed unexpectedly: %s -> %s", from, to) +- } +- likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) +- likelyRegionGroup[id].script = uint8(b.script.index(to[1])) +- likelyRegionGroup[id].region = uint16(b.region.index(to[2])) +- } else { +- regionToOther[r] = append(regionToOther[r], fromTo{from, to}) +- } +- } +- } +- b.writeType(likelyLangRegion{}) +- b.writeSlice("likelyScript", likelyScript) +- +- for id := range b.lang.s { +- list := langToOther[id] +- if len(list) == 1 { +- likelyLang[id].region = uint16(b.region.index(list[0].to[2])) +- likelyLang[id].script = uint8(b.script.index(list[0].to[1])) +- } else if len(list) > 1 { +- likelyLang[id].flags = isList +- likelyLang[id].region = uint16(len(likelyLangList)) +- likelyLang[id].script = uint8(len(list)) +- for _, x := range list { +- flags := uint8(0) +- if len(x.from) > 1 { +- if x.from[1] == x.to[2] { +- flags = regionInFrom +- } else { +- flags = scriptInFrom +- } +- } +- likelyLangList = append(likelyLangList, likelyScriptRegion{ +- region: uint16(b.region.index(x.to[2])), +- script: uint8(b.script.index(x.to[1])), +- flags: flags, +- }) +- } +- } +- } +- // TODO: merge suppressScript data with this table. 
+- b.writeType(likelyScriptRegion{}) +- b.writeSlice("likelyLang", likelyLang) +- b.writeSlice("likelyLangList", likelyLangList) +- +- for id := range b.region.s { +- list := regionToOther[id] +- if len(list) == 1 { +- likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) +- likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) +- if len(list[0].from) > 2 { +- likelyRegion[id].flags = scriptInFrom +- } +- } else if len(list) > 1 { +- likelyRegion[id].flags = isList +- likelyRegion[id].lang = uint16(len(likelyRegionList)) +- likelyRegion[id].script = uint8(len(list)) +- for i, x := range list { +- if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { +- log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) +- } +- x := likelyLangScript{ +- lang: uint16(b.langIndex(x.to[0])), +- script: uint8(b.script.index(x.to[1])), +- } +- if len(list[0].from) > 2 { +- x.flags = scriptInFrom +- } +- likelyRegionList = append(likelyRegionList, x) +- } +- } +- } +- b.writeType(likelyLangScript{}) +- b.writeSlice("likelyRegion", likelyRegion) +- b.writeSlice("likelyRegionList", likelyRegionList) +- +- b.writeType(likelyTag{}) +- b.writeSlice("likelyRegionGroup", likelyRegionGroup) +-} +- +-func (b *builder) writeRegionInclusionData() { +- var ( +- // mm holds for each group the set of groups with a distance of 1. +- mm = make(map[int][]index) +- +- // containment holds for each group the transitive closure of +- // containment of other groups. +- containment = make(map[index][]index) +- ) +- for _, g := range b.supp.TerritoryContainment.Group { +- // Skip UN and EURO zone as they are flattening the containment +- // relationship. +- if g.Type == "EZ" || g.Type == "UN" { +- continue +- } +- group := b.region.index(g.Type) +- groupIdx := b.groups[group] +- for _, mem := range strings.Split(g.Contains, " ") { +- r := b.region.index(mem) +- mm[r] = append(mm[r], groupIdx) +- if g, ok := b.groups[r]; ok { +- mm[group] = append(mm[group], g) +- containment[groupIdx] = append(containment[groupIdx], g) +- } +- } +- } +- +- regionContainment := make([]uint64, len(b.groups)) +- for _, g := range b.groups { +- l := containment[g] +- +- // Compute the transitive closure of containment. +- for i := 0; i < len(l); i++ { +- l = append(l, containment[l[i]]...) +- } +- +- // Compute the bitmask. +- regionContainment[g] = 1 << g +- for _, v := range l { +- regionContainment[g] |= 1 << v +- } +- } +- b.writeSlice("regionContainment", regionContainment) +- +- regionInclusion := make([]uint8, len(b.region.s)) +- bvs := make(map[uint64]index) +- // Make the first bitvector positions correspond with the groups. +- for r, i := range b.groups { +- bv := uint64(1 << i) +- for _, g := range mm[r] { +- bv |= 1 << g +- } +- bvs[bv] = i +- regionInclusion[r] = uint8(bvs[bv]) +- } +- for r := 1; r < len(b.region.s); r++ { +- if _, ok := b.groups[r]; !ok { +- bv := uint64(0) +- for _, g := range mm[r] { +- bv |= 1 << g +- } +- if bv == 0 { +- // Pick the world for unspecified regions. +- bv = 1 << b.groups[b.region.index("001")] +- } +- if _, ok := bvs[bv]; !ok { +- bvs[bv] = index(len(bvs)) +- } +- regionInclusion[r] = uint8(bvs[bv]) +- } +- } +- b.writeSlice("regionInclusion", regionInclusion) +- regionInclusionBits := make([]uint64, len(bvs)) +- for k, v := range bvs { +- regionInclusionBits[v] = uint64(k) +- } +- // Add bit vectors for increasingly large distances until a fixed point is reached. 
+- regionInclusionNext := []uint8{} +- for i := 0; i < len(regionInclusionBits); i++ { +- bits := regionInclusionBits[i] +- next := bits +- for i := uint(0); i < uint(len(b.groups)); i++ { +- if bits&(1< 6 { +- log.Fatalf("Too many groups: %d", i) +- } +- idToIndex[mv.Id] = uint8(i + 1) +- // TODO: also handle '-' +- for _, r := range strings.Split(mv.Value, "+") { +- todo := []string{r} +- for k := 0; k < len(todo); k++ { +- r := todo[k] +- regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) +- todo = append(todo, regionHierarchy[r]...) +- } +- } +- } +- b.w.WriteVar("regionToGroups", regionToGroups) +- +- // maps language id to in- and out-of-group region. +- paradigmLocales := [][3]uint16{} +- locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") +- for i := 0; i < len(locales); i += 2 { +- x := [3]uint16{} +- for j := 0; j < 2; j++ { +- pc := strings.SplitN(locales[i+j], "-", 2) +- x[0] = b.langIndex(pc[0]) +- if len(pc) == 2 { +- x[1+j] = uint16(b.regionIndex(pc[1])) +- } +- } +- paradigmLocales = append(paradigmLocales, x) +- } +- b.w.WriteVar("paradigmLocales", paradigmLocales) +- +- b.w.WriteType(mutualIntelligibility{}) +- b.w.WriteType(scriptIntelligibility{}) +- b.w.WriteType(regionIntelligibility{}) +- +- matchLang := []mutualIntelligibility{} +- matchScript := []scriptIntelligibility{} +- matchRegion := []regionIntelligibility{} +- // Convert the languageMatch entries in lists keyed by desired language. +- for _, m := range lm[0].LanguageMatch { +- // Different versions of CLDR use different separators. +- desired := strings.Replace(m.Desired, "-", "_", -1) +- supported := strings.Replace(m.Supported, "-", "_", -1) +- d := strings.Split(desired, "_") +- s := strings.Split(supported, "_") +- if len(d) != len(s) { +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- continue +- } +- distance, _ := strconv.ParseInt(m.Distance, 10, 8) +- switch len(d) { +- case 2: +- if desired == supported && desired == "*_*" { +- continue +- } +- // language-script pair. +- matchScript = append(matchScript, scriptIntelligibility{ +- wantLang: uint16(b.langIndex(d[0])), +- haveLang: uint16(b.langIndex(s[0])), +- wantScript: uint8(b.scriptIndex(d[1])), +- haveScript: uint8(b.scriptIndex(s[1])), +- distance: uint8(distance), +- }) +- if m.Oneway != "true" { +- matchScript = append(matchScript, scriptIntelligibility{ +- wantLang: uint16(b.langIndex(s[0])), +- haveLang: uint16(b.langIndex(d[0])), +- wantScript: uint8(b.scriptIndex(s[1])), +- haveScript: uint8(b.scriptIndex(d[1])), +- distance: uint8(distance), +- }) +- } +- case 1: +- if desired == supported && desired == "*" { +- continue +- } +- if distance == 1 { +- // nb == no is already handled by macro mapping. Check there +- // really is only this case. +- if d[0] != "no" || s[0] != "nb" { +- log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) +- } +- continue +- } +- // TODO: consider dropping oneway field and just doubling the entry. +- matchLang = append(matchLang, mutualIntelligibility{ +- want: uint16(b.langIndex(d[0])), +- have: uint16(b.langIndex(s[0])), +- distance: uint8(distance), +- oneway: m.Oneway == "true", +- }) +- case 3: +- if desired == supported && desired == "*_*_*" { +- continue +- } +- if desired != supported { +- // This is now supported by CLDR, but only one case, which +- // should already be covered by paradigm locales. For instance, +- // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in +- // testdata/CLDRLocaleMatcherTest.txt tests this. 
+- if supported != "en_*_GB" { +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- } +- continue +- } +- ri := regionIntelligibility{ +- lang: b.langIndex(d[0]), +- distance: uint8(distance), +- } +- if d[1] != "*" { +- ri.script = uint8(b.scriptIndex(d[1])) +- } +- switch { +- case d[2] == "*": +- ri.group = 0x80 // not contained in anything +- case strings.HasPrefix(d[2], "$!"): +- ri.group = 0x80 +- d[2] = "$" + d[2][len("$!"):] +- fallthrough +- case strings.HasPrefix(d[2], "$"): +- ri.group |= idToIndex[d[2]] +- } +- matchRegion = append(matchRegion, ri) +- default: +- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) +- } +- } +- sort.SliceStable(matchLang, func(i, j int) bool { +- return matchLang[i].distance < matchLang[j].distance +- }) +- b.w.WriteComment(` +- matchLang holds pairs of langIDs of base languages that are typically +- mutually intelligible. Each pair is associated with a confidence and +- whether the intelligibility goes one or both ways.`) +- b.w.WriteVar("matchLang", matchLang) +- +- b.w.WriteComment(` +- matchScript holds pairs of scriptIDs where readers of one script +- can typically also read the other. Each is associated with a confidence.`) +- sort.SliceStable(matchScript, func(i, j int) bool { +- return matchScript[i].distance < matchScript[j].distance +- }) +- b.w.WriteVar("matchScript", matchScript) +- +- sort.SliceStable(matchRegion, func(i, j int) bool { +- return matchRegion[i].distance < matchRegion[j].distance +- }) +- b.w.WriteVar("matchRegion", matchRegion) +-} +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go +deleted file mode 100644 +index 987fc169cc..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen.go ++++ /dev/null +@@ -1,133 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "flag" +- "log" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/triegen" +- "golang.org/x/text/internal/ucd" +-) +- +-var outputFile = flag.String("out", "tables.go", "output file") +- +-func main() { +- gen.Init() +- gen.Repackage("gen_trieval.go", "trieval.go", "bidi") +- gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") +- +- genTables() +-} +- +-// bidiClass names and codes taken from class "bc" in +-// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +-var bidiClass = map[string]Class{ +- "AL": AL, // ArabicLetter +- "AN": AN, // ArabicNumber +- "B": B, // ParagraphSeparator +- "BN": BN, // BoundaryNeutral +- "CS": CS, // CommonSeparator +- "EN": EN, // EuropeanNumber +- "ES": ES, // EuropeanSeparator +- "ET": ET, // EuropeanTerminator +- "L": L, // LeftToRight +- "NSM": NSM, // NonspacingMark +- "ON": ON, // OtherNeutral +- "R": R, // RightToLeft +- "S": S, // SegmentSeparator +- "WS": WS, // WhiteSpace +- +- "FSI": Control, +- "PDF": Control, +- "PDI": Control, +- "LRE": Control, +- "LRI": Control, +- "LRO": Control, +- "RLE": Control, +- "RLI": Control, +- "RLO": Control, +-} +- +-func genTables() { +- if numClass > 0x0F { +- log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) +- } +- w := gen.NewCodeWriter() +- defer w.WriteVersionedGoFile(*outputFile, "bidi") +- +- gen.WriteUnicodeVersion(w) +- +- t := triegen.NewTrie("bidi") +- +- // Build data about bracket mapping. 
These bits need to be or-ed with +- // any other bits. +- orMask := map[rune]uint64{} +- +- xorMap := map[rune]int{} +- xorMasks := []rune{0} // First value is no-op. +- +- ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { +- r1 := p.Rune(0) +- r2 := p.Rune(1) +- xor := r1 ^ r2 +- if _, ok := xorMap[xor]; !ok { +- xorMap[xor] = len(xorMasks) +- xorMasks = append(xorMasks, xor) +- } +- entry := uint64(xorMap[xor]) << xorMaskShift +- switch p.String(2) { +- case "o": +- entry |= openMask +- case "c", "n": +- default: +- log.Fatalf("Unknown bracket class %q.", p.String(2)) +- } +- orMask[r1] = entry +- }) +- +- w.WriteComment(` +- xorMasks contains masks to be xor-ed with brackets to get the reverse +- version.`) +- w.WriteVar("xorMasks", xorMasks) +- +- done := map[rune]bool{} +- +- insert := func(r rune, c Class) { +- if !done[r] { +- t.Insert(r, orMask[r]|uint64(c)) +- done[r] = true +- } +- } +- +- // Insert the derived BiDi properties. +- ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { +- r := p.Rune(0) +- class, ok := bidiClass[p.String(1)] +- if !ok { +- log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) +- } +- insert(r, class) +- }) +- visitDefaults(insert) +- +- // TODO: use sparse blocks. This would reduce table size considerably +- // from the looks of it. +- +- sz, err := t.Gen(w) +- if err != nil { +- log.Fatal(err) +- } +- w.Size += sz +-} +- +-// dummy values to make methods in gen_common compile. The real versions +-// will be generated by this file to tables.go. +-var ( +- xorMasks []rune +-) +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +deleted file mode 100644 +index 02c3b505d6..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go ++++ /dev/null +@@ -1,57 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-import ( +- "unicode" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/ucd" +- "golang.org/x/text/unicode/rangetable" +-) +- +-// These tables are hand-extracted from: +-// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +-func visitDefaults(fn func(r rune, c Class)) { +- // first write default values for ranges listed above. +- visitRunes(fn, AL, []rune{ +- 0x0600, 0x07BF, // Arabic +- 0x08A0, 0x08FF, // Arabic Extended-A +- 0xFB50, 0xFDCF, // Arabic Presentation Forms +- 0xFDF0, 0xFDFF, +- 0xFE70, 0xFEFF, +- 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols +- }) +- visitRunes(fn, R, []rune{ +- 0x0590, 0x05FF, // Hebrew +- 0x07C0, 0x089F, // Nko et al. +- 0xFB1D, 0xFB4F, +- 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. 
+- 0x0001E800, 0x0001EDFF, +- 0x0001EF00, 0x0001EFFF, +- }) +- visitRunes(fn, ET, []rune{ // European Terminator +- 0x20A0, 0x20Cf, // Currency symbols +- }) +- rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { +- fn(r, BN) // Boundary Neutral +- }) +- ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { +- if p.String(1) == "Default_Ignorable_Code_Point" { +- fn(p.Rune(0), BN) // Boundary Neutral +- } +- }) +-} +- +-func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { +- for i := 0; i < len(runes); i += 2 { +- lo, hi := runes[i], runes[i+1] +- for j := lo; j <= hi; j++ { +- fn(j, c) +- } +- } +-} +diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +deleted file mode 100644 +index 9cb9942894..0000000000 +--- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-// Copyright 2015 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-package main +- +-// Class is the Unicode BiDi class. Each rune has a single class. +-type Class uint +- +-const ( +- L Class = iota // LeftToRight +- R // RightToLeft +- EN // EuropeanNumber +- ES // EuropeanSeparator +- ET // EuropeanTerminator +- AN // ArabicNumber +- CS // CommonSeparator +- B // ParagraphSeparator +- S // SegmentSeparator +- WS // WhiteSpace +- ON // OtherNeutral +- BN // BoundaryNeutral +- NSM // NonspacingMark +- AL // ArabicLetter +- Control // Control LRO - PDI +- +- numClass +- +- LRO // LeftToRightOverride +- RLO // RightToLeftOverride +- LRE // LeftToRightEmbedding +- RLE // RightToLeftEmbedding +- PDF // PopDirectionalFormat +- LRI // LeftToRightIsolate +- RLI // RightToLeftIsolate +- FSI // FirstStrongIsolate +- PDI // PopDirectionalIsolate +- +- unknownClass = ^Class(0) +-) +- +-var controlToClass = map[rune]Class{ +- 0x202D: LRO, // LeftToRightOverride, +- 0x202E: RLO, // RightToLeftOverride, +- 0x202A: LRE, // LeftToRightEmbedding, +- 0x202B: RLE, // RightToLeftEmbedding, +- 0x202C: PDF, // PopDirectionalFormat, +- 0x2066: LRI, // LeftToRightIsolate, +- 0x2067: RLI, // RightToLeftIsolate, +- 0x2068: FSI, // FirstStrongIsolate, +- 0x2069: PDI, // PopDirectionalIsolate, +-} +- +-// A trie entry has the following bits: +-// 7..5 XOR mask for brackets +-// 4 1: Bracket open, 0: Bracket close +-// 3..0 Class type +- +-const ( +- openMask = 0x10 +- xorMaskShift = 5 +-) +diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go +deleted file mode 100644 +index 30a3aa9334..0000000000 +--- a/vendor/golang.org/x/text/unicode/norm/maketables.go ++++ /dev/null +@@ -1,986 +0,0 @@ +-// Copyright 2011 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Normalization table generator. +-// Data read from the web. +-// See forminfo.go for a description of the trie values associated with each rune. 
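Note on the bracket data removed above: the gen_trieval.go hunk documents an 8-bit trie value whose low nibble carries the BiDi class, whose bit 4 (openMask) marks an opening bracket, and whose top three bits (shifted by xorMaskShift) index the xorMasks table. The standalone sketch below only illustrates that packing; the pack/unpack helpers are hypothetical and are not part of the vendored sources.

package main

import "fmt"

// Bit layout described in the removed gen_trieval.go:
//   7..5  index into the xorMasks table (bracket pairing)
//   4     1: bracket open, 0: bracket close
//   3..0  BiDi class
const (
	classMask    = 0x0F
	openMask     = 0x10
	xorMaskShift = 5
)

// pack builds a trie value from a class (0..15), an open-bracket flag and
// an xorMasks index (0..7). Illustration only; not part of the patch.
func pack(class uint8, open bool, xorIdx uint8) uint8 {
	v := class & classMask
	if open {
		v |= openMask
	}
	return v | xorIdx<<xorMaskShift
}

// unpack splits a trie value back into its three fields.
func unpack(v uint8) (class uint8, open bool, xorIdx uint8) {
	return v & classMask, v&openMask != 0, v >> xorMaskShift
}

func main() {
	v := pack(3, true, 2) // class 3, opening bracket, xorMasks[2]
	c, open, idx := unpack(v)
	fmt.Printf("value=%#02x class=%d open=%v xorIdx=%d\n", v, c, open, idx)
}

Running the sketch prints value=0x53 class=3 open=true xorIdx=2, matching the layout the generator relies on.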
+- +-package main +- +-import ( +- "bytes" +- "encoding/binary" +- "flag" +- "fmt" +- "io" +- "log" +- "sort" +- "strconv" +- "strings" +- +- "golang.org/x/text/internal/gen" +- "golang.org/x/text/internal/triegen" +- "golang.org/x/text/internal/ucd" +-) +- +-func main() { +- gen.Init() +- loadUnicodeData() +- compactCCC() +- loadCompositionExclusions() +- completeCharFields(FCanonical) +- completeCharFields(FCompatibility) +- computeNonStarterCounts() +- verifyComputed() +- printChars() +- testDerived() +- printTestdata() +- makeTables() +-} +- +-var ( +- tablelist = flag.String("tables", +- "all", +- "comma-separated list of which tables to generate; "+ +- "can be 'decomp', 'recomp', 'info' and 'all'") +- test = flag.Bool("test", +- false, +- "test existing tables against DerivedNormalizationProps and generate test data for regression testing") +- verbose = flag.Bool("verbose", +- false, +- "write data to stdout as it is parsed") +-) +- +-const MaxChar = 0x10FFFF // anything above this shouldn't exist +- +-// Quick Check properties of runes allow us to quickly +-// determine whether a rune may occur in a normal form. +-// For a given normal form, a rune may be guaranteed to occur +-// verbatim (QC=Yes), may or may not combine with another +-// rune (QC=Maybe), or may not occur (QC=No). +-type QCResult int +- +-const ( +- QCUnknown QCResult = iota +- QCYes +- QCNo +- QCMaybe +-) +- +-func (r QCResult) String() string { +- switch r { +- case QCYes: +- return "Yes" +- case QCNo: +- return "No" +- case QCMaybe: +- return "Maybe" +- } +- return "***UNKNOWN***" +-} +- +-const ( +- FCanonical = iota // NFC or NFD +- FCompatibility // NFKC or NFKD +- FNumberOfFormTypes +-) +- +-const ( +- MComposed = iota // NFC or NFKC +- MDecomposed // NFD or NFKD +- MNumberOfModes +-) +- +-// This contains only the properties we're interested in. +-type Char struct { +- name string +- codePoint rune // if zero, this index is not a valid code point. +- ccc uint8 // canonical combining class +- origCCC uint8 +- excludeInComp bool // from CompositionExclusions.txt +- compatDecomp bool // it has a compatibility expansion +- +- nTrailingNonStarters uint8 +- nLeadingNonStarters uint8 // must be equal to trailing if non-zero +- +- forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility +- +- state State +-} +- +-var chars = make([]Char, MaxChar+1) +-var cccMap = make(map[uint8]uint8) +- +-func (c Char) String() string { +- buf := new(bytes.Buffer) +- +- fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) +- fmt.Fprintf(buf, " ccc: %v\n", c.ccc) +- fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) +- fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) +- fmt.Fprintf(buf, " state: %v\n", c.state) +- fmt.Fprintf(buf, " NFC:\n") +- fmt.Fprint(buf, c.forms[FCanonical]) +- fmt.Fprintf(buf, " NFKC:\n") +- fmt.Fprint(buf, c.forms[FCompatibility]) +- +- return buf.String() +-} +- +-// In UnicodeData.txt, some ranges are marked like this: +-// 3400;;Lo;0;L;;;;;N;;;;; +-// 4DB5;;Lo;0;L;;;;;N;;;;; +-// parseCharacter keeps a state variable indicating the weirdness. 
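For orientation, the quick-check (QC) properties computed by the removed generator above are what ultimately back the public API of the vendored golang.org/x/text/unicode/norm package. A minimal usage sketch, assuming the vendored module is imported in the usual way:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	composed := "\u00e9"    // U+00E9, e with acute, already in NFC
	decomposed := "e\u0301" // 'e' followed by COMBINING ACUTE ACCENT

	// IsNormalString relies on the generated tables: runs of QC=Yes runes
	// can be skipped, while QC=No/Maybe runes force a closer look.
	fmt.Println(norm.NFC.IsNormalString(composed))        // true
	fmt.Println(norm.NFC.IsNormalString(decomposed))      // false
	fmt.Println(norm.NFC.String(decomposed) == composed)  // true
}

Here the precomposed form passes the quick check, while the decomposed sequence fails it and is rewritten by NFC normalization.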
+-type State int +- +-const ( +- SNormal State = iota // known to be zero for the type +- SFirst +- SLast +- SMissing +-) +- +-var lastChar = rune('\u0000') +- +-func (c Char) isValid() bool { +- return c.codePoint != 0 && c.state != SMissing +-} +- +-type FormInfo struct { +- quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed +- verified [MNumberOfModes]bool // index: MComposed or MDecomposed +- +- combinesForward bool // May combine with rune on the right +- combinesBackward bool // May combine with rune on the left +- isOneWay bool // Never appears in result +- inDecomp bool // Some decompositions result in this char. +- decomp Decomposition +- expandedDecomp Decomposition +-} +- +-func (f FormInfo) String() string { +- buf := bytes.NewBuffer(make([]byte, 0)) +- +- fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) +- fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) +- fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) +- fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) +- fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) +- fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) +- fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) +- fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) +- +- return buf.String() +-} +- +-type Decomposition []rune +- +-func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { +- decomp := strings.Split(s, " ") +- if len(decomp) > 0 && skipfirst { +- decomp = decomp[1:] +- } +- for _, d := range decomp { +- point, err := strconv.ParseUint(d, 16, 64) +- if err != nil { +- return a, err +- } +- a = append(a, rune(point)) +- } +- return a, nil +-} +- +-func loadUnicodeData() { +- f := gen.OpenUCDFile("UnicodeData.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- r := p.Rune(ucd.CodePoint) +- char := &chars[r] +- +- char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) +- decmap := p.String(ucd.DecompMapping) +- +- exp, err := parseDecomposition(decmap, false) +- isCompat := false +- if err != nil { +- if len(decmap) > 0 { +- exp, err = parseDecomposition(decmap, true) +- if err != nil { +- log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) +- } +- isCompat = true +- } +- } +- +- char.name = p.String(ucd.Name) +- char.codePoint = r +- char.forms[FCompatibility].decomp = exp +- if !isCompat { +- char.forms[FCanonical].decomp = exp +- } else { +- char.compatDecomp = true +- } +- if len(decmap) > 0 { +- char.forms[FCompatibility].decomp = exp +- } +- } +- if err := p.Err(); err != nil { +- log.Fatal(err) +- } +-} +- +-// compactCCC converts the sparse set of CCC values to a continguous one, +-// reducing the number of bits needed from 8 to 6. +-func compactCCC() { +- m := make(map[uint8]uint8) +- for i := range chars { +- c := &chars[i] +- m[c.ccc] = 0 +- } +- cccs := []int{} +- for v, _ := range m { +- cccs = append(cccs, int(v)) +- } +- sort.Ints(cccs) +- for i, c := range cccs { +- cccMap[uint8(i)] = uint8(c) +- m[uint8(c)] = uint8(i) +- } +- for i := range chars { +- c := &chars[i] +- c.origCCC = c.ccc +- c.ccc = m[c.ccc] +- } +- if len(m) >= 1<<6 { +- log.Fatalf("too many difference CCC values: %d >= 64", len(m)) +- } +-} +- +-// CompositionExclusions.txt has form: +-// 0958 # ... 
+-// See https://unicode.org/reports/tr44/ for full explanation +-func loadCompositionExclusions() { +- f := gen.OpenUCDFile("CompositionExclusions.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- c := &chars[p.Rune(0)] +- if c.excludeInComp { +- log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) +- } +- c.excludeInComp = true +- } +- if e := p.Err(); e != nil { +- log.Fatal(e) +- } +-} +- +-// hasCompatDecomp returns true if any of the recursive +-// decompositions contains a compatibility expansion. +-// In this case, the character may not occur in NFK*. +-func hasCompatDecomp(r rune) bool { +- c := &chars[r] +- if c.compatDecomp { +- return true +- } +- for _, d := range c.forms[FCompatibility].decomp { +- if hasCompatDecomp(d) { +- return true +- } +- } +- return false +-} +- +-// Hangul related constants. +-const ( +- HangulBase = 0xAC00 +- HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) +- +- JamoLBase = 0x1100 +- JamoLEnd = 0x1113 +- JamoVBase = 0x1161 +- JamoVEnd = 0x1176 +- JamoTBase = 0x11A8 +- JamoTEnd = 0x11C3 +- +- JamoLVTCount = 19 * 21 * 28 +- JamoTCount = 28 +-) +- +-func isHangul(r rune) bool { +- return HangulBase <= r && r < HangulEnd +-} +- +-func isHangulWithoutJamoT(r rune) bool { +- if !isHangul(r) { +- return false +- } +- r -= HangulBase +- return r < JamoLVTCount && r%JamoTCount == 0 +-} +- +-func ccc(r rune) uint8 { +- return chars[r].ccc +-} +- +-// Insert a rune in a buffer, ordered by Canonical Combining Class. +-func insertOrdered(b Decomposition, r rune) Decomposition { +- n := len(b) +- b = append(b, 0) +- cc := ccc(r) +- if cc > 0 { +- // Use bubble sort. +- for ; n > 0; n-- { +- if ccc(b[n-1]) <= cc { +- break +- } +- b[n] = b[n-1] +- } +- } +- b[n] = r +- return b +-} +- +-// Recursively decompose. +-func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { +- dcomp := chars[r].forms[form].decomp +- if len(dcomp) == 0 { +- return insertOrdered(d, r) +- } +- for _, c := range dcomp { +- d = decomposeRecursive(form, c, d) +- } +- return d +-} +- +-func completeCharFields(form int) { +- // Phase 0: pre-expand decomposition. +- for i := range chars { +- f := &chars[i].forms[form] +- if len(f.decomp) == 0 { +- continue +- } +- exp := make(Decomposition, 0) +- for _, c := range f.decomp { +- exp = decomposeRecursive(form, c, exp) +- } +- f.expandedDecomp = exp +- } +- +- // Phase 1: composition exclusion, mark decomposition. +- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- // Marks script-specific exclusions and version restricted. +- f.isOneWay = c.excludeInComp +- +- // Singletons +- f.isOneWay = f.isOneWay || len(f.decomp) == 1 +- +- // Non-starter decompositions +- if len(f.decomp) > 1 { +- chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 +- f.isOneWay = f.isOneWay || chk +- } +- +- // Runes that decompose into more than two runes. +- f.isOneWay = f.isOneWay || len(f.decomp) > 2 +- +- if form == FCompatibility { +- f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) +- } +- +- for _, r := range f.decomp { +- chars[r].forms[form].inDecomp = true +- } +- } +- +- // Phase 2: forward and backward combining. 
+- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- if !f.isOneWay && len(f.decomp) == 2 { +- f0 := &chars[f.decomp[0]].forms[form] +- f1 := &chars[f.decomp[1]].forms[form] +- if !f0.isOneWay { +- f0.combinesForward = true +- } +- if !f1.isOneWay { +- f1.combinesBackward = true +- } +- } +- if isHangulWithoutJamoT(rune(i)) { +- f.combinesForward = true +- } +- } +- +- // Phase 3: quick check values. +- for i := range chars { +- c := &chars[i] +- f := &c.forms[form] +- +- switch { +- case len(f.decomp) > 0: +- f.quickCheck[MDecomposed] = QCNo +- case isHangul(rune(i)): +- f.quickCheck[MDecomposed] = QCNo +- default: +- f.quickCheck[MDecomposed] = QCYes +- } +- switch { +- case f.isOneWay: +- f.quickCheck[MComposed] = QCNo +- case (i & 0xffff00) == JamoLBase: +- f.quickCheck[MComposed] = QCYes +- if JamoLBase <= i && i < JamoLEnd { +- f.combinesForward = true +- } +- if JamoVBase <= i && i < JamoVEnd { +- f.quickCheck[MComposed] = QCMaybe +- f.combinesBackward = true +- f.combinesForward = true +- } +- if JamoTBase <= i && i < JamoTEnd { +- f.quickCheck[MComposed] = QCMaybe +- f.combinesBackward = true +- } +- case !f.combinesBackward: +- f.quickCheck[MComposed] = QCYes +- default: +- f.quickCheck[MComposed] = QCMaybe +- } +- } +-} +- +-func computeNonStarterCounts() { +- // Phase 4: leading and trailing non-starter count +- for i := range chars { +- c := &chars[i] +- +- runes := []rune{rune(i)} +- // We always use FCompatibility so that the CGJ insertion points do not +- // change for repeated normalizations with different forms. +- if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { +- runes = exp +- } +- // We consider runes that combine backwards to be non-starters for the +- // purpose of Stream-Safe Text Processing. +- for _, r := range runes { +- if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { +- break +- } +- c.nLeadingNonStarters++ +- } +- for i := len(runes) - 1; i >= 0; i-- { +- if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { +- break +- } +- c.nTrailingNonStarters++ +- } +- if c.nTrailingNonStarters > 3 { +- log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) +- } +- +- if isHangul(rune(i)) { +- c.nTrailingNonStarters = 2 +- if isHangulWithoutJamoT(rune(i)) { +- c.nTrailingNonStarters = 1 +- } +- } +- +- if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { +- log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) +- } +- if t := c.nTrailingNonStarters; t > 3 { +- log.Fatalf("%U: number of trailing non-starters is %d > 3", t) +- } +- } +-} +- +-func printBytes(w io.Writer, b []byte, name string) { +- fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) +- fmt.Fprintf(w, "var %s = [...]byte {", name) +- for i, c := range b { +- switch { +- case i%64 == 0: +- fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) +- case i%8 == 0: +- fmt.Fprintf(w, "\n") +- } +- fmt.Fprintf(w, "0x%.2X, ", c) +- } +- fmt.Fprint(w, "\n}\n\n") +-} +- +-// See forminfo.go for format. 
+-func makeEntry(f *FormInfo, c *Char) uint16 { +- e := uint16(0) +- if r := c.codePoint; HangulBase <= r && r < HangulEnd { +- e |= 0x40 +- } +- if f.combinesForward { +- e |= 0x20 +- } +- if f.quickCheck[MDecomposed] == QCNo { +- e |= 0x4 +- } +- switch f.quickCheck[MComposed] { +- case QCYes: +- case QCNo: +- e |= 0x10 +- case QCMaybe: +- e |= 0x18 +- default: +- log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) +- } +- e |= uint16(c.nTrailingNonStarters) +- return e +-} +- +-// decompSet keeps track of unique decompositions, grouped by whether +-// the decomposition is followed by a trailing and/or leading CCC. +-type decompSet [7]map[string]bool +- +-const ( +- normalDecomp = iota +- firstMulti +- firstCCC +- endMulti +- firstLeadingCCC +- firstCCCZeroExcept +- firstStarterWithNLead +- lastDecomp +-) +- +-var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} +- +-func makeDecompSet() decompSet { +- m := decompSet{} +- for i := range m { +- m[i] = make(map[string]bool) +- } +- return m +-} +-func (m *decompSet) insert(key int, s string) { +- m[key][s] = true +-} +- +-func printCharInfoTables(w io.Writer) int { +- mkstr := func(r rune, f *FormInfo) (int, string) { +- d := f.expandedDecomp +- s := string([]rune(d)) +- if max := 1 << 6; len(s) >= max { +- const msg = "%U: too many bytes in decomposition: %d >= %d" +- log.Fatalf(msg, r, len(s), max) +- } +- head := uint8(len(s)) +- if f.quickCheck[MComposed] != QCYes { +- head |= 0x40 +- } +- if f.combinesForward { +- head |= 0x80 +- } +- s = string([]byte{head}) + s +- +- lccc := ccc(d[0]) +- tccc := ccc(d[len(d)-1]) +- cc := ccc(r) +- if cc != 0 && lccc == 0 && tccc == 0 { +- log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) +- } +- if tccc < lccc && lccc != 0 { +- const msg = "%U: lccc (%d) must be <= tcc (%d)" +- log.Fatalf(msg, r, lccc, tccc) +- } +- index := normalDecomp +- nTrail := chars[r].nTrailingNonStarters +- nLead := chars[r].nLeadingNonStarters +- if tccc > 0 || lccc > 0 || nTrail > 0 { +- tccc <<= 2 +- tccc |= nTrail +- s += string([]byte{tccc}) +- index = endMulti +- for _, r := range d[1:] { +- if ccc(r) == 0 { +- index = firstCCC +- } +- } +- if lccc > 0 || nLead > 0 { +- s += string([]byte{lccc}) +- if index == firstCCC { +- log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) +- } +- index = firstLeadingCCC +- } +- if cc != lccc { +- if cc != 0 { +- log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) +- } +- index = firstCCCZeroExcept +- } +- } else if len(d) > 1 { +- index = firstMulti +- } +- return index, s +- } +- +- decompSet := makeDecompSet() +- const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. +- decompSet.insert(firstStarterWithNLead, nLeadStr) +- +- // Store the uniqued decompositions in a byte buffer, +- // preceded by their byte length. 
+- for _, c := range chars { +- for _, f := range c.forms { +- if len(f.expandedDecomp) == 0 { +- continue +- } +- if f.combinesBackward { +- log.Fatalf("%U: combinesBackward and decompose", c.codePoint) +- } +- index, s := mkstr(c.codePoint, &f) +- decompSet.insert(index, s) +- } +- } +- +- decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) +- size := 0 +- positionMap := make(map[string]uint16) +- decompositions.WriteString("\000") +- fmt.Fprintln(w, "const (") +- for i, m := range decompSet { +- sa := []string{} +- for s := range m { +- sa = append(sa, s) +- } +- sort.Strings(sa) +- for _, s := range sa { +- p := decompositions.Len() +- decompositions.WriteString(s) +- positionMap[s] = uint16(p) +- } +- if cname[i] != "" { +- fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) +- } +- } +- fmt.Fprintln(w, "maxDecomp = 0x8000") +- fmt.Fprintln(w, ")") +- b := decompositions.Bytes() +- printBytes(w, b, "decomps") +- size += len(b) +- +- varnames := []string{"nfc", "nfkc"} +- for i := 0; i < FNumberOfFormTypes; i++ { +- trie := triegen.NewTrie(varnames[i]) +- +- for r, c := range chars { +- f := c.forms[i] +- d := f.expandedDecomp +- if len(d) != 0 { +- _, key := mkstr(c.codePoint, &f) +- trie.Insert(rune(r), uint64(positionMap[key])) +- if c.ccc != ccc(d[0]) { +- // We assume the lead ccc of a decomposition !=0 in this case. +- if ccc(d[0]) == 0 { +- log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) +- } +- } +- } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { +- // Handle cases where it can't be detected that the nLead should be equal +- // to nTrail. +- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) +- } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { +- trie.Insert(c.codePoint, uint64(0x8000|v)) +- } +- } +- sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) +- if err != nil { +- log.Fatal(err) +- } +- size += sz +- } +- return size +-} +- +-func contains(sa []string, s string) bool { +- for _, a := range sa { +- if a == s { +- return true +- } +- } +- return false +-} +- +-func makeTables() { +- w := &bytes.Buffer{} +- +- size := 0 +- if *tablelist == "" { +- return +- } +- list := strings.Split(*tablelist, ",") +- if *tablelist == "all" { +- list = []string{"recomp", "info"} +- } +- +- // Compute maximum decomposition size. +- max := 0 +- for _, c := range chars { +- if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { +- max = n +- } +- } +- fmt.Fprintln(w, `import "sync"`) +- fmt.Fprintln(w) +- +- fmt.Fprintln(w, "const (") +- fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") +- fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) +- fmt.Fprintln(w) +- fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") +- fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") +- fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") +- fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") +- fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) +- fmt.Fprintln(w, ")\n") +- +- // Print the CCC remap table. 
+- size += len(cccMap) +- fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) +- for i := 0; i < len(cccMap); i++ { +- if i%8 == 0 { +- fmt.Fprintln(w) +- } +- fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) +- } +- fmt.Fprintln(w, "\n}\n") +- +- if contains(list, "info") { +- size += printCharInfoTables(w) +- } +- +- if contains(list, "recomp") { +- // Note that we use 32 bit keys, instead of 64 bit. +- // This clips the bits of three entries, but we know +- // this won't cause a collision. The compiler will catch +- // any changes made to UnicodeData.txt that introduces +- // a collision. +- // Note that the recomposition map for NFC and NFKC +- // are identical. +- +- // Recomposition map +- nrentries := 0 +- for _, c := range chars { +- f := c.forms[FCanonical] +- if !f.isOneWay && len(f.decomp) > 0 { +- nrentries++ +- } +- } +- sz := nrentries * 8 +- size += sz +- fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) +- fmt.Fprintln(w, "var recompMap map[uint32]rune") +- fmt.Fprintln(w, "var recompMapOnce sync.Once\n") +- fmt.Fprintln(w, `const recompMapPacked = "" +`) +- var buf [8]byte +- for i, c := range chars { +- f := c.forms[FCanonical] +- d := f.decomp +- if !f.isOneWay && len(d) > 0 { +- key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) +- binary.BigEndian.PutUint32(buf[:4], key) +- binary.BigEndian.PutUint32(buf[4:], uint32(i)) +- fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) +- } +- } +- // hack so we don't have to special case the trailing plus sign +- fmt.Fprintf(w, ` ""`) +- fmt.Fprintln(w) +- } +- +- fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) +- gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) +-} +- +-func printChars() { +- if *verbose { +- for _, c := range chars { +- if !c.isValid() || c.state == SMissing { +- continue +- } +- fmt.Println(c) +- } +- } +-} +- +-// verifyComputed does various consistency tests. +-func verifyComputed() { +- for i, c := range chars { +- for _, f := range c.forms { +- isNo := (f.quickCheck[MDecomposed] == QCNo) +- if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { +- log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) +- } +- +- isMaybe := f.quickCheck[MComposed] == QCMaybe +- if f.combinesBackward != isMaybe { +- log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) +- } +- if len(f.decomp) > 0 && f.combinesForward && isMaybe { +- log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) +- } +- +- if len(f.expandedDecomp) != 0 { +- continue +- } +- if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { +- // We accept these runes to be treated differently (it only affects +- // segment breaking in iteration, most likely on improper use), but +- // reconsider if more characters are added. 
+- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; +- // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; +- // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; +- // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; +- // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; +- // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; +- if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { +- log.Fatalf("%U: nLead was %v; want %v", i, a, b) +- } +- } +- } +- nfc := c.forms[FCanonical] +- nfkc := c.forms[FCompatibility] +- if nfc.combinesBackward != nfkc.combinesBackward { +- log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) +- } +- } +-} +- +-// Use values in DerivedNormalizationProps.txt to compare against the +-// values we computed. +-// DerivedNormalizationProps.txt has form: +-// 00C0..00C5 ; NFD_QC; N # ... +-// 0374 ; NFD_QC; N # ... +-// See https://unicode.org/reports/tr44/ for full explanation +-func testDerived() { +- f := gen.OpenUCDFile("DerivedNormalizationProps.txt") +- defer f.Close() +- p := ucd.New(f) +- for p.Next() { +- r := p.Rune(0) +- c := &chars[r] +- +- var ftype, mode int +- qt := p.String(1) +- switch qt { +- case "NFC_QC": +- ftype, mode = FCanonical, MComposed +- case "NFD_QC": +- ftype, mode = FCanonical, MDecomposed +- case "NFKC_QC": +- ftype, mode = FCompatibility, MComposed +- case "NFKD_QC": +- ftype, mode = FCompatibility, MDecomposed +- default: +- continue +- } +- var qr QCResult +- switch p.String(2) { +- case "Y": +- qr = QCYes +- case "N": +- qr = QCNo +- case "M": +- qr = QCMaybe +- default: +- log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) +- } +- if got := c.forms[ftype].quickCheck[mode]; got != qr { +- log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) +- } +- c.forms[ftype].verified[mode] = true +- } +- if err := p.Err(); err != nil { +- log.Fatal(err) +- } +- // Any unspecified value must be QCYes. Verify this. 
+- for i, c := range chars { +- for j, fd := range c.forms { +- for k, qr := range fd.quickCheck { +- if !fd.verified[k] && qr != QCYes { +- m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" +- log.Printf(m, i, j, k, qr, c.name) +- } +- } +- } +- } +-} +- +-var testHeader = `const ( +- Yes = iota +- No +- Maybe +-) +- +-type formData struct { +- qc uint8 +- combinesForward bool +- decomposition string +-} +- +-type runeData struct { +- r rune +- ccc uint8 +- nLead uint8 +- nTrail uint8 +- f [2]formData // 0: canonical; 1: compatibility +-} +- +-func f(qc uint8, cf bool, dec string) [2]formData { +- return [2]formData{{qc, cf, dec}, {qc, cf, dec}} +-} +- +-func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { +- return [2]formData{{qc, cf, d}, {qck, cfk, dk}} +-} +- +-var testData = []runeData{ +-` +- +-func printTestdata() { +- type lastInfo struct { +- ccc uint8 +- nLead uint8 +- nTrail uint8 +- f string +- } +- +- last := lastInfo{} +- w := &bytes.Buffer{} +- fmt.Fprintf(w, testHeader) +- for r, c := range chars { +- f := c.forms[FCanonical] +- qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) +- f = c.forms[FCompatibility] +- qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) +- s := "" +- if d == dk && qc == qck && cf == cfk { +- s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) +- } else { +- s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) +- } +- current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} +- if last != current { +- fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) +- last = current +- } +- } +- fmt.Fprintln(w, "}") +- gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) +-} +diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go +deleted file mode 100644 +index 45d711900d..0000000000 +--- a/vendor/golang.org/x/text/unicode/norm/triegen.go ++++ /dev/null +@@ -1,117 +0,0 @@ +-// Copyright 2011 The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// +build ignore +- +-// Trie table generator. +-// Used by make*tables tools to generate a go file with trie data structures +-// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte +-// sequence are used to lookup offsets in the index table to be used for the +-// next byte. The last byte is used to index into a table with 16-bit values. 
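To make the generator comment above concrete: a lookup walks the index table once per leading UTF-8 byte and uses the final byte to select a 16-bit value. The toy below hard-codes a single two-byte case with made-up table contents; it is only a sketch of the shape of the lookup, not the actual generated tables or their offsets.

package main

import "fmt"

// toyTrie mimics the two-level structure: the first byte of a two-byte
// UTF-8 sequence picks a block via the index table, and the low 6 bits of
// the last byte index into a block of 16-bit values.
type toyTrie struct {
	index  [256]uint16  // lead byte -> block number (made-up contents)
	values [][64]uint16 // blocks of 64 values for the trailing byte
}

func (t *toyTrie) lookup2(b0, b1 byte) uint16 {
	block := t.index[b0]
	return t.values[block][b1&0x3F]
}

func main() {
	tr := &toyTrie{values: make([][64]uint16, 2)}
	// Pretend block 1 holds the values for lead byte 0xC3 (U+00C0..U+00FF).
	tr.index[0xC3] = 1
	tr.values[1]['é'&0x3F] = 0x1234 // hypothetical value for U+00E9

	s := "é" // 0xC3 0xA9 in UTF-8
	fmt.Printf("%#04x\n", tr.lookup2(s[0], s[1])) // 0x1234
}

The real generated tables chain through the index table for three- and four-byte sequences as well, and pack the blocks far more compactly than this sketch.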
+- +-package main +- +-import ( +- "fmt" +- "io" +-) +- +-const maxSparseEntries = 16 +- +-type normCompacter struct { +- sparseBlocks [][]uint64 +- sparseOffset []uint16 +- sparseCount int +- name string +-} +- +-func mostFrequentStride(a []uint64) int { +- counts := make(map[int]int) +- var v int +- for _, x := range a { +- if stride := int(x) - v; v != 0 && stride >= 0 { +- counts[stride]++ +- } +- v = int(x) +- } +- var maxs, maxc int +- for stride, cnt := range counts { +- if cnt > maxc || (cnt == maxc && stride < maxs) { +- maxs, maxc = stride, cnt +- } +- } +- return maxs +-} +- +-func countSparseEntries(a []uint64) int { +- stride := mostFrequentStride(a) +- var v, count int +- for _, tv := range a { +- if int(tv)-v != stride { +- if tv != 0 { +- count++ +- } +- } +- v = int(tv) +- } +- return count +-} +- +-func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { +- if n := countSparseEntries(v); n <= maxSparseEntries { +- return (n+1)*4 + 2, true +- } +- return 0, false +-} +- +-func (c *normCompacter) Store(v []uint64) uint32 { +- h := uint32(len(c.sparseOffset)) +- c.sparseBlocks = append(c.sparseBlocks, v) +- c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) +- c.sparseCount += countSparseEntries(v) + 1 +- return h +-} +- +-func (c *normCompacter) Handler() string { +- return c.name + "Sparse.lookup" +-} +- +-func (c *normCompacter) Print(w io.Writer) (retErr error) { +- p := func(f string, x ...interface{}) { +- if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { +- retErr = err +- } +- } +- +- ls := len(c.sparseBlocks) +- p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) +- p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) +- +- ns := c.sparseCount +- p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) +- p("var %sSparseValues = [%d]valueRange {", c.name, ns) +- for i, b := range c.sparseBlocks { +- p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) +- var v int +- stride := mostFrequentStride(b) +- n := countSparseEntries(b) +- p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) +- for i, nv := range b { +- if int(nv)-v != stride { +- if v != 0 { +- p(",hi:%#02x},", 0x80+i-1) +- } +- if nv != 0 { +- p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) +- } +- } +- v = int(nv) +- } +- if v != 0 { +- p(",hi:%#02x},", 0x80+len(b)-1) +- } +- } +- p("\n}\n\n") +- return +-} +diff --git a/vendor/modules.txt b/vendor/modules.txt +index 91f7df534b..685585c07c 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -1,30 +1,30 @@ + # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 +-github.com/Azure/go-ansiterm/winterm + github.com/Azure/go-ansiterm ++github.com/Azure/go-ansiterm/winterm + # github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml + # github.com/Microsoft/go-winio v0.4.14 + github.com/Microsoft/go-winio +-github.com/Microsoft/go-winio/pkg/guid + github.com/Microsoft/go-winio/archive/tar + github.com/Microsoft/go-winio/backuptar ++github.com/Microsoft/go-winio/pkg/guid + # github.com/Microsoft/hcsshim v0.8.6 +-github.com/Microsoft/hcsshim/osversion + github.com/Microsoft/hcsshim ++github.com/Microsoft/hcsshim/internal/guestrequest + github.com/Microsoft/hcsshim/internal/guid + github.com/Microsoft/hcsshim/internal/hcs + github.com/Microsoft/hcsshim/internal/hcserror + github.com/Microsoft/hcsshim/internal/hns +-github.com/Microsoft/hcsshim/internal/mergemaps +-github.com/Microsoft/hcsshim/internal/schema1 +-github.com/Microsoft/hcsshim/internal/wclayer 
+-github.com/Microsoft/hcsshim/internal/guestrequest + github.com/Microsoft/hcsshim/internal/interop + github.com/Microsoft/hcsshim/internal/logfields +-github.com/Microsoft/hcsshim/internal/timeout +-github.com/Microsoft/hcsshim/internal/schema2 + github.com/Microsoft/hcsshim/internal/longpath ++github.com/Microsoft/hcsshim/internal/mergemaps + github.com/Microsoft/hcsshim/internal/safefile ++github.com/Microsoft/hcsshim/internal/schema1 ++github.com/Microsoft/hcsshim/internal/schema2 ++github.com/Microsoft/hcsshim/internal/timeout ++github.com/Microsoft/hcsshim/internal/wclayer ++github.com/Microsoft/hcsshim/osversion + # github.com/VividCortex/ewma v1.1.1 + github.com/VividCortex/ewma + # github.com/beorn7/perks v1.0.1 +@@ -40,133 +40,134 @@ github.com/checkpoint-restore/go-criu/rpc + github.com/containerd/containerd/errdefs + # github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc + github.com/containerd/continuity/fs +-github.com/containerd/continuity/sysx + github.com/containerd/continuity/syscallx ++github.com/containerd/continuity/sysx + # github.com/containernetworking/cni v0.7.1 +-github.com/containernetworking/cni/pkg/types +-github.com/containernetworking/cni/pkg/types/current +-github.com/containernetworking/cni/pkg/version + github.com/containernetworking/cni/libcni + github.com/containernetworking/cni/pkg/invoke ++github.com/containernetworking/cni/pkg/types + github.com/containernetworking/cni/pkg/types/020 ++github.com/containernetworking/cni/pkg/types/current ++github.com/containernetworking/cni/pkg/version + # github.com/containernetworking/plugins v0.8.2 +-github.com/containernetworking/plugins/pkg/ns + github.com/containernetworking/plugins/pkg/ip +-github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator ++github.com/containernetworking/plugins/pkg/ns + github.com/containernetworking/plugins/pkg/utils/hwaddr + github.com/containernetworking/plugins/plugins/ipam/host-local/backend ++github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator + # github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 + github.com/containers/buildah +-github.com/containers/buildah/imagebuildah +-github.com/containers/buildah/pkg/chrootuser +-github.com/containers/buildah/pkg/cli +-github.com/containers/buildah/pkg/formats +-github.com/containers/buildah/util +-github.com/containers/buildah/pkg/secrets +-github.com/containers/buildah/pkg/parse + github.com/containers/buildah/bind + github.com/containers/buildah/chroot + github.com/containers/buildah/docker ++github.com/containers/buildah/imagebuildah + github.com/containers/buildah/pkg/blobcache + github.com/containers/buildah/pkg/cgroups ++github.com/containers/buildah/pkg/chrootuser ++github.com/containers/buildah/pkg/cli ++github.com/containers/buildah/pkg/formats + github.com/containers/buildah/pkg/overlay +-github.com/containers/buildah/pkg/unshare ++github.com/containers/buildah/pkg/parse ++github.com/containers/buildah/pkg/secrets + github.com/containers/buildah/pkg/umask +-# github.com/containers/image/v5 v5.0.0 ++github.com/containers/buildah/pkg/unshare ++github.com/containers/buildah/util ++# github.com/containers/image/v5 v5.0.1-0.20200205124631-82291c45f2b0 ++github.com/containers/image/v5/copy + github.com/containers/image/v5/directory ++github.com/containers/image/v5/directory/explicitfilepath + github.com/containers/image/v5/docker + github.com/containers/image/v5/docker/archive +-github.com/containers/image/v5/manifest 
+-github.com/containers/image/v5/pkg/docker/config +-github.com/containers/image/v5/signature +-github.com/containers/image/v5/transports +-github.com/containers/image/v5/transports/alltransports +-github.com/containers/image/v5/types +-github.com/containers/image/v5/oci/archive +-github.com/containers/image/v5/storage +-github.com/containers/image/v5/copy ++github.com/containers/image/v5/docker/daemon ++github.com/containers/image/v5/docker/policyconfiguration + github.com/containers/image/v5/docker/reference + github.com/containers/image/v5/docker/tarfile + github.com/containers/image/v5/image +-github.com/containers/image/v5/oci/layout +-github.com/containers/image/v5/tarball +-github.com/containers/image/v5/pkg/sysregistriesv2 +-github.com/containers/image/v5/directory/explicitfilepath +-github.com/containers/image/v5/docker/policyconfiguration +-github.com/containers/image/v5/pkg/blobinfocache/none +-github.com/containers/image/v5/pkg/tlsclientconfig +-github.com/containers/image/v5/pkg/compression +-github.com/containers/image/v5/pkg/strslice ++github.com/containers/image/v5/internal/iolimits + github.com/containers/image/v5/internal/pkg/keyctl +-github.com/containers/image/v5/version +-github.com/containers/image/v5/docker/daemon +-github.com/containers/image/v5/openshift +-github.com/containers/image/v5/ostree +-github.com/containers/image/v5/pkg/compression/types + github.com/containers/image/v5/internal/tmpdir ++github.com/containers/image/v5/manifest ++github.com/containers/image/v5/oci/archive + github.com/containers/image/v5/oci/internal ++github.com/containers/image/v5/oci/layout ++github.com/containers/image/v5/openshift ++github.com/containers/image/v5/ostree + github.com/containers/image/v5/pkg/blobinfocache +-github.com/containers/image/v5/pkg/compression/internal + github.com/containers/image/v5/pkg/blobinfocache/boltdb +-github.com/containers/image/v5/pkg/blobinfocache/memory + github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize ++github.com/containers/image/v5/pkg/blobinfocache/memory ++github.com/containers/image/v5/pkg/blobinfocache/none ++github.com/containers/image/v5/pkg/compression ++github.com/containers/image/v5/pkg/compression/internal ++github.com/containers/image/v5/pkg/compression/types ++github.com/containers/image/v5/pkg/docker/config ++github.com/containers/image/v5/pkg/strslice ++github.com/containers/image/v5/pkg/sysregistriesv2 ++github.com/containers/image/v5/pkg/tlsclientconfig ++github.com/containers/image/v5/signature ++github.com/containers/image/v5/storage ++github.com/containers/image/v5/tarball ++github.com/containers/image/v5/transports ++github.com/containers/image/v5/transports/alltransports ++github.com/containers/image/v5/types ++github.com/containers/image/v5/version + # github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b + github.com/containers/libtrust + # github.com/containers/psgo v1.3.2 + github.com/containers/psgo + github.com/containers/psgo/internal/capabilities ++github.com/containers/psgo/internal/cgroups + github.com/containers/psgo/internal/dev ++github.com/containers/psgo/internal/host + github.com/containers/psgo/internal/proc + github.com/containers/psgo/internal/process +-github.com/containers/psgo/internal/cgroups +-github.com/containers/psgo/internal/host + # github.com/containers/storage v1.13.6 + github.com/containers/storage +-github.com/containers/storage/pkg/archive +-github.com/containers/storage/pkg/chrootarchive +-github.com/containers/storage/pkg/idtools 
+-github.com/containers/storage/pkg/reexec +-github.com/containers/storage/pkg/mount +-github.com/containers/storage/pkg/stringid +-github.com/containers/storage/pkg/system +-github.com/containers/storage/pkg/truncindex +-github.com/containers/storage/pkg/parsers/kernel +-github.com/containers/storage/pkg/fileutils +-github.com/containers/storage/pkg/ioutils +-github.com/containers/storage/pkg/pools +-github.com/containers/storage/pkg/homedir + github.com/containers/storage/drivers +-github.com/containers/storage/drivers/register +-github.com/containers/storage/pkg/config +-github.com/containers/storage/pkg/directory +-github.com/containers/storage/pkg/lockfile +-github.com/containers/storage/pkg/parsers +-github.com/containers/storage/pkg/stringutils +-github.com/containers/storage/pkg/tarlog +-github.com/containers/storage/pkg/longpath +-github.com/containers/storage/pkg/promise + github.com/containers/storage/drivers/aufs + github.com/containers/storage/drivers/btrfs ++github.com/containers/storage/drivers/copy + github.com/containers/storage/drivers/devmapper + github.com/containers/storage/drivers/overlay ++github.com/containers/storage/drivers/overlayutils ++github.com/containers/storage/drivers/quota ++github.com/containers/storage/drivers/register + github.com/containers/storage/drivers/vfs + github.com/containers/storage/drivers/windows + github.com/containers/storage/drivers/zfs +-github.com/containers/storage/pkg/locker ++github.com/containers/storage/pkg/archive ++github.com/containers/storage/pkg/chrootarchive ++github.com/containers/storage/pkg/config + github.com/containers/storage/pkg/devicemapper ++github.com/containers/storage/pkg/directory + github.com/containers/storage/pkg/dmesg +-github.com/containers/storage/pkg/loopback +-github.com/containers/storage/drivers/overlayutils +-github.com/containers/storage/drivers/quota ++github.com/containers/storage/pkg/fileutils + github.com/containers/storage/pkg/fsutils +-github.com/containers/storage/drivers/copy ++github.com/containers/storage/pkg/homedir ++github.com/containers/storage/pkg/idtools ++github.com/containers/storage/pkg/ioutils ++github.com/containers/storage/pkg/locker ++github.com/containers/storage/pkg/lockfile ++github.com/containers/storage/pkg/longpath ++github.com/containers/storage/pkg/loopback ++github.com/containers/storage/pkg/mount ++github.com/containers/storage/pkg/parsers ++github.com/containers/storage/pkg/parsers/kernel ++github.com/containers/storage/pkg/pools ++github.com/containers/storage/pkg/promise ++github.com/containers/storage/pkg/reexec ++github.com/containers/storage/pkg/stringid ++github.com/containers/storage/pkg/stringutils ++github.com/containers/storage/pkg/system ++github.com/containers/storage/pkg/tarlog ++github.com/containers/storage/pkg/truncindex + # github.com/coreos/go-iptables v0.4.2 + github.com/coreos/go-iptables/iptables + # github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f + github.com/coreos/go-systemd/activation + github.com/coreos/go-systemd/dbus +-github.com/coreos/go-systemd/sdjournal + github.com/coreos/go-systemd/journal ++github.com/coreos/go-systemd/sdjournal + # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f + github.com/coreos/pkg/dlopen + # github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca +@@ -176,68 +177,68 @@ github.com/cyphar/filepath-securejoin + # github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew/spew + # github.com/docker/distribution v2.7.1+incompatible ++github.com/docker/distribution 
++github.com/docker/distribution/digestset ++github.com/docker/distribution/metrics + github.com/docker/distribution/reference + github.com/docker/distribution/registry/api/errcode + github.com/docker/distribution/registry/api/v2 + github.com/docker/distribution/registry/client +-github.com/docker/distribution/digestset +-github.com/docker/distribution + github.com/docker/distribution/registry/client/auth/challenge + github.com/docker/distribution/registry/client/transport + github.com/docker/distribution/registry/storage/cache + github.com/docker/distribution/registry/storage/cache/memory +-github.com/docker/distribution/metrics + # github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce +-github.com/docker/docker/pkg/signal +-github.com/docker/docker/pkg/homedir +-github.com/docker/docker/oci/caps +-github.com/docker/docker/pkg/namesgenerator +-github.com/docker/docker/pkg/term +-github.com/docker/docker/pkg/ioutils +-github.com/docker/docker/pkg/parsers +-github.com/docker/docker/api/types/versions +-github.com/docker/docker/errdefs +-github.com/docker/docker/pkg/term/windows +-github.com/docker/docker/pkg/longpath +-github.com/docker/docker/api/types/registry +-github.com/docker/docker/api/types/swarm +-github.com/docker/docker/pkg/archive +-github.com/docker/docker/pkg/fileutils +-github.com/docker/docker/pkg/jsonmessage +-github.com/docker/docker/pkg/stdcopy +-github.com/docker/docker/pkg/system +-github.com/docker/docker/client +-github.com/docker/docker/api/types/container +-github.com/docker/docker/api/types/mount +-github.com/docker/docker/api/types/network +-github.com/docker/docker/api/types/swarm/runtime +-github.com/docker/docker/pkg/idtools +-github.com/docker/docker/pkg/pools +-github.com/docker/docker/pkg/mount + github.com/docker/docker/api + github.com/docker/docker/api/types ++github.com/docker/docker/api/types/blkiodev ++github.com/docker/docker/api/types/container + github.com/docker/docker/api/types/events + github.com/docker/docker/api/types/filters + github.com/docker/docker/api/types/image ++github.com/docker/docker/api/types/mount ++github.com/docker/docker/api/types/network ++github.com/docker/docker/api/types/registry ++github.com/docker/docker/api/types/strslice ++github.com/docker/docker/api/types/swarm ++github.com/docker/docker/api/types/swarm/runtime + github.com/docker/docker/api/types/time ++github.com/docker/docker/api/types/versions + github.com/docker/docker/api/types/volume +-github.com/docker/docker/api/types/blkiodev +-github.com/docker/docker/api/types/strslice ++github.com/docker/docker/client ++github.com/docker/docker/errdefs ++github.com/docker/docker/oci/caps ++github.com/docker/docker/pkg/archive ++github.com/docker/docker/pkg/fileutils ++github.com/docker/docker/pkg/homedir ++github.com/docker/docker/pkg/idtools ++github.com/docker/docker/pkg/ioutils ++github.com/docker/docker/pkg/jsonmessage ++github.com/docker/docker/pkg/longpath ++github.com/docker/docker/pkg/mount ++github.com/docker/docker/pkg/namesgenerator ++github.com/docker/docker/pkg/parsers ++github.com/docker/docker/pkg/pools ++github.com/docker/docker/pkg/signal ++github.com/docker/docker/pkg/stdcopy ++github.com/docker/docker/pkg/system ++github.com/docker/docker/pkg/term ++github.com/docker/docker/pkg/term/windows + # github.com/docker/docker-credential-helpers v0.6.3 +-github.com/docker/docker-credential-helpers/credentials + github.com/docker/docker-credential-helpers/client ++github.com/docker/docker-credential-helpers/credentials + # github.com/docker/go-connections 
v0.4.0 + github.com/docker/go-connections/nat +-github.com/docker/go-connections/tlsconfig + github.com/docker/go-connections/sockets ++github.com/docker/go-connections/tlsconfig + # github.com/docker/go-metrics v0.0.1 + github.com/docker/go-metrics + # github.com/docker/go-units v0.4.0 + github.com/docker/go-units + # github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 + github.com/docker/libnetwork/resolvconf +-github.com/docker/libnetwork/types + github.com/docker/libnetwork/resolvconf/dns ++github.com/docker/libnetwork/types + # github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c + github.com/docker/spdystream + github.com/docker/spdystream/spdy +@@ -287,12 +288,12 @@ github.com/ishidawataru/sctp + # github.com/json-iterator/go v1.1.7 + github.com/json-iterator/go + # github.com/klauspost/compress v1.8.1 +-github.com/klauspost/compress/zstd + github.com/klauspost/compress/flate ++github.com/klauspost/compress/fse + github.com/klauspost/compress/huff0 + github.com/klauspost/compress/snappy ++github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd/internal/xxhash +-github.com/klauspost/compress/fse + # github.com/klauspost/cpuid v1.2.1 + github.com/klauspost/cpuid + # github.com/klauspost/pgzip v1.2.1 +@@ -318,83 +319,83 @@ github.com/mrunalp/fileutils + # github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c + github.com/mtrmac/gpgme + # github.com/onsi/ginkgo v1.10.2 +-github.com/onsi/ginkgo/ginkgo + github.com/onsi/ginkgo + github.com/onsi/ginkgo/config ++github.com/onsi/ginkgo/extensions/table ++github.com/onsi/ginkgo/ginkgo + github.com/onsi/ginkgo/ginkgo/convert + github.com/onsi/ginkgo/ginkgo/interrupthandler + github.com/onsi/ginkgo/ginkgo/nodot + github.com/onsi/ginkgo/ginkgo/testrunner + github.com/onsi/ginkgo/ginkgo/testsuite + github.com/onsi/ginkgo/ginkgo/watch +-github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable +-github.com/onsi/ginkgo/types + github.com/onsi/ginkgo/internal/codelocation ++github.com/onsi/ginkgo/internal/containernode + github.com/onsi/ginkgo/internal/failer ++github.com/onsi/ginkgo/internal/leafnodes + github.com/onsi/ginkgo/internal/remote ++github.com/onsi/ginkgo/internal/spec ++github.com/onsi/ginkgo/internal/spec_iterator ++github.com/onsi/ginkgo/internal/specrunner + github.com/onsi/ginkgo/internal/suite + github.com/onsi/ginkgo/internal/testingtproxy + github.com/onsi/ginkgo/internal/writer + github.com/onsi/ginkgo/reporters + github.com/onsi/ginkgo/reporters/stenographer +-github.com/onsi/ginkgo/extensions/table ++github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable + github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +-github.com/onsi/ginkgo/internal/spec_iterator +-github.com/onsi/ginkgo/internal/containernode +-github.com/onsi/ginkgo/internal/leafnodes +-github.com/onsi/ginkgo/internal/spec +-github.com/onsi/ginkgo/internal/specrunner ++github.com/onsi/ginkgo/types + # github.com/onsi/gomega v1.7.0 + github.com/onsi/gomega +-github.com/onsi/gomega/gexec + github.com/onsi/gomega/format ++github.com/onsi/gomega/gbytes ++github.com/onsi/gomega/gexec + github.com/onsi/gomega/internal/assertion + github.com/onsi/gomega/internal/asyncassertion ++github.com/onsi/gomega/internal/oraclematcher + github.com/onsi/gomega/internal/testingtsupport + github.com/onsi/gomega/matchers +-github.com/onsi/gomega/types +-github.com/onsi/gomega/gbytes +-github.com/onsi/gomega/internal/oraclematcher + github.com/onsi/gomega/matchers/support/goraph/bipartitegraph + 
github.com/onsi/gomega/matchers/support/goraph/edge + github.com/onsi/gomega/matchers/support/goraph/node + github.com/onsi/gomega/matchers/support/goraph/util ++github.com/onsi/gomega/types + # github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/opencontainers/go-digest + # github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 +-github.com/opencontainers/image-spec/specs-go/v1 + github.com/opencontainers/image-spec/specs-go ++github.com/opencontainers/image-spec/specs-go/v1 + # github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 +-github.com/opencontainers/runc/libcontainer/user + github.com/opencontainers/runc/libcontainer/apparmor ++github.com/opencontainers/runc/libcontainer/cgroups + github.com/opencontainers/runc/libcontainer/configs + github.com/opencontainers/runc/libcontainer/devices +-github.com/opencontainers/runc/libcontainer/cgroups + github.com/opencontainers/runc/libcontainer/system ++github.com/opencontainers/runc/libcontainer/user + # github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 + github.com/opencontainers/runtime-spec/specs-go + # github.com/opencontainers/runtime-tools v0.9.0 ++github.com/opencontainers/runtime-tools/error ++github.com/opencontainers/runtime-tools/filepath + github.com/opencontainers/runtime-tools/generate +-github.com/opencontainers/runtime-tools/validate + github.com/opencontainers/runtime-tools/generate/seccomp +-github.com/opencontainers/runtime-tools/filepath + github.com/opencontainers/runtime-tools/specerror +-github.com/opencontainers/runtime-tools/error ++github.com/opencontainers/runtime-tools/validate + # github.com/opencontainers/selinux v1.3.0 +-github.com/opencontainers/selinux/go-selinux/label + github.com/opencontainers/selinux/go-selinux ++github.com/opencontainers/selinux/go-selinux/label + # github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible + github.com/openshift/api/config/v1 + # github.com/openshift/imagebuilder v1.1.1 + github.com/openshift/imagebuilder +-github.com/openshift/imagebuilder/dockerfile/parser + github.com/openshift/imagebuilder/dockerfile/command ++github.com/openshift/imagebuilder/dockerfile/parser + github.com/openshift/imagebuilder/signal + github.com/openshift/imagebuilder/strslice + # github.com/opentracing/opentracing-go v1.1.0 + github.com/opentracing/opentracing-go +-github.com/opentracing/opentracing-go/log + github.com/opentracing/opentracing-go/ext ++github.com/opentracing/opentracing-go/log + # github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 + github.com/ostreedev/ostree-go/pkg/glibobject + github.com/ostreedev/ostree-go/pkg/otbuiltin +@@ -406,19 +407,19 @@ github.com/pkg/profile + github.com/pmezard/go-difflib/difflib + # github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 + github.com/pquerna/ffjson/fflib/v1 ++github.com/pquerna/ffjson/fflib/v1/internal + github.com/pquerna/ffjson/inception + github.com/pquerna/ffjson/shared +-github.com/pquerna/ffjson/fflib/v1/internal + # github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang/prometheus +-github.com/prometheus/client_golang/prometheus/promhttp + github.com/prometheus/client_golang/prometheus/internal ++github.com/prometheus/client_golang/prometheus/promhttp + # github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/client_model/go + # github.com/prometheus/common v0.6.0 + github.com/prometheus/common/expfmt +-github.com/prometheus/common/model + 
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg ++github.com/prometheus/common/model + # github.com/prometheus/procfs v0.0.3 + github.com/prometheus/procfs + github.com/prometheus/procfs/internal/fs +@@ -446,30 +447,30 @@ github.com/tchap/go-patricia/patricia + github.com/uber/jaeger-client-go + github.com/uber/jaeger-client-go/config + github.com/uber/jaeger-client-go/internal/baggage ++github.com/uber/jaeger-client-go/internal/baggage/remote + github.com/uber/jaeger-client-go/internal/spanlog + github.com/uber/jaeger-client-go/internal/throttler ++github.com/uber/jaeger-client-go/internal/throttler/remote + github.com/uber/jaeger-client-go/log ++github.com/uber/jaeger-client-go/rpcmetrics + github.com/uber/jaeger-client-go/thrift ++github.com/uber/jaeger-client-go/thrift-gen/agent ++github.com/uber/jaeger-client-go/thrift-gen/baggage + github.com/uber/jaeger-client-go/thrift-gen/jaeger + github.com/uber/jaeger-client-go/thrift-gen/sampling + github.com/uber/jaeger-client-go/thrift-gen/zipkincore +-github.com/uber/jaeger-client-go/utils +-github.com/uber/jaeger-client-go/internal/baggage/remote +-github.com/uber/jaeger-client-go/internal/throttler/remote +-github.com/uber/jaeger-client-go/rpcmetrics + github.com/uber/jaeger-client-go/transport +-github.com/uber/jaeger-client-go/thrift-gen/agent +-github.com/uber/jaeger-client-go/thrift-gen/baggage ++github.com/uber/jaeger-client-go/utils + # github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 + github.com/uber/jaeger-lib/metrics + # github.com/ulikunitz/xz v0.5.6 + github.com/ulikunitz/xz ++github.com/ulikunitz/xz/internal/hash + github.com/ulikunitz/xz/internal/xlog + github.com/ulikunitz/xz/lzma +-github.com/ulikunitz/xz/internal/hash + # github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b +-github.com/varlink/go/varlink + github.com/varlink/go/cmd/varlink-go-interface-generator ++github.com/varlink/go/varlink + github.com/varlink/go/varlink/idl + # github.com/vbatts/tar-split v0.11.1 + github.com/vbatts/tar-split/archive/tar +@@ -477,8 +478,8 @@ github.com/vbatts/tar-split/tar/asm + github.com/vbatts/tar-split/tar/storage + # github.com/vbauerster/mpb v3.4.0+incompatible + github.com/vbauerster/mpb +-github.com/vbauerster/mpb/decor + github.com/vbauerster/mpb/cwriter ++github.com/vbauerster/mpb/decor + github.com/vbauerster/mpb/internal + # github.com/vishvananda/netlink v1.0.0 + github.com/vishvananda/netlink +@@ -492,32 +493,32 @@ github.com/xeipuuv/gojsonreference + # github.com/xeipuuv/gojsonschema v1.1.0 + github.com/xeipuuv/gojsonschema + # golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad +-golang.org/x/crypto/ssh/terminal ++golang.org/x/crypto/cast5 + golang.org/x/crypto/openpgp + golang.org/x/crypto/openpgp/armor ++golang.org/x/crypto/openpgp/elgamal + golang.org/x/crypto/openpgp/errors + golang.org/x/crypto/openpgp/packet + golang.org/x/crypto/openpgp/s2k +-golang.org/x/crypto/cast5 +-golang.org/x/crypto/openpgp/elgamal ++golang.org/x/crypto/ssh/terminal + # golang.org/x/net v0.0.0-20190628185345-da137c7871d7 + golang.org/x/net/context +-golang.org/x/net/http2 ++golang.org/x/net/context/ctxhttp ++golang.org/x/net/html ++golang.org/x/net/html/atom + golang.org/x/net/html/charset +-golang.org/x/net/proxy + golang.org/x/net/http/httpguts ++golang.org/x/net/http2 + golang.org/x/net/http2/hpack + golang.org/x/net/idna +-golang.org/x/net/html + golang.org/x/net/internal/socks +-golang.org/x/net/html/atom +-golang.org/x/net/context/ctxhttp ++golang.org/x/net/proxy + # golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/oauth2 + golang.org/x/oauth2/internal + # golang.org/x/sync v0.0.0-20190423024810-112230192c58 +-golang.org/x/sync/semaphore + golang.org/x/sync/errgroup ++golang.org/x/sync/semaphore + # golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 + golang.org/x/sys/unix + golang.org/x/sys/windows +@@ -525,41 +526,41 @@ golang.org/x/sys/windows + golang.org/x/text/encoding + golang.org/x/text/encoding/charmap + golang.org/x/text/encoding/htmlindex +-golang.org/x/text/transform +-golang.org/x/text/secure/bidirule +-golang.org/x/text/unicode/bidi +-golang.org/x/text/unicode/norm +-golang.org/x/text/encoding/internal/identifier + golang.org/x/text/encoding/internal ++golang.org/x/text/encoding/internal/identifier + golang.org/x/text/encoding/japanese + golang.org/x/text/encoding/korean + golang.org/x/text/encoding/simplifiedchinese + golang.org/x/text/encoding/traditionalchinese + golang.org/x/text/encoding/unicode +-golang.org/x/text/language +-golang.org/x/text/internal/utf8internal +-golang.org/x/text/runes + golang.org/x/text/internal/language + golang.org/x/text/internal/language/compact + golang.org/x/text/internal/tag ++golang.org/x/text/internal/utf8internal ++golang.org/x/text/language ++golang.org/x/text/runes ++golang.org/x/text/secure/bidirule ++golang.org/x/text/transform ++golang.org/x/text/unicode/bidi ++golang.org/x/text/unicode/norm + # golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 + golang.org/x/time/rate + # google.golang.org/appengine v1.6.1 +-google.golang.org/appengine/urlfetch + google.golang.org/appengine/internal +-google.golang.org/appengine/internal/urlfetch + google.golang.org/appengine/internal/base + google.golang.org/appengine/internal/datastore + google.golang.org/appengine/internal/log + google.golang.org/appengine/internal/remote_api ++google.golang.org/appengine/internal/urlfetch ++google.golang.org/appengine/urlfetch + # google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 + google.golang.org/genproto/googleapis/rpc/status + # google.golang.org/grpc v1.24.0 + google.golang.org/grpc/codes +-google.golang.org/grpc/status +-google.golang.org/grpc/internal + google.golang.org/grpc/connectivity + google.golang.org/grpc/grpclog ++google.golang.org/grpc/internal ++google.golang.org/grpc/status + # gopkg.in/fsnotify.v1 v1.4.7 + gopkg.in/fsnotify.v1 + # gopkg.in/inf.v0 v0.9.1 +@@ -571,61 +572,61 @@ gopkg.in/yaml.v2 + # k8s.io/api v0.0.0-20190813020757-36bff7324fb7 + k8s.io/api/core/v1 + # k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 +-k8s.io/apimachinery/pkg/apis/meta/v1 +-k8s.io/apimachinery/pkg/util/runtime ++k8s.io/apimachinery/pkg/api/errors + k8s.io/apimachinery/pkg/api/resource +-k8s.io/apimachinery/pkg/runtime +-k8s.io/apimachinery/pkg/runtime/schema +-k8s.io/apimachinery/pkg/types +-k8s.io/apimachinery/pkg/util/intstr ++k8s.io/apimachinery/pkg/apis/meta/v1 ++k8s.io/apimachinery/pkg/apis/meta/v1/unstructured + k8s.io/apimachinery/pkg/conversion ++k8s.io/apimachinery/pkg/conversion/queryparams + k8s.io/apimachinery/pkg/fields + k8s.io/apimachinery/pkg/labels ++k8s.io/apimachinery/pkg/runtime ++k8s.io/apimachinery/pkg/runtime/schema ++k8s.io/apimachinery/pkg/runtime/serializer ++k8s.io/apimachinery/pkg/runtime/serializer/json ++k8s.io/apimachinery/pkg/runtime/serializer/protobuf ++k8s.io/apimachinery/pkg/runtime/serializer/recognizer ++k8s.io/apimachinery/pkg/runtime/serializer/streaming ++k8s.io/apimachinery/pkg/runtime/serializer/versioning + k8s.io/apimachinery/pkg/selection 
+-k8s.io/apimachinery/pkg/watch +-k8s.io/apimachinery/pkg/util/httpstream +-k8s.io/apimachinery/pkg/util/remotecommand +-k8s.io/apimachinery/pkg/conversion/queryparams ++k8s.io/apimachinery/pkg/types ++k8s.io/apimachinery/pkg/util/clock + k8s.io/apimachinery/pkg/util/errors ++k8s.io/apimachinery/pkg/util/framer ++k8s.io/apimachinery/pkg/util/httpstream ++k8s.io/apimachinery/pkg/util/httpstream/spdy ++k8s.io/apimachinery/pkg/util/intstr + k8s.io/apimachinery/pkg/util/json + k8s.io/apimachinery/pkg/util/naming ++k8s.io/apimachinery/pkg/util/net ++k8s.io/apimachinery/pkg/util/remotecommand ++k8s.io/apimachinery/pkg/util/runtime + k8s.io/apimachinery/pkg/util/sets +-k8s.io/apimachinery/third_party/forked/golang/reflect + k8s.io/apimachinery/pkg/util/validation +-k8s.io/apimachinery/pkg/util/net +-k8s.io/apimachinery/pkg/api/errors +-k8s.io/apimachinery/pkg/runtime/serializer/streaming +-k8s.io/apimachinery/pkg/util/httpstream/spdy + k8s.io/apimachinery/pkg/util/validation/field ++k8s.io/apimachinery/pkg/util/yaml + k8s.io/apimachinery/pkg/version +-k8s.io/apimachinery/pkg/runtime/serializer +-k8s.io/apimachinery/pkg/util/clock ++k8s.io/apimachinery/pkg/watch + k8s.io/apimachinery/third_party/forked/golang/netutil +-k8s.io/apimachinery/pkg/runtime/serializer/json +-k8s.io/apimachinery/pkg/runtime/serializer/protobuf +-k8s.io/apimachinery/pkg/runtime/serializer/recognizer +-k8s.io/apimachinery/pkg/runtime/serializer/versioning +-k8s.io/apimachinery/pkg/util/framer +-k8s.io/apimachinery/pkg/util/yaml +-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured ++k8s.io/apimachinery/third_party/forked/golang/reflect + # k8s.io/client-go v0.0.0-20190620085101-78d2af792bab +-k8s.io/client-go/tools/remotecommand +-k8s.io/client-go/rest +-k8s.io/client-go/transport/spdy +-k8s.io/client-go/util/exec +-k8s.io/client-go/util/homedir ++k8s.io/client-go/pkg/apis/clientauthentication ++k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 ++k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 + k8s.io/client-go/pkg/version + k8s.io/client-go/plugin/pkg/client/auth/exec ++k8s.io/client-go/rest + k8s.io/client-go/rest/watch + k8s.io/client-go/tools/clientcmd/api + k8s.io/client-go/tools/metrics ++k8s.io/client-go/tools/remotecommand + k8s.io/client-go/transport ++k8s.io/client-go/transport/spdy + k8s.io/client-go/util/cert +-k8s.io/client-go/util/flowcontrol +-k8s.io/client-go/pkg/apis/clientauthentication +-k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +-k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 + k8s.io/client-go/util/connrotation ++k8s.io/client-go/util/exec ++k8s.io/client-go/util/flowcontrol ++k8s.io/client-go/util/homedir + k8s.io/client-go/util/keyutil + # k8s.io/klog v0.3.3 + k8s.io/klog + +From 54558fbe4b27a8e8bc81c3c6079ea7e89ac683ee Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Wed, 5 Feb 2020 15:19:56 +0100 +Subject: [PATCH 2/3] bump golangci-lint + +Fixes: https://github.com/golangci/golangci-lint/issues/658 +Signed-off-by: Valentin Rothberg +--- + Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index a5265653d1..eb67d2665a 100644 +--- a/Makefile ++++ b/Makefile +@@ -479,7 +479,7 @@ endef + + .install.golangci-lint: .gopathok + if [ ! 
-x "$(GOBIN)/golangci-lint" ]; then \ +- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOBIN)/ v1.17.1; \ ++ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOBIN)/ v1.18.0; \ + fi + + .install.md2man: .gopathok + +From b8c5d5612b90ebce453a1bbc4757fced4dc731dc Mon Sep 17 00:00:00 2001 +From: Valentin Rothberg +Date: Mon, 10 Feb 2020 09:42:17 +0100 +Subject: [PATCH 3/3] e2e pull test: use k8s pause instead of alpine + +When pulling with --all-tags, use the k8s pause image instead of alpine. +The pause repo has considerably less tags and is hence used in master to +prevent timeouts that we're hitting when running rootless, where we're +suffering additional performance regressions on VFS. + +Signed-off-by: Valentin Rothberg +--- + test/e2e/pull_test.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go +index 5152409aff..8a59794850 100644 +--- a/test/e2e/pull_test.go ++++ b/test/e2e/pull_test.go +@@ -339,7 +339,7 @@ var _ = Describe("Podman pull", func() { + }) + + It("podman pull check all tags", func() { +- session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "alpine"}) ++ session := podmanTest.PodmanNoCache([]string{"pull", "--all-tags", "k8s.gcr.io/pause"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(session.LineInOuputStartsWith("Pulled Images:")).To(BeTrue()) diff --git a/SOURCES/podman-CVE-2020-1726.patch b/SOURCES/podman-CVE-2020-1726.patch new file mode 100644 index 0000000..b7e9bec --- /dev/null +++ b/SOURCES/podman-CVE-2020-1726.patch @@ -0,0 +1,100 @@ +From c140ecdc9b416ab4efd4d21d14acd63b6adbdd42 Mon Sep 17 00:00:00 2001 +From: Matthew Heon +Date: Mon, 10 Feb 2020 13:37:38 -0500 +Subject: [PATCH] Do not copy up when volume is not empty + +When Docker performs a copy up, it first verifies that the volume +being copied into is empty; thus, for volumes that have been +modified elsewhere (e.g. manually copying into then), the copy up +will not be performed at all. Duplicate this behavior in Podman +by checking if the volume is empty before copying. + +Furthermore, move setting copyup to false further up. This will +prevent a potential race where copy up could happen more than +once if Podman was killed after some files had been copied but +before the DB was updated. + +This resolves CVE-2020-1726. + +Signed-off-by: Matthew Heon +--- + libpod/container_internal.go | 28 ++++++++++++++++++++++------ + test/e2e/run_volume_test.go | 24 ++++++++++++++++++++++++ + 2 files changed, 46 insertions(+), 6 deletions(-) + +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go.CVE-2020-1726 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go.CVE-2020-1726 2020-02-13 22:37:15.002058706 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/libpod/container_internal.go 2020-02-13 22:37:15.005058742 +0100 +@@ -1358,18 +1358,34 @@ func (c *Container) mountNamedVolume(v * + } + if vol.state.NeedsCopyUp { + logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name()) ++ ++ // Set NeedsCopyUp to false immediately, so we don't try this ++ // again when there are already files copied. 
++ vol.state.NeedsCopyUp = false ++ if err := vol.save(); err != nil { ++ return nil, err ++ } ++ ++ // If the volume is not empty, we should not copy up. ++ volMount := vol.MountPoint() ++ contents, err := ioutil.ReadDir(volMount) ++ if err != nil { ++ return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID()) ++ } ++ if len(contents) > 0 { ++ // The volume is not empty. It was likely modified ++ // outside of Podman. For safety, let's not copy up into ++ // it. Fixes CVE-2020-1726. ++ return vol, nil ++ } ++ + srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest) + if err != nil { + return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name()) + } +- if err := c.copyWithTarFromImage(srcDir, vol.MountPoint()); err != nil && !os.IsNotExist(err) { ++ if err := c.copyWithTarFromImage(srcDir, volMount); err != nil && !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "error copying content from container %s into volume %s", c.ID(), vol.Name()) + } +- +- vol.state.NeedsCopyUp = false +- if err := vol.save(); err != nil { +- return nil, err +- } + } + return vol, nil + } +diff -up ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go.CVE-2020-1726 ./libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go +--- libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go.CVE-2020-1726 2020-02-13 22:37:15.030059039 +0100 ++++ libpod-5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26/test/e2e/run_volume_test.go 2020-02-13 22:37:15.033059075 +0100 +@@ -375,4 +375,28 @@ var _ = Describe("Podman run with volume + volMount.WaitWithDefaultTimeout() + Expect(volMount.ExitCode()).To(Not(Equal(0))) + }) ++ ++ It("Podman fix for CVE-2020-1726", func() { ++ volName := "testVol" ++ volCreate := podmanTest.Podman([]string{"volume", "create", volName}) ++ volCreate.WaitWithDefaultTimeout() ++ Expect(volCreate.ExitCode()).To(Equal(0)) ++ ++ volPath := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{.Mountpoint}}", volName}) ++ volPath.WaitWithDefaultTimeout() ++ Expect(volPath.ExitCode()).To(Equal(0)) ++ path := volPath.OutputToString() ++ ++ fileName := "thisIsATestFile" ++ file, err := os.Create(filepath.Join(path, fileName)) ++ Expect(err).To(BeNil()) ++ defer file.Close() ++ ++ runLs := podmanTest.Podman([]string{"run", "-t", "-i", "--rm", "-v", fmt.Sprintf("%v:/etc/ssl", volName), ALPINE, "ls", "-1", "/etc/ssl"}) ++ runLs.WaitWithDefaultTimeout() ++ Expect(runLs.ExitCode()).To(Equal(0)) ++ outputArr := runLs.OutputToStringArray() ++ Expect(len(outputArr)).To(Equal(1)) ++ Expect(strings.Contains(outputArr[0], fileName)).To(BeTrue()) ++ }) + }) diff --git a/SPECS/podman.spec b/SPECS/podman.spec index 4c79c45..3f297ef 100644 --- a/SPECS/podman.spec +++ b/SPECS/podman.spec @@ -1,52 +1,68 @@ %global with_debug 1 %global with_check 0 +%bcond_without varlink %if 0%{?with_debug} %global _find_debuginfo_dwz_opts %{nil} %global _dwz_low_mem_die_limit 0 %else -%global debug_package %{nil} +%global debug_package %{nil} %endif -%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**}; -%define gogenerate(o:) go generate %{?**}; +%if 0%{?rhel} <= 7 && ! 
0%{?fedora} +%define gobuild(o:) scl enable go-toolset-1.12 -- go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -compressdwarf=false -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**}; +%define gogenerate(o:) scl enable go-toolset-1.12 -- go generate %{?**}; +%else +%define gobuild(o:) go build -buildmode pie -compiler gc -tags="rpm_crashtraceback libtrust_openssl ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -compressdwarf=false -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '%__global_ldflags'" -a -v -x %{?**}; +%global gogenerate go generate +%endif %global provider github %global provider_tld com %global project containers %global repo libpod # https://github.com/containers/libpod -%global import_path %{provider}.%{provider_tld}/%{project}/%{repo} -%global git_podman https://%{provider}.%{provider_tld}/%{project}/%{repo} -%global commit b3f10c8be229bcc58c1673b0431285fd5fce1293 -%global shortcommit %(c=%{commit}; echo ${c:0:7}) - -%global import_path_conmon github.com/containers/conmon -%global git_conmon https://%{import_path_conmon} -%global commit_conmon 8455ce1ef385120deb827d0f0588c04357bad4c4 -%global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7}) +%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} +%global import_path %{provider_prefix} +%global git0 https://%{provider}.%{provider_tld}/%{project}/%{repo} +%global commit0 5cc92849f7fc9dd734ca2fd8f3ae8830b9a7eb26 +%global shortcommit0 %(c=%{commit0}; echo ${c:0:7}) Name: podman -Version: 1.4.4 -Release: 4%{?dist} +Version: 1.6.4 +Release: 16%{?dist} Summary: Manage Pods, Containers and Container Images +ExcludeArch: %{ix86} s390 ppc ppc64 License: ASL 2.0 -URL: https://%{name}.io -Source0: %{git_podman}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz -Source1: %{git_conmon}/archive/%{commit_conmon}/conmon-%{shortcommit_conmon}.tar.gz -Patch0: bz1728242-1.patch -Patch1: bz1728242-2.patch -ExclusiveArch: aarch64 ppc64le s390x x86_64 %{ix86} %{arm} -# If go_compiler is not set to 1, there is no virtual provide. Use golang instead. 
-%if 0%{?fedora} -BuildRequires: %{?go_compiler:compiler(go-compiler)}%{!?go_compiler:golang} -BuildRequires: make +URL: https://%{name}.io/ +Source0: %{git0}/archive/%{commit0}/%{repo}-%{shortcommit0}.tar.gz +Patch0: podman-1792243.patch +Patch1: https://patch-diff.githubusercontent.com/raw/containers/libpod/pull/5085.patch +Patch2: podman-CVE-2020-1726.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1784950 +# backported: https://patch-diff.githubusercontent.com/raw/containers/buildah/pull/2031.patch +Patch3: podman-1784950.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1805212 +# backported: https://github.com/containers/libpod/pull/5348/commits/6c97e0d5c140d587e5477d478159e91b8adcfd15.patch +Patch4: podman-1805212.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1807379 +# patch: https://github.com/containers/libpod/pull/4818.patch +Patch5: podman-1807379.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1775647 +# patch: https://patch-diff.githubusercontent.com/raw/containers/libpod/pull/4493.patch +Patch6: podman-1775647.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1807310 +# patch: https://github.com/containers/libpod/pull/5349.patch +Patch7: podman-1807310.patch +# related bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2020-1702 +# patch: https://patch-diff.githubusercontent.com/raw/containers/libpod/pull/5096.patch +Patch8: podman-CVE-2020-1702.patch +Provides: %{name}-manpages = %{version}-%{release} +Obsoletes: %{name}-manpages < %{version}-%{release} +%if 0%{?rhel} <= 7 && ! 0%{?fedora} +BuildRequires: go-toolset-1.12 %else -BuildRequires: go-toolset-1.10 -#BuildRequires: openssl-devel -%endif #fedora -%if 0%{?fedora} || 0%{?rhel} <= 7 -BuildRequires: btrfs-progs-devel +BuildRequires: golang >= 1.12.12-4 %endif BuildRequires: glib2-devel BuildRequires: glibc-devel @@ -58,147 +74,183 @@ BuildRequires: libassuan-devel BuildRequires: libgpg-error-devel BuildRequires: libseccomp-devel BuildRequires: libselinux-devel +BuildRequires: ostree-devel BuildRequires: pkgconfig +BuildRequires: make +BuildRequires: systemd BuildRequires: systemd-devel -Requires: runc -Requires: skopeo-containers >= 0.1.29-3 -# can't use default conmon right now, so we ship our own -#Requires: conmon +Requires: containers-common >= 0.1.29-3 Requires: containernetworking-plugins >= 0.8.1-1 Requires: iptables +Requires: nftables +Requires: libseccomp +Requires: conmon Requires: container-selinux +Requires: slirp4netns >= 0.4.0-1 +Requires: runc >= 1.0.0-57 +Requires: fuse-overlayfs + # vendored libraries # awk '{print "Provides: bundled(golang("$1")) = "$2}' vendor.conf | sort # [thanks to Carl George for containerd.spec] -Provides: bundled(golang(github.com/asaskevich/govalidator)) = v6 Provides: bundled(golang(github.com/Azure/go-ansiterm)) = 19f72df4d05d31cbe1c56bfc8045c96babff6c7e -Provides: bundled(golang(github.com/beorn7/perks)) = 3ac7bf7a47d159a033b107610db8a1b6575507a4 Provides: bundled(golang(github.com/blang/semver)) = v3.5.0 +Provides: bundled(golang(github.com/boltdb/bolt)) = master Provides: bundled(golang(github.com/buger/goterm)) = 2f8dfbc7dbbff5dd1d391ed91482c24df243b2d3 Provides: bundled(golang(github.com/BurntSushi/toml)) = v0.2.0 -Provides: bundled(golang(github.com/containerd/cgroups)) = 7a5fdd8330119dc70d850260db8f3594d89d6943 +Provides: bundled(golang(github.com/containerd/cgroups)) = 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0 Provides: bundled(golang(github.com/containerd/continuity)) = master 
-Provides: bundled(golang(github.com/containernetworking/cni)) = v0.4.0 -Provides: bundled(golang(github.com/containers/image)) = b129a8413fd1e8c53379acbbacfc7b667070ae50 -Provides: bundled(golang(github.com/containers/storage)) = 1e5ce40cdb84ab66e26186435b1273e04b879fef +#Provides: bundled(golang(github.com/containernetworking/cni)) = v0.7.0-alpha1 +Provides: bundled(golang(github.com/containernetworking/plugins)) = 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1 +Provides: bundled(golang(github.com/containers/image)) = 85d7559d44fd71f30e46e43d809bfbf88d11d916 +Provides: bundled(golang(github.com/containers/psgo)) = 5dde6da0bc8831b35243a847625bcf18183bd1ee +Provides: bundled(golang(github.com/containers/storage)) = 243c4cd616afdf06b4a975f18c4db083d26b1641 +Provides: bundled(golang(github.com/coreos/go-iptables)) = 25d087f3cffd9aedc0c2b7eff25f23cbf3c20fe1 Provides: bundled(golang(github.com/coreos/go-systemd)) = v14 +Provides: bundled(golang(github.com/cri-o/ocicni)) = master Provides: bundled(golang(github.com/cyphar/filepath-securejoin)) = v0.2.1 Provides: bundled(golang(github.com/davecgh/go-spew)) = v1.1.0 -Provides: bundled(golang(github.com/dgrijalva/jwt-go)) = v3.0.0 Provides: bundled(golang(github.com/docker/distribution)) = 7a8efe719e55bbfaff7bc5718cdf0ed51ca821df -Provides: bundled(golang(github.com/docker/docker)) = ce452fb72ffcdb7605ce98bde9302238f47c63c5 +Provides: bundled(golang(github.com/docker/docker)) = 86f080cff0914e9694068ed78d503701667c4c00 Provides: bundled(golang(github.com/docker/docker-credential-helpers)) = d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 Provides: bundled(golang(github.com/docker/go-connections)) = 3ede32e2033de7505e6500d6c868c2b9ed9f169d Provides: bundled(golang(github.com/docker/go-units)) = v0.3.2 Provides: bundled(golang(github.com/docker/libtrust)) = aabc10ec26b754e797f9028f4589c5b7bd90dc20 Provides: bundled(golang(github.com/docker/spdystream)) = ed496381df8283605c435b86d4fdd6f4f20b8c6e -Provides: bundled(golang(github.com/emicklei/go-restful)) = ff4f55a206334ef123e4f79bbf348980da81ca46 -Provides: bundled(golang(github.com/emicklei/go-restful-swagger12)) = 1.0.1 -Provides: bundled(golang(github.com/exponent-io/jsonpath)) = d6023ce2651d8eafb5c75bb0c7167536102ec9f5 Provides: bundled(golang(github.com/fatih/camelcase)) = f6a740d52f961c60348ebb109adde9f4635d7540 +Provides: bundled(golang(github.com/fsnotify/fsnotify)) = 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1 +Provides: bundled(golang(github.com/fsouza/go-dockerclient)) = master Provides: bundled(golang(github.com/ghodss/yaml)) = 04f313413ffd65ce25f2541bfd2b2ceec5c0908c Provides: bundled(golang(github.com/godbus/dbus)) = a389bdde4dd695d414e47b755e95e72b7826432c -Provides: bundled(golang(github.com/gogo/protobuf)) = v0.3 +Provides: bundled(golang(github.com/gogo/protobuf)) = c0656edd0d9eab7c66d1eb0c568f9039345796f7 Provides: bundled(golang(github.com/golang/glog)) = 23def4e6c14b4da8ac2ed8007337bc5eb5007998 Provides: bundled(golang(github.com/golang/groupcache)) = b710c8433bd175204919eb38776e944233235d03 -Provides: bundled(golang(github.com/golang/protobuf)) = 748d386b5c1ea99658fd69fe9f03991ce86a90c1 +Provides: bundled(golang(github.com/golang/protobuf)) = 4bd1920723d7b7c925de087aa32e2187708897f7 +Provides: bundled(golang(github.com/googleapis/gnostic)) = 0c5108395e2debce0d731cf0287ddf7242066aba Provides: bundled(golang(github.com/google/gofuzz)) = 44d81051d367757e1c7c6a5a86423ece9afcf63c -Provides: bundled(golang(github.com/go-openapi/analysis)) = b44dc874b601d9e4e2f6e19140e794ba24bead3b -Provides: 
bundled(golang(github.com/go-openapi/errors)) = d24ebc2075bad502fac3a8ae27aa6dd58e1952dc -Provides: bundled(golang(github.com/go-openapi/jsonpointer)) = 779f45308c19820f1a69e9a4cd965f496e0da10f -Provides: bundled(golang(github.com/go-openapi/jsonreference)) = 36d33bfe519efae5632669801b180bf1a245da3b -Provides: bundled(golang(github.com/go-openapi/loads)) = 18441dfa706d924a39a030ee2c3b1d8d81917b38 -Provides: bundled(golang(github.com/go-openapi/spec)) = 6aced65f8501fe1217321abf0749d354824ba2ff -Provides: bundled(golang(github.com/go-openapi/strfmt)) = 93a31ef21ac23f317792fff78f9539219dd74619 -Provides: bundled(golang(github.com/go-openapi/swag)) = 1d0bd113de87027671077d3c71eb3ac5d7dbba72 Provides: bundled(golang(github.com/gorilla/context)) = v1.1 Provides: bundled(golang(github.com/gorilla/mux)) = v1.3.0 Provides: bundled(golang(github.com/hashicorp/errwrap)) = 7554cd9344cec97297fa6649b055a8c98c2a1e55 Provides: bundled(golang(github.com/hashicorp/golang-lru)) = 0a025b7e63adc15a622f29b0b2c4c3848243bbf6 Provides: bundled(golang(github.com/hashicorp/go-multierror)) = 83588e72410abfbe4df460eeb6f30841ae47d4c4 Provides: bundled(golang(github.com/imdario/mergo)) = 0.2.2 -Provides: bundled(golang(github.com/juju/ratelimit)) = 5b9ff866471762aa2ab2dced63c9fb6f53921342 +Provides: bundled(golang(github.com/json-iterator/go)) = 1.0.0 Provides: bundled(golang(github.com/kr/pty)) = v1.0.0 -Provides: bundled(golang(github.com/mailru/easyjson)) = 99e922cf9de1bc0ab38310c277cff32c2147e747 +Provides: bundled(golang(github.com/mailru/easyjson)) = 03f2033d19d5860aef995fe360ac7d395cd8ce65 Provides: bundled(golang(github.com/mattn/go-runewidth)) = v0.0.1 -Provides: bundled(golang(github.com/matttproud/golang_protobuf_extensions)) = fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a Provides: bundled(golang(github.com/Microsoft/go-winio)) = 78439966b38d69bf38227fbf57ac8a6fee70f69a Provides: bundled(golang(github.com/Microsoft/hcsshim)) = 43f9725307998e09f2e3816c2c0c36dc98f0c982 Provides: bundled(golang(github.com/mistifyio/go-zfs)) = v2.1.1 -Provides: bundled(golang(github.com/mitchellh/mapstructure)) = d0303fe809921458f417bcf828397a65db30a7e4 +Provides: bundled(golang(github.com/mrunalp/fileutils)) = master Provides: bundled(golang(github.com/mtrmac/gpgme)) = b2432428689ca58c2b8e8dea9449d3295cf96fc9 -Provides: bundled(golang(github.com/opencontainers/go-digest)) = v1.0.0-rc0 +Provides: bundled(golang(github.com/Nvveen/Gotty)) = master +#Provides: bundled(golang(github.com/opencontainers/go-digest)) = v1.0.0-rc0 Provides: bundled(golang(github.com/opencontainers/image-spec)) = v1.0.0 -Provides: bundled(golang(github.com/opencontainers/runc)) = 6e15bc3f92fd4c58b3285e8f27eaeb6b22d62920 -Provides: bundled(golang(github.com/opencontainers/runtime-spec)) = v1.0.0 -Provides: bundled(golang(github.com/opencontainers/runtime-tools)) = 625e2322645b151a7cbb93a8b42920933e72167f -Provides: bundled(golang(github.com/opencontainers/selinux)) = b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd +Provides: bundled(golang(github.com/opencontainers/runc)) = b4e2ecb452d9ee4381137cc0a7e6715b96bed6de +Provides: bundled(golang(github.com/opencontainers/runtime-spec)) = d810dbc60d8c5aeeb3d054bd1132fab2121968ce +Provides: bundled(golang(github.com/opencontainers/runtime-tools)) = master +Provides: bundled(golang(github.com/opencontainers/selinux)) = b6fa367ed7f534f9ba25391cc2d467085dbb445a +Provides: bundled(golang(github.com/openshift/imagebuilder)) = master Provides: bundled(golang(github.com/ostreedev/ostree-go)) = master Provides: 
bundled(golang(github.com/pkg/errors)) = v0.8.0 Provides: bundled(golang(github.com/pmezard/go-difflib)) = 792786c7400a136282c1664665ae0a8db921c6c2 Provides: bundled(golang(github.com/pquerna/ffjson)) = d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac -Provides: bundled(golang(github.com/prometheus/client_golang)) = e7e903064f5e9eb5da98208bae10b475d4db0f8c -Provides: bundled(golang(github.com/prometheus/client_model)) = fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -Provides: bundled(golang(github.com/prometheus/common)) = 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 -Provides: bundled(golang(github.com/prometheus/procfs)) = 65c1f6f8f0fc1e2185eb9863a3bc751496404259 -Provides: bundled(golang(github.com/PuerkitoBio/purell)) = v1.1.0 -Provides: bundled(golang(github.com/PuerkitoBio/urlesc)) = 5bd2802263f21d8788851d5305584c82a5c75d7e -Provides: bundled(golang(github.com/renstrom/dedent)) = v1.0.0 +Provides: bundled(golang(github.com/projectatomic/buildah)) = af5bbde0180026ae87b7fc81c2dc124aa73ec959 +Provides: bundled(golang(github.com/seccomp/containers-golang)) = master Provides: bundled(golang(github.com/seccomp/libseccomp-golang)) = v0.9.0 Provides: bundled(golang(github.com/sirupsen/logrus)) = v1.0.0 Provides: bundled(golang(github.com/spf13/pflag)) = 9ff6c6923cfffbcd502984b8e0c80539a94968b7 Provides: bundled(golang(github.com/stretchr/testify)) = 4d4bfba8f1d1027c4fdbe371823030df51419987 Provides: bundled(golang(github.com/syndtr/gocapability)) = e7cb7fa329f456b3855136a2642b197bad7366ba Provides: bundled(golang(github.com/tchap/go-patricia)) = v2.2.6 -Provides: bundled(golang(github.com/ugorji/go)) = d23841a297e5489e787e72fceffabf9d2994b52a -Provides: bundled(golang(github.com/urfave/cli)) = 39908eb08fee7c10d842622a114a5c133fb0a3c6 +Provides: bundled(golang(github.com/ulikunitz/xz)) = v0.5.4 +Provides: bundled(golang(github.com/ulule/deepcopier)) = master +Provides: bundled(golang(github.com/urfave/cli)) = 934abfb2f102315b5794e15ebc7949e4ca253920 +Provides: bundled(golang(github.com/varlink/go)) = master Provides: bundled(golang(github.com/vbatts/tar-split)) = v0.10.2 Provides: bundled(golang(github.com/vishvananda/netlink)) = master Provides: bundled(golang(github.com/vishvananda/netns)) = master Provides: bundled(golang(github.com/xeipuuv/gojsonpointer)) = master Provides: bundled(golang(github.com/xeipuuv/gojsonreference)) = master Provides: bundled(golang(github.com/xeipuuv/gojsonschema)) = master -Provides: bundled(golang(golang.org/x/crypto)) = 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +Provides: bundled(golang(golang.org/x/crypto)) = 81e90905daefcd6fd217b62423c0908922eadb30 Provides: bundled(golang(golang.org/x/net)) = c427ad74c6d7a814201695e9ffde0c5d400a7674 -Provides: bundled(golang(golang.org/x/sys)) = 9aade4d3a3b7e6d876cd3823ad20ec45fc035402 +Provides: bundled(golang(golang.org/x/sys)) = master Provides: bundled(golang(golang.org/x/text)) = f72d8390a633d5dfb0cc84043294db9f6c935756 +Provides: bundled(golang(golang.org/x/time)) = f51c12702a4d776e4c1fa9b0fabab841babae631 Provides: bundled(golang(google.golang.org/grpc)) = v1.0.4 Provides: bundled(golang(gopkg.in/cheggaaa/pb.v1)) = v1.0.7 Provides: bundled(golang(gopkg.in/inf.v0)) = v0.9.0 Provides: bundled(golang(gopkg.in/mgo.v2)) = v2 +Provides: bundled(golang(gopkg.in/square/go-jose.v2)) = v2.1.3 Provides: bundled(golang(gopkg.in/yaml.v2)) = v2 +Provides: bundled(golang(k8s.io/api)) = 5ce4aa0bf2f097f6021127b3d879eeda82026be8 +Provides: bundled(golang(k8s.io/apiextensions-apiserver)) = 1b31e26d82f1ec2e945c560790e98f34bb5f2e63 +Provides: 
bundled(golang(k8s.io/apimachinery)) = 616b23029fa3dc3e0ccefd47963f5651a6543d94 +Provides: bundled(golang(k8s.io/apiserver)) = 4d1163080139f1f9094baf8a3a6099e85e1867f6 +Provides: bundled(golang(k8s.io/client-go)) = 7cd1d3291b7d9b1e2d54d4b69eb65995eaf8888e +Provides: bundled(golang(k8s.io/kube-openapi)) = 275e2ce91dec4c05a4094a7b1daee5560b555ac9 +Provides: bundled(golang(k8s.io/utils)) = 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e %description +%{name} (Pod Manager) is a fully featured container engine that is a simple daemonless tool. %{name} provides a Docker-CLI comparable command line that eases the transition from other container engines and allows the management of pods, containers and images. Simply put: alias docker=%{name}. Most %{name} commands can be run as a regular user, without requiring additional privileges. + +%{name} uses Buildah(1) internally to create container images. Both tools share image (not container) storage, hence each can use or manipulate images (but not containers) created by the other. + %{summary} -libpod provides a library for applications looking to use -the Container Pod concept popularized by Kubernetes. +%{repo} Simple management tool for pods, containers and images %package docker -Summary: Emulate Docker CLI using podman +Summary: Emulate Docker CLI using %{name} BuildArch: noarch Requires: %{name} = %{version}-%{release} Conflicts: docker Conflicts: docker-latest Conflicts: docker-ce Conflicts: docker-ee -Conflicts: docker-common +Conflicts: moby-engine %description docker This package installs a script named docker that emulates the Docker CLI by -executing %{name} commands, it also creates links between all Docker CLI man +executes %{name} commands, it also creates links between all Docker CLI man pages and %{name}. -# Go Toolset -%{?enable_gotoolset110} +%package remote +Summary: (Experimental) Remote client for managing %{name} containers + +%description remote +Remote client for managing %{name} containers. + +This experimental remote client is under heavy development. Please do not +run %{name}-remote in production. + +%{name}-remote uses the varlink connection to connect to a %{name} client to +manage pods, containers and container images. %{name}-remote supports ssh +connections as well. + +%package tests +Summary: Tests for %{name} +Requires: %{name} = %{version}-%{release} +#Requires: bats (which RHEL8 doesn't have. If it ever does, un-comment this) +Requires: jq + +%description tests +%{summary} + +This package contains system tests for %{name} %prep -%autosetup -Sgit -n %{repo}-%{commit} -mv pkg/hooks/README.md pkg/hooks/README-hooks.md +%autosetup -Sgit -n %{repo}-%{commit0} -# untar conmon -tar zxf %{SOURCE1} +sed -i 's/install.bin: podman/install.bin:/' Makefile +sed -i 's/install.man: docs/install.man:/' Makefile +sed -i 's/install.remote: podman-remote/install.remote:/' Makefile +mv pkg/hooks/README.md pkg/hooks/README-hooks.md %build +export GO111MODULE=off +export GOPATH=$(pwd):$(pwd)/_build + mkdir -p $(pwd)/_build pushd $(pwd)/_build mkdir -p src/%{provider}.%{provider_tld}/%{project} @@ -206,48 +258,59 @@ ln -s ../../../../ src/%{import_path} popd ln -s vendor src -export GOPATH=$(pwd):$(pwd)/_build - -#%%gogenerate ./cmd/%%{name}/varlink/... +rm -rf vendor/github.com/containers/storage/drivers/register/register_btrfs.go +%gogenerate ./cmd/%{name}/varlink/... 
-export GO111MODULE=off -export BUILDTAGS="systemd selinux seccomp exclude_graphdriver_devicemapper $(hack/btrfs_installed_tag.sh) $(hack/btrfs_tag.sh) $(hack/libdm_tag.sh) containers_image_ostree_stub" +# build %%{name} +export BUILDTAGS="varlink systemd selinux seccomp btrfs_noversion exclude_graphdriver_devicemapper $(hack/libdm_tag.sh)" %gobuild -o bin/%{name} %{import_path}/cmd/%{name} -make docs +# build %%{name}-remote +export BUILDTAGS="remoteclient $BUILDTAGS" +%gobuild -o bin/%{name}-remote %{import_path}/cmd/%{name} -# build conmon -pushd conmon-%{commit_conmon} -%{__make} all -popd +%{__make} docs +./docs/dckrman.sh ./docs/build/man/* %install -%{__make} GOPATH=%{buildroot} PREFIX=%{buildroot}%{_usr} ETCDIR=%{buildroot}%{_sysconfdir} \ - SYSTEMDDIR=%{buildroot}%{_unitdir} MANDIR=%{buildroot}%{_mandir} \ +install -dp %{buildroot}%{_unitdir} +PODMAN_VERSION=%{version} %{__make} PREFIX=%{buildroot}%{_prefix} ETCDIR=%{buildroot}%{_sysconfdir} \ install.bin \ + install.remote \ install.man \ install.cni \ - install.docker \ install.systemd \ install.completions # install libpod.conf install -dp %{buildroot}%{_datadir}/containers -install -p -m 644 %{repo}.conf %{buildroot}%{_datadir}/containers +install -m 644 %{repo}.conf %{buildroot}%{_datadir}/containers -# install conmon -pushd conmon-%{commit_conmon} -%{__make} LIBEXECDIR=%{buildroot}%{_libexecdir} install.%{name} -popd +# install docker-docs +install -dp %{buildroot}%{_mandir}/man1 +install -m 644 docs/build/man/docker*.1 -t %{buildroot}%{_mandir}/man1 + +# install docker symlink +install -m 755 docker %{buildroot}%{_bindir} -rm -rf %{buildroot}/src/github.com +# install test stuff +ln -s ./ ./vendor/src # ./vendor/src -> ./vendor +install -d -p %{buildroot}/%{_datadir}/%{name}/test/system +cp -pav test/system %{buildroot}/%{_datadir}/%{name}/test/ -#https://bugzilla.redhat.com/show_bug.cgi?id=1657303 varlink not in rhel7 -rm -f %{buildroot}/%{_unitdir}/io.%{name}.service %{buildroot}/%{_unitdir}/io.%{name}.socket +# do not include docker and podman-remote man pages in main package +for file in `find %{buildroot}%{_mandir}/man[15] -type f | sed "s,%{buildroot},," | grep -v -e remote -e docker`; do + echo "$file*" >> podman.file-list +done %check %if 0%{?with_check} +# Since we aren't packaging up the vendor directory we need to link +# back to it somehow. Hack it up so that we can add the vendor +# directory from BUILD dir as a gopath to be searched when executing +# tests from the BUILDROOT dir. ln -s ./ ./vendor/src # ./vendor/src -> ./vendor + export GOPATH=%{buildroot}/%{gopath}:$(pwd)/vendor:%{gopath} %if ! 
0%{?gotest:1} @@ -267,25 +330,100 @@ exit 0 #define license tag if not already defined %{!?_licensedir:%global license %doc} -%files +%files -f podman.file-list %license LICENSE %doc README.md CONTRIBUTING.md pkg/hooks/README-hooks.md install.md code-of-conduct.md transfer.md %{_bindir}/%{name} %{_datadir}/bash-completion/completions/* -%{_datadir}/zsh/site-functions/_%{name} -%{_mandir}/man1/%{name}*.1* -%{_mandir}/man5/*.5* -%dir %{_libexecdir}/%{name} -%{_libexecdir}/%{name}/conmon +# By "owning" the site-functions dir, we don't need to Require zsh +%{_datadir}/zsh/site-functions +%{_datadir}/zsh/site-functions/* %config(noreplace) %{_sysconfdir}/cni/net.d/87-%{name}-bridge.conflist %{_datadir}/containers/%{repo}.conf +%{_unitdir}/io.%{name}.service +%{_unitdir}/io.%{name}.socket +%{_userunitdir}/io.%{name}.service +%{_userunitdir}/io.%{name}.socket + %{_usr}/lib/tmpfiles.d/%{name}.conf %files docker %{_bindir}/docker %{_mandir}/man1/docker*.1* +%files remote +%{_bindir}/%{name}-remote +%{_mandir}/man1/%{name}-remote*.1* + +%files tests +%license LICENSE +%{_datadir}/%{name}/test + %changelog +* Wed Mar 25 2020 Jindrich Novy - 1.6.4-16 +- use the full PR 5348 to fix "no route to host from inside container" +- Resolves: #1806895 + +* Tue Mar 17 2020 Jindrich Novy - 1.6.4-15 +- update fix for "podman (1.6.4) rhel 8.1 no route to host from inside container" +- Resolves: #1806895 + +* Mon Mar 16 2020 Jindrich Novy - 1.6.4-14 +- fix "CVE-2020-1702 podman: containers/image: Container images read entire image manifest into memory" +- Resolves: #1810614 + +* Sat Feb 29 2020 Jindrich Novy - 1.6.4-13 +- fix "[FJ8.2 Bug]: [REG]The "--group-add" option of "podman create" doesn't function." +- Resolves: #1808702 + +* Wed Feb 26 2020 Jindrich Novy - 1.6.4-12 +- fix "Podman can't reuse a container name, even if the container that was using it is no longer around" +- Resolves: #1807437 + +* Wed Feb 26 2020 Jindrich Novy - 1.6.4-11 +- fix "podman exec does not reads from stdin" +- Resolves: #1807586 + +* Tue Feb 25 2020 Jindrich Novy - 1.6.4-10 +- fix "podman (1.6.4) rhel 8.1 no route to host from inside container" +- Resolves: #1806895 + +* Wed Feb 19 2020 Jindrich Novy - 1.6.4-9 +- fix "Podman support for FIPS Mode requires a bind mount inside the container" +- Resolves: #1804189 + +* Thu Feb 13 2020 Jindrich Novy - 1.6.4-8 +- Fix CVE-2020-1726 +- Resolves: #1801825 + +* Fri Feb 07 2020 Jindrich Novy - 1.6.4-7 +- allow colon be present in tarball name (#1797599) + +* Fri Jan 24 2020 Jindrich Novy - 1.6.4-6 +- resurrect s390x arch as kernel there now has the renameat2 syscall (#1773504) + +* Mon Jan 20 2020 Jindrich Novy - 1.6.4-5 +- Fix thread safety of gpgme (#1792243) + +* Wed Jan 15 2020 Jindrich Novy - 1.6.4-4 +- temporary disable s390x arch due to #1773504 causing fuse-overlayfs + failing to build - podman requires it + +* Tue Jan 14 2020 Jindrich Novy - 1.6.4-3 +- drop libvarlink and hard libseccomp deps: we don't have these in RHEL7.8 + +* Wed Jan 08 2020 Jindrich Novy - 1.6.4-2 +- merge podman-manpages with podman package and put man pages for + podman-remote to its dedicated subpackage +Resolves: #1788549 + +* Thu Dec 12 2019 Jindrich Novy - 1.6.4-1 +- update to 1.6.4 +- split podman and conmon packages + +* Thu Sep 12 2019 Jindrich Novy - 1.4.4-5 +- Fix CVE-2019-10214. + * Wed Aug 14 2019 Lokesh Mandvekar - 1.4.4-4 - Resolves: #1741264 - remove unnecessary dep on atomic-registries