diff --git a/.gitignore b/.gitignore
index 600386e..56247fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
-SOURCES/contrib-bb44ddd.tar.gz
-SOURCES/kubernetes-b9a88a7.tar.gz
+SOURCES/contrib-1c4eb2d.tar.gz
+SOURCES/kubernetes-4c8e6f4.tar.gz
+SOURCES/ose-8632732.tar.gz
diff --git a/.kubernetes.metadata b/.kubernetes.metadata
index b657940..6aff113 100644
--- a/.kubernetes.metadata
+++ b/.kubernetes.metadata
@@ -1,2 +1,3 @@
-9ee8a9f63a7efd37cc7b12ef2671f0b880e6c2f8 SOURCES/contrib-bb44ddd.tar.gz
-c33e374dbffafd1f1c9d56089ccd5e5f43a6910e SOURCES/kubernetes-b9a88a7.tar.gz
+393766b555fb5db21b01eae455c251c2eebc7dc7 SOURCES/contrib-1c4eb2d.tar.gz
+fa0f406753d9ce5832c69298f9046a31a9b913ce SOURCES/kubernetes-4c8e6f4.tar.gz
+3c483df529968428143a7841c667c436704a1f0d SOURCES/ose-8632732.tar.gz
diff --git a/SOURCES/0001-internal-inteernal.patch b/SOURCES/0001-internal-inteernal.patch
new file mode 100644
index 0000000..2f290e6
--- /dev/null
+++ b/SOURCES/0001-internal-inteernal.patch
@@ -0,0 +1,4927 @@
+From 18fceae12a70e5535c7458c0b162b805deb7931a Mon Sep 17 00:00:00 2001
+From: Jan Chaloupka
+Date: Wed, 23 Sep 2015 16:11:38 +0200
+Subject: [PATCH] internal->inteernal
+
+---
+ .../gcloud-golang/compute/metadata/metadata.go | 2 +-
+ .../cloud/compute/metadata/metadata.go | 2 +-
+ .../src/google.golang.org/cloud/inteernal/cloud.go | 128 ++
+ .../cloud/inteernal/datastore/datastore_v1.pb.go | 1633 ++++++++++++++++++++
+ .../cloud/inteernal/datastore/datastore_v1.proto | 594 +++++++
+ .../cloud/inteernal/testutil/context.go | 57 +
+ .../src/google.golang.org/cloud/internal/cloud.go | 128 --
+ .../cloud/internal/datastore/datastore_v1.pb.go | 1633 --------------------
+ .../cloud/internal/datastore/datastore_v1.proto | 594 -------
+ .../cloud/internal/testutil/context.go | 57 -
+ 10 files changed, 2414 insertions(+), 2414 deletions(-)
+ create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go
+ create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go
+ create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto
+ create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go
+ delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
+ delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
+ delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
+ delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
+
+diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go
+index b007cde..c92267f 100644
+--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go
++++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go
+@@ -30,7 +30,7 @@ import (
+ 	"sync"
+ 	"time"
+
+-	"google.golang.org/cloud/internal"
++	"google.golang.org/cloud/inteernal"
+ )
+
+ type cachedValue struct {
+diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
+index 7753a05..6102500 100644
+--- a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
++++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
+@@ -29,7 +29,7 @@ import (
+ 	"sync"
+ 	"time"
+
+-	"google.golang.org/cloud/internal"
++	"google.golang.org/cloud/inteernal"
+ )
+
+ type cachedValue struct {
+diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go
+new file mode 100644
+index 0000000..984323c
+--- /dev/null
++++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go
+@@ -0,0 +1,128 @@
++// Copyright 2014 Google Inc. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++//     http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++// Package internal provides support for the cloud packages.
++//
++// Users should not import this package directly.
++package internal
++
++import (
++	"fmt"
++	"net/http"
++	"sync"
++
++	"golang.org/x/net/context"
++)
++
++type contextKey struct{}
++
++func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
++	if c == nil {
++		panic("nil *http.Client passed to WithContext")
++	}
++	if projID == "" {
++		panic("empty project ID passed to WithContext")
++	}
++	return context.WithValue(parent, contextKey{}, &cloudContext{
++		ProjectID:  projID,
++		HTTPClient: c,
++	})
++}
++
++const userAgent = "gcloud-golang/0.1"
++
++type cloudContext struct {
++	ProjectID  string
++	HTTPClient *http.Client
++
++	mu  sync.Mutex             // guards svc
++	svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
++}
++
++// Service returns the result of the fill function if it's never been
++// called before for the given name (which is assumed to be an API
++// service name, like "datastore"). If it has already been cached, the fill
++// func is not run.
++// It's safe for concurrent use by multiple goroutines.
++func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
++	return cc(ctx).service(name, fill)
++}
++
++func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
++	c.mu.Lock()
++	defer c.mu.Unlock()
++
++	if c.svc == nil {
++		c.svc = make(map[string]interface{})
++	} else if v, ok := c.svc[name]; ok {
++		return v
++	}
++	v := fill(c.HTTPClient)
++	c.svc[name] = v
++	return v
++}
++
++// Transport is an http.RoundTripper that appends
++// Google Cloud client's user-agent to the original
++// request's user-agent header.
++type Transport struct {
++	// Base represents the actual http.RoundTripper
++	// the requests will be delegated to.
++	Base http.RoundTripper
++}
++
++// RoundTrip appends a user-agent to the existing user-agent
++// header and delegates the request to the base http.RoundTripper.
++func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { ++ req = cloneRequest(req) ++ ua := req.Header.Get("User-Agent") ++ if ua == "" { ++ ua = userAgent ++ } else { ++ ua = fmt.Sprintf("%s;%s", ua, userAgent) ++ } ++ req.Header.Set("User-Agent", ua) ++ return t.Base.RoundTrip(req) ++} ++ ++// cloneRequest returns a clone of the provided *http.Request. ++// The clone is a shallow copy of the struct and its Header map. ++func cloneRequest(r *http.Request) *http.Request { ++ // shallow copy of the struct ++ r2 := new(http.Request) ++ *r2 = *r ++ // deep copy of the Header ++ r2.Header = make(http.Header) ++ for k, s := range r.Header { ++ r2.Header[k] = s ++ } ++ return r2 ++} ++ ++func ProjID(ctx context.Context) string { ++ return cc(ctx).ProjectID ++} ++ ++func HTTPClient(ctx context.Context) *http.Client { ++ return cc(ctx).HTTPClient ++} ++ ++// cc returns the internal *cloudContext (cc) state for a context.Context. ++// It panics if the user did it wrong. ++func cc(ctx context.Context) *cloudContext { ++ if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { ++ return c ++ } ++ panic("invalid context.Context type; it should be created with cloud.NewContext") ++} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go +new file mode 100644 +index 0000000..be903e5 +--- /dev/null ++++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go +@@ -0,0 +1,1633 @@ ++// Code generated by protoc-gen-go. ++// source: datastore_v1.proto ++// DO NOT EDIT! ++ ++/* ++Package pb is a generated protocol buffer package. ++ ++It is generated from these files: ++ datastore_v1.proto ++ ++It has these top-level messages: ++ PartitionId ++ Key ++ Value ++ Property ++ Entity ++ EntityResult ++ Query ++ KindExpression ++ PropertyReference ++ PropertyExpression ++ PropertyOrder ++ Filter ++ CompositeFilter ++ PropertyFilter ++ GqlQuery ++ GqlQueryArg ++ QueryResultBatch ++ Mutation ++ MutationResult ++ ReadOptions ++ LookupRequest ++ LookupResponse ++ RunQueryRequest ++ RunQueryResponse ++ BeginTransactionRequest ++ BeginTransactionResponse ++ RollbackRequest ++ RollbackResponse ++ CommitRequest ++ CommitResponse ++ AllocateIdsRequest ++ AllocateIdsResponse ++*/ ++package pb ++ ++import proto "github.com/golang/protobuf/proto" ++import math "math" ++ ++// Reference imports to suppress errors if they are not otherwise used. ++var _ = proto.Marshal ++var _ = math.Inf ++ ++// Specifies what data the 'entity' field contains. ++// A ResultType is either implied (for example, in LookupResponse.found it ++// is always FULL) or specified by context (for example, in message ++// QueryResultBatch, field 'entity_result_type' specifies a ResultType ++// for all the values in field 'entity_result'). ++type EntityResult_ResultType int32 ++ ++const ( ++ EntityResult_FULL EntityResult_ResultType = 1 ++ EntityResult_PROJECTION EntityResult_ResultType = 2 ++ // The entity may have no key. ++ // A property value may have meaning 18. 
++ EntityResult_KEY_ONLY EntityResult_ResultType = 3 ++) ++ ++var EntityResult_ResultType_name = map[int32]string{ ++ 1: "FULL", ++ 2: "PROJECTION", ++ 3: "KEY_ONLY", ++} ++var EntityResult_ResultType_value = map[string]int32{ ++ "FULL": 1, ++ "PROJECTION": 2, ++ "KEY_ONLY": 3, ++} ++ ++func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { ++ p := new(EntityResult_ResultType) ++ *p = x ++ return p ++} ++func (x EntityResult_ResultType) String() string { ++ return proto.EnumName(EntityResult_ResultType_name, int32(x)) ++} ++func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") ++ if err != nil { ++ return err ++ } ++ *x = EntityResult_ResultType(value) ++ return nil ++} ++ ++type PropertyExpression_AggregationFunction int32 ++ ++const ( ++ PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 ++) ++ ++var PropertyExpression_AggregationFunction_name = map[int32]string{ ++ 1: "FIRST", ++} ++var PropertyExpression_AggregationFunction_value = map[string]int32{ ++ "FIRST": 1, ++} ++ ++func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { ++ p := new(PropertyExpression_AggregationFunction) ++ *p = x ++ return p ++} ++func (x PropertyExpression_AggregationFunction) String() string { ++ return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) ++} ++func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") ++ if err != nil { ++ return err ++ } ++ *x = PropertyExpression_AggregationFunction(value) ++ return nil ++} ++ ++type PropertyOrder_Direction int32 ++ ++const ( ++ PropertyOrder_ASCENDING PropertyOrder_Direction = 1 ++ PropertyOrder_DESCENDING PropertyOrder_Direction = 2 ++) ++ ++var PropertyOrder_Direction_name = map[int32]string{ ++ 1: "ASCENDING", ++ 2: "DESCENDING", ++} ++var PropertyOrder_Direction_value = map[string]int32{ ++ "ASCENDING": 1, ++ "DESCENDING": 2, ++} ++ ++func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { ++ p := new(PropertyOrder_Direction) ++ *p = x ++ return p ++} ++func (x PropertyOrder_Direction) String() string { ++ return proto.EnumName(PropertyOrder_Direction_name, int32(x)) ++} ++func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") ++ if err != nil { ++ return err ++ } ++ *x = PropertyOrder_Direction(value) ++ return nil ++} ++ ++type CompositeFilter_Operator int32 ++ ++const ( ++ CompositeFilter_AND CompositeFilter_Operator = 1 ++) ++ ++var CompositeFilter_Operator_name = map[int32]string{ ++ 1: "AND", ++} ++var CompositeFilter_Operator_value = map[string]int32{ ++ "AND": 1, ++} ++ ++func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { ++ p := new(CompositeFilter_Operator) ++ *p = x ++ return p ++} ++func (x CompositeFilter_Operator) String() string { ++ return proto.EnumName(CompositeFilter_Operator_name, int32(x)) ++} ++func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") ++ if err != nil { ++ return err ++ } ++ *x = CompositeFilter_Operator(value) ++ return nil ++} ++ ++type PropertyFilter_Operator int32 ++ ++const ( ++ 
PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 ++ PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 ++ PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 ++ PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 ++ PropertyFilter_EQUAL PropertyFilter_Operator = 5 ++ PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 ++) ++ ++var PropertyFilter_Operator_name = map[int32]string{ ++ 1: "LESS_THAN", ++ 2: "LESS_THAN_OR_EQUAL", ++ 3: "GREATER_THAN", ++ 4: "GREATER_THAN_OR_EQUAL", ++ 5: "EQUAL", ++ 11: "HAS_ANCESTOR", ++} ++var PropertyFilter_Operator_value = map[string]int32{ ++ "LESS_THAN": 1, ++ "LESS_THAN_OR_EQUAL": 2, ++ "GREATER_THAN": 3, ++ "GREATER_THAN_OR_EQUAL": 4, ++ "EQUAL": 5, ++ "HAS_ANCESTOR": 11, ++} ++ ++func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { ++ p := new(PropertyFilter_Operator) ++ *p = x ++ return p ++} ++func (x PropertyFilter_Operator) String() string { ++ return proto.EnumName(PropertyFilter_Operator_name, int32(x)) ++} ++func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") ++ if err != nil { ++ return err ++ } ++ *x = PropertyFilter_Operator(value) ++ return nil ++} ++ ++// The possible values for the 'more_results' field. ++type QueryResultBatch_MoreResultsType int32 ++ ++const ( ++ QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 ++ QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 ++ // results after the limit. ++ QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 ++) ++ ++var QueryResultBatch_MoreResultsType_name = map[int32]string{ ++ 1: "NOT_FINISHED", ++ 2: "MORE_RESULTS_AFTER_LIMIT", ++ 3: "NO_MORE_RESULTS", ++} ++var QueryResultBatch_MoreResultsType_value = map[string]int32{ ++ "NOT_FINISHED": 1, ++ "MORE_RESULTS_AFTER_LIMIT": 2, ++ "NO_MORE_RESULTS": 3, ++} ++ ++func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { ++ p := new(QueryResultBatch_MoreResultsType) ++ *p = x ++ return p ++} ++func (x QueryResultBatch_MoreResultsType) String() string { ++ return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) ++} ++func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") ++ if err != nil { ++ return err ++ } ++ *x = QueryResultBatch_MoreResultsType(value) ++ return nil ++} ++ ++type ReadOptions_ReadConsistency int32 ++ ++const ( ++ ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 ++ ReadOptions_STRONG ReadOptions_ReadConsistency = 1 ++ ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 ++) ++ ++var ReadOptions_ReadConsistency_name = map[int32]string{ ++ 0: "DEFAULT", ++ 1: "STRONG", ++ 2: "EVENTUAL", ++} ++var ReadOptions_ReadConsistency_value = map[string]int32{ ++ "DEFAULT": 0, ++ "STRONG": 1, ++ "EVENTUAL": 2, ++} ++ ++func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { ++ p := new(ReadOptions_ReadConsistency) ++ *p = x ++ return p ++} ++func (x ReadOptions_ReadConsistency) String() string { ++ return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) ++} ++func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") ++ if err != nil { ++ return err ++ } ++ *x = 
ReadOptions_ReadConsistency(value) ++ return nil ++} ++ ++type BeginTransactionRequest_IsolationLevel int32 ++ ++const ( ++ BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 ++ // conflict if their mutations conflict. For example: ++ // Read(A),Write(B) may not conflict with Read(B),Write(A), ++ // but Read(B),Write(B) does conflict with Read(B),Write(B). ++ BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 ++) ++ ++var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ ++ 0: "SNAPSHOT", ++ 1: "SERIALIZABLE", ++} ++var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ ++ "SNAPSHOT": 0, ++ "SERIALIZABLE": 1, ++} ++ ++func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { ++ p := new(BeginTransactionRequest_IsolationLevel) ++ *p = x ++ return p ++} ++func (x BeginTransactionRequest_IsolationLevel) String() string { ++ return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) ++} ++func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") ++ if err != nil { ++ return err ++ } ++ *x = BeginTransactionRequest_IsolationLevel(value) ++ return nil ++} ++ ++type CommitRequest_Mode int32 ++ ++const ( ++ CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 ++ CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 ++) ++ ++var CommitRequest_Mode_name = map[int32]string{ ++ 1: "TRANSACTIONAL", ++ 2: "NON_TRANSACTIONAL", ++} ++var CommitRequest_Mode_value = map[string]int32{ ++ "TRANSACTIONAL": 1, ++ "NON_TRANSACTIONAL": 2, ++} ++ ++func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { ++ p := new(CommitRequest_Mode) ++ *p = x ++ return p ++} ++func (x CommitRequest_Mode) String() string { ++ return proto.EnumName(CommitRequest_Mode_name, int32(x)) ++} ++func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") ++ if err != nil { ++ return err ++ } ++ *x = CommitRequest_Mode(value) ++ return nil ++} ++ ++// An identifier for a particular subset of entities. ++// ++// Entities are partitioned into various subsets, each used by different ++// datasets and different namespaces within a dataset and so forth. ++// ++// All input partition IDs are normalized before use. ++// A partition ID is normalized as follows: ++// If the partition ID is unset or is set to an empty partition ID, replace it ++// with the context partition ID. ++// Otherwise, if the partition ID has no dataset ID, assign it the context ++// partition ID's dataset ID. ++// Unless otherwise documented, the context partition ID has the dataset ID set ++// to the context dataset ID and no other partition dimension set. ++// ++// A partition ID is empty if all of its fields are unset. ++// ++// Partition dimension: ++// A dimension may be unset. ++// A dimension's value must never be "". ++// A dimension's value must match [A-Za-z\d\.\-_]{1,100} ++// If the value of any dimension matches regex "__.*__", ++// the partition is reserved/read-only. ++// A reserved/read-only partition ID is forbidden in certain documented contexts. ++// ++// Dataset ID: ++// A dataset id's value must never be "". 
++// A dataset id's value must match ++// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} ++type PartitionId struct { ++ // The dataset ID. ++ DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` ++ // The namespace. ++ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *PartitionId) Reset() { *m = PartitionId{} } ++func (m *PartitionId) String() string { return proto.CompactTextString(m) } ++func (*PartitionId) ProtoMessage() {} ++ ++func (m *PartitionId) GetDatasetId() string { ++ if m != nil && m.DatasetId != nil { ++ return *m.DatasetId ++ } ++ return "" ++} ++ ++func (m *PartitionId) GetNamespace() string { ++ if m != nil && m.Namespace != nil { ++ return *m.Namespace ++ } ++ return "" ++} ++ ++// A unique identifier for an entity. ++// If a key's partition id or any of its path kinds or names are ++// reserved/read-only, the key is reserved/read-only. ++// A reserved/read-only key is forbidden in certain documented contexts. ++type Key struct { ++ // Entities are partitioned into subsets, currently identified by a dataset ++ // (usually implicitly specified by the project) and namespace ID. ++ // Queries are scoped to a single partition. ++ PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` ++ // The entity path. ++ // An entity path consists of one or more elements composed of a kind and a ++ // string or numerical identifier, which identify entities. The first ++ // element identifies a root entity, the second element identifies ++ // a child of the root entity, the third element a child of the ++ // second entity, and so forth. The entities identified by all prefixes of ++ // the path are called the element's ancestors. ++ // An entity path is always fully complete: ALL of the entity's ancestors ++ // are required to be in the path along with the entity identifier itself. ++ // The only exception is that in some documented cases, the identifier in the ++ // last path element (for the entity) itself may be omitted. A path can never ++ // be empty. ++ PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Key) Reset() { *m = Key{} } ++func (m *Key) String() string { return proto.CompactTextString(m) } ++func (*Key) ProtoMessage() {} ++ ++func (m *Key) GetPartitionId() *PartitionId { ++ if m != nil { ++ return m.PartitionId ++ } ++ return nil ++} ++ ++func (m *Key) GetPathElement() []*Key_PathElement { ++ if m != nil { ++ return m.PathElement ++ } ++ return nil ++} ++ ++// A (kind, ID/name) pair used to construct a key path. ++// ++// At most one of name or ID may be set. ++// If either is set, the element is complete. ++// If neither is set, the element is incomplete. ++type Key_PathElement struct { ++ // The kind of the entity. ++ // A kind matching regex "__.*__" is reserved/read-only. ++ // A kind must not contain more than 500 characters. ++ // Cannot be "". ++ Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` ++ // The ID of the entity. ++ // Never equal to zero. Values less than zero are discouraged and will not ++ // be supported in the future. ++ Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` ++ // The name of the entity. ++ // A name matching regex "__.*__" is reserved/read-only. ++ // A name must not be more than 500 characters. 
++ // Cannot be "". ++ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } ++func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } ++func (*Key_PathElement) ProtoMessage() {} ++ ++func (m *Key_PathElement) GetKind() string { ++ if m != nil && m.Kind != nil { ++ return *m.Kind ++ } ++ return "" ++} ++ ++func (m *Key_PathElement) GetId() int64 { ++ if m != nil && m.Id != nil { ++ return *m.Id ++ } ++ return 0 ++} ++ ++func (m *Key_PathElement) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++// A message that can hold any of the supported value types and associated ++// metadata. ++// ++// At most one of the Value fields may be set. ++// If none are set the value is "null". ++// ++type Value struct { ++ // A boolean value. ++ BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` ++ // An integer value. ++ IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` ++ // A double value. ++ DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` ++ // A timestamp value. ++ TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` ++ // A key value. ++ KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` ++ // A blob key value. ++ BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` ++ // A UTF-8 encoded string value. ++ StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` ++ // A blob value. ++ BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` ++ // An entity value. ++ // May have no key. ++ // May have a key with an incomplete key path. ++ // May have a reserved/read-only key. ++ EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` ++ // A list value. ++ // Cannot contain another list value. ++ // Cannot also have a meaning and indexing set. ++ ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` ++ // The meaning field is reserved and should not be used. ++ Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` ++ // If the value should be indexed. ++ // ++ // The indexed property may be set for a ++ // null value. ++ // When indexed is true, stringValue ++ // is limited to 500 characters and the blob value is limited to 500 bytes. ++ // Exception: If meaning is set to 2, string_value is limited to 2038 ++ // characters regardless of indexed. ++ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 ++ // will be ignored on input (and will never be set on output). ++ // Input values by default have indexed set to ++ // true; however, you can explicitly set indexed to ++ // true if you want. (An output value never has ++ // indexed explicitly set to true.) If a value is ++ // itself an entity, it cannot have indexed set to ++ // true. ++ // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
++ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Value) Reset() { *m = Value{} } ++func (m *Value) String() string { return proto.CompactTextString(m) } ++func (*Value) ProtoMessage() {} ++ ++const Default_Value_Indexed bool = true ++ ++func (m *Value) GetBooleanValue() bool { ++ if m != nil && m.BooleanValue != nil { ++ return *m.BooleanValue ++ } ++ return false ++} ++ ++func (m *Value) GetIntegerValue() int64 { ++ if m != nil && m.IntegerValue != nil { ++ return *m.IntegerValue ++ } ++ return 0 ++} ++ ++func (m *Value) GetDoubleValue() float64 { ++ if m != nil && m.DoubleValue != nil { ++ return *m.DoubleValue ++ } ++ return 0 ++} ++ ++func (m *Value) GetTimestampMicrosecondsValue() int64 { ++ if m != nil && m.TimestampMicrosecondsValue != nil { ++ return *m.TimestampMicrosecondsValue ++ } ++ return 0 ++} ++ ++func (m *Value) GetKeyValue() *Key { ++ if m != nil { ++ return m.KeyValue ++ } ++ return nil ++} ++ ++func (m *Value) GetBlobKeyValue() string { ++ if m != nil && m.BlobKeyValue != nil { ++ return *m.BlobKeyValue ++ } ++ return "" ++} ++ ++func (m *Value) GetStringValue() string { ++ if m != nil && m.StringValue != nil { ++ return *m.StringValue ++ } ++ return "" ++} ++ ++func (m *Value) GetBlobValue() []byte { ++ if m != nil { ++ return m.BlobValue ++ } ++ return nil ++} ++ ++func (m *Value) GetEntityValue() *Entity { ++ if m != nil { ++ return m.EntityValue ++ } ++ return nil ++} ++ ++func (m *Value) GetListValue() []*Value { ++ if m != nil { ++ return m.ListValue ++ } ++ return nil ++} ++ ++func (m *Value) GetMeaning() int32 { ++ if m != nil && m.Meaning != nil { ++ return *m.Meaning ++ } ++ return 0 ++} ++ ++func (m *Value) GetIndexed() bool { ++ if m != nil && m.Indexed != nil { ++ return *m.Indexed ++ } ++ return Default_Value_Indexed ++} ++ ++// An entity property. ++type Property struct { ++ // The name of the property. ++ // A property name matching regex "__.*__" is reserved. ++ // A reserved property name is forbidden in certain documented contexts. ++ // The name must not contain more than 500 characters. ++ // Cannot be "". ++ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` ++ // The value(s) of the property. ++ // Each value can have only one value property populated. For example, ++ // you cannot have a values list of { value: { integerValue: 22, ++ // stringValue: "a" } }, but you can have { value: { listValue: ++ // [ { integerValue: 22 }, { stringValue: "a" } ] }. ++ Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Property) Reset() { *m = Property{} } ++func (m *Property) String() string { return proto.CompactTextString(m) } ++func (*Property) ProtoMessage() {} ++ ++func (m *Property) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++func (m *Property) GetValue() *Value { ++ if m != nil { ++ return m.Value ++ } ++ return nil ++} ++ ++// An entity. ++// ++// An entity is limited to 1 megabyte when stored. That roughly ++// corresponds to a limit of 1 megabyte for the serialized form of this ++// message. ++type Entity struct { ++ // The entity's key. ++ // ++ // An entity must have a key, unless otherwise documented (for example, ++ // an entity in Value.entityValue may have no key). ++ // An entity's kind is its key's path's last element's kind, ++ // or null if it has no key. 
++ Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` ++ // The entity's properties. ++ // Each property's name must be unique for its entity. ++ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Entity) Reset() { *m = Entity{} } ++func (m *Entity) String() string { return proto.CompactTextString(m) } ++func (*Entity) ProtoMessage() {} ++ ++func (m *Entity) GetKey() *Key { ++ if m != nil { ++ return m.Key ++ } ++ return nil ++} ++ ++func (m *Entity) GetProperty() []*Property { ++ if m != nil { ++ return m.Property ++ } ++ return nil ++} ++ ++// The result of fetching an entity from the datastore. ++type EntityResult struct { ++ // The resulting entity. ++ Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *EntityResult) Reset() { *m = EntityResult{} } ++func (m *EntityResult) String() string { return proto.CompactTextString(m) } ++func (*EntityResult) ProtoMessage() {} ++ ++func (m *EntityResult) GetEntity() *Entity { ++ if m != nil { ++ return m.Entity ++ } ++ return nil ++} ++ ++// A query. ++type Query struct { ++ // The projection to return. If not set the entire entity is returned. ++ Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` ++ // The kinds to query (if empty, returns entities from all kinds). ++ Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` ++ // The filter to apply (optional). ++ Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` ++ // The order to apply to the query results (if empty, order is unspecified). ++ Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` ++ // The properties to group by (if empty, no grouping is applied to the ++ // result set). ++ GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` ++ // A starting point for the query results. Optional. Query cursors are ++ // returned in query result batches. ++ StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` ++ // An ending point for the query results. Optional. Query cursors are ++ // returned in query result batches. ++ EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` ++ // The number of results to skip. Applies before limit, but after all other ++ // constraints (optional, defaults to 0). ++ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` ++ // The maximum number of results to return. Applies after all other ++ // constraints. Optional. 
++ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Query) Reset() { *m = Query{} } ++func (m *Query) String() string { return proto.CompactTextString(m) } ++func (*Query) ProtoMessage() {} ++ ++const Default_Query_Offset int32 = 0 ++ ++func (m *Query) GetProjection() []*PropertyExpression { ++ if m != nil { ++ return m.Projection ++ } ++ return nil ++} ++ ++func (m *Query) GetKind() []*KindExpression { ++ if m != nil { ++ return m.Kind ++ } ++ return nil ++} ++ ++func (m *Query) GetFilter() *Filter { ++ if m != nil { ++ return m.Filter ++ } ++ return nil ++} ++ ++func (m *Query) GetOrder() []*PropertyOrder { ++ if m != nil { ++ return m.Order ++ } ++ return nil ++} ++ ++func (m *Query) GetGroupBy() []*PropertyReference { ++ if m != nil { ++ return m.GroupBy ++ } ++ return nil ++} ++ ++func (m *Query) GetStartCursor() []byte { ++ if m != nil { ++ return m.StartCursor ++ } ++ return nil ++} ++ ++func (m *Query) GetEndCursor() []byte { ++ if m != nil { ++ return m.EndCursor ++ } ++ return nil ++} ++ ++func (m *Query) GetOffset() int32 { ++ if m != nil && m.Offset != nil { ++ return *m.Offset ++ } ++ return Default_Query_Offset ++} ++ ++func (m *Query) GetLimit() int32 { ++ if m != nil && m.Limit != nil { ++ return *m.Limit ++ } ++ return 0 ++} ++ ++// A representation of a kind. ++type KindExpression struct { ++ // The name of the kind. ++ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *KindExpression) Reset() { *m = KindExpression{} } ++func (m *KindExpression) String() string { return proto.CompactTextString(m) } ++func (*KindExpression) ProtoMessage() {} ++ ++func (m *KindExpression) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++// A reference to a property relative to the kind expressions. ++// exactly. ++type PropertyReference struct { ++ // The name of the property. ++ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *PropertyReference) Reset() { *m = PropertyReference{} } ++func (m *PropertyReference) String() string { return proto.CompactTextString(m) } ++func (*PropertyReference) ProtoMessage() {} ++ ++func (m *PropertyReference) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++// A representation of a property in a projection. ++type PropertyExpression struct { ++ // The property to project. ++ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` ++ // The aggregation function to apply to the property. Optional. ++ // Can only be used when grouping by at least one property. Must ++ // then be set on all properties in the projection that are not ++ // being grouped by. 
++ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=pb.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } ++func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } ++func (*PropertyExpression) ProtoMessage() {} ++ ++func (m *PropertyExpression) GetProperty() *PropertyReference { ++ if m != nil { ++ return m.Property ++ } ++ return nil ++} ++ ++func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { ++ if m != nil && m.AggregationFunction != nil { ++ return *m.AggregationFunction ++ } ++ return PropertyExpression_FIRST ++} ++ ++// The desired order for a specific property. ++type PropertyOrder struct { ++ // The property to order by. ++ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` ++ // The direction to order by. ++ Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=pb.PropertyOrder_Direction,def=1" json:"direction,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } ++func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } ++func (*PropertyOrder) ProtoMessage() {} ++ ++const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING ++ ++func (m *PropertyOrder) GetProperty() *PropertyReference { ++ if m != nil { ++ return m.Property ++ } ++ return nil ++} ++ ++func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { ++ if m != nil && m.Direction != nil { ++ return *m.Direction ++ } ++ return Default_PropertyOrder_Direction ++} ++ ++// A holder for any type of filter. Exactly one field should be specified. ++type Filter struct { ++ // A composite filter. ++ CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` ++ // A filter on a property. ++ PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Filter) Reset() { *m = Filter{} } ++func (m *Filter) String() string { return proto.CompactTextString(m) } ++func (*Filter) ProtoMessage() {} ++ ++func (m *Filter) GetCompositeFilter() *CompositeFilter { ++ if m != nil { ++ return m.CompositeFilter ++ } ++ return nil ++} ++ ++func (m *Filter) GetPropertyFilter() *PropertyFilter { ++ if m != nil { ++ return m.PropertyFilter ++ } ++ return nil ++} ++ ++// A filter that merges the multiple other filters using the given operation. ++type CompositeFilter struct { ++ // The operator for combining multiple filters. ++ Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=pb.CompositeFilter_Operator" json:"operator,omitempty"` ++ // The list of filters to combine. ++ // Must contain at least one filter. 
++ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } ++func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } ++func (*CompositeFilter) ProtoMessage() {} ++ ++func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { ++ if m != nil && m.Operator != nil { ++ return *m.Operator ++ } ++ return CompositeFilter_AND ++} ++ ++func (m *CompositeFilter) GetFilter() []*Filter { ++ if m != nil { ++ return m.Filter ++ } ++ return nil ++} ++ ++// A filter on a specific property. ++type PropertyFilter struct { ++ // The property to filter by. ++ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` ++ // The operator to filter by. ++ Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=pb.PropertyFilter_Operator" json:"operator,omitempty"` ++ // The value to compare the property to. ++ Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } ++func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } ++func (*PropertyFilter) ProtoMessage() {} ++ ++func (m *PropertyFilter) GetProperty() *PropertyReference { ++ if m != nil { ++ return m.Property ++ } ++ return nil ++} ++ ++func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { ++ if m != nil && m.Operator != nil { ++ return *m.Operator ++ } ++ return PropertyFilter_LESS_THAN ++} ++ ++func (m *PropertyFilter) GetValue() *Value { ++ if m != nil { ++ return m.Value ++ } ++ return nil ++} ++ ++// A GQL query. ++type GqlQuery struct { ++ QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` ++ // When false, the query string must not contain a literal. ++ AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` ++ // A named argument must set field GqlQueryArg.name. ++ // No two named arguments may have the same name. ++ // For each non-reserved named binding site in the query string, ++ // there must be a named argument with that name, ++ // but not necessarily the inverse. ++ NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` ++ // Numbered binding site @1 references the first numbered argument, ++ // effectively using 1-based indexing, rather than the usual 0. ++ // A numbered argument must NOT set field GqlQueryArg.name. ++ // For each binding site numbered i in query_string, ++ // there must be an ith numbered argument. ++ // The inverse must also be true. 
++ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *GqlQuery) Reset() { *m = GqlQuery{} } ++func (m *GqlQuery) String() string { return proto.CompactTextString(m) } ++func (*GqlQuery) ProtoMessage() {} ++ ++const Default_GqlQuery_AllowLiteral bool = false ++ ++func (m *GqlQuery) GetQueryString() string { ++ if m != nil && m.QueryString != nil { ++ return *m.QueryString ++ } ++ return "" ++} ++ ++func (m *GqlQuery) GetAllowLiteral() bool { ++ if m != nil && m.AllowLiteral != nil { ++ return *m.AllowLiteral ++ } ++ return Default_GqlQuery_AllowLiteral ++} ++ ++func (m *GqlQuery) GetNameArg() []*GqlQueryArg { ++ if m != nil { ++ return m.NameArg ++ } ++ return nil ++} ++ ++func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { ++ if m != nil { ++ return m.NumberArg ++ } ++ return nil ++} ++ ++// A binding argument for a GQL query. ++// Exactly one of fields value and cursor must be set. ++type GqlQueryArg struct { ++ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". ++ // Must not match regex "__.*__". ++ // Must not be "". ++ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` ++ Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` ++ Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } ++func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } ++func (*GqlQueryArg) ProtoMessage() {} ++ ++func (m *GqlQueryArg) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++func (m *GqlQueryArg) GetValue() *Value { ++ if m != nil { ++ return m.Value ++ } ++ return nil ++} ++ ++func (m *GqlQueryArg) GetCursor() []byte { ++ if m != nil { ++ return m.Cursor ++ } ++ return nil ++} ++ ++// A batch of results produced by a query. ++type QueryResultBatch struct { ++ // The result type for every entity in entityResults. ++ EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=pb.EntityResult_ResultType" json:"entity_result_type,omitempty"` ++ // The results for this batch. ++ EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` ++ // A cursor that points to the position after the last result in the batch. ++ // May be absent. ++ EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` ++ // The state of the query after the current batch. ++ MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=pb.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` ++ // The number of results skipped because of Query.offset. 
++ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } ++func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } ++func (*QueryResultBatch) ProtoMessage() {} ++ ++func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { ++ if m != nil && m.EntityResultType != nil { ++ return *m.EntityResultType ++ } ++ return EntityResult_FULL ++} ++ ++func (m *QueryResultBatch) GetEntityResult() []*EntityResult { ++ if m != nil { ++ return m.EntityResult ++ } ++ return nil ++} ++ ++func (m *QueryResultBatch) GetEndCursor() []byte { ++ if m != nil { ++ return m.EndCursor ++ } ++ return nil ++} ++ ++func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { ++ if m != nil && m.MoreResults != nil { ++ return *m.MoreResults ++ } ++ return QueryResultBatch_NOT_FINISHED ++} ++ ++func (m *QueryResultBatch) GetSkippedResults() int32 { ++ if m != nil && m.SkippedResults != nil { ++ return *m.SkippedResults ++ } ++ return 0 ++} ++ ++// A set of changes to apply. ++// ++// No entity in this message may have a reserved property name, ++// not even a property in an entity in a value. ++// No value in this message may have meaning 18, ++// not even a value in an entity in another value. ++// ++// If entities with duplicate keys are present, an arbitrary choice will ++// be made as to which is written. ++type Mutation struct { ++ // Entities to upsert. ++ // Each upserted entity's key must have a complete path and ++ // must not be reserved/read-only. ++ Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` ++ // Entities to update. ++ // Each updated entity's key must have a complete path and ++ // must not be reserved/read-only. ++ Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` ++ // Entities to insert. ++ // Each inserted entity's key must have a complete path and ++ // must not be reserved/read-only. ++ Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` ++ // Insert entities with a newly allocated ID. ++ // Each inserted entity's key must omit the final identifier in its path and ++ // must not be reserved/read-only. ++ InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` ++ // Keys of entities to delete. ++ // Each key must have a complete key path and must not be reserved/read-only. ++ Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` ++ // Ignore a user specified read-only period. Optional. 
++ Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *Mutation) Reset() { *m = Mutation{} } ++func (m *Mutation) String() string { return proto.CompactTextString(m) } ++func (*Mutation) ProtoMessage() {} ++ ++func (m *Mutation) GetUpsert() []*Entity { ++ if m != nil { ++ return m.Upsert ++ } ++ return nil ++} ++ ++func (m *Mutation) GetUpdate() []*Entity { ++ if m != nil { ++ return m.Update ++ } ++ return nil ++} ++ ++func (m *Mutation) GetInsert() []*Entity { ++ if m != nil { ++ return m.Insert ++ } ++ return nil ++} ++ ++func (m *Mutation) GetInsertAutoId() []*Entity { ++ if m != nil { ++ return m.InsertAutoId ++ } ++ return nil ++} ++ ++func (m *Mutation) GetDelete() []*Key { ++ if m != nil { ++ return m.Delete ++ } ++ return nil ++} ++ ++func (m *Mutation) GetForce() bool { ++ if m != nil && m.Force != nil { ++ return *m.Force ++ } ++ return false ++} ++ ++// The result of applying a mutation. ++type MutationResult struct { ++ // Number of index writes. ++ IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` ++ // Keys for insertAutoId entities. One per entity from the ++ // request, in the same order. ++ InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *MutationResult) Reset() { *m = MutationResult{} } ++func (m *MutationResult) String() string { return proto.CompactTextString(m) } ++func (*MutationResult) ProtoMessage() {} ++ ++func (m *MutationResult) GetIndexUpdates() int32 { ++ if m != nil && m.IndexUpdates != nil { ++ return *m.IndexUpdates ++ } ++ return 0 ++} ++ ++func (m *MutationResult) GetInsertAutoIdKey() []*Key { ++ if m != nil { ++ return m.InsertAutoIdKey ++ } ++ return nil ++} ++ ++// Options shared by read requests. ++type ReadOptions struct { ++ // The read consistency to use. ++ // Cannot be set when transaction is set. ++ // Lookup and ancestor queries default to STRONG, global queries default to ++ // EVENTUAL and cannot be set to STRONG. ++ ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=pb.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` ++ // The transaction to use. Optional. ++ Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *ReadOptions) Reset() { *m = ReadOptions{} } ++func (m *ReadOptions) String() string { return proto.CompactTextString(m) } ++func (*ReadOptions) ProtoMessage() {} ++ ++const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT ++ ++func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { ++ if m != nil && m.ReadConsistency != nil { ++ return *m.ReadConsistency ++ } ++ return Default_ReadOptions_ReadConsistency ++} ++ ++func (m *ReadOptions) GetTransaction() []byte { ++ if m != nil { ++ return m.Transaction ++ } ++ return nil ++} ++ ++// The request for Lookup. ++type LookupRequest struct { ++ // Options for this lookup request. Optional. ++ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` ++ // Keys of entities to look up from the datastore. 
++ Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *LookupRequest) Reset() { *m = LookupRequest{} } ++func (m *LookupRequest) String() string { return proto.CompactTextString(m) } ++func (*LookupRequest) ProtoMessage() {} ++ ++func (m *LookupRequest) GetReadOptions() *ReadOptions { ++ if m != nil { ++ return m.ReadOptions ++ } ++ return nil ++} ++ ++func (m *LookupRequest) GetKey() []*Key { ++ if m != nil { ++ return m.Key ++ } ++ return nil ++} ++ ++// The response for Lookup. ++type LookupResponse struct { ++ // Entities found as ResultType.FULL entities. ++ Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` ++ // Entities not found as ResultType.KEY_ONLY entities. ++ Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` ++ // A list of keys that were not looked up due to resource constraints. ++ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *LookupResponse) Reset() { *m = LookupResponse{} } ++func (m *LookupResponse) String() string { return proto.CompactTextString(m) } ++func (*LookupResponse) ProtoMessage() {} ++ ++func (m *LookupResponse) GetFound() []*EntityResult { ++ if m != nil { ++ return m.Found ++ } ++ return nil ++} ++ ++func (m *LookupResponse) GetMissing() []*EntityResult { ++ if m != nil { ++ return m.Missing ++ } ++ return nil ++} ++ ++func (m *LookupResponse) GetDeferred() []*Key { ++ if m != nil { ++ return m.Deferred ++ } ++ return nil ++} ++ ++// The request for RunQuery. ++type RunQueryRequest struct { ++ // The options for this query. ++ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` ++ // Entities are partitioned into subsets, identified by a dataset (usually ++ // implicitly specified by the project) and namespace ID. Queries are scoped ++ // to a single partition. ++ // This partition ID is normalized with the standard default context ++ // partition ID, but all other partition IDs in RunQueryRequest are ++ // normalized with this partition ID as the context partition ID. ++ PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` ++ // The query to run. ++ // Either this field or field gql_query must be set, but not both. ++ Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` ++ // The GQL query to run. ++ // Either this field or field query must be set, but not both. ++ GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } ++func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } ++func (*RunQueryRequest) ProtoMessage() {} ++ ++func (m *RunQueryRequest) GetReadOptions() *ReadOptions { ++ if m != nil { ++ return m.ReadOptions ++ } ++ return nil ++} ++ ++func (m *RunQueryRequest) GetPartitionId() *PartitionId { ++ if m != nil { ++ return m.PartitionId ++ } ++ return nil ++} ++ ++func (m *RunQueryRequest) GetQuery() *Query { ++ if m != nil { ++ return m.Query ++ } ++ return nil ++} ++ ++func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { ++ if m != nil { ++ return m.GqlQuery ++ } ++ return nil ++} ++ ++// The response for RunQuery. ++type RunQueryResponse struct { ++ // A batch of query results (always present). 
++ Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } ++func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } ++func (*RunQueryResponse) ProtoMessage() {} ++ ++func (m *RunQueryResponse) GetBatch() *QueryResultBatch { ++ if m != nil { ++ return m.Batch ++ } ++ return nil ++} ++ ++// The request for BeginTransaction. ++type BeginTransactionRequest struct { ++ // The transaction isolation level. ++ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=pb.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } ++func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } ++func (*BeginTransactionRequest) ProtoMessage() {} ++ ++const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT ++ ++func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { ++ if m != nil && m.IsolationLevel != nil { ++ return *m.IsolationLevel ++ } ++ return Default_BeginTransactionRequest_IsolationLevel ++} ++ ++// The response for BeginTransaction. ++type BeginTransactionResponse struct { ++ // The transaction identifier (always present). ++ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } ++func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } ++func (*BeginTransactionResponse) ProtoMessage() {} ++ ++func (m *BeginTransactionResponse) GetTransaction() []byte { ++ if m != nil { ++ return m.Transaction ++ } ++ return nil ++} ++ ++// The request for Rollback. ++type RollbackRequest struct { ++ // The transaction identifier, returned by a call to ++ // beginTransaction. ++ Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } ++func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } ++func (*RollbackRequest) ProtoMessage() {} ++ ++func (m *RollbackRequest) GetTransaction() []byte { ++ if m != nil { ++ return m.Transaction ++ } ++ return nil ++} ++ ++// The response for Rollback. ++type RollbackResponse struct { ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } ++func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } ++func (*RollbackResponse) ProtoMessage() {} ++ ++// The request for Commit. ++type CommitRequest struct { ++ // The transaction identifier, returned by a call to ++ // beginTransaction. Must be set when mode is TRANSACTIONAL. ++ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` ++ // The mutation to perform. Optional. ++ Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` ++ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
++ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=pb.CommitRequest_Mode,def=1" json:"mode,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *CommitRequest) Reset() { *m = CommitRequest{} } ++func (m *CommitRequest) String() string { return proto.CompactTextString(m) } ++func (*CommitRequest) ProtoMessage() {} ++ ++const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL ++ ++func (m *CommitRequest) GetTransaction() []byte { ++ if m != nil { ++ return m.Transaction ++ } ++ return nil ++} ++ ++func (m *CommitRequest) GetMutation() *Mutation { ++ if m != nil { ++ return m.Mutation ++ } ++ return nil ++} ++ ++func (m *CommitRequest) GetMode() CommitRequest_Mode { ++ if m != nil && m.Mode != nil { ++ return *m.Mode ++ } ++ return Default_CommitRequest_Mode ++} ++ ++// The response for Commit. ++type CommitResponse struct { ++ // The result of performing the mutation (if any). ++ MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *CommitResponse) Reset() { *m = CommitResponse{} } ++func (m *CommitResponse) String() string { return proto.CompactTextString(m) } ++func (*CommitResponse) ProtoMessage() {} ++ ++func (m *CommitResponse) GetMutationResult() *MutationResult { ++ if m != nil { ++ return m.MutationResult ++ } ++ return nil ++} ++ ++// The request for AllocateIds. ++type AllocateIdsRequest struct { ++ // A list of keys with incomplete key paths to allocate IDs for. ++ // No key may be reserved/read-only. ++ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } ++func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } ++func (*AllocateIdsRequest) ProtoMessage() {} ++ ++func (m *AllocateIdsRequest) GetKey() []*Key { ++ if m != nil { ++ return m.Key ++ } ++ return nil ++} ++ ++// The response for AllocateIds. ++type AllocateIdsResponse struct { ++ // The keys specified in the request (in the same order), each with ++ // its key path completed with a newly allocated ID. 
++ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` ++ XXX_unrecognized []byte `json:"-"` ++} ++ ++func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } ++func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } ++func (*AllocateIdsResponse) ProtoMessage() {} ++ ++func (m *AllocateIdsResponse) GetKey() []*Key { ++ if m != nil { ++ return m.Key ++ } ++ return nil ++} ++ ++func init() { ++ proto.RegisterEnum("pb.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) ++ proto.RegisterEnum("pb.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) ++ proto.RegisterEnum("pb.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) ++ proto.RegisterEnum("pb.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) ++ proto.RegisterEnum("pb.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) ++ proto.RegisterEnum("pb.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) ++ proto.RegisterEnum("pb.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) ++ proto.RegisterEnum("pb.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) ++ proto.RegisterEnum("pb.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) ++} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto +new file mode 100644 +index 0000000..bb4c199 +--- /dev/null ++++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto +@@ -0,0 +1,594 @@ ++// Copyright 2013 Google Inc. All Rights Reserved. ++// ++// The datastore v1 service proto definitions ++ ++syntax = "proto2"; ++ ++package pb; ++option java_package = "com.google.api.services.datastore"; ++ ++ ++// An identifier for a particular subset of entities. ++// ++// Entities are partitioned into various subsets, each used by different ++// datasets and different namespaces within a dataset and so forth. ++// ++// All input partition IDs are normalized before use. ++// A partition ID is normalized as follows: ++// If the partition ID is unset or is set to an empty partition ID, replace it ++// with the context partition ID. ++// Otherwise, if the partition ID has no dataset ID, assign it the context ++// partition ID's dataset ID. ++// Unless otherwise documented, the context partition ID has the dataset ID set ++// to the context dataset ID and no other partition dimension set. ++// ++// A partition ID is empty if all of its fields are unset. ++// ++// Partition dimension: ++// A dimension may be unset. ++// A dimension's value must never be "". ++// A dimension's value must match [A-Za-z\d\.\-_]{1,100} ++// If the value of any dimension matches regex "__.*__", ++// the partition is reserved/read-only. ++// A reserved/read-only partition ID is forbidden in certain documented contexts. ++// ++// Dataset ID: ++// A dataset id's value must never be "". ++// A dataset id's value must match ++// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} ++message PartitionId { ++ // The dataset ID. ++ optional string dataset_id = 3; ++ // The namespace. 
++ optional string namespace = 4; ++} ++ ++// A unique identifier for an entity. ++// If a key's partition id or any of its path kinds or names are ++// reserved/read-only, the key is reserved/read-only. ++// A reserved/read-only key is forbidden in certain documented contexts. ++message Key { ++ // Entities are partitioned into subsets, currently identified by a dataset ++ // (usually implicitly specified by the project) and namespace ID. ++ // Queries are scoped to a single partition. ++ optional PartitionId partition_id = 1; ++ ++ // A (kind, ID/name) pair used to construct a key path. ++ // ++ // At most one of name or ID may be set. ++ // If either is set, the element is complete. ++ // If neither is set, the element is incomplete. ++ message PathElement { ++ // The kind of the entity. ++ // A kind matching regex "__.*__" is reserved/read-only. ++ // A kind must not contain more than 500 characters. ++ // Cannot be "". ++ required string kind = 1; ++ // The ID of the entity. ++ // Never equal to zero. Values less than zero are discouraged and will not ++ // be supported in the future. ++ optional int64 id = 2; ++ // The name of the entity. ++ // A name matching regex "__.*__" is reserved/read-only. ++ // A name must not be more than 500 characters. ++ // Cannot be "". ++ optional string name = 3; ++ } ++ ++ // The entity path. ++ // An entity path consists of one or more elements composed of a kind and a ++ // string or numerical identifier, which identify entities. The first ++ // element identifies a root entity, the second element identifies ++ // a child of the root entity, the third element a child of the ++ // second entity, and so forth. The entities identified by all prefixes of ++ // the path are called the element's ancestors. ++ // An entity path is always fully complete: ALL of the entity's ancestors ++ // are required to be in the path along with the entity identifier itself. ++ // The only exception is that in some documented cases, the identifier in the ++ // last path element (for the entity) itself may be omitted. A path can never ++ // be empty. ++ repeated PathElement path_element = 2; ++} ++ ++// A message that can hold any of the supported value types and associated ++// metadata. ++// ++// At most one of the Value fields may be set. ++// If none are set the value is "null". ++// ++message Value { ++ // A boolean value. ++ optional bool boolean_value = 1; ++ // An integer value. ++ optional int64 integer_value = 2; ++ // A double value. ++ optional double double_value = 3; ++ // A timestamp value. ++ optional int64 timestamp_microseconds_value = 4; ++ // A key value. ++ optional Key key_value = 5; ++ // A blob key value. ++ optional string blob_key_value = 16; ++ // A UTF-8 encoded string value. ++ optional string string_value = 17; ++ // A blob value. ++ optional bytes blob_value = 18; ++ // An entity value. ++ // May have no key. ++ // May have a key with an incomplete key path. ++ // May have a reserved/read-only key. ++ optional Entity entity_value = 6; ++ // A list value. ++ // Cannot contain another list value. ++ // Cannot also have a meaning and indexing set. ++ repeated Value list_value = 7; ++ ++ // The meaning field is reserved and should not be used. ++ optional int32 meaning = 14; ++ ++ // If the value should be indexed. ++ // ++ // The indexed property may be set for a ++ // null value. ++ // When indexed is true, stringValue ++ // is limited to 500 characters and the blob value is limited to 500 bytes. 
++ // Exception: If meaning is set to 2, string_value is limited to 2038 ++ // characters regardless of indexed. ++ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 ++ // will be ignored on input (and will never be set on output). ++ // Input values by default have indexed set to ++ // true; however, you can explicitly set indexed to ++ // true if you want. (An output value never has ++ // indexed explicitly set to true.) If a value is ++ // itself an entity, it cannot have indexed set to ++ // true. ++ // Exception: An entity value with meaning 9, 20 or 21 may be indexed. ++ optional bool indexed = 15 [default = true]; ++} ++ ++// An entity property. ++message Property { ++ // The name of the property. ++ // A property name matching regex "__.*__" is reserved. ++ // A reserved property name is forbidden in certain documented contexts. ++ // The name must not contain more than 500 characters. ++ // Cannot be "". ++ required string name = 1; ++ ++ // The value(s) of the property. ++ // Each value can have only one value property populated. For example, ++ // you cannot have a values list of { value: { integerValue: 22, ++ // stringValue: "a" } }, but you can have { value: { listValue: ++ // [ { integerValue: 22 }, { stringValue: "a" } ] }. ++ required Value value = 4; ++} ++ ++// An entity. ++// ++// An entity is limited to 1 megabyte when stored. That roughly ++// corresponds to a limit of 1 megabyte for the serialized form of this ++// message. ++message Entity { ++ // The entity's key. ++ // ++ // An entity must have a key, unless otherwise documented (for example, ++ // an entity in Value.entityValue may have no key). ++ // An entity's kind is its key's path's last element's kind, ++ // or null if it has no key. ++ optional Key key = 1; ++ // The entity's properties. ++ // Each property's name must be unique for its entity. ++ repeated Property property = 2; ++} ++ ++// The result of fetching an entity from the datastore. ++message EntityResult { ++ // Specifies what data the 'entity' field contains. ++ // A ResultType is either implied (for example, in LookupResponse.found it ++ // is always FULL) or specified by context (for example, in message ++ // QueryResultBatch, field 'entity_result_type' specifies a ResultType ++ // for all the values in field 'entity_result'). ++ enum ResultType { ++ FULL = 1; // The entire entity. ++ PROJECTION = 2; // A projected subset of properties. ++ // The entity may have no key. ++ // A property value may have meaning 18. ++ KEY_ONLY = 3; // Only the key. ++ } ++ ++ // The resulting entity. ++ required Entity entity = 1; ++} ++ ++// A query. ++message Query { ++ // The projection to return. If not set the entire entity is returned. ++ repeated PropertyExpression projection = 2; ++ ++ // The kinds to query (if empty, returns entities from all kinds). ++ repeated KindExpression kind = 3; ++ ++ // The filter to apply (optional). ++ optional Filter filter = 4; ++ ++ // The order to apply to the query results (if empty, order is unspecified). ++ repeated PropertyOrder order = 5; ++ ++ // The properties to group by (if empty, no grouping is applied to the ++ // result set). ++ repeated PropertyReference group_by = 6; ++ ++ // A starting point for the query results. Optional. Query cursors are ++ // returned in query result batches. ++ optional bytes /* serialized QueryCursor */ start_cursor = 7; ++ ++ // An ending point for the query results. Optional. Query cursors are ++ // returned in query result batches. 
++ optional bytes /* serialized QueryCursor */ end_cursor = 8; ++ ++ // The number of results to skip. Applies before limit, but after all other ++ // constraints (optional, defaults to 0). ++ optional int32 offset = 10 [default=0]; ++ ++ // The maximum number of results to return. Applies after all other ++ // constraints. Optional. ++ optional int32 limit = 11; ++} ++ ++// A representation of a kind. ++message KindExpression { ++ // The name of the kind. ++ required string name = 1; ++} ++ ++// A reference to a property relative to the kind expressions. ++// exactly. ++message PropertyReference { ++ // The name of the property. ++ required string name = 2; ++} ++ ++// A representation of a property in a projection. ++message PropertyExpression { ++ enum AggregationFunction { ++ FIRST = 1; ++ } ++ // The property to project. ++ required PropertyReference property = 1; ++ // The aggregation function to apply to the property. Optional. ++ // Can only be used when grouping by at least one property. Must ++ // then be set on all properties in the projection that are not ++ // being grouped by. ++ optional AggregationFunction aggregation_function = 2; ++} ++ ++// The desired order for a specific property. ++message PropertyOrder { ++ enum Direction { ++ ASCENDING = 1; ++ DESCENDING = 2; ++ } ++ // The property to order by. ++ required PropertyReference property = 1; ++ // The direction to order by. ++ optional Direction direction = 2 [default=ASCENDING]; ++} ++ ++// A holder for any type of filter. Exactly one field should be specified. ++message Filter { ++ // A composite filter. ++ optional CompositeFilter composite_filter = 1; ++ // A filter on a property. ++ optional PropertyFilter property_filter = 2; ++} ++ ++// A filter that merges the multiple other filters using the given operation. ++message CompositeFilter { ++ enum Operator { ++ AND = 1; ++ } ++ ++ // The operator for combining multiple filters. ++ required Operator operator = 1; ++ // The list of filters to combine. ++ // Must contain at least one filter. ++ repeated Filter filter = 2; ++} ++ ++// A filter on a specific property. ++message PropertyFilter { ++ enum Operator { ++ LESS_THAN = 1; ++ LESS_THAN_OR_EQUAL = 2; ++ GREATER_THAN = 3; ++ GREATER_THAN_OR_EQUAL = 4; ++ EQUAL = 5; ++ ++ HAS_ANCESTOR = 11; ++ } ++ ++ // The property to filter by. ++ required PropertyReference property = 1; ++ // The operator to filter by. ++ required Operator operator = 2; ++ // The value to compare the property to. ++ required Value value = 3; ++} ++ ++// A GQL query. ++message GqlQuery { ++ required string query_string = 1; ++ // When false, the query string must not contain a literal. ++ optional bool allow_literal = 2 [default = false]; ++ // A named argument must set field GqlQueryArg.name. ++ // No two named arguments may have the same name. ++ // For each non-reserved named binding site in the query string, ++ // there must be a named argument with that name, ++ // but not necessarily the inverse. ++ repeated GqlQueryArg name_arg = 3; ++ // Numbered binding site @1 references the first numbered argument, ++ // effectively using 1-based indexing, rather than the usual 0. ++ // A numbered argument must NOT set field GqlQueryArg.name. ++ // For each binding site numbered i in query_string, ++ // there must be an ith numbered argument. ++ // The inverse must also be true. ++ repeated GqlQueryArg number_arg = 4; ++} ++ ++// A binding argument for a GQL query. ++// Exactly one of fields value and cursor must be set. 
++message GqlQueryArg { ++ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". ++ // Must not match regex "__.*__". ++ // Must not be "". ++ optional string name = 1; ++ optional Value value = 2; ++ optional bytes cursor = 3; ++} ++ ++// A batch of results produced by a query. ++message QueryResultBatch { ++ // The possible values for the 'more_results' field. ++ enum MoreResultsType { ++ NOT_FINISHED = 1; // There are additional batches to fetch from this query. ++ MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more ++ // results after the limit. ++ NO_MORE_RESULTS = 3; // The query has been exhausted. ++ } ++ ++ // The result type for every entity in entityResults. ++ required EntityResult.ResultType entity_result_type = 1; ++ // The results for this batch. ++ repeated EntityResult entity_result = 2; ++ ++ // A cursor that points to the position after the last result in the batch. ++ // May be absent. ++ optional bytes /* serialized QueryCursor */ end_cursor = 4; ++ ++ // The state of the query after the current batch. ++ required MoreResultsType more_results = 5; ++ ++ // The number of results skipped because of Query.offset. ++ optional int32 skipped_results = 6; ++} ++ ++// A set of changes to apply. ++// ++// No entity in this message may have a reserved property name, ++// not even a property in an entity in a value. ++// No value in this message may have meaning 18, ++// not even a value in an entity in another value. ++// ++// If entities with duplicate keys are present, an arbitrary choice will ++// be made as to which is written. ++message Mutation { ++ // Entities to upsert. ++ // Each upserted entity's key must have a complete path and ++ // must not be reserved/read-only. ++ repeated Entity upsert = 1; ++ // Entities to update. ++ // Each updated entity's key must have a complete path and ++ // must not be reserved/read-only. ++ repeated Entity update = 2; ++ // Entities to insert. ++ // Each inserted entity's key must have a complete path and ++ // must not be reserved/read-only. ++ repeated Entity insert = 3; ++ // Insert entities with a newly allocated ID. ++ // Each inserted entity's key must omit the final identifier in its path and ++ // must not be reserved/read-only. ++ repeated Entity insert_auto_id = 4; ++ // Keys of entities to delete. ++ // Each key must have a complete key path and must not be reserved/read-only. ++ repeated Key delete = 5; ++ // Ignore a user specified read-only period. Optional. ++ optional bool force = 6; ++} ++ ++// The result of applying a mutation. ++message MutationResult { ++ // Number of index writes. ++ required int32 index_updates = 1; ++ // Keys for insertAutoId entities. One per entity from the ++ // request, in the same order. ++ repeated Key insert_auto_id_key = 2; ++} ++ ++// Options shared by read requests. ++message ReadOptions { ++ enum ReadConsistency { ++ DEFAULT = 0; ++ STRONG = 1; ++ EVENTUAL = 2; ++ } ++ ++ // The read consistency to use. ++ // Cannot be set when transaction is set. ++ // Lookup and ancestor queries default to STRONG, global queries default to ++ // EVENTUAL and cannot be set to STRONG. ++ optional ReadConsistency read_consistency = 1 [default=DEFAULT]; ++ ++ // The transaction to use. Optional. ++ optional bytes /* serialized Transaction */ transaction = 2; ++} ++ ++// The request for Lookup. ++message LookupRequest { ++ ++ // Options for this lookup request. Optional. ++ optional ReadOptions read_options = 1; ++ // Keys of entities to look up from the datastore. 
++ repeated Key key = 3; ++} ++ ++// The response for Lookup. ++message LookupResponse { ++ ++ // The order of results in these fields is undefined and has no relation to ++ // the order of the keys in the input. ++ ++ // Entities found as ResultType.FULL entities. ++ repeated EntityResult found = 1; ++ ++ // Entities not found as ResultType.KEY_ONLY entities. ++ repeated EntityResult missing = 2; ++ ++ // A list of keys that were not looked up due to resource constraints. ++ repeated Key deferred = 3; ++} ++ ++ ++// The request for RunQuery. ++message RunQueryRequest { ++ ++ // The options for this query. ++ optional ReadOptions read_options = 1; ++ ++ // Entities are partitioned into subsets, identified by a dataset (usually ++ // implicitly specified by the project) and namespace ID. Queries are scoped ++ // to a single partition. ++ // This partition ID is normalized with the standard default context ++ // partition ID, but all other partition IDs in RunQueryRequest are ++ // normalized with this partition ID as the context partition ID. ++ optional PartitionId partition_id = 2; ++ ++ // The query to run. ++ // Either this field or field gql_query must be set, but not both. ++ optional Query query = 3; ++ // The GQL query to run. ++ // Either this field or field query must be set, but not both. ++ optional GqlQuery gql_query = 7; ++} ++ ++// The response for RunQuery. ++message RunQueryResponse { ++ ++ // A batch of query results (always present). ++ optional QueryResultBatch batch = 1; ++ ++} ++ ++// The request for BeginTransaction. ++message BeginTransactionRequest { ++ ++ enum IsolationLevel { ++ SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions ++ // conflict if their mutations conflict. For example: ++ // Read(A),Write(B) may not conflict with Read(B),Write(A), ++ // but Read(B),Write(B) does conflict with Read(B),Write(B). ++ SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent ++ // transactions conflict if they cannot be serialized. ++ // For example Read(A),Write(B) does conflict with ++ // Read(B),Write(A) but Read(A) may not conflict with ++ // Write(A). ++ } ++ ++ // The transaction isolation level. ++ optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; ++} ++ ++// The response for BeginTransaction. ++message BeginTransactionResponse { ++ ++ // The transaction identifier (always present). ++ optional bytes /* serialized Transaction */ transaction = 1; ++} ++ ++// The request for Rollback. ++message RollbackRequest { ++ ++ // The transaction identifier, returned by a call to ++ // beginTransaction. ++ required bytes /* serialized Transaction */ transaction = 1; ++} ++ ++// The response for Rollback. ++message RollbackResponse { ++// Empty ++} ++ ++// The request for Commit. ++message CommitRequest { ++ ++ enum Mode { ++ TRANSACTIONAL = 1; ++ NON_TRANSACTIONAL = 2; ++ } ++ ++ // The transaction identifier, returned by a call to ++ // beginTransaction. Must be set when mode is TRANSACTIONAL. ++ optional bytes /* serialized Transaction */ transaction = 1; ++ // The mutation to perform. Optional. ++ optional Mutation mutation = 2; ++ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. ++ optional Mode mode = 5 [default=TRANSACTIONAL]; ++} ++ ++// The response for Commit. ++message CommitResponse { ++ ++ // The result of performing the mutation (if any). ++ optional MutationResult mutation_result = 1; ++} ++ ++// The request for AllocateIds. 
++message AllocateIdsRequest { ++ ++ // A list of keys with incomplete key paths to allocate IDs for. ++ // No key may be reserved/read-only. ++ repeated Key key = 1; ++} ++ ++// The response for AllocateIds. ++message AllocateIdsResponse { ++ ++ // The keys specified in the request (in the same order), each with ++ // its key path completed with a newly allocated ID. ++ repeated Key key = 1; ++} ++ ++// Each rpc normalizes the partition IDs of the keys in its input entities, ++// and always returns entities with keys with normalized partition IDs. ++// (Note that applies to all entities, including entities in values.) ++service DatastoreService { ++ // Look up some entities by key. ++ rpc Lookup(LookupRequest) returns (LookupResponse) { ++ }; ++ // Query for entities. ++ rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { ++ }; ++ // Begin a new transaction. ++ rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { ++ }; ++ // Commit a transaction, optionally creating, deleting or modifying some ++ // entities. ++ rpc Commit(CommitRequest) returns (CommitResponse) { ++ }; ++ // Roll back a transaction. ++ rpc Rollback(RollbackRequest) returns (RollbackResponse) { ++ }; ++ // Allocate IDs for incomplete keys (useful for referencing an entity before ++ // it is inserted). ++ rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { ++ }; ++} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go +new file mode 100644 +index 0000000..aafd683 +--- /dev/null ++++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go +@@ -0,0 +1,57 @@ ++// Copyright 2014 Google Inc. All Rights Reserved. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package testutil contains helper functions for writing tests. ++package testutil ++ ++import ( ++ "io/ioutil" ++ "log" ++ "net/http" ++ "os" ++ ++ "golang.org/x/net/context" ++ "golang.org/x/oauth2" ++ "golang.org/x/oauth2/google" ++ "google.golang.org/cloud" ++) ++ ++const ( ++ envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" ++ envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" ++) ++ ++func Context(scopes ...string) context.Context { ++ key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) ++ if key == "" || projID == "" { ++ log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") ++ } ++ jsonKey, err := ioutil.ReadFile(key) ++ if err != nil { ++ log.Fatalf("Cannot read the JSON key file, err: %v", err) ++ } ++ conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) ++ if err != nil { ++ log.Fatal(err) ++ } ++ return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) ++} ++ ++func NoAuthContext() context.Context { ++ projID := os.Getenv(envProjID) ++ if projID == "" { ++ log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. 
See CONTRIBUTING.md for details.") ++ } ++ return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) ++} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go +deleted file mode 100644 +index 984323c..0000000 +--- a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go ++++ /dev/null +@@ -1,128 +0,0 @@ +-// Copyright 2014 Google Inc. All Rights Reserved. +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-// Package internal provides support for the cloud packages. +-// +-// Users should not import this package directly. +-package internal +- +-import ( +- "fmt" +- "net/http" +- "sync" +- +- "golang.org/x/net/context" +-) +- +-type contextKey struct{} +- +-func WithContext(parent context.Context, projID string, c *http.Client) context.Context { +- if c == nil { +- panic("nil *http.Client passed to WithContext") +- } +- if projID == "" { +- panic("empty project ID passed to WithContext") +- } +- return context.WithValue(parent, contextKey{}, &cloudContext{ +- ProjectID: projID, +- HTTPClient: c, +- }) +-} +- +-const userAgent = "gcloud-golang/0.1" +- +-type cloudContext struct { +- ProjectID string +- HTTPClient *http.Client +- +- mu sync.Mutex // guards svc +- svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +-} +- +-// Service returns the result of the fill function if it's never been +-// called before for the given name (which is assumed to be an API +-// service name, like "datastore"). If it has already been cached, the fill +-// func is not run. +-// It's safe for concurrent use by multiple goroutines. +-func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { +- return cc(ctx).service(name, fill) +-} +- +-func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { +- c.mu.Lock() +- defer c.mu.Unlock() +- +- if c.svc == nil { +- c.svc = make(map[string]interface{}) +- } else if v, ok := c.svc[name]; ok { +- return v +- } +- v := fill(c.HTTPClient) +- c.svc[name] = v +- return v +-} +- +-// Transport is an http.RoundTripper that appends +-// Google Cloud client's user-agent to the original +-// request's user-agent header. +-type Transport struct { +- // Base represents the actual http.RoundTripper +- // the requests will be delegated to. +- Base http.RoundTripper +-} +- +-// RoundTrip appends a user-agent to the existing user-agent +-// header and delegates the request to the base http.RoundTripper. +-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { +- req = cloneRequest(req) +- ua := req.Header.Get("User-Agent") +- if ua == "" { +- ua = userAgent +- } else { +- ua = fmt.Sprintf("%s;%s", ua, userAgent) +- } +- req.Header.Set("User-Agent", ua) +- return t.Base.RoundTrip(req) +-} +- +-// cloneRequest returns a clone of the provided *http.Request. +-// The clone is a shallow copy of the struct and its Header map. 
+-func cloneRequest(r *http.Request) *http.Request { +- // shallow copy of the struct +- r2 := new(http.Request) +- *r2 = *r +- // deep copy of the Header +- r2.Header = make(http.Header) +- for k, s := range r.Header { +- r2.Header[k] = s +- } +- return r2 +-} +- +-func ProjID(ctx context.Context) string { +- return cc(ctx).ProjectID +-} +- +-func HTTPClient(ctx context.Context) *http.Client { +- return cc(ctx).HTTPClient +-} +- +-// cc returns the internal *cloudContext (cc) state for a context.Context. +-// It panics if the user did it wrong. +-func cc(ctx context.Context) *cloudContext { +- if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { +- return c +- } +- panic("invalid context.Context type; it should be created with cloud.NewContext") +-} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go +deleted file mode 100644 +index be903e5..0000000 +--- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go ++++ /dev/null +@@ -1,1633 +0,0 @@ +-// Code generated by protoc-gen-go. +-// source: datastore_v1.proto +-// DO NOT EDIT! +- +-/* +-Package pb is a generated protocol buffer package. +- +-It is generated from these files: +- datastore_v1.proto +- +-It has these top-level messages: +- PartitionId +- Key +- Value +- Property +- Entity +- EntityResult +- Query +- KindExpression +- PropertyReference +- PropertyExpression +- PropertyOrder +- Filter +- CompositeFilter +- PropertyFilter +- GqlQuery +- GqlQueryArg +- QueryResultBatch +- Mutation +- MutationResult +- ReadOptions +- LookupRequest +- LookupResponse +- RunQueryRequest +- RunQueryResponse +- BeginTransactionRequest +- BeginTransactionResponse +- RollbackRequest +- RollbackResponse +- CommitRequest +- CommitResponse +- AllocateIdsRequest +- AllocateIdsResponse +-*/ +-package pb +- +-import proto "github.com/golang/protobuf/proto" +-import math "math" +- +-// Reference imports to suppress errors if they are not otherwise used. +-var _ = proto.Marshal +-var _ = math.Inf +- +-// Specifies what data the 'entity' field contains. +-// A ResultType is either implied (for example, in LookupResponse.found it +-// is always FULL) or specified by context (for example, in message +-// QueryResultBatch, field 'entity_result_type' specifies a ResultType +-// for all the values in field 'entity_result'). +-type EntityResult_ResultType int32 +- +-const ( +- EntityResult_FULL EntityResult_ResultType = 1 +- EntityResult_PROJECTION EntityResult_ResultType = 2 +- // The entity may have no key. +- // A property value may have meaning 18. 
+- EntityResult_KEY_ONLY EntityResult_ResultType = 3 +-) +- +-var EntityResult_ResultType_name = map[int32]string{ +- 1: "FULL", +- 2: "PROJECTION", +- 3: "KEY_ONLY", +-} +-var EntityResult_ResultType_value = map[string]int32{ +- "FULL": 1, +- "PROJECTION": 2, +- "KEY_ONLY": 3, +-} +- +-func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { +- p := new(EntityResult_ResultType) +- *p = x +- return p +-} +-func (x EntityResult_ResultType) String() string { +- return proto.EnumName(EntityResult_ResultType_name, int32(x)) +-} +-func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") +- if err != nil { +- return err +- } +- *x = EntityResult_ResultType(value) +- return nil +-} +- +-type PropertyExpression_AggregationFunction int32 +- +-const ( +- PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 +-) +- +-var PropertyExpression_AggregationFunction_name = map[int32]string{ +- 1: "FIRST", +-} +-var PropertyExpression_AggregationFunction_value = map[string]int32{ +- "FIRST": 1, +-} +- +-func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { +- p := new(PropertyExpression_AggregationFunction) +- *p = x +- return p +-} +-func (x PropertyExpression_AggregationFunction) String() string { +- return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) +-} +-func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") +- if err != nil { +- return err +- } +- *x = PropertyExpression_AggregationFunction(value) +- return nil +-} +- +-type PropertyOrder_Direction int32 +- +-const ( +- PropertyOrder_ASCENDING PropertyOrder_Direction = 1 +- PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +-) +- +-var PropertyOrder_Direction_name = map[int32]string{ +- 1: "ASCENDING", +- 2: "DESCENDING", +-} +-var PropertyOrder_Direction_value = map[string]int32{ +- "ASCENDING": 1, +- "DESCENDING": 2, +-} +- +-func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { +- p := new(PropertyOrder_Direction) +- *p = x +- return p +-} +-func (x PropertyOrder_Direction) String() string { +- return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +-} +-func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") +- if err != nil { +- return err +- } +- *x = PropertyOrder_Direction(value) +- return nil +-} +- +-type CompositeFilter_Operator int32 +- +-const ( +- CompositeFilter_AND CompositeFilter_Operator = 1 +-) +- +-var CompositeFilter_Operator_name = map[int32]string{ +- 1: "AND", +-} +-var CompositeFilter_Operator_value = map[string]int32{ +- "AND": 1, +-} +- +-func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { +- p := new(CompositeFilter_Operator) +- *p = x +- return p +-} +-func (x CompositeFilter_Operator) String() string { +- return proto.EnumName(CompositeFilter_Operator_name, int32(x)) +-} +-func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") +- if err != nil { +- return err +- } +- *x = CompositeFilter_Operator(value) +- return nil +-} +- +-type PropertyFilter_Operator int32 +- +-const ( +- 
PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 +- PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 +- PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 +- PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 +- PropertyFilter_EQUAL PropertyFilter_Operator = 5 +- PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +-) +- +-var PropertyFilter_Operator_name = map[int32]string{ +- 1: "LESS_THAN", +- 2: "LESS_THAN_OR_EQUAL", +- 3: "GREATER_THAN", +- 4: "GREATER_THAN_OR_EQUAL", +- 5: "EQUAL", +- 11: "HAS_ANCESTOR", +-} +-var PropertyFilter_Operator_value = map[string]int32{ +- "LESS_THAN": 1, +- "LESS_THAN_OR_EQUAL": 2, +- "GREATER_THAN": 3, +- "GREATER_THAN_OR_EQUAL": 4, +- "EQUAL": 5, +- "HAS_ANCESTOR": 11, +-} +- +-func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { +- p := new(PropertyFilter_Operator) +- *p = x +- return p +-} +-func (x PropertyFilter_Operator) String() string { +- return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +-} +-func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") +- if err != nil { +- return err +- } +- *x = PropertyFilter_Operator(value) +- return nil +-} +- +-// The possible values for the 'more_results' field. +-type QueryResultBatch_MoreResultsType int32 +- +-const ( +- QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 +- QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 +- // results after the limit. +- QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +-) +- +-var QueryResultBatch_MoreResultsType_name = map[int32]string{ +- 1: "NOT_FINISHED", +- 2: "MORE_RESULTS_AFTER_LIMIT", +- 3: "NO_MORE_RESULTS", +-} +-var QueryResultBatch_MoreResultsType_value = map[string]int32{ +- "NOT_FINISHED": 1, +- "MORE_RESULTS_AFTER_LIMIT": 2, +- "NO_MORE_RESULTS": 3, +-} +- +-func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { +- p := new(QueryResultBatch_MoreResultsType) +- *p = x +- return p +-} +-func (x QueryResultBatch_MoreResultsType) String() string { +- return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +-} +-func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") +- if err != nil { +- return err +- } +- *x = QueryResultBatch_MoreResultsType(value) +- return nil +-} +- +-type ReadOptions_ReadConsistency int32 +- +-const ( +- ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 +- ReadOptions_STRONG ReadOptions_ReadConsistency = 1 +- ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +-) +- +-var ReadOptions_ReadConsistency_name = map[int32]string{ +- 0: "DEFAULT", +- 1: "STRONG", +- 2: "EVENTUAL", +-} +-var ReadOptions_ReadConsistency_value = map[string]int32{ +- "DEFAULT": 0, +- "STRONG": 1, +- "EVENTUAL": 2, +-} +- +-func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { +- p := new(ReadOptions_ReadConsistency) +- *p = x +- return p +-} +-func (x ReadOptions_ReadConsistency) String() string { +- return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +-} +-func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") +- if err != nil { +- return err +- } +- *x = 
ReadOptions_ReadConsistency(value) +- return nil +-} +- +-type BeginTransactionRequest_IsolationLevel int32 +- +-const ( +- BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 +- // conflict if their mutations conflict. For example: +- // Read(A),Write(B) may not conflict with Read(B),Write(A), +- // but Read(B),Write(B) does conflict with Read(B),Write(B). +- BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 +-) +- +-var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ +- 0: "SNAPSHOT", +- 1: "SERIALIZABLE", +-} +-var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ +- "SNAPSHOT": 0, +- "SERIALIZABLE": 1, +-} +- +-func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { +- p := new(BeginTransactionRequest_IsolationLevel) +- *p = x +- return p +-} +-func (x BeginTransactionRequest_IsolationLevel) String() string { +- return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) +-} +-func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") +- if err != nil { +- return err +- } +- *x = BeginTransactionRequest_IsolationLevel(value) +- return nil +-} +- +-type CommitRequest_Mode int32 +- +-const ( +- CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 +- CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +-) +- +-var CommitRequest_Mode_name = map[int32]string{ +- 1: "TRANSACTIONAL", +- 2: "NON_TRANSACTIONAL", +-} +-var CommitRequest_Mode_value = map[string]int32{ +- "TRANSACTIONAL": 1, +- "NON_TRANSACTIONAL": 2, +-} +- +-func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { +- p := new(CommitRequest_Mode) +- *p = x +- return p +-} +-func (x CommitRequest_Mode) String() string { +- return proto.EnumName(CommitRequest_Mode_name, int32(x)) +-} +-func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { +- value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") +- if err != nil { +- return err +- } +- *x = CommitRequest_Mode(value) +- return nil +-} +- +-// An identifier for a particular subset of entities. +-// +-// Entities are partitioned into various subsets, each used by different +-// datasets and different namespaces within a dataset and so forth. +-// +-// All input partition IDs are normalized before use. +-// A partition ID is normalized as follows: +-// If the partition ID is unset or is set to an empty partition ID, replace it +-// with the context partition ID. +-// Otherwise, if the partition ID has no dataset ID, assign it the context +-// partition ID's dataset ID. +-// Unless otherwise documented, the context partition ID has the dataset ID set +-// to the context dataset ID and no other partition dimension set. +-// +-// A partition ID is empty if all of its fields are unset. +-// +-// Partition dimension: +-// A dimension may be unset. +-// A dimension's value must never be "". +-// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +-// If the value of any dimension matches regex "__.*__", +-// the partition is reserved/read-only. +-// A reserved/read-only partition ID is forbidden in certain documented contexts. +-// +-// Dataset ID: +-// A dataset id's value must never be "". 
+-// A dataset id's value must match +-// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +-type PartitionId struct { +- // The dataset ID. +- DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` +- // The namespace. +- Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *PartitionId) Reset() { *m = PartitionId{} } +-func (m *PartitionId) String() string { return proto.CompactTextString(m) } +-func (*PartitionId) ProtoMessage() {} +- +-func (m *PartitionId) GetDatasetId() string { +- if m != nil && m.DatasetId != nil { +- return *m.DatasetId +- } +- return "" +-} +- +-func (m *PartitionId) GetNamespace() string { +- if m != nil && m.Namespace != nil { +- return *m.Namespace +- } +- return "" +-} +- +-// A unique identifier for an entity. +-// If a key's partition id or any of its path kinds or names are +-// reserved/read-only, the key is reserved/read-only. +-// A reserved/read-only key is forbidden in certain documented contexts. +-type Key struct { +- // Entities are partitioned into subsets, currently identified by a dataset +- // (usually implicitly specified by the project) and namespace ID. +- // Queries are scoped to a single partition. +- PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` +- // The entity path. +- // An entity path consists of one or more elements composed of a kind and a +- // string or numerical identifier, which identify entities. The first +- // element identifies a root entity, the second element identifies +- // a child of the root entity, the third element a child of the +- // second entity, and so forth. The entities identified by all prefixes of +- // the path are called the element's ancestors. +- // An entity path is always fully complete: ALL of the entity's ancestors +- // are required to be in the path along with the entity identifier itself. +- // The only exception is that in some documented cases, the identifier in the +- // last path element (for the entity) itself may be omitted. A path can never +- // be empty. +- PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Key) Reset() { *m = Key{} } +-func (m *Key) String() string { return proto.CompactTextString(m) } +-func (*Key) ProtoMessage() {} +- +-func (m *Key) GetPartitionId() *PartitionId { +- if m != nil { +- return m.PartitionId +- } +- return nil +-} +- +-func (m *Key) GetPathElement() []*Key_PathElement { +- if m != nil { +- return m.PathElement +- } +- return nil +-} +- +-// A (kind, ID/name) pair used to construct a key path. +-// +-// At most one of name or ID may be set. +-// If either is set, the element is complete. +-// If neither is set, the element is incomplete. +-type Key_PathElement struct { +- // The kind of the entity. +- // A kind matching regex "__.*__" is reserved/read-only. +- // A kind must not contain more than 500 characters. +- // Cannot be "". +- Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` +- // The ID of the entity. +- // Never equal to zero. Values less than zero are discouraged and will not +- // be supported in the future. +- Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` +- // The name of the entity. +- // A name matching regex "__.*__" is reserved/read-only. +- // A name must not be more than 500 characters. 
+- // Cannot be "". +- Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +-func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +-func (*Key_PathElement) ProtoMessage() {} +- +-func (m *Key_PathElement) GetKind() string { +- if m != nil && m.Kind != nil { +- return *m.Kind +- } +- return "" +-} +- +-func (m *Key_PathElement) GetId() int64 { +- if m != nil && m.Id != nil { +- return *m.Id +- } +- return 0 +-} +- +-func (m *Key_PathElement) GetName() string { +- if m != nil && m.Name != nil { +- return *m.Name +- } +- return "" +-} +- +-// A message that can hold any of the supported value types and associated +-// metadata. +-// +-// At most one of the Value fields may be set. +-// If none are set the value is "null". +-// +-type Value struct { +- // A boolean value. +- BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` +- // An integer value. +- IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` +- // A double value. +- DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` +- // A timestamp value. +- TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` +- // A key value. +- KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` +- // A blob key value. +- BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` +- // A UTF-8 encoded string value. +- StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` +- // A blob value. +- BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` +- // An entity value. +- // May have no key. +- // May have a key with an incomplete key path. +- // May have a reserved/read-only key. +- EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` +- // A list value. +- // Cannot contain another list value. +- // Cannot also have a meaning and indexing set. +- ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` +- // The meaning field is reserved and should not be used. +- Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` +- // If the value should be indexed. +- // +- // The indexed property may be set for a +- // null value. +- // When indexed is true, stringValue +- // is limited to 500 characters and the blob value is limited to 500 bytes. +- // Exception: If meaning is set to 2, string_value is limited to 2038 +- // characters regardless of indexed. +- // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 +- // will be ignored on input (and will never be set on output). +- // Input values by default have indexed set to +- // true; however, you can explicitly set indexed to +- // true if you want. (An output value never has +- // indexed explicitly set to true.) If a value is +- // itself an entity, it cannot have indexed set to +- // true. +- // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
+- Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Value) Reset() { *m = Value{} } +-func (m *Value) String() string { return proto.CompactTextString(m) } +-func (*Value) ProtoMessage() {} +- +-const Default_Value_Indexed bool = true +- +-func (m *Value) GetBooleanValue() bool { +- if m != nil && m.BooleanValue != nil { +- return *m.BooleanValue +- } +- return false +-} +- +-func (m *Value) GetIntegerValue() int64 { +- if m != nil && m.IntegerValue != nil { +- return *m.IntegerValue +- } +- return 0 +-} +- +-func (m *Value) GetDoubleValue() float64 { +- if m != nil && m.DoubleValue != nil { +- return *m.DoubleValue +- } +- return 0 +-} +- +-func (m *Value) GetTimestampMicrosecondsValue() int64 { +- if m != nil && m.TimestampMicrosecondsValue != nil { +- return *m.TimestampMicrosecondsValue +- } +- return 0 +-} +- +-func (m *Value) GetKeyValue() *Key { +- if m != nil { +- return m.KeyValue +- } +- return nil +-} +- +-func (m *Value) GetBlobKeyValue() string { +- if m != nil && m.BlobKeyValue != nil { +- return *m.BlobKeyValue +- } +- return "" +-} +- +-func (m *Value) GetStringValue() string { +- if m != nil && m.StringValue != nil { +- return *m.StringValue +- } +- return "" +-} +- +-func (m *Value) GetBlobValue() []byte { +- if m != nil { +- return m.BlobValue +- } +- return nil +-} +- +-func (m *Value) GetEntityValue() *Entity { +- if m != nil { +- return m.EntityValue +- } +- return nil +-} +- +-func (m *Value) GetListValue() []*Value { +- if m != nil { +- return m.ListValue +- } +- return nil +-} +- +-func (m *Value) GetMeaning() int32 { +- if m != nil && m.Meaning != nil { +- return *m.Meaning +- } +- return 0 +-} +- +-func (m *Value) GetIndexed() bool { +- if m != nil && m.Indexed != nil { +- return *m.Indexed +- } +- return Default_Value_Indexed +-} +- +-// An entity property. +-type Property struct { +- // The name of the property. +- // A property name matching regex "__.*__" is reserved. +- // A reserved property name is forbidden in certain documented contexts. +- // The name must not contain more than 500 characters. +- // Cannot be "". +- Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` +- // The value(s) of the property. +- // Each value can have only one value property populated. For example, +- // you cannot have a values list of { value: { integerValue: 22, +- // stringValue: "a" } }, but you can have { value: { listValue: +- // [ { integerValue: 22 }, { stringValue: "a" } ] }. +- Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Property) Reset() { *m = Property{} } +-func (m *Property) String() string { return proto.CompactTextString(m) } +-func (*Property) ProtoMessage() {} +- +-func (m *Property) GetName() string { +- if m != nil && m.Name != nil { +- return *m.Name +- } +- return "" +-} +- +-func (m *Property) GetValue() *Value { +- if m != nil { +- return m.Value +- } +- return nil +-} +- +-// An entity. +-// +-// An entity is limited to 1 megabyte when stored. That roughly +-// corresponds to a limit of 1 megabyte for the serialized form of this +-// message. +-type Entity struct { +- // The entity's key. +- // +- // An entity must have a key, unless otherwise documented (for example, +- // an entity in Value.entityValue may have no key). +- // An entity's kind is its key's path's last element's kind, +- // or null if it has no key. 
+- Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` +- // The entity's properties. +- // Each property's name must be unique for its entity. +- Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Entity) Reset() { *m = Entity{} } +-func (m *Entity) String() string { return proto.CompactTextString(m) } +-func (*Entity) ProtoMessage() {} +- +-func (m *Entity) GetKey() *Key { +- if m != nil { +- return m.Key +- } +- return nil +-} +- +-func (m *Entity) GetProperty() []*Property { +- if m != nil { +- return m.Property +- } +- return nil +-} +- +-// The result of fetching an entity from the datastore. +-type EntityResult struct { +- // The resulting entity. +- Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *EntityResult) Reset() { *m = EntityResult{} } +-func (m *EntityResult) String() string { return proto.CompactTextString(m) } +-func (*EntityResult) ProtoMessage() {} +- +-func (m *EntityResult) GetEntity() *Entity { +- if m != nil { +- return m.Entity +- } +- return nil +-} +- +-// A query. +-type Query struct { +- // The projection to return. If not set the entire entity is returned. +- Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` +- // The kinds to query (if empty, returns entities from all kinds). +- Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` +- // The filter to apply (optional). +- Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +- // The order to apply to the query results (if empty, order is unspecified). +- Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` +- // The properties to group by (if empty, no grouping is applied to the +- // result set). +- GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` +- // A starting point for the query results. Optional. Query cursors are +- // returned in query result batches. +- StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` +- // An ending point for the query results. Optional. Query cursors are +- // returned in query result batches. +- EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` +- // The number of results to skip. Applies before limit, but after all other +- // constraints (optional, defaults to 0). +- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` +- // The maximum number of results to return. Applies after all other +- // constraints. Optional. 
+- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Query) Reset() { *m = Query{} } +-func (m *Query) String() string { return proto.CompactTextString(m) } +-func (*Query) ProtoMessage() {} +- +-const Default_Query_Offset int32 = 0 +- +-func (m *Query) GetProjection() []*PropertyExpression { +- if m != nil { +- return m.Projection +- } +- return nil +-} +- +-func (m *Query) GetKind() []*KindExpression { +- if m != nil { +- return m.Kind +- } +- return nil +-} +- +-func (m *Query) GetFilter() *Filter { +- if m != nil { +- return m.Filter +- } +- return nil +-} +- +-func (m *Query) GetOrder() []*PropertyOrder { +- if m != nil { +- return m.Order +- } +- return nil +-} +- +-func (m *Query) GetGroupBy() []*PropertyReference { +- if m != nil { +- return m.GroupBy +- } +- return nil +-} +- +-func (m *Query) GetStartCursor() []byte { +- if m != nil { +- return m.StartCursor +- } +- return nil +-} +- +-func (m *Query) GetEndCursor() []byte { +- if m != nil { +- return m.EndCursor +- } +- return nil +-} +- +-func (m *Query) GetOffset() int32 { +- if m != nil && m.Offset != nil { +- return *m.Offset +- } +- return Default_Query_Offset +-} +- +-func (m *Query) GetLimit() int32 { +- if m != nil && m.Limit != nil { +- return *m.Limit +- } +- return 0 +-} +- +-// A representation of a kind. +-type KindExpression struct { +- // The name of the kind. +- Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *KindExpression) Reset() { *m = KindExpression{} } +-func (m *KindExpression) String() string { return proto.CompactTextString(m) } +-func (*KindExpression) ProtoMessage() {} +- +-func (m *KindExpression) GetName() string { +- if m != nil && m.Name != nil { +- return *m.Name +- } +- return "" +-} +- +-// A reference to a property relative to the kind expressions. +-// exactly. +-type PropertyReference struct { +- // The name of the property. +- Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *PropertyReference) Reset() { *m = PropertyReference{} } +-func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +-func (*PropertyReference) ProtoMessage() {} +- +-func (m *PropertyReference) GetName() string { +- if m != nil && m.Name != nil { +- return *m.Name +- } +- return "" +-} +- +-// A representation of a property in a projection. +-type PropertyExpression struct { +- // The property to project. +- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` +- // The aggregation function to apply to the property. Optional. +- // Can only be used when grouping by at least one property. Must +- // then be set on all properties in the projection that are not +- // being grouped by. 
+- AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=pb.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } +-func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } +-func (*PropertyExpression) ProtoMessage() {} +- +-func (m *PropertyExpression) GetProperty() *PropertyReference { +- if m != nil { +- return m.Property +- } +- return nil +-} +- +-func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { +- if m != nil && m.AggregationFunction != nil { +- return *m.AggregationFunction +- } +- return PropertyExpression_FIRST +-} +- +-// The desired order for a specific property. +-type PropertyOrder struct { +- // The property to order by. +- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` +- // The direction to order by. +- Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=pb.PropertyOrder_Direction,def=1" json:"direction,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +-func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +-func (*PropertyOrder) ProtoMessage() {} +- +-const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING +- +-func (m *PropertyOrder) GetProperty() *PropertyReference { +- if m != nil { +- return m.Property +- } +- return nil +-} +- +-func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { +- if m != nil && m.Direction != nil { +- return *m.Direction +- } +- return Default_PropertyOrder_Direction +-} +- +-// A holder for any type of filter. Exactly one field should be specified. +-type Filter struct { +- // A composite filter. +- CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` +- // A filter on a property. +- PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Filter) Reset() { *m = Filter{} } +-func (m *Filter) String() string { return proto.CompactTextString(m) } +-func (*Filter) ProtoMessage() {} +- +-func (m *Filter) GetCompositeFilter() *CompositeFilter { +- if m != nil { +- return m.CompositeFilter +- } +- return nil +-} +- +-func (m *Filter) GetPropertyFilter() *PropertyFilter { +- if m != nil { +- return m.PropertyFilter +- } +- return nil +-} +- +-// A filter that merges the multiple other filters using the given operation. +-type CompositeFilter struct { +- // The operator for combining multiple filters. +- Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=pb.CompositeFilter_Operator" json:"operator,omitempty"` +- // The list of filters to combine. +- // Must contain at least one filter. 
+- Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +-func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +-func (*CompositeFilter) ProtoMessage() {} +- +-func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { +- if m != nil && m.Operator != nil { +- return *m.Operator +- } +- return CompositeFilter_AND +-} +- +-func (m *CompositeFilter) GetFilter() []*Filter { +- if m != nil { +- return m.Filter +- } +- return nil +-} +- +-// A filter on a specific property. +-type PropertyFilter struct { +- // The property to filter by. +- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` +- // The operator to filter by. +- Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=pb.PropertyFilter_Operator" json:"operator,omitempty"` +- // The value to compare the property to. +- Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +-func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +-func (*PropertyFilter) ProtoMessage() {} +- +-func (m *PropertyFilter) GetProperty() *PropertyReference { +- if m != nil { +- return m.Property +- } +- return nil +-} +- +-func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { +- if m != nil && m.Operator != nil { +- return *m.Operator +- } +- return PropertyFilter_LESS_THAN +-} +- +-func (m *PropertyFilter) GetValue() *Value { +- if m != nil { +- return m.Value +- } +- return nil +-} +- +-// A GQL query. +-type GqlQuery struct { +- QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` +- // When false, the query string must not contain a literal. +- AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` +- // A named argument must set field GqlQueryArg.name. +- // No two named arguments may have the same name. +- // For each non-reserved named binding site in the query string, +- // there must be a named argument with that name, +- // but not necessarily the inverse. +- NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` +- // Numbered binding site @1 references the first numbered argument, +- // effectively using 1-based indexing, rather than the usual 0. +- // A numbered argument must NOT set field GqlQueryArg.name. +- // For each binding site numbered i in query_string, +- // there must be an ith numbered argument. +- // The inverse must also be true. 
+- NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *GqlQuery) Reset() { *m = GqlQuery{} } +-func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +-func (*GqlQuery) ProtoMessage() {} +- +-const Default_GqlQuery_AllowLiteral bool = false +- +-func (m *GqlQuery) GetQueryString() string { +- if m != nil && m.QueryString != nil { +- return *m.QueryString +- } +- return "" +-} +- +-func (m *GqlQuery) GetAllowLiteral() bool { +- if m != nil && m.AllowLiteral != nil { +- return *m.AllowLiteral +- } +- return Default_GqlQuery_AllowLiteral +-} +- +-func (m *GqlQuery) GetNameArg() []*GqlQueryArg { +- if m != nil { +- return m.NameArg +- } +- return nil +-} +- +-func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { +- if m != nil { +- return m.NumberArg +- } +- return nil +-} +- +-// A binding argument for a GQL query. +-// Exactly one of fields value and cursor must be set. +-type GqlQueryArg struct { +- // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". +- // Must not match regex "__.*__". +- // Must not be "". +- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +- Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +- Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } +-func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } +-func (*GqlQueryArg) ProtoMessage() {} +- +-func (m *GqlQueryArg) GetName() string { +- if m != nil && m.Name != nil { +- return *m.Name +- } +- return "" +-} +- +-func (m *GqlQueryArg) GetValue() *Value { +- if m != nil { +- return m.Value +- } +- return nil +-} +- +-func (m *GqlQueryArg) GetCursor() []byte { +- if m != nil { +- return m.Cursor +- } +- return nil +-} +- +-// A batch of results produced by a query. +-type QueryResultBatch struct { +- // The result type for every entity in entityResults. +- EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=pb.EntityResult_ResultType" json:"entity_result_type,omitempty"` +- // The results for this batch. +- EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` +- // A cursor that points to the position after the last result in the batch. +- // May be absent. +- EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` +- // The state of the query after the current batch. +- MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=pb.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` +- // The number of results skipped because of Query.offset. 
+- SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +-func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +-func (*QueryResultBatch) ProtoMessage() {} +- +-func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { +- if m != nil && m.EntityResultType != nil { +- return *m.EntityResultType +- } +- return EntityResult_FULL +-} +- +-func (m *QueryResultBatch) GetEntityResult() []*EntityResult { +- if m != nil { +- return m.EntityResult +- } +- return nil +-} +- +-func (m *QueryResultBatch) GetEndCursor() []byte { +- if m != nil { +- return m.EndCursor +- } +- return nil +-} +- +-func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { +- if m != nil && m.MoreResults != nil { +- return *m.MoreResults +- } +- return QueryResultBatch_NOT_FINISHED +-} +- +-func (m *QueryResultBatch) GetSkippedResults() int32 { +- if m != nil && m.SkippedResults != nil { +- return *m.SkippedResults +- } +- return 0 +-} +- +-// A set of changes to apply. +-// +-// No entity in this message may have a reserved property name, +-// not even a property in an entity in a value. +-// No value in this message may have meaning 18, +-// not even a value in an entity in another value. +-// +-// If entities with duplicate keys are present, an arbitrary choice will +-// be made as to which is written. +-type Mutation struct { +- // Entities to upsert. +- // Each upserted entity's key must have a complete path and +- // must not be reserved/read-only. +- Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` +- // Entities to update. +- // Each updated entity's key must have a complete path and +- // must not be reserved/read-only. +- Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` +- // Entities to insert. +- // Each inserted entity's key must have a complete path and +- // must not be reserved/read-only. +- Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` +- // Insert entities with a newly allocated ID. +- // Each inserted entity's key must omit the final identifier in its path and +- // must not be reserved/read-only. +- InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` +- // Keys of entities to delete. +- // Each key must have a complete key path and must not be reserved/read-only. +- Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` +- // Ignore a user specified read-only period. Optional. 
+- Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *Mutation) Reset() { *m = Mutation{} } +-func (m *Mutation) String() string { return proto.CompactTextString(m) } +-func (*Mutation) ProtoMessage() {} +- +-func (m *Mutation) GetUpsert() []*Entity { +- if m != nil { +- return m.Upsert +- } +- return nil +-} +- +-func (m *Mutation) GetUpdate() []*Entity { +- if m != nil { +- return m.Update +- } +- return nil +-} +- +-func (m *Mutation) GetInsert() []*Entity { +- if m != nil { +- return m.Insert +- } +- return nil +-} +- +-func (m *Mutation) GetInsertAutoId() []*Entity { +- if m != nil { +- return m.InsertAutoId +- } +- return nil +-} +- +-func (m *Mutation) GetDelete() []*Key { +- if m != nil { +- return m.Delete +- } +- return nil +-} +- +-func (m *Mutation) GetForce() bool { +- if m != nil && m.Force != nil { +- return *m.Force +- } +- return false +-} +- +-// The result of applying a mutation. +-type MutationResult struct { +- // Number of index writes. +- IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` +- // Keys for insertAutoId entities. One per entity from the +- // request, in the same order. +- InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *MutationResult) Reset() { *m = MutationResult{} } +-func (m *MutationResult) String() string { return proto.CompactTextString(m) } +-func (*MutationResult) ProtoMessage() {} +- +-func (m *MutationResult) GetIndexUpdates() int32 { +- if m != nil && m.IndexUpdates != nil { +- return *m.IndexUpdates +- } +- return 0 +-} +- +-func (m *MutationResult) GetInsertAutoIdKey() []*Key { +- if m != nil { +- return m.InsertAutoIdKey +- } +- return nil +-} +- +-// Options shared by read requests. +-type ReadOptions struct { +- // The read consistency to use. +- // Cannot be set when transaction is set. +- // Lookup and ancestor queries default to STRONG, global queries default to +- // EVENTUAL and cannot be set to STRONG. +- ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=pb.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` +- // The transaction to use. Optional. +- Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *ReadOptions) Reset() { *m = ReadOptions{} } +-func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +-func (*ReadOptions) ProtoMessage() {} +- +-const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT +- +-func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { +- if m != nil && m.ReadConsistency != nil { +- return *m.ReadConsistency +- } +- return Default_ReadOptions_ReadConsistency +-} +- +-func (m *ReadOptions) GetTransaction() []byte { +- if m != nil { +- return m.Transaction +- } +- return nil +-} +- +-// The request for Lookup. +-type LookupRequest struct { +- // Options for this lookup request. Optional. +- ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` +- // Keys of entities to look up from the datastore. 
+- Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *LookupRequest) Reset() { *m = LookupRequest{} } +-func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +-func (*LookupRequest) ProtoMessage() {} +- +-func (m *LookupRequest) GetReadOptions() *ReadOptions { +- if m != nil { +- return m.ReadOptions +- } +- return nil +-} +- +-func (m *LookupRequest) GetKey() []*Key { +- if m != nil { +- return m.Key +- } +- return nil +-} +- +-// The response for Lookup. +-type LookupResponse struct { +- // Entities found as ResultType.FULL entities. +- Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` +- // Entities not found as ResultType.KEY_ONLY entities. +- Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` +- // A list of keys that were not looked up due to resource constraints. +- Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *LookupResponse) Reset() { *m = LookupResponse{} } +-func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +-func (*LookupResponse) ProtoMessage() {} +- +-func (m *LookupResponse) GetFound() []*EntityResult { +- if m != nil { +- return m.Found +- } +- return nil +-} +- +-func (m *LookupResponse) GetMissing() []*EntityResult { +- if m != nil { +- return m.Missing +- } +- return nil +-} +- +-func (m *LookupResponse) GetDeferred() []*Key { +- if m != nil { +- return m.Deferred +- } +- return nil +-} +- +-// The request for RunQuery. +-type RunQueryRequest struct { +- // The options for this query. +- ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` +- // Entities are partitioned into subsets, identified by a dataset (usually +- // implicitly specified by the project) and namespace ID. Queries are scoped +- // to a single partition. +- // This partition ID is normalized with the standard default context +- // partition ID, but all other partition IDs in RunQueryRequest are +- // normalized with this partition ID as the context partition ID. +- PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` +- // The query to run. +- // Either this field or field gql_query must be set, but not both. +- Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` +- // The GQL query to run. +- // Either this field or field query must be set, but not both. +- GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +-func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +-func (*RunQueryRequest) ProtoMessage() {} +- +-func (m *RunQueryRequest) GetReadOptions() *ReadOptions { +- if m != nil { +- return m.ReadOptions +- } +- return nil +-} +- +-func (m *RunQueryRequest) GetPartitionId() *PartitionId { +- if m != nil { +- return m.PartitionId +- } +- return nil +-} +- +-func (m *RunQueryRequest) GetQuery() *Query { +- if m != nil { +- return m.Query +- } +- return nil +-} +- +-func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { +- if m != nil { +- return m.GqlQuery +- } +- return nil +-} +- +-// The response for RunQuery. +-type RunQueryResponse struct { +- // A batch of query results (always present). 
+- Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +-func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +-func (*RunQueryResponse) ProtoMessage() {} +- +-func (m *RunQueryResponse) GetBatch() *QueryResultBatch { +- if m != nil { +- return m.Batch +- } +- return nil +-} +- +-// The request for BeginTransaction. +-type BeginTransactionRequest struct { +- // The transaction isolation level. +- IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=pb.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +-func (*BeginTransactionRequest) ProtoMessage() {} +- +-const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT +- +-func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { +- if m != nil && m.IsolationLevel != nil { +- return *m.IsolationLevel +- } +- return Default_BeginTransactionRequest_IsolationLevel +-} +- +-// The response for BeginTransaction. +-type BeginTransactionResponse struct { +- // The transaction identifier (always present). +- Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +-func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +-func (*BeginTransactionResponse) ProtoMessage() {} +- +-func (m *BeginTransactionResponse) GetTransaction() []byte { +- if m != nil { +- return m.Transaction +- } +- return nil +-} +- +-// The request for Rollback. +-type RollbackRequest struct { +- // The transaction identifier, returned by a call to +- // beginTransaction. +- Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +-func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +-func (*RollbackRequest) ProtoMessage() {} +- +-func (m *RollbackRequest) GetTransaction() []byte { +- if m != nil { +- return m.Transaction +- } +- return nil +-} +- +-// The response for Rollback. +-type RollbackResponse struct { +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +-func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +-func (*RollbackResponse) ProtoMessage() {} +- +-// The request for Commit. +-type CommitRequest struct { +- // The transaction identifier, returned by a call to +- // beginTransaction. Must be set when mode is TRANSACTIONAL. +- Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` +- // The mutation to perform. Optional. +- Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` +- // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
+- Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=pb.CommitRequest_Mode,def=1" json:"mode,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *CommitRequest) Reset() { *m = CommitRequest{} } +-func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +-func (*CommitRequest) ProtoMessage() {} +- +-const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL +- +-func (m *CommitRequest) GetTransaction() []byte { +- if m != nil { +- return m.Transaction +- } +- return nil +-} +- +-func (m *CommitRequest) GetMutation() *Mutation { +- if m != nil { +- return m.Mutation +- } +- return nil +-} +- +-func (m *CommitRequest) GetMode() CommitRequest_Mode { +- if m != nil && m.Mode != nil { +- return *m.Mode +- } +- return Default_CommitRequest_Mode +-} +- +-// The response for Commit. +-type CommitResponse struct { +- // The result of performing the mutation (if any). +- MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *CommitResponse) Reset() { *m = CommitResponse{} } +-func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +-func (*CommitResponse) ProtoMessage() {} +- +-func (m *CommitResponse) GetMutationResult() *MutationResult { +- if m != nil { +- return m.MutationResult +- } +- return nil +-} +- +-// The request for AllocateIds. +-type AllocateIdsRequest struct { +- // A list of keys with incomplete key paths to allocate IDs for. +- // No key may be reserved/read-only. +- Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +-func (*AllocateIdsRequest) ProtoMessage() {} +- +-func (m *AllocateIdsRequest) GetKey() []*Key { +- if m != nil { +- return m.Key +- } +- return nil +-} +- +-// The response for AllocateIds. +-type AllocateIdsResponse struct { +- // The keys specified in the request (in the same order), each with +- // its key path completed with a newly allocated ID. 
+- Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` +- XXX_unrecognized []byte `json:"-"` +-} +- +-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +-func (*AllocateIdsResponse) ProtoMessage() {} +- +-func (m *AllocateIdsResponse) GetKey() []*Key { +- if m != nil { +- return m.Key +- } +- return nil +-} +- +-func init() { +- proto.RegisterEnum("pb.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) +- proto.RegisterEnum("pb.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) +- proto.RegisterEnum("pb.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) +- proto.RegisterEnum("pb.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) +- proto.RegisterEnum("pb.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) +- proto.RegisterEnum("pb.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) +- proto.RegisterEnum("pb.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) +- proto.RegisterEnum("pb.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) +- proto.RegisterEnum("pb.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) +-} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto +deleted file mode 100644 +index bb4c199..0000000 +--- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto ++++ /dev/null +@@ -1,594 +0,0 @@ +-// Copyright 2013 Google Inc. All Rights Reserved. +-// +-// The datastore v1 service proto definitions +- +-syntax = "proto2"; +- +-package pb; +-option java_package = "com.google.api.services.datastore"; +- +- +-// An identifier for a particular subset of entities. +-// +-// Entities are partitioned into various subsets, each used by different +-// datasets and different namespaces within a dataset and so forth. +-// +-// All input partition IDs are normalized before use. +-// A partition ID is normalized as follows: +-// If the partition ID is unset or is set to an empty partition ID, replace it +-// with the context partition ID. +-// Otherwise, if the partition ID has no dataset ID, assign it the context +-// partition ID's dataset ID. +-// Unless otherwise documented, the context partition ID has the dataset ID set +-// to the context dataset ID and no other partition dimension set. +-// +-// A partition ID is empty if all of its fields are unset. +-// +-// Partition dimension: +-// A dimension may be unset. +-// A dimension's value must never be "". +-// A dimension's value must match [A-Za-z\d\.\-_]{1,100} +-// If the value of any dimension matches regex "__.*__", +-// the partition is reserved/read-only. +-// A reserved/read-only partition ID is forbidden in certain documented contexts. +-// +-// Dataset ID: +-// A dataset id's value must never be "". +-// A dataset id's value must match +-// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} +-message PartitionId { +- // The dataset ID. +- optional string dataset_id = 3; +- // The namespace. 
+- optional string namespace = 4; +-} +- +-// A unique identifier for an entity. +-// If a key's partition id or any of its path kinds or names are +-// reserved/read-only, the key is reserved/read-only. +-// A reserved/read-only key is forbidden in certain documented contexts. +-message Key { +- // Entities are partitioned into subsets, currently identified by a dataset +- // (usually implicitly specified by the project) and namespace ID. +- // Queries are scoped to a single partition. +- optional PartitionId partition_id = 1; +- +- // A (kind, ID/name) pair used to construct a key path. +- // +- // At most one of name or ID may be set. +- // If either is set, the element is complete. +- // If neither is set, the element is incomplete. +- message PathElement { +- // The kind of the entity. +- // A kind matching regex "__.*__" is reserved/read-only. +- // A kind must not contain more than 500 characters. +- // Cannot be "". +- required string kind = 1; +- // The ID of the entity. +- // Never equal to zero. Values less than zero are discouraged and will not +- // be supported in the future. +- optional int64 id = 2; +- // The name of the entity. +- // A name matching regex "__.*__" is reserved/read-only. +- // A name must not be more than 500 characters. +- // Cannot be "". +- optional string name = 3; +- } +- +- // The entity path. +- // An entity path consists of one or more elements composed of a kind and a +- // string or numerical identifier, which identify entities. The first +- // element identifies a root entity, the second element identifies +- // a child of the root entity, the third element a child of the +- // second entity, and so forth. The entities identified by all prefixes of +- // the path are called the element's ancestors. +- // An entity path is always fully complete: ALL of the entity's ancestors +- // are required to be in the path along with the entity identifier itself. +- // The only exception is that in some documented cases, the identifier in the +- // last path element (for the entity) itself may be omitted. A path can never +- // be empty. +- repeated PathElement path_element = 2; +-} +- +-// A message that can hold any of the supported value types and associated +-// metadata. +-// +-// At most one of the Value fields may be set. +-// If none are set the value is "null". +-// +-message Value { +- // A boolean value. +- optional bool boolean_value = 1; +- // An integer value. +- optional int64 integer_value = 2; +- // A double value. +- optional double double_value = 3; +- // A timestamp value. +- optional int64 timestamp_microseconds_value = 4; +- // A key value. +- optional Key key_value = 5; +- // A blob key value. +- optional string blob_key_value = 16; +- // A UTF-8 encoded string value. +- optional string string_value = 17; +- // A blob value. +- optional bytes blob_value = 18; +- // An entity value. +- // May have no key. +- // May have a key with an incomplete key path. +- // May have a reserved/read-only key. +- optional Entity entity_value = 6; +- // A list value. +- // Cannot contain another list value. +- // Cannot also have a meaning and indexing set. +- repeated Value list_value = 7; +- +- // The meaning field is reserved and should not be used. +- optional int32 meaning = 14; +- +- // If the value should be indexed. +- // +- // The indexed property may be set for a +- // null value. +- // When indexed is true, stringValue +- // is limited to 500 characters and the blob value is limited to 500 bytes. 
+- // Exception: If meaning is set to 2, string_value is limited to 2038 +- // characters regardless of indexed. +- // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 +- // will be ignored on input (and will never be set on output). +- // Input values by default have indexed set to +- // true; however, you can explicitly set indexed to +- // true if you want. (An output value never has +- // indexed explicitly set to true.) If a value is +- // itself an entity, it cannot have indexed set to +- // true. +- // Exception: An entity value with meaning 9, 20 or 21 may be indexed. +- optional bool indexed = 15 [default = true]; +-} +- +-// An entity property. +-message Property { +- // The name of the property. +- // A property name matching regex "__.*__" is reserved. +- // A reserved property name is forbidden in certain documented contexts. +- // The name must not contain more than 500 characters. +- // Cannot be "". +- required string name = 1; +- +- // The value(s) of the property. +- // Each value can have only one value property populated. For example, +- // you cannot have a values list of { value: { integerValue: 22, +- // stringValue: "a" } }, but you can have { value: { listValue: +- // [ { integerValue: 22 }, { stringValue: "a" } ] }. +- required Value value = 4; +-} +- +-// An entity. +-// +-// An entity is limited to 1 megabyte when stored. That roughly +-// corresponds to a limit of 1 megabyte for the serialized form of this +-// message. +-message Entity { +- // The entity's key. +- // +- // An entity must have a key, unless otherwise documented (for example, +- // an entity in Value.entityValue may have no key). +- // An entity's kind is its key's path's last element's kind, +- // or null if it has no key. +- optional Key key = 1; +- // The entity's properties. +- // Each property's name must be unique for its entity. +- repeated Property property = 2; +-} +- +-// The result of fetching an entity from the datastore. +-message EntityResult { +- // Specifies what data the 'entity' field contains. +- // A ResultType is either implied (for example, in LookupResponse.found it +- // is always FULL) or specified by context (for example, in message +- // QueryResultBatch, field 'entity_result_type' specifies a ResultType +- // for all the values in field 'entity_result'). +- enum ResultType { +- FULL = 1; // The entire entity. +- PROJECTION = 2; // A projected subset of properties. +- // The entity may have no key. +- // A property value may have meaning 18. +- KEY_ONLY = 3; // Only the key. +- } +- +- // The resulting entity. +- required Entity entity = 1; +-} +- +-// A query. +-message Query { +- // The projection to return. If not set the entire entity is returned. +- repeated PropertyExpression projection = 2; +- +- // The kinds to query (if empty, returns entities from all kinds). +- repeated KindExpression kind = 3; +- +- // The filter to apply (optional). +- optional Filter filter = 4; +- +- // The order to apply to the query results (if empty, order is unspecified). +- repeated PropertyOrder order = 5; +- +- // The properties to group by (if empty, no grouping is applied to the +- // result set). +- repeated PropertyReference group_by = 6; +- +- // A starting point for the query results. Optional. Query cursors are +- // returned in query result batches. +- optional bytes /* serialized QueryCursor */ start_cursor = 7; +- +- // An ending point for the query results. Optional. Query cursors are +- // returned in query result batches. 
+- optional bytes /* serialized QueryCursor */ end_cursor = 8; +- +- // The number of results to skip. Applies before limit, but after all other +- // constraints (optional, defaults to 0). +- optional int32 offset = 10 [default=0]; +- +- // The maximum number of results to return. Applies after all other +- // constraints. Optional. +- optional int32 limit = 11; +-} +- +-// A representation of a kind. +-message KindExpression { +- // The name of the kind. +- required string name = 1; +-} +- +-// A reference to a property relative to the kind expressions. +-// exactly. +-message PropertyReference { +- // The name of the property. +- required string name = 2; +-} +- +-// A representation of a property in a projection. +-message PropertyExpression { +- enum AggregationFunction { +- FIRST = 1; +- } +- // The property to project. +- required PropertyReference property = 1; +- // The aggregation function to apply to the property. Optional. +- // Can only be used when grouping by at least one property. Must +- // then be set on all properties in the projection that are not +- // being grouped by. +- optional AggregationFunction aggregation_function = 2; +-} +- +-// The desired order for a specific property. +-message PropertyOrder { +- enum Direction { +- ASCENDING = 1; +- DESCENDING = 2; +- } +- // The property to order by. +- required PropertyReference property = 1; +- // The direction to order by. +- optional Direction direction = 2 [default=ASCENDING]; +-} +- +-// A holder for any type of filter. Exactly one field should be specified. +-message Filter { +- // A composite filter. +- optional CompositeFilter composite_filter = 1; +- // A filter on a property. +- optional PropertyFilter property_filter = 2; +-} +- +-// A filter that merges the multiple other filters using the given operation. +-message CompositeFilter { +- enum Operator { +- AND = 1; +- } +- +- // The operator for combining multiple filters. +- required Operator operator = 1; +- // The list of filters to combine. +- // Must contain at least one filter. +- repeated Filter filter = 2; +-} +- +-// A filter on a specific property. +-message PropertyFilter { +- enum Operator { +- LESS_THAN = 1; +- LESS_THAN_OR_EQUAL = 2; +- GREATER_THAN = 3; +- GREATER_THAN_OR_EQUAL = 4; +- EQUAL = 5; +- +- HAS_ANCESTOR = 11; +- } +- +- // The property to filter by. +- required PropertyReference property = 1; +- // The operator to filter by. +- required Operator operator = 2; +- // The value to compare the property to. +- required Value value = 3; +-} +- +-// A GQL query. +-message GqlQuery { +- required string query_string = 1; +- // When false, the query string must not contain a literal. +- optional bool allow_literal = 2 [default = false]; +- // A named argument must set field GqlQueryArg.name. +- // No two named arguments may have the same name. +- // For each non-reserved named binding site in the query string, +- // there must be a named argument with that name, +- // but not necessarily the inverse. +- repeated GqlQueryArg name_arg = 3; +- // Numbered binding site @1 references the first numbered argument, +- // effectively using 1-based indexing, rather than the usual 0. +- // A numbered argument must NOT set field GqlQueryArg.name. +- // For each binding site numbered i in query_string, +- // there must be an ith numbered argument. +- // The inverse must also be true. +- repeated GqlQueryArg number_arg = 4; +-} +- +-// A binding argument for a GQL query. +-// Exactly one of fields value and cursor must be set. 
+-message GqlQueryArg { +- // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". +- // Must not match regex "__.*__". +- // Must not be "". +- optional string name = 1; +- optional Value value = 2; +- optional bytes cursor = 3; +-} +- +-// A batch of results produced by a query. +-message QueryResultBatch { +- // The possible values for the 'more_results' field. +- enum MoreResultsType { +- NOT_FINISHED = 1; // There are additional batches to fetch from this query. +- MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more +- // results after the limit. +- NO_MORE_RESULTS = 3; // The query has been exhausted. +- } +- +- // The result type for every entity in entityResults. +- required EntityResult.ResultType entity_result_type = 1; +- // The results for this batch. +- repeated EntityResult entity_result = 2; +- +- // A cursor that points to the position after the last result in the batch. +- // May be absent. +- optional bytes /* serialized QueryCursor */ end_cursor = 4; +- +- // The state of the query after the current batch. +- required MoreResultsType more_results = 5; +- +- // The number of results skipped because of Query.offset. +- optional int32 skipped_results = 6; +-} +- +-// A set of changes to apply. +-// +-// No entity in this message may have a reserved property name, +-// not even a property in an entity in a value. +-// No value in this message may have meaning 18, +-// not even a value in an entity in another value. +-// +-// If entities with duplicate keys are present, an arbitrary choice will +-// be made as to which is written. +-message Mutation { +- // Entities to upsert. +- // Each upserted entity's key must have a complete path and +- // must not be reserved/read-only. +- repeated Entity upsert = 1; +- // Entities to update. +- // Each updated entity's key must have a complete path and +- // must not be reserved/read-only. +- repeated Entity update = 2; +- // Entities to insert. +- // Each inserted entity's key must have a complete path and +- // must not be reserved/read-only. +- repeated Entity insert = 3; +- // Insert entities with a newly allocated ID. +- // Each inserted entity's key must omit the final identifier in its path and +- // must not be reserved/read-only. +- repeated Entity insert_auto_id = 4; +- // Keys of entities to delete. +- // Each key must have a complete key path and must not be reserved/read-only. +- repeated Key delete = 5; +- // Ignore a user specified read-only period. Optional. +- optional bool force = 6; +-} +- +-// The result of applying a mutation. +-message MutationResult { +- // Number of index writes. +- required int32 index_updates = 1; +- // Keys for insertAutoId entities. One per entity from the +- // request, in the same order. +- repeated Key insert_auto_id_key = 2; +-} +- +-// Options shared by read requests. +-message ReadOptions { +- enum ReadConsistency { +- DEFAULT = 0; +- STRONG = 1; +- EVENTUAL = 2; +- } +- +- // The read consistency to use. +- // Cannot be set when transaction is set. +- // Lookup and ancestor queries default to STRONG, global queries default to +- // EVENTUAL and cannot be set to STRONG. +- optional ReadConsistency read_consistency = 1 [default=DEFAULT]; +- +- // The transaction to use. Optional. +- optional bytes /* serialized Transaction */ transaction = 2; +-} +- +-// The request for Lookup. +-message LookupRequest { +- +- // Options for this lookup request. Optional. +- optional ReadOptions read_options = 1; +- // Keys of entities to look up from the datastore. 
+- repeated Key key = 3; +-} +- +-// The response for Lookup. +-message LookupResponse { +- +- // The order of results in these fields is undefined and has no relation to +- // the order of the keys in the input. +- +- // Entities found as ResultType.FULL entities. +- repeated EntityResult found = 1; +- +- // Entities not found as ResultType.KEY_ONLY entities. +- repeated EntityResult missing = 2; +- +- // A list of keys that were not looked up due to resource constraints. +- repeated Key deferred = 3; +-} +- +- +-// The request for RunQuery. +-message RunQueryRequest { +- +- // The options for this query. +- optional ReadOptions read_options = 1; +- +- // Entities are partitioned into subsets, identified by a dataset (usually +- // implicitly specified by the project) and namespace ID. Queries are scoped +- // to a single partition. +- // This partition ID is normalized with the standard default context +- // partition ID, but all other partition IDs in RunQueryRequest are +- // normalized with this partition ID as the context partition ID. +- optional PartitionId partition_id = 2; +- +- // The query to run. +- // Either this field or field gql_query must be set, but not both. +- optional Query query = 3; +- // The GQL query to run. +- // Either this field or field query must be set, but not both. +- optional GqlQuery gql_query = 7; +-} +- +-// The response for RunQuery. +-message RunQueryResponse { +- +- // A batch of query results (always present). +- optional QueryResultBatch batch = 1; +- +-} +- +-// The request for BeginTransaction. +-message BeginTransactionRequest { +- +- enum IsolationLevel { +- SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions +- // conflict if their mutations conflict. For example: +- // Read(A),Write(B) may not conflict with Read(B),Write(A), +- // but Read(B),Write(B) does conflict with Read(B),Write(B). +- SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent +- // transactions conflict if they cannot be serialized. +- // For example Read(A),Write(B) does conflict with +- // Read(B),Write(A) but Read(A) may not conflict with +- // Write(A). +- } +- +- // The transaction isolation level. +- optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; +-} +- +-// The response for BeginTransaction. +-message BeginTransactionResponse { +- +- // The transaction identifier (always present). +- optional bytes /* serialized Transaction */ transaction = 1; +-} +- +-// The request for Rollback. +-message RollbackRequest { +- +- // The transaction identifier, returned by a call to +- // beginTransaction. +- required bytes /* serialized Transaction */ transaction = 1; +-} +- +-// The response for Rollback. +-message RollbackResponse { +-// Empty +-} +- +-// The request for Commit. +-message CommitRequest { +- +- enum Mode { +- TRANSACTIONAL = 1; +- NON_TRANSACTIONAL = 2; +- } +- +- // The transaction identifier, returned by a call to +- // beginTransaction. Must be set when mode is TRANSACTIONAL. +- optional bytes /* serialized Transaction */ transaction = 1; +- // The mutation to perform. Optional. +- optional Mutation mutation = 2; +- // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. +- optional Mode mode = 5 [default=TRANSACTIONAL]; +-} +- +-// The response for Commit. +-message CommitResponse { +- +- // The result of performing the mutation (if any). +- optional MutationResult mutation_result = 1; +-} +- +-// The request for AllocateIds. 
+-message AllocateIdsRequest { +- +- // A list of keys with incomplete key paths to allocate IDs for. +- // No key may be reserved/read-only. +- repeated Key key = 1; +-} +- +-// The response for AllocateIds. +-message AllocateIdsResponse { +- +- // The keys specified in the request (in the same order), each with +- // its key path completed with a newly allocated ID. +- repeated Key key = 1; +-} +- +-// Each rpc normalizes the partition IDs of the keys in its input entities, +-// and always returns entities with keys with normalized partition IDs. +-// (Note that applies to all entities, including entities in values.) +-service DatastoreService { +- // Look up some entities by key. +- rpc Lookup(LookupRequest) returns (LookupResponse) { +- }; +- // Query for entities. +- rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { +- }; +- // Begin a new transaction. +- rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { +- }; +- // Commit a transaction, optionally creating, deleting or modifying some +- // entities. +- rpc Commit(CommitRequest) returns (CommitResponse) { +- }; +- // Roll back a transaction. +- rpc Rollback(RollbackRequest) returns (RollbackResponse) { +- }; +- // Allocate IDs for incomplete keys (useful for referencing an entity before +- // it is inserted). +- rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { +- }; +-} +diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go +deleted file mode 100644 +index aafd683..0000000 +--- a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go ++++ /dev/null +@@ -1,57 +0,0 @@ +-// Copyright 2014 Google Inc. All Rights Reserved. +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-// Package testutil contains helper functions for writing tests. +-package testutil +- +-import ( +- "io/ioutil" +- "log" +- "net/http" +- "os" +- +- "golang.org/x/net/context" +- "golang.org/x/oauth2" +- "golang.org/x/oauth2/google" +- "google.golang.org/cloud" +-) +- +-const ( +- envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" +- envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +-) +- +-func Context(scopes ...string) context.Context { +- key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) +- if key == "" || projID == "" { +- log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") +- } +- jsonKey, err := ioutil.ReadFile(key) +- if err != nil { +- log.Fatalf("Cannot read the JSON key file, err: %v", err) +- } +- conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) +- if err != nil { +- log.Fatal(err) +- } +- return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) +-} +- +-func NoAuthContext() context.Context { +- projID := os.Getenv(envProjID) +- if projID == "" { +- log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. 
See CONTRIBUTING.md for details.") +- } +- return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) +-} +-- +1.9.3 + diff --git a/SOURCES/Change-etcd-server-port.patch b/SOURCES/Change-etcd-server-port.patch index 52d0e2f..ba956af 100644 --- a/SOURCES/Change-etcd-server-port.patch +++ b/SOURCES/Change-etcd-server-port.patch @@ -1,6 +1,6 @@ -From 44d2db8243f51a8319aa3893233df0c87f097c3a Mon Sep 17 00:00:00 2001 +From 23d7f34af3e6974eea43abe1ca9b3054822014cf Mon Sep 17 00:00:00 2001 From: Jan Chaloupka -Date: Tue, 25 Aug 2015 08:26:42 +0200 +Date: Mon, 10 Aug 2015 09:55:49 +0200 Subject: [PATCH] Change etcd server port --- @@ -8,15 +8,15 @@ Subject: [PATCH] Change etcd server port 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/init/systemd/environ/apiserver b/contrib/init/systemd/environ/apiserver -index d0b1900..328b5c4 100644 +index 223b992..ddb5974 100644 --- a/contrib/init/systemd/environ/apiserver +++ b/contrib/init/systemd/environ/apiserver -@@ -14,7 +14,7 @@ KUBE_API_ADDRESS="--address=127.0.0.1" - # KUBELET_PORT="--kubelet_port=10250" +@@ -14,7 +14,7 @@ KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + # KUBELET_PORT="--kubelet-port=10250" # Comma separated list of nodes in the etcd cluster --KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:4001" -+KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:2379" +-KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:4001" ++KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379" # Address range to use for services KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" diff --git a/SOURCES/Fix-Persistent-Volumes-and-Persistent-Volume-Claims.patch b/SOURCES/Fix-Persistent-Volumes-and-Persistent-Volume-Claims.patch deleted file mode 100644 index d13745a..0000000 --- a/SOURCES/Fix-Persistent-Volumes-and-Persistent-Volume-Claims.patch +++ /dev/null @@ -1,68 +0,0 @@ -From e77337bfa867704bb79b4224191bd69d5de3d847 Mon Sep 17 00:00:00 2001 -From: Jan Chaloupka -Date: Mon, 20 Jul 2015 20:13:42 +0200 -Subject: [PATCH] Fix Persistent Volumes and Persistent Volume Claims - ---- - hack/test-cmd.sh | 20 ++++++++++---------- - 1 file changed, 10 insertions(+), 10 deletions(-) - -diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh -index f5088a7..18f1c87 100755 ---- a/hack/test-cmd.sh -+++ b/hack/test-cmd.sh -@@ -680,19 +680,19 @@ __EOF__ - - ### Create and delete persistent volume examples - # Pre-condition: no persistent volumes currently exist -- kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' -+ kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}" -- kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0001:' -+ kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:' - kubectl delete pv pv0001 "${kube_flags[@]}" - kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}" -- kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0002:' -+ kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:' - kubectl delete pv pv0002 "${kube_flags[@]}" - kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}" -- kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0003:' -+ kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:' - kubectl delete pv 
pv0003 "${kube_flags[@]}" - # Post-condition: no PVs -- kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' -+ kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' - - ############################ - # Persistent Volume Claims # -@@ -700,21 +700,21 @@ __EOF__ - - ### Create and delete persistent volume claim examples - # Pre-condition: no persistent volume claims currently exist -- kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' -+ kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}" -- kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-1:' -+ kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:' - kubectl delete pvc myclaim-1 "${kube_flags[@]}" - - kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}" -- kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-2:' -+ kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:' - kubectl delete pvc myclaim-2 "${kube_flags[@]}" - - kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}" -- kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-3:' -+ kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:' - kubectl delete pvc myclaim-3 "${kube_flags[@]}" - # Post-condition: no PVCs -- kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' -+ kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' - - - --- -1.9.3 - diff --git a/SOURCES/add-pod-infrastructure-container.patch b/SOURCES/add-pod-infrastructure-container.patch new file mode 100644 index 0000000..b05ffad --- /dev/null +++ b/SOURCES/add-pod-infrastructure-container.patch @@ -0,0 +1,38 @@ +From 59fc4eb222f6a8a481ae479d971397c45e83e3ed Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Mon, 4 Jan 2016 08:31:03 +0100 +Subject: [PATCH] add pod infrastructure container + +--- + init/systemd/environ/kubelet | 3 +++ + init/systemd/kubelet.service | 1 + + 2 files changed, 4 insertions(+) + +diff --git a/init/systemd/environ/kubelet b/init/systemd/environ/kubelet +index db93a5e..028043f 100644 +--- a/init/systemd/environ/kubelet ++++ b/init/systemd/environ/kubelet +@@ -13,5 +13,8 @@ KUBELET_HOSTNAME="--hostname-override=127.0.0.1" + # location of the api-server + KUBELET_API_SERVER="--api-servers=http://127.0.0.1:8080" + ++# pod infrastructure container ++KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest" ++ + # Add your own! 
+ KUBELET_ARGS="" +diff --git a/init/systemd/kubelet.service b/init/systemd/kubelet.service +index 41a25f5..fb540cb 100644 +--- a/init/systemd/kubelet.service ++++ b/init/systemd/kubelet.service +@@ -16,6 +16,7 @@ ExecStart=/usr/bin/kubelet \ + $KUBELET_PORT \ + $KUBELET_HOSTNAME \ + $KUBE_ALLOW_PRIV \ ++ $KUBELET_POD_INFRA_CONTAINER \ + $KUBELET_ARGS + Restart=on-failure + +-- +1.9.3 + diff --git a/SOURCES/build-with-debug-info.patch b/SOURCES/build-with-debug-info.patch new file mode 100644 index 0000000..a875233 --- /dev/null +++ b/SOURCES/build-with-debug-info.patch @@ -0,0 +1,34 @@ +From b2a5ea771b9aa2206d4a4445fe032a2c99038c28 Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Mon, 4 Jan 2016 06:44:01 +0100 +Subject: [PATCH] build with debug info + +--- + hack/lib/golang.sh | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh +index 04c56cc..26a8dc2 100755 +--- a/hack/lib/golang.sh ++++ b/hack/lib/golang.sh +@@ -371,7 +371,7 @@ kube::golang::build_binaries_for_platform() { + local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}") + CGO_ENABLED=0 go build -o "${outfile}" \ + "${goflags[@]:+${goflags[@]}}" \ +- -ldflags "${goldflags}" \ ++ -ldflags "-B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') ${goldflags}" \ + "${binary}" + kube::log::progress "*" + done +@@ -379,7 +379,7 @@ kube::golang::build_binaries_for_platform() { + local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}") + go build -o "${outfile}" \ + "${goflags[@]:+${goflags[@]}}" \ +- -ldflags "${goldflags}" \ ++ -ldflags "-B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') ${goldflags}" \ + "${binary}" + kube::log::progress "*" + done +-- +1.9.3 + diff --git a/SOURCES/change-internal-to-inteernal.patch b/SOURCES/change-internal-to-inteernal.patch deleted file mode 100644 index 0d3903a..0000000 --- a/SOURCES/change-internal-to-inteernal.patch +++ /dev/null @@ -1,4927 +0,0 @@ -From 3410b407040157a276456eaaa22c2297e302f70e Mon Sep 17 00:00:00 2001 -From: Jan Chaloupka -Date: Sat, 1 Aug 2015 10:42:54 +0200 -Subject: [PATCH] change internal to inteernal - ---- - .../gcloud-golang/compute/metadata/metadata.go | 2 +- - .../cloud/compute/metadata/metadata.go | 2 +- - .../src/google.golang.org/cloud/inteernal/cloud.go | 128 ++ - .../cloud/inteernal/datastore/datastore_v1.pb.go | 1633 ++++++++++++++++++++ - .../cloud/inteernal/datastore/datastore_v1.proto | 594 +++++++ - .../cloud/inteernal/testutil/context.go | 57 + - .../src/google.golang.org/cloud/internal/cloud.go | 128 -- - .../cloud/internal/datastore/datastore_v1.pb.go | 1633 -------------------- - .../cloud/internal/datastore/datastore_v1.proto | 594 ------- - .../cloud/internal/testutil/context.go | 57 - - 10 files changed, 2414 insertions(+), 2414 deletions(-) - create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go - create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go - create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto - create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go - delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go - delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go - delete mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto - delete 
mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go - -diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go -index b007cde..c92267f 100644 ---- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go -+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go -@@ -30,7 +30,7 @@ import ( - "sync" - "time" - -- "google.golang.org/cloud/internal" -+ "google.golang.org/cloud/inteernal" - ) - - type cachedValue struct { -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go -index 7753a05..6102500 100644 ---- a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go -+++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go -@@ -29,7 +29,7 @@ import ( - "sync" - "time" - -- "google.golang.org/cloud/internal" -+ "google.golang.org/cloud/inteernal" - ) - - type cachedValue struct { -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go -new file mode 100644 -index 0000000..984323c ---- /dev/null -+++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/cloud.go -@@ -0,0 +1,128 @@ -+// Copyright 2014 Google Inc. All Rights Reserved. -+// -+// Licensed under the Apache License, Version 2.0 (the "License"); -+// you may not use this file except in compliance with the License. -+// You may obtain a copy of the License at -+// -+// http://www.apache.org/licenses/LICENSE-2.0 -+// -+// Unless required by applicable law or agreed to in writing, software -+// distributed under the License is distributed on an "AS IS" BASIS, -+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+// See the License for the specific language governing permissions and -+// limitations under the License. -+ -+// Package internal provides support for the cloud packages. -+// -+// Users should not import this package directly. -+package internal -+ -+import ( -+ "fmt" -+ "net/http" -+ "sync" -+ -+ "golang.org/x/net/context" -+) -+ -+type contextKey struct{} -+ -+func WithContext(parent context.Context, projID string, c *http.Client) context.Context { -+ if c == nil { -+ panic("nil *http.Client passed to WithContext") -+ } -+ if projID == "" { -+ panic("empty project ID passed to WithContext") -+ } -+ return context.WithValue(parent, contextKey{}, &cloudContext{ -+ ProjectID: projID, -+ HTTPClient: c, -+ }) -+} -+ -+const userAgent = "gcloud-golang/0.1" -+ -+type cloudContext struct { -+ ProjectID string -+ HTTPClient *http.Client -+ -+ mu sync.Mutex // guards svc -+ svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -+} -+ -+// Service returns the result of the fill function if it's never been -+// called before for the given name (which is assumed to be an API -+// service name, like "datastore"). If it has already been cached, the fill -+// func is not run. -+// It's safe for concurrent use by multiple goroutines. 
-+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { -+ return cc(ctx).service(name, fill) -+} -+ -+func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { -+ c.mu.Lock() -+ defer c.mu.Unlock() -+ -+ if c.svc == nil { -+ c.svc = make(map[string]interface{}) -+ } else if v, ok := c.svc[name]; ok { -+ return v -+ } -+ v := fill(c.HTTPClient) -+ c.svc[name] = v -+ return v -+} -+ -+// Transport is an http.RoundTripper that appends -+// Google Cloud client's user-agent to the original -+// request's user-agent header. -+type Transport struct { -+ // Base represents the actual http.RoundTripper -+ // the requests will be delegated to. -+ Base http.RoundTripper -+} -+ -+// RoundTrip appends a user-agent to the existing user-agent -+// header and delegates the request to the base http.RoundTripper. -+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { -+ req = cloneRequest(req) -+ ua := req.Header.Get("User-Agent") -+ if ua == "" { -+ ua = userAgent -+ } else { -+ ua = fmt.Sprintf("%s;%s", ua, userAgent) -+ } -+ req.Header.Set("User-Agent", ua) -+ return t.Base.RoundTrip(req) -+} -+ -+// cloneRequest returns a clone of the provided *http.Request. -+// The clone is a shallow copy of the struct and its Header map. -+func cloneRequest(r *http.Request) *http.Request { -+ // shallow copy of the struct -+ r2 := new(http.Request) -+ *r2 = *r -+ // deep copy of the Header -+ r2.Header = make(http.Header) -+ for k, s := range r.Header { -+ r2.Header[k] = s -+ } -+ return r2 -+} -+ -+func ProjID(ctx context.Context) string { -+ return cc(ctx).ProjectID -+} -+ -+func HTTPClient(ctx context.Context) *http.Client { -+ return cc(ctx).HTTPClient -+} -+ -+// cc returns the internal *cloudContext (cc) state for a context.Context. -+// It panics if the user did it wrong. -+func cc(ctx context.Context) *cloudContext { -+ if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { -+ return c -+ } -+ panic("invalid context.Context type; it should be created with cloud.NewContext") -+} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go -new file mode 100644 -index 0000000..be903e5 ---- /dev/null -+++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.pb.go -@@ -0,0 +1,1633 @@ -+// Code generated by protoc-gen-go. -+// source: datastore_v1.proto -+// DO NOT EDIT! -+ -+/* -+Package pb is a generated protocol buffer package. -+ -+It is generated from these files: -+ datastore_v1.proto -+ -+It has these top-level messages: -+ PartitionId -+ Key -+ Value -+ Property -+ Entity -+ EntityResult -+ Query -+ KindExpression -+ PropertyReference -+ PropertyExpression -+ PropertyOrder -+ Filter -+ CompositeFilter -+ PropertyFilter -+ GqlQuery -+ GqlQueryArg -+ QueryResultBatch -+ Mutation -+ MutationResult -+ ReadOptions -+ LookupRequest -+ LookupResponse -+ RunQueryRequest -+ RunQueryResponse -+ BeginTransactionRequest -+ BeginTransactionResponse -+ RollbackRequest -+ RollbackResponse -+ CommitRequest -+ CommitResponse -+ AllocateIdsRequest -+ AllocateIdsResponse -+*/ -+package pb -+ -+import proto "github.com/golang/protobuf/proto" -+import math "math" -+ -+// Reference imports to suppress errors if they are not otherwise used. -+var _ = proto.Marshal -+var _ = math.Inf -+ -+// Specifies what data the 'entity' field contains. 
-+// A ResultType is either implied (for example, in LookupResponse.found it -+// is always FULL) or specified by context (for example, in message -+// QueryResultBatch, field 'entity_result_type' specifies a ResultType -+// for all the values in field 'entity_result'). -+type EntityResult_ResultType int32 -+ -+const ( -+ EntityResult_FULL EntityResult_ResultType = 1 -+ EntityResult_PROJECTION EntityResult_ResultType = 2 -+ // The entity may have no key. -+ // A property value may have meaning 18. -+ EntityResult_KEY_ONLY EntityResult_ResultType = 3 -+) -+ -+var EntityResult_ResultType_name = map[int32]string{ -+ 1: "FULL", -+ 2: "PROJECTION", -+ 3: "KEY_ONLY", -+} -+var EntityResult_ResultType_value = map[string]int32{ -+ "FULL": 1, -+ "PROJECTION": 2, -+ "KEY_ONLY": 3, -+} -+ -+func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { -+ p := new(EntityResult_ResultType) -+ *p = x -+ return p -+} -+func (x EntityResult_ResultType) String() string { -+ return proto.EnumName(EntityResult_ResultType_name, int32(x)) -+} -+func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") -+ if err != nil { -+ return err -+ } -+ *x = EntityResult_ResultType(value) -+ return nil -+} -+ -+type PropertyExpression_AggregationFunction int32 -+ -+const ( -+ PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 -+) -+ -+var PropertyExpression_AggregationFunction_name = map[int32]string{ -+ 1: "FIRST", -+} -+var PropertyExpression_AggregationFunction_value = map[string]int32{ -+ "FIRST": 1, -+} -+ -+func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { -+ p := new(PropertyExpression_AggregationFunction) -+ *p = x -+ return p -+} -+func (x PropertyExpression_AggregationFunction) String() string { -+ return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) -+} -+func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") -+ if err != nil { -+ return err -+ } -+ *x = PropertyExpression_AggregationFunction(value) -+ return nil -+} -+ -+type PropertyOrder_Direction int32 -+ -+const ( -+ PropertyOrder_ASCENDING PropertyOrder_Direction = 1 -+ PropertyOrder_DESCENDING PropertyOrder_Direction = 2 -+) -+ -+var PropertyOrder_Direction_name = map[int32]string{ -+ 1: "ASCENDING", -+ 2: "DESCENDING", -+} -+var PropertyOrder_Direction_value = map[string]int32{ -+ "ASCENDING": 1, -+ "DESCENDING": 2, -+} -+ -+func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { -+ p := new(PropertyOrder_Direction) -+ *p = x -+ return p -+} -+func (x PropertyOrder_Direction) String() string { -+ return proto.EnumName(PropertyOrder_Direction_name, int32(x)) -+} -+func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") -+ if err != nil { -+ return err -+ } -+ *x = PropertyOrder_Direction(value) -+ return nil -+} -+ -+type CompositeFilter_Operator int32 -+ -+const ( -+ CompositeFilter_AND CompositeFilter_Operator = 1 -+) -+ -+var CompositeFilter_Operator_name = map[int32]string{ -+ 1: "AND", -+} -+var CompositeFilter_Operator_value = map[string]int32{ -+ "AND": 1, -+} -+ -+func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { -+ p := 
new(CompositeFilter_Operator) -+ *p = x -+ return p -+} -+func (x CompositeFilter_Operator) String() string { -+ return proto.EnumName(CompositeFilter_Operator_name, int32(x)) -+} -+func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") -+ if err != nil { -+ return err -+ } -+ *x = CompositeFilter_Operator(value) -+ return nil -+} -+ -+type PropertyFilter_Operator int32 -+ -+const ( -+ PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 -+ PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 -+ PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 -+ PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 -+ PropertyFilter_EQUAL PropertyFilter_Operator = 5 -+ PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 -+) -+ -+var PropertyFilter_Operator_name = map[int32]string{ -+ 1: "LESS_THAN", -+ 2: "LESS_THAN_OR_EQUAL", -+ 3: "GREATER_THAN", -+ 4: "GREATER_THAN_OR_EQUAL", -+ 5: "EQUAL", -+ 11: "HAS_ANCESTOR", -+} -+var PropertyFilter_Operator_value = map[string]int32{ -+ "LESS_THAN": 1, -+ "LESS_THAN_OR_EQUAL": 2, -+ "GREATER_THAN": 3, -+ "GREATER_THAN_OR_EQUAL": 4, -+ "EQUAL": 5, -+ "HAS_ANCESTOR": 11, -+} -+ -+func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { -+ p := new(PropertyFilter_Operator) -+ *p = x -+ return p -+} -+func (x PropertyFilter_Operator) String() string { -+ return proto.EnumName(PropertyFilter_Operator_name, int32(x)) -+} -+func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") -+ if err != nil { -+ return err -+ } -+ *x = PropertyFilter_Operator(value) -+ return nil -+} -+ -+// The possible values for the 'more_results' field. -+type QueryResultBatch_MoreResultsType int32 -+ -+const ( -+ QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 -+ QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 -+ // results after the limit. 
-+ QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 -+) -+ -+var QueryResultBatch_MoreResultsType_name = map[int32]string{ -+ 1: "NOT_FINISHED", -+ 2: "MORE_RESULTS_AFTER_LIMIT", -+ 3: "NO_MORE_RESULTS", -+} -+var QueryResultBatch_MoreResultsType_value = map[string]int32{ -+ "NOT_FINISHED": 1, -+ "MORE_RESULTS_AFTER_LIMIT": 2, -+ "NO_MORE_RESULTS": 3, -+} -+ -+func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { -+ p := new(QueryResultBatch_MoreResultsType) -+ *p = x -+ return p -+} -+func (x QueryResultBatch_MoreResultsType) String() string { -+ return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) -+} -+func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") -+ if err != nil { -+ return err -+ } -+ *x = QueryResultBatch_MoreResultsType(value) -+ return nil -+} -+ -+type ReadOptions_ReadConsistency int32 -+ -+const ( -+ ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 -+ ReadOptions_STRONG ReadOptions_ReadConsistency = 1 -+ ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 -+) -+ -+var ReadOptions_ReadConsistency_name = map[int32]string{ -+ 0: "DEFAULT", -+ 1: "STRONG", -+ 2: "EVENTUAL", -+} -+var ReadOptions_ReadConsistency_value = map[string]int32{ -+ "DEFAULT": 0, -+ "STRONG": 1, -+ "EVENTUAL": 2, -+} -+ -+func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { -+ p := new(ReadOptions_ReadConsistency) -+ *p = x -+ return p -+} -+func (x ReadOptions_ReadConsistency) String() string { -+ return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) -+} -+func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") -+ if err != nil { -+ return err -+ } -+ *x = ReadOptions_ReadConsistency(value) -+ return nil -+} -+ -+type BeginTransactionRequest_IsolationLevel int32 -+ -+const ( -+ BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 -+ // conflict if their mutations conflict. For example: -+ // Read(A),Write(B) may not conflict with Read(B),Write(A), -+ // but Read(B),Write(B) does conflict with Read(B),Write(B). 
-+ BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 -+) -+ -+var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ -+ 0: "SNAPSHOT", -+ 1: "SERIALIZABLE", -+} -+var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ -+ "SNAPSHOT": 0, -+ "SERIALIZABLE": 1, -+} -+ -+func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { -+ p := new(BeginTransactionRequest_IsolationLevel) -+ *p = x -+ return p -+} -+func (x BeginTransactionRequest_IsolationLevel) String() string { -+ return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) -+} -+func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") -+ if err != nil { -+ return err -+ } -+ *x = BeginTransactionRequest_IsolationLevel(value) -+ return nil -+} -+ -+type CommitRequest_Mode int32 -+ -+const ( -+ CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 -+ CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 -+) -+ -+var CommitRequest_Mode_name = map[int32]string{ -+ 1: "TRANSACTIONAL", -+ 2: "NON_TRANSACTIONAL", -+} -+var CommitRequest_Mode_value = map[string]int32{ -+ "TRANSACTIONAL": 1, -+ "NON_TRANSACTIONAL": 2, -+} -+ -+func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { -+ p := new(CommitRequest_Mode) -+ *p = x -+ return p -+} -+func (x CommitRequest_Mode) String() string { -+ return proto.EnumName(CommitRequest_Mode_name, int32(x)) -+} -+func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { -+ value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") -+ if err != nil { -+ return err -+ } -+ *x = CommitRequest_Mode(value) -+ return nil -+} -+ -+// An identifier for a particular subset of entities. -+// -+// Entities are partitioned into various subsets, each used by different -+// datasets and different namespaces within a dataset and so forth. -+// -+// All input partition IDs are normalized before use. -+// A partition ID is normalized as follows: -+// If the partition ID is unset or is set to an empty partition ID, replace it -+// with the context partition ID. -+// Otherwise, if the partition ID has no dataset ID, assign it the context -+// partition ID's dataset ID. -+// Unless otherwise documented, the context partition ID has the dataset ID set -+// to the context dataset ID and no other partition dimension set. -+// -+// A partition ID is empty if all of its fields are unset. -+// -+// Partition dimension: -+// A dimension may be unset. -+// A dimension's value must never be "". -+// A dimension's value must match [A-Za-z\d\.\-_]{1,100} -+// If the value of any dimension matches regex "__.*__", -+// the partition is reserved/read-only. -+// A reserved/read-only partition ID is forbidden in certain documented contexts. -+// -+// Dataset ID: -+// A dataset id's value must never be "". -+// A dataset id's value must match -+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} -+type PartitionId struct { -+ // The dataset ID. -+ DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` -+ // The namespace. 
-+ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *PartitionId) Reset() { *m = PartitionId{} } -+func (m *PartitionId) String() string { return proto.CompactTextString(m) } -+func (*PartitionId) ProtoMessage() {} -+ -+func (m *PartitionId) GetDatasetId() string { -+ if m != nil && m.DatasetId != nil { -+ return *m.DatasetId -+ } -+ return "" -+} -+ -+func (m *PartitionId) GetNamespace() string { -+ if m != nil && m.Namespace != nil { -+ return *m.Namespace -+ } -+ return "" -+} -+ -+// A unique identifier for an entity. -+// If a key's partition id or any of its path kinds or names are -+// reserved/read-only, the key is reserved/read-only. -+// A reserved/read-only key is forbidden in certain documented contexts. -+type Key struct { -+ // Entities are partitioned into subsets, currently identified by a dataset -+ // (usually implicitly specified by the project) and namespace ID. -+ // Queries are scoped to a single partition. -+ PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` -+ // The entity path. -+ // An entity path consists of one or more elements composed of a kind and a -+ // string or numerical identifier, which identify entities. The first -+ // element identifies a root entity, the second element identifies -+ // a child of the root entity, the third element a child of the -+ // second entity, and so forth. The entities identified by all prefixes of -+ // the path are called the element's ancestors. -+ // An entity path is always fully complete: ALL of the entity's ancestors -+ // are required to be in the path along with the entity identifier itself. -+ // The only exception is that in some documented cases, the identifier in the -+ // last path element (for the entity) itself may be omitted. A path can never -+ // be empty. -+ PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Key) Reset() { *m = Key{} } -+func (m *Key) String() string { return proto.CompactTextString(m) } -+func (*Key) ProtoMessage() {} -+ -+func (m *Key) GetPartitionId() *PartitionId { -+ if m != nil { -+ return m.PartitionId -+ } -+ return nil -+} -+ -+func (m *Key) GetPathElement() []*Key_PathElement { -+ if m != nil { -+ return m.PathElement -+ } -+ return nil -+} -+ -+// A (kind, ID/name) pair used to construct a key path. -+// -+// At most one of name or ID may be set. -+// If either is set, the element is complete. -+// If neither is set, the element is incomplete. -+type Key_PathElement struct { -+ // The kind of the entity. -+ // A kind matching regex "__.*__" is reserved/read-only. -+ // A kind must not contain more than 500 characters. -+ // Cannot be "". -+ Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` -+ // The ID of the entity. -+ // Never equal to zero. Values less than zero are discouraged and will not -+ // be supported in the future. -+ Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` -+ // The name of the entity. -+ // A name matching regex "__.*__" is reserved/read-only. -+ // A name must not be more than 500 characters. -+ // Cannot be "". 
-+ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } -+func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } -+func (*Key_PathElement) ProtoMessage() {} -+ -+func (m *Key_PathElement) GetKind() string { -+ if m != nil && m.Kind != nil { -+ return *m.Kind -+ } -+ return "" -+} -+ -+func (m *Key_PathElement) GetId() int64 { -+ if m != nil && m.Id != nil { -+ return *m.Id -+ } -+ return 0 -+} -+ -+func (m *Key_PathElement) GetName() string { -+ if m != nil && m.Name != nil { -+ return *m.Name -+ } -+ return "" -+} -+ -+// A message that can hold any of the supported value types and associated -+// metadata. -+// -+// At most one of the Value fields may be set. -+// If none are set the value is "null". -+// -+type Value struct { -+ // A boolean value. -+ BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` -+ // An integer value. -+ IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` -+ // A double value. -+ DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` -+ // A timestamp value. -+ TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` -+ // A key value. -+ KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` -+ // A blob key value. -+ BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` -+ // A UTF-8 encoded string value. -+ StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` -+ // A blob value. -+ BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` -+ // An entity value. -+ // May have no key. -+ // May have a key with an incomplete key path. -+ // May have a reserved/read-only key. -+ EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` -+ // A list value. -+ // Cannot contain another list value. -+ // Cannot also have a meaning and indexing set. -+ ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` -+ // The meaning field is reserved and should not be used. -+ Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` -+ // If the value should be indexed. -+ // -+ // The indexed property may be set for a -+ // null value. -+ // When indexed is true, stringValue -+ // is limited to 500 characters and the blob value is limited to 500 bytes. -+ // Exception: If meaning is set to 2, string_value is limited to 2038 -+ // characters regardless of indexed. -+ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 -+ // will be ignored on input (and will never be set on output). -+ // Input values by default have indexed set to -+ // true; however, you can explicitly set indexed to -+ // true if you want. (An output value never has -+ // indexed explicitly set to true.) If a value is -+ // itself an entity, it cannot have indexed set to -+ // true. -+ // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
-+ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Value) Reset() { *m = Value{} } -+func (m *Value) String() string { return proto.CompactTextString(m) } -+func (*Value) ProtoMessage() {} -+ -+const Default_Value_Indexed bool = true -+ -+func (m *Value) GetBooleanValue() bool { -+ if m != nil && m.BooleanValue != nil { -+ return *m.BooleanValue -+ } -+ return false -+} -+ -+func (m *Value) GetIntegerValue() int64 { -+ if m != nil && m.IntegerValue != nil { -+ return *m.IntegerValue -+ } -+ return 0 -+} -+ -+func (m *Value) GetDoubleValue() float64 { -+ if m != nil && m.DoubleValue != nil { -+ return *m.DoubleValue -+ } -+ return 0 -+} -+ -+func (m *Value) GetTimestampMicrosecondsValue() int64 { -+ if m != nil && m.TimestampMicrosecondsValue != nil { -+ return *m.TimestampMicrosecondsValue -+ } -+ return 0 -+} -+ -+func (m *Value) GetKeyValue() *Key { -+ if m != nil { -+ return m.KeyValue -+ } -+ return nil -+} -+ -+func (m *Value) GetBlobKeyValue() string { -+ if m != nil && m.BlobKeyValue != nil { -+ return *m.BlobKeyValue -+ } -+ return "" -+} -+ -+func (m *Value) GetStringValue() string { -+ if m != nil && m.StringValue != nil { -+ return *m.StringValue -+ } -+ return "" -+} -+ -+func (m *Value) GetBlobValue() []byte { -+ if m != nil { -+ return m.BlobValue -+ } -+ return nil -+} -+ -+func (m *Value) GetEntityValue() *Entity { -+ if m != nil { -+ return m.EntityValue -+ } -+ return nil -+} -+ -+func (m *Value) GetListValue() []*Value { -+ if m != nil { -+ return m.ListValue -+ } -+ return nil -+} -+ -+func (m *Value) GetMeaning() int32 { -+ if m != nil && m.Meaning != nil { -+ return *m.Meaning -+ } -+ return 0 -+} -+ -+func (m *Value) GetIndexed() bool { -+ if m != nil && m.Indexed != nil { -+ return *m.Indexed -+ } -+ return Default_Value_Indexed -+} -+ -+// An entity property. -+type Property struct { -+ // The name of the property. -+ // A property name matching regex "__.*__" is reserved. -+ // A reserved property name is forbidden in certain documented contexts. -+ // The name must not contain more than 500 characters. -+ // Cannot be "". -+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` -+ // The value(s) of the property. -+ // Each value can have only one value property populated. For example, -+ // you cannot have a values list of { value: { integerValue: 22, -+ // stringValue: "a" } }, but you can have { value: { listValue: -+ // [ { integerValue: 22 }, { stringValue: "a" } ] }. -+ Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Property) Reset() { *m = Property{} } -+func (m *Property) String() string { return proto.CompactTextString(m) } -+func (*Property) ProtoMessage() {} -+ -+func (m *Property) GetName() string { -+ if m != nil && m.Name != nil { -+ return *m.Name -+ } -+ return "" -+} -+ -+func (m *Property) GetValue() *Value { -+ if m != nil { -+ return m.Value -+ } -+ return nil -+} -+ -+// An entity. -+// -+// An entity is limited to 1 megabyte when stored. That roughly -+// corresponds to a limit of 1 megabyte for the serialized form of this -+// message. -+type Entity struct { -+ // The entity's key. -+ // -+ // An entity must have a key, unless otherwise documented (for example, -+ // an entity in Value.entityValue may have no key). -+ // An entity's kind is its key's path's last element's kind, -+ // or null if it has no key. 
-+ Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` -+ // The entity's properties. -+ // Each property's name must be unique for its entity. -+ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Entity) Reset() { *m = Entity{} } -+func (m *Entity) String() string { return proto.CompactTextString(m) } -+func (*Entity) ProtoMessage() {} -+ -+func (m *Entity) GetKey() *Key { -+ if m != nil { -+ return m.Key -+ } -+ return nil -+} -+ -+func (m *Entity) GetProperty() []*Property { -+ if m != nil { -+ return m.Property -+ } -+ return nil -+} -+ -+// The result of fetching an entity from the datastore. -+type EntityResult struct { -+ // The resulting entity. -+ Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *EntityResult) Reset() { *m = EntityResult{} } -+func (m *EntityResult) String() string { return proto.CompactTextString(m) } -+func (*EntityResult) ProtoMessage() {} -+ -+func (m *EntityResult) GetEntity() *Entity { -+ if m != nil { -+ return m.Entity -+ } -+ return nil -+} -+ -+// A query. -+type Query struct { -+ // The projection to return. If not set the entire entity is returned. -+ Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` -+ // The kinds to query (if empty, returns entities from all kinds). -+ Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` -+ // The filter to apply (optional). -+ Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` -+ // The order to apply to the query results (if empty, order is unspecified). -+ Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` -+ // The properties to group by (if empty, no grouping is applied to the -+ // result set). -+ GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` -+ // A starting point for the query results. Optional. Query cursors are -+ // returned in query result batches. -+ StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` -+ // An ending point for the query results. Optional. Query cursors are -+ // returned in query result batches. -+ EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` -+ // The number of results to skip. Applies before limit, but after all other -+ // constraints (optional, defaults to 0). -+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` -+ // The maximum number of results to return. Applies after all other -+ // constraints. Optional. 
-+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Query) Reset() { *m = Query{} } -+func (m *Query) String() string { return proto.CompactTextString(m) } -+func (*Query) ProtoMessage() {} -+ -+const Default_Query_Offset int32 = 0 -+ -+func (m *Query) GetProjection() []*PropertyExpression { -+ if m != nil { -+ return m.Projection -+ } -+ return nil -+} -+ -+func (m *Query) GetKind() []*KindExpression { -+ if m != nil { -+ return m.Kind -+ } -+ return nil -+} -+ -+func (m *Query) GetFilter() *Filter { -+ if m != nil { -+ return m.Filter -+ } -+ return nil -+} -+ -+func (m *Query) GetOrder() []*PropertyOrder { -+ if m != nil { -+ return m.Order -+ } -+ return nil -+} -+ -+func (m *Query) GetGroupBy() []*PropertyReference { -+ if m != nil { -+ return m.GroupBy -+ } -+ return nil -+} -+ -+func (m *Query) GetStartCursor() []byte { -+ if m != nil { -+ return m.StartCursor -+ } -+ return nil -+} -+ -+func (m *Query) GetEndCursor() []byte { -+ if m != nil { -+ return m.EndCursor -+ } -+ return nil -+} -+ -+func (m *Query) GetOffset() int32 { -+ if m != nil && m.Offset != nil { -+ return *m.Offset -+ } -+ return Default_Query_Offset -+} -+ -+func (m *Query) GetLimit() int32 { -+ if m != nil && m.Limit != nil { -+ return *m.Limit -+ } -+ return 0 -+} -+ -+// A representation of a kind. -+type KindExpression struct { -+ // The name of the kind. -+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *KindExpression) Reset() { *m = KindExpression{} } -+func (m *KindExpression) String() string { return proto.CompactTextString(m) } -+func (*KindExpression) ProtoMessage() {} -+ -+func (m *KindExpression) GetName() string { -+ if m != nil && m.Name != nil { -+ return *m.Name -+ } -+ return "" -+} -+ -+// A reference to a property relative to the kind expressions. -+// exactly. -+type PropertyReference struct { -+ // The name of the property. -+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *PropertyReference) Reset() { *m = PropertyReference{} } -+func (m *PropertyReference) String() string { return proto.CompactTextString(m) } -+func (*PropertyReference) ProtoMessage() {} -+ -+func (m *PropertyReference) GetName() string { -+ if m != nil && m.Name != nil { -+ return *m.Name -+ } -+ return "" -+} -+ -+// A representation of a property in a projection. -+type PropertyExpression struct { -+ // The property to project. -+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -+ // The aggregation function to apply to the property. Optional. -+ // Can only be used when grouping by at least one property. Must -+ // then be set on all properties in the projection that are not -+ // being grouped by. 
-+ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=pb.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } -+func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } -+func (*PropertyExpression) ProtoMessage() {} -+ -+func (m *PropertyExpression) GetProperty() *PropertyReference { -+ if m != nil { -+ return m.Property -+ } -+ return nil -+} -+ -+func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { -+ if m != nil && m.AggregationFunction != nil { -+ return *m.AggregationFunction -+ } -+ return PropertyExpression_FIRST -+} -+ -+// The desired order for a specific property. -+type PropertyOrder struct { -+ // The property to order by. -+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -+ // The direction to order by. -+ Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=pb.PropertyOrder_Direction,def=1" json:"direction,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } -+func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } -+func (*PropertyOrder) ProtoMessage() {} -+ -+const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING -+ -+func (m *PropertyOrder) GetProperty() *PropertyReference { -+ if m != nil { -+ return m.Property -+ } -+ return nil -+} -+ -+func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { -+ if m != nil && m.Direction != nil { -+ return *m.Direction -+ } -+ return Default_PropertyOrder_Direction -+} -+ -+// A holder for any type of filter. Exactly one field should be specified. -+type Filter struct { -+ // A composite filter. -+ CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` -+ // A filter on a property. -+ PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Filter) Reset() { *m = Filter{} } -+func (m *Filter) String() string { return proto.CompactTextString(m) } -+func (*Filter) ProtoMessage() {} -+ -+func (m *Filter) GetCompositeFilter() *CompositeFilter { -+ if m != nil { -+ return m.CompositeFilter -+ } -+ return nil -+} -+ -+func (m *Filter) GetPropertyFilter() *PropertyFilter { -+ if m != nil { -+ return m.PropertyFilter -+ } -+ return nil -+} -+ -+// A filter that merges the multiple other filters using the given operation. -+type CompositeFilter struct { -+ // The operator for combining multiple filters. -+ Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=pb.CompositeFilter_Operator" json:"operator,omitempty"` -+ // The list of filters to combine. -+ // Must contain at least one filter. 
-+ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } -+func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } -+func (*CompositeFilter) ProtoMessage() {} -+ -+func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { -+ if m != nil && m.Operator != nil { -+ return *m.Operator -+ } -+ return CompositeFilter_AND -+} -+ -+func (m *CompositeFilter) GetFilter() []*Filter { -+ if m != nil { -+ return m.Filter -+ } -+ return nil -+} -+ -+// A filter on a specific property. -+type PropertyFilter struct { -+ // The property to filter by. -+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -+ // The operator to filter by. -+ Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=pb.PropertyFilter_Operator" json:"operator,omitempty"` -+ // The value to compare the property to. -+ Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } -+func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } -+func (*PropertyFilter) ProtoMessage() {} -+ -+func (m *PropertyFilter) GetProperty() *PropertyReference { -+ if m != nil { -+ return m.Property -+ } -+ return nil -+} -+ -+func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { -+ if m != nil && m.Operator != nil { -+ return *m.Operator -+ } -+ return PropertyFilter_LESS_THAN -+} -+ -+func (m *PropertyFilter) GetValue() *Value { -+ if m != nil { -+ return m.Value -+ } -+ return nil -+} -+ -+// A GQL query. -+type GqlQuery struct { -+ QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` -+ // When false, the query string must not contain a literal. -+ AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` -+ // A named argument must set field GqlQueryArg.name. -+ // No two named arguments may have the same name. -+ // For each non-reserved named binding site in the query string, -+ // there must be a named argument with that name, -+ // but not necessarily the inverse. -+ NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` -+ // Numbered binding site @1 references the first numbered argument, -+ // effectively using 1-based indexing, rather than the usual 0. -+ // A numbered argument must NOT set field GqlQueryArg.name. -+ // For each binding site numbered i in query_string, -+ // there must be an ith numbered argument. -+ // The inverse must also be true. 
-+ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *GqlQuery) Reset() { *m = GqlQuery{} } -+func (m *GqlQuery) String() string { return proto.CompactTextString(m) } -+func (*GqlQuery) ProtoMessage() {} -+ -+const Default_GqlQuery_AllowLiteral bool = false -+ -+func (m *GqlQuery) GetQueryString() string { -+ if m != nil && m.QueryString != nil { -+ return *m.QueryString -+ } -+ return "" -+} -+ -+func (m *GqlQuery) GetAllowLiteral() bool { -+ if m != nil && m.AllowLiteral != nil { -+ return *m.AllowLiteral -+ } -+ return Default_GqlQuery_AllowLiteral -+} -+ -+func (m *GqlQuery) GetNameArg() []*GqlQueryArg { -+ if m != nil { -+ return m.NameArg -+ } -+ return nil -+} -+ -+func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { -+ if m != nil { -+ return m.NumberArg -+ } -+ return nil -+} -+ -+// A binding argument for a GQL query. -+// Exactly one of fields value and cursor must be set. -+type GqlQueryArg struct { -+ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". -+ // Must not match regex "__.*__". -+ // Must not be "". -+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -+ Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -+ Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } -+func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } -+func (*GqlQueryArg) ProtoMessage() {} -+ -+func (m *GqlQueryArg) GetName() string { -+ if m != nil && m.Name != nil { -+ return *m.Name -+ } -+ return "" -+} -+ -+func (m *GqlQueryArg) GetValue() *Value { -+ if m != nil { -+ return m.Value -+ } -+ return nil -+} -+ -+func (m *GqlQueryArg) GetCursor() []byte { -+ if m != nil { -+ return m.Cursor -+ } -+ return nil -+} -+ -+// A batch of results produced by a query. -+type QueryResultBatch struct { -+ // The result type for every entity in entityResults. -+ EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=pb.EntityResult_ResultType" json:"entity_result_type,omitempty"` -+ // The results for this batch. -+ EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` -+ // A cursor that points to the position after the last result in the batch. -+ // May be absent. -+ EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` -+ // The state of the query after the current batch. -+ MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=pb.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` -+ // The number of results skipped because of Query.offset. 
-+ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } -+func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } -+func (*QueryResultBatch) ProtoMessage() {} -+ -+func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { -+ if m != nil && m.EntityResultType != nil { -+ return *m.EntityResultType -+ } -+ return EntityResult_FULL -+} -+ -+func (m *QueryResultBatch) GetEntityResult() []*EntityResult { -+ if m != nil { -+ return m.EntityResult -+ } -+ return nil -+} -+ -+func (m *QueryResultBatch) GetEndCursor() []byte { -+ if m != nil { -+ return m.EndCursor -+ } -+ return nil -+} -+ -+func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { -+ if m != nil && m.MoreResults != nil { -+ return *m.MoreResults -+ } -+ return QueryResultBatch_NOT_FINISHED -+} -+ -+func (m *QueryResultBatch) GetSkippedResults() int32 { -+ if m != nil && m.SkippedResults != nil { -+ return *m.SkippedResults -+ } -+ return 0 -+} -+ -+// A set of changes to apply. -+// -+// No entity in this message may have a reserved property name, -+// not even a property in an entity in a value. -+// No value in this message may have meaning 18, -+// not even a value in an entity in another value. -+// -+// If entities with duplicate keys are present, an arbitrary choice will -+// be made as to which is written. -+type Mutation struct { -+ // Entities to upsert. -+ // Each upserted entity's key must have a complete path and -+ // must not be reserved/read-only. -+ Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` -+ // Entities to update. -+ // Each updated entity's key must have a complete path and -+ // must not be reserved/read-only. -+ Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` -+ // Entities to insert. -+ // Each inserted entity's key must have a complete path and -+ // must not be reserved/read-only. -+ Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` -+ // Insert entities with a newly allocated ID. -+ // Each inserted entity's key must omit the final identifier in its path and -+ // must not be reserved/read-only. -+ InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` -+ // Keys of entities to delete. -+ // Each key must have a complete key path and must not be reserved/read-only. -+ Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` -+ // Ignore a user specified read-only period. Optional. 
-+ Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *Mutation) Reset() { *m = Mutation{} } -+func (m *Mutation) String() string { return proto.CompactTextString(m) } -+func (*Mutation) ProtoMessage() {} -+ -+func (m *Mutation) GetUpsert() []*Entity { -+ if m != nil { -+ return m.Upsert -+ } -+ return nil -+} -+ -+func (m *Mutation) GetUpdate() []*Entity { -+ if m != nil { -+ return m.Update -+ } -+ return nil -+} -+ -+func (m *Mutation) GetInsert() []*Entity { -+ if m != nil { -+ return m.Insert -+ } -+ return nil -+} -+ -+func (m *Mutation) GetInsertAutoId() []*Entity { -+ if m != nil { -+ return m.InsertAutoId -+ } -+ return nil -+} -+ -+func (m *Mutation) GetDelete() []*Key { -+ if m != nil { -+ return m.Delete -+ } -+ return nil -+} -+ -+func (m *Mutation) GetForce() bool { -+ if m != nil && m.Force != nil { -+ return *m.Force -+ } -+ return false -+} -+ -+// The result of applying a mutation. -+type MutationResult struct { -+ // Number of index writes. -+ IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` -+ // Keys for insertAutoId entities. One per entity from the -+ // request, in the same order. -+ InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *MutationResult) Reset() { *m = MutationResult{} } -+func (m *MutationResult) String() string { return proto.CompactTextString(m) } -+func (*MutationResult) ProtoMessage() {} -+ -+func (m *MutationResult) GetIndexUpdates() int32 { -+ if m != nil && m.IndexUpdates != nil { -+ return *m.IndexUpdates -+ } -+ return 0 -+} -+ -+func (m *MutationResult) GetInsertAutoIdKey() []*Key { -+ if m != nil { -+ return m.InsertAutoIdKey -+ } -+ return nil -+} -+ -+// Options shared by read requests. -+type ReadOptions struct { -+ // The read consistency to use. -+ // Cannot be set when transaction is set. -+ // Lookup and ancestor queries default to STRONG, global queries default to -+ // EVENTUAL and cannot be set to STRONG. -+ ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=pb.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` -+ // The transaction to use. Optional. -+ Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *ReadOptions) Reset() { *m = ReadOptions{} } -+func (m *ReadOptions) String() string { return proto.CompactTextString(m) } -+func (*ReadOptions) ProtoMessage() {} -+ -+const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT -+ -+func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { -+ if m != nil && m.ReadConsistency != nil { -+ return *m.ReadConsistency -+ } -+ return Default_ReadOptions_ReadConsistency -+} -+ -+func (m *ReadOptions) GetTransaction() []byte { -+ if m != nil { -+ return m.Transaction -+ } -+ return nil -+} -+ -+// The request for Lookup. -+type LookupRequest struct { -+ // Options for this lookup request. Optional. -+ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` -+ // Keys of entities to look up from the datastore. 
-+ Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *LookupRequest) Reset() { *m = LookupRequest{} } -+func (m *LookupRequest) String() string { return proto.CompactTextString(m) } -+func (*LookupRequest) ProtoMessage() {} -+ -+func (m *LookupRequest) GetReadOptions() *ReadOptions { -+ if m != nil { -+ return m.ReadOptions -+ } -+ return nil -+} -+ -+func (m *LookupRequest) GetKey() []*Key { -+ if m != nil { -+ return m.Key -+ } -+ return nil -+} -+ -+// The response for Lookup. -+type LookupResponse struct { -+ // Entities found as ResultType.FULL entities. -+ Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` -+ // Entities not found as ResultType.KEY_ONLY entities. -+ Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` -+ // A list of keys that were not looked up due to resource constraints. -+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *LookupResponse) Reset() { *m = LookupResponse{} } -+func (m *LookupResponse) String() string { return proto.CompactTextString(m) } -+func (*LookupResponse) ProtoMessage() {} -+ -+func (m *LookupResponse) GetFound() []*EntityResult { -+ if m != nil { -+ return m.Found -+ } -+ return nil -+} -+ -+func (m *LookupResponse) GetMissing() []*EntityResult { -+ if m != nil { -+ return m.Missing -+ } -+ return nil -+} -+ -+func (m *LookupResponse) GetDeferred() []*Key { -+ if m != nil { -+ return m.Deferred -+ } -+ return nil -+} -+ -+// The request for RunQuery. -+type RunQueryRequest struct { -+ // The options for this query. -+ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` -+ // Entities are partitioned into subsets, identified by a dataset (usually -+ // implicitly specified by the project) and namespace ID. Queries are scoped -+ // to a single partition. -+ // This partition ID is normalized with the standard default context -+ // partition ID, but all other partition IDs in RunQueryRequest are -+ // normalized with this partition ID as the context partition ID. -+ PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` -+ // The query to run. -+ // Either this field or field gql_query must be set, but not both. -+ Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` -+ // The GQL query to run. -+ // Either this field or field query must be set, but not both. -+ GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } -+func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } -+func (*RunQueryRequest) ProtoMessage() {} -+ -+func (m *RunQueryRequest) GetReadOptions() *ReadOptions { -+ if m != nil { -+ return m.ReadOptions -+ } -+ return nil -+} -+ -+func (m *RunQueryRequest) GetPartitionId() *PartitionId { -+ if m != nil { -+ return m.PartitionId -+ } -+ return nil -+} -+ -+func (m *RunQueryRequest) GetQuery() *Query { -+ if m != nil { -+ return m.Query -+ } -+ return nil -+} -+ -+func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { -+ if m != nil { -+ return m.GqlQuery -+ } -+ return nil -+} -+ -+// The response for RunQuery. -+type RunQueryResponse struct { -+ // A batch of query results (always present). 
-+ Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } -+func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } -+func (*RunQueryResponse) ProtoMessage() {} -+ -+func (m *RunQueryResponse) GetBatch() *QueryResultBatch { -+ if m != nil { -+ return m.Batch -+ } -+ return nil -+} -+ -+// The request for BeginTransaction. -+type BeginTransactionRequest struct { -+ // The transaction isolation level. -+ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=pb.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -+func (*BeginTransactionRequest) ProtoMessage() {} -+ -+const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT -+ -+func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { -+ if m != nil && m.IsolationLevel != nil { -+ return *m.IsolationLevel -+ } -+ return Default_BeginTransactionRequest_IsolationLevel -+} -+ -+// The response for BeginTransaction. -+type BeginTransactionResponse struct { -+ // The transaction identifier (always present). -+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } -+func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } -+func (*BeginTransactionResponse) ProtoMessage() {} -+ -+func (m *BeginTransactionResponse) GetTransaction() []byte { -+ if m != nil { -+ return m.Transaction -+ } -+ return nil -+} -+ -+// The request for Rollback. -+type RollbackRequest struct { -+ // The transaction identifier, returned by a call to -+ // beginTransaction. -+ Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } -+func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } -+func (*RollbackRequest) ProtoMessage() {} -+ -+func (m *RollbackRequest) GetTransaction() []byte { -+ if m != nil { -+ return m.Transaction -+ } -+ return nil -+} -+ -+// The response for Rollback. -+type RollbackResponse struct { -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } -+func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } -+func (*RollbackResponse) ProtoMessage() {} -+ -+// The request for Commit. -+type CommitRequest struct { -+ // The transaction identifier, returned by a call to -+ // beginTransaction. Must be set when mode is TRANSACTIONAL. -+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` -+ // The mutation to perform. Optional. -+ Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` -+ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
-+ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=pb.CommitRequest_Mode,def=1" json:"mode,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *CommitRequest) Reset() { *m = CommitRequest{} } -+func (m *CommitRequest) String() string { return proto.CompactTextString(m) } -+func (*CommitRequest) ProtoMessage() {} -+ -+const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL -+ -+func (m *CommitRequest) GetTransaction() []byte { -+ if m != nil { -+ return m.Transaction -+ } -+ return nil -+} -+ -+func (m *CommitRequest) GetMutation() *Mutation { -+ if m != nil { -+ return m.Mutation -+ } -+ return nil -+} -+ -+func (m *CommitRequest) GetMode() CommitRequest_Mode { -+ if m != nil && m.Mode != nil { -+ return *m.Mode -+ } -+ return Default_CommitRequest_Mode -+} -+ -+// The response for Commit. -+type CommitResponse struct { -+ // The result of performing the mutation (if any). -+ MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *CommitResponse) Reset() { *m = CommitResponse{} } -+func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -+func (*CommitResponse) ProtoMessage() {} -+ -+func (m *CommitResponse) GetMutationResult() *MutationResult { -+ if m != nil { -+ return m.MutationResult -+ } -+ return nil -+} -+ -+// The request for AllocateIds. -+type AllocateIdsRequest struct { -+ // A list of keys with incomplete key paths to allocate IDs for. -+ // No key may be reserved/read-only. -+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -+func (*AllocateIdsRequest) ProtoMessage() {} -+ -+func (m *AllocateIdsRequest) GetKey() []*Key { -+ if m != nil { -+ return m.Key -+ } -+ return nil -+} -+ -+// The response for AllocateIds. -+type AllocateIdsResponse struct { -+ // The keys specified in the request (in the same order), each with -+ // its key path completed with a newly allocated ID. 
-+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` -+ XXX_unrecognized []byte `json:"-"` -+} -+ -+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -+func (*AllocateIdsResponse) ProtoMessage() {} -+ -+func (m *AllocateIdsResponse) GetKey() []*Key { -+ if m != nil { -+ return m.Key -+ } -+ return nil -+} -+ -+func init() { -+ proto.RegisterEnum("pb.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) -+ proto.RegisterEnum("pb.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) -+ proto.RegisterEnum("pb.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) -+ proto.RegisterEnum("pb.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) -+ proto.RegisterEnum("pb.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) -+ proto.RegisterEnum("pb.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) -+ proto.RegisterEnum("pb.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) -+ proto.RegisterEnum("pb.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) -+ proto.RegisterEnum("pb.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) -+} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto -new file mode 100644 -index 0000000..bb4c199 ---- /dev/null -+++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/datastore/datastore_v1.proto -@@ -0,0 +1,594 @@ -+// Copyright 2013 Google Inc. All Rights Reserved. -+// -+// The datastore v1 service proto definitions -+ -+syntax = "proto2"; -+ -+package pb; -+option java_package = "com.google.api.services.datastore"; -+ -+ -+// An identifier for a particular subset of entities. -+// -+// Entities are partitioned into various subsets, each used by different -+// datasets and different namespaces within a dataset and so forth. -+// -+// All input partition IDs are normalized before use. -+// A partition ID is normalized as follows: -+// If the partition ID is unset or is set to an empty partition ID, replace it -+// with the context partition ID. -+// Otherwise, if the partition ID has no dataset ID, assign it the context -+// partition ID's dataset ID. -+// Unless otherwise documented, the context partition ID has the dataset ID set -+// to the context dataset ID and no other partition dimension set. -+// -+// A partition ID is empty if all of its fields are unset. -+// -+// Partition dimension: -+// A dimension may be unset. -+// A dimension's value must never be "". -+// A dimension's value must match [A-Za-z\d\.\-_]{1,100} -+// If the value of any dimension matches regex "__.*__", -+// the partition is reserved/read-only. -+// A reserved/read-only partition ID is forbidden in certain documented contexts. -+// -+// Dataset ID: -+// A dataset id's value must never be "". -+// A dataset id's value must match -+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} -+message PartitionId { -+ // The dataset ID. -+ optional string dataset_id = 3; -+ // The namespace. 
-+ optional string namespace = 4; -+} -+ -+// A unique identifier for an entity. -+// If a key's partition id or any of its path kinds or names are -+// reserved/read-only, the key is reserved/read-only. -+// A reserved/read-only key is forbidden in certain documented contexts. -+message Key { -+ // Entities are partitioned into subsets, currently identified by a dataset -+ // (usually implicitly specified by the project) and namespace ID. -+ // Queries are scoped to a single partition. -+ optional PartitionId partition_id = 1; -+ -+ // A (kind, ID/name) pair used to construct a key path. -+ // -+ // At most one of name or ID may be set. -+ // If either is set, the element is complete. -+ // If neither is set, the element is incomplete. -+ message PathElement { -+ // The kind of the entity. -+ // A kind matching regex "__.*__" is reserved/read-only. -+ // A kind must not contain more than 500 characters. -+ // Cannot be "". -+ required string kind = 1; -+ // The ID of the entity. -+ // Never equal to zero. Values less than zero are discouraged and will not -+ // be supported in the future. -+ optional int64 id = 2; -+ // The name of the entity. -+ // A name matching regex "__.*__" is reserved/read-only. -+ // A name must not be more than 500 characters. -+ // Cannot be "". -+ optional string name = 3; -+ } -+ -+ // The entity path. -+ // An entity path consists of one or more elements composed of a kind and a -+ // string or numerical identifier, which identify entities. The first -+ // element identifies a root entity, the second element identifies -+ // a child of the root entity, the third element a child of the -+ // second entity, and so forth. The entities identified by all prefixes of -+ // the path are called the element's ancestors. -+ // An entity path is always fully complete: ALL of the entity's ancestors -+ // are required to be in the path along with the entity identifier itself. -+ // The only exception is that in some documented cases, the identifier in the -+ // last path element (for the entity) itself may be omitted. A path can never -+ // be empty. -+ repeated PathElement path_element = 2; -+} -+ -+// A message that can hold any of the supported value types and associated -+// metadata. -+// -+// At most one of the Value fields may be set. -+// If none are set the value is "null". -+// -+message Value { -+ // A boolean value. -+ optional bool boolean_value = 1; -+ // An integer value. -+ optional int64 integer_value = 2; -+ // A double value. -+ optional double double_value = 3; -+ // A timestamp value. -+ optional int64 timestamp_microseconds_value = 4; -+ // A key value. -+ optional Key key_value = 5; -+ // A blob key value. -+ optional string blob_key_value = 16; -+ // A UTF-8 encoded string value. -+ optional string string_value = 17; -+ // A blob value. -+ optional bytes blob_value = 18; -+ // An entity value. -+ // May have no key. -+ // May have a key with an incomplete key path. -+ // May have a reserved/read-only key. -+ optional Entity entity_value = 6; -+ // A list value. -+ // Cannot contain another list value. -+ // Cannot also have a meaning and indexing set. -+ repeated Value list_value = 7; -+ -+ // The meaning field is reserved and should not be used. -+ optional int32 meaning = 14; -+ -+ // If the value should be indexed. -+ // -+ // The indexed property may be set for a -+ // null value. -+ // When indexed is true, stringValue -+ // is limited to 500 characters and the blob value is limited to 500 bytes. 
-+ // Exception: If meaning is set to 2, string_value is limited to 2038 -+ // characters regardless of indexed. -+ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 -+ // will be ignored on input (and will never be set on output). -+ // Input values by default have indexed set to -+ // true; however, you can explicitly set indexed to -+ // true if you want. (An output value never has -+ // indexed explicitly set to true.) If a value is -+ // itself an entity, it cannot have indexed set to -+ // true. -+ // Exception: An entity value with meaning 9, 20 or 21 may be indexed. -+ optional bool indexed = 15 [default = true]; -+} -+ -+// An entity property. -+message Property { -+ // The name of the property. -+ // A property name matching regex "__.*__" is reserved. -+ // A reserved property name is forbidden in certain documented contexts. -+ // The name must not contain more than 500 characters. -+ // Cannot be "". -+ required string name = 1; -+ -+ // The value(s) of the property. -+ // Each value can have only one value property populated. For example, -+ // you cannot have a values list of { value: { integerValue: 22, -+ // stringValue: "a" } }, but you can have { value: { listValue: -+ // [ { integerValue: 22 }, { stringValue: "a" } ] }. -+ required Value value = 4; -+} -+ -+// An entity. -+// -+// An entity is limited to 1 megabyte when stored. That roughly -+// corresponds to a limit of 1 megabyte for the serialized form of this -+// message. -+message Entity { -+ // The entity's key. -+ // -+ // An entity must have a key, unless otherwise documented (for example, -+ // an entity in Value.entityValue may have no key). -+ // An entity's kind is its key's path's last element's kind, -+ // or null if it has no key. -+ optional Key key = 1; -+ // The entity's properties. -+ // Each property's name must be unique for its entity. -+ repeated Property property = 2; -+} -+ -+// The result of fetching an entity from the datastore. -+message EntityResult { -+ // Specifies what data the 'entity' field contains. -+ // A ResultType is either implied (for example, in LookupResponse.found it -+ // is always FULL) or specified by context (for example, in message -+ // QueryResultBatch, field 'entity_result_type' specifies a ResultType -+ // for all the values in field 'entity_result'). -+ enum ResultType { -+ FULL = 1; // The entire entity. -+ PROJECTION = 2; // A projected subset of properties. -+ // The entity may have no key. -+ // A property value may have meaning 18. -+ KEY_ONLY = 3; // Only the key. -+ } -+ -+ // The resulting entity. -+ required Entity entity = 1; -+} -+ -+// A query. -+message Query { -+ // The projection to return. If not set the entire entity is returned. -+ repeated PropertyExpression projection = 2; -+ -+ // The kinds to query (if empty, returns entities from all kinds). -+ repeated KindExpression kind = 3; -+ -+ // The filter to apply (optional). -+ optional Filter filter = 4; -+ -+ // The order to apply to the query results (if empty, order is unspecified). -+ repeated PropertyOrder order = 5; -+ -+ // The properties to group by (if empty, no grouping is applied to the -+ // result set). -+ repeated PropertyReference group_by = 6; -+ -+ // A starting point for the query results. Optional. Query cursors are -+ // returned in query result batches. -+ optional bytes /* serialized QueryCursor */ start_cursor = 7; -+ -+ // An ending point for the query results. Optional. Query cursors are -+ // returned in query result batches. 
-+ optional bytes /* serialized QueryCursor */ end_cursor = 8; -+ -+ // The number of results to skip. Applies before limit, but after all other -+ // constraints (optional, defaults to 0). -+ optional int32 offset = 10 [default=0]; -+ -+ // The maximum number of results to return. Applies after all other -+ // constraints. Optional. -+ optional int32 limit = 11; -+} -+ -+// A representation of a kind. -+message KindExpression { -+ // The name of the kind. -+ required string name = 1; -+} -+ -+// A reference to a property relative to the kind expressions. -+// exactly. -+message PropertyReference { -+ // The name of the property. -+ required string name = 2; -+} -+ -+// A representation of a property in a projection. -+message PropertyExpression { -+ enum AggregationFunction { -+ FIRST = 1; -+ } -+ // The property to project. -+ required PropertyReference property = 1; -+ // The aggregation function to apply to the property. Optional. -+ // Can only be used when grouping by at least one property. Must -+ // then be set on all properties in the projection that are not -+ // being grouped by. -+ optional AggregationFunction aggregation_function = 2; -+} -+ -+// The desired order for a specific property. -+message PropertyOrder { -+ enum Direction { -+ ASCENDING = 1; -+ DESCENDING = 2; -+ } -+ // The property to order by. -+ required PropertyReference property = 1; -+ // The direction to order by. -+ optional Direction direction = 2 [default=ASCENDING]; -+} -+ -+// A holder for any type of filter. Exactly one field should be specified. -+message Filter { -+ // A composite filter. -+ optional CompositeFilter composite_filter = 1; -+ // A filter on a property. -+ optional PropertyFilter property_filter = 2; -+} -+ -+// A filter that merges the multiple other filters using the given operation. -+message CompositeFilter { -+ enum Operator { -+ AND = 1; -+ } -+ -+ // The operator for combining multiple filters. -+ required Operator operator = 1; -+ // The list of filters to combine. -+ // Must contain at least one filter. -+ repeated Filter filter = 2; -+} -+ -+// A filter on a specific property. -+message PropertyFilter { -+ enum Operator { -+ LESS_THAN = 1; -+ LESS_THAN_OR_EQUAL = 2; -+ GREATER_THAN = 3; -+ GREATER_THAN_OR_EQUAL = 4; -+ EQUAL = 5; -+ -+ HAS_ANCESTOR = 11; -+ } -+ -+ // The property to filter by. -+ required PropertyReference property = 1; -+ // The operator to filter by. -+ required Operator operator = 2; -+ // The value to compare the property to. -+ required Value value = 3; -+} -+ -+// A GQL query. -+message GqlQuery { -+ required string query_string = 1; -+ // When false, the query string must not contain a literal. -+ optional bool allow_literal = 2 [default = false]; -+ // A named argument must set field GqlQueryArg.name. -+ // No two named arguments may have the same name. -+ // For each non-reserved named binding site in the query string, -+ // there must be a named argument with that name, -+ // but not necessarily the inverse. -+ repeated GqlQueryArg name_arg = 3; -+ // Numbered binding site @1 references the first numbered argument, -+ // effectively using 1-based indexing, rather than the usual 0. -+ // A numbered argument must NOT set field GqlQueryArg.name. -+ // For each binding site numbered i in query_string, -+ // there must be an ith numbered argument. -+ // The inverse must also be true. -+ repeated GqlQueryArg number_arg = 4; -+} -+ -+// A binding argument for a GQL query. -+// Exactly one of fields value and cursor must be set. 
-+message GqlQueryArg { -+ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". -+ // Must not match regex "__.*__". -+ // Must not be "". -+ optional string name = 1; -+ optional Value value = 2; -+ optional bytes cursor = 3; -+} -+ -+// A batch of results produced by a query. -+message QueryResultBatch { -+ // The possible values for the 'more_results' field. -+ enum MoreResultsType { -+ NOT_FINISHED = 1; // There are additional batches to fetch from this query. -+ MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more -+ // results after the limit. -+ NO_MORE_RESULTS = 3; // The query has been exhausted. -+ } -+ -+ // The result type for every entity in entityResults. -+ required EntityResult.ResultType entity_result_type = 1; -+ // The results for this batch. -+ repeated EntityResult entity_result = 2; -+ -+ // A cursor that points to the position after the last result in the batch. -+ // May be absent. -+ optional bytes /* serialized QueryCursor */ end_cursor = 4; -+ -+ // The state of the query after the current batch. -+ required MoreResultsType more_results = 5; -+ -+ // The number of results skipped because of Query.offset. -+ optional int32 skipped_results = 6; -+} -+ -+// A set of changes to apply. -+// -+// No entity in this message may have a reserved property name, -+// not even a property in an entity in a value. -+// No value in this message may have meaning 18, -+// not even a value in an entity in another value. -+// -+// If entities with duplicate keys are present, an arbitrary choice will -+// be made as to which is written. -+message Mutation { -+ // Entities to upsert. -+ // Each upserted entity's key must have a complete path and -+ // must not be reserved/read-only. -+ repeated Entity upsert = 1; -+ // Entities to update. -+ // Each updated entity's key must have a complete path and -+ // must not be reserved/read-only. -+ repeated Entity update = 2; -+ // Entities to insert. -+ // Each inserted entity's key must have a complete path and -+ // must not be reserved/read-only. -+ repeated Entity insert = 3; -+ // Insert entities with a newly allocated ID. -+ // Each inserted entity's key must omit the final identifier in its path and -+ // must not be reserved/read-only. -+ repeated Entity insert_auto_id = 4; -+ // Keys of entities to delete. -+ // Each key must have a complete key path and must not be reserved/read-only. -+ repeated Key delete = 5; -+ // Ignore a user specified read-only period. Optional. -+ optional bool force = 6; -+} -+ -+// The result of applying a mutation. -+message MutationResult { -+ // Number of index writes. -+ required int32 index_updates = 1; -+ // Keys for insertAutoId entities. One per entity from the -+ // request, in the same order. -+ repeated Key insert_auto_id_key = 2; -+} -+ -+// Options shared by read requests. -+message ReadOptions { -+ enum ReadConsistency { -+ DEFAULT = 0; -+ STRONG = 1; -+ EVENTUAL = 2; -+ } -+ -+ // The read consistency to use. -+ // Cannot be set when transaction is set. -+ // Lookup and ancestor queries default to STRONG, global queries default to -+ // EVENTUAL and cannot be set to STRONG. -+ optional ReadConsistency read_consistency = 1 [default=DEFAULT]; -+ -+ // The transaction to use. Optional. -+ optional bytes /* serialized Transaction */ transaction = 2; -+} -+ -+// The request for Lookup. -+message LookupRequest { -+ -+ // Options for this lookup request. Optional. -+ optional ReadOptions read_options = 1; -+ // Keys of entities to look up from the datastore. 
-+ repeated Key key = 3; -+} -+ -+// The response for Lookup. -+message LookupResponse { -+ -+ // The order of results in these fields is undefined and has no relation to -+ // the order of the keys in the input. -+ -+ // Entities found as ResultType.FULL entities. -+ repeated EntityResult found = 1; -+ -+ // Entities not found as ResultType.KEY_ONLY entities. -+ repeated EntityResult missing = 2; -+ -+ // A list of keys that were not looked up due to resource constraints. -+ repeated Key deferred = 3; -+} -+ -+ -+// The request for RunQuery. -+message RunQueryRequest { -+ -+ // The options for this query. -+ optional ReadOptions read_options = 1; -+ -+ // Entities are partitioned into subsets, identified by a dataset (usually -+ // implicitly specified by the project) and namespace ID. Queries are scoped -+ // to a single partition. -+ // This partition ID is normalized with the standard default context -+ // partition ID, but all other partition IDs in RunQueryRequest are -+ // normalized with this partition ID as the context partition ID. -+ optional PartitionId partition_id = 2; -+ -+ // The query to run. -+ // Either this field or field gql_query must be set, but not both. -+ optional Query query = 3; -+ // The GQL query to run. -+ // Either this field or field query must be set, but not both. -+ optional GqlQuery gql_query = 7; -+} -+ -+// The response for RunQuery. -+message RunQueryResponse { -+ -+ // A batch of query results (always present). -+ optional QueryResultBatch batch = 1; -+ -+} -+ -+// The request for BeginTransaction. -+message BeginTransactionRequest { -+ -+ enum IsolationLevel { -+ SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions -+ // conflict if their mutations conflict. For example: -+ // Read(A),Write(B) may not conflict with Read(B),Write(A), -+ // but Read(B),Write(B) does conflict with Read(B),Write(B). -+ SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent -+ // transactions conflict if they cannot be serialized. -+ // For example Read(A),Write(B) does conflict with -+ // Read(B),Write(A) but Read(A) may not conflict with -+ // Write(A). -+ } -+ -+ // The transaction isolation level. -+ optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; -+} -+ -+// The response for BeginTransaction. -+message BeginTransactionResponse { -+ -+ // The transaction identifier (always present). -+ optional bytes /* serialized Transaction */ transaction = 1; -+} -+ -+// The request for Rollback. -+message RollbackRequest { -+ -+ // The transaction identifier, returned by a call to -+ // beginTransaction. -+ required bytes /* serialized Transaction */ transaction = 1; -+} -+ -+// The response for Rollback. -+message RollbackResponse { -+// Empty -+} -+ -+// The request for Commit. -+message CommitRequest { -+ -+ enum Mode { -+ TRANSACTIONAL = 1; -+ NON_TRANSACTIONAL = 2; -+ } -+ -+ // The transaction identifier, returned by a call to -+ // beginTransaction. Must be set when mode is TRANSACTIONAL. -+ optional bytes /* serialized Transaction */ transaction = 1; -+ // The mutation to perform. Optional. -+ optional Mutation mutation = 2; -+ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. -+ optional Mode mode = 5 [default=TRANSACTIONAL]; -+} -+ -+// The response for Commit. -+message CommitResponse { -+ -+ // The result of performing the mutation (if any). -+ optional MutationResult mutation_result = 1; -+} -+ -+// The request for AllocateIds. 
-+message AllocateIdsRequest { -+ -+ // A list of keys with incomplete key paths to allocate IDs for. -+ // No key may be reserved/read-only. -+ repeated Key key = 1; -+} -+ -+// The response for AllocateIds. -+message AllocateIdsResponse { -+ -+ // The keys specified in the request (in the same order), each with -+ // its key path completed with a newly allocated ID. -+ repeated Key key = 1; -+} -+ -+// Each rpc normalizes the partition IDs of the keys in its input entities, -+// and always returns entities with keys with normalized partition IDs. -+// (Note that applies to all entities, including entities in values.) -+service DatastoreService { -+ // Look up some entities by key. -+ rpc Lookup(LookupRequest) returns (LookupResponse) { -+ }; -+ // Query for entities. -+ rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { -+ }; -+ // Begin a new transaction. -+ rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { -+ }; -+ // Commit a transaction, optionally creating, deleting or modifying some -+ // entities. -+ rpc Commit(CommitRequest) returns (CommitResponse) { -+ }; -+ // Roll back a transaction. -+ rpc Rollback(RollbackRequest) returns (RollbackResponse) { -+ }; -+ // Allocate IDs for incomplete keys (useful for referencing an entity before -+ // it is inserted). -+ rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { -+ }; -+} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go -new file mode 100644 -index 0000000..aafd683 ---- /dev/null -+++ b/Godeps/_workspace/src/google.golang.org/cloud/inteernal/testutil/context.go -@@ -0,0 +1,57 @@ -+// Copyright 2014 Google Inc. All Rights Reserved. -+// -+// Licensed under the Apache License, Version 2.0 (the "License"); -+// you may not use this file except in compliance with the License. -+// You may obtain a copy of the License at -+// -+// http://www.apache.org/licenses/LICENSE-2.0 -+// -+// Unless required by applicable law or agreed to in writing, software -+// distributed under the License is distributed on an "AS IS" BASIS, -+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+// See the License for the specific language governing permissions and -+// limitations under the License. -+ -+// Package testutil contains helper functions for writing tests. -+package testutil -+ -+import ( -+ "io/ioutil" -+ "log" -+ "net/http" -+ "os" -+ -+ "golang.org/x/net/context" -+ "golang.org/x/oauth2" -+ "golang.org/x/oauth2/google" -+ "google.golang.org/cloud" -+) -+ -+const ( -+ envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" -+ envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" -+) -+ -+func Context(scopes ...string) context.Context { -+ key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) -+ if key == "" || projID == "" { -+ log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") -+ } -+ jsonKey, err := ioutil.ReadFile(key) -+ if err != nil { -+ log.Fatalf("Cannot read the JSON key file, err: %v", err) -+ } -+ conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) -+ if err != nil { -+ log.Fatal(err) -+ } -+ return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) -+} -+ -+func NoAuthContext() context.Context { -+ projID := os.Getenv(envProjID) -+ if projID == "" { -+ log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. 
See CONTRIBUTING.md for details.") -+ } -+ return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) -+} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go -deleted file mode 100644 -index 984323c..0000000 ---- a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go -+++ /dev/null -@@ -1,128 +0,0 @@ --// Copyright 2014 Google Inc. All Rights Reserved. --// --// Licensed under the Apache License, Version 2.0 (the "License"); --// you may not use this file except in compliance with the License. --// You may obtain a copy of the License at --// --// http://www.apache.org/licenses/LICENSE-2.0 --// --// Unless required by applicable law or agreed to in writing, software --// distributed under the License is distributed on an "AS IS" BASIS, --// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --// See the License for the specific language governing permissions and --// limitations under the License. -- --// Package internal provides support for the cloud packages. --// --// Users should not import this package directly. --package internal -- --import ( -- "fmt" -- "net/http" -- "sync" -- -- "golang.org/x/net/context" --) -- --type contextKey struct{} -- --func WithContext(parent context.Context, projID string, c *http.Client) context.Context { -- if c == nil { -- panic("nil *http.Client passed to WithContext") -- } -- if projID == "" { -- panic("empty project ID passed to WithContext") -- } -- return context.WithValue(parent, contextKey{}, &cloudContext{ -- ProjectID: projID, -- HTTPClient: c, -- }) --} -- --const userAgent = "gcloud-golang/0.1" -- --type cloudContext struct { -- ProjectID string -- HTTPClient *http.Client -- -- mu sync.Mutex // guards svc -- svc map[string]interface{} // e.g. "storage" => *rawStorage.Service --} -- --// Service returns the result of the fill function if it's never been --// called before for the given name (which is assumed to be an API --// service name, like "datastore"). If it has already been cached, the fill --// func is not run. --// It's safe for concurrent use by multiple goroutines. --func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { -- return cc(ctx).service(name, fill) --} -- --func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { -- c.mu.Lock() -- defer c.mu.Unlock() -- -- if c.svc == nil { -- c.svc = make(map[string]interface{}) -- } else if v, ok := c.svc[name]; ok { -- return v -- } -- v := fill(c.HTTPClient) -- c.svc[name] = v -- return v --} -- --// Transport is an http.RoundTripper that appends --// Google Cloud client's user-agent to the original --// request's user-agent header. --type Transport struct { -- // Base represents the actual http.RoundTripper -- // the requests will be delegated to. -- Base http.RoundTripper --} -- --// RoundTrip appends a user-agent to the existing user-agent --// header and delegates the request to the base http.RoundTripper. --func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { -- req = cloneRequest(req) -- ua := req.Header.Get("User-Agent") -- if ua == "" { -- ua = userAgent -- } else { -- ua = fmt.Sprintf("%s;%s", ua, userAgent) -- } -- req.Header.Set("User-Agent", ua) -- return t.Base.RoundTrip(req) --} -- --// cloneRequest returns a clone of the provided *http.Request. --// The clone is a shallow copy of the struct and its Header map. 
--func cloneRequest(r *http.Request) *http.Request { -- // shallow copy of the struct -- r2 := new(http.Request) -- *r2 = *r -- // deep copy of the Header -- r2.Header = make(http.Header) -- for k, s := range r.Header { -- r2.Header[k] = s -- } -- return r2 --} -- --func ProjID(ctx context.Context) string { -- return cc(ctx).ProjectID --} -- --func HTTPClient(ctx context.Context) *http.Client { -- return cc(ctx).HTTPClient --} -- --// cc returns the internal *cloudContext (cc) state for a context.Context. --// It panics if the user did it wrong. --func cc(ctx context.Context) *cloudContext { -- if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { -- return c -- } -- panic("invalid context.Context type; it should be created with cloud.NewContext") --} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go -deleted file mode 100644 -index be903e5..0000000 ---- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go -+++ /dev/null -@@ -1,1633 +0,0 @@ --// Code generated by protoc-gen-go. --// source: datastore_v1.proto --// DO NOT EDIT! -- --/* --Package pb is a generated protocol buffer package. -- --It is generated from these files: -- datastore_v1.proto -- --It has these top-level messages: -- PartitionId -- Key -- Value -- Property -- Entity -- EntityResult -- Query -- KindExpression -- PropertyReference -- PropertyExpression -- PropertyOrder -- Filter -- CompositeFilter -- PropertyFilter -- GqlQuery -- GqlQueryArg -- QueryResultBatch -- Mutation -- MutationResult -- ReadOptions -- LookupRequest -- LookupResponse -- RunQueryRequest -- RunQueryResponse -- BeginTransactionRequest -- BeginTransactionResponse -- RollbackRequest -- RollbackResponse -- CommitRequest -- CommitResponse -- AllocateIdsRequest -- AllocateIdsResponse --*/ --package pb -- --import proto "github.com/golang/protobuf/proto" --import math "math" -- --// Reference imports to suppress errors if they are not otherwise used. --var _ = proto.Marshal --var _ = math.Inf -- --// Specifies what data the 'entity' field contains. --// A ResultType is either implied (for example, in LookupResponse.found it --// is always FULL) or specified by context (for example, in message --// QueryResultBatch, field 'entity_result_type' specifies a ResultType --// for all the values in field 'entity_result'). --type EntityResult_ResultType int32 -- --const ( -- EntityResult_FULL EntityResult_ResultType = 1 -- EntityResult_PROJECTION EntityResult_ResultType = 2 -- // The entity may have no key. -- // A property value may have meaning 18. 
-- EntityResult_KEY_ONLY EntityResult_ResultType = 3 --) -- --var EntityResult_ResultType_name = map[int32]string{ -- 1: "FULL", -- 2: "PROJECTION", -- 3: "KEY_ONLY", --} --var EntityResult_ResultType_value = map[string]int32{ -- "FULL": 1, -- "PROJECTION": 2, -- "KEY_ONLY": 3, --} -- --func (x EntityResult_ResultType) Enum() *EntityResult_ResultType { -- p := new(EntityResult_ResultType) -- *p = x -- return p --} --func (x EntityResult_ResultType) String() string { -- return proto.EnumName(EntityResult_ResultType_name, int32(x)) --} --func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType") -- if err != nil { -- return err -- } -- *x = EntityResult_ResultType(value) -- return nil --} -- --type PropertyExpression_AggregationFunction int32 -- --const ( -- PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1 --) -- --var PropertyExpression_AggregationFunction_name = map[int32]string{ -- 1: "FIRST", --} --var PropertyExpression_AggregationFunction_value = map[string]int32{ -- "FIRST": 1, --} -- --func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction { -- p := new(PropertyExpression_AggregationFunction) -- *p = x -- return p --} --func (x PropertyExpression_AggregationFunction) String() string { -- return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x)) --} --func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction") -- if err != nil { -- return err -- } -- *x = PropertyExpression_AggregationFunction(value) -- return nil --} -- --type PropertyOrder_Direction int32 -- --const ( -- PropertyOrder_ASCENDING PropertyOrder_Direction = 1 -- PropertyOrder_DESCENDING PropertyOrder_Direction = 2 --) -- --var PropertyOrder_Direction_name = map[int32]string{ -- 1: "ASCENDING", -- 2: "DESCENDING", --} --var PropertyOrder_Direction_value = map[string]int32{ -- "ASCENDING": 1, -- "DESCENDING": 2, --} -- --func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction { -- p := new(PropertyOrder_Direction) -- *p = x -- return p --} --func (x PropertyOrder_Direction) String() string { -- return proto.EnumName(PropertyOrder_Direction_name, int32(x)) --} --func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction") -- if err != nil { -- return err -- } -- *x = PropertyOrder_Direction(value) -- return nil --} -- --type CompositeFilter_Operator int32 -- --const ( -- CompositeFilter_AND CompositeFilter_Operator = 1 --) -- --var CompositeFilter_Operator_name = map[int32]string{ -- 1: "AND", --} --var CompositeFilter_Operator_value = map[string]int32{ -- "AND": 1, --} -- --func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator { -- p := new(CompositeFilter_Operator) -- *p = x -- return p --} --func (x CompositeFilter_Operator) String() string { -- return proto.EnumName(CompositeFilter_Operator_name, int32(x)) --} --func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator") -- if err != nil { -- return err -- } -- *x = CompositeFilter_Operator(value) -- return nil --} -- --type PropertyFilter_Operator int32 -- --const ( -- 
PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 -- PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 -- PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 -- PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 -- PropertyFilter_EQUAL PropertyFilter_Operator = 5 -- PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 --) -- --var PropertyFilter_Operator_name = map[int32]string{ -- 1: "LESS_THAN", -- 2: "LESS_THAN_OR_EQUAL", -- 3: "GREATER_THAN", -- 4: "GREATER_THAN_OR_EQUAL", -- 5: "EQUAL", -- 11: "HAS_ANCESTOR", --} --var PropertyFilter_Operator_value = map[string]int32{ -- "LESS_THAN": 1, -- "LESS_THAN_OR_EQUAL": 2, -- "GREATER_THAN": 3, -- "GREATER_THAN_OR_EQUAL": 4, -- "EQUAL": 5, -- "HAS_ANCESTOR": 11, --} -- --func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator { -- p := new(PropertyFilter_Operator) -- *p = x -- return p --} --func (x PropertyFilter_Operator) String() string { -- return proto.EnumName(PropertyFilter_Operator_name, int32(x)) --} --func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator") -- if err != nil { -- return err -- } -- *x = PropertyFilter_Operator(value) -- return nil --} -- --// The possible values for the 'more_results' field. --type QueryResultBatch_MoreResultsType int32 -- --const ( -- QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 -- QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 -- // results after the limit. -- QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 --) -- --var QueryResultBatch_MoreResultsType_name = map[int32]string{ -- 1: "NOT_FINISHED", -- 2: "MORE_RESULTS_AFTER_LIMIT", -- 3: "NO_MORE_RESULTS", --} --var QueryResultBatch_MoreResultsType_value = map[string]int32{ -- "NOT_FINISHED": 1, -- "MORE_RESULTS_AFTER_LIMIT": 2, -- "NO_MORE_RESULTS": 3, --} -- --func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType { -- p := new(QueryResultBatch_MoreResultsType) -- *p = x -- return p --} --func (x QueryResultBatch_MoreResultsType) String() string { -- return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) --} --func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType") -- if err != nil { -- return err -- } -- *x = QueryResultBatch_MoreResultsType(value) -- return nil --} -- --type ReadOptions_ReadConsistency int32 -- --const ( -- ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0 -- ReadOptions_STRONG ReadOptions_ReadConsistency = 1 -- ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 --) -- --var ReadOptions_ReadConsistency_name = map[int32]string{ -- 0: "DEFAULT", -- 1: "STRONG", -- 2: "EVENTUAL", --} --var ReadOptions_ReadConsistency_value = map[string]int32{ -- "DEFAULT": 0, -- "STRONG": 1, -- "EVENTUAL": 2, --} -- --func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency { -- p := new(ReadOptions_ReadConsistency) -- *p = x -- return p --} --func (x ReadOptions_ReadConsistency) String() string { -- return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) --} --func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency") -- if err != nil { -- return err -- } -- *x = 
ReadOptions_ReadConsistency(value) -- return nil --} -- --type BeginTransactionRequest_IsolationLevel int32 -- --const ( -- BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0 -- // conflict if their mutations conflict. For example: -- // Read(A),Write(B) may not conflict with Read(B),Write(A), -- // but Read(B),Write(B) does conflict with Read(B),Write(B). -- BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1 --) -- --var BeginTransactionRequest_IsolationLevel_name = map[int32]string{ -- 0: "SNAPSHOT", -- 1: "SERIALIZABLE", --} --var BeginTransactionRequest_IsolationLevel_value = map[string]int32{ -- "SNAPSHOT": 0, -- "SERIALIZABLE": 1, --} -- --func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel { -- p := new(BeginTransactionRequest_IsolationLevel) -- *p = x -- return p --} --func (x BeginTransactionRequest_IsolationLevel) String() string { -- return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x)) --} --func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel") -- if err != nil { -- return err -- } -- *x = BeginTransactionRequest_IsolationLevel(value) -- return nil --} -- --type CommitRequest_Mode int32 -- --const ( -- CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 -- CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 --) -- --var CommitRequest_Mode_name = map[int32]string{ -- 1: "TRANSACTIONAL", -- 2: "NON_TRANSACTIONAL", --} --var CommitRequest_Mode_value = map[string]int32{ -- "TRANSACTIONAL": 1, -- "NON_TRANSACTIONAL": 2, --} -- --func (x CommitRequest_Mode) Enum() *CommitRequest_Mode { -- p := new(CommitRequest_Mode) -- *p = x -- return p --} --func (x CommitRequest_Mode) String() string { -- return proto.EnumName(CommitRequest_Mode_name, int32(x)) --} --func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error { -- value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode") -- if err != nil { -- return err -- } -- *x = CommitRequest_Mode(value) -- return nil --} -- --// An identifier for a particular subset of entities. --// --// Entities are partitioned into various subsets, each used by different --// datasets and different namespaces within a dataset and so forth. --// --// All input partition IDs are normalized before use. --// A partition ID is normalized as follows: --// If the partition ID is unset or is set to an empty partition ID, replace it --// with the context partition ID. --// Otherwise, if the partition ID has no dataset ID, assign it the context --// partition ID's dataset ID. --// Unless otherwise documented, the context partition ID has the dataset ID set --// to the context dataset ID and no other partition dimension set. --// --// A partition ID is empty if all of its fields are unset. --// --// Partition dimension: --// A dimension may be unset. --// A dimension's value must never be "". --// A dimension's value must match [A-Za-z\d\.\-_]{1,100} --// If the value of any dimension matches regex "__.*__", --// the partition is reserved/read-only. --// A reserved/read-only partition ID is forbidden in certain documented contexts. --// --// Dataset ID: --// A dataset id's value must never be "". 
--// A dataset id's value must match --// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} --type PartitionId struct { -- // The dataset ID. -- DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"` -- // The namespace. -- Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *PartitionId) Reset() { *m = PartitionId{} } --func (m *PartitionId) String() string { return proto.CompactTextString(m) } --func (*PartitionId) ProtoMessage() {} -- --func (m *PartitionId) GetDatasetId() string { -- if m != nil && m.DatasetId != nil { -- return *m.DatasetId -- } -- return "" --} -- --func (m *PartitionId) GetNamespace() string { -- if m != nil && m.Namespace != nil { -- return *m.Namespace -- } -- return "" --} -- --// A unique identifier for an entity. --// If a key's partition id or any of its path kinds or names are --// reserved/read-only, the key is reserved/read-only. --// A reserved/read-only key is forbidden in certain documented contexts. --type Key struct { -- // Entities are partitioned into subsets, currently identified by a dataset -- // (usually implicitly specified by the project) and namespace ID. -- // Queries are scoped to a single partition. -- PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"` -- // The entity path. -- // An entity path consists of one or more elements composed of a kind and a -- // string or numerical identifier, which identify entities. The first -- // element identifies a root entity, the second element identifies -- // a child of the root entity, the third element a child of the -- // second entity, and so forth. The entities identified by all prefixes of -- // the path are called the element's ancestors. -- // An entity path is always fully complete: ALL of the entity's ancestors -- // are required to be in the path along with the entity identifier itself. -- // The only exception is that in some documented cases, the identifier in the -- // last path element (for the entity) itself may be omitted. A path can never -- // be empty. -- PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Key) Reset() { *m = Key{} } --func (m *Key) String() string { return proto.CompactTextString(m) } --func (*Key) ProtoMessage() {} -- --func (m *Key) GetPartitionId() *PartitionId { -- if m != nil { -- return m.PartitionId -- } -- return nil --} -- --func (m *Key) GetPathElement() []*Key_PathElement { -- if m != nil { -- return m.PathElement -- } -- return nil --} -- --// A (kind, ID/name) pair used to construct a key path. --// --// At most one of name or ID may be set. --// If either is set, the element is complete. --// If neither is set, the element is incomplete. --type Key_PathElement struct { -- // The kind of the entity. -- // A kind matching regex "__.*__" is reserved/read-only. -- // A kind must not contain more than 500 characters. -- // Cannot be "". -- Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"` -- // The ID of the entity. -- // Never equal to zero. Values less than zero are discouraged and will not -- // be supported in the future. -- Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` -- // The name of the entity. -- // A name matching regex "__.*__" is reserved/read-only. -- // A name must not be more than 500 characters. 
-- // Cannot be "". -- Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } --func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } --func (*Key_PathElement) ProtoMessage() {} -- --func (m *Key_PathElement) GetKind() string { -- if m != nil && m.Kind != nil { -- return *m.Kind -- } -- return "" --} -- --func (m *Key_PathElement) GetId() int64 { -- if m != nil && m.Id != nil { -- return *m.Id -- } -- return 0 --} -- --func (m *Key_PathElement) GetName() string { -- if m != nil && m.Name != nil { -- return *m.Name -- } -- return "" --} -- --// A message that can hold any of the supported value types and associated --// metadata. --// --// At most one of the Value fields may be set. --// If none are set the value is "null". --// --type Value struct { -- // A boolean value. -- BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"` -- // An integer value. -- IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"` -- // A double value. -- DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` -- // A timestamp value. -- TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"` -- // A key value. -- KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"` -- // A blob key value. -- BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"` -- // A UTF-8 encoded string value. -- StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"` -- // A blob value. -- BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"` -- // An entity value. -- // May have no key. -- // May have a key with an incomplete key path. -- // May have a reserved/read-only key. -- EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"` -- // A list value. -- // Cannot contain another list value. -- // Cannot also have a meaning and indexing set. -- ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"` -- // The meaning field is reserved and should not be used. -- Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` -- // If the value should be indexed. -- // -- // The indexed property may be set for a -- // null value. -- // When indexed is true, stringValue -- // is limited to 500 characters and the blob value is limited to 500 bytes. -- // Exception: If meaning is set to 2, string_value is limited to 2038 -- // characters regardless of indexed. -- // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 -- // will be ignored on input (and will never be set on output). -- // Input values by default have indexed set to -- // true; however, you can explicitly set indexed to -- // true if you want. (An output value never has -- // indexed explicitly set to true.) If a value is -- // itself an entity, it cannot have indexed set to -- // true. -- // Exception: An entity value with meaning 9, 20 or 21 may be indexed. 
-- Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Value) Reset() { *m = Value{} } --func (m *Value) String() string { return proto.CompactTextString(m) } --func (*Value) ProtoMessage() {} -- --const Default_Value_Indexed bool = true -- --func (m *Value) GetBooleanValue() bool { -- if m != nil && m.BooleanValue != nil { -- return *m.BooleanValue -- } -- return false --} -- --func (m *Value) GetIntegerValue() int64 { -- if m != nil && m.IntegerValue != nil { -- return *m.IntegerValue -- } -- return 0 --} -- --func (m *Value) GetDoubleValue() float64 { -- if m != nil && m.DoubleValue != nil { -- return *m.DoubleValue -- } -- return 0 --} -- --func (m *Value) GetTimestampMicrosecondsValue() int64 { -- if m != nil && m.TimestampMicrosecondsValue != nil { -- return *m.TimestampMicrosecondsValue -- } -- return 0 --} -- --func (m *Value) GetKeyValue() *Key { -- if m != nil { -- return m.KeyValue -- } -- return nil --} -- --func (m *Value) GetBlobKeyValue() string { -- if m != nil && m.BlobKeyValue != nil { -- return *m.BlobKeyValue -- } -- return "" --} -- --func (m *Value) GetStringValue() string { -- if m != nil && m.StringValue != nil { -- return *m.StringValue -- } -- return "" --} -- --func (m *Value) GetBlobValue() []byte { -- if m != nil { -- return m.BlobValue -- } -- return nil --} -- --func (m *Value) GetEntityValue() *Entity { -- if m != nil { -- return m.EntityValue -- } -- return nil --} -- --func (m *Value) GetListValue() []*Value { -- if m != nil { -- return m.ListValue -- } -- return nil --} -- --func (m *Value) GetMeaning() int32 { -- if m != nil && m.Meaning != nil { -- return *m.Meaning -- } -- return 0 --} -- --func (m *Value) GetIndexed() bool { -- if m != nil && m.Indexed != nil { -- return *m.Indexed -- } -- return Default_Value_Indexed --} -- --// An entity property. --type Property struct { -- // The name of the property. -- // A property name matching regex "__.*__" is reserved. -- // A reserved property name is forbidden in certain documented contexts. -- // The name must not contain more than 500 characters. -- // Cannot be "". -- Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` -- // The value(s) of the property. -- // Each value can have only one value property populated. For example, -- // you cannot have a values list of { value: { integerValue: 22, -- // stringValue: "a" } }, but you can have { value: { listValue: -- // [ { integerValue: 22 }, { stringValue: "a" } ] }. -- Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Property) Reset() { *m = Property{} } --func (m *Property) String() string { return proto.CompactTextString(m) } --func (*Property) ProtoMessage() {} -- --func (m *Property) GetName() string { -- if m != nil && m.Name != nil { -- return *m.Name -- } -- return "" --} -- --func (m *Property) GetValue() *Value { -- if m != nil { -- return m.Value -- } -- return nil --} -- --// An entity. --// --// An entity is limited to 1 megabyte when stored. That roughly --// corresponds to a limit of 1 megabyte for the serialized form of this --// message. --type Entity struct { -- // The entity's key. -- // -- // An entity must have a key, unless otherwise documented (for example, -- // an entity in Value.entityValue may have no key). -- // An entity's kind is its key's path's last element's kind, -- // or null if it has no key. 
-- Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` -- // The entity's properties. -- // Each property's name must be unique for its entity. -- Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Entity) Reset() { *m = Entity{} } --func (m *Entity) String() string { return proto.CompactTextString(m) } --func (*Entity) ProtoMessage() {} -- --func (m *Entity) GetKey() *Key { -- if m != nil { -- return m.Key -- } -- return nil --} -- --func (m *Entity) GetProperty() []*Property { -- if m != nil { -- return m.Property -- } -- return nil --} -- --// The result of fetching an entity from the datastore. --type EntityResult struct { -- // The resulting entity. -- Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *EntityResult) Reset() { *m = EntityResult{} } --func (m *EntityResult) String() string { return proto.CompactTextString(m) } --func (*EntityResult) ProtoMessage() {} -- --func (m *EntityResult) GetEntity() *Entity { -- if m != nil { -- return m.Entity -- } -- return nil --} -- --// A query. --type Query struct { -- // The projection to return. If not set the entire entity is returned. -- Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` -- // The kinds to query (if empty, returns entities from all kinds). -- Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` -- // The filter to apply (optional). -- Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` -- // The order to apply to the query results (if empty, order is unspecified). -- Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` -- // The properties to group by (if empty, no grouping is applied to the -- // result set). -- GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"` -- // A starting point for the query results. Optional. Query cursors are -- // returned in query result batches. -- StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"` -- // An ending point for the query results. Optional. Query cursors are -- // returned in query result batches. -- EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"` -- // The number of results to skip. Applies before limit, but after all other -- // constraints (optional, defaults to 0). -- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` -- // The maximum number of results to return. Applies after all other -- // constraints. Optional. 
-- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Query) Reset() { *m = Query{} } --func (m *Query) String() string { return proto.CompactTextString(m) } --func (*Query) ProtoMessage() {} -- --const Default_Query_Offset int32 = 0 -- --func (m *Query) GetProjection() []*PropertyExpression { -- if m != nil { -- return m.Projection -- } -- return nil --} -- --func (m *Query) GetKind() []*KindExpression { -- if m != nil { -- return m.Kind -- } -- return nil --} -- --func (m *Query) GetFilter() *Filter { -- if m != nil { -- return m.Filter -- } -- return nil --} -- --func (m *Query) GetOrder() []*PropertyOrder { -- if m != nil { -- return m.Order -- } -- return nil --} -- --func (m *Query) GetGroupBy() []*PropertyReference { -- if m != nil { -- return m.GroupBy -- } -- return nil --} -- --func (m *Query) GetStartCursor() []byte { -- if m != nil { -- return m.StartCursor -- } -- return nil --} -- --func (m *Query) GetEndCursor() []byte { -- if m != nil { -- return m.EndCursor -- } -- return nil --} -- --func (m *Query) GetOffset() int32 { -- if m != nil && m.Offset != nil { -- return *m.Offset -- } -- return Default_Query_Offset --} -- --func (m *Query) GetLimit() int32 { -- if m != nil && m.Limit != nil { -- return *m.Limit -- } -- return 0 --} -- --// A representation of a kind. --type KindExpression struct { -- // The name of the kind. -- Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *KindExpression) Reset() { *m = KindExpression{} } --func (m *KindExpression) String() string { return proto.CompactTextString(m) } --func (*KindExpression) ProtoMessage() {} -- --func (m *KindExpression) GetName() string { -- if m != nil && m.Name != nil { -- return *m.Name -- } -- return "" --} -- --// A reference to a property relative to the kind expressions. --// exactly. --type PropertyReference struct { -- // The name of the property. -- Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *PropertyReference) Reset() { *m = PropertyReference{} } --func (m *PropertyReference) String() string { return proto.CompactTextString(m) } --func (*PropertyReference) ProtoMessage() {} -- --func (m *PropertyReference) GetName() string { -- if m != nil && m.Name != nil { -- return *m.Name -- } -- return "" --} -- --// A representation of a property in a projection. --type PropertyExpression struct { -- // The property to project. -- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -- // The aggregation function to apply to the property. Optional. -- // Can only be used when grouping by at least one property. Must -- // then be set on all properties in the projection that are not -- // being grouped by. 
-- AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=pb.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *PropertyExpression) Reset() { *m = PropertyExpression{} } --func (m *PropertyExpression) String() string { return proto.CompactTextString(m) } --func (*PropertyExpression) ProtoMessage() {} -- --func (m *PropertyExpression) GetProperty() *PropertyReference { -- if m != nil { -- return m.Property -- } -- return nil --} -- --func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction { -- if m != nil && m.AggregationFunction != nil { -- return *m.AggregationFunction -- } -- return PropertyExpression_FIRST --} -- --// The desired order for a specific property. --type PropertyOrder struct { -- // The property to order by. -- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -- // The direction to order by. -- Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=pb.PropertyOrder_Direction,def=1" json:"direction,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } --func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } --func (*PropertyOrder) ProtoMessage() {} -- --const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING -- --func (m *PropertyOrder) GetProperty() *PropertyReference { -- if m != nil { -- return m.Property -- } -- return nil --} -- --func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { -- if m != nil && m.Direction != nil { -- return *m.Direction -- } -- return Default_PropertyOrder_Direction --} -- --// A holder for any type of filter. Exactly one field should be specified. --type Filter struct { -- // A composite filter. -- CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"` -- // A filter on a property. -- PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Filter) Reset() { *m = Filter{} } --func (m *Filter) String() string { return proto.CompactTextString(m) } --func (*Filter) ProtoMessage() {} -- --func (m *Filter) GetCompositeFilter() *CompositeFilter { -- if m != nil { -- return m.CompositeFilter -- } -- return nil --} -- --func (m *Filter) GetPropertyFilter() *PropertyFilter { -- if m != nil { -- return m.PropertyFilter -- } -- return nil --} -- --// A filter that merges the multiple other filters using the given operation. --type CompositeFilter struct { -- // The operator for combining multiple filters. -- Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=pb.CompositeFilter_Operator" json:"operator,omitempty"` -- // The list of filters to combine. -- // Must contain at least one filter. 
-- Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } --func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } --func (*CompositeFilter) ProtoMessage() {} -- --func (m *CompositeFilter) GetOperator() CompositeFilter_Operator { -- if m != nil && m.Operator != nil { -- return *m.Operator -- } -- return CompositeFilter_AND --} -- --func (m *CompositeFilter) GetFilter() []*Filter { -- if m != nil { -- return m.Filter -- } -- return nil --} -- --// A filter on a specific property. --type PropertyFilter struct { -- // The property to filter by. -- Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"` -- // The operator to filter by. -- Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=pb.PropertyFilter_Operator" json:"operator,omitempty"` -- // The value to compare the property to. -- Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } --func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } --func (*PropertyFilter) ProtoMessage() {} -- --func (m *PropertyFilter) GetProperty() *PropertyReference { -- if m != nil { -- return m.Property -- } -- return nil --} -- --func (m *PropertyFilter) GetOperator() PropertyFilter_Operator { -- if m != nil && m.Operator != nil { -- return *m.Operator -- } -- return PropertyFilter_LESS_THAN --} -- --func (m *PropertyFilter) GetValue() *Value { -- if m != nil { -- return m.Value -- } -- return nil --} -- --// A GQL query. --type GqlQuery struct { -- QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"` -- // When false, the query string must not contain a literal. -- AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"` -- // A named argument must set field GqlQueryArg.name. -- // No two named arguments may have the same name. -- // For each non-reserved named binding site in the query string, -- // there must be a named argument with that name, -- // but not necessarily the inverse. -- NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"` -- // Numbered binding site @1 references the first numbered argument, -- // effectively using 1-based indexing, rather than the usual 0. -- // A numbered argument must NOT set field GqlQueryArg.name. -- // For each binding site numbered i in query_string, -- // there must be an ith numbered argument. -- // The inverse must also be true. 
-- NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *GqlQuery) Reset() { *m = GqlQuery{} } --func (m *GqlQuery) String() string { return proto.CompactTextString(m) } --func (*GqlQuery) ProtoMessage() {} -- --const Default_GqlQuery_AllowLiteral bool = false -- --func (m *GqlQuery) GetQueryString() string { -- if m != nil && m.QueryString != nil { -- return *m.QueryString -- } -- return "" --} -- --func (m *GqlQuery) GetAllowLiteral() bool { -- if m != nil && m.AllowLiteral != nil { -- return *m.AllowLiteral -- } -- return Default_GqlQuery_AllowLiteral --} -- --func (m *GqlQuery) GetNameArg() []*GqlQueryArg { -- if m != nil { -- return m.NameArg -- } -- return nil --} -- --func (m *GqlQuery) GetNumberArg() []*GqlQueryArg { -- if m != nil { -- return m.NumberArg -- } -- return nil --} -- --// A binding argument for a GQL query. --// Exactly one of fields value and cursor must be set. --type GqlQueryArg struct { -- // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". -- // Must not match regex "__.*__". -- // Must not be "". -- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -- Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -- Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} } --func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) } --func (*GqlQueryArg) ProtoMessage() {} -- --func (m *GqlQueryArg) GetName() string { -- if m != nil && m.Name != nil { -- return *m.Name -- } -- return "" --} -- --func (m *GqlQueryArg) GetValue() *Value { -- if m != nil { -- return m.Value -- } -- return nil --} -- --func (m *GqlQueryArg) GetCursor() []byte { -- if m != nil { -- return m.Cursor -- } -- return nil --} -- --// A batch of results produced by a query. --type QueryResultBatch struct { -- // The result type for every entity in entityResults. -- EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=pb.EntityResult_ResultType" json:"entity_result_type,omitempty"` -- // The results for this batch. -- EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"` -- // A cursor that points to the position after the last result in the batch. -- // May be absent. -- EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"` -- // The state of the query after the current batch. -- MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=pb.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` -- // The number of results skipped because of Query.offset. 
-- SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } --func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } --func (*QueryResultBatch) ProtoMessage() {} -- --func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { -- if m != nil && m.EntityResultType != nil { -- return *m.EntityResultType -- } -- return EntityResult_FULL --} -- --func (m *QueryResultBatch) GetEntityResult() []*EntityResult { -- if m != nil { -- return m.EntityResult -- } -- return nil --} -- --func (m *QueryResultBatch) GetEndCursor() []byte { -- if m != nil { -- return m.EndCursor -- } -- return nil --} -- --func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { -- if m != nil && m.MoreResults != nil { -- return *m.MoreResults -- } -- return QueryResultBatch_NOT_FINISHED --} -- --func (m *QueryResultBatch) GetSkippedResults() int32 { -- if m != nil && m.SkippedResults != nil { -- return *m.SkippedResults -- } -- return 0 --} -- --// A set of changes to apply. --// --// No entity in this message may have a reserved property name, --// not even a property in an entity in a value. --// No value in this message may have meaning 18, --// not even a value in an entity in another value. --// --// If entities with duplicate keys are present, an arbitrary choice will --// be made as to which is written. --type Mutation struct { -- // Entities to upsert. -- // Each upserted entity's key must have a complete path and -- // must not be reserved/read-only. -- Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"` -- // Entities to update. -- // Each updated entity's key must have a complete path and -- // must not be reserved/read-only. -- Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"` -- // Entities to insert. -- // Each inserted entity's key must have a complete path and -- // must not be reserved/read-only. -- Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"` -- // Insert entities with a newly allocated ID. -- // Each inserted entity's key must omit the final identifier in its path and -- // must not be reserved/read-only. -- InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"` -- // Keys of entities to delete. -- // Each key must have a complete key path and must not be reserved/read-only. -- Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"` -- // Ignore a user specified read-only period. Optional. 
-- Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *Mutation) Reset() { *m = Mutation{} } --func (m *Mutation) String() string { return proto.CompactTextString(m) } --func (*Mutation) ProtoMessage() {} -- --func (m *Mutation) GetUpsert() []*Entity { -- if m != nil { -- return m.Upsert -- } -- return nil --} -- --func (m *Mutation) GetUpdate() []*Entity { -- if m != nil { -- return m.Update -- } -- return nil --} -- --func (m *Mutation) GetInsert() []*Entity { -- if m != nil { -- return m.Insert -- } -- return nil --} -- --func (m *Mutation) GetInsertAutoId() []*Entity { -- if m != nil { -- return m.InsertAutoId -- } -- return nil --} -- --func (m *Mutation) GetDelete() []*Key { -- if m != nil { -- return m.Delete -- } -- return nil --} -- --func (m *Mutation) GetForce() bool { -- if m != nil && m.Force != nil { -- return *m.Force -- } -- return false --} -- --// The result of applying a mutation. --type MutationResult struct { -- // Number of index writes. -- IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"` -- // Keys for insertAutoId entities. One per entity from the -- // request, in the same order. -- InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *MutationResult) Reset() { *m = MutationResult{} } --func (m *MutationResult) String() string { return proto.CompactTextString(m) } --func (*MutationResult) ProtoMessage() {} -- --func (m *MutationResult) GetIndexUpdates() int32 { -- if m != nil && m.IndexUpdates != nil { -- return *m.IndexUpdates -- } -- return 0 --} -- --func (m *MutationResult) GetInsertAutoIdKey() []*Key { -- if m != nil { -- return m.InsertAutoIdKey -- } -- return nil --} -- --// Options shared by read requests. --type ReadOptions struct { -- // The read consistency to use. -- // Cannot be set when transaction is set. -- // Lookup and ancestor queries default to STRONG, global queries default to -- // EVENTUAL and cannot be set to STRONG. -- ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=pb.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"` -- // The transaction to use. Optional. -- Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *ReadOptions) Reset() { *m = ReadOptions{} } --func (m *ReadOptions) String() string { return proto.CompactTextString(m) } --func (*ReadOptions) ProtoMessage() {} -- --const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT -- --func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { -- if m != nil && m.ReadConsistency != nil { -- return *m.ReadConsistency -- } -- return Default_ReadOptions_ReadConsistency --} -- --func (m *ReadOptions) GetTransaction() []byte { -- if m != nil { -- return m.Transaction -- } -- return nil --} -- --// The request for Lookup. --type LookupRequest struct { -- // Options for this lookup request. Optional. -- ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` -- // Keys of entities to look up from the datastore. 
-- Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *LookupRequest) Reset() { *m = LookupRequest{} } --func (m *LookupRequest) String() string { return proto.CompactTextString(m) } --func (*LookupRequest) ProtoMessage() {} -- --func (m *LookupRequest) GetReadOptions() *ReadOptions { -- if m != nil { -- return m.ReadOptions -- } -- return nil --} -- --func (m *LookupRequest) GetKey() []*Key { -- if m != nil { -- return m.Key -- } -- return nil --} -- --// The response for Lookup. --type LookupResponse struct { -- // Entities found as ResultType.FULL entities. -- Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` -- // Entities not found as ResultType.KEY_ONLY entities. -- Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` -- // A list of keys that were not looked up due to resource constraints. -- Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *LookupResponse) Reset() { *m = LookupResponse{} } --func (m *LookupResponse) String() string { return proto.CompactTextString(m) } --func (*LookupResponse) ProtoMessage() {} -- --func (m *LookupResponse) GetFound() []*EntityResult { -- if m != nil { -- return m.Found -- } -- return nil --} -- --func (m *LookupResponse) GetMissing() []*EntityResult { -- if m != nil { -- return m.Missing -- } -- return nil --} -- --func (m *LookupResponse) GetDeferred() []*Key { -- if m != nil { -- return m.Deferred -- } -- return nil --} -- --// The request for RunQuery. --type RunQueryRequest struct { -- // The options for this query. -- ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"` -- // Entities are partitioned into subsets, identified by a dataset (usually -- // implicitly specified by the project) and namespace ID. Queries are scoped -- // to a single partition. -- // This partition ID is normalized with the standard default context -- // partition ID, but all other partition IDs in RunQueryRequest are -- // normalized with this partition ID as the context partition ID. -- PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"` -- // The query to run. -- // Either this field or field gql_query must be set, but not both. -- Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` -- // The GQL query to run. -- // Either this field or field query must be set, but not both. -- GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } --func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } --func (*RunQueryRequest) ProtoMessage() {} -- --func (m *RunQueryRequest) GetReadOptions() *ReadOptions { -- if m != nil { -- return m.ReadOptions -- } -- return nil --} -- --func (m *RunQueryRequest) GetPartitionId() *PartitionId { -- if m != nil { -- return m.PartitionId -- } -- return nil --} -- --func (m *RunQueryRequest) GetQuery() *Query { -- if m != nil { -- return m.Query -- } -- return nil --} -- --func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { -- if m != nil { -- return m.GqlQuery -- } -- return nil --} -- --// The response for RunQuery. --type RunQueryResponse struct { -- // A batch of query results (always present). 
-- Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } --func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } --func (*RunQueryResponse) ProtoMessage() {} -- --func (m *RunQueryResponse) GetBatch() *QueryResultBatch { -- if m != nil { -- return m.Batch -- } -- return nil --} -- --// The request for BeginTransaction. --type BeginTransactionRequest struct { -- // The transaction isolation level. -- IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=pb.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } --func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } --func (*BeginTransactionRequest) ProtoMessage() {} -- --const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT -- --func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel { -- if m != nil && m.IsolationLevel != nil { -- return *m.IsolationLevel -- } -- return Default_BeginTransactionRequest_IsolationLevel --} -- --// The response for BeginTransaction. --type BeginTransactionResponse struct { -- // The transaction identifier (always present). -- Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } --func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } --func (*BeginTransactionResponse) ProtoMessage() {} -- --func (m *BeginTransactionResponse) GetTransaction() []byte { -- if m != nil { -- return m.Transaction -- } -- return nil --} -- --// The request for Rollback. --type RollbackRequest struct { -- // The transaction identifier, returned by a call to -- // beginTransaction. -- Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } --func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } --func (*RollbackRequest) ProtoMessage() {} -- --func (m *RollbackRequest) GetTransaction() []byte { -- if m != nil { -- return m.Transaction -- } -- return nil --} -- --// The response for Rollback. --type RollbackResponse struct { -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } --func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } --func (*RollbackResponse) ProtoMessage() {} -- --// The request for Commit. --type CommitRequest struct { -- // The transaction identifier, returned by a call to -- // beginTransaction. Must be set when mode is TRANSACTIONAL. -- Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"` -- // The mutation to perform. Optional. -- Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` -- // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. 
-- Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=pb.CommitRequest_Mode,def=1" json:"mode,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *CommitRequest) Reset() { *m = CommitRequest{} } --func (m *CommitRequest) String() string { return proto.CompactTextString(m) } --func (*CommitRequest) ProtoMessage() {} -- --const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL -- --func (m *CommitRequest) GetTransaction() []byte { -- if m != nil { -- return m.Transaction -- } -- return nil --} -- --func (m *CommitRequest) GetMutation() *Mutation { -- if m != nil { -- return m.Mutation -- } -- return nil --} -- --func (m *CommitRequest) GetMode() CommitRequest_Mode { -- if m != nil && m.Mode != nil { -- return *m.Mode -- } -- return Default_CommitRequest_Mode --} -- --// The response for Commit. --type CommitResponse struct { -- // The result of performing the mutation (if any). -- MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *CommitResponse) Reset() { *m = CommitResponse{} } --func (m *CommitResponse) String() string { return proto.CompactTextString(m) } --func (*CommitResponse) ProtoMessage() {} -- --func (m *CommitResponse) GetMutationResult() *MutationResult { -- if m != nil { -- return m.MutationResult -- } -- return nil --} -- --// The request for AllocateIds. --type AllocateIdsRequest struct { -- // A list of keys with incomplete key paths to allocate IDs for. -- // No key may be reserved/read-only. -- Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } --func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } --func (*AllocateIdsRequest) ProtoMessage() {} -- --func (m *AllocateIdsRequest) GetKey() []*Key { -- if m != nil { -- return m.Key -- } -- return nil --} -- --// The response for AllocateIds. --type AllocateIdsResponse struct { -- // The keys specified in the request (in the same order), each with -- // its key path completed with a newly allocated ID. 
-- Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` -- XXX_unrecognized []byte `json:"-"` --} -- --func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } --func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } --func (*AllocateIdsResponse) ProtoMessage() {} -- --func (m *AllocateIdsResponse) GetKey() []*Key { -- if m != nil { -- return m.Key -- } -- return nil --} -- --func init() { -- proto.RegisterEnum("pb.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) -- proto.RegisterEnum("pb.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value) -- proto.RegisterEnum("pb.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) -- proto.RegisterEnum("pb.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) -- proto.RegisterEnum("pb.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) -- proto.RegisterEnum("pb.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) -- proto.RegisterEnum("pb.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) -- proto.RegisterEnum("pb.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value) -- proto.RegisterEnum("pb.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) --} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto -deleted file mode 100644 -index bb4c199..0000000 ---- a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto -+++ /dev/null -@@ -1,594 +0,0 @@ --// Copyright 2013 Google Inc. All Rights Reserved. --// --// The datastore v1 service proto definitions -- --syntax = "proto2"; -- --package pb; --option java_package = "com.google.api.services.datastore"; -- -- --// An identifier for a particular subset of entities. --// --// Entities are partitioned into various subsets, each used by different --// datasets and different namespaces within a dataset and so forth. --// --// All input partition IDs are normalized before use. --// A partition ID is normalized as follows: --// If the partition ID is unset or is set to an empty partition ID, replace it --// with the context partition ID. --// Otherwise, if the partition ID has no dataset ID, assign it the context --// partition ID's dataset ID. --// Unless otherwise documented, the context partition ID has the dataset ID set --// to the context dataset ID and no other partition dimension set. --// --// A partition ID is empty if all of its fields are unset. --// --// Partition dimension: --// A dimension may be unset. --// A dimension's value must never be "". --// A dimension's value must match [A-Za-z\d\.\-_]{1,100} --// If the value of any dimension matches regex "__.*__", --// the partition is reserved/read-only. --// A reserved/read-only partition ID is forbidden in certain documented contexts. --// --// Dataset ID: --// A dataset id's value must never be "". --// A dataset id's value must match --// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99} --message PartitionId { -- // The dataset ID. -- optional string dataset_id = 3; -- // The namespace. 
-- optional string namespace = 4; --} -- --// A unique identifier for an entity. --// If a key's partition id or any of its path kinds or names are --// reserved/read-only, the key is reserved/read-only. --// A reserved/read-only key is forbidden in certain documented contexts. --message Key { -- // Entities are partitioned into subsets, currently identified by a dataset -- // (usually implicitly specified by the project) and namespace ID. -- // Queries are scoped to a single partition. -- optional PartitionId partition_id = 1; -- -- // A (kind, ID/name) pair used to construct a key path. -- // -- // At most one of name or ID may be set. -- // If either is set, the element is complete. -- // If neither is set, the element is incomplete. -- message PathElement { -- // The kind of the entity. -- // A kind matching regex "__.*__" is reserved/read-only. -- // A kind must not contain more than 500 characters. -- // Cannot be "". -- required string kind = 1; -- // The ID of the entity. -- // Never equal to zero. Values less than zero are discouraged and will not -- // be supported in the future. -- optional int64 id = 2; -- // The name of the entity. -- // A name matching regex "__.*__" is reserved/read-only. -- // A name must not be more than 500 characters. -- // Cannot be "". -- optional string name = 3; -- } -- -- // The entity path. -- // An entity path consists of one or more elements composed of a kind and a -- // string or numerical identifier, which identify entities. The first -- // element identifies a root entity, the second element identifies -- // a child of the root entity, the third element a child of the -- // second entity, and so forth. The entities identified by all prefixes of -- // the path are called the element's ancestors. -- // An entity path is always fully complete: ALL of the entity's ancestors -- // are required to be in the path along with the entity identifier itself. -- // The only exception is that in some documented cases, the identifier in the -- // last path element (for the entity) itself may be omitted. A path can never -- // be empty. -- repeated PathElement path_element = 2; --} -- --// A message that can hold any of the supported value types and associated --// metadata. --// --// At most one of the Value fields may be set. --// If none are set the value is "null". --// --message Value { -- // A boolean value. -- optional bool boolean_value = 1; -- // An integer value. -- optional int64 integer_value = 2; -- // A double value. -- optional double double_value = 3; -- // A timestamp value. -- optional int64 timestamp_microseconds_value = 4; -- // A key value. -- optional Key key_value = 5; -- // A blob key value. -- optional string blob_key_value = 16; -- // A UTF-8 encoded string value. -- optional string string_value = 17; -- // A blob value. -- optional bytes blob_value = 18; -- // An entity value. -- // May have no key. -- // May have a key with an incomplete key path. -- // May have a reserved/read-only key. -- optional Entity entity_value = 6; -- // A list value. -- // Cannot contain another list value. -- // Cannot also have a meaning and indexing set. -- repeated Value list_value = 7; -- -- // The meaning field is reserved and should not be used. -- optional int32 meaning = 14; -- -- // If the value should be indexed. -- // -- // The indexed property may be set for a -- // null value. -- // When indexed is true, stringValue -- // is limited to 500 characters and the blob value is limited to 500 bytes. 
-- // Exception: If meaning is set to 2, string_value is limited to 2038 -- // characters regardless of indexed. -- // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16 -- // will be ignored on input (and will never be set on output). -- // Input values by default have indexed set to -- // true; however, you can explicitly set indexed to -- // true if you want. (An output value never has -- // indexed explicitly set to true.) If a value is -- // itself an entity, it cannot have indexed set to -- // true. -- // Exception: An entity value with meaning 9, 20 or 21 may be indexed. -- optional bool indexed = 15 [default = true]; --} -- --// An entity property. --message Property { -- // The name of the property. -- // A property name matching regex "__.*__" is reserved. -- // A reserved property name is forbidden in certain documented contexts. -- // The name must not contain more than 500 characters. -- // Cannot be "". -- required string name = 1; -- -- // The value(s) of the property. -- // Each value can have only one value property populated. For example, -- // you cannot have a values list of { value: { integerValue: 22, -- // stringValue: "a" } }, but you can have { value: { listValue: -- // [ { integerValue: 22 }, { stringValue: "a" } ] }. -- required Value value = 4; --} -- --// An entity. --// --// An entity is limited to 1 megabyte when stored. That roughly --// corresponds to a limit of 1 megabyte for the serialized form of this --// message. --message Entity { -- // The entity's key. -- // -- // An entity must have a key, unless otherwise documented (for example, -- // an entity in Value.entityValue may have no key). -- // An entity's kind is its key's path's last element's kind, -- // or null if it has no key. -- optional Key key = 1; -- // The entity's properties. -- // Each property's name must be unique for its entity. -- repeated Property property = 2; --} -- --// The result of fetching an entity from the datastore. --message EntityResult { -- // Specifies what data the 'entity' field contains. -- // A ResultType is either implied (for example, in LookupResponse.found it -- // is always FULL) or specified by context (for example, in message -- // QueryResultBatch, field 'entity_result_type' specifies a ResultType -- // for all the values in field 'entity_result'). -- enum ResultType { -- FULL = 1; // The entire entity. -- PROJECTION = 2; // A projected subset of properties. -- // The entity may have no key. -- // A property value may have meaning 18. -- KEY_ONLY = 3; // Only the key. -- } -- -- // The resulting entity. -- required Entity entity = 1; --} -- --// A query. --message Query { -- // The projection to return. If not set the entire entity is returned. -- repeated PropertyExpression projection = 2; -- -- // The kinds to query (if empty, returns entities from all kinds). -- repeated KindExpression kind = 3; -- -- // The filter to apply (optional). -- optional Filter filter = 4; -- -- // The order to apply to the query results (if empty, order is unspecified). -- repeated PropertyOrder order = 5; -- -- // The properties to group by (if empty, no grouping is applied to the -- // result set). -- repeated PropertyReference group_by = 6; -- -- // A starting point for the query results. Optional. Query cursors are -- // returned in query result batches. -- optional bytes /* serialized QueryCursor */ start_cursor = 7; -- -- // An ending point for the query results. Optional. Query cursors are -- // returned in query result batches. 
-- optional bytes /* serialized QueryCursor */ end_cursor = 8; -- -- // The number of results to skip. Applies before limit, but after all other -- // constraints (optional, defaults to 0). -- optional int32 offset = 10 [default=0]; -- -- // The maximum number of results to return. Applies after all other -- // constraints. Optional. -- optional int32 limit = 11; --} -- --// A representation of a kind. --message KindExpression { -- // The name of the kind. -- required string name = 1; --} -- --// A reference to a property relative to the kind expressions. --// exactly. --message PropertyReference { -- // The name of the property. -- required string name = 2; --} -- --// A representation of a property in a projection. --message PropertyExpression { -- enum AggregationFunction { -- FIRST = 1; -- } -- // The property to project. -- required PropertyReference property = 1; -- // The aggregation function to apply to the property. Optional. -- // Can only be used when grouping by at least one property. Must -- // then be set on all properties in the projection that are not -- // being grouped by. -- optional AggregationFunction aggregation_function = 2; --} -- --// The desired order for a specific property. --message PropertyOrder { -- enum Direction { -- ASCENDING = 1; -- DESCENDING = 2; -- } -- // The property to order by. -- required PropertyReference property = 1; -- // The direction to order by. -- optional Direction direction = 2 [default=ASCENDING]; --} -- --// A holder for any type of filter. Exactly one field should be specified. --message Filter { -- // A composite filter. -- optional CompositeFilter composite_filter = 1; -- // A filter on a property. -- optional PropertyFilter property_filter = 2; --} -- --// A filter that merges the multiple other filters using the given operation. --message CompositeFilter { -- enum Operator { -- AND = 1; -- } -- -- // The operator for combining multiple filters. -- required Operator operator = 1; -- // The list of filters to combine. -- // Must contain at least one filter. -- repeated Filter filter = 2; --} -- --// A filter on a specific property. --message PropertyFilter { -- enum Operator { -- LESS_THAN = 1; -- LESS_THAN_OR_EQUAL = 2; -- GREATER_THAN = 3; -- GREATER_THAN_OR_EQUAL = 4; -- EQUAL = 5; -- -- HAS_ANCESTOR = 11; -- } -- -- // The property to filter by. -- required PropertyReference property = 1; -- // The operator to filter by. -- required Operator operator = 2; -- // The value to compare the property to. -- required Value value = 3; --} -- --// A GQL query. --message GqlQuery { -- required string query_string = 1; -- // When false, the query string must not contain a literal. -- optional bool allow_literal = 2 [default = false]; -- // A named argument must set field GqlQueryArg.name. -- // No two named arguments may have the same name. -- // For each non-reserved named binding site in the query string, -- // there must be a named argument with that name, -- // but not necessarily the inverse. -- repeated GqlQueryArg name_arg = 3; -- // Numbered binding site @1 references the first numbered argument, -- // effectively using 1-based indexing, rather than the usual 0. -- // A numbered argument must NOT set field GqlQueryArg.name. -- // For each binding site numbered i in query_string, -- // there must be an ith numbered argument. -- // The inverse must also be true. -- repeated GqlQueryArg number_arg = 4; --} -- --// A binding argument for a GQL query. --// Exactly one of fields value and cursor must be set. 
--message GqlQueryArg { -- // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*". -- // Must not match regex "__.*__". -- // Must not be "". -- optional string name = 1; -- optional Value value = 2; -- optional bytes cursor = 3; --} -- --// A batch of results produced by a query. --message QueryResultBatch { -- // The possible values for the 'more_results' field. -- enum MoreResultsType { -- NOT_FINISHED = 1; // There are additional batches to fetch from this query. -- MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more -- // results after the limit. -- NO_MORE_RESULTS = 3; // The query has been exhausted. -- } -- -- // The result type for every entity in entityResults. -- required EntityResult.ResultType entity_result_type = 1; -- // The results for this batch. -- repeated EntityResult entity_result = 2; -- -- // A cursor that points to the position after the last result in the batch. -- // May be absent. -- optional bytes /* serialized QueryCursor */ end_cursor = 4; -- -- // The state of the query after the current batch. -- required MoreResultsType more_results = 5; -- -- // The number of results skipped because of Query.offset. -- optional int32 skipped_results = 6; --} -- --// A set of changes to apply. --// --// No entity in this message may have a reserved property name, --// not even a property in an entity in a value. --// No value in this message may have meaning 18, --// not even a value in an entity in another value. --// --// If entities with duplicate keys are present, an arbitrary choice will --// be made as to which is written. --message Mutation { -- // Entities to upsert. -- // Each upserted entity's key must have a complete path and -- // must not be reserved/read-only. -- repeated Entity upsert = 1; -- // Entities to update. -- // Each updated entity's key must have a complete path and -- // must not be reserved/read-only. -- repeated Entity update = 2; -- // Entities to insert. -- // Each inserted entity's key must have a complete path and -- // must not be reserved/read-only. -- repeated Entity insert = 3; -- // Insert entities with a newly allocated ID. -- // Each inserted entity's key must omit the final identifier in its path and -- // must not be reserved/read-only. -- repeated Entity insert_auto_id = 4; -- // Keys of entities to delete. -- // Each key must have a complete key path and must not be reserved/read-only. -- repeated Key delete = 5; -- // Ignore a user specified read-only period. Optional. -- optional bool force = 6; --} -- --// The result of applying a mutation. --message MutationResult { -- // Number of index writes. -- required int32 index_updates = 1; -- // Keys for insertAutoId entities. One per entity from the -- // request, in the same order. -- repeated Key insert_auto_id_key = 2; --} -- --// Options shared by read requests. --message ReadOptions { -- enum ReadConsistency { -- DEFAULT = 0; -- STRONG = 1; -- EVENTUAL = 2; -- } -- -- // The read consistency to use. -- // Cannot be set when transaction is set. -- // Lookup and ancestor queries default to STRONG, global queries default to -- // EVENTUAL and cannot be set to STRONG. -- optional ReadConsistency read_consistency = 1 [default=DEFAULT]; -- -- // The transaction to use. Optional. -- optional bytes /* serialized Transaction */ transaction = 2; --} -- --// The request for Lookup. --message LookupRequest { -- -- // Options for this lookup request. Optional. -- optional ReadOptions read_options = 1; -- // Keys of entities to look up from the datastore. 
-- repeated Key key = 3; --} -- --// The response for Lookup. --message LookupResponse { -- -- // The order of results in these fields is undefined and has no relation to -- // the order of the keys in the input. -- -- // Entities found as ResultType.FULL entities. -- repeated EntityResult found = 1; -- -- // Entities not found as ResultType.KEY_ONLY entities. -- repeated EntityResult missing = 2; -- -- // A list of keys that were not looked up due to resource constraints. -- repeated Key deferred = 3; --} -- -- --// The request for RunQuery. --message RunQueryRequest { -- -- // The options for this query. -- optional ReadOptions read_options = 1; -- -- // Entities are partitioned into subsets, identified by a dataset (usually -- // implicitly specified by the project) and namespace ID. Queries are scoped -- // to a single partition. -- // This partition ID is normalized with the standard default context -- // partition ID, but all other partition IDs in RunQueryRequest are -- // normalized with this partition ID as the context partition ID. -- optional PartitionId partition_id = 2; -- -- // The query to run. -- // Either this field or field gql_query must be set, but not both. -- optional Query query = 3; -- // The GQL query to run. -- // Either this field or field query must be set, but not both. -- optional GqlQuery gql_query = 7; --} -- --// The response for RunQuery. --message RunQueryResponse { -- -- // A batch of query results (always present). -- optional QueryResultBatch batch = 1; -- --} -- --// The request for BeginTransaction. --message BeginTransactionRequest { -- -- enum IsolationLevel { -- SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions -- // conflict if their mutations conflict. For example: -- // Read(A),Write(B) may not conflict with Read(B),Write(A), -- // but Read(B),Write(B) does conflict with Read(B),Write(B). -- SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent -- // transactions conflict if they cannot be serialized. -- // For example Read(A),Write(B) does conflict with -- // Read(B),Write(A) but Read(A) may not conflict with -- // Write(A). -- } -- -- // The transaction isolation level. -- optional IsolationLevel isolation_level = 1 [default=SNAPSHOT]; --} -- --// The response for BeginTransaction. --message BeginTransactionResponse { -- -- // The transaction identifier (always present). -- optional bytes /* serialized Transaction */ transaction = 1; --} -- --// The request for Rollback. --message RollbackRequest { -- -- // The transaction identifier, returned by a call to -- // beginTransaction. -- required bytes /* serialized Transaction */ transaction = 1; --} -- --// The response for Rollback. --message RollbackResponse { --// Empty --} -- --// The request for Commit. --message CommitRequest { -- -- enum Mode { -- TRANSACTIONAL = 1; -- NON_TRANSACTIONAL = 2; -- } -- -- // The transaction identifier, returned by a call to -- // beginTransaction. Must be set when mode is TRANSACTIONAL. -- optional bytes /* serialized Transaction */ transaction = 1; -- // The mutation to perform. Optional. -- optional Mutation mutation = 2; -- // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL. -- optional Mode mode = 5 [default=TRANSACTIONAL]; --} -- --// The response for Commit. --message CommitResponse { -- -- // The result of performing the mutation (if any). -- optional MutationResult mutation_result = 1; --} -- --// The request for AllocateIds. 
--message AllocateIdsRequest { -- -- // A list of keys with incomplete key paths to allocate IDs for. -- // No key may be reserved/read-only. -- repeated Key key = 1; --} -- --// The response for AllocateIds. --message AllocateIdsResponse { -- -- // The keys specified in the request (in the same order), each with -- // its key path completed with a newly allocated ID. -- repeated Key key = 1; --} -- --// Each rpc normalizes the partition IDs of the keys in its input entities, --// and always returns entities with keys with normalized partition IDs. --// (Note that applies to all entities, including entities in values.) --service DatastoreService { -- // Look up some entities by key. -- rpc Lookup(LookupRequest) returns (LookupResponse) { -- }; -- // Query for entities. -- rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { -- }; -- // Begin a new transaction. -- rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { -- }; -- // Commit a transaction, optionally creating, deleting or modifying some -- // entities. -- rpc Commit(CommitRequest) returns (CommitResponse) { -- }; -- // Roll back a transaction. -- rpc Rollback(RollbackRequest) returns (RollbackResponse) { -- }; -- // Allocate IDs for incomplete keys (useful for referencing an entity before -- // it is inserted). -- rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { -- }; --} -diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go -deleted file mode 100644 -index aafd683..0000000 ---- a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go -+++ /dev/null -@@ -1,57 +0,0 @@ --// Copyright 2014 Google Inc. All Rights Reserved. --// --// Licensed under the Apache License, Version 2.0 (the "License"); --// you may not use this file except in compliance with the License. --// You may obtain a copy of the License at --// --// http://www.apache.org/licenses/LICENSE-2.0 --// --// Unless required by applicable law or agreed to in writing, software --// distributed under the License is distributed on an "AS IS" BASIS, --// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --// See the License for the specific language governing permissions and --// limitations under the License. -- --// Package testutil contains helper functions for writing tests. --package testutil -- --import ( -- "io/ioutil" -- "log" -- "net/http" -- "os" -- -- "golang.org/x/net/context" -- "golang.org/x/oauth2" -- "golang.org/x/oauth2/google" -- "google.golang.org/cloud" --) -- --const ( -- envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" -- envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" --) -- --func Context(scopes ...string) context.Context { -- key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID) -- if key == "" || projID == "" { -- log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.") -- } -- jsonKey, err := ioutil.ReadFile(key) -- if err != nil { -- log.Fatalf("Cannot read the JSON key file, err: %v", err) -- } -- conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) -- if err != nil { -- log.Fatal(err) -- } -- return cloud.NewContext(projID, conf.Client(oauth2.NoContext)) --} -- --func NoAuthContext() context.Context { -- projID := os.Getenv(envProjID) -- if projID == "" { -- log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. 
See CONTRIBUTING.md for details.") -- } -- return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport}) --} --- -1.9.3 - diff --git a/SOURCES/hack-test-cmd.sh.patch b/SOURCES/hack-test-cmd.sh.patch new file mode 100644 index 0000000..bce47e8 --- /dev/null +++ b/SOURCES/hack-test-cmd.sh.patch @@ -0,0 +1,36 @@ +From a739fe820689504f34137a9d7c6783524a6ba217 Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Thu, 26 Nov 2015 10:30:37 +0100 +Subject: [PATCH] hack hack/test-cmd.sh + +--- + hack/lib/init.sh | 2 +- + hack/test-cmd.sh | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/hack/lib/init.sh b/hack/lib/init.sh +index 8a2c428..75ee958 100644 +--- a/hack/lib/init.sh ++++ b/hack/lib/init.sh +@@ -39,4 +39,4 @@ source "${KUBE_ROOT}/hack/lib/version.sh" + source "${KUBE_ROOT}/hack/lib/golang.sh" + source "${KUBE_ROOT}/hack/lib/etcd.sh" + +-KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)" ++KUBE_OUTPUT_HOSTBIN="/usr/bin" +diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh +index fb440ac..0fbc5f4 100755 +--- a/hack/test-cmd.sh ++++ b/hack/test-cmd.sh +@@ -724,7 +724,7 @@ __EOF__ + kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}" + kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}" + # Command +- kubectl scale rc/redis-master rc/redis-slave --replicas=4 ++ kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}" + # Post-condition: 4 replicas each + kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4' + kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4' +-- +1.9.3 + diff --git a/SOURCES/internal-to-inteernal.patch b/SOURCES/internal-to-inteernal.patch new file mode 100644 index 0000000..0f71229 --- /dev/null +++ b/SOURCES/internal-to-inteernal.patch @@ -0,0 +1,3563 @@ +From af5af041018cd0c8b89fde3f8684fe00290b386d Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Wed, 30 Sep 2015 09:57:57 +0200 +Subject: [PATCH] internal to inteernal + +--- + .../src/github.com/gonum/blas/native/dgemm.go | 2 +- + .../github.com/gonum/blas/native/level1double.go | 2 +- + .../gonum/blas/native/level1double_ddot.go | 2 +- + .../github.com/gonum/blas/native/level1single.go | 2 +- + .../gonum/blas/native/level1single_dsdot.go | 2 +- + .../gonum/blas/native/level1single_sdot.go | 2 +- + .../gonum/blas/native/level1single_sdsdot.go | 2 +- + .../github.com/gonum/blas/native/level2double.go | 2 +- + .../github.com/gonum/blas/native/level2single.go | 2 +- + .../github.com/gonum/blas/native/level3double.go | 2 +- + .../github.com/gonum/blas/native/level3single.go | 2 +- + .../src/github.com/gonum/blas/native/sgemm.go | 2 +- + .../src/github.com/gonum/graph/inteernal/linear.go | 73 ++++ + .../src/github.com/gonum/graph/inteernal/set.go | 211 +++++++++++ + .../github.com/gonum/graph/inteernal/set_test.go | 413 +++++++++++++++++++++ + .../src/github.com/gonum/graph/inteernal/sort.go | 28 ++ + .../src/github.com/gonum/graph/internal/linear.go | 73 ---- + .../src/github.com/gonum/graph/internal/set.go | 211 ----------- + .../github.com/gonum/graph/internal/set_test.go | 413 --------------------- + .../src/github.com/gonum/graph/internal/sort.go | 28 -- + .../github.com/gonum/graph/network/betweenness.go | 2 +- + .../src/github.com/gonum/graph/path/a_star.go | 2 +- + .../github.com/gonum/graph/path/control_flow.go | 2 +- + .../github.com/gonum/graph/path/dijkstra_test.go | 2 +- + .../gonum/graph/path/floydwarshall_test.go | 2 
+- + .../gonum/graph/path/johnson_apsp_test.go | 2 +- + .../github.com/gonum/graph/path/spanning_tree.go | 2 +- + .../github.com/gonum/graph/topo/bron_kerbosch.go | 2 +- + .../gonum/graph/topo/bron_kerbosch_test.go | 2 +- + .../github.com/gonum/graph/topo/johnson_cycles.go | 2 +- + .../gonum/graph/topo/johnson_cycles_test.go | 2 +- + .../src/github.com/gonum/graph/topo/tarjan.go | 2 +- + .../src/github.com/gonum/graph/topo/tarjan_test.go | 2 +- + .../src/github.com/gonum/graph/topo/topo_test.go | 2 +- + .../github.com/gonum/graph/traverse/traverse.go | 2 +- + .../gonum/graph/traverse/traverse_test.go | 2 +- + .../src/github.com/gonum/inteernal/asm/caxpy.go | 22 ++ + .../src/github.com/gonum/inteernal/asm/cdotc.go | 23 ++ + .../src/github.com/gonum/inteernal/asm/cdotu.go | 23 ++ + .../src/github.com/gonum/inteernal/asm/complex | 58 +++ + .../src/github.com/gonum/inteernal/asm/conj.go | 7 + + .../src/github.com/gonum/inteernal/asm/daxpy.go | 22 ++ + .../github.com/gonum/inteernal/asm/daxpy_amd64.go | 12 + + .../github.com/gonum/inteernal/asm/daxpy_amd64.s | 140 +++++++ + .../src/github.com/gonum/inteernal/asm/ddot.go | 23 ++ + .../github.com/gonum/inteernal/asm/ddot_amd64.go | 10 + + .../github.com/gonum/inteernal/asm/ddot_amd64.s | 140 +++++++ + .../src/github.com/gonum/inteernal/asm/dsdot.go | 23 ++ + .../src/github.com/gonum/inteernal/asm/generate.go | 8 + + .../src/github.com/gonum/inteernal/asm/saxpy.go | 22 ++ + .../src/github.com/gonum/inteernal/asm/sdot.go | 23 ++ + .../gonum/inteernal/asm/single_precision | 30 ++ + .../src/github.com/gonum/inteernal/asm/zaxpy.go | 22 ++ + .../src/github.com/gonum/inteernal/asm/zdotc.go | 25 ++ + .../src/github.com/gonum/inteernal/asm/zdotu.go | 23 ++ + .../src/github.com/gonum/internal/asm/caxpy.go | 22 -- + .../src/github.com/gonum/internal/asm/cdotc.go | 23 -- + .../src/github.com/gonum/internal/asm/cdotu.go | 23 -- + .../src/github.com/gonum/internal/asm/complex | 58 --- + .../src/github.com/gonum/internal/asm/conj.go | 7 - + .../src/github.com/gonum/internal/asm/daxpy.go | 22 -- + .../github.com/gonum/internal/asm/daxpy_amd64.go | 12 - + .../github.com/gonum/internal/asm/daxpy_amd64.s | 140 ------- + .../src/github.com/gonum/internal/asm/ddot.go | 23 -- + .../github.com/gonum/internal/asm/ddot_amd64.go | 10 - + .../src/github.com/gonum/internal/asm/ddot_amd64.s | 140 ------- + .../src/github.com/gonum/internal/asm/dsdot.go | 23 -- + .../src/github.com/gonum/internal/asm/generate.go | 8 - + .../src/github.com/gonum/internal/asm/saxpy.go | 22 -- + .../src/github.com/gonum/internal/asm/sdot.go | 23 -- + .../github.com/gonum/internal/asm/single_precision | 30 -- + .../src/github.com/gonum/internal/asm/zaxpy.go | 22 -- + .../src/github.com/gonum/internal/asm/zdotc.go | 25 -- + .../src/github.com/gonum/internal/asm/zdotu.go | 23 -- + .../src/github.com/gonum/matrix/mat64/inner.go | 2 +- + pkg/cmd/cli/describe/chaindescriber.go | 2 +- + 76 files changed, 1411 insertions(+), 1411 deletions(-) + create mode 100644 Godeps/_workspace/src/github.com/gonum/graph/inteernal/linear.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/graph/inteernal/set.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/graph/inteernal/set_test.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/graph/inteernal/sort.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/graph/internal/linear.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/graph/internal/set.go + delete mode 100644 
Godeps/_workspace/src/github.com/gonum/graph/internal/set_test.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/graph/internal/sort.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/caxpy.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotc.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotu.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/complex + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/conj.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.s + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.s + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/dsdot.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/generate.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/saxpy.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/sdot.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/single_precision + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/zaxpy.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotc.go + create mode 100644 Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotu.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/caxpy.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/cdotc.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/cdotu.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/complex + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/conj.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.s + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/ddot.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.s + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/dsdot.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/generate.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/saxpy.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/sdot.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/single_precision + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/zaxpy.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/zdotc.go + delete mode 100644 Godeps/_workspace/src/github.com/gonum/internal/asm/zdotu.go + +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/dgemm.go b/Godeps/_workspace/src/github.com/gonum/blas/native/dgemm.go +index 850f62c..8dc9920 100644 +--- 
a/Godeps/_workspace/src/github.com/gonum/blas/native/dgemm.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/dgemm.go +@@ -10,7 +10,7 @@ import ( + "sync" + + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Dgemm computes +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1double.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1double.go +index 0d77243..65f2972 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1double.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1double.go +@@ -8,7 +8,7 @@ import ( + "math" + + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float64Level1 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1double_ddot.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1double_ddot.go +index 7af4e04..84a16ed 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1double_ddot.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1double_ddot.go +@@ -5,7 +5,7 @@ + package native + + import ( +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Ddot computes the dot product of the two vectors +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single.go +index 6bcba83..ca82629 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single.go +@@ -10,7 +10,7 @@ import ( + math "github.com/gonum/blas/native/internal/math32" + + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float32Level1 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_dsdot.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_dsdot.go +index 4665a01..a438155 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_dsdot.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_dsdot.go +@@ -7,7 +7,7 @@ + package native + + import ( +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Dsdot computes the dot product of the two vectors +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdot.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdot.go +index 1e5b565..7d96b74 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdot.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdot.go +@@ -7,7 +7,7 @@ + package native + + import ( +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Sdot computes the dot product of the two vectors +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdsdot.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdsdot.go +index d58be3d..79543d4 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdsdot.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level1single_sdsdot.go +@@ -7,7 +7,7 @@ + package native + + import ( +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Sdsdot computes the dot product of the two vectors plus a constant +diff --git 
a/Godeps/_workspace/src/github.com/gonum/blas/native/level2double.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level2double.go +index 6d59559..af62cd1 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level2double.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level2double.go +@@ -6,7 +6,7 @@ package native + + import ( + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float64Level2 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level2single.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level2single.go +index b213ad6..6720c0e 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level2single.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level2single.go +@@ -8,7 +8,7 @@ package native + + import ( + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float32Level2 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level3double.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level3double.go +index 5a19652..47d5d15 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level3double.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level3double.go +@@ -6,7 +6,7 @@ package native + + import ( + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float64Level3 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/level3single.go b/Godeps/_workspace/src/github.com/gonum/blas/native/level3single.go +index 6bc9a56..82212a0 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/level3single.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/level3single.go +@@ -8,7 +8,7 @@ package native + + import ( + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + var _ blas.Float32Level3 = Implementation{} +diff --git a/Godeps/_workspace/src/github.com/gonum/blas/native/sgemm.go b/Godeps/_workspace/src/github.com/gonum/blas/native/sgemm.go +index 047d4e5..0387945 100644 +--- a/Godeps/_workspace/src/github.com/gonum/blas/native/sgemm.go ++++ b/Godeps/_workspace/src/github.com/gonum/blas/native/sgemm.go +@@ -12,7 +12,7 @@ import ( + "sync" + + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Sgemm computes +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/inteernal/linear.go b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/linear.go +new file mode 100644 +index 0000000..3d64de9 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/linear.go +@@ -0,0 +1,73 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++import ( ++ "github.com/gonum/graph" ++) ++ ++// NodeStack implements a LIFO stack of graph.Node. ++type NodeStack []graph.Node ++ ++// Len returns the number of graph.Nodes on the stack. ++func (s *NodeStack) Len() int { return len(*s) } ++ ++// Pop returns the last graph.Node on the stack and removes it ++// from the stack. ++func (s *NodeStack) Pop() graph.Node { ++ v := *s ++ v, n := v[:len(v)-1], v[len(v)-1] ++ *s = v ++ return n ++} ++ ++// Push adds the node n to the stack at the last position. 
++func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } ++ ++// NodeQueue implements a FIFO queue. ++type NodeQueue struct { ++ head int ++ data []graph.Node ++} ++ ++// Len returns the number of graph.Nodes in the queue. ++func (q *NodeQueue) Len() int { return len(q.data) - q.head } ++ ++// Enqueue adds the node n to the back of the queue. ++func (q *NodeQueue) Enqueue(n graph.Node) { ++ if len(q.data) == cap(q.data) && q.head > 0 { ++ l := q.Len() ++ copy(q.data, q.data[q.head:]) ++ q.head = 0 ++ q.data = append(q.data[:l], n) ++ } else { ++ q.data = append(q.data, n) ++ } ++} ++ ++// Dequeue returns the graph.Node at the front of the queue and ++// removes it from the queue. ++func (q *NodeQueue) Dequeue() graph.Node { ++ if q.Len() == 0 { ++ panic("queue: empty queue") ++ } ++ ++ var n graph.Node ++ n, q.data[q.head] = q.data[q.head], nil ++ q.head++ ++ ++ if q.Len() == 0 { ++ q.head = 0 ++ q.data = q.data[:0] ++ } ++ ++ return n ++} ++ ++// Reset clears the queue for reuse. ++func (q *NodeQueue) Reset() { ++ q.head = 0 ++ q.data = q.data[:0] ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set.go b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set.go +new file mode 100644 +index 0000000..3ad1bc8 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set.go +@@ -0,0 +1,211 @@ ++// Copyright ©2014 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++import ( ++ "unsafe" ++ ++ "github.com/gonum/graph" ++) ++ ++// IntSet is a set of integer identifiers. ++type IntSet map[int]struct{} ++ ++// The simple accessor methods for Set are provided to allow ease of ++// implementation change should the need arise. ++ ++// Add inserts an element into the set. ++func (s IntSet) Add(e int) { ++ s[e] = struct{}{} ++} ++ ++// Has reports the existence of the element in the set. ++func (s IntSet) Has(e int) bool { ++ _, ok := s[e] ++ return ok ++} ++ ++// Remove deletes the specified element from the set. ++func (s IntSet) Remove(e int) { ++ delete(s, e) ++} ++ ++// Count reports the number of elements stored in the set. ++func (s IntSet) Count() int { ++ return len(s) ++} ++ ++// Same determines whether two sets are backed by the same store. In the ++// current implementation using hash maps it makes use of the fact that ++// hash maps (at least in the gc implementation) are passed as a pointer ++// to a runtime Hmap struct. ++// ++// A map is not seen by the runtime as a pointer though, so we cannot ++// directly compare the sets converted to unsafe.Pointer and need to take ++// the sets' addressed and dereference them as pointers to some comparable ++// type. ++func Same(s1, s2 Set) bool { ++ return *(*uintptr)(unsafe.Pointer(&s1)) == *(*uintptr)(unsafe.Pointer(&s2)) ++} ++ ++// A set is a set of nodes keyed in their integer identifiers. ++type Set map[int]graph.Node ++ ++// The simple accessor methods for Set are provided to allow ease of ++// implementation change should the need arise. ++ ++// Add inserts an element into the set. ++func (s Set) Add(n graph.Node) { ++ s[n.ID()] = n ++} ++ ++// Remove deletes the specified element from the set. ++func (s Set) Remove(e graph.Node) { ++ delete(s, e.ID()) ++} ++ ++// Has reports the existence of the element in the set. ++func (s Set) Has(n graph.Node) bool { ++ _, ok := s[n.ID()] ++ return ok ++} ++ ++// Clear returns an empty set, possibly using the same backing store. 
++// Clear is not provided as a method since there is no way to replace ++// the calling value if clearing is performed by a make(set). Clear ++// should never be called without keeping the returned value. ++func Clear(s Set) Set { ++ if len(s) == 0 { ++ return s ++ } ++ ++ return make(Set) ++} ++ ++// Copy performs a perfect copy from s1 to dst (meaning the sets will ++// be equal). ++func (dst Set) Copy(src Set) Set { ++ if Same(src, dst) { ++ return dst ++ } ++ ++ if len(dst) > 0 { ++ dst = make(Set, len(src)) ++ } ++ ++ for e, n := range src { ++ dst[e] = n ++ } ++ ++ return dst ++} ++ ++// Equal reports set equality between the parameters. Sets are equal if ++// and only if they have the same elements. ++func Equal(s1, s2 Set) bool { ++ if Same(s1, s2) { ++ return true ++ } ++ ++ if len(s1) != len(s2) { ++ return false ++ } ++ ++ for e := range s1 { ++ if _, ok := s2[e]; !ok { ++ return false ++ } ++ } ++ ++ return true ++} ++ ++// Union takes the union of s1 and s2, and stores it in dst. ++// ++// The union of two sets, s1 and s2, is the set containing all the ++// elements of each, for instance: ++// ++// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} ++// ++// Since sets may not have repetition, unions of two sets that overlap ++// do not contain repeat elements, that is: ++// ++// {a,b,c} UNION {b,c,d} = {a,b,c,d} ++// ++func (dst Set) Union(s1, s2 Set) Set { ++ if Same(s1, s2) { ++ return dst.Copy(s1) ++ } ++ ++ if !Same(s1, dst) && !Same(s2, dst) { ++ dst = Clear(dst) ++ } ++ ++ if !Same(dst, s1) { ++ for e, n := range s1 { ++ dst[e] = n ++ } ++ } ++ ++ if !Same(dst, s2) { ++ for e, n := range s2 { ++ dst[e] = n ++ } ++ } ++ ++ return dst ++} ++ ++// Intersect takes the intersection of s1 and s2, and stores it in dst. ++// ++// The intersection of two sets, s1 and s2, is the set containing all ++// the elements shared between the two sets, for instance: ++// ++// {a,b,c} INTERSECT {b,c,d} = {b,c} ++// ++// The intersection between a set and itself is itself, and thus ++// effectively a copy operation: ++// ++// {a,b,c} INTERSECT {a,b,c} = {a,b,c} ++// ++// The intersection between two sets that share no elements is the empty ++// set: ++// ++// {a,b,c} INTERSECT {d,e,f} = {} ++// ++func (dst Set) Intersect(s1, s2 Set) Set { ++ var swap Set ++ ++ if Same(s1, s2) { ++ return dst.Copy(s1) ++ } ++ if Same(s1, dst) { ++ swap = s2 ++ } else if Same(s2, dst) { ++ swap = s1 ++ } else { ++ dst = Clear(dst) ++ ++ if len(s1) > len(s2) { ++ s1, s2 = s2, s1 ++ } ++ ++ for e, n := range s1 { ++ if _, ok := s2[e]; ok { ++ dst[e] = n ++ } ++ } ++ ++ return dst ++ } ++ ++ for e := range dst { ++ if _, ok := swap[e]; !ok { ++ delete(dst, e) ++ } ++ } ++ ++ return dst ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set_test.go b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set_test.go +new file mode 100644 +index 0000000..fb39620 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/set_test.go +@@ -0,0 +1,413 @@ ++// Copyright ©2014 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++import "testing" ++ ++type node int ++ ++func (n node) ID() int { return int(n) } ++ ++// count reports the number of elements stored in the set. 
++func (s Set) count() int { ++ return len(s) ++} ++ ++// TestSame tests the assumption that pointer equality via unsafe conversion ++// of a map[int]struct{} to uintptr is a valid test for perfect identity between ++// set values. If any of the tests in TestSame fail, the package is broken and same ++// must be reimplemented to conform to the runtime map implementation. The relevant ++// code to look at (at least for gc) is in runtime/hashmap.{h,goc}. ++func TestSame(t *testing.T) { ++ var ( ++ a = make(Set) ++ b = make(Set) ++ c = a ++ ) ++ ++ if Same(a, b) { ++ t.Error("Independently created sets test as same") ++ } ++ if !Same(a, c) { ++ t.Error("Set copy and original test as not same.") ++ } ++ a.Add(node(1)) ++ if !Same(a, c) { ++ t.Error("Set copy and original test as not same after addition.") ++ } ++ if !Same(nil, nil) { ++ t.Error("nil sets test as not same.") ++ } ++ if Same(b, nil) { ++ t.Error("nil and empty sets test as same.") ++ } ++} ++ ++func TestAdd(t *testing.T) { ++ s := make(Set) ++ if s == nil { ++ t.Fatal("Set cannot be created successfully") ++ } ++ ++ if s.count() != 0 { ++ t.Error("Set somehow contains new elements upon creation") ++ } ++ ++ s.Add(node(1)) ++ s.Add(node(3)) ++ s.Add(node(5)) ++ ++ if s.count() != 3 { ++ t.Error("Incorrect number of set elements after adding") ++ } ++ ++ if !s.Has(node(1)) || !s.Has(node(3)) || !s.Has(node(5)) { ++ t.Error("Set doesn't contain element that was added") ++ } ++ ++ s.Add(node(1)) ++ ++ if s.count() > 3 { ++ t.Error("Set double-adds element (element not unique)") ++ } else if s.count() < 3 { ++ t.Error("Set double-add lowered len") ++ } ++ ++ if !s.Has(node(1)) { ++ t.Error("Set doesn't contain double-added element") ++ } ++ ++ if !s.Has(node(3)) || !s.Has(node(5)) { ++ t.Error("Set removes element on double-add") ++ } ++ ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key: %d != %d", e, n.ID()) ++ } ++ } ++} ++ ++func TestRemove(t *testing.T) { ++ s := make(Set) ++ ++ s.Add(node(1)) ++ s.Add(node(3)) ++ s.Add(node(5)) ++ ++ s.Remove(node(1)) ++ ++ if s.count() != 2 { ++ t.Error("Incorrect number of set elements after removing an element") ++ } ++ ++ if s.Has(node(1)) { ++ t.Error("Element present after removal") ++ } ++ ++ if !s.Has(node(3)) || !s.Has(node(5)) { ++ t.Error("Set remove removed wrong element") ++ } ++ ++ s.Remove(node(1)) ++ ++ if s.count() != 2 || s.Has(node(1)) { ++ t.Error("Double set remove does something strange") ++ } ++ ++ s.Add(node(1)) ++ ++ if s.count() != 3 || !s.Has(node(1)) { ++ t.Error("Cannot add element after removal") ++ } ++} ++ ++func TestClear(t *testing.T) { ++ s := make(Set) ++ ++ s.Add(node(8)) ++ s.Add(node(9)) ++ s.Add(node(10)) ++ ++ s = Clear(s) ++ ++ if s.count() != 0 { ++ t.Error("Clear did not properly reset set to size 0") ++ } ++} ++ ++func TestSelfEqual(t *testing.T) { ++ s := make(Set) ++ ++ if !Equal(s, s) { ++ t.Error("Set is not equal to itself") ++ } ++ ++ s.Add(node(1)) ++ ++ if !Equal(s, s) { ++ t.Error("Set ceases self equality after adding element") ++ } ++} ++ ++func TestEqual(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ ++ if !Equal(s1, s2) { ++ t.Error("Two different empty sets not equal") ++ } ++ ++ s1.Add(node(1)) ++ if Equal(s1, s2) { ++ t.Error("Two different sets with different elements not equal") ++ } ++ ++ s2.Add(node(1)) ++ if !Equal(s1, s2) { ++ t.Error("Two sets with same element not equal") ++ } ++} ++ ++func TestCopy(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ ++ s1.Add(node(1)) ++ 
s1.Add(node(2)) ++ s1.Add(node(3)) ++ ++ s2.Copy(s1) ++ ++ if !Equal(s1, s2) { ++ t.Fatalf("Two sets not equal after copy") ++ } ++ ++ s2.Remove(node(1)) ++ ++ if Equal(s1, s2) { ++ t.Errorf("Mutating one set mutated another after copy") ++ } ++} ++ ++func TestSelfCopy(t *testing.T) { ++ s1 := make(Set) ++ ++ s1.Add(node(1)) ++ s1.Add(node(2)) ++ ++ s1.Copy(s1) ++ ++ if s1.count() != 2 { ++ t.Error("Something strange happened when copying into self") ++ } ++} ++ ++func TestUnionSame(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(1)) ++ s1.Add(node(2)) ++ ++ s2.Add(node(1)) ++ s2.Add(node(2)) ++ ++ s3.Union(s1, s2) ++ ++ if s3.count() != 2 { ++ t.Error("Union of same sets yields set with wrong len") ++ } ++ ++ if !s3.Has(node(1)) || !s3.Has(node(2)) { ++ t.Error("Union of same sets yields wrong elements") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) ++ } ++ } ++ } ++} ++ ++func TestUnionDiff(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(1)) ++ s1.Add(node(2)) ++ ++ s2.Add(node(3)) ++ ++ s3.Union(s1, s2) ++ ++ if s3.count() != 3 { ++ t.Error("Union of different sets yields set with wrong len") ++ } ++ ++ if !s3.Has(node(1)) || !s3.Has(node(2)) || !s3.Has(node(3)) { ++ t.Error("Union of different sets yields set with wrong elements") ++ } ++ ++ if s1.Has(node(3)) || !s1.Has(node(2)) || !s1.Has(node(1)) || s1.count() != 2 { ++ t.Error("Union of sets mutates non-destination set (argument 1)") ++ } ++ ++ if !s2.Has(node(3)) || s2.Has(node(1)) || s2.Has(node(2)) || s2.count() != 1 { ++ t.Error("Union of sets mutates non-destination set (argument 2)") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) ++ } ++ } ++ } ++} ++ ++func TestUnionOverlapping(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(1)) ++ s1.Add(node(2)) ++ ++ s2.Add(node(2)) ++ s2.Add(node(3)) ++ ++ s3.Union(s1, s2) ++ ++ if s3.count() != 3 { ++ t.Error("Union of overlapping sets yields set with wrong len") ++ } ++ ++ if !s3.Has(node(1)) || !s3.Has(node(2)) || !s3.Has(node(3)) { ++ t.Error("Union of overlapping sets yields set with wrong elements") ++ } ++ ++ if s1.Has(node(3)) || !s1.Has(node(2)) || !s1.Has(node(1)) || s1.count() != 2 { ++ t.Error("Union of sets mutates non-destination set (argument 1)") ++ } ++ ++ if !s2.Has(node(3)) || s2.Has(node(1)) || !s2.Has(node(2)) || s2.count() != 2 { ++ t.Error("Union of sets mutates non-destination set (argument 2)") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) ++ } ++ } ++ } ++} ++ ++func TestIntersectSame(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(2)) ++ s1.Add(node(3)) ++ ++ s2.Add(node(2)) ++ s2.Add(node(3)) ++ ++ s3.Intersect(s1, s2) ++ ++ if card := s3.count(); card != 2 { ++ t.Errorf("Intersection of identical sets yields set of wrong len %d", card) ++ } ++ ++ if !s3.Has(node(2)) || !s3.Has(node(3)) { ++ t.Error("Intersection of identical sets yields set of wrong elements") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) 
++ } ++ } ++ } ++} ++ ++func TestIntersectDiff(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(2)) ++ s1.Add(node(3)) ++ ++ s2.Add(node(1)) ++ s2.Add(node(4)) ++ ++ s3.Intersect(s1, s2) ++ ++ if card := s3.count(); card != 0 { ++ t.Errorf("Intersection of different yields non-empty set %d", card) ++ } ++ ++ if !s1.Has(node(2)) || !s1.Has(node(3)) || s1.Has(node(1)) || s1.Has(node(4)) || s1.count() != 2 { ++ t.Error("Intersection of sets mutates non-destination set (argument 1)") ++ } ++ ++ if s2.Has(node(2)) || s2.Has(node(3)) || !s2.Has(node(1)) || !s2.Has(node(4)) || s2.count() != 2 { ++ t.Error("Intersection of sets mutates non-destination set (argument 1)") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) ++ } ++ } ++ } ++} ++ ++func TestIntersectOverlapping(t *testing.T) { ++ s1 := make(Set) ++ s2 := make(Set) ++ s3 := make(Set) ++ ++ s1.Add(node(2)) ++ s1.Add(node(3)) ++ ++ s2.Add(node(3)) ++ s2.Add(node(4)) ++ ++ s3.Intersect(s1, s2) ++ ++ if card := s3.count(); card != 1 { ++ t.Errorf("Intersection of overlapping sets yields set of incorrect len %d", card) ++ } ++ ++ if !s3.Has(node(3)) { ++ t.Errorf("Intersection of overlapping sets yields set with wrong element") ++ } ++ ++ if !s1.Has(node(2)) || !s1.Has(node(3)) || s1.Has(node(4)) || s1.count() != 2 { ++ t.Error("Intersection of sets mutates non-destination set (argument 1)") ++ } ++ ++ if s2.Has(node(2)) || !s2.Has(node(3)) || !s2.Has(node(4)) || s2.count() != 2 { ++ t.Error("Intersection of sets mutates non-destination set (argument 1)") ++ } ++ ++ for i, s := range []Set{s1, s2, s3} { ++ for e, n := range s { ++ if e != n.ID() { ++ t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) ++ } ++ } ++ } ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/inteernal/sort.go b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/sort.go +new file mode 100644 +index 0000000..3bfee0f +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/graph/inteernal/sort.go +@@ -0,0 +1,28 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++// BySliceValues implements the sort.Interface sorting a slice of ++// []int lexically by the values of the []int. ++type BySliceValues [][]int ++ ++func (c BySliceValues) Len() int { return len(c) } ++func (c BySliceValues) Less(i, j int) bool { ++ a, b := c[i], c[j] ++ l := len(a) ++ if len(b) < l { ++ l = len(b) ++ } ++ for k, v := range a[:l] { ++ if v < b[k] { ++ return true ++ } ++ if v > b[k] { ++ return false ++ } ++ } ++ return len(a) < len(b) ++} ++func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/internal/linear.go b/Godeps/_workspace/src/github.com/gonum/graph/internal/linear.go +deleted file mode 100644 +index 3d64de9..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/graph/internal/linear.go ++++ /dev/null +@@ -1,73 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package internal +- +-import ( +- "github.com/gonum/graph" +-) +- +-// NodeStack implements a LIFO stack of graph.Node. 
+-type NodeStack []graph.Node +- +-// Len returns the number of graph.Nodes on the stack. +-func (s *NodeStack) Len() int { return len(*s) } +- +-// Pop returns the last graph.Node on the stack and removes it +-// from the stack. +-func (s *NodeStack) Pop() graph.Node { +- v := *s +- v, n := v[:len(v)-1], v[len(v)-1] +- *s = v +- return n +-} +- +-// Push adds the node n to the stack at the last position. +-func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } +- +-// NodeQueue implements a FIFO queue. +-type NodeQueue struct { +- head int +- data []graph.Node +-} +- +-// Len returns the number of graph.Nodes in the queue. +-func (q *NodeQueue) Len() int { return len(q.data) - q.head } +- +-// Enqueue adds the node n to the back of the queue. +-func (q *NodeQueue) Enqueue(n graph.Node) { +- if len(q.data) == cap(q.data) && q.head > 0 { +- l := q.Len() +- copy(q.data, q.data[q.head:]) +- q.head = 0 +- q.data = append(q.data[:l], n) +- } else { +- q.data = append(q.data, n) +- } +-} +- +-// Dequeue returns the graph.Node at the front of the queue and +-// removes it from the queue. +-func (q *NodeQueue) Dequeue() graph.Node { +- if q.Len() == 0 { +- panic("queue: empty queue") +- } +- +- var n graph.Node +- n, q.data[q.head] = q.data[q.head], nil +- q.head++ +- +- if q.Len() == 0 { +- q.head = 0 +- q.data = q.data[:0] +- } +- +- return n +-} +- +-// Reset clears the queue for reuse. +-func (q *NodeQueue) Reset() { +- q.head = 0 +- q.data = q.data[:0] +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/internal/set.go b/Godeps/_workspace/src/github.com/gonum/graph/internal/set.go +deleted file mode 100644 +index 3ad1bc8..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/graph/internal/set.go ++++ /dev/null +@@ -1,211 +0,0 @@ +-// Copyright ©2014 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package internal +- +-import ( +- "unsafe" +- +- "github.com/gonum/graph" +-) +- +-// IntSet is a set of integer identifiers. +-type IntSet map[int]struct{} +- +-// The simple accessor methods for Set are provided to allow ease of +-// implementation change should the need arise. +- +-// Add inserts an element into the set. +-func (s IntSet) Add(e int) { +- s[e] = struct{}{} +-} +- +-// Has reports the existence of the element in the set. +-func (s IntSet) Has(e int) bool { +- _, ok := s[e] +- return ok +-} +- +-// Remove deletes the specified element from the set. +-func (s IntSet) Remove(e int) { +- delete(s, e) +-} +- +-// Count reports the number of elements stored in the set. +-func (s IntSet) Count() int { +- return len(s) +-} +- +-// Same determines whether two sets are backed by the same store. In the +-// current implementation using hash maps it makes use of the fact that +-// hash maps (at least in the gc implementation) are passed as a pointer +-// to a runtime Hmap struct. +-// +-// A map is not seen by the runtime as a pointer though, so we cannot +-// directly compare the sets converted to unsafe.Pointer and need to take +-// the sets' addressed and dereference them as pointers to some comparable +-// type. +-func Same(s1, s2 Set) bool { +- return *(*uintptr)(unsafe.Pointer(&s1)) == *(*uintptr)(unsafe.Pointer(&s2)) +-} +- +-// A set is a set of nodes keyed in their integer identifiers. +-type Set map[int]graph.Node +- +-// The simple accessor methods for Set are provided to allow ease of +-// implementation change should the need arise. 
+- +-// Add inserts an element into the set. +-func (s Set) Add(n graph.Node) { +- s[n.ID()] = n +-} +- +-// Remove deletes the specified element from the set. +-func (s Set) Remove(e graph.Node) { +- delete(s, e.ID()) +-} +- +-// Has reports the existence of the element in the set. +-func (s Set) Has(n graph.Node) bool { +- _, ok := s[n.ID()] +- return ok +-} +- +-// Clear returns an empty set, possibly using the same backing store. +-// Clear is not provided as a method since there is no way to replace +-// the calling value if clearing is performed by a make(set). Clear +-// should never be called without keeping the returned value. +-func Clear(s Set) Set { +- if len(s) == 0 { +- return s +- } +- +- return make(Set) +-} +- +-// Copy performs a perfect copy from s1 to dst (meaning the sets will +-// be equal). +-func (dst Set) Copy(src Set) Set { +- if Same(src, dst) { +- return dst +- } +- +- if len(dst) > 0 { +- dst = make(Set, len(src)) +- } +- +- for e, n := range src { +- dst[e] = n +- } +- +- return dst +-} +- +-// Equal reports set equality between the parameters. Sets are equal if +-// and only if they have the same elements. +-func Equal(s1, s2 Set) bool { +- if Same(s1, s2) { +- return true +- } +- +- if len(s1) != len(s2) { +- return false +- } +- +- for e := range s1 { +- if _, ok := s2[e]; !ok { +- return false +- } +- } +- +- return true +-} +- +-// Union takes the union of s1 and s2, and stores it in dst. +-// +-// The union of two sets, s1 and s2, is the set containing all the +-// elements of each, for instance: +-// +-// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} +-// +-// Since sets may not have repetition, unions of two sets that overlap +-// do not contain repeat elements, that is: +-// +-// {a,b,c} UNION {b,c,d} = {a,b,c,d} +-// +-func (dst Set) Union(s1, s2 Set) Set { +- if Same(s1, s2) { +- return dst.Copy(s1) +- } +- +- if !Same(s1, dst) && !Same(s2, dst) { +- dst = Clear(dst) +- } +- +- if !Same(dst, s1) { +- for e, n := range s1 { +- dst[e] = n +- } +- } +- +- if !Same(dst, s2) { +- for e, n := range s2 { +- dst[e] = n +- } +- } +- +- return dst +-} +- +-// Intersect takes the intersection of s1 and s2, and stores it in dst. +-// +-// The intersection of two sets, s1 and s2, is the set containing all +-// the elements shared between the two sets, for instance: +-// +-// {a,b,c} INTERSECT {b,c,d} = {b,c} +-// +-// The intersection between a set and itself is itself, and thus +-// effectively a copy operation: +-// +-// {a,b,c} INTERSECT {a,b,c} = {a,b,c} +-// +-// The intersection between two sets that share no elements is the empty +-// set: +-// +-// {a,b,c} INTERSECT {d,e,f} = {} +-// +-func (dst Set) Intersect(s1, s2 Set) Set { +- var swap Set +- +- if Same(s1, s2) { +- return dst.Copy(s1) +- } +- if Same(s1, dst) { +- swap = s2 +- } else if Same(s2, dst) { +- swap = s1 +- } else { +- dst = Clear(dst) +- +- if len(s1) > len(s2) { +- s1, s2 = s2, s1 +- } +- +- for e, n := range s1 { +- if _, ok := s2[e]; ok { +- dst[e] = n +- } +- } +- +- return dst +- } +- +- for e := range dst { +- if _, ok := swap[e]; !ok { +- delete(dst, e) +- } +- } +- +- return dst +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/internal/set_test.go b/Godeps/_workspace/src/github.com/gonum/graph/internal/set_test.go +deleted file mode 100644 +index fb39620..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/graph/internal/set_test.go ++++ /dev/null +@@ -1,413 +0,0 @@ +-// Copyright ©2014 The gonum Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package internal +- +-import "testing" +- +-type node int +- +-func (n node) ID() int { return int(n) } +- +-// count reports the number of elements stored in the set. +-func (s Set) count() int { +- return len(s) +-} +- +-// TestSame tests the assumption that pointer equality via unsafe conversion +-// of a map[int]struct{} to uintptr is a valid test for perfect identity between +-// set values. If any of the tests in TestSame fail, the package is broken and same +-// must be reimplemented to conform to the runtime map implementation. The relevant +-// code to look at (at least for gc) is in runtime/hashmap.{h,goc}. +-func TestSame(t *testing.T) { +- var ( +- a = make(Set) +- b = make(Set) +- c = a +- ) +- +- if Same(a, b) { +- t.Error("Independently created sets test as same") +- } +- if !Same(a, c) { +- t.Error("Set copy and original test as not same.") +- } +- a.Add(node(1)) +- if !Same(a, c) { +- t.Error("Set copy and original test as not same after addition.") +- } +- if !Same(nil, nil) { +- t.Error("nil sets test as not same.") +- } +- if Same(b, nil) { +- t.Error("nil and empty sets test as same.") +- } +-} +- +-func TestAdd(t *testing.T) { +- s := make(Set) +- if s == nil { +- t.Fatal("Set cannot be created successfully") +- } +- +- if s.count() != 0 { +- t.Error("Set somehow contains new elements upon creation") +- } +- +- s.Add(node(1)) +- s.Add(node(3)) +- s.Add(node(5)) +- +- if s.count() != 3 { +- t.Error("Incorrect number of set elements after adding") +- } +- +- if !s.Has(node(1)) || !s.Has(node(3)) || !s.Has(node(5)) { +- t.Error("Set doesn't contain element that was added") +- } +- +- s.Add(node(1)) +- +- if s.count() > 3 { +- t.Error("Set double-adds element (element not unique)") +- } else if s.count() < 3 { +- t.Error("Set double-add lowered len") +- } +- +- if !s.Has(node(1)) { +- t.Error("Set doesn't contain double-added element") +- } +- +- if !s.Has(node(3)) || !s.Has(node(5)) { +- t.Error("Set removes element on double-add") +- } +- +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key: %d != %d", e, n.ID()) +- } +- } +-} +- +-func TestRemove(t *testing.T) { +- s := make(Set) +- +- s.Add(node(1)) +- s.Add(node(3)) +- s.Add(node(5)) +- +- s.Remove(node(1)) +- +- if s.count() != 2 { +- t.Error("Incorrect number of set elements after removing an element") +- } +- +- if s.Has(node(1)) { +- t.Error("Element present after removal") +- } +- +- if !s.Has(node(3)) || !s.Has(node(5)) { +- t.Error("Set remove removed wrong element") +- } +- +- s.Remove(node(1)) +- +- if s.count() != 2 || s.Has(node(1)) { +- t.Error("Double set remove does something strange") +- } +- +- s.Add(node(1)) +- +- if s.count() != 3 || !s.Has(node(1)) { +- t.Error("Cannot add element after removal") +- } +-} +- +-func TestClear(t *testing.T) { +- s := make(Set) +- +- s.Add(node(8)) +- s.Add(node(9)) +- s.Add(node(10)) +- +- s = Clear(s) +- +- if s.count() != 0 { +- t.Error("Clear did not properly reset set to size 0") +- } +-} +- +-func TestSelfEqual(t *testing.T) { +- s := make(Set) +- +- if !Equal(s, s) { +- t.Error("Set is not equal to itself") +- } +- +- s.Add(node(1)) +- +- if !Equal(s, s) { +- t.Error("Set ceases self equality after adding element") +- } +-} +- +-func TestEqual(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- +- if !Equal(s1, s2) { +- t.Error("Two different empty sets not equal") +- } +- +- s1.Add(node(1)) +- if Equal(s1, s2) { 
+- t.Error("Two different sets with different elements not equal") +- } +- +- s2.Add(node(1)) +- if !Equal(s1, s2) { +- t.Error("Two sets with same element not equal") +- } +-} +- +-func TestCopy(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- +- s1.Add(node(1)) +- s1.Add(node(2)) +- s1.Add(node(3)) +- +- s2.Copy(s1) +- +- if !Equal(s1, s2) { +- t.Fatalf("Two sets not equal after copy") +- } +- +- s2.Remove(node(1)) +- +- if Equal(s1, s2) { +- t.Errorf("Mutating one set mutated another after copy") +- } +-} +- +-func TestSelfCopy(t *testing.T) { +- s1 := make(Set) +- +- s1.Add(node(1)) +- s1.Add(node(2)) +- +- s1.Copy(s1) +- +- if s1.count() != 2 { +- t.Error("Something strange happened when copying into self") +- } +-} +- +-func TestUnionSame(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(1)) +- s1.Add(node(2)) +- +- s2.Add(node(1)) +- s2.Add(node(2)) +- +- s3.Union(s1, s2) +- +- if s3.count() != 2 { +- t.Error("Union of same sets yields set with wrong len") +- } +- +- if !s3.Has(node(1)) || !s3.Has(node(2)) { +- t.Error("Union of same sets yields wrong elements") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +- +-func TestUnionDiff(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(1)) +- s1.Add(node(2)) +- +- s2.Add(node(3)) +- +- s3.Union(s1, s2) +- +- if s3.count() != 3 { +- t.Error("Union of different sets yields set with wrong len") +- } +- +- if !s3.Has(node(1)) || !s3.Has(node(2)) || !s3.Has(node(3)) { +- t.Error("Union of different sets yields set with wrong elements") +- } +- +- if s1.Has(node(3)) || !s1.Has(node(2)) || !s1.Has(node(1)) || s1.count() != 2 { +- t.Error("Union of sets mutates non-destination set (argument 1)") +- } +- +- if !s2.Has(node(3)) || s2.Has(node(1)) || s2.Has(node(2)) || s2.count() != 1 { +- t.Error("Union of sets mutates non-destination set (argument 2)") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +- +-func TestUnionOverlapping(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(1)) +- s1.Add(node(2)) +- +- s2.Add(node(2)) +- s2.Add(node(3)) +- +- s3.Union(s1, s2) +- +- if s3.count() != 3 { +- t.Error("Union of overlapping sets yields set with wrong len") +- } +- +- if !s3.Has(node(1)) || !s3.Has(node(2)) || !s3.Has(node(3)) { +- t.Error("Union of overlapping sets yields set with wrong elements") +- } +- +- if s1.Has(node(3)) || !s1.Has(node(2)) || !s1.Has(node(1)) || s1.count() != 2 { +- t.Error("Union of sets mutates non-destination set (argument 1)") +- } +- +- if !s2.Has(node(3)) || s2.Has(node(1)) || !s2.Has(node(2)) || s2.count() != 2 { +- t.Error("Union of sets mutates non-destination set (argument 2)") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +- +-func TestIntersectSame(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(2)) +- s1.Add(node(3)) +- +- s2.Add(node(2)) +- s2.Add(node(3)) +- +- s3.Intersect(s1, s2) +- +- if card := s3.count(); card != 2 { +- t.Errorf("Intersection of identical sets yields set of wrong len %d", card) +- } +- +- if 
!s3.Has(node(2)) || !s3.Has(node(3)) { +- t.Error("Intersection of identical sets yields set of wrong elements") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +- +-func TestIntersectDiff(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(2)) +- s1.Add(node(3)) +- +- s2.Add(node(1)) +- s2.Add(node(4)) +- +- s3.Intersect(s1, s2) +- +- if card := s3.count(); card != 0 { +- t.Errorf("Intersection of different yields non-empty set %d", card) +- } +- +- if !s1.Has(node(2)) || !s1.Has(node(3)) || s1.Has(node(1)) || s1.Has(node(4)) || s1.count() != 2 { +- t.Error("Intersection of sets mutates non-destination set (argument 1)") +- } +- +- if s2.Has(node(2)) || s2.Has(node(3)) || !s2.Has(node(1)) || !s2.Has(node(4)) || s2.count() != 2 { +- t.Error("Intersection of sets mutates non-destination set (argument 1)") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +- +-func TestIntersectOverlapping(t *testing.T) { +- s1 := make(Set) +- s2 := make(Set) +- s3 := make(Set) +- +- s1.Add(node(2)) +- s1.Add(node(3)) +- +- s2.Add(node(3)) +- s2.Add(node(4)) +- +- s3.Intersect(s1, s2) +- +- if card := s3.count(); card != 1 { +- t.Errorf("Intersection of overlapping sets yields set of incorrect len %d", card) +- } +- +- if !s3.Has(node(3)) { +- t.Errorf("Intersection of overlapping sets yields set with wrong element") +- } +- +- if !s1.Has(node(2)) || !s1.Has(node(3)) || s1.Has(node(4)) || s1.count() != 2 { +- t.Error("Intersection of sets mutates non-destination set (argument 1)") +- } +- +- if s2.Has(node(2)) || !s2.Has(node(3)) || !s2.Has(node(4)) || s2.count() != 2 { +- t.Error("Intersection of sets mutates non-destination set (argument 1)") +- } +- +- for i, s := range []Set{s1, s2, s3} { +- for e, n := range s { +- if e != n.ID() { +- t.Error("Element ID did not match key in s%d: %d != %d", i+1, e, n.ID()) +- } +- } +- } +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/internal/sort.go b/Godeps/_workspace/src/github.com/gonum/graph/internal/sort.go +deleted file mode 100644 +index 3bfee0f..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/graph/internal/sort.go ++++ /dev/null +@@ -1,28 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package internal +- +-// BySliceValues implements the sort.Interface sorting a slice of +-// []int lexically by the values of the []int. 
+-type BySliceValues [][]int +- +-func (c BySliceValues) Len() int { return len(c) } +-func (c BySliceValues) Less(i, j int) bool { +- a, b := c[i], c[j] +- l := len(a) +- if len(b) < l { +- l = len(b) +- } +- for k, v := range a[:l] { +- if v < b[k] { +- return true +- } +- if v > b[k] { +- return false +- } +- } +- return len(a) < len(b) +-} +-func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/network/betweenness.go b/Godeps/_workspace/src/github.com/gonum/graph/network/betweenness.go +index ad16732..dbe216e 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/network/betweenness.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/network/betweenness.go +@@ -8,7 +8,7 @@ import ( + "math" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/path" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/a_star.go b/Godeps/_workspace/src/github.com/gonum/graph/path/a_star.go +index b41d194..d742cff 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/a_star.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/a_star.go +@@ -8,7 +8,7 @@ import ( + "container/heap" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // Heuristic returns an estimate of the cost of travelling between two nodes. +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/control_flow.go b/Godeps/_workspace/src/github.com/gonum/graph/path/control_flow.go +index 219226d..9bebf2c 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/control_flow.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/control_flow.go +@@ -6,7 +6,7 @@ package path + + import ( + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // PostDominatores returns all dominators for all nodes in g. 
It does not +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/dijkstra_test.go b/Godeps/_workspace/src/github.com/gonum/graph/path/dijkstra_test.go +index c22d28f..0526ba9 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/dijkstra_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/dijkstra_test.go +@@ -11,7 +11,7 @@ import ( + "testing" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/path" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/floydwarshall_test.go b/Godeps/_workspace/src/github.com/gonum/graph/path/floydwarshall_test.go +index ea78e79..d23bb6a 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/floydwarshall_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/floydwarshall_test.go +@@ -11,7 +11,7 @@ import ( + "testing" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/path" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/johnson_apsp_test.go b/Godeps/_workspace/src/github.com/gonum/graph/path/johnson_apsp_test.go +index 814000f..588581d 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/johnson_apsp_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/johnson_apsp_test.go +@@ -11,7 +11,7 @@ import ( + "testing" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/path" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/path/spanning_tree.go b/Godeps/_workspace/src/github.com/gonum/graph/path/spanning_tree.go +index 99b30cb..2e3a501 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/path/spanning_tree.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/path/spanning_tree.go +@@ -9,7 +9,7 @@ import ( + + "github.com/gonum/graph" + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // EdgeListerGraph is an undirected graph than returns its complete set of edges. 
+diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch.go +index 5e30d5b..a012b9b 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch.go +@@ -6,7 +6,7 @@ package topo + + import ( + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // VertexOrdering returns the vertex ordering and the k-cores of +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch_test.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch_test.go +index 3d22c36..73e303c 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/bron_kerbosch_test.go +@@ -10,7 +10,7 @@ import ( + "testing" + + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/topo" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles.go +index 36d4cbd..ce9318e 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles.go +@@ -8,7 +8,7 @@ import ( + "sort" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // johnson implements Johnson's "Finding all the elementary +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles_test.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles_test.go +index a1ee6e1..149fff2 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/johnson_cycles_test.go +@@ -10,7 +10,7 @@ import ( + "testing" + + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/topo" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan.go +index 908358c..f779bbb 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan.go +@@ -9,7 +9,7 @@ import ( + "sort" + + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // Unorderable is an error containing sets of unorderable graph.Nodes. 
+diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan_test.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan_test.go +index 504633f..63c1a82 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/tarjan_test.go +@@ -10,7 +10,7 @@ import ( + "testing" + + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/topo" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/topo/topo_test.go b/Godeps/_workspace/src/github.com/gonum/graph/topo/topo_test.go +index d903ee1..d71b788 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/topo/topo_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/topo/topo_test.go +@@ -11,7 +11,7 @@ import ( + + "github.com/gonum/graph" + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/topo" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse.go b/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse.go +index bb0fdad..a7eb54d 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse.go +@@ -7,7 +7,7 @@ package traverse + + import ( + "github.com/gonum/graph" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + ) + + // BreadthFirst implements stateful breadth-first graph traversal. +diff --git a/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse_test.go b/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse_test.go +index c5a4e2a..b771b46 100644 +--- a/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse_test.go ++++ b/Godeps/_workspace/src/github.com/gonum/graph/traverse/traverse_test.go +@@ -12,7 +12,7 @@ import ( + + "github.com/gonum/graph" + "github.com/gonum/graph/concrete" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/traverse" + ) + +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/caxpy.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/caxpy.go +new file mode 100644 +index 0000000..80d802a +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/caxpy.go +@@ -0,0 +1,22 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++// The extra z parameter is needed because of floats.AddScaledTo ++func CaxpyUnitary(alpha complex64, x, y, z []complex64) { ++ for i, v := range x { ++ z[i] = alpha*v + y[i] ++ } ++} ++ ++func CaxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { ++ for i := 0; i < int(n); i++ { ++ y[iy] += alpha * x[ix] ++ ix += incX ++ iy += incY ++ } ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotc.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotc.go +new file mode 100644 +index 0000000..ed999e5 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotc.go +@@ -0,0 +1,23 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package asm ++ ++func CdotcUnitary(x, y []complex64) (sum complex64) { ++ for i, v := range x { ++ sum += y[i] * conj(v) ++ } ++ return ++} ++ ++func CdotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * conj(x[ix]) ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotu.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotu.go +new file mode 100644 +index 0000000..3392ee2 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/cdotu.go +@@ -0,0 +1,23 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++func CdotuUnitary(x, y []complex64) (sum complex64) { ++ for i, v := range x { ++ sum += y[i] * v ++ } ++ return ++} ++ ++func CdotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * x[ix] ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/complex b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/complex +new file mode 100644 +index 0000000..b26e4e6 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/complex +@@ -0,0 +1,58 @@ ++#!/usr/bin/env bash ++ ++# Copyright ©2015 The gonum Authors. All rights reserved. ++# Use of this source code is governed by a BSD-style ++# license that can be found in the LICENSE file. ++ ++echo Generating zdotu.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > zdotu.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex128' \ ++| sed 's/Ddot/Zdotu/' \ ++>> zdotu.go ++ ++echo Generating zdotc.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > zdotc.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex128' \ ++| gofmt -r 'y[i] * v -> y[i] * cmplx.Conj(v)' \ ++| sed 's/Ddot/Zdotc/' \ ++| goimports \ ++>> zdotc.go ++ ++echo Generating zaxpy.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > zaxpy.go ++cat daxpy.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex128' \ ++| sed 's/Daxpy/Zaxpy/' \ ++>> zaxpy.go ++ ++echo Generating cdotu.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotu.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex64' \ ++| sed 's/Ddot/Cdotu/' \ ++>> cdotu.go ++ ++echo Generating cdotc.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotc.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex64' \ ++| gofmt -r 'y[i] * v -> y[i] * conj(v)' \ ++| sed 's/Ddot/Cdotc/' \ ++| goimports \ ++>> cdotc.go ++ ++echo Generating caxpy.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > caxpy.go ++cat daxpy.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> complex64' \ ++| sed 's/Daxpy/Caxpy/' \ ++>> caxpy.go ++ +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/conj.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/conj.go +new file mode 100644 +index 0000000..1cadb2a +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/conj.go +@@ -0,0 +1,7 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy.go +new file mode 100644 +index 0000000..24979fc +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy.go +@@ -0,0 +1,22 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//+build !amd64 noasm ++ ++package asm ++ ++// The extra z parameter is needed because of floats.AddScaledTo ++func DaxpyUnitary(alpha float64, x, y, z []float64) { ++ for i, v := range x { ++ z[i] = alpha*v + y[i] ++ } ++} ++ ++func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { ++ for i := 0; i < int(n); i++ { ++ y[iy] += alpha * x[ix] ++ ix += incX ++ iy += incY ++ } ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.go +new file mode 100644 +index 0000000..d1aeacf +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.go +@@ -0,0 +1,12 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//+build !noasm ++ ++package asm ++ ++// The extra z parameter is needed because of floats.AddScaledTo ++func DaxpyUnitary(alpha float64, x, y, z []float64) ++ ++func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.s b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.s +new file mode 100644 +index 0000000..18f2d3c +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/daxpy_amd64.s +@@ -0,0 +1,140 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++// ++// Some of the loop unrolling code is copied from: ++// http://golang.org/src/math/big/arith_amd64.s ++// which is distributed under these terms: ++// ++// Copyright (c) 2012 The Go Authors. All rights reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++//+build !noasm ++ ++// TODO(fhs): use textflag.h after we drop Go 1.3 support ++//#include "textflag.h" ++// Don't insert stack check preamble. ++#define NOSPLIT 4 ++ ++ ++// func DaxpyUnitary(alpha float64, x, y, z []float64) ++// This function assumes len(y) >= len(x). ++TEXT ·DaxpyUnitary(SB),NOSPLIT,$0 ++ MOVHPD alpha+0(FP), X7 ++ MOVLPD alpha+0(FP), X7 ++ MOVQ x_len+16(FP), DI // n = len(x) ++ MOVQ x+8(FP), R8 ++ MOVQ y+32(FP), R9 ++ MOVQ z+56(FP), R10 ++ ++ MOVQ $0, SI // i = 0 ++ SUBQ $2, DI // n -= 2 ++ JL V1 // if n < 0 goto V1 ++ ++U1: // n >= 0 ++ // y[i] += alpha * x[i] unrolled 2x. ++ MOVUPD 0(R8)(SI*8), X0 ++ MOVUPD 0(R9)(SI*8), X1 ++ MULPD X7, X0 ++ ADDPD X0, X1 ++ MOVUPD X1, 0(R10)(SI*8) ++ ++ ADDQ $2, SI // i += 2 ++ SUBQ $2, DI // n -= 2 ++ JGE U1 // if n >= 0 goto U1 ++ ++V1: ++ ADDQ $2, DI // n += 2 ++ JLE E1 // if n <= 0 goto E1 ++ ++ // y[i] += alpha * x[i] for last iteration if n is odd. ++ MOVSD 0(R8)(SI*8), X0 ++ MOVSD 0(R9)(SI*8), X1 ++ MULSD X7, X0 ++ ADDSD X0, X1 ++ MOVSD X1, 0(R10)(SI*8) ++ ++E1: ++ RET ++ ++ ++// func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) ++TEXT ·DaxpyInc(SB),NOSPLIT,$0 ++ MOVHPD alpha+0(FP), X7 ++ MOVLPD alpha+0(FP), X7 ++ MOVQ x+8(FP), R8 ++ MOVQ y+32(FP), R9 ++ MOVQ n+56(FP), CX ++ MOVQ incX+64(FP), R11 ++ MOVQ incY+72(FP), R12 ++ MOVQ ix+80(FP), SI ++ MOVQ iy+88(FP), DI ++ ++ MOVQ SI, AX // nextX = ix ++ MOVQ DI, BX // nextY = iy ++ ADDQ R11, AX // nextX += incX ++ ADDQ R12, BX // nextY += incX ++ SHLQ $1, R11 // indX *= 2 ++ SHLQ $1, R12 // indY *= 2 ++ ++ SUBQ $2, CX // n -= 2 ++ JL V2 // if n < 0 goto V2 ++ ++U2: // n >= 0 ++ // y[i] += alpha * x[i] unrolled 2x. ++ MOVHPD 0(R8)(SI*8), X0 ++ MOVHPD 0(R9)(DI*8), X1 ++ MOVLPD 0(R8)(AX*8), X0 ++ MOVLPD 0(R9)(BX*8), X1 ++ ++ MULPD X7, X0 ++ ADDPD X0, X1 ++ MOVHPD X1, 0(R9)(DI*8) ++ MOVLPD X1, 0(R9)(BX*8) ++ ++ ADDQ R11, SI // ix += incX ++ ADDQ R12, DI // iy += incY ++ ADDQ R11, AX // nextX += incX ++ ADDQ R12, BX // nextY += incY ++ ++ SUBQ $2, CX // n -= 2 ++ JGE U2 // if n >= 0 goto U2 ++ ++V2: ++ ADDQ $2, CX // n += 2 ++ JLE E2 // if n <= 0 goto E2 ++ ++ // y[i] += alpha * x[i] for the last iteration if n is odd. ++ MOVSD 0(R8)(SI*8), X0 ++ MOVSD 0(R9)(DI*8), X1 ++ MULSD X7, X0 ++ ADDSD X0, X1 ++ MOVSD X1, 0(R9)(DI*8) ++ ++E2: ++ RET +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot.go +new file mode 100644 +index 0000000..7e69957 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot.go +@@ -0,0 +1,23 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//+build !amd64 noasm ++ ++package asm ++ ++func DdotUnitary(x, y []float64) (sum float64) { ++ for i, v := range x { ++ sum += y[i] * v ++ } ++ return ++} ++ ++func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * x[ix] ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.go +new file mode 100644 +index 0000000..7fa634a +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.go +@@ -0,0 +1,10 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//+build !noasm ++ ++package asm ++ ++func DdotUnitary(x, y []float64) (sum float64) ++func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.s b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.s +new file mode 100644 +index 0000000..a898bbb +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/ddot_amd64.s +@@ -0,0 +1,140 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++// ++// Some of the loop unrolling code is copied from: ++// http://golang.org/src/math/big/arith_amd64.s ++// which is distributed under these terms: ++// ++// Copyright (c) 2012 The Go Authors. All rights reserved. ++// ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following disclaimer ++// in the documentation and/or other materials provided with the ++// distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived from ++// this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++//+build !noasm ++ ++// TODO(fhs): use textflag.h after we drop Go 1.3 support ++//#include "textflag.h" ++// Don't insert stack check preamble. ++#define NOSPLIT 4 ++ ++ ++// func DdotUnitary(x, y []float64) (sum float64) ++// This function assumes len(y) >= len(x). 
++TEXT ·DdotUnitary(SB),NOSPLIT,$0 ++ MOVQ x_len+8(FP), DI // n = len(x) ++ MOVQ x+0(FP), R8 ++ MOVQ y+24(FP), R9 ++ ++ MOVQ $0, SI // i = 0 ++ MOVSD $(0.0), X7 // sum = 0 ++ ++ SUBQ $2, DI // n -= 2 ++ JL V1 // if n < 0 goto V1 ++ ++U1: // n >= 0 ++ // sum += x[i] * y[i] unrolled 2x. ++ MOVUPD 0(R8)(SI*8), X0 ++ MOVUPD 0(R9)(SI*8), X1 ++ MULPD X1, X0 ++ ADDPD X0, X7 ++ ++ ADDQ $2, SI // i += 2 ++ SUBQ $2, DI // n -= 2 ++ JGE U1 // if n >= 0 goto U1 ++ ++V1: // n > 0 ++ ADDQ $2, DI // n += 2 ++ JLE E1 // if n <= 0 goto E1 ++ ++ // sum += x[i] * y[i] for last iteration if n is odd. ++ MOVSD 0(R8)(SI*8), X0 ++ MOVSD 0(R9)(SI*8), X1 ++ MULSD X1, X0 ++ ADDSD X0, X7 ++ ++E1: ++ // Add the two sums together. ++ MOVSD X7, X0 ++ UNPCKHPD X7, X7 ++ ADDSD X0, X7 ++ MOVSD X7, sum+48(FP) // return final sum ++ RET ++ ++ ++// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) ++TEXT ·DdotInc(SB),NOSPLIT,$0 ++ MOVQ x+0(FP), R8 ++ MOVQ y+24(FP), R9 ++ MOVQ n+48(FP), CX ++ MOVQ incX+56(FP), R11 ++ MOVQ incY+64(FP), R12 ++ MOVQ ix+72(FP), R13 ++ MOVQ iy+80(FP), R14 ++ ++ MOVSD $(0.0), X7 // sum = 0 ++ LEAQ (R8)(R13*8), SI // p = &x[ix] ++ LEAQ (R9)(R14*8), DI // q = &y[ix] ++ SHLQ $3, R11 // incX *= sizeof(float64) ++ SHLQ $3, R12 // indY *= sizeof(float64) ++ ++ SUBQ $2, CX // n -= 2 ++ JL V2 // if n < 0 goto V2 ++ ++U2: // n >= 0 ++ // sum += *p * *q unrolled 2x. ++ MOVHPD (SI), X0 ++ MOVHPD (DI), X1 ++ ADDQ R11, SI // p += incX ++ ADDQ R12, DI // q += incY ++ MOVLPD (SI), X0 ++ MOVLPD (DI), X1 ++ ADDQ R11, SI // p += incX ++ ADDQ R12, DI // q += incY ++ ++ MULPD X1, X0 ++ ADDPD X0, X7 ++ ++ SUBQ $2, CX // n -= 2 ++ JGE U2 // if n >= 0 goto U2 ++ ++V2: ++ ADDQ $2, CX // n += 2 ++ JLE E2 // if n <= 0 goto E2 ++ ++ // sum += *p * *q for the last iteration if n is odd. ++ MOVSD (SI), X0 ++ MULSD (DI), X0 ++ ADDSD X0, X7 ++ ++E2: ++ // Add the two sums together. ++ MOVSD X7, X0 ++ UNPCKHPD X7, X7 ++ ADDSD X0, X7 ++ MOVSD X7, sum+88(FP) // return final sum ++ RET ++ +\ No newline at end of file +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/dsdot.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/dsdot.go +new file mode 100644 +index 0000000..8450689 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/dsdot.go +@@ -0,0 +1,23 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++func DsdotUnitary(x, y []float32) (sum float64) { ++ for i, v := range x { ++ sum += float64(y[i]) * float64(v) ++ } ++ return ++} ++ ++func DsdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { ++ for i := 0; i < int(n); i++ { ++ sum += float64(y[iy]) * float64(x[ix]) ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/generate.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/generate.go +new file mode 100644 +index 0000000..e252140 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/generate.go +@@ -0,0 +1,8 @@ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:generate ./single_precision ++//go:generate ./complex ++ ++package asm +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/saxpy.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/saxpy.go +new file mode 100644 +index 0000000..3ef767f +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/saxpy.go +@@ -0,0 +1,22 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++// The extra z parameter is needed because of floats.AddScaledTo ++func SaxpyUnitary(alpha float32, x, y, z []float32) { ++ for i, v := range x { ++ z[i] = alpha*v + y[i] ++ } ++} ++ ++func SaxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { ++ for i := 0; i < int(n); i++ { ++ y[iy] += alpha * x[ix] ++ ix += incX ++ iy += incY ++ } ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/sdot.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/sdot.go +new file mode 100644 +index 0000000..0cef5de +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/sdot.go +@@ -0,0 +1,23 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++func SdotUnitary(x, y []float32) (sum float32) { ++ for i, v := range x { ++ sum += y[i] * v ++ } ++ return ++} ++ ++func SdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * x[ix] ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/single_precision b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/single_precision +new file mode 100644 +index 0000000..a937a97 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/single_precision +@@ -0,0 +1,30 @@ ++#!/usr/bin/env bash ++ ++# Copyright ©2015 The gonum Authors. All rights reserved. ++# Use of this source code is governed by a BSD-style ++# license that can be found in the LICENSE file. ++ ++echo Generating dsdot.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > dsdot.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r '[]float64 -> []float32' \ ++| gofmt -r 'a * b -> float64(a) * float64(b)' \ ++| sed 's/Ddot/Dsdot/' \ ++>> dsdot.go ++ ++echo Generating sdot.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > sdot.go ++cat ddot.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> float32' \ ++| sed 's/Ddot/Sdot/' \ ++>> sdot.go ++ ++echo Generating saxpy.go ++echo -e '// Generated code do not edit. Run `go generate`.\n' > saxpy.go ++cat daxpy.go \ ++| grep -v '//+build' \ ++| gofmt -r 'float64 -> float32' \ ++| sed 's/Daxpy/Saxpy/' \ ++>> saxpy.go +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zaxpy.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zaxpy.go +new file mode 100644 +index 0000000..9478f25 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zaxpy.go +@@ -0,0 +1,22 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package asm ++ ++// The extra z parameter is needed because of floats.AddScaledTo ++func ZaxpyUnitary(alpha complex128, x, y, z []complex128) { ++ for i, v := range x { ++ z[i] = alpha*v + y[i] ++ } ++} ++ ++func ZaxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { ++ for i := 0; i < int(n); i++ { ++ y[iy] += alpha * x[ix] ++ ix += incX ++ iy += incY ++ } ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotc.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotc.go +new file mode 100644 +index 0000000..7b8febc +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotc.go +@@ -0,0 +1,25 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++import "math/cmplx" ++ ++func ZdotcUnitary(x, y []complex128) (sum complex128) { ++ for i, v := range x { ++ sum += y[i] * cmplx.Conj(v) ++ } ++ return ++} ++ ++func ZdotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * cmplx.Conj(x[ix]) ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotu.go b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotu.go +new file mode 100644 +index 0000000..82c1fe2 +--- /dev/null ++++ b/Godeps/_workspace/src/github.com/gonum/inteernal/asm/zdotu.go +@@ -0,0 +1,23 @@ ++// Generated code do not edit. Run `go generate`. ++ ++// Copyright ©2015 The gonum Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package asm ++ ++func ZdotuUnitary(x, y []complex128) (sum complex128) { ++ for i, v := range x { ++ sum += y[i] * v ++ } ++ return ++} ++ ++func ZdotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { ++ for i := 0; i < int(n); i++ { ++ sum += y[iy] * x[ix] ++ ix += incX ++ iy += incY ++ } ++ return ++} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/caxpy.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/caxpy.go +deleted file mode 100644 +index 80d802a..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/caxpy.go ++++ /dev/null +@@ -1,22 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-// The extra z parameter is needed because of floats.AddScaledTo +-func CaxpyUnitary(alpha complex64, x, y, z []complex64) { +- for i, v := range x { +- z[i] = alpha*v + y[i] +- } +-} +- +-func CaxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { +- for i := 0; i < int(n); i++ { +- y[iy] += alpha * x[ix] +- ix += incX +- iy += incY +- } +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotc.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotc.go +deleted file mode 100644 +index ed999e5..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotc.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-package asm +- +-func CdotcUnitary(x, y []complex64) (sum complex64) { +- for i, v := range x { +- sum += y[i] * conj(v) +- } +- return +-} +- +-func CdotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * conj(x[ix]) +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotu.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotu.go +deleted file mode 100644 +index 3392ee2..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/cdotu.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-func CdotuUnitary(x, y []complex64) (sum complex64) { +- for i, v := range x { +- sum += y[i] * v +- } +- return +-} +- +-func CdotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * x[ix] +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/complex b/Godeps/_workspace/src/github.com/gonum/internal/asm/complex +deleted file mode 100644 +index b26e4e6..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/complex ++++ /dev/null +@@ -1,58 +0,0 @@ +-#!/usr/bin/env bash +- +-# Copyright ©2015 The gonum Authors. All rights reserved. +-# Use of this source code is governed by a BSD-style +-# license that can be found in the LICENSE file. +- +-echo Generating zdotu.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > zdotu.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex128' \ +-| sed 's/Ddot/Zdotu/' \ +->> zdotu.go +- +-echo Generating zdotc.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > zdotc.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex128' \ +-| gofmt -r 'y[i] * v -> y[i] * cmplx.Conj(v)' \ +-| sed 's/Ddot/Zdotc/' \ +-| goimports \ +->> zdotc.go +- +-echo Generating zaxpy.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > zaxpy.go +-cat daxpy.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex128' \ +-| sed 's/Daxpy/Zaxpy/' \ +->> zaxpy.go +- +-echo Generating cdotu.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotu.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex64' \ +-| sed 's/Ddot/Cdotu/' \ +->> cdotu.go +- +-echo Generating cdotc.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotc.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex64' \ +-| gofmt -r 'y[i] * v -> y[i] * conj(v)' \ +-| sed 's/Ddot/Cdotc/' \ +-| goimports \ +->> cdotc.go +- +-echo Generating caxpy.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > caxpy.go +-cat daxpy.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> complex64' \ +-| sed 's/Daxpy/Caxpy/' \ +->> caxpy.go +- +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/conj.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/conj.go +deleted file mode 100644 +index 1cadb2a..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/conj.go ++++ /dev/null +@@ -1,7 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. 
+-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy.go +deleted file mode 100644 +index 24979fc..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy.go ++++ /dev/null +@@ -1,22 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//+build !amd64 noasm +- +-package asm +- +-// The extra z parameter is needed because of floats.AddScaledTo +-func DaxpyUnitary(alpha float64, x, y, z []float64) { +- for i, v := range x { +- z[i] = alpha*v + y[i] +- } +-} +- +-func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { +- for i := 0; i < int(n); i++ { +- y[iy] += alpha * x[ix] +- ix += incX +- iy += incY +- } +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.go +deleted file mode 100644 +index d1aeacf..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.go ++++ /dev/null +@@ -1,12 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//+build !noasm +- +-package asm +- +-// The extra z parameter is needed because of floats.AddScaledTo +-func DaxpyUnitary(alpha float64, x, y, z []float64) +- +-func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.s b/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.s +deleted file mode 100644 +index 18f2d3c..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/daxpy_amd64.s ++++ /dev/null +@@ -1,140 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +-// +-// Some of the loop unrolling code is copied from: +-// http://golang.org/src/math/big/arith_amd64.s +-// which is distributed under these terms: +-// +-// Copyright (c) 2012 The Go Authors. All rights reserved. +-// +-// Redistribution and use in source and binary forms, with or without +-// modification, are permitted provided that the following conditions are +-// met: +-// +-// * Redistributions of source code must retain the above copyright +-// notice, this list of conditions and the following disclaimer. +-// * Redistributions in binary form must reproduce the above +-// copyright notice, this list of conditions and the following disclaimer +-// in the documentation and/or other materials provided with the +-// distribution. +-// * Neither the name of Google Inc. nor the names of its +-// contributors may be used to endorse or promote products derived from +-// this software without specific prior written permission. +-// +-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-//+build !noasm +- +-// TODO(fhs): use textflag.h after we drop Go 1.3 support +-//#include "textflag.h" +-// Don't insert stack check preamble. +-#define NOSPLIT 4 +- +- +-// func DaxpyUnitary(alpha float64, x, y, z []float64) +-// This function assumes len(y) >= len(x). +-TEXT ·DaxpyUnitary(SB),NOSPLIT,$0 +- MOVHPD alpha+0(FP), X7 +- MOVLPD alpha+0(FP), X7 +- MOVQ x_len+16(FP), DI // n = len(x) +- MOVQ x+8(FP), R8 +- MOVQ y+32(FP), R9 +- MOVQ z+56(FP), R10 +- +- MOVQ $0, SI // i = 0 +- SUBQ $2, DI // n -= 2 +- JL V1 // if n < 0 goto V1 +- +-U1: // n >= 0 +- // y[i] += alpha * x[i] unrolled 2x. +- MOVUPD 0(R8)(SI*8), X0 +- MOVUPD 0(R9)(SI*8), X1 +- MULPD X7, X0 +- ADDPD X0, X1 +- MOVUPD X1, 0(R10)(SI*8) +- +- ADDQ $2, SI // i += 2 +- SUBQ $2, DI // n -= 2 +- JGE U1 // if n >= 0 goto U1 +- +-V1: +- ADDQ $2, DI // n += 2 +- JLE E1 // if n <= 0 goto E1 +- +- // y[i] += alpha * x[i] for last iteration if n is odd. +- MOVSD 0(R8)(SI*8), X0 +- MOVSD 0(R9)(SI*8), X1 +- MULSD X7, X0 +- ADDSD X0, X1 +- MOVSD X1, 0(R10)(SI*8) +- +-E1: +- RET +- +- +-// func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +-TEXT ·DaxpyInc(SB),NOSPLIT,$0 +- MOVHPD alpha+0(FP), X7 +- MOVLPD alpha+0(FP), X7 +- MOVQ x+8(FP), R8 +- MOVQ y+32(FP), R9 +- MOVQ n+56(FP), CX +- MOVQ incX+64(FP), R11 +- MOVQ incY+72(FP), R12 +- MOVQ ix+80(FP), SI +- MOVQ iy+88(FP), DI +- +- MOVQ SI, AX // nextX = ix +- MOVQ DI, BX // nextY = iy +- ADDQ R11, AX // nextX += incX +- ADDQ R12, BX // nextY += incX +- SHLQ $1, R11 // indX *= 2 +- SHLQ $1, R12 // indY *= 2 +- +- SUBQ $2, CX // n -= 2 +- JL V2 // if n < 0 goto V2 +- +-U2: // n >= 0 +- // y[i] += alpha * x[i] unrolled 2x. +- MOVHPD 0(R8)(SI*8), X0 +- MOVHPD 0(R9)(DI*8), X1 +- MOVLPD 0(R8)(AX*8), X0 +- MOVLPD 0(R9)(BX*8), X1 +- +- MULPD X7, X0 +- ADDPD X0, X1 +- MOVHPD X1, 0(R9)(DI*8) +- MOVLPD X1, 0(R9)(BX*8) +- +- ADDQ R11, SI // ix += incX +- ADDQ R12, DI // iy += incY +- ADDQ R11, AX // nextX += incX +- ADDQ R12, BX // nextY += incY +- +- SUBQ $2, CX // n -= 2 +- JGE U2 // if n >= 0 goto U2 +- +-V2: +- ADDQ $2, CX // n += 2 +- JLE E2 // if n <= 0 goto E2 +- +- // y[i] += alpha * x[i] for the last iteration if n is odd. +- MOVSD 0(R8)(SI*8), X0 +- MOVSD 0(R9)(DI*8), X1 +- MULSD X7, X0 +- ADDSD X0, X1 +- MOVSD X1, 0(R9)(DI*8) +- +-E2: +- RET +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot.go +deleted file mode 100644 +index 7e69957..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-//+build !amd64 noasm +- +-package asm +- +-func DdotUnitary(x, y []float64) (sum float64) { +- for i, v := range x { +- sum += y[i] * v +- } +- return +-} +- +-func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * x[ix] +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.go +deleted file mode 100644 +index 7fa634a..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.go ++++ /dev/null +@@ -1,10 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//+build !noasm +- +-package asm +- +-func DdotUnitary(x, y []float64) (sum float64) +-func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.s b/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.s +deleted file mode 100644 +index a898bbb..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/ddot_amd64.s ++++ /dev/null +@@ -1,140 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +-// +-// Some of the loop unrolling code is copied from: +-// http://golang.org/src/math/big/arith_amd64.s +-// which is distributed under these terms: +-// +-// Copyright (c) 2012 The Go Authors. All rights reserved. +-// +-// Redistribution and use in source and binary forms, with or without +-// modification, are permitted provided that the following conditions are +-// met: +-// +-// * Redistributions of source code must retain the above copyright +-// notice, this list of conditions and the following disclaimer. +-// * Redistributions in binary form must reproduce the above +-// copyright notice, this list of conditions and the following disclaimer +-// in the documentation and/or other materials provided with the +-// distribution. +-// * Neither the name of Google Inc. nor the names of its +-// contributors may be used to endorse or promote products derived from +-// this software without specific prior written permission. +-// +-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +- +-//+build !noasm +- +-// TODO(fhs): use textflag.h after we drop Go 1.3 support +-//#include "textflag.h" +-// Don't insert stack check preamble. +-#define NOSPLIT 4 +- +- +-// func DdotUnitary(x, y []float64) (sum float64) +-// This function assumes len(y) >= len(x). 
+-TEXT ·DdotUnitary(SB),NOSPLIT,$0 +- MOVQ x_len+8(FP), DI // n = len(x) +- MOVQ x+0(FP), R8 +- MOVQ y+24(FP), R9 +- +- MOVQ $0, SI // i = 0 +- MOVSD $(0.0), X7 // sum = 0 +- +- SUBQ $2, DI // n -= 2 +- JL V1 // if n < 0 goto V1 +- +-U1: // n >= 0 +- // sum += x[i] * y[i] unrolled 2x. +- MOVUPD 0(R8)(SI*8), X0 +- MOVUPD 0(R9)(SI*8), X1 +- MULPD X1, X0 +- ADDPD X0, X7 +- +- ADDQ $2, SI // i += 2 +- SUBQ $2, DI // n -= 2 +- JGE U1 // if n >= 0 goto U1 +- +-V1: // n > 0 +- ADDQ $2, DI // n += 2 +- JLE E1 // if n <= 0 goto E1 +- +- // sum += x[i] * y[i] for last iteration if n is odd. +- MOVSD 0(R8)(SI*8), X0 +- MOVSD 0(R9)(SI*8), X1 +- MULSD X1, X0 +- ADDSD X0, X7 +- +-E1: +- // Add the two sums together. +- MOVSD X7, X0 +- UNPCKHPD X7, X7 +- ADDSD X0, X7 +- MOVSD X7, sum+48(FP) // return final sum +- RET +- +- +-// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +-TEXT ·DdotInc(SB),NOSPLIT,$0 +- MOVQ x+0(FP), R8 +- MOVQ y+24(FP), R9 +- MOVQ n+48(FP), CX +- MOVQ incX+56(FP), R11 +- MOVQ incY+64(FP), R12 +- MOVQ ix+72(FP), R13 +- MOVQ iy+80(FP), R14 +- +- MOVSD $(0.0), X7 // sum = 0 +- LEAQ (R8)(R13*8), SI // p = &x[ix] +- LEAQ (R9)(R14*8), DI // q = &y[ix] +- SHLQ $3, R11 // incX *= sizeof(float64) +- SHLQ $3, R12 // indY *= sizeof(float64) +- +- SUBQ $2, CX // n -= 2 +- JL V2 // if n < 0 goto V2 +- +-U2: // n >= 0 +- // sum += *p * *q unrolled 2x. +- MOVHPD (SI), X0 +- MOVHPD (DI), X1 +- ADDQ R11, SI // p += incX +- ADDQ R12, DI // q += incY +- MOVLPD (SI), X0 +- MOVLPD (DI), X1 +- ADDQ R11, SI // p += incX +- ADDQ R12, DI // q += incY +- +- MULPD X1, X0 +- ADDPD X0, X7 +- +- SUBQ $2, CX // n -= 2 +- JGE U2 // if n >= 0 goto U2 +- +-V2: +- ADDQ $2, CX // n += 2 +- JLE E2 // if n <= 0 goto E2 +- +- // sum += *p * *q for the last iteration if n is odd. +- MOVSD (SI), X0 +- MULSD (DI), X0 +- ADDSD X0, X7 +- +-E2: +- // Add the two sums together. +- MOVSD X7, X0 +- UNPCKHPD X7, X7 +- ADDSD X0, X7 +- MOVSD X7, sum+88(FP) // return final sum +- RET +- +\ No newline at end of file +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/dsdot.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/dsdot.go +deleted file mode 100644 +index 8450689..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/dsdot.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-func DsdotUnitary(x, y []float32) (sum float64) { +- for i, v := range x { +- sum += float64(y[i]) * float64(v) +- } +- return +-} +- +-func DsdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { +- for i := 0; i < int(n); i++ { +- sum += float64(y[iy]) * float64(x[ix]) +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/generate.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/generate.go +deleted file mode 100644 +index e252140..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/generate.go ++++ /dev/null +@@ -1,8 +0,0 @@ +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-//go:generate ./single_precision +-//go:generate ./complex +- +-package asm +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/saxpy.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/saxpy.go +deleted file mode 100644 +index 3ef767f..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/saxpy.go ++++ /dev/null +@@ -1,22 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-// The extra z parameter is needed because of floats.AddScaledTo +-func SaxpyUnitary(alpha float32, x, y, z []float32) { +- for i, v := range x { +- z[i] = alpha*v + y[i] +- } +-} +- +-func SaxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { +- for i := 0; i < int(n); i++ { +- y[iy] += alpha * x[ix] +- ix += incX +- iy += incY +- } +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/sdot.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/sdot.go +deleted file mode 100644 +index 0cef5de..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/sdot.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-func SdotUnitary(x, y []float32) (sum float32) { +- for i, v := range x { +- sum += y[i] * v +- } +- return +-} +- +-func SdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * x[ix] +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/single_precision b/Godeps/_workspace/src/github.com/gonum/internal/asm/single_precision +deleted file mode 100644 +index a937a97..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/single_precision ++++ /dev/null +@@ -1,30 +0,0 @@ +-#!/usr/bin/env bash +- +-# Copyright ©2015 The gonum Authors. All rights reserved. +-# Use of this source code is governed by a BSD-style +-# license that can be found in the LICENSE file. +- +-echo Generating dsdot.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > dsdot.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r '[]float64 -> []float32' \ +-| gofmt -r 'a * b -> float64(a) * float64(b)' \ +-| sed 's/Ddot/Dsdot/' \ +->> dsdot.go +- +-echo Generating sdot.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > sdot.go +-cat ddot.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> float32' \ +-| sed 's/Ddot/Sdot/' \ +->> sdot.go +- +-echo Generating saxpy.go +-echo -e '// Generated code do not edit. Run `go generate`.\n' > saxpy.go +-cat daxpy.go \ +-| grep -v '//+build' \ +-| gofmt -r 'float64 -> float32' \ +-| sed 's/Daxpy/Saxpy/' \ +->> saxpy.go +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/zaxpy.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/zaxpy.go +deleted file mode 100644 +index 9478f25..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/zaxpy.go ++++ /dev/null +@@ -1,22 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-package asm +- +-// The extra z parameter is needed because of floats.AddScaledTo +-func ZaxpyUnitary(alpha complex128, x, y, z []complex128) { +- for i, v := range x { +- z[i] = alpha*v + y[i] +- } +-} +- +-func ZaxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { +- for i := 0; i < int(n); i++ { +- y[iy] += alpha * x[ix] +- ix += incX +- iy += incY +- } +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotc.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotc.go +deleted file mode 100644 +index 7b8febc..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotc.go ++++ /dev/null +@@ -1,25 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-package asm +- +-import "math/cmplx" +- +-func ZdotcUnitary(x, y []complex128) (sum complex128) { +- for i, v := range x { +- sum += y[i] * cmplx.Conj(v) +- } +- return +-} +- +-func ZdotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * cmplx.Conj(x[ix]) +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotu.go b/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotu.go +deleted file mode 100644 +index 82c1fe2..0000000 +--- a/Godeps/_workspace/src/github.com/gonum/internal/asm/zdotu.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-// Generated code do not edit. Run `go generate`. +- +-// Copyright ©2015 The gonum Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. 
+- +-package asm +- +-func ZdotuUnitary(x, y []complex128) (sum complex128) { +- for i, v := range x { +- sum += y[i] * v +- } +- return +-} +- +-func ZdotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { +- for i := 0; i < int(n); i++ { +- sum += y[iy] * x[ix] +- ix += incX +- iy += incY +- } +- return +-} +diff --git a/Godeps/_workspace/src/github.com/gonum/matrix/mat64/inner.go b/Godeps/_workspace/src/github.com/gonum/matrix/mat64/inner.go +index 77616ba..1610a83 100644 +--- a/Godeps/_workspace/src/github.com/gonum/matrix/mat64/inner.go ++++ b/Godeps/_workspace/src/github.com/gonum/matrix/mat64/inner.go +@@ -6,7 +6,7 @@ package mat64 + + import ( + "github.com/gonum/blas" +- "github.com/gonum/internal/asm" ++ "github.com/gonum/inteernal/asm" + ) + + // Inner computes the generalized inner product +diff --git a/pkg/cmd/cli/describe/chaindescriber.go b/pkg/cmd/cli/describe/chaindescriber.go +index 2ca7c6e..2d2b6c8 100644 +--- a/pkg/cmd/cli/describe/chaindescriber.go ++++ b/pkg/cmd/cli/describe/chaindescriber.go +@@ -8,7 +8,7 @@ import ( + "github.com/golang/glog" + "github.com/gonum/graph" + "github.com/gonum/graph/encoding/dot" +- "github.com/gonum/graph/internal" ++ "github.com/gonum/graph/inteernal" + "github.com/gonum/graph/path" + kapi "k8s.io/kubernetes/pkg/api" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +-- +1.9.3 + diff --git a/SOURCES/keep-solid-port-for-kube-proxy.patch b/SOURCES/keep-solid-port-for-kube-proxy.patch new file mode 100644 index 0000000..9e7f43c --- /dev/null +++ b/SOURCES/keep-solid-port-for-kube-proxy.patch @@ -0,0 +1,56 @@ +From 805d0b60b4e97561f201ea5e457e8c71b067f25b Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Thu, 26 Nov 2015 13:00:29 +0100 +Subject: [PATCH] keep solid port for kube-proxy + +--- + hack/test-cmd.sh | 19 ++++++------------- + 1 file changed, 6 insertions(+), 13 deletions(-) + +diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh +index fb440ac..9daac99 100755 +--- a/hack/test-cmd.sh ++++ b/hack/test-cmd.sh +@@ -30,7 +30,6 @@ function stop-proxy() + { + [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null + PROXY_PID= +- PROXY_PORT= + } + + # Starts "kubect proxy" to test the client proxy. $1: api_prefix +@@ -40,18 +39,11 @@ function start-proxy() + + kube::log::status "Starting kubectl proxy" + +- for retry in $(seq 1 3); do +- PROXY_PORT=$(kube::util::get_random_port) +- kube::log::status "On try ${retry}, use proxy port ${PROXY_PORT} if it's free" +- if kube::util::test_host_port_free "127.0.0.1" "${PROXY_PORT}"; then +- if [ $# -eq 0 ]; then +- kubectl proxy -p ${PROXY_PORT} --www=. 1>&2 & break +- else +- kubectl proxy -p ${PROXY_PORT} --www=. --api-prefix="$1" 1>&2 & break +- fi +- fi +- sleep 1; +- done ++ if [ $# -eq 0 ]; then ++ kubectl proxy -p ${PROXY_PORT} --www=. 1>&2 & ++ else ++ kubectl proxy -p ${PROXY_PORT} --www=. --api-prefix="$1" 1>&2 & ++ fi + + PROXY_PID=$! + if [ $# -eq 0 ]; then +@@ -119,6 +111,7 @@ API_HOST=${API_HOST:-127.0.0.1} + KUBELET_PORT=${KUBELET_PORT:-10250} + KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248} + CTLRMGR_PORT=${CTLRMGR_PORT:-10252} ++PROXY_PORT=${PROXY_PORT:-8001} + PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. 
+ + # ensure ~/.kube/config isn't loaded by tests +-- +1.9.3 + diff --git a/SOURCES/reenable-ui.patch b/SOURCES/reenable-ui.patch new file mode 100644 index 0000000..95cdc50 --- /dev/null +++ b/SOURCES/reenable-ui.patch @@ -0,0 +1,38 @@ +From c42439278f1709fc3d5eec16a30ff1c1db11f78f Mon Sep 17 00:00:00 2001 +From: Jan Chaloupka +Date: Tue, 24 Nov 2015 15:52:03 +0100 +Subject: [PATCH] reenable /ui, origin e9a12151d67cedb4d3816a791ff02cfc61f024c9 + +--- + Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go +index 22e2d6e..c602abd 100644 +--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go ++++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go +@@ -78,7 +78,7 @@ import ( + "k8s.io/kubernetes/pkg/storage" + etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/tools" +- //"k8s.io/kubernetes/pkg/ui" ++ "k8s.io/kubernetes/pkg/ui" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + +@@ -688,9 +688,9 @@ func (m *Master) init(c *Config) { + if c.EnableLogsSupport { + apiserver.InstallLogsSupport(m.muxHelper) + } +- /*if c.EnableUISupport { +- ui.InstallSupport(m.mux) +- }*/ ++ if c.EnableUISupport { ++ ui.InstallSupport(m.mux, m.enableSwaggerSupport) ++ } + + if c.EnableProfiling { + m.mux.HandleFunc("/debug/pprof/", pprof.Index) +-- +1.9.3 + diff --git a/SPECS/kubernetes.spec b/SPECS/kubernetes.spec index e13b15c..b509624 100644 --- a/SPECS/kubernetes.spec +++ b/SPECS/kubernetes.spec @@ -1,7 +1,12 @@ +%if 0%{?fedora} +%global with_devel 1 +%global with_bundled 1 +%global with_debug 1 +%else %global with_devel 0 %global with_bundled 1 -#debuginfo not supported with Go in RHEL -%global with_debug 0 +%global with_debug 1 +%endif %if 0%{?with_debug} # https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 @@ -9,19 +14,44 @@ %else %global debug_package %{nil} %endif -%global provider github -%global provider_tld com -%global project GoogleCloudPlatform -%global repo kubernetes -# https://github.com/GoogleCloudPlatform/kubernetes +%global provider github +%global provider_tld com +%global project openshift +%global repo ose +# https://github.com/openshift/ose %global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo} %global import_path k8s.io/kubernetes -%global commit b9a88a7d0e357be2174011dd2b127038c6ea8929 +%global commit 86327329213fed4af2661c5ae1e92f9956b24f55 %global shortcommit %(c=%{commit}; echo ${c:0:7}) -%global con_commit bb44ddd48d365784343c488a6f3cae97620a780d +%global openshift_ip github.com/openshift/origin + +%global k8s_provider github +%global k8s_provider_tld com +%global k8s_project kubernetes +%global k8s_repo kubernetes +# https://github.com/kubernetes/kubernetes +%global k8s_provider_prefix %{k8s_provider}.%{k8s_provider_tld}/%{k8s_project}/%{k8s_repo} +%global k8s_commit 4c8e6f47ec23f390978e651232b375f5f9cde3c7 +%global k8s_shortcommit %(c=%{k8s_commit}; echo ${c:0:7}) +%global k8s_src_dir Godeps/_workspace/src/k8s.io/kubernetes/ +%global k8s_src_dir_sed Godeps\\/_workspace\\/src\\/k8s\\.io\\/kubernetes\\/ + +%global con_provider github +%global con_provider_tld com +%global con_project kubernetes +%global con_repo contrib +# https://github.com/kubernetes/kubernetes +%global con_provider_prefix %{con_provider}.%{con_provider_tld}/%{con_project}/%{con_repo} +%global con_commit 
1c4eb2d56c70adfb2eda7c7d2543b40274d5ede8 %global con_shortcommit %(c=%{con_commit}; echo ${c:0:7}) -%global con_repo contrib + +%global O4N_GIT_MAJOR_VERSION 3 +%global O4N_GIT_MINOR_VERSION 1+ +%global O4N_GIT_VERSION v3.1.1.0 +%global K8S_GIT_VERSION v1.2.0-alpha.1-1107-g4c8e6f4 +%global kube_version 1.2.0 +%global kube_git_version v%{kube_version} #I really need this, otherwise "version_ldflags=$(kube::version_ldflags)" # does not work @@ -29,21 +59,29 @@ %global _checkshell /bin/bash Name: kubernetes -Version: 1.0.3 -Release: 0.2.git%{shortcommit}%{?dist} +Version: %{kube_version} +Release: 0.6.alpha1.git%{shortcommit}%{?dist} Summary: Container cluster management License: ASL 2.0 URL: %{import_path} ExclusiveArch: x86_64 Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz -Source1: https://%{provider}.%{provider_tld}/%{project}/%{con_repo}/archive/%{con_commit}/%{con_repo}-%{con_shortcommit}.tar.gz -Source2: genmanpages.sh -Patch1: Fix-Persistent-Volumes-and-Persistent-Volume-Claims.patch +Source1: https://%{k8s_provider_prefix}/archive/%{k8s_commit}/%{k8s_repo}-%{k8s_shortcommit}.tar.gz +Source2: https://%{con_provider_prefix}/archive/%{con_commit}/%{con_repo}-%{con_shortcommit}.tar.gz + +Source33: genmanpages.sh +Patch0: build-with-debug-info.patch +Patch1: add-pod-infrastructure-container.patch Patch2: Change-etcd-server-port.patch -%if 0%{?with_debug} -Patch3: build-with-debug-info.patch -%endif -Patch4: change-internal-to-inteernal.patch +Patch4: internal-to-inteernal.patch +Patch5: 0001-internal-inteernal.patch + +Patch9: hack-test-cmd.sh.patch +# Due to k8s 5d08dcf8377e76f2ce303dc79404f511ebef82e3 +Patch10: keep-solid-port-for-kube-proxy.patch + +# ui is enable in pure kubernetes +Patch12: reenable-ui.patch # It obsoletes cadvisor but needs its source code (literally integrated) Obsoletes: cadvisor @@ -59,45 +97,12 @@ Requires: kubernetes-node = %{version}-%{release} %if 0%{?with_devel} %package devel Summary: %{summary} -BuildRequires: golang >= 1.2.1-3 +BuildArch: noarch -Provides: golang(%{import_path}/cmd/genutils) = %{version}-%{release} Provides: golang(%{import_path}/cmd/kube-apiserver/app) = %{version}-%{release} Provides: golang(%{import_path}/cmd/kube-controller-manager/app) = %{version}-%{release} Provides: golang(%{import_path}/cmd/kube-proxy/app) = %{version}-%{release} Provides: golang(%{import_path}/cmd/kubelet/app) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/archive) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/assert) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/backoff) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/controllermanager) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/election) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/executor) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/executor/config) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/executor/messages) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/executor/service) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/hyperkube) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/minion) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/minion/config) = %{version}-%{release} -Provides: golang(%{import_path}/contrib/mesos/pkg/offers) = 
%{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/offers/metrics) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/proc) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/profile) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/queue) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/redirfd) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/runtime) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/config) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/constraint) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/ha) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/meta) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/metrics) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/podtask) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/resource) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/service) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/scheduler/uid) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/mesos/pkg/service) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/submit-queue/github) = %{version}-%{release}
-Provides: golang(%{import_path}/contrib/submit-queue/jenkins) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/admission) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/endpoints) = %{version}-%{release}
@@ -111,30 +116,38 @@ Provides: golang(%{import_path}/pkg/api/rest) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/rest/resttest) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/testapi) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/testing) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/api/util) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/v1) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/api/v1beta3) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/api/validation) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/apis/experimental) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/apis/experimental/latest) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/apis/experimental/testapi) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/apis/experimental/v1) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/apis/experimental/validation) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/apiserver) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/apiserver/metrics) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/authenticator) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/authenticator/bearertoken) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/authorizer) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/authorizer/abac) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/auth/authorizer/union) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/handlers) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/auth/user) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/capabilities) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/auth) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/client/cache) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/client/chaosclient) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/clientcmd) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/clientcmd/api) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/clientcmd/api/latest) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/clientcmd/api/v1) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/client/metrics) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/portforward) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/client/record) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/remotecommand) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/client/testclient) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/auth) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api/latest) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/clientcmd/api/v1) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/portforward) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/remotecommand) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/client/unversioned/testclient) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/cloudprovider) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/cloudprovider/providers) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/cloudprovider/providers/aws) = %{version}-%{release}
@@ -146,11 +159,14 @@ Provides: golang(%{import_path}/pkg/cloudprovider/providers/ovirt) = %{version}-
 Provides: golang(%{import_path}/pkg/cloudprovider/providers/rackspace) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/cloudprovider/providers/vagrant) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/controller/daemon) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/endpoint) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/framework) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/namespace) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/node) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/persistentvolume) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/controller/podautoscaler) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/controller/podautoscaler/metrics) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/replication) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/resourcequota) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/controller/route) = %{version}-%{release}
@@ -160,15 +176,10 @@ Provides: golang(%{import_path}/pkg/conversion) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/conversion/queryparams) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/credentialprovider) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/credentialprovider/gcp) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/expapi) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/expapi/latest) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/expapi/v1) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/expapi/validation) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/fieldpath) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/fields) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/healthz) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/httplog) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/hyperkube) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubectl) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubectl/cmd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubectl/cmd/config) = %{version}-%{release}
@@ -185,8 +196,10 @@ Provides: golang(%{import_path}/pkg/kubelet/lifecycle) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/metrics) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/network) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/network/exec) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/kubelet/network/hairpin) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/prober) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/qos) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/kubelet/qos/util) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/rkt) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/types) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/kubelet/util) = %{version}-%{release}
@@ -201,10 +214,13 @@ Provides: golang(%{import_path}/pkg/proxy) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/proxy/config) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/proxy/iptables) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/proxy/userspace) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/registry) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/componentstatus) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/controller) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/controller/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/daemonset) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/daemonset/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/deployment) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/deployment/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/endpoint) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/endpoint/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/event) = %{version}-%{release}
@@ -215,12 +231,14 @@ Provides: golang(%{import_path}/pkg/registry/generic/etcd) = %{version}-%{releas
 Provides: golang(%{import_path}/pkg/registry/generic/rest) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/horizontalpodautoscaler) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/horizontalpodautoscaler/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/job) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/job/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/limitrange) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/limitrange/etcd) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/registry/minion) = %{version}-%{release}
-Provides: golang(%{import_path}/pkg/registry/minion/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/namespace) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/namespace/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/node) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/node/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/persistentvolume) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/persistentvolume/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/persistentvolumeclaim) = %{version}-%{release}
@@ -234,6 +252,8 @@ Provides: golang(%{import_path}/pkg/registry/resourcequota) = %{version}-%{relea
 Provides: golang(%{import_path}/pkg/registry/resourcequota/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/secret) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/secret/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/securitycontextconstraints) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/securitycontextconstraints/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/service) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/service/allocator) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/service/allocator/etcd) = %{version}-%{release}
@@ -245,8 +265,15 @@ Provides: golang(%{import_path}/pkg/registry/service/portallocator) = %{version}
 Provides: golang(%{import_path}/pkg/registry/service/portallocator/controller) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/serviceaccount) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/registry/serviceaccount/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/thirdpartyresource) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/thirdpartyresource/etcd) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/thirdpartyresourcedata) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/registry/thirdpartyresourcedata/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/runtime) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/securitycontext) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/securitycontextconstraints) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/securitycontextconstraints/selinux) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/securitycontextconstraints/user) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/storage) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/storage/etcd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/tools) = %{version}-%{release}
@@ -258,12 +285,14 @@ Provides: golang(%{import_path}/pkg/ui/data/swagger) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/bandwidth) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/config) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/util/dbus) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/errors) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/exec) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/fielderrors) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/flushwriter) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/httpstream) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/httpstream/spdy) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/util/io) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/iptables) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/jsonpath) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/mount) = %{version}-%{release}
@@ -273,6 +302,7 @@ Provides: golang(%{import_path}/pkg/util/operationmanager) = %{version}-%{releas
 Provides: golang(%{import_path}/pkg/util/procfs) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/proxy) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/rand) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/util/sets) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/slice) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/strategicpatch) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/util/wait) = %{version}-%{release}
@@ -282,6 +312,9 @@ Provides: golang(%{import_path}/pkg/version) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/version/verflag) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/volume) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/volume/aws_ebs) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/volume/cephfs) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/volume/cinder) = %{version}-%{release}
+Provides: golang(%{import_path}/pkg/volume/downwardapi) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/volume/empty_dir) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/volume/gce_pd) = %{version}-%{release}
 Provides: golang(%{import_path}/pkg/volume/git_repo) = %{version}-%{release}
@@ -299,6 +332,7 @@ Provides: golang(%{import_path}/plugin/cmd/kube-scheduler/app) = %{version}-%{re
 Provides: golang(%{import_path}/plugin/pkg/admission/admit) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/deny) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/exec/denyprivileged) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/admission/initialresources) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/limitranger) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/namespace/autoprovision) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/namespace/exists) = %{version}-%{release}
@@ -306,17 +340,13 @@ Provides: golang(%{import_path}/plugin/pkg/admission/namespace/lifecycle) = %{ve
 Provides: golang(%{import_path}/plugin/pkg/admission/resourcequota) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/securitycontext/scdeny) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/admission/serviceaccount) = %{version}-%{release}
-Provides: golang(%{import_path}/plugin/pkg/auth) = %{version}-%{release}
-Provides: golang(%{import_path}/plugin/pkg/auth/authenticator) = %{version}-%{release}
-Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password) = %{version}-%{release}
-Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password/allow) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/password/passwordfile) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/basicauth) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/keystone) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/union) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/request/x509) = %{version}-%{release}
+Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/oidc) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/tokenfile) = %{version}-%{release}
-Provides: golang(%{import_path}/plugin/pkg/auth/authenticator/token/tokentest) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/scheduler) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/scheduler/algorithm/predicates) = %{version}-%{release}
@@ -330,8 +360,6 @@ Provides: golang(%{import_path}/plugin/pkg/scheduler/api/validation) = %{version
 Provides: golang(%{import_path}/plugin/pkg/scheduler/factory) = %{version}-%{release}
 Provides: golang(%{import_path}/plugin/pkg/scheduler/metrics) = %{version}-%{release}
 Provides: golang(%{import_path}/test/e2e) = %{version}-%{release}
-Provides: golang(%{import_path}/test/integration) = %{version}-%{release}
-Provides: golang(%{import_path}/test/integration/framework) = %{version}-%{release}
 
 %description devel
 %{summary}
@@ -405,32 +433,76 @@ BuildRequires: golang >= 1.2-7
 Kubernetes client tools like kubectl
 
 %prep
-#%%setup -q -n %{con_repo}-%{con_commit} -T -b 1
-%setup -q -n %{repo}-%{commit}
-# move content of contrib back to kubernetes
-#mv ../%{con_repo}-%{con_commit}/init contrib/init
-
-%patch1 -p1
-%patch2 -p1
+%setup -q -n %{k8s_repo}-%{k8s_commit} -T -b 1
 %if 0%{?with_debug}
-%patch3 -p1
+%patch0 -p1
 %endif
+# Hack test-cmd.sh to be run with os binaries
+%patch9 -p1
+# Keep solid port for kube-proxy
+%patch10 -p1
+
+%setup -q -n %{con_repo}-%{con_commit} -T -b 2
+%patch1 -p1
+
+%setup -q -n %{repo}-%{commit}
+
+# clean the directory up to Godeps
+dirs=$(ls | grep -v "^Godeps")
+rm -rf $dirs
+
+# Only for go >= 1.5
+%if 0%{?fedora}
+# internal -> inteernal
 #%patch4 -p1
+#%patch5 -p1
+%endif
+
+# re-enable /ui
+%patch12 -p1
+
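+# the ose tree was stripped down to Godeps above; bring the bundled k8s code
+# back to the top level and copy the upstream tarball pieces (cmd wrappers,
+# hack, contrib, docs) that the build and test scripts below still expect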
+# move k8s code from Godeps
+mv Godeps/_workspace/src/k8s.io/kubernetes/* .
+# copy missing source code
+cp ../%{k8s_repo}-%{k8s_commit}/cmd/kube-apiserver/apiserver.go cmd/kube-apiserver/.
+cp ../%{k8s_repo}-%{k8s_commit}/cmd/kube-controller-manager/controller-manager.go cmd/kube-controller-manager/.
+cp ../%{k8s_repo}-%{k8s_commit}/cmd/kubelet/kubelet.go cmd/kubelet/.
+cp ../%{k8s_repo}-%{k8s_commit}/cmd/kube-proxy/proxy.go cmd/kube-proxy/.
+cp ../%{k8s_repo}-%{k8s_commit}/plugin/cmd/kube-scheduler/scheduler.go plugin/cmd/kube-scheduler/.
+cp -r ../%{k8s_repo}-%{k8s_commit}/cmd/kubectl cmd/.
+# copy hack directory
+cp -r ../%{k8s_repo}-%{k8s_commit}/hack .
+# copy contrib directory
+cp -r ../%{k8s_repo}-%{k8s_commit}/contrib .
+# copy init scripts from the contrib repo
+cp -r ../%{con_repo}-%{con_commit}/init contrib/.
+# copy docs
+cp -r ../%{k8s_repo}-%{k8s_commit}/docs/admin docs/.
+cp -r ../%{k8s_repo}-%{k8s_commit}/docs/man docs/.
+# copy cmd/kube-version-change
+cp -r ../%{k8s_repo}-%{k8s_commit}/cmd/kube-version-change cmd/.
+rm -rf cmd/kube-version-change/import_known_versions.go
+# copy LICENSE and *.md
+cp ../%{k8s_repo}-%{k8s_commit}/LICENSE .
+cp ../%{k8s_repo}-%{k8s_commit}/*.md .
+
+%patch2 -p1
 
 %build
 export KUBE_GIT_TREE_STATE="clean"
 export KUBE_GIT_COMMIT=%{commit}
-export KUBE_GIT_VERSION=v1.0.3-34-gb9a88a7d0e357b
+export KUBE_GIT_VERSION=%{kube_git_version}
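+# the KUBE_GIT_* overrides above are picked up by the upstream hack/ build
+# scripts, so binaries built from the tarball still report %{kube_git_version}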
-hack/build-go.sh --use_go_build
-hack/build-go.sh --use_go_build cmd/kube-version-change
+# remove import_known_versions.go
+rm -rf cmd/kube-version-change/import_known_versions.go
+hack/build-go.sh --use_go_build cmd/kube-apiserver cmd/kube-controller-manager plugin/cmd/kube-scheduler cmd/kubelet cmd/kube-proxy cmd/kube-version-change cmd/kubectl
 
 # convert md to man
 pushd docs
 pushd admin
 cp kube-apiserver.md kube-controller-manager.md kube-proxy.md kube-scheduler.md kubelet.md ..
 popd
-cp %{SOURCE2} genmanpages.sh
+cp %{SOURCE33} genmanpages.sh
 bash genmanpages.sh
 popd
@@ -462,6 +534,7 @@ install -m 0644 -t %{buildroot}%{_unitdir} contrib/init/systemd/*.service
 # install manpages
 install -d %{buildroot}%{_mandir}/man1
 install -p -m 644 docs/man/man1/* %{buildroot}%{_mandir}/man1
+# the man pages under docs/man/man1/ come from the k8s tarball copied in %prep
 
 # install the place the kubelet defaults to put volumes
 install -d %{buildroot}%{_sharedstatedir}/kubelet
@@ -470,26 +543,38 @@ install -d %{buildroot}%{_sharedstatedir}/kubelet
 install -d -m 0755 %{buildroot}%{_tmpfilesdir}
 install -p -m 0644 -t %{buildroot}/%{_tmpfilesdir} contrib/init/systemd/tmpfiles.d/kubernetes.conf
 
-%if 0%{?with_debug}
-# remove porter as it is built inside docker container without options for debug info
-rm -rf contrib/for-tests/porter
+# source codes for building projects
+%if 0%{?with_devel}
+install -d -p %{buildroot}/%{gopath}/src/%{import_path}/
+echo "%%dir %%{gopath}/src/%%{import_path}/." >> devel.file-list
+# find all *.go but no *_test.go files and generate devel.file-list
+for file in $(find . -iname "*.go" \! -iname "*_test.go") ; do
+    echo "%%dir %%{gopath}/src/%%{import_path}/$(dirname $file)" >> devel.file-list
+    install -d -p %{buildroot}/%{gopath}/src/%{import_path}/$(dirname $file)
+    cp -pav $file %{buildroot}/%{gopath}/src/%{import_path}/$file
+    echo "%%{gopath}/src/%%{import_path}/$file" >> devel.file-list
+done
 %endif
 
 %if 0%{?with_devel}
-# install devel source codes
-install -d %{buildroot}/%{gopath}/src/%{import_path}
-for d in build cluster cmd contrib examples hack pkg plugin test; do
-    cp -rpav $d %{buildroot}/%{gopath}/src/%{import_path}/
-done
+sort -u -o devel.file-list devel.file-list
 %endif
 
 # place files for unit-test rpm
 install -d -m 0755 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/
-cp -pav README.md %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/.
-for d in _output Godeps api cmd docs examples hack pkg plugin third_party test; do
+pushd ../%{k8s_repo}-%{k8s_commit}
+# only files for hack/test-cmd.sh atm
+for d in docs examples hack; do
    cp -a $d %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/
 done
+popd
+# rpmdiff issues
+chmod 0644 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/docs/admin/ovs-networking.png
+chmod 0644 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/docs/admin/resource-quota.md
+chmod 0644 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/docs/devel/scheduler.md
+chmod 0644 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/docs/devel/scheduler_algorithm.md
+chmod 0755 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/hack/build-ui.sh
 chmod 0644 %{buildroot}%{_sharedstatedir}/kubernetes-unit-test/hack/lib/util.sh
 
 %check
@@ -509,15 +594,19 @@ hack/test-integration.sh --use_go_build
 %endif
 fi
 
+#define license tag if not already defined
+%{!?_licensedir:%global license %doc}
+
 %files
 # empty as it depends on master and node
 
 %files master
-%doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
+%license LICENSE
+%doc *.md
 %{_mandir}/man1/kube-apiserver.1*
 %{_mandir}/man1/kube-controller-manager.1*
 %{_mandir}/man1/kube-scheduler.1*
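+# kube-apiserver carries the CAP_NET_BIND_SERVICE file capability so it can
+# bind port 443 without running as root; mode 754 with group kube keeps the
+# binary unreadable for other users (see changelog)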
-%{_bindir}/kube-apiserver
+%attr(754, -, kube) %caps(cap_net_bind_service=ep) %{_bindir}/kube-apiserver
 %{_bindir}/kube-controller-manager
 %{_bindir}/kube-scheduler
 %{_bindir}/kube-version-change
@@ -532,7 +621,8 @@ fi
 %{_tmpfilesdir}/kubernetes.conf
 
 %files node
-%doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
+%license LICENSE
+%doc *.md
 %{_mandir}/man1/kubelet.1*
 %{_mandir}/man1/kube-proxy.1*
 %{_bindir}/kubelet
@@ -548,7 +638,8 @@ fi
 %{_tmpfilesdir}/kubernetes.conf
 
 %files client
-%doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
+%license LICENSE
+%doc *.md
 %{_mandir}/man1/kubectl.1*
 %{_mandir}/man1/kubectl-*
 %{_bindir}/kubectl
@@ -558,10 +649,9 @@ fi
 %{_sharedstatedir}/kubernetes-unit-test/
 
 %if 0%{?with_devel}
-%files devel
-%doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
+%files devel -f devel.file-list
+%doc *.md
 %dir %{gopath}/src/k8s.io
-%{gopath}/src/%{import_path}
 %endif
 
 %pre master
@@ -594,18 +684,157 @@ getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \
 %systemd_postun
 
 %changelog
-* Fri Sep 18 2015 jchaloup - 1.0.3-0.2.gitb9a88a7
-- Fix GIT_VERSION
-  resolves: #1263783
+* Tue Jan 05 2016 jchaloup - 1.2.0-0.6.alpha1.git8632732
+- Move definition of all version, git and commit macros at one place
+
+* Mon Jan 04 2016 jchaloup - 1.2.0-0.5.alpha1.git8632732
+- Set kube-apiserver's group to kube
+  resolves: #1295545
+
+* Mon Jan 04 2016 jchaloup - 1.2.0-0.4.alpha1.git8632732
+- Set kube-apiserver permission to 754
+
+* Mon Jan 04 2016 jchaloup - 1.2.0-0.3.alpha1.git8632732
+- Fix rpmdiff complaint
+
+* Tue Dec 01 2015 jchaloup - 1.2.0-0.2.alpha1.git0e71938
+- Build kubernetes from ose's Godeps using hack/build-go.sh
+  ose's Godeps = kubernetes upstream + additional patches
+- Build with debug info
+- Use internal pod infrastructure container
+- Set CAP_NET_BIND_SERVICE on the kube-apiserver so it can use 443, set 0 permission for others
+- Rebase to ose-3.1.1.0 (19 Dec 2015)
+
+* Thu Nov 05 2015 jchaloup - 1.2.0-0.1.alpha1.git0e71938
+- Rebase to ose 3.1.0.0
+- Remove Ceph and FC volume patch as they are already merged in k8s tarball
+
+* Fri Oct 23 2015 Colin Walters - 1.1.0-0.41.alpha1.git6de3e85
+- Add patch to support FC volumes
+  Resolves: #1274443
+
+* Fri Oct 23 2015 Colin Walters - 1.1.0-0.40.alpha1.git6de3e85
+- Add patch to drop dependency on Ceph server side
+  Resolves: #1274421
+
+* Mon Oct 12 2015 jchaloup - 1.1.0-0.39.alpha1.git6de3e85
+- Add missing short option for --server of kubectl
+- Update unit-test-subpackage (only test-cmd.sh atm)
+  related: #1211266
+
+* Fri Oct 09 2015 jchaloup - 1.1.0-0.38.alpha1.git6de3e85
+- Add normalization of flags
+  related: #1211266
+
+* Wed Sep 30 2015 jchaloup - 1.1.0-0.37.alpha1.git5f38cb0
+- Do not unset default cluster, otherwise k8s ends with error when no cluster set
+- Built k8s from o7t/ose 6de3e8543699213f0cdb28032be82b4dae408dfe
+  related: #1211266
+
+* Wed Sep 30 2015 jchaloup - 1.1.0-0.36.alpha0.git5f38cb0
+- Bump to o4n 5f38cb0e98c9e854cafba9c7f98dafd51e955ad8
+  related: #1211266
+
+* Tue Sep 29 2015 jchaloup - 1.1.0-0.35.alpha1.git2695cdc
+- Update git version of k8s and o4n, add macros
+  related: #1211266
-* Tue Aug 25 2015 jchaloup - 1.0.3-0.1.gitb9a88a7
-- The closest version of 1.0 is 1.0.3
-  related: #1232005
+* Tue Sep 29 2015 jchaloup - 1.1.0-0.34.alpha1.git2695cdc
+- Built k8s from o4n tarball
+- Bump to upstream 2695cdcd29a8f11ef60278758e11f4817daf3c7c
+  related: #1211266
+
+* Tue Sep 22 2015 jchaloup - 1.1.0-0.33.alpha1.git09cf38e
+- Bump to upstream 09cf38e9a80327e2d41654db277d00f19e2c84d0
+  related: #1211266
+
+* Thu Sep 17 2015 jchaloup - 1.1.0-0.32.alpha1.git400e685
+- Bump to upstream 400e6856b082ecf4b295568acda68d630fc000f1
+  related: #1211266
+
+* Wed Sep 16 2015 jchaloup - 1.1.0-0.31.gitd549fc4
+- Bump to upstream d549fc400ac3e5901bd089b40168e1e6fb17341d
+  related: #1211266
+
+* Tue Sep 15 2015 jchaloup - 1.1.0-0.30.gitc9570e3
+- Bump to upstream c9570e34d03c6700d83f796c0125d17c5064e57d
+  related: #1211266
+
+* Mon Sep 14 2015 jchaloup - 1.1.0-0.29.git86b4e77
+- Bump to upstream 86b4e777e1947c1bc00e422306a3ca74cbd54dbe
+  related: #1211266
+
+* Thu Sep 10 2015 jchaloup - 1.1.0-0.28.gitf867ba3
+- Bump to upstream f867ba3ba13e3dad422efd21c74f52b9762de37e
+  related: #1211266
+
+* Wed Sep 09 2015 jchaloup - 1.1.0-0.27.git0f4fa4e
+- Bump to upstream 0f4fa4ed25ae9a9d1824fe55aeefb4d4ebfecdfd
+  related: #1211266
+
+* Tue Sep 08 2015 jchaloup - 1.1.0-0.26.git196f58b
+- Bump to upstream 196f58b9cb25a2222c7f9aacd624737910b03acb
+  related: #1211266
+
+* Mon Sep 07 2015 jchaloup - 1.1.0-0.25.git96e0ed5
+- Bump to upstream 96e0ed5749608d4cc32f61b3674deb04c8fa90ad
+  related: #1211266
+
+* Sat Sep 05 2015 jchaloup - 1.1.0-0.24.git2e2def3
+- Bump to upstream 2e2def36a904fe9a197da5fc70e433e2e884442f
+  related: #1211266
+
+* Fri Sep 04 2015 jchaloup - 1.1.0-0.23.gite724a52
+- Bump to upstream e724a5210adf717f62a72162621ace1e08730c75
+  related: #1211266
-* Tue Aug 25 2015 jchaloup - 1.0-0.1.gitb9a88a7
-- Bump to upstream b9a88a7d0e357be2174011dd2b127038c6ea8929
-- Downgrade to kubernetes-1.0
-  related: #1232005
+* Thu Sep 03 2015 jchaloup - 1.1.0-0.22.gitb6f2f39
+- Bump to upstream b6f2f396baec5105ff928cf61903c2c368259b21
+  related: #1211266
+
+* Wed Sep 02 2015 jchaloup - 1.1.0-0.21.gitb4a3698
+- Bump to upstream b4a3698faed81410468eccf9f328ca6df3d0cca3
+  related: #1211266
+
+* Tue Sep 01 2015 jchaloup - 1.1.0-0.20.git2f9652c
+- Bump to upstream 2f9652c7f1d4b8f333c0b5c8c1270db83b913436
+  related: #1211266
+
+* Mon Aug 31 2015 jchaloup - 1.1.0-0.19.git66a644b
+- Bump to upstream 66a644b275ede9ddb98eb3f76e8d1840cafc2147
+  related: #1211266
+
+* Thu Aug 27 2015 jchaloup - 1.1.0-0.18.gitab73849
+- Bump to upstream ab7384943748312f5e9294f42d42ed3983c7c96c
+  related: #1211266
+
+* Wed Aug 26 2015 jchaloup - 1.1.0-0.17.git00e3442
+- Bump to upstream 00e34429e0242323ed34347cf0ab65b3d62b21f7
+  related: #1211266
+
+* Tue Aug 25 2015 jchaloup - 1.1.0-0.16.gita945785
+- Bump to upstream a945785409d5b68f3a2721d2209300edb5abf1ce
+  related: #1211266
+
+* Mon Aug 24 2015 jchaloup - 1.1.0-0.15.git5fe7029
+- Bump to upstream 5fe7029e688e1e5873a0b95a622edda5b5156d2b
+  related: #1211266
+
+* Fri Aug 21 2015 jchaloup - 1.1.0-0.14.gitb6f18c7
+- Bump to upstream b6f18c7ce08714c8d4f6019463879a164a41750e
+  related: #1211266
+
+* Thu Aug 20 2015 jchaloup - 1.1.0-0.13.git44fa48e
+- Bump to upstream 44fa48e5af44d3e988fa943d96a2de732d8cc666
+  related: #1211266
+
+* Wed Aug 19 2015 jchaloup - 1.1.0-0.12.gitb5a4a54
+- Bump to upstream b5a4a548df0cffb99bdcc3b9b9e48d4025d0541c
+  related: #1211266
+
+* Tue Aug 18 2015 jchaloup - 1.1.0-0.11.git919c7e9
+- Bump to upstream 919c7e94e23d2dcd5bdd96896e0a7990f9ae3338
+  related: #1211266
 
 * Tue Aug 18 2015 jchaloup - 1.1.0-0.10.git280b66c
 - Bump to upstream 280b66c9012c21e253acd4e730f8684c39ca08ec
@@ -692,7 +921,6 @@ getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \
   resolves: #1241469
 
 * Mon Jul 20 2015 jchaloup - 1.0.0-0.8.gitb2dafda
-- Undo 'Set CAP_NET_BIND_SERVICE on the kube-apiserver so it can use 443'
 - Fix dependency and tests for go-1.5
 - with_debug off as the builds ends with error "ELFRESERVE too small: ..."