Merge pull request #23220 from icecrime/bump_etcd_2.3.2
Bump etcd to 2.3.2
Commit b16f735e90
39 changed files with 22581 additions and 9620 deletions
@@ -79,9 +79,9 @@ clone git github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a2
 clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-clone git github.com/coreos/etcd v2.2.0
+clone git github.com/coreos/etcd v2.3.2
+fix_rewritten_imports github.com/coreos/etcd
-clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec
+clone git github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
 clone git github.com/hashicorp/consul v0.5.2
 clone git github.com/boltdb/bolt v1.2.1
 clone git github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
@@ -35,9 +35,25 @@ func main() {
 		log.Fatal(err)
 	}
 	kapi := client.NewKeysAPI(c)
-	resp, err := kapi.Set(context.Background(), "foo", "bar", nil)
+	// set "/foo" key with "bar" value
+	log.Print("Setting '/foo' key with 'bar' value")
+	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
 	if err != nil {
 		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Set is done. Metadata is %q\n", resp)
 	}
+	// get "/foo" key's value
+	log.Print("Getting '/foo' key value")
+	resp, err = kapi.Get(context.Background(), "/foo", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Get is done. Metadata is %q\n", resp)
+		// print value
+		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+	}
 }
 ```
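For context, the bumped README example boils down to the following self-contained sketch. It assumes a local etcd reachable at http://127.0.0.1:2379 (a placeholder), and it uses upstream etcd import paths rather than Docker's vendor tree.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// set "/foo" key with "bar" value
	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Set is done. Metadata is %q\n", resp)

	// read the value back
	resp, err = kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
}
```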
@@ -61,7 +77,7 @@ If the response gets from the cluster is invalid, a plain string error will be r
 Here is the example code to handle client errors:
 
 ```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379,http://etcd2:2379,http://etcd3:2379"}}
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
 c, err := client.New(cfg)
 if err != nil {
 	log.Fatal(err)
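A hedged sketch of classifying the errors a KeysAPI call can return, reusing the imports from the previous sketch. The *client.ClusterError branch is an assumption based on the v2 client (its Errors field is visible in the client.go hunk further down); treat it as illustrative rather than the library's documented contract.

```go
func setWithErrorHandling(kapi client.KeysAPI) {
	_, err := kapi.Set(context.Background(), "/test", "bar", nil)
	if err != nil {
		switch {
		case err == context.Canceled:
			// the context was canceled by another routine
		case err == context.DeadlineExceeded:
			// the context deadline was exceeded
		default:
			if cerr, ok := err.(*client.ClusterError); ok {
				// every endpoint was tried and failed; inspect the per-endpoint errors
				for _, e := range cerr.Errors {
					log.Println(e)
				}
			} else {
				// bad cluster endpoints, which are not etcd servers
				log.Fatal(err)
			}
		}
	}
}
```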
@@ -56,22 +56,22 @@ func NewAuthRoleAPI(c Client) AuthRoleAPI {
 }
 
 type AuthRoleAPI interface {
-	// Add a role.
+	// AddRole adds a role.
 	AddRole(ctx context.Context, role string) error
 
-	// Remove a role.
+	// RemoveRole removes a role.
 	RemoveRole(ctx context.Context, role string) error
 
-	// Get role details.
+	// GetRole retrieves role details.
 	GetRole(ctx context.Context, role string) (*Role, error)
 
-	// Grant a role some permission prefixes for the KV store.
+	// GrantRoleKV grants a role some permission prefixes for the KV store.
 	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
 
-	// Revoke some some permission prefixes for a role on the KV store.
+	// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
 	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
 
-	// List roles.
+	// ListRoles lists roles.
 	ListRoles(ctx context.Context) ([]string, error)
 }
@@ -115,17 +115,20 @@ func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+	if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
 		return nil, err
 	}
-	var userList struct {
-		Roles []string `json:"roles"`
+	var roleList struct {
+		Roles []Role `json:"roles"`
 	}
-	err = json.Unmarshal(body, &userList)
-	if err != nil {
+	if err = json.Unmarshal(body, &roleList); err != nil {
 		return nil, err
 	}
-	return userList.Roles, nil
+	ret := make([]string, 0, len(roleList.Roles))
+	for _, r := range roleList.Roles {
+		ret = append(ret, r.Role)
+	}
+	return ret, nil
 }
 
 func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
@ -218,17 +221,16 @@ func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
var role Role
|
||||
err = json.Unmarshal(body, &role)
|
||||
if err != nil {
|
||||
if err = json.Unmarshal(body, &role); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &role, nil
|
||||
|
|
|
@@ -36,6 +36,21 @@ type User struct {
 	Revoke []string `json:"revoke,omitempty"`
 }
 
+// userListEntry is the user representation given by the server for ListUsers
+type userListEntry struct {
+	User  string `json:"user"`
+	Roles []Role `json:"roles"`
+}
+
+type UserRoles struct {
+	User  string `json:"user"`
+	Roles []Role `json:"roles"`
+}
+
 type userName struct {
 	User string `json:"user"`
 }
 
 func v2AuthURL(ep url.URL, action string, name string) *url.URL {
 	if name != "" {
 		ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
@ -78,9 +93,9 @@ func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -117,25 +132,25 @@ func NewAuthUserAPI(c Client) AuthUserAPI {
 }
 
 type AuthUserAPI interface {
-	// Add a user.
+	// AddUser adds a user.
 	AddUser(ctx context.Context, username string, password string) error
 
-	// Remove a user.
+	// RemoveUser removes a user.
 	RemoveUser(ctx context.Context, username string) error
 
-	// Get user details.
+	// GetUser retrieves user details.
 	GetUser(ctx context.Context, username string) (*User, error)
 
-	// Grant a user some permission roles.
+	// GrantUser grants a user some permission roles.
 	GrantUser(ctx context.Context, username string, roles []string) (*User, error)
 
-	// Revoke some permission roles from a user.
+	// RevokeUser revokes some permission roles from a user.
 	RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
 
-	// Change the user's password.
+	// ChangePassword changes the user's password.
 	ChangePassword(ctx context.Context, username string, password string) (*User, error)
 
-	// List users.
+	// ListUsers lists the users.
 	ListUsers(ctx context.Context) ([]string, error)
 }
@ -179,22 +194,28 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
|
||||
var userList struct {
|
||||
Users []string `json:"users"`
|
||||
Users []userListEntry `json:"users"`
|
||||
}
|
||||
err = json.Unmarshal(body, &userList)
|
||||
if err != nil {
|
||||
|
||||
if err = json.Unmarshal(body, &userList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return userList.Users, nil
|
||||
|
||||
ret := make([]string, 0, len(userList.Users))
|
||||
for _, u := range userList.Users {
|
||||
ret = append(ret, u.User)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
|
||||
|
@ -221,9 +242,9 @@ func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAct
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -280,18 +301,24 @@ func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(body, &user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if err = json.Unmarshal(body, &user); err != nil {
|
||||
var userR UserRoles
|
||||
if urerr := json.Unmarshal(body, &userR); urerr != nil {
|
||||
return nil, err
|
||||
}
|
||||
user.User = userR.User
|
||||
for _, r := range userR.Roles {
|
||||
user.Roles = append(user.Roles, r.Role)
|
||||
}
|
||||
}
|
||||
return &user, nil
|
||||
}
|
||||
|
|
vendor/src/github.com/coreos/etcd/client/client.go (106 lines changed, vendored)
@@ -24,6 +24,7 @@ import (
 	"net/url"
 	"reflect"
 	"sort"
+	"strconv"
 	"sync"
 	"time"
 
@@ -34,6 +35,7 @@ var (
 	ErrNoEndpoints           = errors.New("client: no endpoints available")
 	ErrTooManyRedirects      = errors.New("client: too many redirects")
 	ErrClusterUnavailable    = errors.New("client: etcd cluster is unavailable or misconfigured")
+	ErrNoLeaderEndpoint      = errors.New("client: no leader endpoint available")
 	errTooManyRedirectChecks = errors.New("client: too many redirect checks")
 )
 
@@ -48,6 +50,29 @@ var DefaultTransport CancelableTransport = &http.Transport{
 	TLSHandshakeTimeout: 10 * time.Second,
 }
 
+type EndpointSelectionMode int
+
+const (
+	// EndpointSelectionRandom is the default value of the 'SelectionMode'.
+	// As the name implies, the client object will pick a node from the members
+	// of the cluster in a random fashion. If the cluster has three members, A, B,
+	// and C, the client picks any node from its three members as its request
+	// destination.
+	EndpointSelectionRandom EndpointSelectionMode = iota
+
+	// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+	// requests are sent directly to the cluster leader. This reduces
+	// forwarding roundtrips compared to making requests to etcd followers
+	// who then forward them to the cluster leader. In the event of a leader
+	// failure, however, clients configured this way cannot prioritize among
+	// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+	// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+	// maintain its knowledge of current cluster state.
+	//
+	// This mode should be used with Client.AutoSync().
+	EndpointSelectionPrioritizeLeader
+)
+
 type Config struct {
 	// Endpoints defines a set of URLs (schemes, hosts and ports only)
 	// that can be used to communicate with a logical etcd cluster. For
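To make the new mode concrete, here is a minimal sketch of opting into leader prioritization. The endpoint URLs are placeholders, and, as the comment above requires, AutoSync keeps the client's view of the cluster current.

```go
cfg := client.Config{
	Endpoints:     []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"},
	SelectionMode: client.EndpointSelectionPrioritizeLeader,
}
c, err := client.New(cfg)
if err != nil {
	log.Fatal(err)
}
// Periodically re-read the member list so the pinned leader endpoint stays fresh.
go func() {
	if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
		log.Println("AutoSync:", err)
	}
}()
```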
@@ -73,7 +98,7 @@ type Config struct {
 	// CheckRedirect specifies the policy for handling HTTP redirects.
 	// If CheckRedirect is not nil, the Client calls it before
 	// following an HTTP redirect. The sole argument is the number of
-	// requests that have alrady been made. If CheckRedirect returns
+	// requests that have already been made. If CheckRedirect returns
 	// an error, Client.Do will not make any further requests and return
 	// the error back it to the caller.
 	//
@@ -99,11 +124,17 @@ type Config struct {
 	// watch start. But if server is behind some kind of proxy, the response
 	// header may be cached at proxy, and Client cannot rely on this behavior.
 	//
+	// Especially, wait request will ignore this timeout.
+	//
 	// One API call may send multiple requests to different etcd servers until it
 	// succeeds. Use context of the API to specify the overall timeout.
 	//
 	// A HeaderTimeoutPerRequest of zero means no timeout.
 	HeaderTimeoutPerRequest time.Duration
+
+	// SelectionMode is an EndpointSelectionMode enum that specifies the
+	// policy for choosing the etcd cluster node to which requests are sent.
+	SelectionMode EndpointSelectionMode
 }
 
 func (cfg *Config) transport() CancelableTransport {
@ -162,6 +193,11 @@ type Client interface {
|
|||
// this may differ from the initial Endpoints provided in the Config.
|
||||
Endpoints() []string
|
||||
|
||||
// SetEndpoints sets the set of API endpoints used by Client to resolve
|
||||
// HTTP requests. If the given endpoints are not valid, an error will be
|
||||
// returned
|
||||
SetEndpoints(eps []string) error
|
||||
|
||||
httpClient
|
||||
}
|
||||
|
||||
|
@ -169,6 +205,7 @@ func New(cfg Config) (Client, error) {
|
|||
c := &httpClusterClient{
|
||||
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
|
||||
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
selectionMode: cfg.SelectionMode,
|
||||
}
|
||||
if cfg.Username != "" {
|
||||
c.credentials = &credentials{
|
||||
|
@ -176,7 +213,7 @@ func New(cfg Config) (Client, error) {
|
|||
password: cfg.Password,
|
||||
}
|
||||
}
|
||||
if err := c.reset(cfg.Endpoints); err != nil {
|
||||
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
|
@@ -216,10 +253,21 @@ type httpClusterClient struct {
 	pinned      int
 	credentials *credentials
 	sync.RWMutex
-	rand *rand.Rand
+	rand          *rand.Rand
+	selectionMode EndpointSelectionMode
 }
 
-func (c *httpClusterClient) reset(eps []string) error {
+func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
+	mAPI := NewMembersAPI(c)
+	leader, err := mAPI.Leader(context.Background())
+	if err != nil {
+		return "", err
+	}
+
+	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
 	if len(eps) == 0 {
 		return ErrNoEndpoints
 	}
@@ -233,9 +281,28 @@ func (c *httpClusterClient) reset(eps []string) error {
 		neps[i] = *u
 	}
 
-	c.endpoints = shuffleEndpoints(c.rand, neps)
-	// TODO: pin old endpoint if possible, and rebalance when new endpoint appears
-	c.pinned = 0
+	switch c.selectionMode {
+	case EndpointSelectionRandom:
+		c.endpoints = shuffleEndpoints(c.rand, neps)
+		c.pinned = 0
+	case EndpointSelectionPrioritizeLeader:
+		c.endpoints = neps
+		lep, err := c.getLeaderEndpoint()
+		if err != nil {
+			return ErrNoLeaderEndpoint
+		}
+
+		for i := range c.endpoints {
+			if c.endpoints[i].String() == lep {
+				c.pinned = i
+				break
+			}
+		}
+		// If endpoints doesn't have the lu, just keep c.pinned = 0.
+		// Forwarding between follower and leader would be required but it works.
+	default:
+		return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
+	}
 
 	return nil
 }
@ -275,7 +342,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
|||
resp, body, err = hc.Do(ctx, action)
|
||||
if err != nil {
|
||||
cerr.Errors = append(cerr.Errors, err)
|
||||
// mask previous errors with context error, which is controlled by user
|
||||
if err == ctx.Err() {
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -341,7 +410,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
return c.reset(eps)
|
||||
return c.SetEndpoints(eps)
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||
|
@ -378,9 +447,24 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
hctx, hcancel := context.WithCancel(ctx)
|
||||
if c.headerTimeout > 0 {
|
||||
isWait := false
|
||||
if req != nil && req.URL != nil {
|
||||
ws := req.URL.Query().Get("wait")
|
||||
if len(ws) != 0 {
|
||||
var err error
|
||||
isWait, err = strconv.ParseBool(ws)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var hctx context.Context
|
||||
var hcancel context.CancelFunc
|
||||
if !isWait && c.headerTimeout > 0 {
|
||||
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
|
||||
} else {
|
||||
hctx, hcancel = context.WithCancel(ctx)
|
||||
}
|
||||
defer hcancel()
|
||||
|
||||
|
|
|
@@ -16,6 +16,6 @@ package client
 
 // Discoverer is an interface that wraps the Discover method.
 type Discoverer interface {
-	// Dicover looks up the etcd servers for the domain.
+	// Discover looks up the etcd servers for the domain.
 	Discover(domain string) ([]string, error)
 }
[File diff suppressed because it is too large]
vendor/src/github.com/coreos/etcd/client/keys.go (27 lines changed, vendored)
|
@ -14,7 +14,7 @@
|
|||
|
||||
package client
|
||||
|
||||
//go:generate codecgen -r "Node|Response" -o keys.generated.go keys.go
|
||||
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
@@ -106,7 +106,7 @@ type KeysAPI interface {
 
 	// Set assigns a new value to a Node identified by a given key. The caller
 	// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
-	// than value is ignored.
+	// then value is ignored.
 	Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
 
 	// Delete removes a Node identified by the given key, optionally destroying
@@ -184,6 +184,11 @@ type SetOptions struct {
 	// a TTL of 0.
 	TTL time.Duration
 
+	// Refresh set to true means a TTL value can be updated
+	// without firing a watch or changing the node value. A
+	// value must not be provided when refreshing a key.
+	Refresh bool
+
 	// Dir specifies whether or not this Node should be created as a directory.
 	Dir bool
 }
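A hedged sketch of the new Refresh flag: extend a key's TTL without changing its value and without waking watchers. The key name and TTL are illustrative, and kapi is assumed to be the client.KeysAPI from the README example.

```go
// Refresh the TTL on an existing key; no value may be supplied on refresh.
_, err := kapi.Set(context.Background(), "/session/worker-1", "", &client.SetOptions{
	TTL:     30 * time.Second,
	Refresh: true,
})
if err != nil {
	log.Fatal(err)
}
```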
@ -234,7 +239,7 @@ type DeleteOptions struct {
|
|||
|
||||
type Watcher interface {
|
||||
// Next blocks until an etcd event occurs, then returns a Response
|
||||
// represeting that event. The behavior of Next depends on the
|
||||
// representing that event. The behavior of Next depends on the
|
||||
// WatcherOptions used to construct the Watcher. Next is designed to
|
||||
// be called repeatedly, each time blocking until a subsequent event
|
||||
// is available.
|
||||
|
@ -279,7 +284,7 @@ type Node struct {
|
|||
// Nodes holds the children of this Node, only if this Node is a directory.
|
||||
// This slice of will be arbitrarily deep (children, grandchildren, great-
|
||||
// grandchildren, etc.) if a recursive Get or Watch request were made.
|
||||
Nodes []*Node `json:"nodes"`
|
||||
Nodes Nodes `json:"nodes"`
|
||||
|
||||
// CreatedIndex is the etcd index at-which this Node was created.
|
||||
CreatedIndex uint64 `json:"createdIndex"`
|
||||
|
@@ -303,6 +308,14 @@ func (n *Node) TTLDuration() time.Duration {
 	return time.Duration(n.TTL) * time.Second
 }
 
+type Nodes []*Node
+
+// interfaces for sorting
+
+func (ns Nodes) Len() int           { return len(ns) }
+func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
+func (ns Nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
+
 type httpKeysAPI struct {
 	client httpClient
 	prefix string
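Since Nodes now satisfies sort.Interface, a directory listing can be ordered client-side with the standard sort package. A small sketch, assuming kapi as before and an existing "/dir" directory; GetOptions.Sort can alternatively ask the server to sort.

```go
resp, err := kapi.Get(context.Background(), "/dir", &client.GetOptions{Recursive: true})
if err != nil {
	log.Fatal(err)
}
sort.Sort(resp.Node.Nodes) // order children by key
for _, n := range resp.Node.Nodes {
	log.Printf("%s = %s", n.Key, n.Value)
}
```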
@ -320,6 +333,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
|
|||
act.PrevIndex = opts.PrevIndex
|
||||
act.PrevExist = opts.PrevExist
|
||||
act.TTL = opts.TTL
|
||||
act.Refresh = opts.Refresh
|
||||
act.Dir = opts.Dir
|
||||
}
|
||||
|
||||
|
@ -511,6 +525,7 @@ type setAction struct {
|
|||
PrevIndex uint64
|
||||
PrevExist PrevExistType
|
||||
TTL time.Duration
|
||||
Refresh bool
|
||||
Dir bool
|
||||
}
|
||||
|
||||
|
@ -542,6 +557,10 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
|||
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||
}
|
||||
|
||||
if a.Refresh {
|
||||
form.Add("refresh", "true")
|
||||
}
|
||||
|
||||
u.RawQuery = params.Encode()
|
||||
body := strings.NewReader(form.Encode())
|
||||
|
||||
|
|
|
@@ -29,6 +29,7 @@ import (
 
 var (
 	defaultV2MembersPrefix = "/v2/members"
+	defaultLeaderSuffix    = "/leader"
 )
 
 type Member struct {
@@ -105,6 +106,9 @@ type MembersAPI interface {
 
 	// Update instructs etcd to update an existing Member in the cluster.
 	Update(ctx context.Context, mID string, peerURLs []string) error
+
+	// Leader gets current leader of the cluster
+	Leader(ctx context.Context) (*Member, error)
 }
 
 type httpMembersAPI struct {
@@ -199,6 +203,25 @@ func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
 	return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
 }
 
+func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
+	req := &membersAPIActionLeader{}
+	resp, body, err := m.client.Do(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	var leader Member
+	if err := json.Unmarshal(body, &leader); err != nil {
+		return nil, err
+	}
+
+	return &leader, nil
+}
+
 type membersAPIActionList struct{}
 
 func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
@@ -255,6 +278,15 @@ func assertStatusCode(got int, want ...int) (err error) {
 	return fmt.Errorf("unexpected status code %d", got)
 }
 
+type membersAPIActionLeader struct{}
+
+func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
+	u := v2MembersURL(ep)
+	u.Path = path.Join(u.Path, defaultLeaderSuffix)
+	req, _ := http.NewRequest("GET", u.String(), nil)
+	return req
+}
+
 // v2MembersURL add the necessary path to the provided endpoint
 // to route requests to the default v2 members API.
 func v2MembersURL(ep url.URL) *url.URL {
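A short sketch of the new Leader call from the members API; c is assumed to be the client.Client built earlier, and the timeout is illustrative.

```go
mAPI := client.NewMembersAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
leader, err := mAPI.Leader(ctx)
if err != nil {
	log.Fatal(err)
}
log.Printf("cluster leader is %q, reachable at %v", leader.Name, leader.ClientURLs)
```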
@@ -27,7 +27,7 @@ var (
 
 type srvDiscover struct{}
 
-// NewSRVDiscover constructs a new Dicoverer that uses the stdlib to lookup SRV records.
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
 func NewSRVDiscover() Discoverer {
 	return &srvDiscover{}
 }
@@ -50,8 +50,8 @@ func (d *srvDiscover) Discover(domain string) ([]string, error) {
 		return nil
 	}
 
-	errHTTPS := updateURLs("etcd-server-ssl", "https")
-	errHTTP := updateURLs("etcd-server", "http")
+	errHTTPS := updateURLs("etcd-client-ssl", "https")
+	errHTTP := updateURLs("etcd-client", "http")
 
 	if errHTTPS != nil && errHTTP != nil {
 		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
vendor/src/github.com/coreos/etcd/client/util.go (23 lines added, vendored, new file)

@@ -0,0 +1,23 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
+func IsKeyNotFound(err error) bool {
+	if cErr, ok := err.(Error); ok {
+		return cErr.Code == ErrorCodeKeyNotFound
+	}
+	return false
+}
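The helper added above makes the common "key is absent" case easy to separate from real failures. A small sketch, reusing the kapi handle from the README example:

```go
resp, err := kapi.Get(context.Background(), "/maybe/missing", nil)
switch {
case err == nil:
	log.Printf("value: %q", resp.Node.Value)
case client.IsKeyNotFound(err):
	log.Print("key does not exist yet; treating as empty")
default:
	log.Fatal(err)
}
```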
@ -2,6 +2,8 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pathutil implements utility functions for handling slash-separated
|
||||
// paths.
|
||||
package pathutil
|
||||
|
||||
import "path"
|
||||
|
|
17
vendor/src/github.com/coreos/etcd/pkg/types/doc.go
vendored
Normal file
17
vendor/src/github.com/coreos/etcd/pkg/types/doc.go
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package types declares various data types and implements type-checking
|
||||
// functions.
|
||||
package types
|
|
@ -16,26 +16,21 @@ package types
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// URLsMap is a map from a name to its URLs.
|
||||
type URLsMap map[string]URLs
|
||||
|
||||
// NewURLsMap returns a URLsMap instantiated from the given string,
|
||||
// which consists of discovery-formatted names-to-URLs, like:
|
||||
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
|
||||
func NewURLsMap(s string) (URLsMap, error) {
|
||||
m := parse(s)
|
||||
|
||||
cl := URLsMap{}
|
||||
v, err := url.ParseQuery(strings.Replace(s, ",", "&", -1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for name, urls := range v {
|
||||
if len(urls) == 0 || urls[0] == "" {
|
||||
return nil, fmt.Errorf("empty URL given for %q", name)
|
||||
}
|
||||
for name, urls := range m {
|
||||
us, err := NewURLs(urls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -45,9 +40,9 @@ func NewURLsMap(s string) (URLsMap, error) {
|
|||
return cl, nil
|
||||
}
|
||||
|
||||
// String returns NameURLPairs into discovery-formatted name-to-URLs sorted by name.
|
||||
// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
|
||||
func (c URLsMap) String() string {
|
||||
pairs := make([]string, 0)
|
||||
var pairs []string
|
||||
for name, urls := range c {
|
||||
for _, url := range urls {
|
||||
pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
|
||||
|
@ -60,7 +55,7 @@ func (c URLsMap) String() string {
|
|||
// URLs returns a list of all URLs.
|
||||
// The returned list is sorted in ascending lexicographical order.
|
||||
func (c URLsMap) URLs() []string {
|
||||
urls := make([]string, 0)
|
||||
var urls []string
|
||||
for _, us := range c {
|
||||
for _, u := range us {
|
||||
urls = append(urls, u.String())
|
||||
|
@@ -70,6 +65,29 @@ func (c URLsMap) URLs() []string {
 	return urls
 }
 
+// Len returns the size of URLsMap.
+func (c URLsMap) Len() int {
+	return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+	m := make(map[string][]string)
+	for s != "" {
+		key := s
+		if i := strings.IndexAny(key, ","); i >= 0 {
+			key, s = key[:i], key[i+1:]
+		} else {
+			s = ""
+		}
+		if key == "" {
+			continue
+		}
+		value := ""
+		if i := strings.Index(key, "="); i >= 0 {
+			key, value = key[:i], key[i+1:]
+		}
+		m[key] = append(m[key], value)
+	}
+	return m
+}
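A minimal sketch of the exported surface touched here: NewURLsMap parses the discovery-formatted string documented earlier in this file, and Len, URLs, and String round-trip it. The import path github.com/coreos/etcd/pkg/types is assumed, and the names and addresses are placeholders.

```go
m, err := types.NewURLsMap("mach0=http://1.1.1.1:2380,mach1=http://2.2.2.2:2380")
if err != nil {
	log.Fatal(err)
}
log.Println(m.Len())    // 2
log.Println(m.URLs())   // all URLs, sorted lexicographically
log.Println(m.String()) // back to the name=URL,name=URL form, sorted by name
```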
vendor/src/github.com/ugorji/go/codec/0doc.go (45 lines changed, vendored)

@@ -98,7 +98,21 @@ with the standard net/rpc package.
 
 Usage
 
-Typical usage model:
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+    - Create and initialize the Handle before any use.
+      Once created, DO NOT modify it.
+    - Multiple Encoders or Decoders can now use the Handle concurrently.
+      They only read information off the Handle (never write).
+    - However, each Encoder or Decoder MUST not be used concurrently
+    - To re-use an Encoder/Decoder, call Reset(...) on it first.
+      This allows you use state maintained on the Encoder/Decoder.
+
+Sample usage model:
 
   // create and configure Handle
   var (
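For readers unfamiliar with the package, a hedged sketch of the usage model spelled out above, using the codec package's msgpack handle (any Handle in the package follows the same pattern). Encoder/Decoder reuse via Reset(...), mentioned in the doc text, is omitted here.

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type point struct {
	X, Y int
}

func main() {
	// Create and configure the Handle once; afterwards treat it as read-only.
	var mh codec.MsgpackHandle

	// Encoders and Decoders may share the Handle, but each one must not be
	// used from multiple goroutines at the same time.
	var buf []byte
	enc := codec.NewEncoderBytes(&buf, &mh)
	if err := enc.Encode(point{X: 1, Y: 2}); err != nil {
		panic(err)
	}

	var out point
	dec := codec.NewDecoderBytes(buf, &mh)
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.X, out.Y) // 1 2
}
```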
@ -148,3 +162,32 @@ Typical usage model:
|
|||
*/
|
||||
package codec
|
||||
|
||||
// Benefits of go-codec:
|
||||
//
|
||||
// - encoding/json always reads whole file into memory first.
|
||||
// This makes it unsuitable for parsing very large files.
|
||||
// - encoding/xml cannot parse into a map[string]interface{}
|
||||
// I found this out on reading https://github.com/clbanning/mxj
|
||||
|
||||
// TODO:
|
||||
//
|
||||
// - (En|De)coder should store an error when it occurs.
|
||||
// Until reset, subsequent calls return that error that was stored.
|
||||
// This means that free panics must go away.
|
||||
// All errors must be raised through errorf method.
|
||||
// - Decoding using a chan is good, but incurs concurrency costs.
|
||||
// This is because there's no fast way to use a channel without it
|
||||
// having to switch goroutines constantly.
|
||||
// Callback pattern is still the best. Maybe cnsider supporting something like:
|
||||
// type X struct {
|
||||
// Name string
|
||||
// Ys []Y
|
||||
// Ys chan <- Y
|
||||
// Ys func(interface{}) -> call this interface for each entry in there.
|
||||
// }
|
||||
// - Consider adding a isZeroer interface { isZero() bool }
|
||||
// It is used within isEmpty, for omitEmpty support.
|
||||
// - Consider making Handle used AS-IS within the encoding/decoding session.
|
||||
// This means that we don't cache Handle information within the (En|De)coder,
|
||||
// except we really need it at Reset(...)
|
||||
// - Handle recursive types during encoding/decoding?
|
||||
|
|
vendor/src/github.com/ugorji/go/codec/binc.go (151 lines changed, vendored)
|
@ -5,6 +5,7 @@ package codec
|
|||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -58,8 +59,8 @@ type bincEncDriver struct {
|
|||
e *Encoder
|
||||
w encWriter
|
||||
m map[string]uint16 // symbols
|
||||
s uint16 // symbols sequencer
|
||||
b [scratchByteArrayLen]byte
|
||||
s uint16 // symbols sequencer
|
||||
encNoSeparator
|
||||
}
|
||||
|
||||
|
@ -69,7 +70,15 @@ func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
|
|||
|
||||
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
|
||||
if rt == timeTypId {
|
||||
bs := encodeTime(v.(time.Time))
|
||||
var bs []byte
|
||||
switch x := v.(type) {
|
||||
case time.Time:
|
||||
bs = encodeTime(x)
|
||||
case *time.Time:
|
||||
bs = encodeTime(*x)
|
||||
default:
|
||||
e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
|
||||
}
|
||||
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
|
||||
e.w.writeb(bs)
|
||||
}
|
||||
|
@ -309,9 +318,9 @@ func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
|
|||
//------------------------------------
|
||||
|
||||
type bincDecSymbol struct {
|
||||
i uint16
|
||||
s string
|
||||
b []byte
|
||||
i uint16
|
||||
}
|
||||
|
||||
type bincDecDriver struct {
|
||||
|
@ -320,7 +329,6 @@ type bincDecDriver struct {
|
|||
r decReader
|
||||
br bool // bytes reader
|
||||
bdRead bool
|
||||
bdType valueType
|
||||
bd byte
|
||||
vd byte
|
||||
vs byte
|
||||
|
@ -338,24 +346,23 @@ func (d *bincDecDriver) readNextBd() {
|
|||
d.vd = d.bd >> 4
|
||||
d.vs = d.bd & 0x0f
|
||||
d.bdRead = true
|
||||
d.bdType = valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) IsContainerType(vt valueType) (b bool) {
|
||||
switch vt {
|
||||
case valueTypeNil:
|
||||
return d.vd == bincVdSpecial && d.vs == bincSpNil
|
||||
case valueTypeBytes:
|
||||
return d.vd == bincVdByteArray
|
||||
case valueTypeString:
|
||||
return d.vd == bincVdString
|
||||
case valueTypeArray:
|
||||
return d.vd == bincVdArray
|
||||
case valueTypeMap:
|
||||
return d.vd == bincVdMap
|
||||
func (d *bincDecDriver) ContainerType() (vt valueType) {
|
||||
if d.vd == bincVdSpecial && d.vs == bincSpNil {
|
||||
return valueTypeNil
|
||||
} else if d.vd == bincVdByteArray {
|
||||
return valueTypeBytes
|
||||
} else if d.vd == bincVdString {
|
||||
return valueTypeString
|
||||
} else if d.vd == bincVdArray {
|
||||
return valueTypeArray
|
||||
} else if d.vd == bincVdMap {
|
||||
return valueTypeMap
|
||||
} else {
|
||||
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
}
|
||||
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
return // "unreachable"
|
||||
return valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) TryDecodeAsNil() bool {
|
||||
|
@ -686,7 +693,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
|
|||
if withString {
|
||||
s = string(bs2)
|
||||
}
|
||||
d.s = append(d.s, bincDecSymbol{symbol, s, bs2})
|
||||
d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
|
||||
}
|
||||
default:
|
||||
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
|
||||
|
@ -775,97 +782,95 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
|
|||
return
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
|
||||
func (d *bincDecDriver) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
|
||||
n := &d.d.n
|
||||
var decodeFurther bool
|
||||
|
||||
switch d.vd {
|
||||
case bincVdSpecial:
|
||||
switch d.vs {
|
||||
case bincSpNil:
|
||||
vt = valueTypeNil
|
||||
n.v = valueTypeNil
|
||||
case bincSpFalse:
|
||||
vt = valueTypeBool
|
||||
v = false
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case bincSpTrue:
|
||||
vt = valueTypeBool
|
||||
v = true
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
case bincSpNan:
|
||||
vt = valueTypeFloat
|
||||
v = math.NaN()
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.NaN()
|
||||
case bincSpPosInf:
|
||||
vt = valueTypeFloat
|
||||
v = math.Inf(1)
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.Inf(1)
|
||||
case bincSpNegInf:
|
||||
vt = valueTypeFloat
|
||||
v = math.Inf(-1)
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.Inf(-1)
|
||||
case bincSpZeroFloat:
|
||||
vt = valueTypeFloat
|
||||
v = float64(0)
|
||||
n.v = valueTypeFloat
|
||||
n.f = float64(0)
|
||||
case bincSpZero:
|
||||
vt = valueTypeUint
|
||||
v = uint64(0) // int8(0)
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(0) // int8(0)
|
||||
case bincSpNegOne:
|
||||
vt = valueTypeInt
|
||||
v = int64(-1) // int8(-1)
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(-1) // int8(-1)
|
||||
default:
|
||||
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
|
||||
return
|
||||
}
|
||||
case bincVdSmallInt:
|
||||
vt = valueTypeUint
|
||||
v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
|
||||
case bincVdPosInt:
|
||||
vt = valueTypeUint
|
||||
v = d.decUint()
|
||||
n.v = valueTypeUint
|
||||
n.u = d.decUint()
|
||||
case bincVdNegInt:
|
||||
vt = valueTypeInt
|
||||
v = -(int64(d.decUint()))
|
||||
n.v = valueTypeInt
|
||||
n.i = -(int64(d.decUint()))
|
||||
case bincVdFloat:
|
||||
vt = valueTypeFloat
|
||||
v = d.decFloat()
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.decFloat()
|
||||
case bincVdSymbol:
|
||||
vt = valueTypeSymbol
|
||||
v = d.DecodeString()
|
||||
n.v = valueTypeSymbol
|
||||
n.s = d.DecodeString()
|
||||
case bincVdString:
|
||||
vt = valueTypeString
|
||||
v = d.DecodeString()
|
||||
n.v = valueTypeString
|
||||
n.s = d.DecodeString()
|
||||
case bincVdByteArray:
|
||||
vt = valueTypeBytes
|
||||
v = d.DecodeBytes(nil, false, false)
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
case bincVdTimestamp:
|
||||
vt = valueTypeTimestamp
|
||||
n.v = valueTypeTimestamp
|
||||
tt, err := decodeTime(d.r.readx(int(d.vs)))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
v = tt
|
||||
n.t = tt
|
||||
case bincVdCustomExt:
|
||||
vt = valueTypeExt
|
||||
n.v = valueTypeExt
|
||||
l := d.decLen()
|
||||
var re RawExt
|
||||
re.Tag = uint64(d.r.readn1())
|
||||
re.Data = d.r.readx(l)
|
||||
v = &re
|
||||
vt = valueTypeExt
|
||||
n.u = uint64(d.r.readn1())
|
||||
n.l = d.r.readx(l)
|
||||
case bincVdArray:
|
||||
vt = valueTypeArray
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case bincVdMap:
|
||||
vt = valueTypeMap
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
default:
|
||||
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
|
||||
return
|
||||
}
|
||||
|
||||
if !decodeFurther {
|
||||
d.bdRead = false
|
||||
}
|
||||
if vt == valueTypeUint && d.h.SignedInteger {
|
||||
d.bdType = valueTypeInt
|
||||
v = int64(v.(uint64))
|
||||
if n.v == valueTypeUint && d.h.SignedInteger {
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(n.u)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -889,6 +894,10 @@ type BincHandle struct {
|
|||
binaryEncodingType
|
||||
}
|
||||
|
||||
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
|
||||
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
|
||||
}
|
||||
|
||||
func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
|
||||
return &bincEncDriver{e: e, w: e.w}
|
||||
}
|
||||
|
@ -897,5 +906,13 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
|
|||
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
|
||||
}
|
||||
|
||||
func (e *bincEncDriver) reset() {
|
||||
e.w = e.e.w
|
||||
}
|
||||
|
||||
func (d *bincDecDriver) reset() {
|
||||
d.r = d.d.r
|
||||
}
|
||||
|
||||
var _ decDriver = (*bincDecDriver)(nil)
|
||||
var _ encDriver = (*bincEncDriver)(nil)
|
||||
|
|
vendor/src/github.com/ugorji/go/codec/cbor.go (130 lines changed, vendored)
|
@ -3,7 +3,10 @@
|
|||
|
||||
package codec
|
||||
|
||||
import "math"
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
cborMajorUint byte = iota
|
||||
|
@ -57,11 +60,11 @@ const (
|
|||
// -------------------
|
||||
|
||||
type cborEncDriver struct {
|
||||
noBuiltInTypes
|
||||
encNoSeparator
|
||||
e *Encoder
|
||||
w encWriter
|
||||
h *CborHandle
|
||||
noBuiltInTypes
|
||||
encNoSeparator
|
||||
x [8]byte
|
||||
}
|
||||
|
||||
|
@ -158,7 +161,11 @@ func (e *cborEncDriver) EncodeSymbol(v string) {
|
|||
}
|
||||
|
||||
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
|
||||
e.encLen(cborBaseBytes, len(v))
|
||||
if c == c_RAW {
|
||||
e.encLen(cborBaseBytes, len(v))
|
||||
} else {
|
||||
e.encLen(cborBaseString, len(v))
|
||||
}
|
||||
e.w.writeb(v)
|
||||
}
|
||||
|
||||
|
@ -168,11 +175,10 @@ type cborDecDriver struct {
|
|||
d *Decoder
|
||||
h *CborHandle
|
||||
r decReader
|
||||
b [scratchByteArrayLen]byte
|
||||
br bool // bytes reader
|
||||
bdRead bool
|
||||
bdType valueType
|
||||
bd byte
|
||||
b [scratchByteArrayLen]byte
|
||||
noBuiltInTypes
|
||||
decNoSeparator
|
||||
}
|
||||
|
@ -180,24 +186,23 @@ type cborDecDriver struct {
|
|||
func (d *cborDecDriver) readNextBd() {
|
||||
d.bd = d.r.readn1()
|
||||
d.bdRead = true
|
||||
d.bdType = valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *cborDecDriver) IsContainerType(vt valueType) (bv bool) {
|
||||
switch vt {
|
||||
case valueTypeNil:
|
||||
return d.bd == cborBdNil
|
||||
case valueTypeBytes:
|
||||
return d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString)
|
||||
case valueTypeString:
|
||||
return d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray)
|
||||
case valueTypeArray:
|
||||
return d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap)
|
||||
case valueTypeMap:
|
||||
return d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag)
|
||||
func (d *cborDecDriver) ContainerType() (vt valueType) {
|
||||
if d.bd == cborBdNil {
|
||||
return valueTypeNil
|
||||
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
|
||||
return valueTypeBytes
|
||||
} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
|
||||
return valueTypeString
|
||||
} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
|
||||
return valueTypeArray
|
||||
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
|
||||
return valueTypeMap
|
||||
} else {
|
||||
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
}
|
||||
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
return // "unreachable"
|
||||
return valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *cborDecDriver) TryDecodeAsNil() bool {
|
||||
|
@ -439,71 +444,72 @@ func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta
|
|||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
|
||||
func (d *cborDecDriver) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
|
||||
n := &d.d.n
|
||||
var decodeFurther bool
|
||||
|
||||
switch d.bd {
|
||||
case cborBdNil:
|
||||
vt = valueTypeNil
|
||||
n.v = valueTypeNil
|
||||
case cborBdFalse:
|
||||
vt = valueTypeBool
|
||||
v = false
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case cborBdTrue:
|
||||
vt = valueTypeBool
|
||||
v = true
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
case cborBdFloat16, cborBdFloat32:
|
||||
vt = valueTypeFloat
|
||||
v = d.DecodeFloat(true)
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.DecodeFloat(true)
|
||||
case cborBdFloat64:
|
||||
vt = valueTypeFloat
|
||||
v = d.DecodeFloat(false)
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.DecodeFloat(false)
|
||||
case cborBdIndefiniteBytes:
|
||||
vt = valueTypeBytes
|
||||
v = d.DecodeBytes(nil, false, false)
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
case cborBdIndefiniteString:
|
||||
vt = valueTypeString
|
||||
v = d.DecodeString()
|
||||
n.v = valueTypeString
|
||||
n.s = d.DecodeString()
|
||||
case cborBdIndefiniteArray:
|
||||
vt = valueTypeArray
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case cborBdIndefiniteMap:
|
||||
vt = valueTypeMap
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
default:
|
||||
switch {
|
||||
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
|
||||
if d.h.SignedInteger {
|
||||
vt = valueTypeInt
|
||||
v = d.DecodeInt(64)
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt(64)
|
||||
} else {
|
||||
vt = valueTypeUint
|
||||
v = d.DecodeUint(64)
|
||||
n.v = valueTypeUint
|
||||
n.u = d.DecodeUint(64)
|
||||
}
|
||||
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
|
||||
vt = valueTypeInt
|
||||
v = d.DecodeInt(64)
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt(64)
|
||||
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
|
||||
vt = valueTypeBytes
|
||||
v = d.DecodeBytes(nil, false, false)
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
case d.bd >= cborBaseString && d.bd < cborBaseArray:
|
||||
vt = valueTypeString
|
||||
v = d.DecodeString()
|
||||
n.v = valueTypeString
|
||||
n.s = d.DecodeString()
|
||||
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
|
||||
vt = valueTypeArray
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
|
||||
vt = valueTypeMap
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
|
||||
vt = valueTypeExt
|
||||
var re RawExt
|
||||
ui := d.decUint()
|
||||
n.v = valueTypeExt
|
||||
n.u = d.decUint()
|
||||
n.l = nil
|
||||
d.bdRead = false
|
||||
re.Tag = ui
|
||||
d.d.decode(&re.Value)
|
||||
v = &re
|
||||
// d.d.decode(&re.Value) // handled by decode itself.
|
||||
// decodeFurther = true
|
||||
default:
|
||||
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
|
||||
|
@ -550,8 +556,12 @@ func (d *cborDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe
|
|||
// // Now, vv contains the same string "one-byte"
|
||||
//
|
||||
type CborHandle struct {
|
||||
BasicHandle
|
||||
binaryEncodingType
|
||||
BasicHandle
|
||||
}
|
||||
|
||||
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
|
||||
}
|
||||
|
||||
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
|
||||
|
@ -562,5 +572,13 @@ func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
|
|||
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver) reset() {
|
||||
e.w = e.e.w
|
||||
}
|
||||
|
||||
func (d *cborDecDriver) reset() {
|
||||
d.r = d.d.r
|
||||
}
|
||||
|
||||
var _ decDriver = (*cborDecDriver)(nil)
|
||||
var _ encDriver = (*cborEncDriver)(nil)
|
||||
|
|
vendor/src/github.com/ugorji/go/codec/decode.go (1375 lines changed, vendored; file diff suppressed because it is too large)
vendor/src/github.com/ugorji/go/codec/encode.go (945 lines changed, vendored; file diff suppressed because it is too large)
vendor/src/github.com/ugorji/go/codec/fast-path.generated.go (24555 lines changed, vendored; file diff suppressed because it is too large)
|
@ -1,4 +1,4 @@
|
|||
// //+build ignore
|
||||
// +build !notfastpath
|
||||
|
||||
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
@ -48,8 +48,8 @@ var fastpathTV fastpathT
|
|||
type fastpathE struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
encfn func(encFnInfo, reflect.Value)
|
||||
decfn func(decFnInfo, reflect.Value)
|
||||
encfn func(*encFnInfo, reflect.Value)
|
||||
decfn func(*decFnInfo, reflect.Value)
|
||||
}
|
||||
|
||||
type fastpathA [{{ .FastpathLen }}]fastpathE
|
||||
|
@ -85,7 +85,7 @@ func init() {
|
|||
return
|
||||
}
|
||||
i := 0
|
||||
fn := func(v interface{}, fe func(encFnInfo, reflect.Value), fd func(decFnInfo, reflect.Value)) (f fastpathE) {
|
||||
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
xptr := reflect.ValueOf(xrt).Pointer()
|
||||
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
|
||||
|
@ -93,11 +93,11 @@ func init() {
|
|||
return
|
||||
}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
fn([]{{ .Elem }}(nil), (encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
|
||||
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
|
||||
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
|
||||
|
||||
sort.Sort(fastpathAslice(fastpathAV[:]))
|
||||
}
|
||||
|
@ -106,118 +106,149 @@ func init() {
|
|||
|
||||
// -- -- fast path type switch
|
||||
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
|
||||
if !fastpathEnabled {
|
||||
return false
|
||||
}
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
case []{{ .Elem }}:{{else}}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if .Slice }}
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }}
|
||||
case *[]{{ .Elem }}:{{else}}
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
|
||||
{{end}}{{end}}
|
||||
default:
|
||||
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
|
||||
if !fastpathEnabled {
|
||||
return false
|
||||
}
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
case []{{ .Elem }}:
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
|
||||
case *[]{{ .Elem }}:
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
|
||||
{{end}}{{end}}{{end}}
|
||||
default:
|
||||
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
|
||||
if !fastpathEnabled {
|
||||
return false
|
||||
}
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
|
||||
{{end}}{{end}}{{end}}
|
||||
default:
|
||||
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// -- -- fast path functions
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
|
||||
func (f encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
|
||||
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
|
||||
}
|
||||
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
|
||||
ee := e.e
|
||||
ee := e.e
|
||||
cr := e.cr
|
||||
if checkNil && v == nil {
|
||||
ee.EncodeNil()
|
||||
return
|
||||
}
|
||||
ee.EncodeArrayStart(len(v))
|
||||
if e.be {
|
||||
for _, v2 := range v {
|
||||
{{ encmd .Elem "v2"}}
|
||||
}
|
||||
} else {
|
||||
for j, v2 := range v {
|
||||
if j > 0 {
|
||||
ee.EncodeArrayEntrySeparator()
|
||||
}
|
||||
{{ encmd .Elem "v2"}}
|
||||
}
|
||||
ee.EncodeArrayEnd()
|
||||
for _, v2 := range v {
|
||||
if cr != nil { cr.sendContainerState(containerArrayElem) }
|
||||
{{ encmd .Elem "v2"}}
|
||||
}
|
||||
if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
|
||||
}
|
||||
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
|
||||
|
||||
func (f encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
|
||||
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
|
||||
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
|
||||
}
|
||||
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
|
||||
ee := e.e
|
||||
cr := e.cr
|
||||
if checkNil && v == nil {
|
||||
ee.EncodeNil()
|
||||
return
|
||||
}
|
||||
ee.EncodeMapStart(len(v))
|
||||
{{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0{{end}}
|
||||
if e.be {
|
||||
for k2, v2 := range v {
|
||||
{{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
|
||||
{{end}}if e.h.Canonical {
|
||||
{{if eq .MapKey "interface{}"}}{{/* out of band
|
||||
*/}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
|
||||
e2 := NewEncoderBytes(&mksv, e.hh)
|
||||
v2 := make([]bytesI, len(v))
|
||||
var i, l int
|
||||
var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
|
||||
for k2, _ := range v {
|
||||
l = len(mksv)
|
||||
e2.MustEncode(k2)
|
||||
vp = &v2[i]
|
||||
vp.v = mksv[l:]
|
||||
vp.i = k2
|
||||
i++
|
||||
}
|
||||
sort.Sort(bytesISlice(v2))
|
||||
for j := range v2 {
|
||||
if cr != nil { cr.sendContainerState(containerMapKey) }
|
||||
e.asis(v2[j].v)
|
||||
if cr != nil { cr.sendContainerState(containerMapValue) }
|
||||
e.encode(v[v2[j].i])
|
||||
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
|
||||
var i int
|
||||
for k, _ := range v {
|
||||
v2[i] = {{ $x }}(k)
|
||||
i++
|
||||
}
|
||||
sort.Sort({{ sorttype .MapKey false}}(v2))
|
||||
for _, k2 := range v2 {
|
||||
if cr != nil { cr.sendContainerState(containerMapKey) }
|
||||
{{if eq .MapKey "string"}}if asSymbols {
|
||||
ee.EncodeSymbol(k2)
|
||||
} else {
|
||||
ee.EncodeString(c_UTF8, k2)
|
||||
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
|
||||
{{ encmd .Elem "v2"}}
|
||||
}
|
||||
}{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
|
||||
if cr != nil { cr.sendContainerState(containerMapValue) }
|
||||
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
|
||||
} {{end}}
|
||||
} else {
|
||||
j := 0
|
||||
for k2, v2 := range v {
|
||||
if j > 0 {
|
||||
ee.EncodeMapEntrySeparator()
|
||||
}
|
||||
if cr != nil { cr.sendContainerState(containerMapKey) }
|
||||
{{if eq .MapKey "string"}}if asSymbols {
|
||||
ee.EncodeSymbol(k2)
|
||||
} else {
|
||||
ee.EncodeString(c_UTF8, k2)
|
||||
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
|
||||
ee.EncodeMapKVSeparator()
|
||||
if cr != nil { cr.sendContainerState(containerMapValue) }
|
||||
{{ encmd .Elem "v2"}}
|
||||
j++
|
||||
}
|
||||
ee.EncodeMapEnd()
|
||||
}
|
||||
if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
|
||||
}
|
||||
|
||||
{{end}}{{end}}{{end}}
|
||||
|
@ -226,11 +257,14 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
|
|||
|
||||
// -- -- fast path type switch
|
||||
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
|
||||
if !fastpathEnabled {
|
||||
return false
|
||||
}
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
case []{{ .Elem }}:{{else}}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
|
||||
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if .Slice }}
|
||||
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }}
|
||||
case *[]{{ .Elem }}:{{else}}
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
|
||||
v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
|
||||
|
@ -239,22 +273,23 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
|
|||
}
|
||||
{{end}}{{end}}
|
||||
default:
|
||||
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// -- -- fast path functions
|
||||
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
|
||||
{{/*
|
||||
Slices can change if they
|
||||
- did not come from an array
|
||||
- are addressable (from a ptr)
|
||||
- are settable (e.g. contained in an interface{})
|
||||
*/}}
|
||||
func (f decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
|
||||
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
|
||||
array := f.seq == seqTypeArray
|
||||
if !array && rv.CanAddr() { // CanSet => CanAddr + Exported
|
||||
if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}}
|
||||
vp := rv.Addr().Interface().(*[]{{ .Elem }})
|
||||
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
|
||||
if changed {
|
||||
|
@ -272,10 +307,9 @@ func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil
|
|||
*vp = v
|
||||
}
|
||||
}
|
||||
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool,
|
||||
d *Decoder) (_ []{{ .Elem }}, changed bool) {
|
||||
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
|
||||
dd := d.d
|
||||
// if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil()
|
||||
{{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
|
||||
if checkNil && dd.TryDecodeAsNil() {
|
||||
if v != nil {
|
||||
changed = true
|
||||
|
@ -284,54 +318,87 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b
|
|||
}
|
||||
|
||||
slh, containerLenS := d.decSliceHelperStart()
|
||||
if canChange && v == nil {
|
||||
if containerLenS <= 0 {
|
||||
v = []{{ .Elem }}{}
|
||||
} else {
|
||||
v = make([]{{ .Elem }}, containerLenS, containerLenS)
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
if containerLenS == 0 {
|
||||
if canChange && len(v) != 0 {
|
||||
v = v[:0]
|
||||
changed = true
|
||||
}{{/*
|
||||
// slh.End() // dd.ReadArrayEnd()
|
||||
*/}}
|
||||
return v, changed
|
||||
if canChange {
|
||||
if v == nil {
|
||||
v = []{{ .Elem }}{}
|
||||
} else if len(v) != 0 {
|
||||
v = v[:0]
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
slh.End()
|
||||
return
|
||||
}
|
||||
|
||||
// for j := 0; j < containerLenS; j++ {
|
||||
if containerLenS > 0 {
|
||||
decLen := containerLenS
|
||||
x2read := containerLenS
|
||||
var xtrunc bool
|
||||
if containerLenS > cap(v) {
|
||||
if canChange {
|
||||
s := make([]{{ .Elem }}, containerLenS, containerLenS)
|
||||
if canChange { {{/*
|
||||
// fast-path is for "basic" immutable types, so no need to copy them over
|
||||
// s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen))
|
||||
// copy(s, v[:cap(v)])
|
||||
v = s
|
||||
// v = s */}}
|
||||
var xlen int
|
||||
xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
|
||||
if xtrunc {
|
||||
if xlen <= cap(v) {
|
||||
v = v[:xlen]
|
||||
} else {
|
||||
v = make([]{{ .Elem }}, xlen)
|
||||
}
|
||||
} else {
|
||||
v = make([]{{ .Elem }}, xlen)
|
||||
}
|
||||
changed = true
|
||||
} else {
|
||||
d.arrayCannotExpand(len(v), containerLenS)
|
||||
decLen = len(v)
|
||||
}
|
||||
x2read = len(v)
|
||||
} else if containerLenS != len(v) {
|
||||
v = v[:containerLenS]
|
||||
changed = true
|
||||
}
|
||||
// all checks done. cannot go past len.
|
||||
if canChange {
|
||||
v = v[:containerLenS]
|
||||
changed = true
|
||||
}
|
||||
} {{/* // all checks done. cannot go past len. */}}
|
||||
j := 0
|
||||
for ; j < decLen; j++ {
|
||||
for ; j < x2read; j++ {
|
||||
slh.ElemContainerState(j)
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
|
||||
}
|
||||
if !canChange {
|
||||
for ; j < containerLenS; j++ {
|
||||
if xtrunc { {{/* // means canChange=true, changed=true already. */}}
|
||||
for ; j < containerLenS; j++ {
|
||||
v = append(v, {{ zerocmd .Elem }})
|
||||
slh.ElemContainerState(j)
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
|
||||
}
|
||||
} else if !canChange {
|
||||
for ; j < containerLenS; j++ {
|
||||
slh.ElemContainerState(j)
|
||||
d.swallow()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
j := 0
|
||||
for ; !dd.CheckBreak(); j++ {
|
||||
breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}}
|
||||
if breakFound {
|
||||
if canChange {
|
||||
if v == nil {
|
||||
v = []{{ .Elem }}{}
|
||||
} else if len(v) != 0 {
|
||||
v = v[:0]
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
slh.End()
|
||||
return
|
||||
}
|
||||
if cap(v) == 0 {
|
||||
v = make([]{{ .Elem }}, 1, 4)
|
||||
changed = true
|
||||
}
|
||||
j := 0
|
||||
for ; !breakFound; j++ {
|
||||
if j >= len(v) {
|
||||
if canChange {
|
||||
v = append(v, {{ zerocmd .Elem }})
|
||||
|
@ -339,32 +406,35 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b
|
|||
} else {
|
||||
d.arrayCannotExpand(len(v), j+1)
|
||||
}
|
||||
}
|
||||
if j > 0 {
|
||||
slh.Sep(j)
|
||||
}
|
||||
if j < len(v) { // all checks done. cannot go past len.
|
||||
slh.ElemContainerState(j)
|
||||
if j < len(v) { {{/* // all checks done. cannot go past len. */}}
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&v[j])
|
||||
{{ else }}v[j] = {{ decmd .Elem }}{{ end }}
|
||||
} else {
|
||||
d.swallow()
|
||||
}
|
||||
breakFound = dd.CheckBreak()
|
||||
}
|
||||
if canChange && j < len(v) {
|
||||
v = v[:j]
|
||||
changed = true
|
||||
}
|
||||
slh.End()
|
||||
}
|
||||
slh.End()
|
||||
return v, changed
|
||||
}
|
||||
|
||||
{{end}}{{end}}{{end}}
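
The slice decode fast-path above calls decInferLen(containerLenS, d.h.MaxInitLen, size) before allocating, so a stream that declares an enormous container length cannot force a matching up-front allocation; the truncated case (xtrunc) then appends element by element. A minimal sketch of the caller-visible side of this, assuming MaxInitLen is the exported DecodeOptions field referenced here (the 1024 value and the msgpack payload are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var mh codec.MsgpackHandle
	// Cap initial allocations derived from stream-declared container lengths;
	// beyond this the decoder grows slices/maps incrementally.
	mh.MaxInitLen = 1024

	var out []int64
	data := []byte{0x90} // msgpack: empty array
	if err := codec.NewDecoderBytes(data, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(len(out)) // 0
}
```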
|
||||
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
|
||||
{{/*
|
||||
Maps can change if they are
|
||||
- addressable (from a ptr)
|
||||
- settable (e.g. contained in an interface{})
|
||||
*/}}
|
||||
func (f decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
|
||||
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
|
||||
if rv.CanAddr() {
|
||||
vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }})
|
||||
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
|
||||
|
@ -385,7 +455,8 @@ func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .E
|
|||
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
|
||||
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
|
||||
dd := d.d
|
||||
// if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil()
|
||||
cr := d.cr
|
||||
{{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
|
||||
if checkNil && dd.TryDecodeAsNil() {
|
||||
if v != nil {
|
||||
changed = true
|
||||
|
@ -395,47 +466,45 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
|
|||
|
||||
containerLen := dd.ReadMapStart()
|
||||
if canChange && v == nil {
|
||||
if containerLen > 0 {
|
||||
v = make(map[{{ .MapKey }}]{{ .Elem }}, containerLen)
|
||||
} else {
|
||||
v = make(map[{{ .MapKey }}]{{ .Elem }}) // supports indefinite-length, etc
|
||||
}
|
||||
xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
|
||||
v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
|
||||
changed = true
|
||||
}
|
||||
{{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
|
||||
var mk {{ .MapKey }}
|
||||
var mv {{ .Elem }}
|
||||
if containerLen > 0 {
|
||||
for j := 0; j < containerLen; j++ {
|
||||
{{ if eq .MapKey "interface{}" }}var mk interface{}
|
||||
if cr != nil { cr.sendContainerState(containerMapKey) }
|
||||
{{ if eq .MapKey "interface{}" }}mk = nil
|
||||
d.decode(&mk)
|
||||
if bv, bok := mk.([]byte); bok {
|
||||
mk = string(bv) // maps cannot have []byte as key. switch to string.
|
||||
}{{ else }}mk := {{ decmd .MapKey }}{{ end }}
|
||||
mv := v[mk]
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&mv)
|
||||
{{ else }}mv = {{ decmd .Elem }}{{ end }}
|
||||
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
|
||||
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
|
||||
if cr != nil { cr.sendContainerState(containerMapValue) }
|
||||
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
|
||||
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
|
||||
if v != nil {
|
||||
v[mk] = mv
|
||||
}
|
||||
}
|
||||
} else if containerLen < 0 {
|
||||
for j := 0; !dd.CheckBreak(); j++ {
|
||||
if j > 0 {
|
||||
dd.ReadMapEntrySeparator()
|
||||
}
|
||||
{{ if eq .MapKey "interface{}" }}var mk interface{}
|
||||
if cr != nil { cr.sendContainerState(containerMapKey) }
|
||||
{{ if eq .MapKey "interface{}" }}mk = nil
|
||||
d.decode(&mk)
|
||||
if bv, bok := mk.([]byte); bok {
|
||||
mk = string(bv) // maps cannot have []byte as key. switch to string.
|
||||
}{{ else }}mk := {{ decmd .MapKey }}{{ end }}
|
||||
dd.ReadMapKVSeparator()
|
||||
mv := v[mk]
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&mv)
|
||||
{{ else }}mv = {{ decmd .Elem }}{{ end }}
|
||||
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
|
||||
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
|
||||
if cr != nil { cr.sendContainerState(containerMapValue) }
|
||||
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
|
||||
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
|
||||
if v != nil {
|
||||
v[mk] = mv
|
||||
}
|
||||
}
|
||||
dd.ReadMapEnd()
|
||||
}
|
||||
if cr != nil { cr.sendContainerState(containerMapEnd) }
|
||||
return v, changed
|
||||
}
|
||||
|
||||
|
|
vendor/src/github.com/ugorji/go/codec/fast-path.not.go (vendored, new file, 32 lines)
@ -0,0 +1,32 @@
// +build notfastpath

package codec

import "reflect"

// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.
//
// To mitigate, we now support the notfastpath tag.
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.

func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool      { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool      { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool   { return false }

type fastpathT struct{}
type fastpathE struct {
	rtid  uintptr
	rt    reflect.Type
	encfn func(*encFnInfo, reflect.Value)
	decfn func(*decFnInfo, reflect.Value)
}
type fastpathA [0]fastpathE

func (x fastpathA) index(rtid uintptr) int { return -1 }

var fastpathAV fastpathA
var fastpathTV fastpathT
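
The notfastpath stubs above simply report that no fast-path applies, pushing everything through the reflection-based code. A hedged round-trip check of that slower path, assuming only the public codec API (MsgpackHandle, NewEncoderBytes, NewDecoderBytes); run it as `go test -tags notfastpath` so this stub file is the one compiled in:

```go
package codec_test

import (
	"reflect"
	"testing"

	"github.com/ugorji/go/codec"
)

// Round-trips a small map; built with -tags notfastpath the generated
// fast-path functions are absent and the generic reflection path is used.
func TestRoundTripWithoutFastpath(t *testing.T) {
	var mh codec.MsgpackHandle
	in := map[string]int{"a": 1, "b": 2}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(in); err != nil {
		t.Fatal(err)
	}

	out := map[string]int{}
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(in, out) {
		t.Fatalf("got %v, want %v", out, in)
	}
}
```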
@ -1,80 +1,101 @@
|
|||
{{var "v"}} := {{ if not isArray}}*{{ end }}{{ .Varname }}
|
||||
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart()
|
||||
|
||||
var {{var "c"}} bool
|
||||
_ = {{var "c"}}
|
||||
|
||||
{{ if not isArray }}if {{var "v"}} == nil {
|
||||
if {{var "l"}} <= 0 {
|
||||
{{var "v"}} = make({{ .CTyp }}, 0)
|
||||
} else {
|
||||
{{var "v"}} = make({{ .CTyp }}, {{var "l"}})
|
||||
}
|
||||
{{var "c"}} = true
|
||||
}
|
||||
{{ end }}
|
||||
if {{var "l"}} == 0 { {{ if isSlice }}
|
||||
if len({{var "v"}}) != 0 {
|
||||
{{var "v"}} = {{var "v"}}[:0]
|
||||
{{var "c"}} = true
|
||||
} {{ end }}
|
||||
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
|
||||
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
|
||||
var {{var "c"}} bool {{/* // changed */}}
|
||||
if {{var "l"}} == 0 {
|
||||
{{if isSlice }}if {{var "v"}} == nil {
|
||||
{{var "v"}} = []{{ .Typ }}{}
|
||||
{{var "c"}} = true
|
||||
} else if len({{var "v"}}) != 0 {
|
||||
{{var "v"}} = {{var "v"}}[:0]
|
||||
{{var "c"}} = true
|
||||
} {{end}} {{if isChan }}if {{var "v"}} == nil {
|
||||
{{var "v"}} = make({{ .CTyp }}, 0)
|
||||
{{var "c"}} = true
|
||||
} {{end}}
|
||||
} else if {{var "l"}} > 0 {
|
||||
{{ if isChan }}
|
||||
{{if isChan }}if {{var "v"}} == nil {
|
||||
{{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||
{{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
|
||||
{{var "c"}} = true
|
||||
}
|
||||
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "r"}})
|
||||
var {{var "t"}} {{ .Typ }}
|
||||
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
{{ else }}
|
||||
{{var "n"}} := {{var "l"}}
|
||||
if {{var "l"}} > cap({{var "v"}}) {
|
||||
{{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
|
||||
{{var "n"}} = len({{var "v"}})
|
||||
{{ else }}{{ if .Immutable }}
|
||||
{{var "v2"}} := {{var "v"}}
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
|
||||
if len({{var "v"}}) > 0 {
|
||||
copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
|
||||
}
|
||||
{{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
|
||||
{{ end }}{{var "c"}} = true
|
||||
{{ end }}
|
||||
} else if {{var "l"}} != len({{var "v"}}) {
|
||||
{{ if isSlice }}{{var "v"}} = {{var "v"}}[:{{var "l"}}]
|
||||
{{var "c"}} = true {{ end }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
}
|
||||
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
|
||||
var {{var "rt"}} bool {{/* truncated */}}
|
||||
if {{var "l"}} > cap({{var "v"}}) {
|
||||
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
|
||||
{{ else }}{{if not .Immutable }}
|
||||
{{var "rg"}} := len({{var "v"}}) > 0
|
||||
{{var "v2"}} := {{var "v"}} {{end}}
|
||||
{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||
if {{var "rt"}} {
|
||||
if {{var "rl"}} <= cap({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
|
||||
} else {
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||
}
|
||||
} else {
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||
}
|
||||
{{var "c"}} = true
|
||||
{{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
|
||||
if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
|
||||
} {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
|
||||
{{var "c"}} = true
|
||||
} {{end}} {{/* end isSlice:47 */}}
|
||||
{{var "j"}} := 0
|
||||
for ; {{var "j"}} < {{var "n"}} ; {{var "j"}}++ {
|
||||
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
} {{ if isArray }}
|
||||
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
}
|
||||
{{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
z.DecSwallow()
|
||||
}{{ end }}
|
||||
{{ end }}{{/* closing if not chan */}}
|
||||
} else {
|
||||
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
|
||||
if {{var "j"}} >= len({{var "v"}}) {
|
||||
{{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
|
||||
{{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
|
||||
{{var "c"}} = true {{ end }}
|
||||
}
|
||||
{{ else }}if {{var "rt"}} {
|
||||
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
{{var "v"}} = append({{var "v"}}, {{ zero}})
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
}
|
||||
if {{var "j"}} > 0 {
|
||||
{{var "h"}}.Sep({{var "j"}})
|
||||
}
|
||||
{{ if isChan}}
|
||||
} {{end}} {{/* end isArray:56 */}}
|
||||
{{end}} {{/* end isChan:16 */}}
|
||||
} else { {{/* len < 0 */}}
|
||||
{{var "j"}} := 0
|
||||
for ; !r.CheckBreak(); {{var "j"}}++ {
|
||||
{{if isChan }}
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
var {{var "t"}} {{ .Typ }}
|
||||
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
{{ else }}
|
||||
if {{var "j"}} >= len({{var "v"}}) {
|
||||
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
|
||||
{{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
|
||||
{{var "c"}} = true {{end}}
|
||||
}
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
if {{var "j"}} < len({{var "v"}}) {
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
} else {
|
||||
z.DecSwallow()
|
||||
}
|
||||
{{ end }}
|
||||
{{end}}
|
||||
}
|
||||
{{var "h"}}.End()
|
||||
{{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
|
||||
{{var "c"}} = true
|
||||
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
|
||||
{{var "v"}} = []{{ .Typ }}{}
|
||||
{{var "c"}} = true
|
||||
}{{end}}
|
||||
}
|
||||
{{ if not isArray }}if {{var "c"}} {
|
||||
{{var "h"}}.End()
|
||||
{{if not isArray }}if {{var "c"}} {
|
||||
*{{ .Varname }} = {{var "v"}}
|
||||
}{{ end }}
|
||||
|
||||
}{{end}}
|
||||
|
|
|
@ -1,46 +1,58 @@
|
|||
{{var "v"}} := *{{ .Varname }}
|
||||
{{var "l"}} := r.ReadMapStart()
|
||||
{{var "bh"}} := z.DecBasicHandle()
|
||||
if {{var "v"}} == nil {
|
||||
if {{var "l"}} > 0 {
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "l"}})
|
||||
} else {
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}) // supports indefinite-length, etc
|
||||
}
|
||||
{{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
|
||||
*{{ .Varname }} = {{var "v"}}
|
||||
}
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
var {{var "mv"}} {{ .Typ }}
|
||||
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
|
||||
if {{var "bh"}}.MapValueReset {
|
||||
{{if decElemKindPtr}}{{var "mg"}} = true
|
||||
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
|
||||
{{else if not decElemKindImmutable}}{{var "mg"}} = true
|
||||
{{end}} }
|
||||
if {{var "l"}} > 0 {
|
||||
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
|
||||
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
|
||||
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
|
||||
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{var "mk"}} = string({{var "bv"}})
|
||||
}
|
||||
{{ end }}
|
||||
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
|
||||
}{{ end }}{{if decElemKindPtr}}
|
||||
{{var "ms"}} = true{{end}}
|
||||
if {{var "mg"}} {
|
||||
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
|
||||
if {{var "mok"}} {
|
||||
{{var "ms"}} = false
|
||||
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
|
||||
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
|
||||
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
|
||||
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
if {{var "v"}} != nil {
|
||||
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
|
||||
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
|
||||
}
|
||||
}
|
||||
} else if {{var "l"}} < 0 {
|
||||
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
|
||||
if {{var "j"}} > 0 {
|
||||
r.ReadMapEntrySeparator()
|
||||
}
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
|
||||
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
|
||||
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
|
||||
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{var "mk"}} = string({{var "bv"}})
|
||||
}
|
||||
{{ end }}
|
||||
r.ReadMapKVSeparator()
|
||||
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
|
||||
}{{ end }}{{if decElemKindPtr}}
|
||||
{{var "ms"}} = true {{ end }}
|
||||
if {{var "mg"}} {
|
||||
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
|
||||
if {{var "mok"}} {
|
||||
{{var "ms"}} = false
|
||||
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
|
||||
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
|
||||
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
|
||||
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
if {{var "v"}} != nil {
|
||||
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
|
||||
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
|
||||
}
|
||||
}
|
||||
r.ReadMapEnd()
|
||||
} // else len==0: TODO: Should we clear map entries?
|
||||
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
|
||||
|
|
|
@ -10,6 +10,11 @@
|
|||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// This file is used to generate helper code for codecgen.
|
||||
// The values here i.e. genHelper(En|De)coder are not to be used directly by
|
||||
// library users. They WILL change continuously and without notice.
|
||||
|
@ -60,6 +65,65 @@ func (f genHelperEncoder) EncFallback(iv interface{}) {
|
|||
f.e.encodeI(iv, false, false)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
|
||||
bs, fnerr := iv.MarshalText()
|
||||
f.e.marshal(bs, fnerr, false, c_UTF8)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
|
||||
bs, fnerr := iv.MarshalJSON()
|
||||
f.e.marshal(bs, fnerr, true, c_UTF8)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
|
||||
bs, fnerr := iv.MarshalBinary()
|
||||
f.e.marshal(bs, fnerr, false, c_RAW)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
|
||||
if _, ok := f.e.hh.(*BincHandle); ok {
|
||||
return timeTypId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) IsJSONHandle() bool {
|
||||
return f.e.js
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) HasExtensions() bool {
|
||||
return len(f.e.h.extHandle) != 0
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
|
||||
rt := reflect.TypeOf(v)
|
||||
if rt.Kind() == reflect.Ptr {
|
||||
rt = rt.Elem()
|
||||
}
|
||||
rtid := reflect.ValueOf(rt).Pointer()
|
||||
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
|
||||
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncSendContainerState(c containerState) {
|
||||
if f.e.cr != nil {
|
||||
f.e.cr.sendContainerState(c)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------- DECODER FOLLOWS -----------------
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
|
||||
return f.d.h
|
||||
|
@ -100,3 +164,70 @@ func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
|
|||
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
|
||||
f.d.arrayCannotExpand(sliceLen, streamLen)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
|
||||
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
|
||||
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
|
||||
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
|
||||
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
|
||||
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
|
||||
if _, ok := f.d.hh.(*BincHandle); ok {
|
||||
return timeTypId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) IsJSONHandle() bool {
|
||||
return f.d.js
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) HasExtensions() bool {
|
||||
return len(f.d.h.extHandle) != 0
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
|
||||
rt := reflect.TypeOf(v).Elem()
|
||||
rtid := reflect.ValueOf(rt).Pointer()
|
||||
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
|
||||
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
|
||||
return decInferLen(clen, maxlen, unit)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecSendContainerState(c containerState) {
|
||||
if f.d.cr != nil {
|
||||
f.d.cr.sendContainerState(c)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,11 @@
|
|||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// This file is used to generate helper code for codecgen.
|
||||
// The values here i.e. genHelper(En|De)coder are not to be used directly by
|
||||
// library users. They WILL change continuously and without notice.
|
||||
|
@ -48,6 +53,7 @@ type genHelperDecoder struct {
|
|||
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
|
||||
return f.e.h
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncBinary() bool {
|
||||
return f.e.be // f.e.hh.isBinaryEncoding()
|
||||
|
@ -57,6 +63,57 @@ func (f genHelperEncoder) EncFallback(iv interface{}) {
|
|||
// println(">>>>>>>>> EncFallback")
|
||||
f.e.encodeI(iv, false, false)
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
|
||||
bs, fnerr := iv.MarshalText()
|
||||
f.e.marshal(bs, fnerr, false, c_UTF8)
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
|
||||
bs, fnerr := iv.MarshalJSON()
|
||||
f.e.marshal(bs, fnerr, true, c_UTF8)
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
|
||||
bs, fnerr := iv.MarshalBinary()
|
||||
f.e.marshal(bs, fnerr, false, c_RAW)
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
|
||||
if _, ok := f.e.hh.(*BincHandle); ok {
|
||||
return timeTypId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) IsJSONHandle() bool {
|
||||
return f.e.js
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) HasExtensions() bool {
|
||||
return len(f.e.h.extHandle) != 0
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
|
||||
rt := reflect.TypeOf(v)
|
||||
if rt.Kind() == reflect.Ptr {
|
||||
rt = rt.Elem()
|
||||
}
|
||||
rtid := reflect.ValueOf(rt).Pointer()
|
||||
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
|
||||
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperEncoder) EncSendContainerState(c containerState) {
|
||||
if f.e.cr != nil {
|
||||
f.e.cr.sendContainerState(c)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------- DECODER FOLLOWS -----------------
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
|
||||
|
@ -91,7 +148,64 @@ func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
|
|||
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
|
||||
f.d.arrayCannotExpand(sliceLen, streamLen)
|
||||
}
|
||||
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
|
||||
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
|
||||
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
|
||||
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
|
||||
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
|
||||
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
|
||||
if fnerr != nil {
|
||||
panic(fnerr)
|
||||
}
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
|
||||
if _, ok := f.d.hh.(*BincHandle); ok {
|
||||
return timeTypId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) IsJSONHandle() bool {
|
||||
return f.d.js
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) HasExtensions() bool {
|
||||
return len(f.d.h.extHandle) != 0
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
|
||||
rt := reflect.TypeOf(v).Elem()
|
||||
rtid := reflect.ValueOf(rt).Pointer()
|
||||
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
|
||||
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
|
||||
return decInferLen(clen, maxlen, unit)
|
||||
}
|
||||
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
|
||||
func (f genHelperDecoder) DecSendContainerState(c containerState) {
|
||||
if f.d.cr != nil {
|
||||
f.d.cr.sendContainerState(c)
|
||||
}
|
||||
}
|
||||
|
||||
{{/*
|
||||
|
||||
|
|
|
@ -8,132 +8,165 @@ package codec
|
|||
const genDecMapTmpl = `
|
||||
{{var "v"}} := *{{ .Varname }}
|
||||
{{var "l"}} := r.ReadMapStart()
|
||||
{{var "bh"}} := z.DecBasicHandle()
|
||||
if {{var "v"}} == nil {
|
||||
if {{var "l"}} > 0 {
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "l"}})
|
||||
} else {
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}) // supports indefinite-length, etc
|
||||
}
|
||||
{{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
|
||||
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
|
||||
*{{ .Varname }} = {{var "v"}}
|
||||
}
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
var {{var "mv"}} {{ .Typ }}
|
||||
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
|
||||
if {{var "bh"}}.MapValueReset {
|
||||
{{if decElemKindPtr}}{{var "mg"}} = true
|
||||
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
|
||||
{{else if not decElemKindImmutable}}{{var "mg"}} = true
|
||||
{{end}} }
|
||||
if {{var "l"}} > 0 {
|
||||
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
|
||||
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
|
||||
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
|
||||
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{var "mk"}} = string({{var "bv"}})
|
||||
}
|
||||
{{ end }}
|
||||
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
|
||||
}{{ end }}{{if decElemKindPtr}}
|
||||
{{var "ms"}} = true{{end}}
|
||||
if {{var "mg"}} {
|
||||
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
|
||||
if {{var "mok"}} {
|
||||
{{var "ms"}} = false
|
||||
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
|
||||
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
|
||||
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
|
||||
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
if {{var "v"}} != nil {
|
||||
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
|
||||
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
|
||||
}
|
||||
}
|
||||
} else if {{var "l"}} < 0 {
|
||||
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
|
||||
if {{var "j"}} > 0 {
|
||||
r.ReadMapEntrySeparator()
|
||||
}
|
||||
var {{var "mk"}} {{ .KTyp }}
|
||||
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
|
||||
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
|
||||
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
|
||||
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
|
||||
{{var "mk"}} = string({{var "bv"}})
|
||||
}
|
||||
{{ end }}
|
||||
r.ReadMapKVSeparator()
|
||||
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
|
||||
}{{ end }}{{if decElemKindPtr}}
|
||||
{{var "ms"}} = true {{ end }}
|
||||
if {{var "mg"}} {
|
||||
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
|
||||
if {{var "mok"}} {
|
||||
{{var "ms"}} = false
|
||||
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
|
||||
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
|
||||
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
|
||||
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
if {{var "v"}} != nil {
|
||||
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
|
||||
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
|
||||
}
|
||||
}
|
||||
r.ReadMapEnd()
|
||||
} // else len==0: TODO: Should we clear map entries?
|
||||
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
|
||||
`
|
||||
|
||||
const genDecListTmpl = `
|
||||
{{var "v"}} := {{ if not isArray}}*{{ end }}{{ .Varname }}
|
||||
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart()
|
||||
|
||||
var {{var "c"}} bool
|
||||
_ = {{var "c"}}
|
||||
|
||||
{{ if not isArray }}if {{var "v"}} == nil {
|
||||
if {{var "l"}} <= 0 {
|
||||
{{var "v"}} = make({{ .CTyp }}, 0)
|
||||
} else {
|
||||
{{var "v"}} = make({{ .CTyp }}, {{var "l"}})
|
||||
}
|
||||
{{var "c"}} = true
|
||||
}
|
||||
{{ end }}
|
||||
if {{var "l"}} == 0 { {{ if isSlice }}
|
||||
if len({{var "v"}}) != 0 {
|
||||
{{var "v"}} = {{var "v"}}[:0]
|
||||
{{var "c"}} = true
|
||||
} {{ end }}
|
||||
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
|
||||
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
|
||||
var {{var "c"}} bool {{/* // changed */}}
|
||||
if {{var "l"}} == 0 {
|
||||
{{if isSlice }}if {{var "v"}} == nil {
|
||||
{{var "v"}} = []{{ .Typ }}{}
|
||||
{{var "c"}} = true
|
||||
} else if len({{var "v"}}) != 0 {
|
||||
{{var "v"}} = {{var "v"}}[:0]
|
||||
{{var "c"}} = true
|
||||
} {{end}} {{if isChan }}if {{var "v"}} == nil {
|
||||
{{var "v"}} = make({{ .CTyp }}, 0)
|
||||
{{var "c"}} = true
|
||||
} {{end}}
|
||||
} else if {{var "l"}} > 0 {
|
||||
{{ if isChan }}
|
||||
{{if isChan }}if {{var "v"}} == nil {
|
||||
{{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||
{{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
|
||||
{{var "c"}} = true
|
||||
}
|
||||
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "r"}})
|
||||
var {{var "t"}} {{ .Typ }}
|
||||
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
{{ else }}
|
||||
{{var "n"}} := {{var "l"}}
|
||||
if {{var "l"}} > cap({{var "v"}}) {
|
||||
{{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
|
||||
{{var "n"}} = len({{var "v"}})
|
||||
{{ else }}{{ if .Immutable }}
|
||||
{{var "v2"}} := {{var "v"}}
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
|
||||
if len({{var "v"}}) > 0 {
|
||||
copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
|
||||
}
|
||||
{{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
|
||||
{{ end }}{{var "c"}} = true
|
||||
{{ end }}
|
||||
} else if {{var "l"}} != len({{var "v"}}) {
|
||||
{{ if isSlice }}{{var "v"}} = {{var "v"}}[:{{var "l"}}]
|
||||
{{var "c"}} = true {{ end }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
}
|
||||
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
|
||||
var {{var "rt"}} bool {{/* truncated */}}
|
||||
if {{var "l"}} > cap({{var "v"}}) {
|
||||
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
|
||||
{{ else }}{{if not .Immutable }}
|
||||
{{var "rg"}} := len({{var "v"}}) > 0
|
||||
{{var "v2"}} := {{var "v"}} {{end}}
|
||||
{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
|
||||
if {{var "rt"}} {
|
||||
if {{var "rl"}} <= cap({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
|
||||
} else {
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||
}
|
||||
} else {
|
||||
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
|
||||
}
|
||||
{{var "c"}} = true
|
||||
{{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
|
||||
if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
|
||||
} {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
|
||||
{{var "c"}} = true
|
||||
} {{end}} {{/* end isSlice:47 */}}
|
||||
{{var "j"}} := 0
|
||||
for ; {{var "j"}} < {{var "n"}} ; {{var "j"}}++ {
|
||||
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
} {{ if isArray }}
|
||||
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
}
|
||||
{{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
z.DecSwallow()
|
||||
}{{ end }}
|
||||
{{ end }}{{/* closing if not chan */}}
|
||||
} else {
|
||||
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
|
||||
if {{var "j"}} >= len({{var "v"}}) {
|
||||
{{ if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
|
||||
{{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
|
||||
{{var "c"}} = true {{ end }}
|
||||
}
|
||||
{{ else }}if {{var "rt"}} {
|
||||
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
|
||||
{{var "v"}} = append({{var "v"}}, {{ zero}})
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
}
|
||||
if {{var "j"}} > 0 {
|
||||
{{var "h"}}.Sep({{var "j"}})
|
||||
}
|
||||
{{ if isChan}}
|
||||
} {{end}} {{/* end isArray:56 */}}
|
||||
{{end}} {{/* end isChan:16 */}}
|
||||
} else { {{/* len < 0 */}}
|
||||
{{var "j"}} := 0
|
||||
for ; !r.CheckBreak(); {{var "j"}}++ {
|
||||
{{if isChan }}
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
var {{var "t"}} {{ .Typ }}
|
||||
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
{{var "v"}} <- {{var "t"}}
|
||||
{{ else }}
|
||||
if {{var "j"}} >= len({{var "v"}}) {
|
||||
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
|
||||
{{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
|
||||
{{var "c"}} = true {{end}}
|
||||
}
|
||||
{{var "h"}}.ElemContainerState({{var "j"}})
|
||||
if {{var "j"}} < len({{var "v"}}) {
|
||||
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
|
||||
} else {
|
||||
z.DecSwallow()
|
||||
}
|
||||
{{ end }}
|
||||
{{end}}
|
||||
}
|
||||
{{var "h"}}.End()
|
||||
{{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
|
||||
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
|
||||
{{var "c"}} = true
|
||||
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
|
||||
{{var "v"}} = []{{ .Typ }}{}
|
||||
{{var "c"}} = true
|
||||
}{{end}}
|
||||
}
|
||||
{{ if not isArray }}if {{var "c"}} {
|
||||
{{var "h"}}.End()
|
||||
{{if not isArray }}if {{var "c"}} {
|
||||
*{{ .Varname }} = {{var "v"}}
|
||||
}{{ end }}
|
||||
|
||||
}{{end}}
|
||||
`
|
||||
|
||||
|
|
vendor/src/github.com/ugorji/go/codec/gen.go (vendored, 800 lines changed; file diff suppressed because it is too large)
vendor/src/github.com/ugorji/go/codec/helper.go (vendored, 429 lines changed)
|
@ -101,6 +101,7 @@ package codec
|
|||
// check for these error conditions.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
|
@ -111,12 +112,11 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
scratchByteArrayLen = 32
|
||||
initCollectionCap = 32 // 32 is defensive. 16 is preferred.
|
||||
|
||||
// Support encoding.(Binary|Text)(Unm|M)arshaler.
|
||||
// This constant flag will enable or disable it.
|
||||
|
@ -147,6 +147,12 @@ const (
|
|||
|
||||
// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
|
||||
derefForIsEmptyValue = false
|
||||
|
||||
// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
|
||||
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
|
||||
// The chances of this are slim, so leave this "optimization".
|
||||
// TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
|
||||
resetSliceElemToZeroValue bool = false
|
||||
)
|
||||
|
||||
var oneByteArr = [1]byte{0}
|
||||
|
@ -193,16 +199,43 @@ const (
|
|||
seqTypeChan
|
||||
)
|
||||
|
||||
// note that containerMapStart and containerArrayStart are not sent.
|
||||
// This is because the ReadXXXStart and EncodeXXXStart already do these.
|
||||
type containerState uint8
|
||||
|
||||
const (
|
||||
_ containerState = iota
|
||||
|
||||
containerMapStart // slot left open, since Driver method already covers it
|
||||
containerMapKey
|
||||
containerMapValue
|
||||
containerMapEnd
|
||||
containerArrayStart // slot left open, since Driver methods already cover it
|
||||
containerArrayElem
|
||||
containerArrayEnd
|
||||
)
|
||||
|
||||
type containerStateRecv interface {
|
||||
sendContainerState(containerState)
|
||||
}
|
||||
|
||||
// mirror json.Marshaler and json.Unmarshaler here,
|
||||
// so we don't import the encoding/json package
|
||||
type jsonMarshaler interface {
|
||||
MarshalJSON() ([]byte, error)
|
||||
}
|
||||
type jsonUnmarshaler interface {
|
||||
UnmarshalJSON([]byte) error
|
||||
}
|
||||
|
||||
var (
|
||||
bigen = binary.BigEndian
|
||||
structInfoFieldName = "_struct"
|
||||
|
||||
cachedTypeInfo = make(map[uintptr]*typeInfo, 64)
|
||||
cachedTypeInfoMutex sync.RWMutex
|
||||
|
||||
// mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
|
||||
intfSliceTyp = reflect.TypeOf([]interface{}(nil))
|
||||
intfTyp = intfSliceTyp.Elem()
|
||||
mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
|
||||
mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
|
||||
intfSliceTyp = reflect.TypeOf([]interface{}(nil))
|
||||
intfTyp = intfSliceTyp.Elem()
|
||||
|
||||
stringTyp = reflect.TypeOf("")
|
||||
timeTyp = reflect.TypeOf(time.Time{})
|
||||
|
@ -217,6 +250,9 @@ var (
|
|||
textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
|
||||
jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
|
||||
jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
|
||||
|
||||
selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
|
||||
|
||||
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
|
||||
|
@ -225,6 +261,9 @@ var (
|
|||
timeTypId = reflect.ValueOf(timeTyp).Pointer()
|
||||
stringTypId = reflect.ValueOf(stringTyp).Pointer()
|
||||
|
||||
mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
|
||||
mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
|
||||
intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
|
||||
// mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
|
||||
|
||||
intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
|
||||
|
@ -238,6 +277,8 @@ var (
|
|||
noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
|
||||
)
|
||||
|
||||
var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
|
||||
|
||||
// Selfer defines methods by which a value can encode or decode itself.
|
||||
//
|
||||
// Any type which implements Selfer will be able to encode or decode itself.
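
The Selfer method set itself is outside this hunk; assuming its two methods are CodecEncodeSelf(*Encoder) and CodecDecodeSelf(*Decoder), and that MustEncode/MustDecode are the panic-on-error variants used elsewhere in this package, a self-encoding type could look roughly like this (the Point type is illustrative only):

```go
package main

import "github.com/ugorji/go/codec"

// Point chooses its own wire form: a two-element array instead of a field map.
type Point struct{ X, Y int }

func (p *Point) CodecEncodeSelf(e *codec.Encoder) {
	e.MustEncode([2]int{p.X, p.Y})
}

func (p *Point) CodecDecodeSelf(d *codec.Decoder) {
	var a [2]int
	d.MustDecode(&a)
	p.X, p.Y = a[0], a[1]
}

func main() {
	var h codec.CborHandle
	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(&Point{X: 3, Y: 4})

	var q Point
	codec.NewDecoderBytes(buf, &h).MustDecode(&q) // q == Point{3, 4}
	_ = q
}
```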
|
||||
|
@ -263,6 +304,11 @@ type MapBySlice interface {
|
|||
//
|
||||
// BasicHandle encapsulates the common options and extension functions.
|
||||
type BasicHandle struct {
|
||||
// TypeInfos is used to get the type info for any type.
|
||||
//
|
||||
// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
|
||||
TypeInfos *TypeInfos
|
||||
|
||||
extHandle
|
||||
EncodeOptions
|
||||
DecodeOptions
|
||||
|
@ -272,6 +318,13 @@ func (x *BasicHandle) getBasicHandle() *BasicHandle {
|
|||
return x
|
||||
}
|
||||
|
||||
func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
||||
if x.TypeInfos != nil {
|
||||
return x.TypeInfos.get(rtid, rt)
|
||||
}
|
||||
return defTypeInfos.get(rtid, rt)
|
||||
}
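
getTypeInfo above falls back to defTypeInfos, i.e. the struct tag keys "codec" and "json", whenever BasicHandle.TypeInfos is nil. A sketch of overriding the tag keys on a handle; the "mycodec" tag name is illustrative, and it is an assumption that keys are consulted in the order passed to NewTypeInfos:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// The hypothetical "mycodec" tag key takes priority over the defaults.
type User struct {
	Name string `mycodec:"n" json:"name"`
}

func main() {
	var jh codec.JsonHandle
	jh.TypeInfos = codec.NewTypeInfos([]string{"mycodec", "codec", "json"})

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(User{Name: "ann"}); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // expected: {"n":"ann"}
}
```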
|
||||
|
||||
// Handle is the interface for a specific encoding format.
|
||||
//
|
||||
// Typically, a Handle is pre-configured before first time use,
|
||||
|
@ -298,33 +351,45 @@ type RawExt struct {
|
|||
Value interface{}
|
||||
}
|
||||
|
||||
// Ext handles custom (de)serialization of custom types / extensions.
|
||||
type Ext interface {
|
||||
// BytesExt handles custom (de)serialization of types to/from []byte.
|
||||
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
|
||||
type BytesExt interface {
|
||||
// WriteExt converts a value to a []byte.
|
||||
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
|
||||
//
|
||||
// Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
|
||||
WriteExt(v interface{}) []byte
|
||||
|
||||
// ReadExt updates a value from a []byte.
|
||||
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
|
||||
ReadExt(dst interface{}, src []byte)
|
||||
}
|
||||
|
||||
// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
|
||||
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
|
||||
//
|
||||
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
|
||||
type InterfaceExt interface {
|
||||
// ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
|
||||
// It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
|
||||
//
|
||||
// Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
|
||||
ConvertExt(v interface{}) interface{}
|
||||
|
||||
// UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
|
||||
// It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
|
||||
UpdateExt(dst interface{}, src interface{})
|
||||
}
|
||||
|
||||
// bytesExt is a wrapper implementation to support former AddExt exported method.
|
||||
type bytesExt struct {
|
||||
// Ext handles custom (de)serialization of custom types / extensions.
|
||||
type Ext interface {
|
||||
BytesExt
|
||||
InterfaceExt
|
||||
}
|
||||
|
||||
// addExtWrapper is a wrapper implementation to support former AddExt exported method.
|
||||
type addExtWrapper struct {
|
||||
encFn func(reflect.Value) ([]byte, error)
|
||||
decFn func(reflect.Value, []byte) error
|
||||
}
|
||||
|
||||
func (x bytesExt) WriteExt(v interface{}) []byte {
|
||||
// fmt.Printf(">>>>>>>>>> WriteExt: %T, %v\n", v, v)
|
||||
func (x addExtWrapper) WriteExt(v interface{}) []byte {
|
||||
bs, err := x.encFn(reflect.ValueOf(v))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -332,21 +397,56 @@ func (x bytesExt) WriteExt(v interface{}) []byte {
|
|||
return bs
|
||||
}
|
||||
|
||||
func (x bytesExt) ReadExt(v interface{}, bs []byte) {
|
||||
// fmt.Printf(">>>>>>>>>> ReadExt: %T, %v\n", v, v)
|
||||
func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
|
||||
if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (x bytesExt) ConvertExt(v interface{}) interface{} {
|
||||
func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
|
||||
return x.WriteExt(v)
|
||||
}
|
||||
|
||||
func (x bytesExt) UpdateExt(dest interface{}, v interface{}) {
|
||||
func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
|
||||
x.ReadExt(dest, v.([]byte))
|
||||
}
|
||||
|
||||
type setExtWrapper struct {
|
||||
b BytesExt
|
||||
i InterfaceExt
|
||||
}
|
||||
|
||||
func (x *setExtWrapper) WriteExt(v interface{}) []byte {
|
||||
if x.b == nil {
|
||||
panic("BytesExt.WriteExt is not supported")
|
||||
}
|
||||
return x.b.WriteExt(v)
|
||||
}
|
||||
|
||||
func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
|
||||
if x.b == nil {
|
||||
panic("BytesExt.WriteExt is not supported")
|
||||
|
||||
}
|
||||
x.b.ReadExt(v, bs)
|
||||
}
|
||||
|
||||
func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
|
||||
if x.i == nil {
|
||||
panic("InterfaceExt.ConvertExt is not supported")
|
||||
|
||||
}
|
||||
return x.i.ConvertExt(v)
|
||||
}
|
||||
|
||||
func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
|
||||
if x.i == nil {
|
||||
panic("InterfaceExxt.UpdateExt is not supported")
|
||||
|
||||
}
|
||||
x.i.UpdateExt(dest, v)
|
||||
}
|
||||
|
||||
// type errorString string
|
||||
// func (x errorString) Error() string { return string(x) }
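
With Ext split into BytesExt and InterfaceExt, format-aware handles such as cbor and json register extensions that convert to and from another Go value rather than raw bytes. A minimal sketch, assuming the handle-level SetInterfaceExt named in the deprecation comments takes (reflect.Type, tag, InterfaceExt) like SetExt does; the Celsius type and tag 78 are illustrative only:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

// Celsius travels on the wire as a plain float64 carried under a CBOR tag.
type Celsius float64

type celsiusExt struct{}

func (celsiusExt) ConvertExt(v interface{}) interface{} {
	switch c := v.(type) {
	case Celsius:
		return float64(c)
	case *Celsius:
		return float64(*c)
	default:
		panic("celsiusExt: unexpected type")
	}
}

func (celsiusExt) UpdateExt(dst interface{}, src interface{}) {
	// src should arrive as the decoded simpler value, a float64 here.
	*dst.(*Celsius) = Celsius(src.(float64))
}

func main() {
	var h codec.CborHandle
	if err := h.SetInterfaceExt(reflect.TypeOf(Celsius(0)), 78, celsiusExt{}); err != nil {
		panic(err)
	}

	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(Celsius(36.6))

	var out Celsius
	codec.NewDecoderBytes(buf, &h).MustDecode(&out)
	fmt.Println(out) // 36.6
}
```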
|
||||
|
||||
|
@ -399,9 +499,9 @@ type extTypeTagFn struct {
|
|||
ext Ext
|
||||
}
|
||||
|
||||
type extHandle []*extTypeTagFn
|
||||
type extHandle []extTypeTagFn
|
||||
|
||||
// DEPRECATED: AddExt is deprecated in favor of SetExt. It exists for compatibility only.
|
||||
// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
|
||||
//
|
||||
// AddExt registers an encode and decode function for a reflect.Type.
|
||||
// AddExt internally calls SetExt.
|
||||
|
@ -413,10 +513,10 @@ func (o *extHandle) AddExt(
|
|||
if encfn == nil || decfn == nil {
|
||||
return o.SetExt(rt, uint64(tag), nil)
|
||||
}
|
||||
return o.SetExt(rt, uint64(tag), bytesExt{encfn, decfn})
|
||||
return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
|
||||
}
|
||||
|
||||
// SetExt registers a tag and Ext for a reflect.Type.
|
||||
// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
|
||||
//
|
||||
// Note that the type must be a named type, and specifically not
|
||||
// a pointer or Interface. An error is returned if that is not honored.
|
||||
|
@ -438,12 +538,17 @@ func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
|
|||
}
|
||||
}
|
||||
|
||||
*o = append(*o, &extTypeTagFn{rtid, rt, tag, ext})
|
||||
if *o == nil {
|
||||
*o = make([]extTypeTagFn, 0, 4)
|
||||
}
|
||||
*o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
|
||||
return
|
||||
}
|
||||
|
||||
func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
|
||||
for _, v := range o {
|
||||
var v *extTypeTagFn
|
||||
for i := range o {
|
||||
v = &o[i]
|
||||
if v.rtid == rtid {
|
||||
return v
|
||||
}
|
||||
|
@ -452,7 +557,9 @@ func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
|
|||
}
|
||||
|
||||
func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
|
||||
for _, v := range o {
|
||||
var v *extTypeTagFn
|
||||
for i := range o {
|
||||
v = &o[i]
|
||||
if v.tag == tag {
|
||||
return v
|
||||
}
|
||||
|
@ -471,6 +578,10 @@ type structFieldInfo struct {
|
|||
toArray bool // if field is _struct, is the toArray set?
|
||||
}
|
||||
|
||||
// func (si *structFieldInfo) isZero() bool {
|
||||
// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray
|
||||
// }
|
||||
|
||||
// rv returns the field of the struct.
|
||||
// If anonymous, it returns an Invalid
|
||||
func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
|
||||
|
@ -516,9 +627,9 @@ func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
|
|||
}
|
||||
|
||||
func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
|
||||
if fname == "" {
|
||||
panic(noFieldNameToStructFieldInfoErr)
|
||||
}
|
||||
// if fname == "" {
|
||||
// panic(noFieldNameToStructFieldInfoErr)
|
||||
// }
|
||||
si := structFieldInfo{
|
||||
encName: fname,
|
||||
}
|
||||
|
@ -571,6 +682,8 @@ type typeInfo struct {
|
|||
rt reflect.Type
|
||||
rtid uintptr
|
||||
|
||||
numMeth uint16 // number of methods
|
||||
|
||||
// baseId gives a pointer to the base reflect.Type, after dereferencing
|
||||
// the pointers. E.g. base type of ***time.Time is time.Time.
|
||||
base reflect.Type
|
||||
|
@ -589,6 +702,11 @@ type typeInfo struct {
|
|||
tmIndir int8 // number of indirections to get to textMarshaler type
|
||||
tunmIndir int8 // number of indirections to get to textUnmarshaler type
|
||||
|
||||
jm bool // base type (T or *T) is a jsonMarshaler
|
||||
junm bool // base type (T or *T) is a jsonUnmarshaler
|
||||
jmIndir int8 // number of indirections to get to jsonMarshaler type
|
||||
junmIndir int8 // number of indirections to get to jsonUnmarshaler type
|
||||
|
||||
cs bool // base type (T or *T) is a Selfer
|
||||
csIndir int8 // number of indirections to get to Selfer type
|
||||
|
||||
|
@ -623,33 +741,49 @@ func (ti *typeInfo) indexForEncName(name string) int {
|
|||
return -1
|
||||
}
|
||||
|
||||
func getStructTag(t reflect.StructTag) (s string) {
|
||||
// TypeInfos caches typeInfo for each type on first inspection.
|
||||
//
|
||||
// It is configured with a set of tag keys, which are used to get
|
||||
// configuration for the type.
|
||||
type TypeInfos struct {
|
||||
infos map[uintptr]*typeInfo
|
||||
mu sync.RWMutex
|
||||
tags []string
|
||||
}
|
||||
|
||||
// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
|
||||
//
|
||||
// This allows users to customize the struct tag keys which contain configuration
|
||||
// of their types.
|
||||
func NewTypeInfos(tags []string) *TypeInfos {
|
||||
return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
|
||||
}
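A minimal usage sketch of the new exported NewTypeInfos (the "mycodec" tag name is an assumption for illustration; how a Handle is pointed at a custom TypeInfos is not shown in this hunk):

```go
package main

import "github.com/ugorji/go/codec"

func main() {
	// Hypothetical: prefer a custom "mycodec" struct tag, then fall back to
	// the conventional "codec" and "json" tags when reading field names.
	tis := codec.NewTypeInfos([]string{"mycodec", "codec", "json"})
	_ = tis // wiring a Handle to a custom TypeInfos is outside this hunk
}
```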
|
||||
|
||||
func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
|
||||
// check for tags: codec, json, in that order.
|
||||
// this allows seamless support for many configured structs.
|
||||
s = t.Get("codec")
|
||||
if s == "" {
|
||||
s = t.Get("json")
|
||||
for _, x := range x.tags {
|
||||
s = t.Get(x)
|
||||
if s != "" {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return
|
||||
}
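An illustration of the lookup order implemented by structTag, using only the standard reflect.StructTag API (a standalone sketch, not library code):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// With tags = ["codec", "json"], a field that only carries a json tag
	// is still picked up, because the keys are consulted in order.
	tag := reflect.StructTag(`json:"name,omitempty"`)
	for _, k := range []string{"codec", "json"} {
		if s := tag.Get(k); s != "" {
			fmt.Println(s) // prints "name,omitempty"
			break
		}
	}
}
```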
|
||||
|
||||
func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
||||
func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
||||
var ok bool
|
||||
cachedTypeInfoMutex.RLock()
|
||||
pti, ok = cachedTypeInfo[rtid]
|
||||
cachedTypeInfoMutex.RUnlock()
|
||||
x.mu.RLock()
|
||||
pti, ok = x.infos[rtid]
|
||||
x.mu.RUnlock()
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
cachedTypeInfoMutex.Lock()
|
||||
defer cachedTypeInfoMutex.Unlock()
|
||||
if pti, ok = cachedTypeInfo[rtid]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
// do not hold lock while computing this.
|
||||
// it may lead to duplication, but that's ok.
|
||||
ti := typeInfo{rt: rt, rtid: rtid}
|
||||
pti = &ti
|
||||
ti.numMeth = uint16(rt.NumMethod())
|
||||
|
||||
var indir int8
|
||||
if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
|
||||
|
@ -664,6 +798,12 @@ func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
|||
if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
|
||||
ti.tunm, ti.tunmIndir = true, indir
|
||||
}
|
||||
if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok {
|
||||
ti.jm, ti.jmIndir = true, indir
|
||||
}
|
||||
if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok {
|
||||
ti.junm, ti.junmIndir = true, indir
|
||||
}
|
||||
if ok, indir = implementsIntf(rt, selferTyp); ok {
|
||||
ti.cs, ti.csIndir = true, indir
|
||||
}
|
||||
|
@ -690,11 +830,11 @@ func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
|||
if rt.Kind() == reflect.Struct {
|
||||
var siInfo *structFieldInfo
|
||||
if f, ok := rt.FieldByName(structInfoFieldName); ok {
|
||||
siInfo = parseStructFieldInfo(structInfoFieldName, getStructTag(f.Tag))
|
||||
siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
|
||||
ti.toArray = siInfo.toArray
|
||||
}
|
||||
sfip := make([]*structFieldInfo, 0, rt.NumField())
|
||||
rgetTypeInfo(rt, nil, make(map[string]bool, 16), &sfip, siInfo)
|
||||
x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo)
|
||||
|
||||
ti.sfip = make([]*structFieldInfo, len(sfip))
|
||||
ti.sfi = make([]*structFieldInfo, len(sfip))
|
||||
|
@ -703,48 +843,78 @@ func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
|||
copy(ti.sfi, sfip)
|
||||
}
|
||||
// sfi = sfip
|
||||
cachedTypeInfo[rtid] = pti
|
||||
|
||||
x.mu.Lock()
|
||||
if pti, ok = x.infos[rtid]; !ok {
|
||||
pti = &ti
|
||||
x.infos[rtid] = pti
|
||||
}
|
||||
x.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
|
||||
func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
|
||||
sfi *[]*structFieldInfo, siInfo *structFieldInfo,
|
||||
) {
|
||||
for j := 0; j < rt.NumField(); j++ {
|
||||
f := rt.Field(j)
|
||||
// func types are skipped.
|
||||
if tk := f.Type.Kind(); tk == reflect.Func {
|
||||
fkind := f.Type.Kind()
|
||||
// skip if a func type, or is unexported, or structTag value == "-"
|
||||
if fkind == reflect.Func {
|
||||
continue
|
||||
}
|
||||
stag := getStructTag(f.Tag)
|
||||
// if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
|
||||
if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
|
||||
continue
|
||||
}
|
||||
stag := x.structTag(f.Tag)
|
||||
if stag == "-" {
|
||||
continue
|
||||
}
|
||||
if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
|
||||
var si *structFieldInfo
|
||||
// if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
|
||||
if f.Anonymous && fkind != reflect.Interface {
|
||||
doInline := stag == ""
|
||||
if !doInline {
|
||||
si = parseStructFieldInfo("", stag)
|
||||
doInline = si.encName == ""
|
||||
// doInline = si.isZero()
|
||||
}
|
||||
if doInline {
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
if ft.Kind() == reflect.Struct {
|
||||
indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
|
||||
copy(indexstack2, indexstack)
|
||||
indexstack2[len(indexstack)] = j
|
||||
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
||||
x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// after the anonymous dance: if an unexported field, skip
|
||||
if f.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
// if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it.
|
||||
if f.Anonymous && stag == "" {
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
if ft.Kind() == reflect.Struct {
|
||||
indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
|
||||
copy(indexstack2, indexstack)
|
||||
indexstack2[len(indexstack)] = j
|
||||
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
||||
rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// do not let fields with the same name in embedded structs override a field at a higher level.
|
||||
// this must be done after the anonymous check, to allow anonymous fields
|
||||
// to still include their child fields
|
||||
if _, ok := fnameToHastag[f.Name]; ok {
|
||||
continue
|
||||
}
|
||||
si := parseStructFieldInfo(f.Name, stag)
|
||||
if f.Name == "" {
|
||||
panic(noFieldNameToStructFieldInfoErr)
|
||||
}
|
||||
if si == nil {
|
||||
si = parseStructFieldInfo(f.Name, stag)
|
||||
} else if si.encName == "" {
|
||||
si.encName = f.Name
|
||||
}
|
||||
// si.ikind = int(f.Type.Kind())
|
||||
if len(indexstack) == 0 {
|
||||
si.i = int16(j)
|
||||
|
@ -779,8 +949,9 @@ func panicToErr(err *error) {
|
|||
// panic(fmt.Errorf("%s: "+format, params2...))
|
||||
// }
|
||||
|
||||
func isMutableKind(k reflect.Kind) (v bool) {
|
||||
return k == reflect.Int ||
|
||||
func isImmutableKind(k reflect.Kind) (v bool) {
|
||||
return false ||
|
||||
k == reflect.Int ||
|
||||
k == reflect.Int8 ||
|
||||
k == reflect.Int16 ||
|
||||
k == reflect.Int32 ||
|
||||
|
@ -790,6 +961,7 @@ func isMutableKind(k reflect.Kind) (v bool) {
|
|||
k == reflect.Uint16 ||
|
||||
k == reflect.Uint32 ||
|
||||
k == reflect.Uint64 ||
|
||||
k == reflect.Uintptr ||
|
||||
k == reflect.Float32 ||
|
||||
k == reflect.Float64 ||
|
||||
k == reflect.Bool ||
|
||||
|
@ -844,3 +1016,114 @@ func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
|
|||
i = int64(v)
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------ SORT -----------------
|
||||
|
||||
func isNaN(f float64) bool { return f != f }
|
||||
|
||||
// -----------------------
|
||||
|
||||
type intSlice []int64
|
||||
type uintSlice []uint64
|
||||
type floatSlice []float64
|
||||
type boolSlice []bool
|
||||
type stringSlice []string
|
||||
type bytesSlice [][]byte
|
||||
|
||||
func (p intSlice) Len() int { return len(p) }
|
||||
func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p uintSlice) Len() int { return len(p) }
|
||||
func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p floatSlice) Len() int { return len(p) }
|
||||
func (p floatSlice) Less(i, j int) bool {
|
||||
return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
|
||||
}
|
||||
func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p stringSlice) Len() int { return len(p) }
|
||||
func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p bytesSlice) Len() int { return len(p) }
|
||||
func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
|
||||
func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p boolSlice) Len() int { return len(p) }
|
||||
func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
|
||||
func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// ---------------------
|
||||
|
||||
type intRv struct {
|
||||
v int64
|
||||
r reflect.Value
|
||||
}
|
||||
type intRvSlice []intRv
|
||||
type uintRv struct {
|
||||
v uint64
|
||||
r reflect.Value
|
||||
}
|
||||
type uintRvSlice []uintRv
|
||||
type floatRv struct {
|
||||
v float64
|
||||
r reflect.Value
|
||||
}
|
||||
type floatRvSlice []floatRv
|
||||
type boolRv struct {
|
||||
v bool
|
||||
r reflect.Value
|
||||
}
|
||||
type boolRvSlice []boolRv
|
||||
type stringRv struct {
|
||||
v string
|
||||
r reflect.Value
|
||||
}
|
||||
type stringRvSlice []stringRv
|
||||
type bytesRv struct {
|
||||
v []byte
|
||||
r reflect.Value
|
||||
}
|
||||
type bytesRvSlice []bytesRv
|
||||
|
||||
func (p intRvSlice) Len() int { return len(p) }
|
||||
func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
|
||||
func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p uintRvSlice) Len() int { return len(p) }
|
||||
func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
|
||||
func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p floatRvSlice) Len() int { return len(p) }
|
||||
func (p floatRvSlice) Less(i, j int) bool {
|
||||
return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
|
||||
}
|
||||
func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p stringRvSlice) Len() int { return len(p) }
|
||||
func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
|
||||
func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p bytesRvSlice) Len() int { return len(p) }
|
||||
func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
|
||||
func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p boolRvSlice) Len() int { return len(p) }
|
||||
func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
|
||||
func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// -----------------
|
||||
|
||||
type bytesI struct {
|
||||
v []byte
|
||||
i interface{}
|
||||
}
|
||||
|
||||
type bytesISlice []bytesI
|
||||
|
||||
func (p bytesISlice) Len() int { return len(p) }
|
||||
func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
|
||||
func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
|
|
@ -149,3 +149,94 @@ func halfFloatToFloatBits(yy uint16) (d uint32) {
|
|||
m = m << 13
|
||||
return (s << 31) | (e << 23) | m
|
||||
}
|
||||
|
||||
// growCap returns a new capacity for a slice, given the following:
|
||||
// - oldCap: current capacity
|
||||
// - unit: in-memory size of an element
|
||||
// - num: number of elements to add
|
||||
func growCap(oldCap, unit, num int) (newCap int) {
|
||||
// appendslice logic (if cap < 1024, *2, else *1.25):
|
||||
// leads to many copy calls, especially when copying bytes.
|
||||
// bytes.Buffer model (2*cap + n): much better for bytes.
|
||||
// a smarter way is to take the byte-size of the appended element (type) into account
|
||||
|
||||
// maintain 3 thresholds:
|
||||
// t1: if cap <= t1, newcap = 2x
|
||||
// t2: if cap <= t2, newcap = 1.75x
|
||||
// t3: if cap <= t3, newcap = 1.5x
|
||||
// else newcap = 1.25x
|
||||
//
|
||||
// t1, t2, t3 >= 1024 always.
|
||||
// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
|
||||
//
|
||||
// With this, appending for bytes increase by:
|
||||
// 100% up to 4K
|
||||
// 75% up to 8K
|
||||
// 50% up to 16K
|
||||
// 25% beyond that
|
||||
|
||||
// unit can be 0 e.g. for struct{}{}; handle that appropriately
|
||||
var t1, t2, t3 int // thresholds
|
||||
if unit <= 1 {
|
||||
t1, t2, t3 = 4*1024, 8*1024, 16*1024
|
||||
} else if unit < 16 {
|
||||
t3 = 16 / unit * 1024
|
||||
t1 = t3 * 1 / 4
|
||||
t2 = t3 * 2 / 4
|
||||
} else {
|
||||
t1, t2, t3 = 1024, 1024, 1024
|
||||
}
|
||||
|
||||
var x int // temporary variable
|
||||
|
||||
// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
|
||||
if oldCap <= t1 { // [0,t1]
|
||||
x = 8
|
||||
} else if oldCap > t3 { // (t3,infinity]
|
||||
x = 5
|
||||
} else if oldCap <= t2 { // (t1,t2]
|
||||
x = 7
|
||||
} else { // (t2,t3]
|
||||
x = 6
|
||||
}
|
||||
newCap = x * oldCap / 4
|
||||
|
||||
if num > 0 {
|
||||
newCap += num
|
||||
}
|
||||
|
||||
// ensure newCap is a multiple of 64 (if it is > 64) or 16.
|
||||
if newCap > 64 {
|
||||
if x = newCap % 64; x != 0 {
|
||||
x = newCap / 64
|
||||
newCap = 64 * (x + 1)
|
||||
}
|
||||
} else {
|
||||
if x = newCap % 16; x != 0 {
|
||||
x = newCap / 16
|
||||
newCap = 16 * (x + 1)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
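A worked example of the growth rule described in the comments above, reproducing the arithmetic rather than calling the unexported function (the concrete numbers are chosen for illustration):

```go
package main

import "fmt"

func main() {
	// For a []byte (unit == 1): t1, t2, t3 = 4K, 8K, 16K.
	// oldCap = 5000 falls in (t1, t2], so the multiplier x is 7 (a 75% increase).
	oldCap, num := 5000, 100
	newCap := 7*oldCap/4 + num // 8750 + 100 = 8850
	// capacities above 64 are rounded up to the next multiple of 64
	if newCap%64 != 0 {
		newCap = 64 * (newCap/64 + 1) // 8896
	}
	fmt.Println(newCap) // prints 8896
}
```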
|
||||
|
||||
func expandSliceValue(s reflect.Value, num int) reflect.Value {
|
||||
if num <= 0 {
|
||||
return s
|
||||
}
|
||||
l0 := s.Len()
|
||||
l1 := l0 + num // new slice length
|
||||
if l1 < l0 {
|
||||
panic("ExpandSlice: slice overflow")
|
||||
}
|
||||
c0 := s.Cap()
|
||||
if l1 <= c0 {
|
||||
return s.Slice(0, l1)
|
||||
}
|
||||
st := s.Type()
|
||||
c1 := growCap(c0, int(st.Elem().Size()), num)
|
||||
s2 := reflect.MakeSlice(st, l1, c1)
|
||||
// println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
|
||||
reflect.Copy(s2, s)
|
||||
return s2
|
||||
}
|
||||
|
|
|
@ -26,6 +26,9 @@ type unsafeBytes struct {
|
|||
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
|
||||
// In regular safe mode, it is an allocation and copy.
|
||||
func stringView(v []byte) string {
|
||||
if len(v) == 0 {
|
||||
return ""
|
||||
}
|
||||
x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)}
|
||||
return *(*string)(unsafe.Pointer(&x))
|
||||
}
|
||||
|
@ -34,6 +37,9 @@ func stringView(v []byte) string {
|
|||
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
|
||||
// In regular safe mode, it is an allocation and copy.
|
||||
func bytesView(v string) []byte {
|
||||
if len(v) == 0 {
|
||||
return zeroByteSlice
|
||||
}
|
||||
x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)}
|
||||
return *(*[]byte)(unsafe.Pointer(&x))
|
||||
}
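The comments above refer to a "regular safe mode"; its counterparts are plain conversions that allocate and copy, kept in a separate build-tagged file that is not part of this hunk. A sketch with illustrative names:

```go
// Sketch of the safe-mode equivalents referenced above (names are
// illustrative; the real safe variants live behind a build tag).
func stringViewSafe(v []byte) string { return string(v) } // allocates and copies
func bytesViewSafe(v string) []byte  { return []byte(v) } // allocates and copies
```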
|
||||
|
|
562 vendor/src/github.com/ugorji/go/codec/json.go vendored
|
@ -3,8 +3,9 @@
|
|||
|
||||
package codec
|
||||
|
||||
// This json support uses base64 encoding for bytes, because you cannot
|
||||
// By default, this json support uses base64 encoding for bytes, because you cannot
|
||||
// store and read any arbitrary string in json (only unicode).
|
||||
// However, the user can configure how to encode/decode bytes.
|
||||
//
|
||||
// This library specifically supports UTF-8 for encoding and decoding only.
|
||||
//
|
||||
|
@ -27,10 +28,14 @@ package codec
|
|||
// - encode does not beautify. There is no whitespace when encoding.
|
||||
// - rpc calls which take single integer arguments or write single numeric arguments will need care.
|
||||
|
||||
// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
|
||||
// MUST not call one another.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
|
@ -52,12 +57,13 @@ var jsonUint64Pow10 = [...]uint64{
|
|||
}
|
||||
|
||||
const (
|
||||
// if jsonTrackSkipWhitespace, we track Whitespace and reduce the number of redundant checks.
|
||||
// Make it a const flag, so that it can be elided during linking if false.
|
||||
// jsonUnreadAfterDecNum controls whether we unread after decoding a number.
|
||||
//
|
||||
// It is not a clear win, because we continually set a flag behind a pointer
|
||||
// and then check it each time, as opposed to just 4 conditionals on a stack variable.
|
||||
jsonTrackSkipWhitespace = true
|
||||
// instead of unreading, just update d.tok (iff it's not a whitespace char)
|
||||
// However, doing this means that we may HOLD onto some data which belongs to another stream.
|
||||
// Thus, it is safest to unread the data when done.
|
||||
// keep behind a constant flag for now.
|
||||
jsonUnreadAfterDecNum = true
|
||||
|
||||
// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
|
||||
// - If we see first character of null, false or true,
|
||||
|
@ -87,9 +93,31 @@ type jsonEncDriver struct {
|
|||
h *JsonHandle
|
||||
b [64]byte // scratch
|
||||
bs []byte // scratch
|
||||
se setExtWrapper
|
||||
c containerState
|
||||
noBuiltInTypes
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) sendContainerState(c containerState) {
|
||||
// determine whether to output separators
|
||||
if c == containerMapKey {
|
||||
if e.c != containerMapStart {
|
||||
e.w.writen1(',')
|
||||
}
|
||||
} else if c == containerMapValue {
|
||||
e.w.writen1(':')
|
||||
} else if c == containerMapEnd {
|
||||
e.w.writen1('}')
|
||||
} else if c == containerArrayElem {
|
||||
if e.c != containerArrayStart {
|
||||
e.w.writen1(',')
|
||||
}
|
||||
} else if c == containerArrayEnd {
|
||||
e.w.writen1(']')
|
||||
}
|
||||
e.c = c
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeNil() {
|
||||
e.w.writeb(jsonLiterals[9:13]) // null
|
||||
}
|
||||
|
@ -121,7 +149,7 @@ func (e *jsonEncDriver) EncodeUint(v uint64) {
|
|||
|
||||
func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
|
||||
if v := ext.ConvertExt(rv); v == nil {
|
||||
e.EncodeNil()
|
||||
e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
|
||||
} else {
|
||||
en.encode(v)
|
||||
}
|
||||
|
@ -130,7 +158,7 @@ func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Enco
|
|||
func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
|
||||
// only encodes re.Value (never re.Data)
|
||||
if re.Value == nil {
|
||||
e.EncodeNil()
|
||||
e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
|
||||
} else {
|
||||
en.encode(re.Value)
|
||||
}
|
||||
|
@ -138,30 +166,12 @@ func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
|
|||
|
||||
func (e *jsonEncDriver) EncodeArrayStart(length int) {
|
||||
e.w.writen1('[')
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeArrayEntrySeparator() {
|
||||
e.w.writen1(',')
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeArrayEnd() {
|
||||
e.w.writen1(']')
|
||||
e.c = containerArrayStart
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeMapStart(length int) {
|
||||
e.w.writen1('{')
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeMapEntrySeparator() {
|
||||
e.w.writen1(',')
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeMapKVSeparator() {
|
||||
e.w.writen1(':')
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeMapEnd() {
|
||||
e.w.writen1('}')
|
||||
e.c = containerMapStart
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
|
||||
|
@ -175,11 +185,13 @@ func (e *jsonEncDriver) EncodeSymbol(v string) {
|
|||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
|
||||
// if encoding raw bytes and RawBytesExt is configured, use it to encode
|
||||
if c == c_RAW && e.se.i != nil {
|
||||
e.EncodeExt(v, 0, &e.se, e.e)
|
||||
return
|
||||
}
|
||||
if c == c_RAW {
|
||||
slen := base64.StdEncoding.EncodedLen(len(v))
|
||||
if e.bs == nil {
|
||||
e.bs = e.b[:]
|
||||
}
|
||||
if cap(e.bs) >= slen {
|
||||
e.bs = e.bs[:slen]
|
||||
} else {
|
||||
|
@ -195,6 +207,10 @@ func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
|
|||
}
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) EncodeAsis(v []byte) {
|
||||
e.w.writeb(v)
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) quoteStr(s string) {
|
||||
// adapted from std pkg encoding/json
|
||||
const hex = "0123456789abcdef"
|
||||
|
@ -266,7 +282,7 @@ func (e *jsonEncDriver) quoteStr(s string) {
|
|||
//--------------------------------
|
||||
|
||||
type jsonNum struct {
|
||||
bytes []byte // may have [+-.eE0-9]
|
||||
// bytes []byte // may have [+-.eE0-9]
|
||||
mantissa uint64 // where mantissa ends, and maybe dot begins.
|
||||
exponent int16 // exponent value.
|
||||
manOverflow bool
|
||||
|
@ -276,7 +292,6 @@ type jsonNum struct {
|
|||
}
|
||||
|
||||
func (x *jsonNum) reset() {
|
||||
x.bytes = x.bytes[:0]
|
||||
x.manOverflow = false
|
||||
x.neg = false
|
||||
x.dot = false
|
||||
|
@ -309,29 +324,26 @@ func (x *jsonNum) uintExp() (n uint64, overflow bool) {
|
|||
// return
|
||||
}
|
||||
|
||||
func (x *jsonNum) floatVal() (f float64) {
|
||||
// these constants are only used within floatVal.
|
||||
// They are brought out, so that floatVal can be inlined.
|
||||
const (
|
||||
jsonUint64MantissaBits = 52
|
||||
jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1
|
||||
)
|
||||
|
||||
func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
|
||||
// We do not want to lose precision.
|
||||
// Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
|
||||
// - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
|
||||
// We expect up to 99.... (19 digits)
|
||||
// - The mantissa cannot fit into a 52 bits of uint64
|
||||
// - The exponent is beyond our scope, i.e. beyond 22.
|
||||
const uint64MantissaBits = 52
|
||||
const maxExponent = int16(len(jsonFloat64Pow10)) - 1
|
||||
parseUsingStrConv = x.manOverflow ||
|
||||
x.exponent > jsonMaxExponent ||
|
||||
(x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
|
||||
x.mantissa>>jsonUint64MantissaBits != 0
|
||||
|
||||
parseUsingStrConv := x.manOverflow ||
|
||||
x.exponent > maxExponent ||
|
||||
(x.exponent < 0 && -(x.exponent) > maxExponent) ||
|
||||
x.mantissa>>uint64MantissaBits != 0
|
||||
if parseUsingStrConv {
|
||||
var err error
|
||||
if f, err = strconv.ParseFloat(stringView(x.bytes), 64); err != nil {
|
||||
panic(fmt.Errorf("parse float: %s, %v", x.bytes, err))
|
||||
return
|
||||
}
|
||||
if x.neg {
|
||||
f = -f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -350,162 +362,221 @@ func (x *jsonNum) floatVal() (f float64) {
|
|||
}
|
||||
|
||||
type jsonDecDriver struct {
|
||||
d *Decoder
|
||||
h *JsonHandle
|
||||
r decReader // *bytesDecReader decReader
|
||||
ct valueType // container type. one of unset, array or map.
|
||||
bstr [8]byte // scratch used for string \UXXX parsing
|
||||
b [64]byte // scratch
|
||||
noBuiltInTypes
|
||||
d *Decoder
|
||||
h *JsonHandle
|
||||
r decReader
|
||||
|
||||
wsSkipped bool // whitespace skipped
|
||||
c containerState
|
||||
// tok is used to store the token read right after skipWhiteSpace.
|
||||
tok uint8
|
||||
|
||||
bstr [8]byte // scratch used for string \UXXX parsing
|
||||
b [64]byte // scratch, used for parsing strings or numbers
|
||||
b2 [64]byte // scratch, used only for decodeBytes (after base64)
|
||||
bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
|
||||
|
||||
se setExtWrapper
|
||||
|
||||
n jsonNum
|
||||
noBuiltInTypes
|
||||
}
|
||||
|
||||
// This will skip whitespace characters and return the next byte to read.
|
||||
// The next byte determines what the value will be one of.
|
||||
func (d *jsonDecDriver) skipWhitespace(unread bool) (b byte) {
|
||||
// as initReadNext is not called all the time, we set ct to unSet whenever
|
||||
// we skipwhitespace, as this is the signal that something new is about to be read.
|
||||
d.ct = valueTypeUnset
|
||||
b = d.r.readn1()
|
||||
if !jsonTrackSkipWhitespace || !d.wsSkipped {
|
||||
for ; b == ' ' || b == '\t' || b == '\r' || b == '\n'; b = d.r.readn1() {
|
||||
}
|
||||
if jsonTrackSkipWhitespace {
|
||||
d.wsSkipped = true
|
||||
}
|
||||
}
|
||||
if unread {
|
||||
func jsonIsWS(b byte) bool {
|
||||
return b == ' ' || b == '\t' || b == '\r' || b == '\n'
|
||||
}
|
||||
|
||||
// // This will skip whitespace characters and return the next byte to read.
|
||||
// // The next byte determines what the value will be one of.
|
||||
// func (d *jsonDecDriver) skipWhitespace() {
|
||||
// // fast-path: do not enter loop. Just check first (in case no whitespace).
|
||||
// b := d.r.readn1()
|
||||
// if jsonIsWS(b) {
|
||||
// r := d.r
|
||||
// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
// }
|
||||
// }
|
||||
// d.tok = b
|
||||
// }
|
||||
|
||||
func (d *jsonDecDriver) uncacheRead() {
|
||||
if d.tok != 0 {
|
||||
d.r.unreadn1()
|
||||
d.tok = 0
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) sendContainerState(c containerState) {
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
var xc uint8 // char expected
|
||||
if c == containerMapKey {
|
||||
if d.c != containerMapStart {
|
||||
xc = ','
|
||||
}
|
||||
} else if c == containerMapValue {
|
||||
xc = ':'
|
||||
} else if c == containerMapEnd {
|
||||
xc = '}'
|
||||
} else if c == containerArrayElem {
|
||||
if d.c != containerArrayStart {
|
||||
xc = ','
|
||||
}
|
||||
} else if c == containerArrayEnd {
|
||||
xc = ']'
|
||||
}
|
||||
if xc != 0 {
|
||||
if d.tok != xc {
|
||||
d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok)
|
||||
}
|
||||
d.tok = 0
|
||||
}
|
||||
d.c = c
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) CheckBreak() bool {
|
||||
b := d.skipWhitespace(true)
|
||||
return b == '}' || b == ']'
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok == '}' || d.tok == ']' {
|
||||
// d.tok = 0 // only checking, not consuming
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
|
||||
bs := d.r.readx(int(toIdx - fromIdx))
|
||||
d.tok = 0
|
||||
if jsonValidateSymbols {
|
||||
if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
|
||||
d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
|
||||
return
|
||||
}
|
||||
}
|
||||
if jsonTrackSkipWhitespace {
|
||||
d.wsSkipped = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) TryDecodeAsNil() bool {
|
||||
b := d.skipWhitespace(true)
|
||||
if b == 'n' {
|
||||
d.readStrIdx(9, 13) // null
|
||||
d.ct = valueTypeNil
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok == 'n' {
|
||||
d.readStrIdx(10, 13) // ull
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) DecodeBool() bool {
|
||||
b := d.skipWhitespace(false)
|
||||
if b == 'f' {
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok == 'f' {
|
||||
d.readStrIdx(5, 9) // alse
|
||||
return false
|
||||
}
|
||||
if b == 't' {
|
||||
if d.tok == 't' {
|
||||
d.readStrIdx(1, 4) // rue
|
||||
return true
|
||||
}
|
||||
d.d.errorf("json: decode bool: got first char %c", b)
|
||||
d.d.errorf("json: decode bool: got first char %c", d.tok)
|
||||
return false // "unreachable"
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) ReadMapStart() int {
|
||||
d.expectChar('{')
|
||||
d.ct = valueTypeMap
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok != '{' {
|
||||
d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
|
||||
}
|
||||
d.tok = 0
|
||||
d.c = containerMapStart
|
||||
return -1
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) ReadArrayStart() int {
|
||||
d.expectChar('[')
|
||||
d.ct = valueTypeArray
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok != '[' {
|
||||
d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
|
||||
}
|
||||
d.tok = 0
|
||||
d.c = containerArrayStart
|
||||
return -1
|
||||
}
|
||||
func (d *jsonDecDriver) ReadMapEnd() {
|
||||
d.expectChar('}')
|
||||
}
|
||||
func (d *jsonDecDriver) ReadArrayEnd() {
|
||||
d.expectChar(']')
|
||||
}
|
||||
func (d *jsonDecDriver) ReadArrayEntrySeparator() {
|
||||
d.expectChar(',')
|
||||
}
|
||||
func (d *jsonDecDriver) ReadMapEntrySeparator() {
|
||||
d.expectChar(',')
|
||||
}
|
||||
func (d *jsonDecDriver) ReadMapKVSeparator() {
|
||||
d.expectChar(':')
|
||||
}
|
||||
func (d *jsonDecDriver) expectChar(c uint8) {
|
||||
b := d.skipWhitespace(false)
|
||||
if b != c {
|
||||
d.d.errorf("json: expect char %c but got char %c", c, b)
|
||||
return
|
||||
}
|
||||
if jsonTrackSkipWhitespace {
|
||||
d.wsSkipped = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) IsContainerType(vt valueType) bool {
|
||||
func (d *jsonDecDriver) ContainerType() (vt valueType) {
|
||||
// check container type by checking the first char
|
||||
if d.ct == valueTypeUnset {
|
||||
b := d.skipWhitespace(true)
|
||||
if b == '{' {
|
||||
d.ct = valueTypeMap
|
||||
} else if b == '[' {
|
||||
d.ct = valueTypeArray
|
||||
} else if b == 'n' {
|
||||
d.ct = valueTypeNil
|
||||
} else if b == '"' {
|
||||
d.ct = valueTypeString
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if vt == valueTypeNil || vt == valueTypeBytes || vt == valueTypeString ||
|
||||
vt == valueTypeArray || vt == valueTypeMap {
|
||||
return d.ct == vt
|
||||
if b := d.tok; b == '{' {
|
||||
return valueTypeMap
|
||||
} else if b == '[' {
|
||||
return valueTypeArray
|
||||
} else if b == 'n' {
|
||||
return valueTypeNil
|
||||
} else if b == '"' {
|
||||
return valueTypeString
|
||||
}
|
||||
// ugorji: made switch into conditionals, so that IsContainerType can be inlined.
|
||||
// switch vt {
|
||||
// case valueTypeNil, valueTypeBytes, valueTypeString, valueTypeArray, valueTypeMap:
|
||||
// return d.ct == vt
|
||||
// }
|
||||
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
return false // "unreachable"
|
||||
return valueTypeUnset
|
||||
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
|
||||
// return false // "unreachable"
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) decNum(storeBytes bool) {
|
||||
// storeBytes = true // TODO: remove.
|
||||
|
||||
// If it is has a . or an e|E, decode as a float; else decode as an int.
|
||||
b := d.skipWhitespace(false)
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
b := d.tok
|
||||
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
|
||||
d.d.errorf("json: decNum: got first char '%c'", b)
|
||||
return
|
||||
}
|
||||
d.tok = 0
|
||||
|
||||
const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
|
||||
const jsonNumUintMaxVal = 1<<uint64(64) - 1
|
||||
|
||||
// var n jsonNum // create stack-copy jsonNum, and set to pointer at end.
|
||||
// n.bytes = d.n.bytes[:0]
|
||||
n := &d.n
|
||||
r := d.r
|
||||
n.reset()
|
||||
d.bs = d.bs[:0]
|
||||
|
||||
// The format of a number is as below:
|
||||
// parsing: sign? digit* dot? digit* e? sign? digit*
|
||||
|
@ -524,7 +595,7 @@ LOOP:
|
|||
case 0:
|
||||
state = 2
|
||||
// do not add sign to the slice ...
|
||||
b, eof = d.r.readn1eof()
|
||||
b, eof = r.readn1eof()
|
||||
continue
|
||||
case 6: // typ = jsonNumFloat
|
||||
state = 7
|
||||
|
@ -537,7 +608,7 @@ LOOP:
|
|||
state = 2
|
||||
n.neg = true
|
||||
// do not add sign to the slice ...
|
||||
b, eof = d.r.readn1eof()
|
||||
b, eof = r.readn1eof()
|
||||
continue
|
||||
case 6: // typ = jsonNumFloat
|
||||
eNeg = true
|
||||
|
@ -601,9 +672,9 @@ LOOP:
|
|||
break LOOP
|
||||
}
|
||||
if storeBytes {
|
||||
n.bytes = append(n.bytes, b)
|
||||
d.bs = append(d.bs, b)
|
||||
}
|
||||
b, eof = d.r.readn1eof()
|
||||
b, eof = r.readn1eof()
|
||||
}
|
||||
|
||||
if jsonTruncateMantissa && n.mantissa != 0 {
|
||||
|
@ -624,10 +695,13 @@ LOOP:
|
|||
// d.n = n
|
||||
|
||||
if !eof {
|
||||
d.r.unreadn1()
|
||||
}
|
||||
if jsonTrackSkipWhitespace {
|
||||
d.wsSkipped = false
|
||||
if jsonUnreadAfterDecNum {
|
||||
r.unreadn1()
|
||||
} else {
|
||||
if !jsonIsWS(b) {
|
||||
d.tok = b
|
||||
}
|
||||
}
|
||||
}
|
||||
// fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
|
||||
// n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
|
||||
|
@ -659,13 +733,28 @@ func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
|
|||
i = -i
|
||||
}
|
||||
if chkOvf.Int(i, bitsize) {
|
||||
d.d.errorf("json: overflow %v bits: %s", bitsize, n.bytes)
|
||||
d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
|
||||
return
|
||||
}
|
||||
// fmt.Printf("DecodeInt: %v\n", i)
|
||||
return
|
||||
}
|
||||
|
||||
// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number
|
||||
func (d *jsonDecDriver) floatVal() (f float64) {
|
||||
f, useStrConv := d.n.floatVal()
|
||||
if useStrConv {
|
||||
var err error
|
||||
if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil {
|
||||
panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
|
||||
}
|
||||
if d.n.neg {
|
||||
f = -f
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
|
||||
d.decNum(false)
|
||||
n := &d.n
|
||||
|
@ -690,7 +779,7 @@ func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
|
|||
}
|
||||
}
|
||||
if chkOvf.Uint(u, bitsize) {
|
||||
d.d.errorf("json: overflow %v bits: %s", bitsize, n.bytes)
|
||||
d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
|
||||
return
|
||||
}
|
||||
// fmt.Printf("DecodeUint: %v\n", u)
|
||||
|
@ -699,10 +788,9 @@ func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
|
|||
|
||||
func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
|
||||
d.decNum(true)
|
||||
n := &d.n
|
||||
f = n.floatVal()
|
||||
f = d.floatVal()
|
||||
if chkOverflow32 && chkOvf.Float32(f) {
|
||||
d.d.errorf("json: overflow float32: %v, %s", f, n.bytes)
|
||||
d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
|
||||
return
|
||||
}
|
||||
return
|
||||
|
@ -722,14 +810,24 @@ func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta
|
|||
}
|
||||
|
||||
func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
|
||||
// zerocopy doesn't matter for json, as the bytes must be parsed.
|
||||
bs0 := d.appendStringAsBytes(d.b[:0])
|
||||
if isstring {
|
||||
return bs0
|
||||
// if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
|
||||
if !isstring && d.se.i != nil {
|
||||
bsOut = bs
|
||||
d.DecodeExt(&bsOut, 0, &d.se)
|
||||
return
|
||||
}
|
||||
d.appendStringAsBytes()
|
||||
// if isstring, then just return the bytes, even if it is using the scratch buffer.
|
||||
// the bytes will be converted to a string as needed.
|
||||
if isstring {
|
||||
return d.bs
|
||||
}
|
||||
bs0 := d.bs
|
||||
slen := base64.StdEncoding.DecodedLen(len(bs0))
|
||||
if cap(bs) >= slen {
|
||||
if slen <= cap(bs) {
|
||||
bsOut = bs[:slen]
|
||||
} else if zerocopy && slen <= cap(d.b2) {
|
||||
bsOut = d.b2[:slen]
|
||||
} else {
|
||||
bsOut = make([]byte, slen)
|
||||
}
|
||||
|
@ -745,17 +843,36 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
|
|||
}
|
||||
|
||||
func (d *jsonDecDriver) DecodeString() (s string) {
|
||||
return string(d.appendStringAsBytes(d.b[:0]))
|
||||
d.appendStringAsBytes()
|
||||
// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
|
||||
if d.c == containerMapKey {
|
||||
return d.d.string(d.bs)
|
||||
}
|
||||
return string(d.bs)
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) appendStringAsBytes(v []byte) []byte {
|
||||
d.expectChar('"')
|
||||
func (d *jsonDecDriver) appendStringAsBytes() {
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
if d.tok != '"' {
|
||||
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
|
||||
}
|
||||
d.tok = 0
|
||||
|
||||
v := d.bs[:0]
|
||||
var c uint8
|
||||
r := d.r
|
||||
for {
|
||||
c := d.r.readn1()
|
||||
c = r.readn1()
|
||||
if c == '"' {
|
||||
break
|
||||
} else if c == '\\' {
|
||||
c = d.r.readn1()
|
||||
c = r.readn1()
|
||||
switch c {
|
||||
case '"', '\\', '/', '\'':
|
||||
v = append(v, c)
|
||||
|
@ -779,27 +896,24 @@ func (d *jsonDecDriver) appendStringAsBytes(v []byte) []byte {
|
|||
v = append(v, d.bstr[:w2]...)
|
||||
default:
|
||||
d.d.errorf("json: unsupported escaped value: %c", c)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
v = append(v, c)
|
||||
}
|
||||
}
|
||||
if jsonTrackSkipWhitespace {
|
||||
d.wsSkipped = false
|
||||
}
|
||||
return v
|
||||
d.bs = v
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
|
||||
if checkSlashU && !(d.r.readn1() == '\\' && d.r.readn1() == 'u') {
|
||||
r := d.r
|
||||
if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
|
||||
d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
|
||||
return 0
|
||||
}
|
||||
// u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
|
||||
var u uint32
|
||||
for i := 0; i < 4; i++ {
|
||||
v := d.r.readn1()
|
||||
v := r.readn1()
|
||||
if '0' <= v && v <= '9' {
|
||||
v = v - '0'
|
||||
} else if 'a' <= v && v <= 'z' {
|
||||
|
@ -815,69 +929,83 @@ func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
|
|||
return rune(u)
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
|
||||
n := d.skipWhitespace(true)
|
||||
switch n {
|
||||
func (d *jsonDecDriver) DecodeNaked() {
|
||||
z := &d.d.n
|
||||
// var decodeFurther bool
|
||||
|
||||
if d.tok == 0 {
|
||||
var b byte
|
||||
r := d.r
|
||||
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
|
||||
}
|
||||
d.tok = b
|
||||
}
|
||||
switch d.tok {
|
||||
case 'n':
|
||||
d.readStrIdx(9, 13) // null
|
||||
vt = valueTypeNil
|
||||
d.readStrIdx(10, 13) // ull
|
||||
z.v = valueTypeNil
|
||||
case 'f':
|
||||
d.readStrIdx(4, 9) // false
|
||||
vt = valueTypeBool
|
||||
v = false
|
||||
d.readStrIdx(5, 9) // alse
|
||||
z.v = valueTypeBool
|
||||
z.b = false
|
||||
case 't':
|
||||
d.readStrIdx(0, 4) // true
|
||||
vt = valueTypeBool
|
||||
v = true
|
||||
d.readStrIdx(1, 4) // rue
|
||||
z.v = valueTypeBool
|
||||
z.b = true
|
||||
case '{':
|
||||
vt = valueTypeMap
|
||||
decodeFurther = true
|
||||
z.v = valueTypeMap
|
||||
// d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart
|
||||
// decodeFurther = true
|
||||
case '[':
|
||||
vt = valueTypeArray
|
||||
decodeFurther = true
|
||||
z.v = valueTypeArray
|
||||
// d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart
|
||||
// decodeFurther = true
|
||||
case '"':
|
||||
vt = valueTypeString
|
||||
v = d.DecodeString()
|
||||
z.v = valueTypeString
|
||||
z.s = d.DecodeString()
|
||||
default: // number
|
||||
d.decNum(true)
|
||||
n := &d.n
|
||||
// if the string had a any of [.eE], then decode as float.
|
||||
switch {
|
||||
case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
|
||||
vt = valueTypeFloat
|
||||
v = n.floatVal()
|
||||
z.v = valueTypeFloat
|
||||
z.f = d.floatVal()
|
||||
case n.exponent == 0:
|
||||
u := n.mantissa
|
||||
switch {
|
||||
case n.neg:
|
||||
vt = valueTypeInt
|
||||
v = -int64(u)
|
||||
z.v = valueTypeInt
|
||||
z.i = -int64(u)
|
||||
case d.h.SignedInteger:
|
||||
vt = valueTypeInt
|
||||
v = int64(u)
|
||||
z.v = valueTypeInt
|
||||
z.i = int64(u)
|
||||
default:
|
||||
vt = valueTypeUint
|
||||
v = u
|
||||
z.v = valueTypeUint
|
||||
z.u = u
|
||||
}
|
||||
default:
|
||||
u, overflow := n.uintExp()
|
||||
switch {
|
||||
case overflow:
|
||||
vt = valueTypeFloat
|
||||
v = n.floatVal()
|
||||
z.v = valueTypeFloat
|
||||
z.f = d.floatVal()
|
||||
case n.neg:
|
||||
vt = valueTypeInt
|
||||
v = -int64(u)
|
||||
z.v = valueTypeInt
|
||||
z.i = -int64(u)
|
||||
case d.h.SignedInteger:
|
||||
vt = valueTypeInt
|
||||
v = int64(u)
|
||||
z.v = valueTypeInt
|
||||
z.i = int64(u)
|
||||
default:
|
||||
vt = valueTypeUint
|
||||
v = u
|
||||
z.v = valueTypeUint
|
||||
z.u = u
|
||||
}
|
||||
}
|
||||
// fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
|
||||
}
|
||||
// if decodeFurther {
|
||||
// d.s.sc.retryRead()
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -887,7 +1015,8 @@ func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe
|
|||
//
|
||||
// Json is comprehensively supported:
|
||||
// - decodes numbers into interface{} as int, uint or float64
|
||||
// - encodes and decodes []byte using base64 Std Encoding
|
||||
// - configurable way to encode/decode []byte.
|
||||
// by default, encodes and decodes []byte using base64 Std Encoding
|
||||
// - UTF-8 support for encoding and decoding
|
||||
//
|
||||
// It has better performance than the json library in the standard library,
|
||||
|
@ -899,21 +1028,40 @@ func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurthe
|
|||
// For example, a user can read a json value, then a cbor value, then a msgpack value,
|
||||
// all from the same stream in sequence.
|
||||
type JsonHandle struct {
|
||||
BasicHandle
|
||||
textEncodingType
|
||||
BasicHandle
|
||||
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
|
||||
// If not configured, raw bytes are encoded to/from base64 text.
|
||||
RawBytesExt InterfaceExt
|
||||
}
|
||||
|
||||
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
|
||||
}
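A hypothetical sketch of the new RawBytesExt hook: encode []byte as a hex string instead of the default base64 text. The hexBytesExt type, and the exact dynamic types passed to ConvertExt/UpdateExt, are assumptions for illustration, not part of this diff:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ugorji/go/codec"
)

// hexBytesExt is a hypothetical InterfaceExt that represents raw []byte as hex text.
type hexBytesExt struct{}

func (hexBytesExt) ConvertExt(v interface{}) interface{} {
	switch b := v.(type) {
	case []byte:
		return hex.EncodeToString(b)
	case *[]byte:
		return hex.EncodeToString(*b)
	}
	panic("hexBytesExt.ConvertExt: unexpected type")
}

func (hexBytesExt) UpdateExt(dst interface{}, src interface{}) {
	b, err := hex.DecodeString(src.(string))
	if err != nil {
		panic(err)
	}
	*(dst.(*[]byte)) = b
}

func main() {
	var jh codec.JsonHandle
	jh.RawBytesExt = hexBytesExt{} // opt out of the default base64 text for []byte

	var out []byte
	if err := codec.NewEncoderBytes(&out, &jh).Encode([]byte{0xde, 0xad, 0xbe, 0xef}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected: "deadbeef" as a JSON string
}
```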
|
||||
|
||||
func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
|
||||
return &jsonEncDriver{e: e, w: e.w, h: h}
|
||||
hd := jsonEncDriver{e: e, w: e.w, h: h}
|
||||
hd.bs = hd.b[:0]
|
||||
hd.se.i = h.RawBytesExt
|
||||
return &hd
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
|
||||
// d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
|
||||
hd := jsonDecDriver{d: d, r: d.r, h: h}
|
||||
hd.n.bytes = d.b[:]
|
||||
hd.bs = hd.b[:0]
|
||||
hd.se.i = h.RawBytesExt
|
||||
return &hd
|
||||
}
|
||||
|
||||
func (e *jsonEncDriver) reset() {
|
||||
e.w = e.e.w
|
||||
}
|
||||
|
||||
func (d *jsonDecDriver) reset() {
|
||||
d.r = d.d.r
|
||||
}
|
||||
|
||||
var jsonEncodeTerminate = []byte{' '}
|
||||
|
||||
func (h *JsonHandle) rpcEncodeTerminate() []byte {
|
||||
|
|
167 vendor/src/github.com/ugorji/go/codec/msgpack.go vendored
|
@ -24,6 +24,7 @@ import (
|
|||
"io"
|
||||
"math"
|
||||
"net/rpc"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -102,11 +103,11 @@ var (
|
|||
//---------------------------------------------
|
||||
|
||||
type msgpackEncDriver struct {
|
||||
noBuiltInTypes
|
||||
encNoSeparator
|
||||
e *Encoder
|
||||
w encWriter
|
||||
h *MsgpackHandle
|
||||
noBuiltInTypes
|
||||
encNoSeparator
|
||||
x [8]byte
|
||||
}
|
||||
|
||||
|
@ -270,7 +271,6 @@ type msgpackDecDriver struct {
|
|||
bd byte
|
||||
bdRead bool
|
||||
br bool // bytes reader
|
||||
bdType valueType
|
||||
noBuiltInTypes
|
||||
noStreamingCodec
|
||||
decNoSeparator
|
||||
|
@ -281,106 +281,100 @@ type msgpackDecDriver struct {
|
|||
// It is called when a nil interface{} is passed, leaving it up to the DecDriver
|
||||
// to introspect the stream and decide how best to decode.
|
||||
// It deciphers the value by looking at the stream first.
|
||||
func (d *msgpackDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
|
||||
func (d *msgpackDecDriver) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
bd := d.bd
|
||||
n := &d.d.n
|
||||
var decodeFurther bool
|
||||
|
||||
switch bd {
|
||||
case mpNil:
|
||||
vt = valueTypeNil
|
||||
n.v = valueTypeNil
|
||||
d.bdRead = false
|
||||
case mpFalse:
|
||||
vt = valueTypeBool
|
||||
v = false
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case mpTrue:
|
||||
vt = valueTypeBool
|
||||
v = true
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
|
||||
case mpFloat:
|
||||
vt = valueTypeFloat
|
||||
v = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||
n.v = valueTypeFloat
|
||||
n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
|
||||
case mpDouble:
|
||||
vt = valueTypeFloat
|
||||
v = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||
n.v = valueTypeFloat
|
||||
n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
|
||||
|
||||
case mpUint8:
|
||||
vt = valueTypeUint
|
||||
v = uint64(d.r.readn1())
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(d.r.readn1())
|
||||
case mpUint16:
|
||||
vt = valueTypeUint
|
||||
v = uint64(bigen.Uint16(d.r.readx(2)))
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(bigen.Uint16(d.r.readx(2)))
|
||||
case mpUint32:
|
||||
vt = valueTypeUint
|
||||
v = uint64(bigen.Uint32(d.r.readx(4)))
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(bigen.Uint32(d.r.readx(4)))
|
||||
case mpUint64:
|
||||
vt = valueTypeUint
|
||||
v = uint64(bigen.Uint64(d.r.readx(8)))
|
||||
n.v = valueTypeUint
|
||||
n.u = uint64(bigen.Uint64(d.r.readx(8)))
|
||||
|
||||
case mpInt8:
|
||||
vt = valueTypeInt
|
||||
v = int64(int8(d.r.readn1()))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int8(d.r.readn1()))
|
||||
case mpInt16:
|
||||
vt = valueTypeInt
|
||||
v = int64(int16(bigen.Uint16(d.r.readx(2))))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
|
||||
case mpInt32:
|
||||
vt = valueTypeInt
|
||||
v = int64(int32(bigen.Uint32(d.r.readx(4))))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
|
||||
case mpInt64:
|
||||
vt = valueTypeInt
|
||||
v = int64(int64(bigen.Uint64(d.r.readx(8))))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
|
||||
|
||||
default:
|
||||
switch {
|
||||
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
|
||||
// positive fixnum (always signed)
|
||||
vt = valueTypeInt
|
||||
v = int64(int8(bd))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int8(bd))
|
||||
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
|
||||
// negative fixnum
|
||||
vt = valueTypeInt
|
||||
v = int64(int8(bd))
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(int8(bd))
|
||||
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
|
||||
if d.h.RawToString {
|
||||
var rvm string
|
||||
vt = valueTypeString
|
||||
v = &rvm
|
||||
n.v = valueTypeString
|
||||
n.s = d.DecodeString()
|
||||
} else {
|
||||
var rvm = zeroByteSlice
|
||||
vt = valueTypeBytes
|
||||
v = &rvm
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
}
|
||||
decodeFurther = true
|
||||
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
|
||||
var rvm = zeroByteSlice
|
||||
vt = valueTypeBytes
|
||||
v = &rvm
|
||||
decodeFurther = true
|
||||
n.v = valueTypeBytes
|
||||
n.l = d.DecodeBytes(nil, false, false)
|
||||
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
|
||||
vt = valueTypeArray
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
|
||||
vt = valueTypeMap
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
|
||||
n.v = valueTypeExt
|
||||
clen := d.readExtLen()
|
||||
var re RawExt
|
||||
re.Tag = uint64(d.r.readn1())
|
||||
re.Data = d.r.readx(clen)
|
||||
v = &re
|
||||
vt = valueTypeExt
|
||||
n.u = uint64(d.r.readn1())
|
||||
n.l = d.r.readx(clen)
|
||||
default:
|
||||
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
|
||||
return
|
||||
}
|
||||
}
|
||||
if !decodeFurther {
|
||||
d.bdRead = false
|
||||
}
|
||||
if vt == valueTypeUint && d.h.SignedInteger {
|
||||
d.bdType = valueTypeInt
|
||||
v = int64(v.(uint64))
|
||||
if n.v == valueTypeUint && d.h.SignedInteger {
|
||||
n.v = valueTypeInt
|
||||
n.i = int64(n.u)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -536,15 +530,11 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOu
|
|||
d.readNextBd()
|
||||
}
|
||||
var clen int
|
||||
if isstring {
|
||||
clen = d.readContainerLen(msgpackContainerStr)
|
||||
// ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
|
||||
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
// bytes can be decoded from msgpackContainerStr or msgpackContainerBin
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
clen = d.readContainerLen(msgpackContainerStr)
}
clen = d.readContainerLen(msgpackContainerStr)
}
// println("DecodeBytes: clen: ", clen)
d.bdRead = false

@ -569,28 +559,27 @@ func (d *msgpackDecDriver) DecodeString() (s string) {
func (d *msgpackDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}

func (d *msgpackDecDriver) IsContainerType(vt valueType) bool {
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
bd := d.bd
switch vt {
case valueTypeNil:
return bd == mpNil
case valueTypeBytes:
return bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
(!d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)))
case valueTypeString:
return d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))
case valueTypeArray:
return bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax)
case valueTypeMap:
return bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax)
if bd == mpNil {
return valueTypeNil
} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
(!d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
return valueTypeBytes
} else if d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
return valueTypeString
} else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
return valueTypeArray
} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return false // "unreachable"
return valueTypeUnset
}

func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {

@ -617,7 +606,7 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
} else if (ct.bFixMin & bd) == ct.bFixMin {
clen = int(ct.bFixMin ^ bd)
} else {
d.d.errorf("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
return
}
d.bdRead = false

@ -704,7 +693,6 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
BasicHandle
binaryEncodingType

// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool

@ -720,6 +708,11 @@ type MsgpackHandle struct {
// type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString.
WriteExt bool
binaryEncodingType
}

func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}

func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {

@ -730,6 +723,14 @@ func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}

func (e *msgpackEncDriver) reset() {
e.w = e.e.w
}

func (d *msgpackDecDriver) reset() {
d.r = d.d.r
}

//--------------------------------------------------

type msgpackSpecRpcCodec struct {
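For context on the `RawToString` and `WriteExt` knobs touched in the `MsgpackHandle` hunks above, here is a minimal round-trip sketch. It is not part of the vendored diff; it assumes the package is importable as `github.com/ugorji/go/codec` and uses only its public `NewEncoderBytes`/`NewDecoderBytes` entry points.

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var mh codec.MsgpackHandle
	// With RawToString set, msgpack raw/str data decodes into string (not []byte)
	// when the target is a nil interface{}.
	mh.RawToString = true

	// Encode a map into a msgpack byte slice.
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}

	// Decode back into an untyped value; map keys come back as strings here
	// because of RawToString.
	var out interface{}
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
}
```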
153 vendor/src/github.com/ugorji/go/codec/noop.go vendored
@ -11,6 +11,7 @@ import (
// NoopHandle returns a no-op handle. It basically does nothing.
// It is only useful for benchmarking, as it gives an idea of the
// overhead from the codec framework.
//
// LIBRARY USERS: *** DO NOT USE ***
func NoopHandle(slen int) *noopHandle {
h := noopHandle{}

@ -37,38 +38,57 @@ type noopHandle struct {
}

type noopDrv struct {
d *Decoder
e *Encoder
i int
S []string
B [][]byte
mk bool // are we about to read a map key?
ct valueType // last request for IsContainerType.
cb bool // last response for IsContainerType.
mks []bool // stack. if map (true), else if array (false)
mk bool // top of stack. what container are we on? map or array?
ct valueType // last response for IsContainerType.
cb int // counter for ContainerType
rand *rand.Rand
}

func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }

func (h *noopDrv) newEncDriver(_ *Encoder) encDriver { return h }
func (h *noopDrv) newDecDriver(_ *Decoder) decDriver { return h }
func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }

func (h *noopDrv) reset() {}
func (h *noopDrv) uncacheRead() {}

// --- encDriver

func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil() {}
func (h *noopDrv) EncodeInt(i int64) {}
func (h *noopDrv) EncodeUint(i uint64) {}
func (h *noopDrv) EncodeBool(b bool) {}
func (h *noopDrv) EncodeFloat32(f float32) {}
func (h *noopDrv) EncodeFloat64(f float64) {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
func (h *noopDrv) EncodeArrayStart(length int) {}
func (h *noopDrv) EncodeArrayEnd() {}
func (h *noopDrv) EncodeArrayEntrySeparator() {}
func (h *noopDrv) EncodeMapStart(length int) {}
func (h *noopDrv) EncodeMapEnd() {}
func (h *noopDrv) EncodeMapEntrySeparator() {}
func (h *noopDrv) EncodeMapKVSeparator() {}
// stack functions (for map and array)
func (h *noopDrv) start(b bool) {
// println("start", len(h.mks)+1)
h.mks = append(h.mks, b)
h.mk = b
}
func (h *noopDrv) end() {
// println("end: ", len(h.mks)-1)
h.mks = h.mks[:len(h.mks)-1]
if len(h.mks) > 0 {
h.mk = h.mks[len(h.mks)-1]
} else {
h.mk = false
}
}

func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil() {}
func (h *noopDrv) EncodeInt(i int64) {}
func (h *noopDrv) EncodeUint(i uint64) {}
func (h *noopDrv) EncodeBool(b bool) {}
func (h *noopDrv) EncodeFloat32(f float32) {}
func (h *noopDrv) EncodeFloat64(f float64) {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
func (h *noopDrv) EncodeEnd() { h.end() }

func (h *noopDrv) EncodeString(c charEncoding, v string) {}
func (h *noopDrv) EncodeSymbol(v string) {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}

@ -90,28 +110,54 @@ func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8

func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }

func (h *noopDrv) ReadMapEnd() { h.mk = false }
func (h *noopDrv) ReadArrayEnd() {}
func (h *noopDrv) ReadArrayEntrySeparator() {}
func (h *noopDrv) ReadMapEntrySeparator() { h.mk = true }
func (h *noopDrv) ReadMapKVSeparator() { h.mk = false }
func (h *noopDrv) ReadEnd() { h.end() }

// toggle map/slice
func (h *noopDrv) ReadMapStart() int { h.mk = true; return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { return h.m(10) }
func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }

func (h *noopDrv) IsContainerType(vt valueType) bool {
func (h *noopDrv) ContainerType() (vt valueType) {
// return h.m(2) == 0
// handle kStruct
if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
h.cb = !h.cb
h.ct = vt
return h.cb
}
// go in a loop and check it.
h.ct = vt
h.cb = h.m(7) == 0
return h.cb
// handle kStruct, which will bomb is it calls this and doesn't get back a map or array.
// consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
// for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
// however, every 10th time it is called, we just return something else.
var vals = [...]valueType{valueTypeArray, valueTypeMap}
// ------------ TAKE ------------
// if h.cb%2 == 0 {
// if h.ct == valueTypeMap || h.ct == valueTypeArray {
// } else {
// h.ct = vals[h.m(2)]
// }
// } else if h.cb%5 == 0 {
// h.ct = valueType(h.m(8))
// } else {
// h.ct = vals[h.m(2)]
// }
// ------------ TAKE ------------
// if h.cb%16 == 0 {
// h.ct = valueType(h.cb % 8)
// } else {
// h.ct = vals[h.cb%2]
// }
h.ct = vals[h.cb%2]
h.cb++
return h.ct

// if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
// return h.ct
// }
// return valueTypeUnset
// TODO: may need to tweak this so it works.
// if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
// h.cb = !h.cb
// h.ct = vt
// return h.cb
// }
// // go in a loop and check it.
// h.ct = vt
// h.cb = h.m(7) == 0
// return h.cb
}
func (h *noopDrv) TryDecodeAsNil() bool {
if h.mk {

@ -124,7 +170,7 @@ func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
return 0
}

func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
func (h *noopDrv) DecodeNaked() {
// use h.r (random) not h.m() because h.m() could cause the same value to be given.
var sk int
if h.mk {

@ -133,32 +179,35 @@ func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool
} else {
sk = h.r(12)
}
n := &h.d.n
switch sk {
case 0:
vt = valueTypeNil
n.v = valueTypeNil
case 1:
vt, v = valueTypeBool, false
n.v, n.b = valueTypeBool, false
case 2:
vt, v = valueTypeBool, true
n.v, n.b = valueTypeBool, true
case 3:
vt, v = valueTypeInt, h.DecodeInt(64)
n.v, n.i = valueTypeInt, h.DecodeInt(64)
case 4:
vt, v = valueTypeUint, h.DecodeUint(64)
n.v, n.u = valueTypeUint, h.DecodeUint(64)
case 5:
vt, v = valueTypeFloat, h.DecodeFloat(true)
n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
case 6:
vt, v = valueTypeFloat, h.DecodeFloat(false)
n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
case 7:
vt, v = valueTypeString, h.DecodeString()
n.v, n.s = valueTypeString, h.DecodeString()
case 8:
vt, v = valueTypeBytes, h.B[h.m(len(h.B))]
n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
case 9:
vt, decodeFurther = valueTypeArray, true
n.v = valueTypeArray
case 10:
vt, decodeFurther = valueTypeMap, true
n.v = valueTypeMap
default:
vt, v = valueTypeExt, &RawExt{Tag: h.DecodeUint(64), Data: h.B[h.m(len(h.B))]}
n.v = valueTypeExt
n.u = h.DecodeUint(64)
n.l = h.B[h.m(len(h.B))]
}
h.ct = vt
h.ct = n.v
return
}
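The comments above position `NoopHandle` purely as a benchmarking aid (and explicitly warn library users off it). A hedged sketch of how that stated purpose might look in practice follows; it is illustrative only, the file name and benchmark are hypothetical, and the value 8 is just an arbitrary sample size for the handle's fake strings and byte slices.

```go
// noop_overhead_test.go (illustrative only)
package codec_test

import (
	"testing"

	"github.com/ugorji/go/codec"
)

// BenchmarkNoopEncode gives a rough idea of the codec framework's own overhead,
// since the no-op handle does no real encoding work.
func BenchmarkNoopEncode(b *testing.B) {
	h := codec.NoopHandle(8)
	in := map[string]int{"a": 1, "b": 2}
	var buf []byte
	for i := 0; i < b.N; i++ {
		if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
			b.Fatal(err)
		}
	}
}
```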
vendor/src/github.com/ugorji/go/codec/prebuild.sh vendored
@ -49,7 +49,8 @@ _build() {
# [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
# [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
else
rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \
*safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi

cat > gen.generated.go <<EOF

@ -77,28 +78,15 @@ EOF
\`

EOF
# All functions, variables which must exist are put in this file.
# This way, build works before we generate the right things.
cat > fast-path.generated.go <<EOF

cat > gen-from-tmpl.codec.generated.go <<EOF
package codec
import "reflect"
// func GenBytesToStringRO(b []byte) string { return string(b) }
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(encFnInfo, reflect.Value)
decfn func(decFnInfo, reflect.Value)
import "io"
func GenInternalGoFile(r io.Reader, w io.Writer, safe bool) error {
return genInternalGoFile(r, w, safe)
}
type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 }
var fastpathAV fastpathA

EOF

cat > gen-from-tmpl.generated.go <<EOF
//+build ignore

@ -129,8 +117,8 @@ run("gen-helper.go.tmpl", "gen-helper.generated.go", false)
}

EOF
go run gen-from-tmpl.generated.go && \
rm -f gen-from-tmpl.generated.go
go run -tags=notfastpath gen-from-tmpl.generated.go && \
rm -f gen-from-tmpl.*generated.go
}

_codegenerators() {

@ -140,18 +128,35 @@ _codegenerators() {
"1" == $( _needgen "values_ffjson${zsfx}" ) ||
1 == 0 ]]
then
true && \
echo "codecgen - !unsafe ... " && \
codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} $zfin && \
echo "codecgen - unsafe ... " && \
codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} $zfin && \
echo "msgp ... " && \
msgp -tests=false -pkg=codec -o=values_msgp${zsfx} -file=$zfin && \
# codecgen creates some temporary files in the directory (main, pkg).
# Consequently, we should start msgp and ffjson first, and also put a small time latency before
# starting codecgen.
# Without this, ffjson chokes on one of the temporary files from codecgen.
if [[ $zexternal == "1" ]]
then
echo "ffjson ... " && \
ffjson -w values_ffjson${zsfx} $zfin && \
ffjson -w values_ffjson${zsfx} $zfin &
zzzIdFF=$!
echo "msgp ... " && \
msgp -tests=false -o=values_msgp${zsfx} -file=$zfin &
zzzIdMsgp=$!

sleep 1 # give ffjson and msgp some buffer time. see note above.
fi

echo "codecgen - !unsafe ... " && \
codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} -d 19780 $zfin &
zzzIdC=$!
echo "codecgen - unsafe ... " && \
codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} -d 19781 $zfin &
zzzIdCU=$!
wait $zzzIdC $zzzIdCU $zzzIdMsgp $zzzIdFF && \
# remove (M|Unm)arshalJSON implementations, so they don't conflict with encoding/json bench \
sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx} && \
if [[ $zexternal == "1" ]]
then
sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx}
fi && \
echo "generators done!" && \
true
fi

@ -160,11 +165,12 @@ _codegenerators() {
# _init reads the arguments and sets up the flags
_init() {
OPTIND=1
while getopts "fb" flag
while getopts "fbx" flag
do
case "x$flag" in
'xf') zforce=1;;
'xb') zbak=1;;
'xx') zexternal=1;;
*) echo "prebuild.sh accepts [-fb] only"; return 1;;
esac
done
115 vendor/src/github.com/ugorji/go/codec/simple.go vendored
@ -3,7 +3,10 @@

package codec

import "math"
import (
"math"
"reflect"
)

const (
_ uint8 = iota

@ -26,12 +29,12 @@ const (
)

type simpleEncDriver struct {
noBuiltInTypes
encNoSeparator
e *Encoder
h *SimpleHandle
w encWriter
noBuiltInTypes
b [8]byte
encNoSeparator
}

func (e *simpleEncDriver) EncodeNil() {

@ -150,7 +153,6 @@ type simpleDecDriver struct {
h *SimpleHandle
r decReader
bdRead bool
bdType valueType
bd byte
br bool // bytes reader
noBuiltInTypes

@ -162,28 +164,27 @@ type simpleDecDriver struct {
func (d *simpleDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}

func (d *simpleDecDriver) IsContainerType(vt valueType) bool {
switch vt {
case valueTypeNil:
return d.bd == simpleVdNil
case valueTypeBytes:
const x uint8 = simpleVdByteArray
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeString:
const x uint8 = simpleVdString
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeArray:
const x uint8 = simpleVdArray
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeMap:
const x uint8 = simpleVdMap
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if d.bd == simpleVdNil {
return valueTypeNil
} else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
return valueTypeBytes
} else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
return valueTypeString
} else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
return valueTypeArray
} else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return false // "unreachable"
return valueTypeUnset
}

func (d *simpleDecDriver) TryDecodeAsNil() bool {

@ -407,59 +408,59 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
return
}

func (d *simpleDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
func (d *simpleDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}

n := &d.d.n
var decodeFurther bool

switch d.bd {
case simpleVdNil:
vt = valueTypeNil
n.v = valueTypeNil
case simpleVdFalse:
vt = valueTypeBool
v = false
n.v = valueTypeBool
n.b = false
case simpleVdTrue:
vt = valueTypeBool
v = true
n.v = valueTypeBool
n.b = true
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
if d.h.SignedInteger {
vt = valueTypeInt
v = d.DecodeInt(64)
n.v = valueTypeInt
n.i = d.DecodeInt(64)
} else {
vt = valueTypeUint
v = d.DecodeUint(64)
n.v = valueTypeUint
n.u = d.DecodeUint(64)
}
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
vt = valueTypeInt
v = d.DecodeInt(64)
n.v = valueTypeInt
n.i = d.DecodeInt(64)
case simpleVdFloat32:
vt = valueTypeFloat
v = d.DecodeFloat(true)
n.v = valueTypeFloat
n.f = d.DecodeFloat(true)
case simpleVdFloat64:
vt = valueTypeFloat
v = d.DecodeFloat(false)
n.v = valueTypeFloat
n.f = d.DecodeFloat(false)
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
vt = valueTypeString
v = d.DecodeString()
n.v = valueTypeString
n.s = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
vt = valueTypeBytes
v = d.DecodeBytes(nil, false, false)
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
vt = valueTypeExt
n.v = valueTypeExt
l := d.decLen()
var re RawExt
re.Tag = uint64(d.r.readn1())
re.Data = d.r.readx(l)
v = &re
n.u = uint64(d.r.readn1())
n.l = d.r.readx(l)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
vt = valueTypeArray
n.v = valueTypeArray
decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
vt = valueTypeMap
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
return
}

if !decodeFurther {

@ -493,6 +494,10 @@ type SimpleHandle struct {
binaryEncodingType
}

func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}

func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
return &simpleEncDriver{e: e, w: e.w, h: h}
}

@ -501,5 +506,13 @@ func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}

func (e *simpleEncDriver) reset() {
e.w = e.e.w
}

func (d *simpleDecDriver) reset() {
d.r = d.d.r
}

var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
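As with the msgpack handle, a hedged round-trip sketch for the Simple format follows. It is not part of this diff; it assumes the usual public entry points of the vendored codec package, and the `point` type is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type point struct {
	X, Y int
}

func main() {
	// The zero value of SimpleHandle is usable; fields such as SignedInteger
	// (referenced in DecodeNaked above) only tweak how integers decode.
	var sh codec.SimpleHandle

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &sh).Encode(point{X: 3, Y: 4}); err != nil {
		panic(err)
	}

	var p point
	if err := codec.NewDecoderBytes(buf, &sh).Decode(&p); err != nil {
		panic(err)
	}
	fmt.Println(p) // {3 4}
}
```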
@ -5,8 +5,9 @@
# So it can process them (so we don't have to checkin the files).

# Ensure msgpack-python and cbor are installed first, using:
# pip install --user msgpack-python
# pip install --user cbor
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor

import cbor, msgpack, msgpackrpc, sys, os, threading
74 vendor/src/github.com/ugorji/go/codec/tests.sh vendored Executable file
@ -0,0 +1,74 @@
#!/bin/bash

# Run all the different permutations of all the tests.
# This helps ensure that nothing gets broken.

_run() {
# 1. VARIATIONS: regular (t), canonical (c), IO R/W (i),
#    binc-nosymbols (n), struct2array (s), intern string (e),
# 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f)
# 3. OPTIONS: verbose (v), reset (z), must (m),
#
# Use combinations of mode to get exactly what you want,
# and then pass the variations you need.

ztags=""
zargs=""
local OPTIND
OPTIND=1
while getopts "xurtcinsvgzmef" flag
do
case "x$flag" in
'xr') ;;
'xf') ztags="$ztags notfastpath" ;;
'xg') ztags="$ztags codecgen" ;;
'xx') ztags="$ztags x" ;;
'xu') ztags="$ztags unsafe" ;;
'xv') zargs="$zargs -tv" ;;
'xz') zargs="$zargs -tr" ;;
'xm') zargs="$zargs -tm" ;;
*) ;;
esac
done
# shift $((OPTIND-1))
printf '............. TAGS: %s .............\n' "$ztags"
# echo ">>>>>>> TAGS: $ztags"

OPTIND=1
while getopts "xurtcinsvgzmef" flag
do
case "x$flag" in
'xt') printf ">>>>>>> REGULAR    : "; go test "-tags=$ztags" $zargs ; sleep 2 ;;
'xc') printf ">>>>>>> CANONICAL  : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;;
'xi') printf ">>>>>>> I/O        : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;;
'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;;
'xs') printf ">>>>>>> TO_ARRAY   : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;;
'xe') printf ">>>>>>> INTERN     : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;;
*) ;;
esac
done
shift $((OPTIND-1))

OPTIND=1
}

# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
if [[ "x$@" = "x" ]]; then
# All: r, x, g, gu
_run "-rtcinsm"  # regular
_run "-rtcinsmz" # regular with reset
_run "-rtcinsmf" # regular with no fastpath (notfastpath)
_run "-xtcinsm"  # external
_run "-gxtcinsm" # codecgen: requires external
_run "-gxutcinsm" # codecgen + unsafe
elif [[ "x$@" = "x-Z" ]]; then
# Regular
_run "-rtcinsm"  # regular
_run "-rtcinsmz" # regular with reset
elif [[ "x$@" = "x-F" ]]; then
# regular with notfastpath
_run "-rtcinsmf"  # regular
_run "-rtcinsmzf" # regular with reset
else
_run "$@"
fi
29 vendor/src/github.com/ugorji/go/codec/time.go vendored
@ -4,6 +4,7 @@
package codec

import (
"fmt"
"time"
)

@ -11,6 +12,34 @@ var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
)

type timeExt struct{}

func (x timeExt) WriteExt(v interface{}) (bs []byte) {
switch v2 := v.(type) {
case time.Time:
bs = encodeTime(v2)
case *time.Time:
bs = encodeTime(*v2)
default:
panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
}
return
}
func (x timeExt) ReadExt(v interface{}, bs []byte) {
tt, err := decodeTime(bs)
if err != nil {
panic(err)
}
*(v.(*time.Time)) = tt
}

func (x timeExt) ConvertExt(v interface{}) interface{} {
return x.WriteExt(v)
}
func (x timeExt) UpdateExt(v interface{}, src interface{}) {
x.ReadExt(v, src.([]byte))
}

// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
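The built-in timeExt above is one concrete bytes extension; the SetBytesExt method added in this diff lets applications register their own. Below is a hedged sketch of such a registration. It is not part of the vendored code: the binaryTimeExt type, the tag value 1, and the MarshalBinary-based encoding are illustrative choices, and it assumes BytesExt has exactly the WriteExt/ReadExt shape that timeExt demonstrates.

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"github.com/ugorji/go/codec"
)

// binaryTimeExt encodes time.Time via its standard binary marshalling,
// mirroring the WriteExt/ReadExt pair used by the package's own timeExt.
type binaryTimeExt struct{}

func (binaryTimeExt) WriteExt(v interface{}) []byte {
	bs, err := v.(time.Time).MarshalBinary()
	if err != nil {
		panic(err)
	}
	return bs
}

func (binaryTimeExt) ReadExt(dst interface{}, src []byte) {
	if err := dst.(*time.Time).UnmarshalBinary(src); err != nil {
		panic(err)
	}
}

func main() {
	var mh codec.MsgpackHandle
	mh.WriteExt = true // emit tagged extension values on the wire
	// Tag 1 is an arbitrary, application-chosen extension tag.
	if err := mh.SetBytesExt(reflect.TypeOf(time.Time{}), 1, binaryTimeExt{}); err != nil {
		panic(err)
	}

	now := time.Now().UTC()
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(now); err != nil {
		panic(err)
	}

	var out time.Time
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Equal(now))
}
```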