Merge pull request #16088 from jfrazelle/better-canonical-json-package

replace canonical json package

Commit 479fcbb1d0, 31 changed files with 3833 additions and 781 deletions
@@ -37,9 +37,9 @@ clone git github.com/hashicorp/consul v0.5.2
clone git github.com/docker/distribution ec87e9b6971d831f0eff752ddb54fb64693e51cd # docker/1.8 branch
clone git github.com/vbatts/tar-split v0.9.6

clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
clone git github.com/docker/notary ac05822d7d71ef077df3fc24f506672282a1feea
clone git github.com/endophage/gotuf 9bcdad0308e34a49f38448b8ad436ad8860825ce
clone git github.com/jfrazelle/go 6e461eb70cb4187b41a84e9a567d7137bdbe0f16
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c

clone git github.com/opencontainers/runc v0.0.3 # libcontainer
@@ -32,3 +32,28 @@ func (cl *memChangelist) Clear(archive string) error {
func (cl *memChangelist) Close() error {
    return nil
}

func (cl *memChangelist) NewIterator() (ChangeIterator, error) {
    return &MemChangeListIterator{index: 0, collection: cl.changes}, nil
}

// MemChangeListIterator is a concrete instance of ChangeIterator
type MemChangeListIterator struct {
    index      int
    collection []Change // Same type as memChangeList.changes
}

// Next returns the next Change
func (m *MemChangeListIterator) Next() (item Change, err error) {
    if m.index >= len(m.collection) {
        return nil, IteratorBoundsError(m.index)
    }
    item = m.collection[m.index]
    m.index++
    return item, err
}

// HasNext indicates whether the iterator is exhausted
func (m *MemChangeListIterator) HasNext() bool {
    return m.index < len(m.collection)
}
@@ -28,30 +28,51 @@ func NewFileChangelist(dir string) (*FileChangelist, error) {
    return &FileChangelist{dir: dir}, nil
}

// getFileNames reads directory, filtering out child directories
func getFileNames(dirName string) ([]os.FileInfo, error) {
    var dirListing, fileInfos []os.FileInfo
    dir, err := os.Open(dirName)
    if err != nil {
        return fileInfos, err
    }
    defer dir.Close()
    dirListing, err = dir.Readdir(0)
    if err != nil {
        return fileInfos, err
    }
    for _, f := range dirListing {
        if f.IsDir() {
            continue
        }
        fileInfos = append(fileInfos, f)
    }
    return fileInfos, nil
}

// Read a JSON formatted file from disk; convert to TufChange struct
func unmarshalFile(dirname string, f os.FileInfo) (*TufChange, error) {
    c := &TufChange{}
    raw, err := ioutil.ReadFile(path.Join(dirname, f.Name()))
    if err != nil {
        return c, err
    }
    err = json.Unmarshal(raw, c)
    if err != nil {
        return c, err
    }
    return c, nil
}

// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
    var changes []Change
    dir, err := os.Open(cl.dir)
    if err != nil {
        return changes
    }
    defer dir.Close()
    fileInfos, err := dir.Readdir(0)
    fileInfos, err := getFileNames(cl.dir)
    if err != nil {
        return changes
    }
    sort.Sort(fileChanges(fileInfos))
    for _, f := range fileInfos {
        if f.IsDir() {
            continue
        }
        raw, err := ioutil.ReadFile(path.Join(cl.dir, f.Name()))
        if err != nil {
            logrus.Warn(err.Error())
            continue
        }
        c := &TufChange{}
        err = json.Unmarshal(raw, c)
        c, err := unmarshalFile(cl.dir, f)
        if err != nil {
            logrus.Warn(err.Error())
            continue
@@ -94,6 +115,47 @@ func (cl FileChangelist) Close() error {
    return nil
}

// NewIterator creates an iterator from FileChangelist
func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
    fileInfos, err := getFileNames(cl.dir)
    if err != nil {
        return &FileChangeListIterator{}, err
    }
    sort.Sort(fileChanges(fileInfos))
    return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
}

// IteratorBoundsError is an Error type used by Next()
type IteratorBoundsError int

// Error implements the Error interface
func (e IteratorBoundsError) Error() string {
    return fmt.Sprintf("Iterator index (%d) out of bounds", e)
}

// FileChangeListIterator is a concrete instance of ChangeIterator
type FileChangeListIterator struct {
    index      int
    dirname    string
    collection []os.FileInfo
}

// Next returns the next Change in the FileChangeList
func (m *FileChangeListIterator) Next() (item Change, err error) {
    if m.index >= len(m.collection) {
        return nil, IteratorBoundsError(m.index)
    }
    f := m.collection[m.index]
    m.index++
    item, err = unmarshalFile(m.dirname, f)
    return
}

// HasNext indicates whether iterator is exhausted
func (m *FileChangeListIterator) HasNext() bool {
    return m.index < len(m.collection)
}

type fileChanges []os.FileInfo

// Len returns the length of a file change list
@@ -18,6 +18,10 @@ type Changelist interface {
    // Close syncronizes any pending writes to the underlying
    // storage and closes the file/connection
    Close() error

    // NewIterator returns an iterator for walking through the list
    // of changes currently stored
    NewIterator() (ChangeIterator, error)
}

const (

@@ -57,3 +61,10 @@ type Change interface {
    // action, it will be nil.
    Content() []byte
}

// ChangeIterator is the interface for iterating across collections of
// TUF Change items
type ChangeIterator interface {
    Next() (Change, error)
    HasNext() bool
}
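The Changelist and ChangeIterator interfaces above are all a caller needs to walk pending changes one at a time instead of materializing the whole list. A minimal sketch of such a caller, written as if it lived in the same changelist package; the dumpChanges helper is hypothetical and only uses the NewIterator, HasNext, Next, Scope and Content methods shown in this diff:

    package changelist

    import "fmt"

    // dumpChanges walks any Changelist implementation through its iterator.
    func dumpChanges(cl Changelist) error {
        it, err := cl.NewIterator()
        if err != nil {
            return err
        }
        for it.HasNext() {
            c, err := it.Next()
            if err != nil {
                // e.g. the file-backed iterator can fail while unmarshaling a change
                return err
            }
            fmt.Printf("scope=%s content=%d bytes\n", c.Scope(), len(c.Content()))
        }
        return nil
    }

applyChangelist in the client helpers below follows the same HasNext/Next shape.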
@@ -173,6 +173,9 @@ func (r *NotaryRepository) Initialize(uCryptoService *cryptoservice.UnlockedCryp

    // All the timestamp keys are generated by the remote server.
    remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
    if err != nil {
        return err
    }
    rawTSKey, err := remote.GetKey("timestamp")
    if err != nil {
        return err

@@ -456,17 +459,20 @@ func (r *NotaryRepository) bootstrapRepo() error {
    if err != nil {
        return err
    }
    root := &data.Signed{}
    root := &data.SignedRoot{}
    err = json.Unmarshal(rootJSON, root)
    if err != nil {
        return err
    }
    tufRepo.SetRoot(root)
    err = tufRepo.SetRoot(root)
    if err != nil {
        return err
    }
    targetsJSON, err := r.fileStore.GetMeta("targets", 0)
    if err != nil {
        return err
    }
    targets := &data.Signed{}
    targets := &data.SignedTargets{}
    err = json.Unmarshal(targetsJSON, targets)
    if err != nil {
        return err

@@ -476,7 +482,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
    if err != nil {
        return err
    }
    snapshot := &data.Signed{}
    snapshot := &data.SignedSnapshot{}
    err = json.Unmarshal(snapshotJSON, snapshot)
    if err != nil {
        return err

@@ -557,6 +563,9 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
            return nil, store.ErrMetaNotFound{}
        }
    }
    // can't just unmarshal into SignedRoot because validate root
    // needs the root.Signed field to still be []byte for signature
    // validation
    root := &data.Signed{}
    err = json.Unmarshal(rootJSON, root)
    if err != nil {

@@ -571,7 +580,11 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
    kdb := keys.NewDB()
    r.tufRepo = tuf.NewTufRepo(kdb, r.cryptoService)

    err = r.tufRepo.SetRoot(root)
    signedRoot, err := data.RootFromSigned(root)
    if err != nil {
        return nil, err
    }
    err = r.tufRepo.SetRoot(signedRoot)
    if err != nil {
        return nil, err
    }
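bootstrapRepo above now unmarshals each locally cached metadata file straight into its typed wrapper (SignedRoot, SignedTargets, SignedSnapshot) and checks the error from SetRoot, while bootstrapClient deliberately keeps the generic data.Signed and only converts it with data.RootFromSigned after validation, because the raw bytes of the signed section are what the signatures cover. A self-contained sketch of that ordering; the envelope and rootPayload types, their field names and the sample JSON are all invented for illustration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // envelope mirrors the shape of data.Signed: the payload stays raw so the
    // exact signed bytes are still available for signature verification.
    type envelope struct {
        Signed     json.RawMessage `json:"signed"`
        Signatures []string        `json:"signatures"`
    }

    // rootPayload is a stand-in for the typed SignedRoot contents.
    type rootPayload struct {
        Type    string `json:"_type"`
        Version int    `json:"version"`
    }

    func main() {
        raw := []byte(`{"signed":{"_type":"Root","version":7},"signatures":["sig-over-signed-bytes"]}`)

        var env envelope
        if err := json.Unmarshal(raw, &env); err != nil {
            panic(err)
        }
        // 1) verify signatures over env.Signed (the untouched bytes) ...
        fmt.Printf("bytes to verify: %s\n", env.Signed)

        // 2) ... and only then decode the typed view, as RootFromSigned does.
        var root rootPayload
        if err := json.Unmarshal(env.Signed, &root); err != nil {
            panic(err)
        }
        fmt.Printf("typed root: %+v\n", root)
    }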
@@ -26,9 +26,16 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor
}

func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error {
    changes := cl.List()
    logrus.Debugf("applying %d changes", len(changes))
    for _, c := range changes {
    it, err := cl.NewIterator()
    if err != nil {
        return err
    }
    index := 0
    for it.HasNext() {
        c, err := it.Next()
        if err != nil {
            return err
        }
        switch c.Scope() {
        case changelist.ScopeTargets:
            err := applyTargetsChange(repo, c)

@@ -38,7 +45,9 @@ func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error {
        default:
            logrus.Debug("scope not supported: ", c.Scope())
        }
        index++
    }
    logrus.Debugf("applied %d change(s)", index)
    return nil
}
@@ -16,6 +16,11 @@ import (
    "github.com/docker/notary/trustmanager"
)

const (
    zipSymlinkAttr = 0xA1ED0000
    zipMadeByUNIX  = 3 << 8
)

var (
    // ErrNoValidPrivateKey is returned if a key being imported doesn't
    // look like a private key

@@ -126,15 +131,32 @@ func moveKeys(oldKeyStore, newKeyStore *trustmanager.KeyFileStore) error {
        }
    }

    // Recreate symlinks
    for _, relKeyPath := range oldKeyStore.ListFiles(true) {
        fullKeyPath := filepath.Join(oldKeyStore.BaseDir(), relKeyPath)

        fi, err := os.Lstat(fullKeyPath)
        if err != nil {
            return err
        }

        if (fi.Mode() & os.ModeSymlink) != 0 {
            target, err := os.Readlink(fullKeyPath)
            if err != nil {
                return err
            }
            os.Symlink(target, filepath.Join(newKeyStore.BaseDir(), relKeyPath))
        }
    }

    return nil
}

func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore, subDir string) error {
    // List all files but no symlinks
    for _, relKeyPath := range newKeyStore.ListFiles(false) {
    for _, relKeyPath := range newKeyStore.ListFiles(true) {
        fullKeyPath := filepath.Join(newKeyStore.BaseDir(), relKeyPath)

        fi, err := os.Stat(fullKeyPath)
        fi, err := os.Lstat(fullKeyPath)
        if err != nil {
            return err
        }

@@ -145,6 +167,27 @@ func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileSt
        }

        infoHeader.Name = filepath.Join(subDir, relKeyPath)

        // Is this a symlink? If so, encode properly in the zip file.
        if (fi.Mode() & os.ModeSymlink) != 0 {
            infoHeader.CreatorVersion = zipMadeByUNIX
            infoHeader.ExternalAttrs = zipSymlinkAttr

            zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
            if err != nil {
                return err
            }

            target, err := os.Readlink(fullKeyPath)
            if err != nil {
                return err
            }

            // Write relative path
            if _, err = zipFileEntryWriter.Write([]byte(target)); err != nil {
                return err
            }
        } else {
            zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
            if err != nil {
                return err

@@ -154,10 +197,12 @@ func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileSt
            if err != nil {
                return err
            }

            if _, err = zipFileEntryWriter.Write(fileContents); err != nil {
                return err
            }
        }
    }

    return nil
}
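The two constants above are what archive/zip needs to round-trip a symlink: the high byte of CreatorVersion marks the entry as made on UNIX (3), and ExternalAttrs carries the Unix mode bits in its upper 16 bits (0xA1ED is 0120755, i.e. a symlink with 0755 permissions), while the entry body holds the link target. A small self-contained sketch of the same encoding and of the detection check used by IsZipSymlink below; the buffer, entry names and link target are illustrative only:

    package main

    import (
        "archive/zip"
        "bytes"
        "fmt"
        "io/ioutil"
    )

    const (
        zipSymlinkAttr = 0xA1ED0000
        zipMadeByUNIX  = 3 << 8
    )

    func main() {
        var buf bytes.Buffer
        zw := zip.NewWriter(&buf)

        // Encode a symlink: mark the entry as UNIX-made, store the mode bits
        // in ExternalAttrs, and write the link target as the entry body.
        hdr := &zip.FileHeader{Name: "keys/alias-to-key"}
        hdr.CreatorVersion = zipMadeByUNIX
        hdr.ExternalAttrs = zipSymlinkAttr
        w, err := zw.CreateHeader(hdr)
        if err != nil {
            panic(err)
        }
        if _, err := w.Write([]byte("../root_keys/abc123.key")); err != nil {
            panic(err)
        }
        if err := zw.Close(); err != nil {
            panic(err)
        }

        // Decode: the same check as IsZipSymlink, then read the target back.
        zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
        if err != nil {
            panic(err)
        }
        for _, f := range zr.File {
            isLink := f.CreatorVersion&0xFF00 == zipMadeByUNIX && f.ExternalAttrs == zipSymlinkAttr
            rc, err := f.Open()
            if err != nil {
                panic(err)
            }
            target, _ := ioutil.ReadAll(rc)
            rc.Close()
            fmt.Printf("%s symlink=%v target=%s\n", f.Name, isLink, target)
        }
    }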
@@ -197,7 +242,6 @@ func (km *KeyStoreManager) ExportAllKeys(dest io.Writer, newPassphraseRetriever
        return err
    }
    if err := addKeysToArchive(zipWriter, tempNonRootKeyStore, privNonRootKeysSubdir); err != nil {

        return err
    }

@@ -206,7 +250,13 @@ func (km *KeyStoreManager) ExportAllKeys(dest io.Writer, newPassphraseRetriever
    return nil
}

// ImportKeysZip imports keys from a zip file provided as an io.ReaderAt. The
// IsZipSymlink returns true if the file described by the zip file header is a
// symlink.
func IsZipSymlink(f *zip.File) bool {
    return f.CreatorVersion&0xFF00 == zipMadeByUNIX && f.ExternalAttrs == zipSymlinkAttr
}

// ImportKeysZip imports keys from a zip file provided as an zip.Reader. The
// keys in the root_keys directory are left encrypted, but the other keys are
// decrypted with the specified passphrase.
func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {

@@ -239,6 +289,13 @@ func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {
        // Note that using / as a separator is okay here - the zip
        // package guarantees that the separator will be /
        if strings.HasPrefix(fNameTrimmed, rootKeysPrefix) {
            if IsZipSymlink(f) {
                newName := filepath.Join(km.rootKeyStore.BaseDir(), strings.TrimPrefix(f.Name, rootKeysPrefix))
                err = os.Symlink(string(fileBytes), newName)
                if err != nil {
                    return err
                }
            } else {
                if err = checkRootKeyIsEncrypted(fileBytes); err != nil {
                    rc.Close()
                    return err

@@ -246,10 +303,19 @@ func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {
                // Root keys are preserved without decrypting
                keyName := strings.TrimPrefix(fNameTrimmed, rootKeysPrefix)
                newRootKeys[keyName] = fileBytes
            }
        } else if strings.HasPrefix(fNameTrimmed, nonRootKeysPrefix) {
            if IsZipSymlink(f) {
                newName := filepath.Join(km.nonRootKeyStore.BaseDir(), strings.TrimPrefix(f.Name, nonRootKeysPrefix))
                err = os.Symlink(string(fileBytes), newName)
                if err != nil {
                    return err
                }
            } else {
                // Nonroot keys are preserved without decrypting
                keyName := strings.TrimPrefix(fNameTrimmed, nonRootKeysPrefix)
                newNonRootKeys[keyName] = fileBytes
            }
        } else {
            // This path inside the zip archive doesn't look like a
            // root key, non-root key, or alias. To avoid adding a file

@@ -281,7 +347,6 @@ func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error {
func moveKeysByGUN(oldKeyStore, newKeyStore *trustmanager.KeyFileStore, gun string) error {
    // List all files but no symlinks
    for relKeyPath := range oldKeyStore.ListKeys() {

        // Skip keys that aren't associated with this GUN
        if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) {
            continue
|
|||
}
|
||||
|
||||
if len(retPass) < 8 {
|
||||
fmt.Fprintln(out, "Please use a password manager to generate and store a good random passphrase.")
|
||||
fmt.Fprintln(out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
|
||||
return "", false, ErrTooShort
|
||||
}
|
||||
|
||||
|
|
|
@ -189,7 +189,7 @@ func (f *SimpleFileStore) genFileName(name string) string {
|
|||
return fmt.Sprintf("%s.%s", name, f.fileExt)
|
||||
}
|
||||
|
||||
// Link creates a symlink beetween the ID of the certificate used by a repository
|
||||
// Link creates a symlink between the ID of the certificate used by a repository
|
||||
// and the ID of the root key that is being used.
|
||||
// We use full path for the source and local for the destination to use relative
|
||||
// path for the symlink
|
||||
|
|
|
@ -223,7 +223,12 @@ func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error {
|
|||
// This will cause keyDB to get updated, overwriting any keyIDs associated
|
||||
// with the roles in root.json
|
||||
logrus.Debug("updating known root roles and keys")
|
||||
err = c.local.SetRoot(s)
|
||||
root, err := data.RootFromSigned(s)
|
||||
if err != nil {
|
||||
logrus.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
err = c.local.SetRoot(root)
|
||||
if err != nil {
|
||||
logrus.Error(err.Error())
|
||||
return err
|
||||
|
@ -298,7 +303,11 @@ func (c *Client) downloadTimestamp() error {
|
|||
if download {
|
||||
c.cache.SetMeta(role, raw)
|
||||
}
|
||||
c.local.SetTimestamp(s)
|
||||
ts, err := data.TimestampFromSigned(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.local.SetTimestamp(ts)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -366,7 +375,11 @@ func (c *Client) downloadSnapshot() error {
|
|||
return err
|
||||
}
|
||||
logrus.Debug("successfully verified snapshot")
|
||||
c.local.SetSnapshot(s)
|
||||
snap, err := data.SnapshotFromSigned(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.local.SetSnapshot(snap)
|
||||
if download {
|
||||
err = c.cache.SetMeta(role, raw)
|
||||
if err != nil {
|
||||
|
@ -393,7 +406,11 @@ func (c *Client) downloadTargets(role string) error {
|
|||
logrus.Error("Error getting targets file:", err)
|
||||
return err
|
||||
}
|
||||
err = c.local.SetTargets(role, s)
|
||||
t, err := data.TargetsFromSigned(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.local.SetTargets(role, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"encoding/hex"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
cjson "github.com/tent/canonical-json-go"
|
||||
"github.com/jfrazelle/go/canonical/json"
|
||||
)
|
||||
|
||||
type Key interface {
|
||||
|
@ -57,7 +57,7 @@ func (k TUFKey) Algorithm() KeyAlgorithm {
|
|||
func (k *TUFKey) ID() string {
|
||||
if k.id == "" {
|
||||
pubK := NewPublicKey(k.Algorithm(), k.Public())
|
||||
data, err := cjson.Marshal(&pubK)
|
||||
data, err := json.MarshalCanonical(&pubK)
|
||||
if err != nil {
|
||||
logrus.Error("Error generating key ID:", err)
|
||||
}
|
||||
|
|
|
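This is the heart of the pull request: every call site that previously serialized with cjson.Marshal from tent/canonical-json-go now calls MarshalCanonical from the vendored github.com/jfrazelle/go/canonical/json package. Canonical output (object keys sorted, no insignificant whitespace) matters because the resulting bytes are hashed into key IDs and signed, so the same value must always serialize to identical bytes. A small self-contained sketch; the pub struct, its field tags and the import alias are invented for illustration:

    package main

    import (
        "crypto/sha256"
        "fmt"

        canonical "github.com/jfrazelle/go/canonical/json"
    )

    type pub struct {
        Algorithm string `json:"keytype"`
        Value     []byte `json:"keyval"`
    }

    func main() {
        k := pub{Algorithm: "ed25519", Value: []byte{1, 2, 3}}
        // MarshalCanonical emits the same bytes for the same value every time,
        // so hashing the output yields a stable identifier, which is what
        // TUFKey.ID relies on above.
        b, err := canonical.MarshalCanonical(&k)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n%x\n", b, sha256.Sum256(b))
    }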
@@ -1,10 +1,9 @@
package data

import (
    "encoding/json"
    "time"

    cjson "github.com/tent/canonical-json-go"
    "github.com/jfrazelle/go/canonical/json"
)

type SignedRoot struct {

@@ -58,7 +57,7 @@ func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent b
}

func (r SignedRoot) ToSigned() (*Signed, error) {
    s, err := cjson.Marshal(r.Signed)
    s, err := json.MarshalCanonical(r.Signed)
    if err != nil {
        return nil, err
    }
@@ -2,11 +2,10 @@ package data

import (
    "bytes"
    "encoding/json"
    "time"

    "github.com/Sirupsen/logrus"
    cjson "github.com/tent/canonical-json-go"
    "github.com/jfrazelle/go/canonical/json"
)

type SignedSnapshot struct {

@@ -61,7 +60,7 @@ func (sp *SignedSnapshot) hashForRole(role string) []byte {
}

func (sp SignedSnapshot) ToSigned() (*Signed, error) {
    s, err := cjson.Marshal(sp.Signed)
    s, err := json.MarshalCanonical(sp.Signed)
    if err != nil {
        return nil, err
    }
@@ -3,10 +3,8 @@ package data
import (
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "time"

    cjson "github.com/tent/canonical-json-go"
    "github.com/jfrazelle/go/canonical/json"
)

type SignedTargets struct {

@@ -16,9 +14,7 @@ type SignedTargets struct {
}

type Targets struct {
    Type string `json:"_type"`
    Version int `json:"version"`
    Expires time.Time `json:"expires"`
    SignedCommon
    Targets Files `json:"targets"`
    Delegations Delegations `json:"delegations,omitempty"`
}

@@ -27,9 +23,11 @@ func NewTargets() *SignedTargets {
    return &SignedTargets{
        Signatures: make([]Signature, 0),
        Signed: Targets{
            SignedCommon: SignedCommon{
                Type: TUFTypes["targets"],
                Version: 0,
                Expires: DefaultExpires("targets"),
            },
            Targets: make(Files),
            Delegations: *NewDelegations(),
        },

@@ -86,7 +84,7 @@ func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
}

func (t SignedTargets) ToSigned() (*Signed, error) {
    s, err := cjson.Marshal(t.Signed)
    s, err := json.MarshalCanonical(t.Signed)
    if err != nil {
        return nil, err
    }
@@ -2,10 +2,9 @@ package data

import (
    "bytes"
    "encoding/json"
    "time"

    cjson "github.com/tent/canonical-json-go"
    "github.com/jfrazelle/go/canonical/json"
)

type SignedTimestamp struct {

@@ -44,7 +43,7 @@ func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
}

func (ts SignedTimestamp) ToSigned() (*Signed, error) {
    s, err := cjson.Marshal(ts.Signed)
    s, err := json.MarshalCanonical(ts.Signed)
    if err != nil {
        return nil, err
    }
@@ -3,7 +3,6 @@ package data
import (
    "crypto/sha256"
    "crypto/sha512"
    "encoding/json"
    "fmt"
    "hash"
    "io"

@@ -12,6 +11,7 @@ import (
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/jfrazelle/go/canonical/json"
)

type KeyAlgorithm string

@@ -85,6 +85,17 @@ type Signed struct {
    Signatures []Signature `json:"signatures"`
}

type SignedCommon struct {
    Type    string    `json:"_type"`
    Expires time.Time `json:"expires"`
    Version int       `json:"version"`
}

type SignedMeta struct {
    Signed     SignedCommon `json:"signed"`
    Signatures []Signature  `json:"signatures"`
}

type Signature struct {
    KeyID  string       `json:"keyid"`
    Method SigAlgorithm `json:"method"`
@@ -1,7 +1,6 @@
package signed

import (
    "encoding/json"
    "errors"
    "strings"
    "time"

@@ -9,7 +8,7 @@ import (
    "github.com/Sirupsen/logrus"
    "github.com/endophage/gotuf/data"
    "github.com/endophage/gotuf/keys"
    "github.com/tent/canonical-json-go"
    "github.com/jfrazelle/go/canonical/json"
)

var (

@@ -21,12 +20,6 @@ var (
    ErrWrongType = errors.New("tuf: meta file has wrong type")
)

type signedMeta struct {
    Type    string    `json:"_type"`
    Expires time.Time `json:"expires"`
    Version int       `json:"version"`
}

// VerifyRoot checks if a given root file is valid against a known set of keys.
// Threshold is always assumed to be 1
func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey) error {

@@ -38,7 +31,7 @@ func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey)
    if err := json.Unmarshal(s.Signed, &decoded); err != nil {
        return err
    }
    msg, err := cjson.Marshal(decoded)
    msg, err := json.MarshalCanonical(decoded)
    if err != nil {
        return err
    }

@@ -76,7 +69,7 @@ func Verify(s *data.Signed, role string, minVersion int, db *keys.KeyDB) error {
}

func verifyMeta(s *data.Signed, role string, minVersion int) error {
    sm := &signedMeta{}
    sm := &data.SignedCommon{}
    if err := json.Unmarshal(s.Signed, sm); err != nil {
        return err
    }

@@ -117,7 +110,7 @@ func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error {
    if err := json.Unmarshal(s.Signed, &decoded); err != nil {
        return err
    }
    msg, err := cjson.Marshal(decoded)
    msg, err := json.MarshalCanonical(decoded)
    if err != nil {
        return err
    }
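VerifySignatures shows the pattern the canonical encoder exists for: the verifier unmarshals the received Signed payload and re-marshals it canonically, so it checks the signature against exactly the bytes the signer produced, regardless of how the JSON was formatted in transit. A self-contained sketch of that round trip; the meta struct, the sample payload and the use of the modern crypto/ed25519 package are illustrative, not part of this vendored code:

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "fmt"

        canonical "github.com/jfrazelle/go/canonical/json"
    )

    type meta struct {
        Type    string `json:"_type"`
        Version int    `json:"version"`
    }

    func main() {
        pub, priv, err := ed25519.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }

        // Signer: serialize canonically and sign those bytes.
        m := meta{Type: "Timestamp", Version: 3}
        signedBytes, err := canonical.MarshalCanonical(m)
        if err != nil {
            panic(err)
        }
        sig := ed25519.Sign(priv, signedBytes)

        // Verifier: the payload may arrive reformatted (extra whitespace,
        // different key order); decoding and re-marshaling canonically
        // recovers the exact signed bytes.
        received := []byte(`{ "version": 3, "_type": "Timestamp" }`)
        var decoded meta
        if err := canonical.Unmarshal(received, &decoded); err != nil {
            panic(err)
        }
        msg, err := canonical.MarshalCanonical(decoded)
        if err != nil {
            panic(err)
        }
        fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
    }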
@@ -3,7 +3,6 @@ package store
import (
    "database/sql"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"

@@ -12,6 +11,7 @@ import (
    logrus "github.com/Sirupsen/logrus"
    "github.com/endophage/gotuf/data"
    "github.com/endophage/gotuf/utils"
    "github.com/jfrazelle/go/canonical/json"
)

const (
@@ -14,10 +14,12 @@ import (
    "github.com/Sirupsen/logrus"
)

type ErrServerUnavailable struct{}
type ErrServerUnavailable struct {
    code int
}

func (err ErrServerUnavailable) Error() string {
    return "Unable to reach trust server at this time."
    return fmt.Sprintf("Unable to reach trust server at this time: %d.", err.code)
}

type ErrShortRead struct{}

@@ -85,13 +87,15 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode == http.StatusNotFound {
        return nil, ErrMetaNotFound{}
    } else if resp.StatusCode != http.StatusOK {
        return nil, ErrServerUnavailable{code: resp.StatusCode}
    }
    if resp.ContentLength > size {
        return nil, ErrMaliciousServer{}
    }
    logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
    if resp.StatusCode == http.StatusNotFound {
        return nil, ErrMetaNotFound{}
    }
    b := io.LimitReader(resp.Body, size)
    body, err := ioutil.ReadAll(b)
    if resp.ContentLength > 0 && int64(len(body)) < resp.ContentLength {

@@ -113,9 +117,18 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error {
    if err != nil {
        return err
    }
    _, err = s.roundTrip.RoundTrip(req)
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode == http.StatusNotFound {
        return ErrMetaNotFound{}
    } else if resp.StatusCode != http.StatusOK {
        return ErrServerUnavailable{code: resp.StatusCode}
    }
    return nil
}

func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
    url, err := s.buildMetaURL("")

@@ -140,9 +153,18 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
    if err != nil {
        return err
    }
    _, err = s.roundTrip.RoundTrip(req)
    resp, err := s.roundTrip.RoundTrip(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode == http.StatusNotFound {
        return ErrMetaNotFound{}
    } else if resp.StatusCode != http.StatusOK {
        return ErrServerUnavailable{code: resp.StatusCode}
    }
    return nil
}

func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
    var filename string

@@ -188,6 +210,12 @@ func (s HTTPStore) GetTarget(path string) (io.ReadCloser, error) {
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode == http.StatusNotFound {
        return nil, ErrMetaNotFound{}
    } else if resp.StatusCode != http.StatusOK {
        return nil, ErrServerUnavailable{code: resp.StatusCode}
    }
    return resp.Body, nil
}

@@ -205,6 +233,11 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode == http.StatusNotFound {
        return nil, ErrMetaNotFound{}
    } else if resp.StatusCode != http.StatusOK {
        return nil, ErrServerUnavailable{code: resp.StatusCode}
    }
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
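Every HTTPStore method now follows the same post-request discipline: close the response body, map 404 to ErrMetaNotFound, and map any other non-200 status to ErrServerUnavailable carrying the status code. A small sketch of that shared pattern pulled out into a helper; the checkResponse name, the standalone error types and the example URL are hypothetical, and the real code repeats the checks inline:

    package main

    import (
        "fmt"
        "net/http"
    )

    type errMetaNotFound struct{}

    func (errMetaNotFound) Error() string { return "no trust data available" }

    type errServerUnavailable struct{ code int }

    func (e errServerUnavailable) Error() string {
        return fmt.Sprintf("unable to reach trust server at this time: %d", e.code)
    }

    // checkResponse maps an HTTP status onto the store's error types.
    // The caller is still responsible for resp.Body.Close().
    func checkResponse(resp *http.Response) error {
        switch {
        case resp.StatusCode == http.StatusNotFound:
            return errMetaNotFound{}
        case resp.StatusCode != http.StatusOK:
            return errServerUnavailable{code: resp.StatusCode}
        default:
            return nil
        }
    }

    func main() {
        resp, err := http.Get("https://example.com/root.json")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(checkResponse(resp))
    }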
41  vendor/src/github.com/endophage/gotuf/tuf.go  (vendored)

@@ -258,16 +258,12 @@ func (tr *TufRepo) InitTimestamp() error {
// SetRoot parses the Signed object into a SignedRoot object, sets
// the keys and roles in the KeyDB, and sets the TufRepo.Root field
// to the SignedRoot object.
func (tr *TufRepo) SetRoot(s *data.Signed) error {
    r, err := data.RootFromSigned(s)
    if err != nil {
        return err
    }
    for _, key := range r.Signed.Keys {
func (tr *TufRepo) SetRoot(s *data.SignedRoot) error {
    for _, key := range s.Signed.Keys {
        logrus.Debug("Adding key ", key.ID())
        tr.keysDB.AddKey(key)
    }
    for roleName, role := range r.Signed.Roles {
    for roleName, role := range s.Signed.Roles {
        logrus.Debugf("Adding role %s with keys %s", roleName, strings.Join(role.KeyIDs, ","))
        baseRole, err := data.NewRole(
            roleName,

@@ -284,48 +280,35 @@ func (tr *TufRepo) SetRoot(s *data.Signed) error {
            return err
        }
    }
    tr.Root = r
    tr.Root = s
    return nil
}

// SetTimestamp parses the Signed object into a SignedTimestamp object
// and sets the TufRepo.Timestamp field.
func (tr *TufRepo) SetTimestamp(s *data.Signed) error {
    ts, err := data.TimestampFromSigned(s)
    if err != nil {
        return err
    }
    tr.Timestamp = ts
func (tr *TufRepo) SetTimestamp(s *data.SignedTimestamp) error {
    tr.Timestamp = s
    return nil
}

// SetSnapshot parses the Signed object into a SignedSnapshots object
// and sets the TufRepo.Snapshot field.
func (tr *TufRepo) SetSnapshot(s *data.Signed) error {
    snap, err := data.SnapshotFromSigned(s)
    if err != nil {
        return err
    }

    tr.Snapshot = snap
func (tr *TufRepo) SetSnapshot(s *data.SignedSnapshot) error {
    tr.Snapshot = s
    return nil
}

// SetTargets parses the Signed object into a SignedTargets object,
// reads the delegated roles and keys into the KeyDB, and sets the
// SignedTargets object agaist the role in the TufRepo.Targets map.
func (tr *TufRepo) SetTargets(role string, s *data.Signed) error {
    t, err := data.TargetsFromSigned(s)
    if err != nil {
        return err
    }
    for _, k := range t.Signed.Delegations.Keys {
func (tr *TufRepo) SetTargets(role string, s *data.SignedTargets) error {
    for _, k := range s.Signed.Delegations.Keys {
        tr.keysDB.AddKey(k)
    }
    for _, r := range t.Signed.Delegations.Roles {
    for _, r := range s.Signed.Delegations.Roles {
        tr.keysDB.AddRole(r)
    }
    tr.Targets[role] = t
    tr.Targets[role] = s
    return nil
}
@@ -3,6 +3,7 @@ package utils
import (
    "bytes"
    "crypto/sha256"
    "crypto/sha512"
    "crypto/tls"
    "fmt"
    "io"

@@ -78,3 +79,15 @@ type NoopCloser struct {
func (nc *NoopCloser) Close() error {
    return nil
}

func DoHash(alg string, d []byte) []byte {
    switch alg {
    case "sha256":
        digest := sha256.Sum256(d)
        return digest[:]
    case "sha512":
        digest := sha512.Sum512(d)
        return digest[:]
    }
    return nil
}
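DoHash gives callers one switch point for the two digest algorithms TUF metadata uses, and an unrecognized algorithm name yields nil, so callers have to check for that. A short usage sketch; the local doHash copy, the payload and the algorithm list are illustrative only:

    package main

    import (
        "crypto/sha256"
        "crypto/sha512"
        "encoding/hex"
        "fmt"
    )

    // doHash mirrors the vendored utils.DoHash shown above.
    func doHash(alg string, d []byte) []byte {
        switch alg {
        case "sha256":
            digest := sha256.Sum256(d)
            return digest[:]
        case "sha512":
            digest := sha512.Sum512(d)
            return digest[:]
        }
        return nil
    }

    func main() {
        payload := []byte(`{"_type":"Targets","version":1}`)
        for _, alg := range []string{"sha256", "sha512", "md5"} {
            sum := doHash(alg, payload)
            if sum == nil {
                fmt.Printf("%s: unsupported\n", alg)
                continue
            }
            fmt.Printf("%s: %s\n", alg, hex.EncodeToString(sum))
        }
    }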
1085  vendor/src/github.com/jfrazelle/go/canonical/json/decode.go  (vendored, new file; diff suppressed because it is too large)
1234  vendor/src/github.com/jfrazelle/go/canonical/json/encode.go  (vendored, new file; diff suppressed because it is too large)
143  vendor/src/github.com/jfrazelle/go/canonical/json/fold.go  (vendored, new file)
|
@ -0,0 +1,143 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||
kelvin = '\u212a'
|
||||
smallLongEss = '\u017f'
|
||||
)
|
||||
|
||||
// foldFunc returns one of four different case folding equivalence
|
||||
// functions, from most general (and slow) to fastest:
|
||||
//
|
||||
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||
//
|
||||
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||
// See http://play.golang.org/p/tTxjOc0OGo
|
||||
//
|
||||
// The returned function is specialized for matching against s and
|
||||
// should only be given s. It's not curried for performance reasons.
|
||||
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||
nonLetter := false
|
||||
special := false // special letter
|
||||
for _, b := range s {
|
||||
if b >= utf8.RuneSelf {
|
||||
return bytes.EqualFold
|
||||
}
|
||||
upper := b & caseMask
|
||||
if upper < 'A' || upper > 'Z' {
|
||||
nonLetter = true
|
||||
} else if upper == 'K' || upper == 'S' {
|
||||
// See above for why these letters are special.
|
||||
special = true
|
||||
}
|
||||
}
|
||||
if special {
|
||||
return equalFoldRight
|
||||
}
|
||||
if nonLetter {
|
||||
return asciiEqualFold
|
||||
}
|
||||
return simpleLetterEqualFold
|
||||
}
|
||||
|
||||
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||
// known to be all ASCII (including punctuation), but contains an 's',
|
||||
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||
// See comments on foldFunc.
|
||||
func equalFoldRight(s, t []byte) bool {
|
||||
for _, sb := range s {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
tb := t[0]
|
||||
if tb < utf8.RuneSelf {
|
||||
if sb != tb {
|
||||
sbUpper := sb & caseMask
|
||||
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||
if sbUpper != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t = t[1:]
|
||||
continue
|
||||
}
|
||||
// sb is ASCII and t is not. t must be either kelvin
|
||||
// sign or long s; sb must be s, S, k, or K.
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
switch sb {
|
||||
case 's', 'S':
|
||||
if tr != smallLongEss {
|
||||
return false
|
||||
}
|
||||
case 'k', 'K':
|
||||
if tr != kelvin {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
t = t[size:]
|
||||
|
||||
}
|
||||
if len(t) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||
// s is all ASCII (but may contain non-letters) and contains no
|
||||
// special-folding letters.
|
||||
// See comments on foldFunc.
|
||||
func asciiEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, sb := range s {
|
||||
tb := t[i]
|
||||
if sb == tb {
|
||||
continue
|
||||
}
|
||||
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||
if sb&caseMask != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||
// use when s is all ASCII letters (no underscores, etc) and also
|
||||
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||
// See comments on foldFunc.
|
||||
func simpleLetterEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, b := range s {
|
||||
if b&caseMask != t[i]&caseMask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
137  vendor/src/github.com/jfrazelle/go/canonical/json/indent.go  (vendored, new file)
|
@ -0,0 +1,137 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Compact appends to dst the JSON-encoded src with
|
||||
// insignificant space characters elided.
|
||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||
return compact(dst, src, false)
|
||||
}
|
||||
|
||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if escape && (c == '<' || c == '>' || c == '&') {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
v := scan.step(&scan, int(c))
|
||||
if v >= scanSkipSpace {
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||
dst.WriteByte('\n')
|
||||
dst.WriteString(prefix)
|
||||
for i := 0; i < depth; i++ {
|
||||
dst.WriteString(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||
// Each element in a JSON object or array begins on a new,
|
||||
// indented line beginning with prefix followed by one or more
|
||||
// copies of indent according to the indentation nesting.
|
||||
// The data appended to dst does not begin with the prefix nor
|
||||
// any indentation, and has no trailing newline, to make it
|
||||
// easier to embed inside other formatted JSON data.
|
||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
needIndent := false
|
||||
depth := 0
|
||||
for _, c := range src {
|
||||
scan.bytes++
|
||||
v := scan.step(&scan, int(c))
|
||||
if v == scanSkipSpace {
|
||||
continue
|
||||
}
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||
needIndent = false
|
||||
depth++
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
|
||||
// Emit semantically uninteresting bytes
|
||||
// (in particular, punctuation in strings) unmodified.
|
||||
if v == scanContinue {
|
||||
dst.WriteByte(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add spacing around real punctuation.
|
||||
switch c {
|
||||
case '{', '[':
|
||||
// delay indent so that empty object and array are formatted as {} and [].
|
||||
needIndent = true
|
||||
dst.WriteByte(c)
|
||||
|
||||
case ',':
|
||||
dst.WriteByte(c)
|
||||
newline(dst, prefix, indent, depth)
|
||||
|
||||
case ':':
|
||||
dst.WriteByte(c)
|
||||
dst.WriteByte(' ')
|
||||
|
||||
case '}', ']':
|
||||
if needIndent {
|
||||
// suppress indent in empty object/array
|
||||
needIndent = false
|
||||
} else {
|
||||
depth--
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
dst.WriteByte(c)
|
||||
|
||||
default:
|
||||
dst.WriteByte(c)
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
623  vendor/src/github.com/jfrazelle/go/canonical/json/scanner.go  (vendored, new file)
|
@ -0,0 +1,623 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
// JSON value parser state machine.
|
||||
// Just about at the limit of what is reasonable to write by hand.
|
||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||
// otherwise common code from the multiple scanning functions
|
||||
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
||||
//
|
||||
// This file starts with two simple examples using the scanner
|
||||
// before diving into the scanner itself.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
scan.reset()
|
||||
for _, c := range data {
|
||||
scan.bytes++
|
||||
if scan.step(scan, int(c)) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextValue splits data after the next whole JSON value,
|
||||
// returning that value and the bytes that follow it as separate slices.
|
||||
// scan is passed in for use by nextValue to avoid an allocation.
|
||||
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
||||
scan.reset()
|
||||
for i, c := range data {
|
||||
v := scan.step(scan, int(c))
|
||||
if v >= scanEnd {
|
||||
switch v {
|
||||
case scanError:
|
||||
return nil, nil, scan.err
|
||||
case scanEnd:
|
||||
return data[0:i], data[i:], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return nil, nil, scan.err
|
||||
}
|
||||
return data, nil, nil
|
||||
}
|
||||
|
||||
// A SyntaxError is a description of a JSON syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string // description of error
|
||||
Offset int64 // error occurred after reading Offset bytes
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// A scanner is a JSON scanning state machine.
|
||||
// Callers call scan.reset() and then pass bytes in one at a time
|
||||
// by calling scan.step(&scan, c) for each byte.
|
||||
// The return value, referred to as an opcode, tells the
|
||||
// caller about significant parsing events like beginning
|
||||
// and ending literals, objects, and arrays, so that the
|
||||
// caller can follow along if it wishes.
|
||||
// The return value scanEnd indicates that a single top-level
|
||||
// JSON value has been completed, *before* the byte that
|
||||
// just got passed in. (The indication must be delayed in order
|
||||
// to recognize the end of numbers: is 123 a whole value or
|
||||
// the beginning of 12345e+6?).
|
||||
type scanner struct {
|
||||
// The step is a func to be called to execute the next transition.
|
||||
// Also tried using an integer constant and a single func
|
||||
// with a switch, but using the func directly was 10% faster
|
||||
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||
step func(*scanner, int) int
|
||||
|
||||
// Reached end of top-level value.
|
||||
endTop bool
|
||||
|
||||
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||
parseState []int
|
||||
|
||||
// Error that happened, if any.
|
||||
err error
|
||||
|
||||
// 1-byte redo (see undo method)
|
||||
redo bool
|
||||
redoCode int
|
||||
redoState func(*scanner, int) int
|
||||
|
||||
// total bytes consumed, updated by decoder.Decode
|
||||
bytes int64
|
||||
}
|
||||
|
||||
// These values are returned by the state transition functions
|
||||
// assigned to scanner.state and the method scanner.eof.
|
||||
// They give details about the current state of the scan that
|
||||
// callers might be interested to know about.
|
||||
// It is okay to ignore the return value of any particular
|
||||
// call to scanner.state: if one call returns scanError,
|
||||
// every subsequent call will return scanError too.
|
||||
const (
|
||||
// Continue.
|
||||
scanContinue = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
|
||||
// Stop.
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
)
|
||||
|
||||
// These values are stored in the parseState stack.
|
||||
// They give the current state of a composite value
|
||||
// being scanned. If the parser is inside a nested value
|
||||
// the parseState describes the nested state, outermost at entry 0.
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
)
|
||||
|
||||
// reset prepares the scanner for use.
|
||||
// It must be called before calling s.step.
|
||||
func (s *scanner) reset() {
|
||||
s.step = stateBeginValue
|
||||
s.parseState = s.parseState[0:0]
|
||||
s.err = nil
|
||||
s.redo = false
|
||||
s.endTop = false
|
||||
}
|
||||
|
||||
// eof tells the scanner that the end of input has been reached.
|
||||
// It returns a scan status just as s.step does.
|
||||
func (s *scanner) eof() int {
|
||||
if s.err != nil {
|
||||
return scanError
|
||||
}
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
s.step(s, ' ')
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
if s.err == nil {
|
||||
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||
}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// pushParseState pushes a new parse state p onto the parse stack.
|
||||
func (s *scanner) pushParseState(p int) {
|
||||
s.parseState = append(s.parseState, p)
|
||||
}
|
||||
|
||||
// popParseState pops a parse state (already obtained) off the stack
|
||||
// and updates s.step accordingly.
|
||||
func (s *scanner) popParseState() {
|
||||
n := len(s.parseState) - 1
|
||||
s.parseState = s.parseState[0:n]
|
||||
s.redo = false
|
||||
if n == 0 {
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
} else {
|
||||
s.step = stateEndValue
|
||||
}
|
||||
}
|
||||
|
||||
func isSpace(c rune) bool {
|
||||
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
|
||||
}
|
||||
|
||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||
func stateBeginValueOrEmpty(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == ']' {
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginValue(s, c)
|
||||
}
|
||||
|
||||
// stateBeginValue is the state at the beginning of the input.
|
||||
func stateBeginValue(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
switch c {
|
||||
case '{':
|
||||
s.step = stateBeginStringOrEmpty
|
||||
s.pushParseState(parseObjectKey)
|
||||
return scanBeginObject
|
||||
case '[':
|
||||
s.step = stateBeginValueOrEmpty
|
||||
s.pushParseState(parseArrayValue)
|
||||
return scanBeginArray
|
||||
case '"':
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
case '-':
|
||||
s.step = stateNeg
|
||||
return scanBeginLiteral
|
||||
case '0': // beginning of 0.123
|
||||
s.step = state0
|
||||
return scanBeginLiteral
|
||||
case 't': // beginning of true
|
||||
s.step = stateT
|
||||
return scanBeginLiteral
|
||||
case 'f': // beginning of false
|
||||
s.step = stateF
|
||||
return scanBeginLiteral
|
||||
case 'n': // beginning of null
|
||||
s.step = stateN
|
||||
return scanBeginLiteral
|
||||
}
|
||||
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||
s.step = state1
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of value")
|
||||
}
|
||||
|
||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||
func stateBeginStringOrEmpty(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '}' {
|
||||
n := len(s.parseState)
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginString(s, c)
|
||||
}
|
||||
|
||||
// stateBeginString is the state after reading `{"key": value,`.
|
||||
func stateBeginString(s *scanner, c int) int {
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '"' {
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of object key string")
|
||||
}
|
||||
|
||||
// stateEndValue is the state after completing a value,
|
||||
// such as after reading `{}` or `true` or `["x"`.
|
||||
func stateEndValue(s *scanner, c int) int {
|
||||
n := len(s.parseState)
|
||||
if n == 0 {
|
||||
// Completed top-level before the current byte.
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
return stateEndTop(s, c)
|
||||
}
|
||||
if c <= ' ' && isSpace(rune(c)) {
|
||||
s.step = stateEndValue
|
||||
return scanSkipSpace
|
||||
}
|
||||
ps := s.parseState[n-1]
|
||||
switch ps {
|
||||
case parseObjectKey:
|
||||
if c == ':' {
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
s.step = stateBeginValue
|
||||
return scanObjectKey
|
||||
}
|
||||
return s.error(c, "after object key")
|
||||
case parseObjectValue:
|
||||
if c == ',' {
|
||||
s.parseState[n-1] = parseObjectKey
|
||||
s.step = stateBeginString
|
||||
return scanObjectValue
|
||||
}
|
||||
if c == '}' {
|
||||
s.popParseState()
|
||||
return scanEndObject
|
||||
}
|
||||
return s.error(c, "after object key:value pair")
|
||||
case parseArrayValue:
|
||||
if c == ',' {
|
||||
s.step = stateBeginValue
|
||||
return scanArrayValue
|
||||
}
|
||||
if c == ']' {
|
||||
s.popParseState()
|
||||
return scanEndArray
|
||||
}
|
||||
return s.error(c, "after array element")
|
||||
}
|
||||
return s.error(c, "")
|
||||
}
|
||||
|
||||
// stateEndTop is the state after finishing the top-level value,
|
||||
// such as after reading `{}` or `[1,2,3]`.
|
||||
// Only space characters should be seen now.
|
||||
func stateEndTop(s *scanner, c int) int {
|
||||
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
||||
// Complain about non-space byte on next call.
|
||||
s.error(c, "after top-level value")
|
||||
}
|
||||
return scanEnd
|
||||
}
|
||||
|
||||
// stateInString is the state after reading `"`.
|
||||
func stateInString(s *scanner, c int) int {
|
||||
if c == '"' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
if c == '\\' {
|
||||
s.step = stateInStringEsc
|
||||
return scanContinue
|
||||
}
|
||||
if c < 0x20 {
|
||||
return s.error(c, "in string literal")
|
||||
}
|
||||
return scanContinue
|
||||
}
|
||||
|
||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||
func stateInStringEsc(s *scanner, c int) int {
|
||||
switch c {
|
||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'u' {
|
||||
s.step = stateInStringEscU
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in string escape code")
|
||||
}
|
||||
|
||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||
func stateInStringEscU(s *scanner, c int) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU1
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU12
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInStringEscU123
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.step = stateInString
		return scanContinue
	}
	// numbers
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c int) int {
	if c == '0' {
		s.step = state0
		return scanContinue
	}
	if '1' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return s.error(c, "in numeric literal")
}

// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return state0(s, c)
}

// state0 is the state after reading `0` during a number.
func state0(s *scanner, c int) int {
	if c == '.' {
		s.step = stateDot
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.step = stateDot0
		return scanContinue
	}
	return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.step = stateDot0
		return scanContinue
	}
	if c == 'e' || c == 'E' {
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c int) int {
	if c == '+' {
		s.step = stateESign
		return scanContinue
	}
	if c == '-' {
		s.step = stateESign
		return scanContinue
	}
	return stateESign(s, c)
}

// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.step = stateE0
		return scanContinue
	}
	return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.step = stateE0
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateT is the state after reading `t`.
func stateT(s *scanner, c int) int {
	if c == 'r' {
		s.step = stateTr
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'r')")
}

// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c int) int {
	if c == 'u' {
		s.step = stateTru
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'u')")
}

// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c int) int {
	if c == 'e' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal true (expecting 'e')")
}

// stateF is the state after reading `f`.
func stateF(s *scanner, c int) int {
	if c == 'a' {
		s.step = stateFa
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'a')")
}

// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c int) int {
	if c == 'l' {
		s.step = stateFal
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'l')")
}

// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c int) int {
	if c == 's' {
		s.step = stateFals
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 's')")
}

// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c int) int {
	if c == 'e' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal false (expecting 'e')")
}

// stateN is the state after reading `n`.
func stateN(s *scanner, c int) int {
	if c == 'u' {
		s.step = stateNu
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'u')")
}

// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c int) int {
	if c == 'l' {
		s.step = stateNul
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c int) int {
	if c == 'l' {
		s.step = stateEndValue
		return scanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c int) int {
	return scanError
}

// error records an error and switches to the error state.
func (s *scanner) error(c int, context string) int {
	s.step = stateError
	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
	return scanError
}

// quoteChar formats c as a quoted character literal
func quoteChar(c int) string {
	// special cases - different from quoted strings
	if c == '\'' {
		return `'\''`
	}
	if c == '"' {
		return `'"'`
	}

	// use quoted string with different quotation marks
	s := strconv.Quote(string(c))
	return "'" + s[1:len(s)-1] + "'"
}

// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
	if s.redo {
		panic("json: invalid use of scanner")
	}
	s.redoCode = scanCode
	s.redoState = s.step
	s.step = stateRedo
	s.redo = true
}

// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c int) int {
	s.redo = false
	s.step = s.redoState
	return s.redoCode
}
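The functions above form a table-driven state machine: the current state is simply the func value stored in s.step, each input byte advances it, and undo/stateRedo provide a one-byte pushback. A stand-alone toy sketch of the same pattern, with purely illustrative names that are not part of the vendored package:

package main

import "fmt"

// toyScanner mirrors the scanner's step-function pattern: the current state
// is a function pointer that consumes one byte and installs the next state.
type toyScanner struct {
	step func(*toyScanner, byte) bool // reports whether the byte was accepted
}

// stateStart accepts 't' and hands off to stateAfterT, mimicking the
// stateT -> stateTr -> stateTru chain used for the `true` literal.
func stateStart(s *toyScanner, c byte) bool {
	if c == 't' {
		s.step = stateAfterT
		return true
	}
	return false
}

func stateAfterT(s *toyScanner, c byte) bool {
	return c == 'r' // a real scanner keeps chaining states from here
}

func main() {
	s := &toyScanner{step: stateStart}
	for _, c := range []byte("tr") {
		fmt.Println(s.step(s, c)) // true, then true
	}
}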
vendor/src/github.com/jfrazelle/go/canonical/json/stream.go (new vendored file, 207 lines)
@@ -0,0 +1,207 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"errors"
	"io"
)

// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
	r    io.Reader
	buf  []byte
	d    decodeState
	scan scanner
	err  error
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
	if dec.err != nil {
		return dec.err
	}

	n, err := dec.readValue()
	if err != nil {
		return err
	}

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	dec.d.init(dec.buf[0:n])
	err = dec.d.unmarshal(v)

	// Slide rest of data down.
	rest := copy(dec.buf, dec.buf[n:])
	dec.buf = dec.buf[0:rest]

	return err
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
	return bytes.NewReader(dec.buf)
}

// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.reset()

	scanp := 0
	var err error
Input:
	for {
		// Look in the buffer for a new value.
		for i, c := range dec.buf[scanp:] {
			dec.scan.bytes++
			v := dec.scan.step(&dec.scan, int(c))
			if v == scanEnd {
				scanp += i
				break Input
			}
			// scanEnd is delayed one byte.
			// We might block trying to get that byte from src,
			// so instead invent a space byte.
			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
				scanp += i + 1
				break Input
			}
			if v == scanError {
				dec.err = dec.scan.err
				return 0, dec.scan.err
			}
		}
		scanp = len(dec.buf)

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				if dec.scan.step(&dec.scan, ' ') == scanEnd {
					break Input
				}
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		// Make room to read more into the buffer.
		const minRead = 512
		if cap(dec.buf)-len(dec.buf) < minRead {
			newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
			copy(newBuf, dec.buf)
			dec.buf = newBuf
		}

		// Read. Delay error for next iteration (after scan).
		var n int
		n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
		dec.buf = dec.buf[0 : len(dec.buf)+n]
	}
	return scanp, nil
}

func nonSpace(b []byte) bool {
	for _, c := range b {
		if !isSpace(rune(c)) {
			return true
		}
	}
	return false
}

// An Encoder writes JSON objects to an output stream.
type Encoder struct {
	w         io.Writer
	err       error
	canonical bool
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}

// Canonical causes the encoder to switch to Canonical JSON mode.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
func (enc *Encoder) Canonical() { enc.canonical = true }

// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
	if enc.err != nil {
		return enc.err
	}
	e := newEncodeState(enc.canonical)
	err := e.marshal(v)
	if err != nil {
		return err
	}

	if !enc.canonical {
		// Terminate each value with a newline.
		// This makes the output look a little nicer
		// when debugging, and some kind of space
		// is required if the encoded value was a number,
		// so that the reader knows there aren't more
		// digits coming.
		e.WriteByte('\n')
	}

	if _, err = enc.w.Write(e.Bytes()); err != nil {
		enc.err = err
	}
	encodeStatePool.Put(e)
	return err
}

// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}

var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
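The vendored stream API matches encoding/json but adds the Canonical switch shown above. A minimal usage sketch, assuming the package is imported from the vendor path listed in the manifest; the output shape (sorted object keys, no trailing newline in canonical mode) follows from the encoder code:

package main

import (
	"bytes"
	"fmt"

	json "github.com/jfrazelle/go/canonical/json"
)

func main() {
	var buf bytes.Buffer

	// Encode in Canonical JSON mode.
	enc := json.NewEncoder(&buf)
	enc.Canonical()
	if err := enc.Encode(map[string]int{"b": 2, "a": 1}); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", buf.Bytes()) // {"a":1,"b":2}

	// Decode it back with the matching Decoder.
	var out map[string]int
	if err := json.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["a"], out["b"]) // 1 2
}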
vendor/src/github.com/jfrazelle/go/canonical/json/tags.go (new vendored file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
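These unexported helpers back the struct-tag options the encoder honors (omitempty, string). A hedged illustration of the tag forms they parse, using a hypothetical ImageRef type and the vendored import path; the expected output assumes the new package keeps the field-sorting and tag behavior shown in the encode code elsewhere in this diff:

package main

import (
	"bytes"
	"fmt"

	json "github.com/jfrazelle/go/canonical/json"
)

type ImageRef struct {
	Name string `json:"name"`
	// omitempty: dropped from the output when the field holds its zero value.
	Tag string `json:"tag,omitempty"`
	// string: the number is written inside a JSON string.
	Size int64 `json:"size,string"`
}

func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.Canonical()
	if err := enc.Encode(ImageRef{Name: "busybox", Size: 2048}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"name":"busybox","size":"2048"}
}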
@@ -1,4 +0,0 @@
language: go
go:
- 1.1
- tip
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,620 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cjson

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"math"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"unicode"
	"unicode/utf8"
)

func Marshal(v interface{}) ([]byte, error) {
	e := &encodeState{}
	err := e.marshal(v)
	if err != nil {
		return nil, err
	}
	return e.Bytes(), nil
}

// Marshaler is the interface implemented by objects that
// can marshal themselves into valid JSON.
type Marshaler interface {
	MarshalJSON() ([]byte, error)
}

// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return "json: unsupported type: " + e.Type.String()
}

type UnsupportedValueError struct {
	Value reflect.Value
	Str   string
}

func (e *UnsupportedValueError) Error() string {
	return "json: unsupported value: " + e.Str
}

type InvalidUTF8Error struct {
	S string
}

func (e *InvalidUTF8Error) Error() string {
	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}

type MarshalerError struct {
	Type reflect.Type
	Err  error
}

func (e *MarshalerError) Error() string {
	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}

var hex = "0123456789abcdef"

var numberType = reflect.TypeOf(Number(""))

// A Number represents a JSON number literal.
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(string(n), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(string(n), 10, 64)
}

// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
	bytes.Buffer // accumulated output
	scratch      [64]byte
}

func (e *encodeState) marshal(v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()
	e.reflectValue(reflect.ValueOf(v))
	return nil
}

func (e *encodeState) error(err error) {
	panic(err)
}

var byteSliceType = reflect.TypeOf([]byte(nil))

func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}

func (e *encodeState) reflectValue(v reflect.Value) {
	e.reflectValueQuoted(v, false)
}

// reflectValueQuoted writes the value in v to the output.
// If quoted is true, the serialization is wrapped in a JSON string.
func (e *encodeState) reflectValueQuoted(v reflect.Value, quoted bool) {
	if !v.IsValid() {
		e.WriteString("null")
		return
	}

	m, ok := v.Interface().(Marshaler)
	if !ok {
		// T doesn't match the interface. Check against *T too.
		if v.Kind() != reflect.Ptr && v.CanAddr() {
			m, ok = v.Addr().Interface().(Marshaler)
			if ok {
				v = v.Addr()
			}
		}
	}
	if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
		b, err := m.MarshalJSON()
		if err != nil {
			e.error(&MarshalerError{v.Type(), err})
		}

		// canonicalize the json if it's an object
		b = bytes.TrimSpace(b)
		if len(b) > 0 && b[0] == '{' {
			var temp interface{}
			err = json.Unmarshal(b, &temp)
			if err != nil {
				e.error(&MarshalerError{v.Type(), err})
			}
			b, err = Marshal(temp)
			if err != nil {
				e.error(&MarshalerError{v.Type(), err})
			}
		}
		e.Buffer.Write(b)
		return
	}

	writeString := (*encodeState).WriteString
	if quoted {
		writeString = (*encodeState).string
	}

	switch v.Kind() {
	case reflect.Bool:
		x := v.Bool()
		if x {
			writeString(e, "true")
		} else {
			writeString(e, "false")
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
		if quoted {
			writeString(e, string(b))
		} else {
			e.Write(b)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
		if quoted {
			writeString(e, string(b))
		} else {
			e.Write(b)
		}
	case reflect.Float32, reflect.Float64:
		f := v.Float()
		if math.IsInf(f, 0) || math.IsNaN(f) || math.Floor(f) != f {
			e.error(&UnsupportedValueError{v, "floating point number"})
		}
		b := strconv.AppendInt(e.scratch[:0], int64(f), 10)
		if quoted {
			writeString(e, string(b))
		} else {
			e.Write(b)
		}
	case reflect.String:
		if v.Type() == numberType {
			numStr := v.String()
			if numStr == "" {
				numStr = "0" // Number's zero-val
			}
			e.WriteString(numStr)
			break
		}
		if quoted {
			sb, err := Marshal(v.String())
			if err != nil {
				e.error(err)
			}
			e.string(string(sb))
		} else {
			e.string(v.String())
		}

	case reflect.Struct:
		e.WriteByte('{')
		first := true
		for _, f := range cachedTypeFields(v.Type()) {
			fv := fieldByIndex(v, f.index)
			if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
				continue
			}
			if first {
				first = false
			} else {
				e.WriteByte(',')
			}
			e.string(f.name)
			e.WriteByte(':')
			e.reflectValueQuoted(fv, f.quoted)
		}
		e.WriteByte('}')

	case reflect.Map:
		if v.Type().Key().Kind() != reflect.String {
			e.error(&UnsupportedTypeError{v.Type()})
		}
		if v.IsNil() {
			e.WriteString("null")
			break
		}
		e.WriteByte('{')
		var sv stringValues = v.MapKeys()
		sort.Sort(sv)
		for i, k := range sv {
			if i > 0 {
				e.WriteByte(',')
			}
			e.string(k.String())
			e.WriteByte(':')
			e.reflectValue(v.MapIndex(k))
		}
		e.WriteByte('}')

	case reflect.Slice:
		if v.IsNil() {
			e.WriteString("null")
			break
		}
		if v.Type().Elem().Kind() == reflect.Uint8 {
			// Byte slices get special treatment; arrays don't.
			s := v.Bytes()
			e.WriteByte('"')
			if len(s) < 1024 {
				// for small buffers, using Encode directly is much faster.
				dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
				base64.StdEncoding.Encode(dst, s)
				e.Write(dst)
			} else {
				// for large buffers, avoid unnecessary extra temporary
				// buffer space.
				enc := base64.NewEncoder(base64.StdEncoding, e)
				enc.Write(s)
				enc.Close()
			}
			e.WriteByte('"')
			break
		}
		// Slices can be marshalled as nil, but otherwise are handled
		// as arrays.
		fallthrough
	case reflect.Array:
		e.WriteByte('[')
		n := v.Len()
		for i := 0; i < n; i++ {
			if i > 0 {
				e.WriteByte(',')
			}
			e.reflectValue(v.Index(i))
		}
		e.WriteByte(']')

	case reflect.Interface, reflect.Ptr:
		if v.IsNil() {
			e.WriteString("null")
			return
		}
		e.reflectValue(v.Elem())

	default:
		e.error(&UnsupportedTypeError{v.Type()})
	}
	return
}

func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		switch {
		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
		default:
			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
				return false
			}
		}
	}
	return true
}

func fieldByIndex(v reflect.Value, index []int) reflect.Value {
	for _, i := range index {
		if v.Kind() == reflect.Ptr {
			if v.IsNil() {
				return reflect.Value{}
			}
			v = v.Elem()
		}
		v = v.Field(i)
	}
	return v
}

// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value

func (sv stringValues) Len() int           { return len(sv) }
func (sv stringValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
func (sv stringValues) get(i int) string   { return sv[i].String() }

func (e *encodeState) string(s string) (int, error) {
	len0 := e.Len()
	e.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if b != '\\' && b != '"' {
				i++
				continue
			}
			if start < i {
				e.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			e.error(&InvalidUTF8Error{s})
		}
		i += size
	}
	if start < len(s) {
		e.WriteString(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0, nil
}

// A field represents a single field found in a struct.
type field struct {
	name      string
	tag       bool
	index     []int
	typ       reflect.Type
	omitEmpty bool
	quoted    bool
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft,
						opts.Contains("omitempty"), opts.Contains("string")})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Remove fields with annihilating name collisions
	// and also fields shadowed by fields with explicit JSON tags.
	name := ""
	out := fields[:0]
	for _, f := range fields {
		if f.name != name {
			name = f.name
			out = append(out, f)
			continue
		}
		if n := len(out); n > 0 && out[n-1].name == name && (!out[n-1].tag || f.tag) {
			out = out[:n-1]
		}
	}
	fields = out

	return fields
}

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains returns whether checks that a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}