http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/d0d9944b/newtmgr/vendor/github.com/ugorji/go/codec/helper.go
----------------------------------------------------------------------
diff --git a/newtmgr/vendor/github.com/ugorji/go/codec/helper.go 
b/newtmgr/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 0000000..f3d2600
--- /dev/null
+++ b/newtmgr/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,1272 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE 
file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to 
handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call 
that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and give best 
performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// During encode, we use a high-level condition to determine how to iterate 
through
+// the container. That decision is based on whether the container is 
text-based (with
+// separators) or binary (without separators). If binary, we do not even call 
the
+// encoding of separators.
+//
+// During decode, we use a different high-level condition to determine how to 
iterate
+// through the containers. That decision is based on whether the stream 
contained
+// a length prefix, or if it used explicit breaks. If length-prefixed, we 
assume that
+// it has to be binary, and we do not even try to read separators.
+//
+// The only codec that may suffer (slightly) is cbor, and only when decoding 
indefinite-length.
+// It may suffer because we treat it like a text-based codec, and read 
separators.
+// However, this read is a no-op and the cost is insignificant.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+//    - If struct, update fields from stream into fields of struct.
+//      If field in stream not found in struct, handle appropriately (based on 
option).
+//      If a struct field has no corresponding value in the stream, leave it 
AS IS.
+//      If nil in stream, set value to nil/zero value.
+//    - If map, update map from stream.
+//      If the stream value is NIL, set the map to nil.
+//    - if slice, try to update up to length of array in stream.
+//      if container len is less than stream array length,
+//      and container cannot be expanded, handled (based on option).
+//      This means you can decode 4-element stream array into 1-element array.
+//
+// ------------------------------------
+// On encode, user can specify omitEmpty. This means that the value will be 
omitted
+// if the zero value. The problem may occur during decode, where omitted 
values do not affect
+// the value being decoded into. This means that if decoding into a struct 
with an
+// int field with current value=5, and the field is omitted in the stream, 
then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into 
zero-values.
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero 
values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation 
modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+//   map
+//   - first collect all keys (e.g. in k1)
+//   - for each key in stream, mark k1 that the key should not be removed
+//   - after updating map, do second pass and call delete for all keys in k1 
which are not marked
+//   struct:
+//   - for each field, track the *typeInfo s1
+//   - iterate through all s1, and for each one not marked, set value to zero
+//   - this involves checking the possible anonymous fields which are nil ptrs.
+//     too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error is En|Decoder.
+//   - once it has its err field set, it cannot be used again.
+//   - panicing will be optional, controlled by const flag.
+//   - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+       "bytes"
+       "encoding"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "math"
+       "reflect"
+       "sort"
+       "strings"
+       "sync"
+       "time"
+)
+
// Package-wide tuning flags and constants. These are compile-time knobs;
// changing one changes the behavior of both Encoder and Decoder.
const (
	scratchByteArrayLen = 32
	initCollectionCap   = 32 // 32 is defensive. 16 is preferred.

	// Support encoding.(Binary|Text)(Unm|M)arshaler.
	// This constant flag will enable or disable it.
	supportMarshalInterfaces = true

	// Each Encoder or Decoder uses a cache of functions based on conditionals,
	// so that the conditionals are not run every time.
	//
	// Either a map or a slice is used to keep track of the functions.
	// The map is more natural, but has a higher cost than a slice/array.
	// This flag (useMapForCodecCache) controls which is used.
	//
	// From benchmarks, slices with linear search perform better with < 32 entries.
	// We have typically seen a high threshold of about 24 entries.
	useMapForCodecCache = false

	// for debugging, set this to false, to catch panic traces.
	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
	recoverPanicToErr = true

	// Fast path functions try to create a fast path encode or decode implementation
	// for common maps and slices, by by-passing reflection altogether.
	fastpathEnabled = true

	// if checkStructForEmptyValue, check structs fields to see if an empty value.
	// This could be an expensive call, so possibly disable it.
	checkStructForEmptyValue = false

	// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
	derefForIsEmptyValue = false

	// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
	// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
	// The chances of this are slim, so leave this "optimization".
	// TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
	resetSliceElemToZeroValue bool = false
)
+
var (
	// oneByteArr is the shared backing array for zeroByteSlice.
	oneByteArr = [1]byte{0}
	// zeroByteSlice is a non-nil []byte with len 0 and cap 0 (three-index
	// slice), shared wherever an empty byte slice is needed without allocating.
	zeroByteSlice = oneByteArr[:0:0]
)
+
// charEncoding denotes the character encoding of a run of text in the stream.
type charEncoding uint8

const (
	c_RAW charEncoding = iota // raw bytes, no declared encoding
	c_UTF8
	c_UTF16LE
	c_UTF16BE
	c_UTF32LE
	c_UTF32BE
)
+
// valueType is the stream type: the kind of the next value in the
// encoded stream, as reported by a decDriver.
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeBool
	valueTypeString
	valueTypeSymbol
	valueTypeBytes
	valueTypeMap
	valueTypeArray
	valueTypeTimestamp
	valueTypeExt

	// valueTypeInvalid = 0xff
)
+
// seqType classifies the kind of Go sequence being encoded or decoded:
// array, slice or channel.
type seqType uint8

const (
	_ seqType = iota // zero value deliberately invalid
	seqTypeArray
	seqTypeSlice
	seqTypeChan
)
+
// containerState tracks position within a map/array container, and is sent
// to drivers that implement containerStateRecv (e.g. to emit separators).
//
// note that containerMapStart and containerArrayStart are not sent.
// This is because the ReadXXXStart and EncodeXXXStart already does these.
type containerState uint8

const (
	_ containerState = iota

	containerMapStart // slot left open, since Driver method already covers it
	containerMapKey
	containerMapValue
	containerMapEnd
	containerArrayStart // slot left open, since Driver methods already cover it
	containerArrayElem
	containerArrayEnd
)
+
// rgetPoolT holds pre-sized scratch arrays used while collecting struct
// field metadata (see TypeInfos.get/rget), so inspecting typical structs
// (<= 8 fields) does not allocate.
type rgetPoolT struct {
	encNames [8]string
	fNames   [8]string
	etypes   [8]uintptr
	sfis     [8]*structFieldInfo
}

// rgetPool recycles rgetPoolT scratch buffers across type inspections.
var rgetPool = sync.Pool{
	New: func() interface{} { return new(rgetPoolT) },
}
+
// rgetT accumulates struct field metadata during recursive field traversal.
// The slices typically alias the arrays of a pooled rgetPoolT.
type rgetT struct {
	fNames   []string
	encNames []string
	etypes   []uintptr
	sfis     []*structFieldInfo
}

// containerStateRecv is implemented by drivers that want to be told about
// container-position transitions (e.g. to write separators in text formats).
type containerStateRecv interface {
	sendContainerState(containerState)
}

// mirror json.Marshaler and json.Unmarshaler here,
// so we don't import the encoding/json package
type jsonMarshaler interface {
	MarshalJSON() ([]byte, error)
}
type jsonUnmarshaler interface {
	UnmarshalJSON([]byte) error
}
+
var (
	bigen               = binary.BigEndian
	structInfoFieldName = "_struct" // magic field name carrying per-struct options via its tag

	// Frequently-checked reflect.Types, computed once.
	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
	intfTyp        = intfSliceTyp.Elem()

	stringTyp     = reflect.TypeOf("")
	timeTyp       = reflect.TypeOf(time.Time{})
	rawExtTyp     = reflect.TypeOf(RawExt{})
	uint8SliceTyp = reflect.TypeOf([]uint8(nil))

	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()

	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()

	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()

	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()

	selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()

	// Type ids (pointers to the runtime type descriptors), used as cheap
	// comparable keys for the types above.
	uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
	rawExtTypId     = reflect.ValueOf(rawExtTyp).Pointer()
	intfTypId       = reflect.ValueOf(intfTyp).Pointer()
	timeTypId       = reflect.ValueOf(timeTyp).Pointer()
	stringTypId     = reflect.ValueOf(stringTyp).Pointer()

	mapStrIntfTypId  = reflect.ValueOf(mapStrIntfTyp).Pointer()
	mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
	intfSliceTypId   = reflect.ValueOf(intfSliceTyp).Pointer()
	// mapBySliceTypId  = reflect.ValueOf(mapBySliceTyp).Pointer()

	intBitsize  uint8 = uint8(reflect.TypeOf(int(0)).Bits())
	uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())

	bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

	chkOvf checkOverflow

	noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
)

// defTypeInfos is the default type-info cache, consulting struct tag keys
// "codec" then "json".
var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
type Selfer interface {
	CodecEncodeSelf(*Encoder)
	CodecDecodeSelf(*Decoder)
}
+
// MapBySlice represents a slice which should be encoded as a map in the stream.
// The slice contains a sequence of key-value pairs.
// This affords storing a map in a specific sequence in the stream.
//
// The support of MapBySlice affords the following:
//   - A slice type which implements MapBySlice will be encoded as a map
//   - A slice can be decoded from a map in the stream
type MapBySlice interface {
	MapBySlice() // marker method only; never called
}
+
// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
//
// BasicHandle encapsulates the common options and extension functions
// shared by all format-specific handles (which embed it).
type BasicHandle struct {
	// TypeInfos is used to get the type info for any type.
	//
	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
	TypeInfos *TypeInfos

	extHandle
	EncodeOptions
	DecodeOptions
}
+
// getBasicHandle returns the receiver itself; any Handle that embeds
// BasicHandle thereby satisfies the Handle interface's accessor.
func (x *BasicHandle) getBasicHandle() *BasicHandle {
	return x
}
+
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti 
*typeInfo) {
+       if x.TypeInfos != nil {
+               return x.TypeInfos.get(rtid, rt)
+       }
+       return defTypeInfos.get(rtid, rt)
+}
+
// Handle is the interface for a specific encoding format.
//
// Typically, a Handle is pre-configured before first time use,
// and not modified while in use. Such a pre-configured Handle
// is safe for concurrent access.
type Handle interface {
	getBasicHandle() *BasicHandle
	newEncDriver(w *Encoder) encDriver
	newDecDriver(r *Decoder) decDriver
	isBinary() bool
}
+
// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
//
// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
	Tag uint64
	// Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value.
	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
	Data []byte
	// Value represents the extension, if Data is nil.
	// Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
	Value interface{}
}
+
// BytesExt handles custom (de)serialization of types to/from []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
	// WriteExt converts a value to a []byte.
	//
	// Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
	WriteExt(v interface{}) []byte

	// ReadExt updates a value from a []byte.
	ReadExt(dst interface{}, src []byte)
}

// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
//
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
type InterfaceExt interface {
	// ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
	//
	// Note: v *may* be a pointer to the extension type, if the extension type was a struct or array.
	ConvertExt(v interface{}) interface{}

	// UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
	UpdateExt(dst interface{}, src interface{})
}

// Ext handles custom (de)serialization of custom types / extensions.
// It is the union of BytesExt and InterfaceExt.
type Ext interface {
	BytesExt
	InterfaceExt
}
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported 
method.
+type addExtWrapper struct {
+       encFn func(reflect.Value) ([]byte, error)
+       decFn func(reflect.Value, []byte) error
+}
+
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+       bs, err := x.encFn(reflect.ValueOf(v))
+       if err != nil {
+               panic(err)
+       }
+       return bs
+}
+
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+       if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
+               panic(err)
+       }
+}
+
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+       return x.WriteExt(v)
+}
+
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+       x.ReadExt(dest, v.([]byte))
+}
+
+type setExtWrapper struct {
+       b BytesExt
+       i InterfaceExt
+}
+
+func (x *setExtWrapper) WriteExt(v interface{}) []byte {
+       if x.b == nil {
+               panic("BytesExt.WriteExt is not supported")
+       }
+       return x.b.WriteExt(v)
+}
+
+func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
+       if x.b == nil {
+               panic("BytesExt.WriteExt is not supported")
+
+       }
+       x.b.ReadExt(v, bs)
+}
+
+func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
+       if x.i == nil {
+               panic("InterfaceExt.ConvertExt is not supported")
+
+       }
+       return x.i.ConvertExt(v)
+}
+
+func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+       if x.i == nil {
+               panic("InterfaceExxt.UpdateExt is not supported")
+
+       }
+       x.i.UpdateExt(dest, v)
+}
+
// type errorString string
// func (x errorString) Error() string { return string(x) }

// binaryEncodingType is embedded by binary-format handles so isBinary
// reports true.
type binaryEncodingType struct{}

func (_ binaryEncodingType) isBinary() bool { return true }

// textEncodingType is embedded by text-format handles so isBinary
// reports false.
type textEncodingType struct{}

func (_ textEncodingType) isBinary() bool { return false }
+
// noBuiltInTypes is embedded into many types which do not support builtins
// e.g. msgpack, simple, cbor.
type noBuiltInTypes struct{}

func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool           { return false }
func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}

// noStreamingCodec is embedded by drivers whose formats have no
// indefinite-length (break-terminated) containers.
type noStreamingCodec struct{}

func (_ noStreamingCodec) CheckBreak() bool { return false }
+
// bigenHelper writes big-endian unsigned integers into a caller-provided
// scratch slice and flushes them to an encWriter.
// Users must already slice the x completely, because we will not reslice.
type bigenHelper struct {
	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
	w encWriter
}

func (z bigenHelper) writeUint16(v uint16) {
	bigen.PutUint16(z.x, v)
	z.w.writeb(z.x)
}

func (z bigenHelper) writeUint32(v uint32) {
	bigen.PutUint32(z.x, v)
	z.w.writeb(z.x)
}

func (z bigenHelper) writeUint64(v uint64) {
	bigen.PutUint64(z.x, v)
	z.w.writeb(z.x)
}
+
// extTypeTagFn is one extension registration: a reflect.Type (and its id),
// the stream tag it maps to, and the Ext implementing the conversion.
type extTypeTagFn struct {
	rtid uintptr
	rt   reflect.Type
	tag  uint64
	ext  Ext
}

// extHandle is the ordered set of extension registrations on a Handle.
// Lookup is linear; registration counts are expected to be small.
type extHandle []extTypeTagFn
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// AddExt registes an encode and decode function for a reflect.Type.
+// AddExt internally calls SetExt.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+func (o *extHandle) AddExt(
+       rt reflect.Type, tag byte,
+       encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, 
[]byte) error,
+) (err error) {
+       if encfn == nil || decfn == nil {
+               return o.SetExt(rt, uint64(tag), nil)
+       }
+       return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+//
+// Note that the type must be a named type, and specifically not
+// a pointer or Interface. An error is returned if that is not honored.
+//
+// To Deregister an ext, call SetExt with nil Ext
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+       // o is a pointer, because we may need to initialize it
+       if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
+               err = fmt.Errorf("codec.Handle.AddExt: Takes named type, 
especially not a pointer or interface: %T",
+                       reflect.Zero(rt).Interface())
+               return
+       }
+
+       rtid := reflect.ValueOf(rt).Pointer()
+       for _, v := range *o {
+               if v.rtid == rtid {
+                       v.tag, v.ext = tag, ext
+                       return
+               }
+       }
+
+       if *o == nil {
+               *o = make([]extTypeTagFn, 0, 4)
+       }
+       *o = append(*o, extTypeTagFn{rtid, rt, tag, ext})
+       return
+}
+
+func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
+       var v *extTypeTagFn
+       for i := range o {
+               v = &o[i]
+               if v.rtid == rtid {
+                       return v
+               }
+       }
+       return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
+       var v *extTypeTagFn
+       for i := range o {
+               v = &o[i]
+               if v.tag == tag {
+                       return v
+               }
+       }
+       return nil
+}
+
// structFieldInfo describes how one struct field is encoded/decoded:
// its name in the stream, how to reach it via reflection, and its
// tag-derived options.
type structFieldInfo struct {
	encName string // encode name

	// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.

	is        []int // (recursive/embedded) field index in struct
	i         int16 // field index in struct
	omitEmpty bool
	toArray   bool // if field is _struct, is the toArray set?
}

// func (si *structFieldInfo) isZero() bool {
//	return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray
// }
+
// field returns the reflect.Value of this field within struct value v.
// Direct fields (si.i != -1) use a single Field lookup; embedded fields
// walk si.is, dereferencing intermediate pointers. If an intermediate
// pointer is nil: with update=false an invalid (zero) reflect.Value is
// returned; with update=true the pointer is allocated so the walk can
// continue (requires v be addressable/settable).
func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
	if si.i != -1 {
		v = v.Field(int(si.i))
		return v
	}
	// replicate FieldByIndex
	for _, x := range si.is {
		for v.Kind() == reflect.Ptr {
			if v.IsNil() {
				if !update {
					return // rv2 stays the invalid zero Value
				}
				v.Set(reflect.New(v.Type().Elem()))
			}
			v = v.Elem()
		}
		v = v.Field(x)
	}
	return v
}
+
// setToZeroValue sets this field within struct value v to its zero value.
// For embedded fields it replicates FieldByIndex, but silently stops if an
// intermediate pointer is nil (nothing to zero) rather than allocating it.
func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
	if si.i != -1 {
		v = v.Field(int(si.i))
		v.Set(reflect.Zero(v.Type()))
		// v.Set(reflect.New(v.Type()).Elem())
		// v.Set(reflect.New(v.Type()))
	} else {
		// replicate FieldByIndex
		for _, x := range si.is {
			for v.Kind() == reflect.Ptr {
				if v.IsNil() {
					return
				}
				v = v.Elem()
			}
			v = v.Field(x)
		}
		v.Set(reflect.Zero(v.Type()))
	}
}
+
+func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
+       // if fname == "" {
+       //      panic(noFieldNameToStructFieldInfoErr)
+       // }
+       si := structFieldInfo{
+               encName: fname,
+       }
+
+       if stag != "" {
+               for i, s := range strings.Split(stag, ",") {
+                       if i == 0 {
+                               if s != "" {
+                                       si.encName = s
+                               }
+                       } else {
+                               if s == "omitempty" {
+                                       si.omitEmpty = true
+                               } else if s == "toarray" {
+                                       si.toArray = true
+                               }
+                       }
+               }
+       }
+       // si.encNameBs = []byte(si.encName)
+       return &si
+}
+
// sfiSortedByEncName implements sort.Interface, ordering fields
// lexicographically by their encode name (enables binary search in
// indexForEncName).
type sfiSortedByEncName []*structFieldInfo

func (p sfiSortedByEncName) Len() int {
	return len(p)
}

func (p sfiSortedByEncName) Less(i, j int) bool {
	return p[i].encName < p[j].encName
}

func (p sfiSortedByEncName) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
+
// typeInfo keeps information about each type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
//   - If base is a built in type, en/decode base value
//   - If base is registered as an extension, en/decode base value
//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
//   - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
	sfi  []*structFieldInfo // sorted. Used when enc/dec struct to map.
	sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.

	rt   reflect.Type
	rtid uintptr

	numMeth uint16 // number of methods

	// baseId gives pointer to the base reflect.Type, after deferencing
	// the pointers. E.g. base type of ***time.Time is time.Time.
	base      reflect.Type
	baseId    uintptr
	baseIndir int8 // number of indirections to get to base

	mbs bool // base type (T or *T) is a MapBySlice

	bm        bool // base type (T or *T) is a binaryMarshaler
	bunm      bool // base type (T or *T) is a binaryUnmarshaler
	bmIndir   int8 // number of indirections to get to binaryMarshaler type
	bunmIndir int8 // number of indirections to get to binaryUnmarshaler type

	tm        bool // base type (T or *T) is a textMarshaler
	tunm      bool // base type (T or *T) is a textUnmarshaler
	tmIndir   int8 // number of indirections to get to textMarshaler type
	tunmIndir int8 // number of indirections to get to textUnmarshaler type

	jm        bool // base type (T or *T) is a jsonMarshaler
	junm      bool // base type (T or *T) is a jsonUnmarshaler
	jmIndir   int8 // number of indirections to get to jsonMarshaler type
	junmIndir int8 // number of indirections to get to jsonUnmarshaler type

	cs      bool // base type (T or *T) is a Selfer
	csIndir int8 // number of indirections to get to Selfer type

	toArray bool // whether this (struct) type should be encoded as an array
}
+
// indexForEncName returns the index within ti.sfi (which is sorted by
// encName) of the field whose encode name equals name, or -1 if absent.
// Small field sets use a linear scan; larger ones use binary search.
func (ti *typeInfo) indexForEncName(name string) int {
	// NOTE: name may be a stringView, so don't pass it to another function.
	//tisfi := ti.sfi
	const binarySearchThreshold = 16
	if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
		// linear search. faster than binary search in my testing up to 16-field structs.
		for i, si := range ti.sfi {
			if si.encName == name {
				return i
			}
		}
	} else {
		// binary search. adapted from sort/search.go.
		h, i, j := 0, 0, sfilen
		for i < j {
			h = i + (j-i)/2 // midpoint, avoids overflow of (i+j)/2
			if ti.sfi[h].encName < name {
				i = h + 1
			} else {
				j = h
			}
		}
		// i is now the insertion point; check it actually holds name.
		if i < sfilen && ti.sfi[i].encName == name {
			return i
		}
	}
	return -1
}
+
// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
	infos map[uintptr]*typeInfo // keyed by type id; guarded by mu
	mu    sync.RWMutex
	tags  []string // struct tag keys checked in order (e.g. codec, json)
}

// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
//
// This allows users customize the struct tag keys which contain configuration
// of their types.
func NewTypeInfos(tags []string) *TypeInfos {
	return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+       // check for tags: codec, json, in that order.
+       // this allows seamless support for many configured structs.
+       for _, x := range x.tags {
+               s = t.Get(x)
+               if s != "" {
+                       return s
+               }
+       }
+       return
+}
+
// get returns the cached typeInfo for rt (keyed by rtid), computing and
// caching it on first request. Concurrent callers may compute the same
// typeInfo redundantly, but exactly one copy wins and is stored.
func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	var ok bool
	x.mu.RLock()
	pti, ok = x.infos[rtid]
	x.mu.RUnlock()
	if ok {
		return
	}

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{rt: rt, rtid: rtid}
	ti.numMeth = uint16(rt.NumMethod())

	// Record which (un)marshaling interfaces the type (or pointers to it)
	// implements, and how many indirections are needed to reach each.
	var indir int8
	if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
		ti.bm, ti.bmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
		ti.bunm, ti.bunmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
		ti.tm, ti.tmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
		ti.tunm, ti.tunmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok {
		ti.jm, ti.jmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok {
		ti.junm, ti.junmIndir = true, indir
	}
	if ok, indir = implementsIntf(rt, selferTyp); ok {
		ti.cs, ti.csIndir = true, indir
	}
	if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
		ti.mbs = true
	}

	// Strip pointer indirections to find the base type.
	pt := rt
	var ptIndir int8
	// for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
	for pt.Kind() == reflect.Ptr {
		pt = pt.Elem()
		ptIndir++
	}
	if ptIndir == 0 {
		ti.base = rt
		ti.baseId = rtid
	} else {
		ti.base = pt
		ti.baseId = reflect.ValueOf(pt).Pointer()
		ti.baseIndir = ptIndir
	}

	if rt.Kind() == reflect.Struct {
		// The magic "_struct" field's tag carries struct-wide options.
		var siInfo *structFieldInfo
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
			ti.toArray = siInfo.toArray
		}
		// Collect field metadata using pooled scratch buffers (no alloc for
		// typical small structs), then keep both declaration order (sfip)
		// and encName-sorted order (sfi).
		pi := rgetPool.Get()
		pv := pi.(*rgetPoolT)
		pv.etypes[0] = ti.baseId
		vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
		x.rget(rt, rtid, nil, &vv, siInfo)
		ti.sfip = make([]*structFieldInfo, len(vv.sfis))
		ti.sfi = make([]*structFieldInfo, len(vv.sfis))
		copy(ti.sfip, vv.sfis)
		sort.Sort(sfiSortedByEncName(vv.sfis))
		copy(ti.sfi, vv.sfis)
		rgetPool.Put(pi)
	}
	// sfi = sfip

	// Re-check under the write lock: another goroutine may have stored
	// its computed copy first; if so, return that one.
	x.mu.Lock()
	if pti, ok = x.infos[rtid]; !ok {
		pti = &ti
		x.infos[rtid] = pti
	}
	x.mu.Unlock()
	return
}
+
// rget collects encoding metadata (structFieldInfo) for every usable field of
// the struct type rt into pv, applying Go's embedding rules: fields seen
// earlier in the traversal win over later ones, duplicate field names and
// duplicate encoded names are skipped, and each embedded struct type is
// descended into at most once (tracked via pv.etypes).
//
// indexstack is the chain of field indices from the root struct down to rt
// (empty at the root). siInfo, when non-nil, carries struct-level options
// parsed from the special struct-info field (e.g. omitempty) that propagate
// to every collected field.
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
	indexstack []int, pv *rgetT, siInfo *structFieldInfo,
) {
	// This will read up the fields and store how to access the value.
	// It uses the go language's rules for embedding, as below:
	//   - if a field has been seen while traversing, skip it
	//   - if an encName has been seen while traversing, skip it
	//   - if an embedded type has been seen, skip it
	//
	// Also, per Go's rules, embedded fields must be analyzed AFTER all top-level fields.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	//       Typically, types have < 16 fields, and iteration using equals is faster than maps there

	// anonField records an embedded struct (already dereferenced to its base
	// struct type) and its field index, for the deferred second pass below.
	type anonField struct {
		ft  reflect.Type
		idx int
	}

	var anonFields []anonField

LOOP:
	for j, jlen := 0, rt.NumField(); j < jlen; j++ {
		f := rt.Field(j)
		fkind := f.Type.Kind()
		// skip if a func type, or is unexported, or structTag value == "-"
		// (complex and unsafe-pointer kinds are not supported either)
		switch fkind {
		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
			continue LOOP
		}

		// if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
		if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si *structFieldInfo
		// if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			doInline := stag == ""
			if !doInline {
				si = parseStructFieldInfo("", stag)
				doInline = si.encName == ""
				// doInline = si.isZero()
			}
			if doInline {
				ft := f.Type
				for ft.Kind() == reflect.Ptr {
					ft = ft.Elem()
				}
				if ft.Kind() == reflect.Struct {
					// handle anonymous fields after handling all the non-anon fields
					anonFields = append(anonFields, anonField{ft, j})
					continue
				}
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if f.PkgPath != "" { // unexported
			continue
		}

		if f.Name == "" {
			panic(noFieldNameToStructFieldInfoErr)
		}

		// skip if this field name was already collected (embedding shadow rule)
		for _, k := range pv.fNames {
			if k == f.Name {
				continue LOOP
			}
		}
		pv.fNames = append(pv.fNames, f.Name)

		if si == nil {
			si = parseStructFieldInfo(f.Name, stag)
		} else if si.encName == "" {
			si.encName = f.Name
		}

		// skip if the encoded name collides with one already collected
		for _, k := range pv.encNames {
			if k == si.encName {
				continue LOOP
			}
		}
		pv.encNames = append(pv.encNames, si.encName)

		// si.ikind = int(f.Type.Kind())
		// top-level fields store a direct index; nested ones a full index path
		if len(indexstack) == 0 {
			si.i = int16(j)
		} else {
			si.i = -1
			si.is = make([]int, len(indexstack)+1)
			copy(si.is, indexstack)
			si.is[len(indexstack)] = j
			// si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
		}

		// propagate struct-level omitempty down to each field
		if siInfo != nil {
			if siInfo.omitEmpty {
				si.omitEmpty = true
			}
		}
		pv.sfis = append(pv.sfis, si)
	}

	// now handle anonymous fields
LOOP2:
	for _, af := range anonFields {
		// if etypes contains this, then do not call rget again (as the fields are already seen here)
		ftid := reflect.ValueOf(af.ft).Pointer()
		for _, k := range pv.etypes {
			if k == ftid {
				continue LOOP2
			}
		}
		pv.etypes = append(pv.etypes, ftid)

		indexstack2 := make([]int, len(indexstack)+1)
		copy(indexstack2, indexstack)
		indexstack2[len(indexstack)] = af.idx
		// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
		x.rget(af.ft, ftid, indexstack2, pv, siInfo)
	}
}
+
+func panicToErr(err *error) {
+       if recoverPanicToErr {
+               if x := recover(); x != nil {
+                       //debug.PrintStack()
+                       panicValToErr(x, err)
+               }
+       }
+}
+
+// func doPanic(tag string, format string, params ...interface{}) {
+//     params2 := make([]interface{}, len(params)+1)
+//     params2[0] = tag
+//     copy(params2[1:], params)
+//     panic(fmt.Errorf("%s: "+format, params2...))
+// }
+
// isImmutableKind reports whether values of kind k are immutable scalars
// (numbers, bool, string), i.e. safe to reuse/copy without aliasing concerns.
func isImmutableKind(k reflect.Kind) (v bool) {
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Uintptr,
		reflect.Float32, reflect.Float64,
		reflect.Bool, reflect.String:
		return true
	default:
		return false
	}
}
+
// these functions must be inlinable, and not call anybody

// checkOverflow groups small, inlinable overflow checks used when narrowing
// decoded numeric values.
type checkOverflow struct{}

// Float32 reports whether f's magnitude is too large for a float32.
// Infinities are deliberately not treated as overflow (the upper-bound
// comparison fails for +Inf), matching conversion semantics.
func (checkOverflow) Float32(f float64) (overflow bool) {
	abs := f
	if abs < 0 {
		abs = -abs
	}
	return abs > math.MaxFloat32 && abs <= math.MaxFloat64
}

// Uint reports whether v does not fit in an unsigned integer of the given
// bitsize. bitsize 0 or >= 64 (and v == 0) always fit.
func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return false
	}
	shift := 64 - bitsize
	// round-trip through the narrower width; a change means truncation
	return (v<<shift)>>shift != v
}

// Int reports whether v does not fit in a signed integer of the given
// bitsize. bitsize 0 or >= 64 (and v == 0) always fit.
func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	if bitsize == 0 || bitsize >= 64 || v == 0 {
		return false
	}
	shift := 64 - bitsize
	// arithmetic right shift sign-extends, so this detects signed truncation
	return (v<<shift)>>shift != v
}
+
// SignedInt reinterprets the uint64 bit pattern v as an int64, reporting
// overflow when the value cannot be represented.
// NOTE(review): because ui2 is masked to the low 63 bits, the positive-branch
// check can never trigger, and the negative-branch check triggers only when
// v == math.MaxUint64 — confirm this matches the intended overflow rule
// (the comment below also looks inverted: int8 spans -128..127, not -127..128).
func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
	//e.g. -127 to 128 for int8
	pos := (v >> 63) == 0
	ui2 := v & 0x7fffffffffffffff
	if pos {
		// top bit clear: ui2 == v, and ui2 <= MaxInt64 by construction
		if ui2 > math.MaxInt64 {
			overflow = true
			return
		}
	} else {
		// top bit set: int64(v) will be negative
		if ui2 > math.MaxInt64-1 {
			overflow = true
			return
		}
	}
	i = int64(v)
	return
}
+
+// ------------------ SORT -----------------
+
// isNaN reports whether f is an IEEE 754 NaN (the only value not equal to itself).
func isNaN(f float64) bool { return f != f }

// -----------------------

// Sortable wrappers over slices of primitive values.
type (
	intSlice    []int64
	uintSlice   []uint64
	floatSlice  []float64
	boolSlice   []bool
	stringSlice []string
	bytesSlice  [][]byte
)

// sort.Interface implementations for each wrapper, ordered ascending.

func (s intSlice) Len() int           { return len(s) }
func (s intSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s intSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func (s uintSlice) Len() int           { return len(s) }
func (s uintSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s uintSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// floats: a NaN sorts before every non-NaN value
func (s floatSlice) Len() int { return len(s) }
func (s floatSlice) Less(i, j int) bool {
	return s[i] < s[j] || isNaN(s[i]) && !isNaN(s[j])
}
func (s floatSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s stringSlice) Len() int           { return len(s) }
func (s stringSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s stringSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// byte slices compare lexicographically
func (s bytesSlice) Len() int           { return len(s) }
func (s bytesSlice) Less(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 }
func (s bytesSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// bools: false sorts before true
func (s boolSlice) Len() int           { return len(s) }
func (s boolSlice) Less(i, j int) bool { return !s[i] && s[j] }
func (s boolSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// ---------------------
+
+type intRv struct {
+       v int64
+       r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+       v uint64
+       r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+       v float64
+       r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+       v bool
+       r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+       v string
+       r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+       v []byte
+       r reflect.Value
+}
+type bytesRvSlice []bytesRv
+
+func (p intRvSlice) Len() int           { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p intRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p uintRvSlice) Len() int           { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p uintRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+       return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringRvSlice) Len() int           { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
+func (p stringRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p bytesRvSlice) Len() int           { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, 
p[j].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p boolRvSlice) Len() int           { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
+func (p boolRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// -----------------
+
// bytesI pairs a []byte key (v) with its associated decoded value (i),
// so map entries can be sorted by the byte representation of their key.
type bytesI struct {
	v []byte
	i interface{}
}

// bytesISlice implements sort.Interface over bytesI, ordered lexicographically by v.
type bytesISlice []bytesI

func (s bytesISlice) Len() int           { return len(s) }
func (s bytesISlice) Less(i, j int) bool { return bytes.Compare(s[i].v, s[j].v) < 0 }
func (s bytesISlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// -----------------
+
// set is a tiny slice-backed set of uintptr, used to track circular
// references. A zero element marks a deleted slot that may be reused.
// It is optimized for the common case of holding at most one element.
type set []uintptr

// add inserts v, reporting whether it was already present.
func (s *set) add(v uintptr) (exists bool) {
	x := *s
	if x == nil {
		// first use: allocate with headroom for a few entries
		x = make([]uintptr, 1, 8)
		x[0] = v
		*s = x
		return
	}
	// fast path: single element (the typical case)
	if len(x) == 1 {
		switch j := x[0]; {
		case j == 0: // reuse the deleted slot
			x[0] = v
		case j == v:
			exists = true
		default:
			*s = append(x, v)
		}
		return
	}
	// general case: membership check first
	for _, j := range x {
		if j == v {
			return true
		}
	}
	// then try to reuse a deleted (zero) slot
	for i, j := range x {
		if j == 0 {
			x[i] = v
			return
		}
	}
	// no deleted slot available: append
	*s = append(x, v)
	return
}

// remove deletes v by zeroing its slot, reporting whether it was found.
// NOTE(review): when len == 1, a successful removal still reports
// exists == false; original behavior deliberately kept — confirm callers
// do not rely on the return value in that case.
func (s *set) remove(v uintptr) (exists bool) {
	x := *s
	switch {
	case len(x) == 0:
	case len(x) == 1:
		if x[0] == v {
			x[0] = 0
		}
	default:
		for i, j := range x {
			if j == v {
				x[i] = 0 // zero the slot rather than shifting elements
				return true
			}
		}
	}
	return
}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/d0d9944b/newtmgr/vendor/github.com/ugorji/go/codec/helper_internal.go
----------------------------------------------------------------------
diff --git a/newtmgr/vendor/github.com/ugorji/go/codec/helper_internal.go 
b/newtmgr/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 0000000..dea981f
--- /dev/null
+++ b/newtmgr/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE 
file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to different environment is easy (just update functions).
+
+import (
+       "errors"
+       "fmt"
+       "math"
+       "reflect"
+)
+
// panicValToErr stores a recovered panic value into *err.
// nil leaves *err untouched; error values pass through unchanged; strings are
// wrapped with errors.New; anything else is formatted via fmt.Errorf("%v", ...).
func panicValToErr(panicVal interface{}, err *error) {
	switch xerr := panicVal.(type) {
	case nil:
		// no panic value: nothing to record
	case error:
		*err = xerr
	case string:
		*err = errors.New(xerr)
	default:
		*err = fmt.Errorf("%v", panicVal)
	}
}
+
// hIsEmptyValue reports whether v holds an "empty" value: the zero value for
// its kind, or a zero-length container.
//   - deref: follow non-nil pointers/interfaces and test the pointed-to value
//     (nil pointers/interfaces are always empty); when false, only nil-ness
//     of the pointer/interface itself is considered.
//   - checkStruct: recursively require every field of a struct to be empty;
//     when false, a struct is never considered empty.
// An invalid (zero) reflect.Value is treated as empty.
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		if deref {
			if v.IsNil() {
				return true
			}
			return hIsEmptyValue(v.Elem(), deref, checkStruct)
		} else {
			return v.IsNil()
		}
	case reflect.Struct:
		if !checkStruct {
			return false
		}
		// return true if all fields are empty. else return false.
		// we cannot use equality check, because some fields may be maps/slices/etc
		// and consequently the structs are not comparable.
		// return v.Interface() == reflect.Zero(v.Type()).Interface()
		for i, n := 0, v.NumField(); i < n; i++ {
			if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
				return false
			}
		}
		return true
	}
	// chan, func, unsafe pointer, etc. are never considered empty here
	return false
}
+
// isEmptyValue reports whether v is empty for omitempty purposes, delegating
// to hIsEmptyValue with the package-level policy flags derefForIsEmptyValue
// and checkStructForEmptyValue (see hIsEmptyValue for their semantics).
func isEmptyValue(v reflect.Value) bool {
	return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
}
+
// pruneSignExt returns how many leading sign-extension bytes can be dropped
// from the big-endian integer bytes v without changing its value.
// For a non-negative value (pos), leading 0x00 bytes are prunable as long as
// the byte after them has its high bit clear; for a negative value, leading
// 0xff bytes are prunable as long as the next byte has its high bit set.
// Inputs shorter than 2 bytes are never pruned.
func pruneSignExt(v []byte, pos bool) (n int) {
	if len(v) < 2 {
		return 0
	}
	switch {
	case pos && v[0] == 0:
		for v[n] == 0 && n+1 < len(v) && v[n+1]&(1<<7) == 0 {
			n++
		}
	case !pos && v[0] == 0xff:
		for v[n] == 0xff && n+1 < len(v) && v[n+1]&(1<<7) != 0 {
			n++
		}
	}
	return
}
+
// implementsIntf reports whether typ (or a pointer-related form of it)
// implements the interface type iTyp.
//
// indir encodes how to get from typ to the implementing type:
//   - indir > 0:  typ is a pointer; dereference it indir times.
//   - indir == 0: typ itself implements iTyp.
//   - indir == -1: typ does not implement iTyp, but *typ does.
//
// A nil typ, or a pointer chain deeper than math.MaxInt8, reports (false, 0).
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
	if typ == nil {
		return
	}
	rt := typ
	// The type might be a pointer and we need to keep
	// dereferencing to the base type until we find an implementation.
	for {
		if rt.Implements(iTyp) {
			return true, indir
		}
		if p := rt; p.Kind() == reflect.Ptr {
			indir++
			if indir >= math.MaxInt8 { // insane number of indirections
				return false, 0
			}
			rt = p.Elem()
			continue
		}
		break
	}
	// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
	if typ.Kind() != reflect.Ptr {
		// Not a pointer, but does the pointer work?
		if reflect.PtrTo(typ).Implements(iTyp) {
			return true, -1
		}
	}
	return false, 0
}
+
// halfFloatToFloatBits expands a 16-bit IEEE 754 half-precision bit pattern
// yy into the equivalent 32-bit single-precision bit pattern.
// Culled from OGRE's halfToFloatI
// (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html);
// validate that this function is correct ...
func halfFloatToFloatBits(yy uint16) (d uint32) {
	y := uint32(yy)
	sign := (y >> 15) & 0x01
	exp := (y >> 10) & 0x1f
	mant := y & 0x03ff

	switch {
	case exp == 0 && mant == 0: // plus or minus zero
		return sign << 31
	case exp == 0: // denormalized number: renormalize it
		// shift until the implicit leading bit appears; the uint32 exponent
		// temporarily wraps negative and is corrected by the rebias below
		for mant&0x0400 == 0 {
			mant <<= 1
			exp--
		}
		exp++
		mant &^= 0x0400 // drop the now-implicit leading bit
	case exp == 31: // Inf (mant == 0) or NaN (mant != 0)
		return sign<<31 | 0x7f800000 | mant<<13
	}
	// rebias exponent from half (15) to single (127) and widen the mantissa
	return sign<<31 | (exp+112)<<23 | mant<<13
}
+
// growCap returns a new capacity for a slice, given:
//   - oldCap: current capacity
//   - unit: in-memory size of an element
//   - num: number of elements to add
//
// Rationale: plain append logic (2x below 1024, 1.25x above) causes many
// copies, especially for bytes; bytes.Buffer's model (2*cap + n) is better
// for bytes. The smarter approach below scales growth by the byte size of
// the element type, using three thresholds t1 <= t2 <= t3 (all >= 1024):
//   cap <= t1 -> 2x;  cap <= t2 -> 1.75x;  cap <= t3 -> 1.5x;  else 1.25x
// For unit <= 1 (bytes; also struct{}{} with unit 0) this yields
// 100% growth up to 4K, 75% to 8K, 50% to 16K, and 25% beyond.
func growCap(oldCap, unit, num int) (newCap int) {
	var t1, t2, t3 int // thresholds
	switch {
	case unit <= 1:
		t1, t2, t3 = 4*1024, 8*1024, 16*1024
	case unit < 16:
		t3 = 16 / unit * 1024
		t1 = t3 * 1 / 4
		t2 = t3 * 2 / 4
	default:
		// unit >= 16: always 2x or 1.25x (all thresholds equal)
		t1, t2, t3 = 1024, 1024, 1024
	}

	// growth multiplier numerator (over 4): 8 -> 2x, 7 -> 1.75x, 6 -> 1.5x, 5 -> 1.25x
	var mult int
	switch {
	case oldCap <= t1: // [0,t1]
		mult = 8
	case oldCap > t3: // (t3,infinity]
		mult = 5
	case oldCap <= t2: // (t1,t2]
		mult = 7
	default: // (t2,t3]
		mult = 6
	}
	newCap = mult * oldCap / 4

	if num > 0 {
		newCap += num
	}

	// round up: to a multiple of 64 when > 64, else to a multiple of 16
	if newCap > 64 {
		if rem := newCap % 64; rem != 0 {
			newCap = 64 * (newCap/64 + 1)
		}
	} else {
		if rem := newCap % 16; rem != 0 {
			newCap = 16 * (newCap/16 + 1)
		}
	}
	return
}
+
+func expandSliceValue(s reflect.Value, num int) reflect.Value {
+       if num <= 0 {
+               return s
+       }
+       l0 := s.Len()
+       l1 := l0 + num // new slice length
+       if l1 < l0 {
+               panic("ExpandSlice: slice overflow")
+       }
+       c0 := s.Cap()
+       if l1 <= c0 {
+               return s.Slice(0, l1)
+       }
+       st := s.Type()
+       c1 := growCap(c0, int(st.Elem().Size()), num)
+       s2 := reflect.MakeSlice(st, l1, c1)
+       // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", 
len-new: ", l1)
+       reflect.Copy(s2, s)
+       return s2
+}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/d0d9944b/newtmgr/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
----------------------------------------------------------------------
diff --git a/newtmgr/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go 
b/newtmgr/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000..7c2ffc0
--- /dev/null
+++ b/newtmgr/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,20 @@
+//+build !unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE 
file.
+
+package codec
+
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode (this variant, built without the 'unsafe' tag), it is
// an allocation and copy of v via a plain conversion.
func stringView(v []byte) string {
	return string(v)
}
+
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode (this variant, built without the 'unsafe' tag), it is
// an allocation and copy of v via a plain conversion.
func bytesView(v string) []byte {
	return []byte(v)
}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/d0d9944b/newtmgr/vendor/github.com/ugorji/go/codec/helper_unsafe.go
----------------------------------------------------------------------
diff --git a/newtmgr/vendor/github.com/ugorji/go/codec/helper_unsafe.go 
b/newtmgr/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000..2928e4f
--- /dev/null
+++ b/newtmgr/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,49 @@
+//+build unsafe
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE 
file.
+
+package codec
+
+import (
+       "unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+
// unsafeString mirrors the runtime's string header layout (Data pointer + Len).
type unsafeString struct {
	Data uintptr
	Len  int
}

// unsafeSlice mirrors the runtime's slice header layout (Data pointer + Len + Cap).
type unsafeSlice struct {
	Data uintptr
	Len  int
	Cap  int
}

// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// NOTE(review): the result aliases v's backing array through a uintptr-based
// header, so the caller must keep v alive and unmodified while the string is
// in use; the GC does not track the Data field — confirm callers honor this.
func stringView(v []byte) string {
	if len(v) == 0 {
		return ""
	}

	bx := (*unsafeSlice)(unsafe.Pointer(&v))
	sx := unsafeString{bx.Data, bx.Len}
	return *(*string)(unsafe.Pointer(&sx))
}
+
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// An empty string yields the shared zeroByteSlice (declared elsewhere in this
// package). NOTE(review): the result aliases v's backing bytes via a
// uintptr-based header with Cap == Len; writing through it would mutate an
// (immutable) string — callers must treat the result as read-only.
func bytesView(v string) []byte {
	if len(v) == 0 {
		return zeroByteSlice
	}

	sx := (*unsafeString)(unsafe.Pointer(&v))
	bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
	return *(*[]byte)(unsafe.Pointer(&bx))
}

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/d0d9944b/newtmgr/vendor/github.com/ugorji/go/codec/json.go
----------------------------------------------------------------------
diff --git a/newtmgr/vendor/github.com/ugorji/go/codec/json.go 
b/newtmgr/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000..a04dfcb
--- /dev/null
+++ b/newtmgr/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1213 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE 
file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you 
cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+//   - we cannot use strconv.Quote and strconv.Unquote because json 
quotes/unquotes differently.
+//     We implement it here.
+//   - Also, strconv.ParseXXX for floats and integers
+//     - only works on strings resulting in unnecessary allocation and 
[]byte-string conversion.
+//     - it does a lot of redundant checks, because json numbers are simpler 
than what it supports.
+//   - We parse numbers (floats and integers) directly here.
+//     We only delegate parsing floats if it is a hairy float which could 
cause a loss of precision.
+//     In that case, we delegate to strconv.ParseFloat.
+//
+// Note:
+//   - encode does not beautify. There is no whitespace when encoding.
+//   - rpc calls which take single integer arguments or write single numeric 
arguments will need care.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of 
(en|de)cDriver)
+// MUST not call one another.
+
+import (
+       "bytes"
+       "encoding/base64"
+       "fmt"
+       "reflect"
+       "strconv"
+       "unicode/utf16"
+       "unicode/utf8"
+)
+
+//--------------------------------
+
+var (
+       jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 
'n', 'u', 'l', 'l'}
+
+       jsonFloat64Pow10 = [...]float64{
+               1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+               1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+               1e20, 1e21, 1e22,
+       }
+
+       jsonUint64Pow10 = [...]uint64{
+               1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+               1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+       }
+
+       // jsonTabs and jsonSpaces are used as caches for indents
+       jsonTabs, jsonSpaces string
+)
+
+const (
+       // jsonUnreadAfterDecNum controls whether we unread after decoding a 
number.
+       //
+       // instead of unreading, just update d.tok (iff it's not a whitespace 
char)
+       // However, doing this means that we may HOLD onto some data which 
belongs to another stream.
+       // Thus, it is safest to unread the data when done.
+       // keep behind a constant flag for now.
+       jsonUnreadAfterDecNum = true
+
+       // If !jsonValidateSymbols, decoding will be faster, by skipping some 
checks:
+       //   - If we see first character of null, false or true,
+       //     do not validate subsequent characters.
+       //   - e.g. if we see a n, assume null and skip next 3 characters,
+       //     and do not validate they are ull.
+       // P.S. Do not expect a significant decoding boost from this.
+       jsonValidateSymbols = true
+
+       // if jsonTruncateMantissa, truncate mantissa if trailing 0's.
+       // This is important because it could allow some floats to be decoded 
without
+       // deferring to strconv.ParseFloat.
+       jsonTruncateMantissa = true
+
+       // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is 
an overflow
+       jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+
+       // if mantissa >= jsonNumUintMaxVal, this is an overflow
+       jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+       // jsonNumDigitsUint64Largest = 19
+
+       jsonSpacesOrTabsLen = 128
+)
+
+func init() {
+       var bs [jsonSpacesOrTabsLen]byte
+       for i := 0; i < jsonSpacesOrTabsLen; i++ {
+               bs[i] = ' '
+       }
+       jsonSpaces = string(bs[:])
+
+       for i := 0; i < jsonSpacesOrTabsLen; i++ {
+               bs[i] = '\t'
+       }
+       jsonTabs = string(bs[:])
+}
+
+type jsonEncDriver struct {
+       e  *Encoder
+       w  encWriter
+       h  *JsonHandle
+       b  [64]byte // scratch
+       bs []byte   // scratch
+       se setExtWrapper
+       ds string // indent string
+       dl uint16 // indent level
+       dt bool   // indent using tabs
+       d  bool   // indent
+       c  containerState
+       noBuiltInTypes
+}
+
+// indent is done as below:
+//   - newline and indent are added before each mapKey or arrayElem
+//   - newline and indent are added before each ending,
+//     except there was no entry (so we can have {} or [])
+
+func (e *jsonEncDriver) sendContainerState(c containerState) {
+       // determine whether to output separators
+       if c == containerMapKey {
+               if e.c != containerMapStart {
+                       e.w.writen1(',')
+               }
+               if e.d {
+                       e.writeIndent()
+               }
+       } else if c == containerMapValue {
+               if e.d {
+                       e.w.writen2(':', ' ')
+               } else {
+                       e.w.writen1(':')
+               }
+       } else if c == containerMapEnd {
+               if e.d {
+                       e.dl--
+                       if e.c != containerMapStart {
+                               e.writeIndent()
+                       }
+               }
+               e.w.writen1('}')
+       } else if c == containerArrayElem {
+               if e.c != containerArrayStart {
+                       e.w.writen1(',')
+               }
+               if e.d {
+                       e.writeIndent()
+               }
+       } else if c == containerArrayEnd {
+               if e.d {
+                       e.dl--
+                       if e.c != containerArrayStart {
+                               e.writeIndent()
+                       }
+               }
+               e.w.writen1(']')
+       }
+       e.c = c
+}
+
+func (e *jsonEncDriver) writeIndent() {
+       e.w.writen1('\n')
+       if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen {
+               if e.dt {
+                       e.w.writestr(jsonTabs[:x])
+               } else {
+                       e.w.writestr(jsonSpaces[:x])
+               }
+       } else {
+               for i := uint16(0); i < e.dl; i++ {
+                       e.w.writestr(e.ds)
+               }
+       }
+}
+
+func (e *jsonEncDriver) EncodeNil() {
+       e.w.writeb(jsonLiterals[9:13]) // null
+}
+
+func (e *jsonEncDriver) EncodeBool(b bool) {
+       if b {
+               e.w.writeb(jsonLiterals[0:4]) // true
+       } else {
+               e.w.writeb(jsonLiterals[4:9]) // false
+       }
+}
+
+func (e *jsonEncDriver) EncodeFloat32(f float32) {
+       e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
+}
+
+func (e *jsonEncDriver) EncodeFloat64(f float64) {
+       // e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
+       e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
+}
+
+func (e *jsonEncDriver) EncodeInt(v int64) {
+       if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < 
-(1<<53)) {
+               e.w.writen1('"')
+               e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+               e.w.writen1('"')
+               return
+       }
+       e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeUint(v uint64) {
+       if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
+               e.w.writen1('"')
+               e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+               e.w.writen1('"')
+               return
+       }
+       e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en 
*Encoder) {
+       if v := ext.ConvertExt(rv); v == nil {
+               e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+       } else {
+               en.encode(v)
+       }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
+       // only encodes re.Value (never re.Data)
+       if re.Value == nil {
+               e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil()
+       } else {
+               en.encode(re.Value)
+       }
+}
+
+func (e *jsonEncDriver) EncodeArrayStart(length int) {
+       if e.d {
+               e.dl++
+       }
+       e.w.writen1('[')
+       e.c = containerArrayStart
+}
+
+func (e *jsonEncDriver) EncodeMapStart(length int) {
+       if e.d {
+               e.dl++
+       }
+       e.w.writen1('{')
+       e.c = containerMapStart
+}
+
+func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
+       // e.w.writestr(strconv.Quote(v))
+       e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeSymbol(v string) {
+       // e.EncodeString(c_UTF8, v)
+       e.quoteStr(v)
+}
+
+func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
+       // if encoding raw bytes and RawBytesExt is configured, use it to encode
+       if c == c_RAW && e.se.i != nil {
+               e.EncodeExt(v, 0, &e.se, e.e)
+               return
+       }
+       if c == c_RAW {
+               slen := base64.StdEncoding.EncodedLen(len(v))
+               if cap(e.bs) >= slen {
+                       e.bs = e.bs[:slen]
+               } else {
+                       e.bs = make([]byte, slen)
+               }
+               base64.StdEncoding.Encode(e.bs, v)
+               e.w.writen1('"')
+               e.w.writeb(e.bs)
+               e.w.writen1('"')
+       } else {
+               // e.EncodeString(c, string(v))
+               e.quoteStr(stringView(v))
+       }
+}
+
+func (e *jsonEncDriver) EncodeAsis(v []byte) {
+       e.w.writeb(v)
+}
+
+func (e *jsonEncDriver) quoteStr(s string) {
+       // adapted from std pkg encoding/json
+       const hex = "0123456789abcdef"
+       w := e.w
+       w.writen1('"')
+       start := 0
+       for i := 0; i < len(s); {
+               if b := s[i]; b < utf8.RuneSelf {
+                       if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b 
!= '>' && b != '&' {
+                               i++
+                               continue
+                       }
+                       if start < i {
+                               w.writestr(s[start:i])
+                       }
+                       switch b {
+                       case '\\', '"':
+                               w.writen2('\\', b)
+                       case '\n':
+                               w.writen2('\\', 'n')
+                       case '\r':
+                               w.writen2('\\', 'r')
+                       case '\b':
+                               w.writen2('\\', 'b')
+                       case '\f':
+                               w.writen2('\\', 'f')
+                       case '\t':
+                               w.writen2('\\', 't')
+                       default:
+                               // encode all bytes < 0x20 (except \r, \n).
+                               // also encode < > & to prevent security holes 
when served to some browsers.
+                               w.writestr(`\u00`)
+                               w.writen2(hex[b>>4], hex[b&0xF])
+                       }
+                       i++
+                       start = i
+                       continue
+               }
+               c, size := utf8.DecodeRuneInString(s[i:])
+               if c == utf8.RuneError && size == 1 {
+                       if start < i {
+                               w.writestr(s[start:i])
+                       }
+                       w.writestr(`\ufffd`)
+                       i += size
+                       start = i
+                       continue
+               }
+               // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+               // Both technically valid JSON, but bomb on JSONP, so fix here.
+               if c == '\u2028' || c == '\u2029' {
+                       if start < i {
+                               w.writestr(s[start:i])
+                       }
+                       w.writestr(`\u202`)
+                       w.writen1(hex[c&0xF])
+                       i += size
+                       start = i
+                       continue
+               }
+               i += size
+       }
+       if start < len(s) {
+               w.writestr(s[start:])
+       }
+       w.writen1('"')
+}
+
+//--------------------------------
+
+type jsonNum struct {
+       // bytes            []byte // may have [+-.eE0-9]
+       mantissa         uint64 // where mantissa ends, and maybe dot begins.
+       exponent         int16  // exponent value.
+       manOverflow      bool
+       neg              bool // started with -. No initial sign in the bytes 
above.
+       dot              bool // has dot
+       explicitExponent bool // explicit exponent
+}
+
+func (x *jsonNum) reset() {
+       x.manOverflow = false
+       x.neg = false
+       x.dot = false
+       x.explicitExponent = false
+       x.mantissa = 0
+       x.exponent = 0
+}
+
+// uintExp is called only if exponent > 0.
+func (x *jsonNum) uintExp() (n uint64, overflow bool) {
+       n = x.mantissa
+       e := x.exponent
+       if e >= int16(len(jsonUint64Pow10)) {
+               overflow = true
+               return
+       }
+       n *= jsonUint64Pow10[e]
+       if n < x.mantissa || n > jsonNumUintMaxVal {
+               overflow = true
+               return
+       }
+       return
+       // for i := int16(0); i < e; i++ {
+       //      if n >= jsonNumUintCutoff {
+       //              overflow = true
+       //              return
+       //      }
+       //      n *= 10
+       // }
+       // return
+}
+
+// these constants are only used within floatVal.
+// They are brought out, so that floatVal can be inlined.
+const (
+       jsonUint64MantissaBits = 52
+       jsonMaxExponent        = int16(len(jsonFloat64Pow10)) - 1
+)
+
+func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
+       // We do not want to lose precision.
+       // Consequently, we will delegate to strconv.ParseFloat if any of the 
following happen:
+       //    - There are more digits than in math.MaxUint64: 
18446744073709551615 (20 digits)
+       //      We expect up to 99.... (19 digits)
+       //    - The mantissa cannot fit into a 52 bits of uint64
+//    - The exponent is beyond our scope, i.e. beyond 22.
+       parseUsingStrConv = x.manOverflow ||
+               x.exponent > jsonMaxExponent ||
+               (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
+               x.mantissa>>jsonUint64MantissaBits != 0
+
+       if parseUsingStrConv {
+               return
+       }
+
+       // all good. so handle parse here.
+       f = float64(x.mantissa)
+       // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
+       if x.neg {
+               f = -f
+       }
+       if x.exponent > 0 {
+               f *= jsonFloat64Pow10[x.exponent]
+       } else if x.exponent < 0 {
+               f /= jsonFloat64Pow10[-x.exponent]
+       }
+       return
+}
+
+type jsonDecDriver struct {
+       noBuiltInTypes
+       d *Decoder
+       h *JsonHandle
+       r decReader
+
+       c containerState
+       // tok is used to store the token read right after skipWhiteSpace.
+       tok uint8
+
+       bstr [8]byte  // scratch used for string \UXXX parsing
+       b    [64]byte // scratch, used for parsing strings or numbers
+       b2   [64]byte // scratch, used only for decodeBytes (after base64)
+       bs   []byte   // scratch. Initialized from b. Used for parsing strings 
or numbers.
+
+       se setExtWrapper
+
+       n jsonNum
+}
+
+func jsonIsWS(b byte) bool {
+       return b == ' ' || b == '\t' || b == '\r' || b == '\n'
+}
+
+// // This will skip whitespace characters and return the next byte to read.
+// // The next byte determines what the value will be one of.
+// func (d *jsonDecDriver) skipWhitespace() {
+//     // fast-path: do not enter loop. Just check first (in case no 
whitespace).
+//     b := d.r.readn1()
+//     if jsonIsWS(b) {
+//             r := d.r
+//             for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+//             }
+//     }
+//     d.tok = b
+// }
+
+func (d *jsonDecDriver) uncacheRead() {
+       if d.tok != 0 {
+               d.r.unreadn1()
+               d.tok = 0
+       }
+}
+
+func (d *jsonDecDriver) sendContainerState(c containerState) {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       var xc uint8 // char expected
+       if c == containerMapKey {
+               if d.c != containerMapStart {
+                       xc = ','
+               }
+       } else if c == containerMapValue {
+               xc = ':'
+       } else if c == containerMapEnd {
+               xc = '}'
+       } else if c == containerArrayElem {
+               if d.c != containerArrayStart {
+                       xc = ','
+               }
+       } else if c == containerArrayEnd {
+               xc = ']'
+       }
+       if xc != 0 {
+               if d.tok != xc {
+                       d.d.errorf("json: expect char '%c' but got char '%c'", 
xc, d.tok)
+               }
+               d.tok = 0
+       }
+       d.c = c
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if d.tok == '}' || d.tok == ']' {
+               // d.tok = 0 // only checking, not consuming
+               return true
+       }
+       return false
+}
+
+func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
+       bs := d.r.readx(int(toIdx - fromIdx))
+       d.tok = 0
+       if jsonValidateSymbols {
+               if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
+                       d.d.errorf("json: expecting %s: got %s", 
jsonLiterals[fromIdx:toIdx], bs)
+                       return
+               }
+       }
+}
+
+func (d *jsonDecDriver) TryDecodeAsNil() bool {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if d.tok == 'n' {
+               d.readStrIdx(10, 13) // ull
+               return true
+       }
+       return false
+}
+
+func (d *jsonDecDriver) DecodeBool() bool {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if d.tok == 'f' {
+               d.readStrIdx(5, 9) // alse
+               return false
+       }
+       if d.tok == 't' {
+               d.readStrIdx(1, 4) // rue
+               return true
+       }
+       d.d.errorf("json: decode bool: got first char %c", d.tok)
+       return false // "unreachable"
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if d.tok != '{' {
+               d.d.errorf("json: expect char '%c' but got char '%c'", '{', 
d.tok)
+       }
+       d.tok = 0
+       d.c = containerMapStart
+       return -1
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if d.tok != '[' {
+               d.d.errorf("json: expect char '%c' but got char '%c'", '[', 
d.tok)
+       }
+       d.tok = 0
+       d.c = containerArrayStart
+       return -1
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+       // check container type by checking the first char
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       if b := d.tok; b == '{' {
+               return valueTypeMap
+       } else if b == '[' {
+               return valueTypeArray
+       } else if b == 'n' {
+               return valueTypeNil
+       } else if b == '"' {
+               return valueTypeString
+       }
+       return valueTypeUnset
+       // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
+       // return false // "unreachable"
+}
+
+func (d *jsonDecDriver) decNum(storeBytes bool) {
+       // If it has a '.' or an e|E, decode as a float; else decode as an int.
+       if d.tok == 0 {
+               var b byte
+               r := d.r
+               for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
+               }
+               d.tok = b
+       }
+       b := d.tok
+       var str bool
+       if b == '"' {
+               str = true
+               b = d.r.readn1()
+       }
+       if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
+               d.d.errorf("json: decNum: got first char '%c'", b)
+               return
+       }
+       d.tok = 0
+
+       const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
+       const jsonNumUintMaxVal = 1<<uint64(64) - 1
+
+       n := &d.n
+       r := d.r
+       n.reset()
+       d.bs = d.bs[:0]
+
+       if str && storeBytes {
+               d.bs = append(d.bs, '"')
+       }
+
+       // The format of a number is as below:
+       // parsing:     sign? digit* dot? digit* e?  sign? digit*
+       // states:  0   1*    2      3*   4      5*  6     7
+       // We honor this state so we can break correctly.
+       var state uint8 = 0
+       var eNeg bool
+       var e int16
+       var eof bool
+LOOP:
+       for !eof {
+               // fmt.Printf("LOOP: b: %q\n", b)
+               switch b {
+               case '+':
+                       switch state {
+                       case 0:
+                               state = 2
+                               // do not add sign to the slice ...
+                               b, eof = r.readn1eof()
+                               continue
+                       case 6: // typ = jsonNumFloat
+                               state = 7
+                       default:
+                               break LOOP
+                       }
+               case '-':
+                       switch state {
+                       case 0:
+                               state = 2
+                               n.neg = true
+                               // do not add sign to the slice ...
+                               b, eof = r.readn1eof()
+                               continue
+                       case 6: // typ = jsonNumFloat
+                               eNeg = true
+                               state = 7
+                       default:
+                               break LOOP
+                       }
+               case '.':
+                       switch state {
+                       case 0, 2: // typ = jsonNumFloat
+                               state = 4
+                               n.dot = true
+                       default:
+                               break LOOP
+                       }
+               case 'e', 'E':
+                       switch state {
+                       case 0, 2, 4: // typ = jsonNumFloat
+                               state = 6
+                               // n.mantissaEndIndex = int16(len(n.bytes))
+                               n.explicitExponent = true
+                       default:
+                               break LOOP
+                       }
+               case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+                       switch state {
+                       case 0:
+                               state = 2
+                               fallthrough
+                       case 2:
+                               fallthrough
+                       case 4:
+                               if n.dot {
+                                       n.exponent--
+                               }
+                               if n.mantissa >= jsonNumUintCutoff {
+                                       n.manOverflow = true
+                                       break
+                               }
+                               v := uint64(b - '0')
+                               n.mantissa *= 10
+                               if v != 0 {
+                                       n1 := n.mantissa + v
+                                       if n1 < n.mantissa || n1 > 
jsonNumUintMaxVal {
+                                               n.manOverflow = true // n+v 
overflows
+                                               break
+                                       }
+                                       n.mantissa = n1
+                               }
+                       case 6:
+                               state = 7
+                               fallthrough
+                       case 7:
+                               if !(b == '0' && e == 0) {
+                                       e = e*10 + int16(b-'0')
+                               }
+                       default:
+                               break LOOP
+                       }
+               case '"':
+                       if str {
+                               if storeBytes {
+                                       d.bs = append(d.bs, '"')
+                               }
+                               b, eof = r.readn1eof()
+                       }
+                       break LOOP
+               default:
+                       break LOOP
+               }
+               if storeBytes {
+                       d.bs = append(d.bs, b)
+               }
+               b, eof = r.readn1eof()
+       }
+
+       if jsonTruncateMantissa && n.mantissa != 0 {
+               for n.mantissa%10 == 0 {
+                       n.mantissa /= 10
+                       n.exponent++
+               }
+       }
+
+       if e != 0 {
+               if eNeg {
+                       n.exponent -= e
+               } else {
+                       n.exponent += e
+               }
+       }
+
+       // d.n = n
+
+       if !eof {
+               if jsonUnreadAfterDecNum {
+                       r.unreadn1()
+               } else {
+                       if !jsonIsWS(b) {
+                               d.tok = b
+                       }
+               }
+       }
+       // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, 
mantissaEndIndex: %v\n",
+       //      n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
+       return
+}
+
+func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
+       d.decNum(false)
+       n := &d.n
+       if n.manOverflow {
+               d.d.errorf("json: overflow integer after: %v", n.mantissa)
+               return
+       }
+       var u uint64
+       if n.exponent == 0 {
+               u = n.mantissa
+       } else if n.exponent < 0 {
+               d.d.errorf("json: fractional integer")
+               return
+       } else if n.exponent > 0 {
+               var overflow bool
+               if u, overflow = n.uintExp(); overflow {
+                       d.d.errorf("json: overflow integer")
+                       return
+               }
+       }
+       i = int64(u)
+       if n.neg {
+               i = -i
+       }
+       if chkOvf.Int(i, bitsize) {
+               d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+               return
+       }
+       // fmt.Printf("DecodeInt: %v\n", i)
+       return
+}
+
+// floatVal MUST only be called after a decNum, as d.bs now contains the bytes 
of the number
+func (d *jsonDecDriver) floatVal() (f float64) {
+       f, useStrConv := d.n.floatVal()
+       if useStrConv {
+               var err error
+               if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != 
nil {
+                       panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
+               }
+               if d.n.neg {
+                       f = -f
+               }
+       }
+       return
+}
+
+func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
+       d.decNum(false)
+       n := &d.n
+       if n.neg {
+               d.d.errorf("json: unsigned integer cannot be negative")
+               return
+       }
+       if n.manOverflow {
+               d.d.errorf("json: overflow integer after: %v", n.mantissa)
+               return
+       }
+       if n.exponent == 0 {
+               u = n.mantissa
+       } else if n.exponent < 0 {
+               d.d.errorf("json: fractional integer")
+               return
+       } else if n.exponent > 0 {
+               var overflow bool
+               if u, overflow = n.uintExp(); overflow {
+                       d.d.errorf("json: overflow integer")
+                       return
+               }
+       }
+       if chkOvf.Uint(u, bitsize) {
+               d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
+               return
+       }
+       // fmt.Printf("DecodeUint: %v\n", u)
+       return
+}
+
+func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+       d.decNum(true)
+       f = d.floatVal()
+       if chkOverflow32 && chkOvf.Float32(f) {
+               d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
+               return
+       }
+       return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) 
(realxtag uint64) {
+       if ext == nil {
+               re := rv.(*RawExt)
+               re.Tag = xtag
+               d.d.decode(&re.Value)
+       } else {
+               var v interface{}
+               d.d.decode(&v)
+               ext.UpdateExt(rv, v)
+       }
+       return
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut 
[]byte) {
+       // if decoding into raw bytes, and the RawBytesExt is configured, use 
it to decode.
+       if !isstring && d.se.i != nil {
+               bsOut = bs
+               d.DecodeExt(&bsOut, 0, &d.se)
+               return
+       }
+       d.appendStringAsBytes()
+       // if isstring, then just return the bytes, even if it is using the 
scratch buffer.
+       // the bytes will be converted to a string as needed.
+       if isstring {
+               return d.bs
+       }
+       bs0 := d.bs
+       slen := base64.StdEncoding.DecodedLen(len(bs0))
+       if slen <= cap(bs) {
+               bsOut = bs[:slen]
+       } else if zerocopy && slen <= cap(d.b2) {
+               bsOut = d.b2[:slen]
+       } else {
+               bsOut = make([]byte, slen)
+       }
+       slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
+       if err != nil {
+               d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, 
err)
+               return nil
+       }
+       if slen != slen2 {
+               bsOut = bsOut[:slen2]
+       }
+       return
+}
+
+func (d *jsonDecDriver) DecodeString() (s string) {
+       d.appendStringAsBytes()
+       // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+       if d.c == containerMapKey {
+               return d.d.string(d.bs)
+       }
+       return string(d.bs)
+}
+
// appendStringAsBytes reads the next json string from the stream into
// d.bs (the driver's scratch buffer), resolving all escape sequences,
// including \uXXXX escapes and utf16 surrogate pairs. The next
// non-whitespace token must be the opening '"'; on any other token an
// error is reported via d.d.errorf.
func (d *jsonDecDriver) appendStringAsBytes() {
	// skip whitespace to find the next token if none is buffered.
	if d.tok == 0 {
		var b byte
		r := d.r
		for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
		}
		d.tok = b
	}
	if d.tok != '"' {
		d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
	}
	d.tok = 0 // token consumed

	v := d.bs[:0]
	var c uint8
	r := d.r
	for {
		c = r.readn1()
		if c == '"' {
			// closing quote: end of string.
			break
		} else if c == '\\' {
			c = r.readn1()
			switch c {
			case '"', '\\', '/', '\'':
				v = append(v, c)
			case 'b':
				v = append(v, '\b')
			case 'f':
				v = append(v, '\f')
			case 'n':
				v = append(v, '\n')
			case 'r':
				v = append(v, '\r')
			case 't':
				v = append(v, '\t')
			case 'u':
				rr := d.jsonU4(false)
				// fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
				if utf16.IsSurrogate(rr) {
					// high surrogate: combine with the \uXXXX low
					// surrogate that must follow.
					rr = utf16.DecodeRune(rr, d.jsonU4(true))
				}
				w2 := utf8.EncodeRune(d.bstr[:], rr)
				v = append(v, d.bstr[:w2]...)
			default:
				d.d.errorf("json: unsupported escaped value: %c", c)
			}
		} else {
			v = append(v, c)
		}
	}
	d.bs = v
}
+
+func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
+       r := d.r
+       if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
+               d.d.errorf(`json: unquoteStr: invalid unicode sequence. 
Expecting \u`)
+               return 0
+       }
+       // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
+       var u uint32
+       for i := 0; i < 4; i++ {
+               v := r.readn1()
+               if '0' <= v && v <= '9' {
+                       v = v - '0'
+               } else if 'a' <= v && v <= 'z' {
+                       v = v - 'a' + 10
+               } else if 'A' <= v && v <= 'Z' {
+                       v = v - 'A' + 10
+               } else {
+                       d.d.errorf(`json: unquoteStr: invalid hex char in \u 
unicode sequence: %q`, v)
+                       return 0
+               }
+               u = u*16 + uint32(v)
+       }
+       return rune(u)
+}
+
// DecodeNaked decodes the next json value into the Decoder's naked
// value holder (d.d.n), setting its type tag (z.v) and the matching
// payload field. For '{' and '[' the lookahead token is deliberately
// NOT consumed: kInterfaceNaked follows up with ReadMapStart /
// ReadArrayStart.
func (d *jsonDecDriver) DecodeNaked() {
	z := &d.d.n
	// var decodeFurther bool

	// skip whitespace to find the next token if none is buffered.
	if d.tok == 0 {
		var b byte
		r := d.r
		for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
		}
		d.tok = b
	}
	switch d.tok {
	case 'n':
		d.readStrIdx(10, 13) // ull
		z.v = valueTypeNil
	case 'f':
		d.readStrIdx(5, 9) // alse
		z.v = valueTypeBool
		z.b = false
	case 't':
		d.readStrIdx(1, 4) // rue
		z.v = valueTypeBool
		z.b = true
	case '{':
		z.v = valueTypeMap
		// d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart
		// decodeFurther = true
	case '[':
		z.v = valueTypeArray
		// d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart
		// decodeFurther = true
	case '"':
		z.v = valueTypeString
		z.s = d.DecodeString()
	default: // number
		d.decNum(true)
		n := &d.n
		// if the string had any of [.eE], then decode as float.
		switch {
		case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
			z.v = valueTypeFloat
			z.f = d.floatVal()
		case n.exponent == 0:
			// plain integer: sign and handle config (SignedInteger)
			// select int vs uint representation.
			u := n.mantissa
			switch {
			case n.neg:
				z.v = valueTypeInt
				z.i = -int64(u)
			case d.h.SignedInteger:
				z.v = valueTypeInt
				z.i = int64(u)
			default:
				z.v = valueTypeUint
				z.u = u
			}
		default:
			// positive exponent: fold into the mantissa if it fits
			// (uintExp), else fall back to float on overflow.
			u, overflow := n.uintExp()
			switch {
			case overflow:
				z.v = valueTypeFloat
				z.f = d.floatVal()
			case n.neg:
				z.v = valueTypeInt
				z.i = -int64(u)
			case d.h.SignedInteger:
				z.v = valueTypeInt
				z.i = int64(u)
			default:
				z.v = valueTypeUint
				z.u = u
			}
		}
		// fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
	}
	// if decodeFurther {
	//      d.s.sc.retryRead()
	// }
	return
}
+
+//----------------------
+
// JsonHandle is a handle for JSON encoding format.
//
// Json is comprehensively supported:
//    - decodes numbers into interface{} as int, uint or float64
//    - configurable way to encode/decode []byte .
//      by default, encodes and decodes []byte using base64 Std Encoding
//    - UTF-8 support for encoding and decoding
//
// It has better performance than the json library in the standard library,
// by leveraging the performance improvements of the codec library and
// minimizing allocations.
//
// In addition, it doesn't read more bytes than necessary during a decode, which allows
// reading multiple values from a stream containing json and non-json content.
// For example, a user can read a json value, then a cbor value, then a msgpack value,
// all from the same stream in sequence.
type JsonHandle struct {
	textEncodingType
	BasicHandle
	// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
	// If not configured, raw bytes are encoded to/from base64 text.
	RawBytesExt InterfaceExt

	// Indent indicates how a value is encoded.
	//   - If positive, indent by that number of spaces.
	//   - If negative, indent by that number of tabs.
	//   - If zero (default), no indentation is applied.
	Indent int8

	// IntegerAsString controls how integers (signed and unsigned) are encoded.
	//
	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
	// This can be mitigated by configuring how to encode integers.
	//
	// IntegerAsString interprets the following values:
	//   - if 'L', then encode integers > 2^53 as a json string.
	//   - if 'A', then encode all integers as a json string
	//             containing the exact integer representation as a decimal.
	//   - else    encode all integers as a json number (default)
	IntegerAsString uint8
}
+
+func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext 
InterfaceExt) (err error) {
+       return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
+       hd := jsonEncDriver{e: e, h: h}
+       hd.bs = hd.b[:0]
+
+       hd.reset()
+
+       return &hd
+}
+
+func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
+       // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
+       hd := jsonDecDriver{d: d, h: h}
+       hd.bs = hd.b[:0]
+       hd.reset()
+       return &hd
+}
+
+func (e *jsonEncDriver) reset() {
+       e.w = e.e.w
+       e.se.i = e.h.RawBytesExt
+       if e.bs != nil {
+               e.bs = e.bs[:0]
+       }
+       e.d, e.dt, e.dl, e.ds = false, false, 0, ""
+       e.c = 0
+       if e.h.Indent > 0 {
+               e.d = true
+               e.ds = jsonSpaces[:e.h.Indent]
+       } else if e.h.Indent < 0 {
+               e.d = true
+               e.dt = true
+               e.ds = jsonTabs[:-(e.h.Indent)]
+       }
+}
+
+func (d *jsonDecDriver) reset() {
+       d.r = d.d.r
+       d.se.i = d.h.RawBytesExt
+       if d.bs != nil {
+               d.bs = d.bs[:0]
+       }
+       d.c, d.tok = 0, 0
+       d.n.reset()
+}
+
// jsonEncodeTerminate is written after each rpc-encoded message: a
// single space, which in json safely delimits a trailing number.
var jsonEncodeTerminate = []byte{' '}

// rpcEncodeTerminate returns the byte sequence that terminates an
// rpc-encoded json message.
func (h *JsonHandle) rpcEncodeTerminate() []byte {
	return jsonEncodeTerminate
}
+
// compile-time checks that the json drivers satisfy the codec interfaces.
var _ decDriver = (*jsonDecDriver)(nil)
var _ encDriver = (*jsonEncDriver)(nil)

Reply via email to