helper_unsafe.go 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350
  1. // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
  2. // Use of this source code is governed by a MIT license found in the LICENSE file.
  3. //go:build !safe && !codec.safe && !appengine && go1.9
  4. // +build !safe,!codec.safe,!appengine,go1.9
  5. // minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
  6. // - typedmemclr was introduced in go 1.8
  7. // - mapassign_fastXXX was introduced in go 1.9
  8. // etc
  9. package codec
  10. import (
  11. "reflect"
  12. _ "runtime" // needed for go linkname(s)
  13. "sync/atomic"
  14. "time"
  15. "unsafe"
  16. )
  17. // This file has unsafe variants of some helper functions.
  18. // MARKER: See helper_unsafe.go for the usage documentation.
  19. // There are a number of helper_*unsafe*.go files.
  20. //
  21. // - helper_unsafe
  22. // unsafe variants of dependent functions
  23. // - helper_unsafe_compiler_gc (gc)
  24. // unsafe variants of dependent functions which cannot be shared with gollvm or gccgo
  25. // - helper_not_unsafe_not_gc (gccgo/gollvm or safe)
  26. // safe variants of functions in helper_unsafe_compiler_gc
  27. // - helper_not_unsafe (safe)
  28. // safe variants of functions in helper_unsafe
  29. // - helper_unsafe_compiler_not_gc (gccgo, gollvm)
  30. // unsafe variants of functions/variables which non-standard compilers need
  31. //
  32. // This way, we can judiciously use build tags to include the right set of files
  33. // for any compiler, and make it run optimally in unsafe mode.
  34. //
  35. // As of March 2021, we cannot differentiate whether running with gccgo or gollvm
  36. // using a build constraint, as both satisfy 'gccgo' build tag.
  37. // Consequently, we must use the lowest common denominator to support both.
  38. // For reflect.Value code, we decided to do the following:
  39. // - if we know the kind, we can elide conditional checks for
  40. // - SetXXX (Int, Uint, String, Bool, etc)
  41. // - SetLen
  42. //
  43. // We can also optimize
  44. // - IsNil
  45. // MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
  46. // - rvCopySlice: called by decode if rvGrowSlice did not set new slice into pointer to orig slice.
  47. // however, helper_unsafe sets it, so no need to call rvCopySlice later
  48. // - rvSlice: same as above
// safeMode is false in this file: the unsafe implementations are active.
const safeMode = false

// helperUnsafeDirectAssignMapEntry says that we should not copy the pointer in the map
// to another value during mapRange/iteration and mapGet calls, but directly assign it.
//
// The only caller of mapRange/iteration is encode.
// Here, we just walk through the values and encode them.
//
// The only caller of mapGet is decode.
// Here, it does a Get if the underlying value is a pointer, and decodes into that.
//
// For both users, we are very careful NOT to modify or keep the pointers around.
// Consequently, it is ok to take advantage of the fact that the map is not modified
// during an iteration, and we can just "peek" at the internal value in the map and use it.
const helperUnsafeDirectAssignMapEntry = true

// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
const (
	unsafeFlagStickyRO = 1 << 5
	unsafeFlagEmbedRO  = 1 << 6
	unsafeFlagIndir    = 1 << 7
	unsafeFlagAddr     = 1 << 8
	unsafeFlagRO       = unsafeFlagStickyRO | unsafeFlagEmbedRO
	// unsafeFlagKindMask = (1 << 5) - 1 // 5 bits for 27 kinds (up to 31)
	// unsafeTypeKindDirectIface = 1 << 5
)

// transientSizeMax below is used in TransientAddr as the backing storage.
//
// Must be >= 16 as the maximum size is a complex128 (or string on 64-bit machines).
const transientSizeMax = 64

// should struct/array support internal strings and slices?
const transientValueHasStringSlice = false
// unsafeString mirrors the runtime layout of a string header
// (data pointer + length), with Data typed as unsafe.Pointer.
type unsafeString struct {
	Data unsafe.Pointer
	Len  int
}

// unsafeSlice mirrors the runtime layout of a slice header
// (data pointer + length + capacity), with Data typed as unsafe.Pointer.
type unsafeSlice struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

// unsafeIntf mirrors the runtime layout of an interface value:
// a type word followed by a data word.
type unsafeIntf struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

// unsafeReflectValue mirrors the layout of reflect.Value:
// the (typ, ptr) interface pair plus the flag word.
// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
type unsafeReflectValue struct {
	unsafeIntf
	flag uintptr
}

// unsafeRuntimeType is a prefix view of the runtime type descriptor;
// only the leading size field is accessed in this file.
// keep in sync with stdlib runtime/type.go
type unsafeRuntimeType struct {
	size uintptr
	// ... many other fields here
}
// unsafeZeroAddr and unsafeZeroSlice point to a read-only block of memory
// used for setting a zero value for most types or creating a read-only
// zero value for a given type.
var (
	unsafeZeroAddr  = unsafe.Pointer(&unsafeZeroArr[0])
	unsafeZeroSlice = unsafeSlice{unsafeZeroAddr, 0, 0}
)
  108. // We use a scratch memory and an unsafeSlice for transient values:
  109. //
  110. // unsafeSlice is used for standalone strings and slices (outside an array or struct).
  111. // scratch memory is used for other kinds, based on contract below:
  112. // - numbers, bool are always transient
  113. // - structs and arrays are transient iff they have no pointers i.e.
  114. // no string, slice, chan, func, interface, map, etc only numbers and bools.
  115. // - slices and strings are transient (using the unsafeSlice)
  116. type unsafePerTypeElem struct {
  117. arr [transientSizeMax]byte // for bool, number, struct, array kinds
  118. slice unsafeSlice // for string and slice kinds
  119. }
  120. func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
  121. if k == reflect.String || k == reflect.Slice {
  122. x.slice = unsafeSlice{} // memclr
  123. return unsafe.Pointer(&x.slice)
  124. }
  125. x.arr = [transientSizeMax]byte{} // memclr
  126. return unsafe.Pointer(&x.arr)
  127. }
// perType holds the two transient scratch elements used while decoding.
type perType struct {
	elems [2]unsafePerTypeElem
}

// decPerType is the decode-side wrapper around perType.
type decPerType struct {
	perType
}

// encPerType is a stateless marker used on the encode side.
type encPerType struct{}

// TransientAddrK is used for getting a *transient* value to be decoded into,
// which will right away be used for something else.
//
// See notes in helper.go about "Transient values during decoding"
func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[0].addrFor(k))
}

// TransientAddr2K is like TransientAddrK, but uses the second scratch element,
// allowing two transient values to be live at the same time.
func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[1].addrFor(k))
}

// AddressableRO returns a read-only addressable view of v (see rvAddressableReadonly).
func (encPerType) AddressableRO(v reflect.Value) reflect.Value {
	return rvAddressableReadonly(v)
}
  148. // byteAt returns the byte given an index which is guaranteed
  149. // to be within the bounds of the slice i.e. we defensively
  150. // already verified that the index is less than the length of the slice.
  151. func byteAt(b []byte, index uint) byte {
  152. // return b[index]
  153. return *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index)))
  154. }
  155. func byteSliceOf(b []byte, start, end uint) []byte {
  156. s := (*unsafeSlice)(unsafe.Pointer(&b))
  157. s.Data = unsafe.Pointer(uintptr(s.Data) + uintptr(start))
  158. s.Len = int(end - start)
  159. s.Cap -= int(start)
  160. return b
  161. }
  162. // func byteSliceWithLen(b []byte, length uint) []byte {
  163. // (*unsafeSlice)(unsafe.Pointer(&b)).Len = int(length)
  164. // return b
  165. // }
  166. func setByteAt(b []byte, index uint, val byte) {
  167. // b[index] = val
  168. *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) = val
  169. }
  170. // stringView returns a view of the []byte as a string.
  171. // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
  172. // In regular safe mode, it is an allocation and copy.
  173. func stringView(v []byte) string {
  174. return *(*string)(unsafe.Pointer(&v))
  175. }
  176. // bytesView returns a view of the string as a []byte.
  177. // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
  178. // In regular safe mode, it is an allocation and copy.
  179. func bytesView(v string) (b []byte) {
  180. sx := (*unsafeString)(unsafe.Pointer(&v))
  181. bx := (*unsafeSlice)(unsafe.Pointer(&b))
  182. bx.Data, bx.Len, bx.Cap = sx.Data, sx.Len, sx.Len
  183. return
  184. }
  185. func byteSliceSameData(v1 []byte, v2 []byte) bool {
  186. return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
  187. }
  188. // MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
  189. // These functions expect that the bound check already occured and are are valid.
  190. // copy(...) does a number of checks which are unnecessary in this situation when in bounds.
  191. func okBytes2(b []byte) [2]byte {
  192. return *((*[2]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
  193. }
  194. func okBytes3(b []byte) [3]byte {
  195. return *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
  196. }
  197. func okBytes4(b []byte) [4]byte {
  198. return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
  199. }
  200. func okBytes8(b []byte) [8]byte {
  201. return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
  202. }
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
//
// rv is only populated on the slow path (when the interface data word
// is non-nil and reflection was consulted).
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
	// fast path: a nil data word in the interface means nil for every kind
	var ui = (*unsafeIntf)(unsafe.Pointer(&v))
	isnil = ui.ptr == nil
	if !isnil {
		rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
	}
	return
}

// unsafeIsNilIntfOrSlice handles the slow path of isNil: for interface and
// slice kinds, the interface data word may be non-nil while the value it
// points at is still nil, so dereference it and compare against nil.
// (Kept as a separate function so the isNil fast path stays inlineable.)
func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
	rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
	tk := rv.Kind()
	isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
	return
}
  220. // return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
  221. // true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
  222. //
  223. // Assumes that v is a reference (map/func/chan/ptr/func)
  224. func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
  225. if v.flag&unsafeFlagIndir != 0 {
  226. return *(*unsafe.Pointer)(v.ptr)
  227. }
  228. return v.ptr
  229. }
  230. func eq4i(i0, i1 interface{}) bool {
  231. v0 := (*unsafeIntf)(unsafe.Pointer(&i0))
  232. v1 := (*unsafeIntf)(unsafe.Pointer(&i1))
  233. return v0.typ == v1.typ && v0.ptr == v1.ptr
  234. }
// rv4iptr builds a reflect.Value from an interface i known to hold a
// non-nil pointer, setting the flag word to kind Ptr directly.
func rv4iptr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindPtr)
	return
}

// rv4istr builds a reflect.Value from an interface i known to hold a string.
// Strings are stored indirectly in an interface, hence the flagIndir bit.
func rv4istr(i interface{}) (v reflect.Value) {
	// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindString) | unsafeFlagIndir
	return
}
// rv2i returns the interface{} held by the reflect.Value, without the
// allocation that rv.Interface() may incur.
func rv2i(rv reflect.Value) (i interface{}) {
	// We tap into implementation details from
	// the source go stdlib reflect/value.go, and trim the implementation.
	//
	// e.g.
	// - a map/ptr is a reference, thus flagIndir is not set on it
	// - an int/slice is not a reference, thus flagIndir is set on it
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
		// reference kinds store the reference directly in the interface word:
		// unwrap the extra indirection before reinterpreting as an interface
		urv.ptr = *(*unsafe.Pointer)(urv.ptr)
	}
	return *(*interface{})(unsafe.Pointer(&urv.unsafeIntf))
}
// rvAddr returns rv.Addr() without reflect's safety checks: it rewrites the
// flag word to kind Ptr (keeping only the read-only bits) and swaps the type
// word to the caller-supplied pointer type.
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = (urv.flag & unsafeFlagRO) | uintptr(reflect.Ptr)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&ptrType))).ptr
	return rv
}
  268. func rvIsNil(rv reflect.Value) bool {
  269. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  270. if urv.flag&unsafeFlagIndir != 0 {
  271. return *(*unsafe.Pointer)(urv.ptr) == nil
  272. }
  273. return urv.ptr == nil
  274. }
// rvSetSliceLen sets the length of the slice held in rv to length.
// It writes through the unsafeString view, whose Data/Len fields overlay
// the first two words of the slice header.
func rvSetSliceLen(rv reflect.Value, length int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	(*unsafeString)(urv.ptr).Len = length
}
// rvZeroAddrK returns an addressable zero reflect.Value of type t (kind k),
// backed by freshly allocated zeroed memory.
func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = unsafeNew(urv.typ)
	return
}

// rvZeroAddrTransientAnyK is like rvZeroAddrK, except it is backed by the
// caller-supplied (transient scratch) memory at addr instead of a fresh allocation.
func rvZeroAddrTransientAnyK(t reflect.Type, k reflect.Kind, addr unsafe.Pointer) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = addr
	return
}
// rvZeroK returns a (non-addressable) zero reflect.Value of type t (kind k),
// avoiding allocation where possible:
//   - reference kinds need no backing memory (nil data word)
//   - values that fit in unsafeZeroArr share the read-only zero block
//   - larger values (struct or array) get freshly allocated zeroed memory
func rvZeroK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	if refBitset.isset(byte(k)) {
		urv.flag = uintptr(k)
	} else if rtsize2(urv.typ) <= uintptr(len(unsafeZeroArr)) {
		urv.flag = uintptr(k) | unsafeFlagIndir
		urv.ptr = unsafeZeroAddr
	} else { // meaning struct or array
		urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
// rvConvert will convert a value to a different type directly,
// ensuring that they still point to the same underlying value.
//
// Only the type word is swapped; ptr and flag are untouched, so this is
// only valid when the two types have compatible memory layouts.
func rvConvert(v reflect.Value, t reflect.Type) reflect.Value {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	return v
}
// rvAddressableReadonly returns an addressable reflect.Value.
//
// Use it within encode calls, when you just want to "read" the underlying ptr
// without modifying the value.
//
// Note that it cannot be used for r/w use, as those non-addressable values
// may have been stored in read-only memory, and trying to write the pointer
// may cause a segfault.
func rvAddressableReadonly(v reflect.Value) reflect.Value {
	// hack to make an addressable value out of a non-addressable one.
	// Assume folks calling it are passing a value that can be addressable, but isn't.
	// This assumes that the flagIndir is already set on it.
	// so we just set the flagAddr bit on the flag (and do not set the flagIndir).
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uv.flag | unsafeFlagAddr // | unsafeFlagIndir
	return v
}
// rtsize2 returns the size field of the runtime type descriptor rt.
func rtsize2(rt unsafe.Pointer) uintptr {
	return ((*unsafeRuntimeType)(rt)).size
}

// rt2id returns a unique id for a reflect.Type: the address of its
// runtime type descriptor (the data word of the reflect.Type interface).
func rt2id(rt reflect.Type) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).ptr)
}

// i2rtid returns the type id of the value held in interface i:
// the address of its runtime type descriptor (the interface type word).
func i2rtid(i interface{}) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
}
// --------------------------

// unsafeCmpZero reports whether the size bytes at ptr are all zero,
// by comparing them (as strings, effectively a memcmp) against a zero buffer.
func unsafeCmpZero(ptr unsafe.Pointer, size int) bool {
	// verified that size is always within right range, so no chance of OOM
	var s1 = unsafeString{ptr, size}
	var s2 = unsafeString{unsafeZeroAddr, size}
	if size > len(unsafeZeroArr) {
		// shared zero block is too small: allocate a zeroed buffer of the right size
		arr := make([]byte, size)
		s2.Data = unsafe.Pointer(&arr[0])
	}
	return *(*string)(unsafe.Pointer(&s1)) == *(*string)(unsafe.Pointer(&s2)) // memcmp
}
// isEmptyValue reports whether v holds its type's zero/empty value.
// The non-recursive fast path compares the value's memory against zero bytes;
// the recursive path dispatches per kind via isEmptyValueFallbackRecur.
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if urv.flag == 0 {
		// invalid (zero) reflect.Value
		return true
	}
	if recursive {
		return isEmptyValueFallbackRecur(urv, v, tinfos)
	}
	return unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
// isEmptyValueFallbackRecur is the per-kind (possibly recursive) path of
// isEmptyValue. urv and v are the same value viewed two ways; tinfos supplies
// cached struct type information, defaulting to defTypeInfos when nil.
func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos *TypeInfos) bool {
	const recursive = true
	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.String:
		return (*unsafeString)(urv.ptr).Len == 0
	case reflect.Slice:
		return (*unsafeSlice)(urv.ptr).Len == 0
	case reflect.Bool:
		return !*(*bool)(urv.ptr)
	case reflect.Int:
		return *(*int)(urv.ptr) == 0
	case reflect.Int8:
		return *(*int8)(urv.ptr) == 0
	case reflect.Int16:
		return *(*int16)(urv.ptr) == 0
	case reflect.Int32:
		return *(*int32)(urv.ptr) == 0
	case reflect.Int64:
		return *(*int64)(urv.ptr) == 0
	case reflect.Uint:
		return *(*uint)(urv.ptr) == 0
	case reflect.Uint8:
		return *(*uint8)(urv.ptr) == 0
	case reflect.Uint16:
		return *(*uint16)(urv.ptr) == 0
	case reflect.Uint32:
		return *(*uint32)(urv.ptr) == 0
	case reflect.Uint64:
		return *(*uint64)(urv.ptr) == 0
	case reflect.Uintptr:
		return *(*uintptr)(urv.ptr) == 0
	case reflect.Float32:
		return *(*float32)(urv.ptr) == 0
	case reflect.Float64:
		return *(*float64)(urv.ptr) == 0
	case reflect.Complex64:
		// all 8 bytes (both float32 components) must be zero
		return unsafeCmpZero(urv.ptr, 8)
	case reflect.Complex128:
		// all 16 bytes (both float64 components) must be zero
		return unsafeCmpZero(urv.ptr, 16)
	case reflect.Struct:
		// return isEmptyStruct(v, tinfos, recursive)
		if tinfos == nil {
			tinfos = defTypeInfos
		}
		ti := tinfos.find(uintptr(urv.typ))
		if ti == nil {
			ti = tinfos.load(v.Type())
		}
		// a struct is empty iff all its bytes are zero
		return unsafeCmpZero(urv.ptr, int(ti.size))
	case reflect.Interface, reflect.Ptr:
		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
		if recursive && !isnil {
			// non-nil pointer/interface: emptiness is determined by the pointed-to value
			return isEmptyValue(v.Elem(), tinfos, recursive)
		}
		return isnil
	case reflect.UnsafePointer:
		return urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
	case reflect.Chan:
		return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
	case reflect.Map:
		return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
	case reflect.Array:
		return v.Len() == 0 ||
			urv.ptr == nil ||
			urv.typ == nil ||
			rtsize2(urv.typ) == 0 ||
			unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
	}
	return false
}
  434. // --------------------------
  435. type structFieldInfos struct {
  436. c unsafe.Pointer // source
  437. s unsafe.Pointer // sorted
  438. length int
  439. }
  440. func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
  441. s := (*unsafeSlice)(unsafe.Pointer(&sorted))
  442. x.s = s.Data
  443. x.length = s.Len
  444. s = (*unsafeSlice)(unsafe.Pointer(&source))
  445. x.c = s.Data
  446. }
  447. func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
  448. *(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
  449. // s := (*unsafeSlice)(unsafe.Pointer(&v))
  450. // s.Data = x.sorted0
  451. // s.Len = x.length
  452. // s.Cap = s.Len
  453. return
  454. }
  455. func (x *structFieldInfos) source() (v []*structFieldInfo) {
  456. *(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.c, x.length, x.length}
  457. return
  458. }
// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.
// ----------------------

// atomicTypeInfoSlice atomically stores/loads a []rtid2ti via a pointer
// to the whole slice header.
type atomicTypeInfoSlice struct {
	v unsafe.Pointer // *[]rtid2ti
}

// load returns the currently stored slice (nil slice if never stored).
func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]rtid2ti)(x2)
	}
	return
}

// store atomically publishes a new slice value.
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}

// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.
// --------------------------

// atomicRtidFnSlice atomically stores/loads a []codecRtidFn via a pointer
// to the whole slice header.
type atomicRtidFnSlice struct {
	v unsafe.Pointer // *[]codecRtidFn
}

// load returns the currently stored slice (nil slice if never stored).
func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]codecRtidFn)(x2)
	}
	return
}

// store atomically publishes a new slice value.
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
// --------------------------

// atomicClsErr atomically stores/loads a clsErr via a pointer to it.
type atomicClsErr struct {
	v unsafe.Pointer // *clsErr
}

// load returns the currently stored clsErr (zero value if never stored).
func (x *atomicClsErr) load() (e clsErr) {
	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
	if x2 != nil {
		e = *x2
	}
	return
}

// store atomically publishes a new clsErr value.
func (x *atomicClsErr) store(p clsErr) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
// --------------------------
// to create a reflect.Value for each member field of fauxUnion,
// we first create a global fauxUnion, and create reflect.Value
// for them all.
// This way, we have the flags and type in the reflect.Value.
// Then, when a reflect.Value is called, we just copy it,
// update the ptr to the fauxUnion's, and return it.

type unsafeDecNakedWrapper struct {
	fauxUnion
	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
}

// init builds the prototype reflect.Values, one per fauxUnion field,
// via rv4iptr(&field).Elem() so each carries the correct type and flags.
func (n *unsafeDecNakedWrapper) init() {
	n.ru = rv4iptr(&n.u).Elem()
	n.ri = rv4iptr(&n.i).Elem()
	n.rf = rv4iptr(&n.f).Elem()
	n.rl = rv4iptr(&n.l).Elem()
	n.rs = rv4iptr(&n.s).Elem()
	n.rt = rv4iptr(&n.t).Elem()
	n.rb = rv4iptr(&n.b).Elem()
	// n.rr[] = reflect.ValueOf(&n.)
}

// defUnsafeDecNakedWrapper holds the shared prototypes, initialized once at startup.
var defUnsafeDecNakedWrapper unsafeDecNakedWrapper

func init() {
	defUnsafeDecNakedWrapper.init()
}
// The fauxUnion accessors below each return a reflect.Value viewing the
// corresponding fauxUnion field: the prototype reflect.Value (carrying the
// right type and flags) is copied from defUnsafeDecNakedWrapper, and only
// its data pointer is redirected to this fauxUnion's field.

func (n *fauxUnion) ru() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ru
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
	return
}

func (n *fauxUnion) ri() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ri
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
	return
}

func (n *fauxUnion) rf() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rf
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
	return
}

func (n *fauxUnion) rl() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rl
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
	return
}

func (n *fauxUnion) rs() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rs
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
	return
}

func (n *fauxUnion) rt() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rt
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
	return
}

func (n *fauxUnion) rb() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rb
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
	return
}
  570. // --------------------------
  571. func rvSetBytes(rv reflect.Value, v []byte) {
  572. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  573. *(*[]byte)(urv.ptr) = v
  574. }
  575. func rvSetString(rv reflect.Value, v string) {
  576. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  577. *(*string)(urv.ptr) = v
  578. }
  579. func rvSetBool(rv reflect.Value, v bool) {
  580. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  581. *(*bool)(urv.ptr) = v
  582. }
  583. func rvSetTime(rv reflect.Value, v time.Time) {
  584. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  585. *(*time.Time)(urv.ptr) = v
  586. }
  587. func rvSetFloat32(rv reflect.Value, v float32) {
  588. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  589. *(*float32)(urv.ptr) = v
  590. }
  591. func rvSetFloat64(rv reflect.Value, v float64) {
  592. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  593. *(*float64)(urv.ptr) = v
  594. }
  595. func rvSetComplex64(rv reflect.Value, v complex64) {
  596. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  597. *(*complex64)(urv.ptr) = v
  598. }
  599. func rvSetComplex128(rv reflect.Value, v complex128) {
  600. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  601. *(*complex128)(urv.ptr) = v
  602. }
  603. func rvSetInt(rv reflect.Value, v int) {
  604. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  605. *(*int)(urv.ptr) = v
  606. }
  607. func rvSetInt8(rv reflect.Value, v int8) {
  608. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  609. *(*int8)(urv.ptr) = v
  610. }
  611. func rvSetInt16(rv reflect.Value, v int16) {
  612. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  613. *(*int16)(urv.ptr) = v
  614. }
  615. func rvSetInt32(rv reflect.Value, v int32) {
  616. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  617. *(*int32)(urv.ptr) = v
  618. }
  619. func rvSetInt64(rv reflect.Value, v int64) {
  620. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  621. *(*int64)(urv.ptr) = v
  622. }
  623. func rvSetUint(rv reflect.Value, v uint) {
  624. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  625. *(*uint)(urv.ptr) = v
  626. }
  627. func rvSetUintptr(rv reflect.Value, v uintptr) {
  628. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  629. *(*uintptr)(urv.ptr) = v
  630. }
  631. func rvSetUint8(rv reflect.Value, v uint8) {
  632. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  633. *(*uint8)(urv.ptr) = v
  634. }
  635. func rvSetUint16(rv reflect.Value, v uint16) {
  636. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  637. *(*uint16)(urv.ptr) = v
  638. }
  639. func rvSetUint32(rv reflect.Value, v uint32) {
  640. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  641. *(*uint32)(urv.ptr) = v
  642. }
  643. func rvSetUint64(rv reflect.Value, v uint64) {
  644. urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
  645. *(*uint64)(urv.ptr) = v
  646. }
// ----------------

// rvSetZero is rv.Set(reflect.Zero(rv.Type()) for all kinds (including reflect.Interface).
func rvSetZero(rv reflect.Value) {
	rvSetDirectZero(rv)
}

// rvSetIntf is rv.Set(v) for interface kinds, delegating to reflect
// (which handles any allocation/boxing that interface assignment needs).
func rvSetIntf(rv reflect.Value, v reflect.Value) {
	rv.Set(v)
}
// rvSetDirect is rv.Set for all kinds except reflect.Interface.
//
// Callers MUST not pass a value of kind reflect.Interface, as it may cause unexpected segfaults.
func rvSetDirect(rv reflect.Value, v reflect.Value) {
	// MARKER: rv.Set for kind reflect.Interface may do a separate allocation if a scalar value.
	// The book-keeping is onerous, so we just do the simple ones where a memmove is sufficient.
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if uv.flag&unsafeFlagIndir == 0 {
		// v's data word IS the value (pointer-shaped kind): store it directly.
		*(*unsafe.Pointer)(urv.ptr) = uv.ptr
	} else if uv.ptr == unsafeZeroAddr {
		// v points at the runtime's shared zero block, i.e. v is the zero value:
		// clear the destination instead of copying (unless it too is the zero block).
		if urv.ptr != unsafeZeroAddr {
			typedmemclr(urv.typ, urv.ptr)
		}
	} else {
		// General case: typed copy, so any pointers inside the type get write barriers.
		typedmemmove(urv.typ, urv.ptr, uv.ptr)
	}
}
// rvSetDirectZero is rv.Set(reflect.Zero(rv.Type())) for all kinds except reflect.Interface.
func rvSetDirectZero(rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// skip the clear when rv already points at the shared zero block
	if urv.ptr != unsafeZeroAddr {
		typedmemclr(urv.typ, urv.ptr)
	}
}
// rvMakeSlice updates the slice to point to a new array.
// It copies data from old slice to new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Value, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr // runtime type of the slice element
	s := unsafeSlice{newarray(t, xcap), xlen, xcap}
	if ux.Len > 0 {
		// preserve existing elements via a write-barrier-aware copy
		typedslicecopy(t, s, *ux)
	}
	// update rv's slice header in place - hence set=true below
	*ux = s
	return rv, true
}
// rvSlice returns a sub-slice of the slice given new length,
// without modifying passed in value.
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	var x []struct{}
	ux := (*unsafeSlice)(unsafe.Pointer(&x))
	// copy the header so the caller's slice header is left untouched
	*ux = *(*unsafeSlice)(urv.ptr)
	ux.Len = length
	// point rv at the local header copy with the adjusted length
	urv.ptr = unsafe.Pointer(ux)
	return rv
}
// rvGrowSlice updates the slice to point to a new array with the cap incremented, and len set to the new cap value.
// It copies data from old slice to new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr // runtime type of the slice element
	*ux = unsafeGrowslice(t, *ux, cap, incr)
	// expose the full grown capacity as the new length
	ux.Len = ux.Cap
	return rv, ux.Cap, true
}
// ------------

// rvSliceIndex returns an addressable reflect.Value aliasing element i of the
// slice rv. No bounds checking is done; ti supplies the element type/kind/size.
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}

// rvSliceZeroCap returns a len=0, cap=0 slice value of type t,
// backed by the shared unsafeZeroSlice header (no allocation).
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(reflect.Slice) | unsafeFlagIndir
	urv.ptr = unsafe.Pointer(&unsafeZeroSlice)
	return
}

// rvLenSlice is rv.Len() for a slice, reading the header directly.
func rvLenSlice(rv reflect.Value) int {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return (*unsafeSlice)(urv.ptr).Len
}

// rvCapSlice is rv.Cap() for a slice, reading the header directly.
func rvCapSlice(rv reflect.Value) int {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return (*unsafeSlice)(urv.ptr).Cap
}
// rvArrayIndex returns an addressable reflect.Value aliasing element i of the
// array rv. No bounds checking is done; ti supplies the element type/kind/size.
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	// arrays are stored inline, so index off rv's data pointer itself
	uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}
// rvGetArrayBytes returns the contents of the byte-array rv as a []byte
// that aliases the array's storage (a writable view).
//
// NOTE(review): the scratch parameter is never read in this implementation -
// the view is returned regardless. Presumably a non-unsafe variant copies
// into scratch; confirm callers do not rely on scratch being populated here.
// if scratch is nil, then return a writable view (assuming canAddr=true)
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	bx := (*unsafeSlice)(unsafe.Pointer(&bs))
	bx.Data = urv.ptr
	bx.Len = rv.Len()
	bx.Cap = bx.Len
	return
}
// rvGetArray4Slice returns an addressable array value that aliases the
// backing storage of the slice rv, with array length == rvLenSlice(rv).
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
	// It is possible that this slice is based off an array with a larger
	// len that we want (where array len == slice cap).
	// However, it is ok to create an array type that is a subset of the full
	// e.g. full slice is based off a *[16]byte, but we can create a *[4]byte
	// off of it. That is ok.
	//
	// Consequently, we use rvLenSlice, not rvCapSlice.
	t := reflectArrayOf(rvLenSlice(rv), rv.Type().Elem())
	// v = rvZeroAddrK(t, reflect.Array)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
	return
}
// rvGetSlice4Array populates *v (a pointer to a slice) with a slice header
// that aliases the array rv's storage, with len == cap == rv.Len().
func rvGetSlice4Array(rv reflect.Value, v interface{}) {
	// v is a pointer to a slice to be populated
	uv := (*unsafeIntf)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	s := (*unsafeSlice)(uv.ptr)
	s.Data = urv.ptr
	s.Len = rv.Len()
	s.Cap = s.Len
}

// rvCopySlice copies src into dest (both slices of elemType), delegating to
// reflect.typedslicecopy so write barriers are honored.
func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
	typedslicecopy((*unsafeIntf)(unsafe.Pointer(&elemType)).ptr,
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&dest)).ptr),
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&src)).ptr))
}
// ------------

// The rvGet* helpers below read a value directly out of the memory that rv's
// data word points to, bypassing reflect.Value accessors. They assume the
// data word is a *T of the matching type (flagIndir set).

func rvGetBool(rv reflect.Value) bool {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*bool)(v.ptr)
}

func rvGetBytes(rv reflect.Value) []byte {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*[]byte)(v.ptr)
}

func rvGetTime(rv reflect.Value) time.Time {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*time.Time)(v.ptr)
}

func rvGetString(rv reflect.Value) string {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*string)(v.ptr)
}

func rvGetFloat64(rv reflect.Value) float64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*float64)(v.ptr)
}

func rvGetFloat32(rv reflect.Value) float32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*float32)(v.ptr)
}

func rvGetComplex64(rv reflect.Value) complex64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*complex64)(v.ptr)
}

func rvGetComplex128(rv reflect.Value) complex128 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*complex128)(v.ptr)
}

func rvGetInt(rv reflect.Value) int {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int)(v.ptr)
}

func rvGetInt8(rv reflect.Value) int8 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int8)(v.ptr)
}

func rvGetInt16(rv reflect.Value) int16 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int16)(v.ptr)
}

func rvGetInt32(rv reflect.Value) int32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int32)(v.ptr)
}

func rvGetInt64(rv reflect.Value) int64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*int64)(v.ptr)
}

func rvGetUint(rv reflect.Value) uint {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint)(v.ptr)
}

func rvGetUint8(rv reflect.Value) uint8 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint8)(v.ptr)
}

func rvGetUint16(rv reflect.Value) uint16 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint16)(v.ptr)
}

func rvGetUint32(rv reflect.Value) uint32 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint32)(v.ptr)
}

func rvGetUint64(rv reflect.Value) uint64 {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uint64)(v.ptr)
}

func rvGetUintptr(rv reflect.Value) uintptr {
	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	return *(*uintptr)(v.ptr)
}
// rvLenMap is rv.Len() for a map, reading the count word of the runtime
// map header via len_map.
func rvLenMap(rv reflect.Value) int {
	// maplen is not inlined, because as of go1.16beta, go:linkname's are not inlined.
	// thus, faster to call rv.Len() directly.
	//
	// MARKER: review after https://github.com/golang/go/issues/20019 fixed.
	// return rv.Len()
	return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}
  874. // copy is an intrinsic, which may use asm if length is small,
  875. // or make a runtime call to runtime.memmove if length is large.
  876. // Performance suffers when you always call runtime.memmove function.
  877. //
  878. // Consequently, there's no value in a copybytes call - just call copy() directly
  879. // func copybytes(to, from []byte) (n int) {
  880. // n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
  881. // memmove(
  882. // (*unsafeSlice)(unsafe.Pointer(&to)).Data,
  883. // (*unsafeSlice)(unsafe.Pointer(&from)).Data,
  884. // uintptr(n),
  885. // )
  886. // return
  887. // }
  888. // func copybytestr(to []byte, from string) (n int) {
  889. // n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
  890. // memmove(
  891. // (*unsafeSlice)(unsafe.Pointer(&to)).Data,
  892. // (*unsafeSlice)(unsafe.Pointer(&from)).Data,
  893. // uintptr(n),
  894. // )
  895. // return
  896. // }
  897. // Note: it is hard to find len(...) of an array type,
  898. // as that is a field in the arrayType representing the array, and hard to introspect.
  899. //
  900. // func rvLenArray(rv reflect.Value) int { return rv.Len() }
// ------------ map range and map indexing ----------

// regular calls to map via reflection: MapKeys, MapIndex, MapRange/MapIter etc
// will always allocate for each map key or value.
//
// It is more performant to provide a value that the map entry is set into,
// and that elides the allocation.

// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
// hIter struct with the first 2 values being key and value
// of the current iteration.
//
// This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
// We bypass the reflect wrapper functions and just use the *hIter directly.
//
// Though *hIter has many fields, we only care about the first 2.
//
// We directly embed this in unsafeMapIter below
//
// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accommodate small growth.

type unsafeMapIter struct {
	mtyp, mptr unsafe.Pointer // runtime type of, and pointer to, the map being iterated
	k, v       reflect.Value  // reusable destinations for the current key and value
	kisref     bool           // key kind is a reference kind per refBitset (alias, don't copy)
	visref     bool           // value kind is a reference kind per refBitset (alias, don't copy)
	mapvalues  bool           // whether values are loaded too, or only keys
	done       bool           // iteration exhausted (or map was nil)
	started    bool           // first Next() already consumed the mapiterinit position
	_          [3]byte        // padding
	// it mirrors the runtime hiter; only the first 2 words (key, value) are read here.
	it struct {
		key   unsafe.Pointer
		value unsafe.Pointer
		_     [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
	}
}
// Next advances the iterator, returning true if there is a current entry,
// which it loads into t.k (and t.v when mapvalues is set).
func (t *unsafeMapIter) Next() (r bool) {
	if t == nil || t.done {
		return
	}
	if t.started {
		mapiternext((unsafe.Pointer)(&t.it))
	} else {
		// mapiterinit (from mapRange) already positioned t.it at the first entry
		t.started = true
	}
	// the runtime hiter signals exhaustion with a nil key pointer
	t.done = t.it.key == nil
	if t.done {
		return
	}
	// For reference kinds (or in direct-assign mode), just point t.k at the
	// map entry's memory; otherwise do a typed copy into t.k's storage.
	if helperUnsafeDirectAssignMapEntry || t.kisref {
		(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
	} else {
		k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
		typedmemmove(k.typ, k.ptr, t.it.key)
	}
	if t.mapvalues {
		// same alias-vs-copy decision for the value
		if helperUnsafeDirectAssignMapEntry || t.visref {
			(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
		} else {
			v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
			typedmemmove(v.typ, v.ptr, t.it.value)
		}
	}
	return true
}
// Key returns the reflect.Value holding the current key (loaded by Next).
func (t *unsafeMapIter) Key() (r reflect.Value) {
	return t.k
}

// Value returns the reflect.Value holding the current value (loaded by Next).
func (t *unsafeMapIter) Value() (r reflect.Value) {
	return t.v
}

// Done is a no-op in the unsafe implementation; there is nothing to release.
func (t *unsafeMapIter) Done() {}

// mapIter wraps unsafeMapIter as the type used by callers of mapRange.
type mapIter struct {
	unsafeMapIter
}
// mapRange initializes t to iterate over map m, loading each key into k
// (and each value into v when mapvalues=true). k and v are the addressable
// destination values (see mapAddrLoopvarRV). A nil map yields a done iterator.
func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
	if rvIsNil(m) {
		t.done = true
		return
	}
	t.done = false
	t.started = false
	t.mapvalues = mapvalues
	// var urv *unsafeReflectValue
	urv := (*unsafeReflectValue)(unsafe.Pointer(&m))
	t.mtyp = urv.typ
	t.mptr = rvRefPtr(urv)
	// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
	mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))
	t.k = k
	t.kisref = refBitset.isset(byte(k.Kind()))
	if mapvalues {
		t.v = v
		t.visref = refBitset.isset(byte(v.Kind()))
	} else {
		t.v = reflect.Value{}
	}
}
// unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
// It is needed as maps always keep a reference to the underlying value.
func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
	if urv.flag&unsafeFlagIndir == 0 {
		// data word IS the value: hand the map the address of that word
		return unsafe.Pointer(&urv.ptr)
	}
	return urv.ptr
}
  1005. // func mapDelete(m, k reflect.Value) {
  1006. // var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
  1007. // var kptr = unsafeMapKVPtr(urv)
  1008. // urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
  1009. // mapdelete(urv.typ, rv2ptr(urv), kptr)
  1010. // }
// return an addressable reflect value that can be used in mapRange and mapGet operations.
//
// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	// return rvZeroAddrK(t, k)
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	// since we always set the ptr when helperUnsafeDirectAssignMapEntry=true,
	// we should only allocate if it is not true
	if !helperUnsafeDirectAssignMapEntry {
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
// ---------- ENCODER optimized ---------------

// jsondriver returns e.e as a *jsonEncDriver, extracting the interface's data
// word directly instead of a type assertion. Only valid when the encoder's
// driver actually is a jsonEncDriver.
func (e *Encoder) jsondriver() *jsonEncDriver {
	return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
}
// zerocopystate reports whether the last decoded bytes alias the input buffer
// and the handle permits zero-copy, so a stringView of them is acceptable.
func (d *Decoder) zerocopystate() bool {
	return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
}

// stringZC returns v as a string: a zero-copy view when zerocopystate holds,
// otherwise falling back to d.string(v).
func (d *Decoder) stringZC(v []byte) (s string) {
	// MARKER: inline zerocopystate directly so genHelper forwarding function fits within inlining cost
	// if d.zerocopystate() {
	if d.decByteState == decByteStateZerocopy && d.h.ZeroCopy {
		return stringView(v)
	}
	return d.string(v)
}
// mapKeyString returns a string view of *kstr2bs for use as a map key.
// When zero-copy is not in effect it sets *callFnRvk, and if the bytes live
// in the decoder's reusable buffer, it copies them into *kstrbs first
// (repointing *kstr2bs there) so the returned view stays stable.
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
	if !d.zerocopystate() {
		*callFnRvk = true
		if d.decByteState == decByteStateReuseBuf {
			*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
			*kstr2bs = *kstrbs
		}
	}
	return stringView(*kstr2bs)
}
// ---------- DECODER optimized ---------------

// jsondriver returns d.d as a *jsonDecDriver, extracting the interface's data
// word directly instead of a type assertion. Only valid when the decoder's
// driver actually is a jsonDecDriver.
func (d *Decoder) jsondriver() *jsonDecDriver {
	return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
}
// ---------- structFieldInfo optimized ---------------

// rvField returns the field of struct value v described by n, built directly
// from v's data pointer plus the field offset (i.e. v.Field without the checks).
func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
	// we already know this is exported, and maybe embedded (based on what si says)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// clear flagEmbedRO if necessary, and inherit permission bits from v
	urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
	urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
	return
}
  1066. // runtime chan and map are designed such that the first field is the count.
  1067. // len builtin uses this to get the length of a chan/map easily.
  1068. // leverage this knowledge, since maplen and chanlen functions from runtime package
  1069. // are go:linkname'd here, and thus not inlined as of go1.16beta
  1070. func len_map_chan(m unsafe.Pointer) int {
  1071. if m == nil {
  1072. return 0
  1073. }
  1074. return *((*int)(m))
  1075. }
// len_map reads the count word of a runtime map header (see len_map_chan).
func len_map(m unsafe.Pointer) int {
	// return maplen(m)
	return len_map_chan(m)
}

// len_chan reads the count word of a runtime chan header (see len_map_chan).
func len_chan(m unsafe.Pointer) int {
	// return chanlen(m)
	return len_map_chan(m)
}
// unsafeNew allocates zeroed storage for one value of the given runtime type,
// via runtime.mallocgc (reflect.unsafe_New is not usable under gollvm; see the
// linkname notes in this file).
func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
	return mallocgc(rtsize2(typ), typ, true)
}
// ---------- go linknames (LINKED to runtime/reflect) ---------------

// MARKER: always check that these linknames match subsequent versions of go
//
// Note that as of Jan 2021 (go 1.16 release), go:linkname(s) are not inlined
// outside of the standard library use (e.g. within sync, reflect, etc).
// If these link'ed functions were normally inlined, calling them here would
// not necessarily give a performance boost, due to function overhead.
//
// However, it seems most of these functions are not inlined anyway,
// as only maplen, chanlen and mapaccess are small enough to get inlined.
//
// We checked this by going into $GOROOT/src/runtime and running:
// $ go build -tags codec.notfastpath -gcflags "-m=2"

// reflect.{unsafe_New, unsafe_NewArray} are not supported in gollvm,
// failing with "error: undefined reference" error.
// however, runtime.{mallocgc, newarray} are supported, so use that instead.

//go:linkname memmove runtime.memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer

//go:linkname newarray runtime.newarray
//go:noescape
func newarray(typ unsafe.Pointer, n int) unsafe.Pointer

//go:linkname mapiterinit runtime.mapiterinit
//go:noescape
func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)

//go:linkname mapiternext runtime.mapiternext
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)

//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)

//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer

//go:linkname mapaccess2 runtime.mapaccess2
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)

// reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
// and if a writeBarrier is needed, before delegating to the right method in the runtime.
//
// This is why we use the functions in reflect, and not the ones in runtime directly.
// Calling runtime.XXX here will lead to memory issues.

//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src unsafeSlice) int

//go:linkname typedmemmove reflect.typedmemmove
//go:noescape
func typedmemmove(typ unsafe.Pointer, dst, src unsafe.Pointer)

//go:linkname typedmemclr reflect.typedmemclr
//go:noescape
func typedmemclr(typ unsafe.Pointer, dst unsafe.Pointer)