pre_go19.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.9
// +build !go1.9

package syncmap

import (
    "sync"
    "sync/atomic"
    "unsafe"
)

// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
    mu sync.Mutex

    // read contains the portion of the map's contents that are safe for
    // concurrent access (with or without mu held).
    //
    // The read field itself is always safe to load, but must only be stored with
    // mu held.
    //
    // Entries stored in read may be updated concurrently without mu, but updating
    // a previously-expunged entry requires that the entry be copied to the dirty
    // map and unexpunged with mu held.
    read atomic.Value // readOnly

    // dirty contains the portion of the map's contents that require mu to be
    // held. To ensure that the dirty map can be promoted to the read map quickly,
    // it also includes all of the non-expunged entries in the read map.
    //
    // Expunged entries are not stored in the dirty map. An expunged entry in the
    // clean map must be unexpunged and added to the dirty map before a new value
    // can be stored to it.
    //
    // If the dirty map is nil, the next write to the map will initialize it by
    // making a shallow copy of the clean map, omitting stale entries.
    dirty map[interface{}]*entry

    // misses counts the number of loads since the read map was last updated that
    // needed to lock mu to determine whether the key was present.
    //
    // Once enough misses have occurred to cover the cost of copying the dirty
    // map, the dirty map will be promoted to the read map (in the unamended
    // state) and the next store to the map will make a new dirty copy.
    misses int
}
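
// Usage sketch (not part of the original file; the function name exampleUsage is
// hypothetical): the zero Map is ready to use, its methods may be called from
// multiple goroutines, and callers type-assert the interface{} values they read back.
func exampleUsage() {
    var m Map // the zero Map is valid and empty; do not copy it after first use

    m.Store("greeting", "hello")

    if v, ok := m.Load("greeting"); ok {
        _ = v.(string) // values come back as interface{}, so callers type-assert
    }

    m.Range(func(key, value interface{}) bool {
        return true // returning true continues the iteration
    })

    m.Delete("greeting")
}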
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
    m       map[interface{}]*entry
    amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
    // p points to the interface{} value stored for the entry.
    //
    // If p == nil, the entry has been deleted and m.dirty == nil.
    //
    // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
    // is missing from m.dirty.
    //
    // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
    // != nil, in m.dirty[key].
    //
    // An entry can be deleted by atomic replacement with nil: when m.dirty is
    // next created, it will atomically replace nil with expunged and leave
    // m.dirty[key] unset.
    //
    // An entry's associated value can be updated by atomic replacement, provided
    // p != expunged. If p == expunged, an entry's associated value can be updated
    // only after first setting m.dirty[key] = e so that lookups using the dirty
    // map find the entry.
    p unsafe.Pointer // *interface{}
}

func newEntry(i interface{}) *entry {
    return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
    read, _ := m.read.Load().(readOnly)
    e, ok := read.m[key]
    if !ok && read.amended {
        m.mu.Lock()
        // Avoid reporting a spurious miss if m.dirty got promoted while we were
        // blocked on m.mu. (If further loads of the same key will not miss, it's
        // not worth copying the dirty map for this key.)
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        if !ok && read.amended {
            e, ok = m.dirty[key]
            // Regardless of whether the entry was present, record a miss: this key
            // will take the slow path until the dirty map is promoted to the read
            // map.
            m.missLocked()
        }
        m.mu.Unlock()
    }
    if !ok {
        return nil, false
    }
    return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
    p := atomic.LoadPointer(&e.p)
    if p == nil || p == expunged {
        return nil, false
    }
    return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
    read, _ := m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok && e.tryStore(&value) {
        return
    }

    m.mu.Lock()
    read, _ = m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok {
        if e.unexpungeLocked() {
            // The entry was previously expunged, which implies that there is a
            // non-nil dirty map and this entry is not in it.
            m.dirty[key] = e
        }
        e.storeLocked(&value)
    } else if e, ok := m.dirty[key]; ok {
        e.storeLocked(&value)
    } else {
        if !read.amended {
            // We're adding the first new key to the dirty map.
            // Make sure it is allocated and mark the read-only map as incomplete.
            m.dirtyLocked()
            m.read.Store(readOnly{m: read.m, amended: true})
        }
        m.dirty[key] = newEntry(value)
    }
    m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
    p := atomic.LoadPointer(&e.p)
    if p == expunged {
        return false
    }
    for {
        if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
            return true
        }
        p = atomic.LoadPointer(&e.p)
        if p == expunged {
            return false
        }
    }
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
    return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
    atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
    // Avoid locking if it's a clean hit.
    read, _ := m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok {
        actual, loaded, ok := e.tryLoadOrStore(value)
        if ok {
            return actual, loaded
        }
    }

    m.mu.Lock()
    read, _ = m.read.Load().(readOnly)
    if e, ok := read.m[key]; ok {
        if e.unexpungeLocked() {
            m.dirty[key] = e
        }
        actual, loaded, _ = e.tryLoadOrStore(value)
    } else if e, ok := m.dirty[key]; ok {
        actual, loaded, _ = e.tryLoadOrStore(value)
        m.missLocked()
    } else {
        if !read.amended {
            // We're adding the first new key to the dirty map.
            // Make sure it is allocated and mark the read-only map as incomplete.
            m.dirtyLocked()
            m.read.Store(readOnly{m: read.m, amended: true})
        }
        m.dirty[key] = newEntry(value)
        actual, loaded = value, false
    }
    m.mu.Unlock()

    return actual, loaded
}
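
// loadOrCreate is a hypothetical helper (not in the original file) sketching the
// usual "get or create" pattern built on LoadOrStore: newValue may run even when
// another goroutine wins the race, but only one value ends up stored for key.
func loadOrCreate(m *Map, key interface{}, newValue func() interface{}) interface{} {
    if v, ok := m.Load(key); ok {
        return v // fast path: the key is already present
    }
    actual, _ := m.LoadOrStore(key, newValue())
    return actual
}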
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
    p := atomic.LoadPointer(&e.p)
    if p == expunged {
        return nil, false, false
    }
    if p != nil {
        return *(*interface{})(p), true, true
    }

    // Copy the interface after the first load to make this method more amenable
    // to escape analysis: if we hit the "load" path or the entry is expunged, we
    // shouldn't bother heap-allocating.
    ic := i
    for {
        if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
            return i, false, true
        }
        p = atomic.LoadPointer(&e.p)
        if p == expunged {
            return nil, false, false
        }
        if p != nil {
            return *(*interface{})(p), true, true
        }
    }
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
    read, _ := m.read.Load().(readOnly)
    e, ok := read.m[key]
    if !ok && read.amended {
        m.mu.Lock()
        read, _ = m.read.Load().(readOnly)
        e, ok = read.m[key]
        if !ok && read.amended {
            delete(m.dirty, key)
        }
        m.mu.Unlock()
    }
    if ok {
        e.delete()
    }
}

func (e *entry) delete() (hadValue bool) {
    for {
        p := atomic.LoadPointer(&e.p)
        if p == nil || p == expunged {
            return false
        }
        if atomic.CompareAndSwapPointer(&e.p, p, nil) {
            return true
        }
    }
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
    // We need to be able to iterate over all of the keys that were already
    // present at the start of the call to Range.
    // If read.amended is false, then read.m satisfies that property without
    // requiring us to hold m.mu for a long time.
    read, _ := m.read.Load().(readOnly)
    if read.amended {
        // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
        // (assuming the caller does not break out early), so a call to Range
        // amortizes an entire copy of the map: we can promote the dirty copy
        // immediately!
        m.mu.Lock()
        read, _ = m.read.Load().(readOnly)
        if read.amended {
            read = readOnly{m: m.dirty}
            m.read.Store(read)
            m.dirty = nil
            m.misses = 0
        }
        m.mu.Unlock()
    }

    for k, e := range read.m {
        v, ok := e.load()
        if !ok {
            continue
        }
        if !f(k, v) {
            break
        }
    }
}
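
// countUpTo is a hypothetical helper (not in the original file) illustrating the
// Range semantics documented above: returning false from f stops the iteration,
// but a Range that is cut short may still have promoted (copied) the dirty map.
func countUpTo(m *Map, limit int) int {
    n := 0
    m.Range(func(key, value interface{}) bool {
        n++
        return n < limit // stop early once limit entries have been visited
    })
    return n
}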
func (m *Map) missLocked() {
    m.misses++
    if m.misses < len(m.dirty) {
        return
    }
    m.read.Store(readOnly{m: m.dirty})
    m.dirty = nil
    m.misses = 0
}
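
// Informal worked example of the threshold above (not from the original source):
// if m.dirty holds 4 entries, the first three misses only increment m.misses; on
// the fourth miss m.misses == len(m.dirty), so the dirty map is promoted to the
// read map, m.dirty becomes nil, and the next store rebuilds it via dirtyLocked.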
func (m *Map) dirtyLocked() {
    if m.dirty != nil {
        return
    }

    read, _ := m.read.Load().(readOnly)
    m.dirty = make(map[interface{}]*entry, len(read.m))
    for k, e := range read.m {
        if !e.tryExpungeLocked() {
            m.dirty[k] = e
        }
    }
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
    p := atomic.LoadPointer(&e.p)
    for p == nil {
        if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
            return true
        }
        p = atomic.LoadPointer(&e.p)
    }
    return p == expunged
}