// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package generic_sync

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// MapOf is like a Go map[K]V but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The MapOf type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The MapOf type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a MapOf may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero MapOf is empty and ready for use. A MapOf must not be copied after first use.
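//
// A minimal usage sketch (the key and values below are illustrative only):
//
//	var m MapOf[string, int]
//	m.Store("hits", 1)
//	if v, ok := m.Load("hits"); ok {
//		_ = v // v == 1, ok == true
//	}
//	m.Delete("hits")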
type MapOf[K comparable, V any] struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[K]*entry[V]

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
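
// A rough sketch of how the read/dirty split above behaves over time (the key
// and value are illustrative only):
//
//	var m MapOf[string, int]
//	m.Store("a", 1) // "a" lands in the dirty map; read is marked amended
//	m.Load("a")     // miss: served from the dirty map under mu, misses++
//	                // once misses >= len(dirty), missLocked promotes dirty to read
//	m.Load("a")     // now served lock-free from the read map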

// readOnly is an immutable struct stored atomically in the MapOf.read field.
type readOnly[K comparable, V any] struct {
	m       map[K]*entry[V]
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry[V any] struct {
	// p points to the value of type V stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *V
}

func newEntry[V any](i V) *entry[V] {
	return &entry[V]{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or the zero value of V
// if no value is present.
// The ok result indicates whether value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	read, _ := m.read.Load().(readOnly[K, V])
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly[K, V])
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return value, false
	}
	return e.load()
}

// Has reports whether the map contains a value for the given key.
func (m *MapOf[K, V]) Has(key K) bool {
	_, ok := m.Load(key)
	return ok
}

func (e *entry[V]) load() (value V, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return value, false
	}
	return *(*V)(p), true
}

// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	read, _ := m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry[V]) tryStore(i *V) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry[V]) storeLocked(i *V) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
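//
// A minimal sketch of the behavior (the key and values are illustrative only):
//
//	var m MapOf[string, int]
//	v, loaded := m.LoadOrStore("k", 42) // v == 42, loaded == false: 42 was stored
//	v, loaded = m.LoadOrStore("k", 7)   // v == 42, loaded == true: the existing value was returned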
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return actual, false, false
	}
	if p != nil {
		return *(*V)(p), true, true
	}

	// Copy the value after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return actual, false, false
		}
		if p != nil {
			return *(*V)(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	read, _ := m.read.Load().(readOnly[K, V])
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly[K, V])
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry[V]) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the MapOf's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
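//
// A minimal iteration sketch, stopping early once a condition is met (the
// element types and the condition are illustrative only):
//
//	m.Range(func(k string, v int) bool {
//		if v < 0 {
//			return false // stop iterating
//		}
//		return true // keep going
//	})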
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly[K, V])
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly[K, V])
		if read.amended {
			read = readOnly[K, V]{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// Values returns a slice of the values in the map.
func (m *MapOf[K, V]) Values() []V {
	var values []V
	m.Range(func(key K, value V) bool {
		values = append(values, value)
		return true
	})
	return values
}

// Count returns the number of entries currently stored in the map, counted
// via Range so that entries outside the dirty map are included.
func (m *MapOf[K, V]) Count() int {
	n := 0
	m.Range(func(_ K, _ V) bool { n++; return true })
	return n
}

// Empty reports whether the map contains no entries.
func (m *MapOf[K, V]) Empty() bool {
	return m.Count() == 0
}

// ToMap returns a plain Go map holding a snapshot of the map's contents.
func (m *MapOf[K, V]) ToMap() map[K]V {
	ans := make(map[K]V)
	m.Range(func(key K, value V) bool {
		ans[key] = value
		return true
	})
	return ans
}

// Clear deletes every key currently visible to Range.
func (m *MapOf[K, V]) Clear() {
	m.Range(func(key K, value V) bool {
		m.Delete(key)
		return true
	})
}

func (m *MapOf[K, V]) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly[K, V]{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *MapOf[K, V]) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly[K, V])
	m.dirty = make(map[K]*entry[V], len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}