AtomicsObject.cpp 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551
  1. /*
  2. * Copyright (c) 2021, Tim Flynn <trflynn89@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. // This file explicitly implements support for JS Atomics API, which can
  7. // involve slow (non-lock-free) atomic ops.
  8. #include <AK/Platform.h>
  9. #ifdef AK_COMPILER_CLANG
  10. # pragma clang diagnostic ignored "-Watomic-alignment"
  11. #endif
  12. #include <AK/Atomic.h>
  13. #include <AK/ByteBuffer.h>
  14. #include <AK/Endian.h>
  15. #include <AK/TypeCasts.h>
  16. #include <LibJS/Runtime/Agent.h>
  17. #include <LibJS/Runtime/AtomicsObject.h>
  18. #include <LibJS/Runtime/GlobalObject.h>
  19. #include <LibJS/Runtime/TypedArray.h>
  20. #include <LibJS/Runtime/Value.h>
  21. #include <LibJS/Runtime/ValueInlines.h>
  22. namespace JS {
  23. JS_DEFINE_ALLOCATOR(AtomicsObject);
  24. // 25.4.2.1 ValidateIntegerTypedArray ( typedArray [ , waitable ] ), https://tc39.es/ecma262/#sec-validateintegertypedarray
  25. static ThrowCompletionOr<ArrayBuffer*> validate_integer_typed_array(VM& vm, TypedArrayBase& typed_array, bool waitable = false)
  26. {
  27. // 1. If waitable is not present, set waitable to false.
  28. // 2. Perform ? ValidateTypedArray(typedArray).
  29. TRY(validate_typed_array(vm, typed_array));
  30. // 3. Let buffer be typedArray.[[ViewedArrayBuffer]].
  31. auto* buffer = typed_array.viewed_array_buffer();
  32. auto const& type_name = typed_array.element_name();
  33. // 4. If waitable is true, then
  34. if (waitable) {
  35. // a. If typedArray.[[TypedArrayName]] is not "Int32Array" or "BigInt64Array", throw a TypeError exception.
  36. if ((type_name != vm.names.Int32Array.as_string()) && (type_name != vm.names.BigInt64Array.as_string()))
  37. return vm.throw_completion<TypeError>(ErrorType::TypedArrayTypeIsNot, type_name, "Int32 or BigInt64"sv);
  38. }
  39. // 5. Else,
  40. else {
  41. // a. Let type be TypedArrayElementType(typedArray).
  42. // b. If IsUnclampedIntegerElementType(type) is false and IsBigIntElementType(type) is false, throw a TypeError exception.
  43. if (!typed_array.is_unclamped_integer_element_type() && !typed_array.is_bigint_element_type())
  44. return vm.throw_completion<TypeError>(ErrorType::TypedArrayTypeIsNot, type_name, "an unclamped integer or BigInt"sv);
  45. }
  46. // 6. Return buffer.
  47. return buffer;
  48. }
  49. // 25.4.2.2 ValidateAtomicAccess ( typedArray, requestIndex ), https://tc39.es/ecma262/#sec-validateatomicaccess
  50. static ThrowCompletionOr<size_t> validate_atomic_access(VM& vm, TypedArrayBase& typed_array, Value request_index)
  51. {
  52. // 1. Let length be typedArray.[[ArrayLength]].
  53. auto length = typed_array.array_length();
  54. // 2. Let accessIndex be ? ToIndex(requestIndex).
  55. auto access_index = TRY(request_index.to_index(vm));
  56. // 3. Assert: accessIndex ≥ 0.
  57. // 4. If accessIndex ≥ length, throw a RangeError exception.
  58. if (access_index >= length)
  59. return vm.throw_completion<RangeError>(ErrorType::IndexOutOfRange, access_index, typed_array.array_length());
  60. // 5. Let elementSize be TypedArrayElementSize(typedArray).
  61. auto element_size = typed_array.element_size();
  62. // 6. Let offset be typedArray.[[ByteOffset]].
  63. auto offset = typed_array.byte_offset();
  64. // 7. Return (accessIndex × elementSize) + offset.
  65. return (access_index * element_size) + offset;
  66. }
// 25.4.2.11 AtomicReadModifyWrite ( typedArray, index, value, op ), https://tc39.es/ecma262/#sec-atomicreadmodifywrite
// Shared driver for the read-modify-write Atomics functions (add/and/or/sub/xor/exchange):
// validates the array and index, coerces the operand, then applies `operation` in the buffer.
static ThrowCompletionOr<Value> atomic_read_modify_write(VM& vm, TypedArrayBase& typed_array, Value index, Value value, ReadWriteModifyFunction operation)
{
    // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
    auto* buffer = TRY(validate_integer_typed_array(vm, typed_array));

    // 2. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
    auto indexed_position = TRY(validate_atomic_access(vm, typed_array, index));

    Value value_to_set;

    // 3. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
    if (typed_array.content_type() == TypedArrayBase::ContentType::BigInt)
        value_to_set = TRY(value.to_bigint(vm));
    // 4. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
    else
        value_to_set = Value(TRY(value.to_integer_or_infinity(vm)));

    // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
    // This must happen AFTER the coercions above, which can run arbitrary user code.
    if (buffer->is_detached())
        return vm.throw_completion<TypeError>(ErrorType::DetachedArrayBuffer);

    // 6. NOTE: The above check is not redundant with the check in ValidateIntegerTypedArray because the call to ToBigInt or ToIntegerOrInfinity on the preceding lines can have arbitrary side effects, which could cause the buffer to become detached.
    // 7. Let elementType be TypedArrayElementType(typedArray).
    // 8. Return GetModifySetValueInBuffer(buffer, indexedPosition, elementType, v, op).
    return typed_array.get_modify_set_value_in_buffer(indexed_position, value_to_set, move(operation));
}
// Selects the behavior of do_wait(): Sync for Atomics.wait, Async for Atomics.waitAsync.
enum class WaitMode {
    Sync,
    Async,
};
// 25.4.3.14 DoWait ( mode, typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-dowait
// Shared implementation of Atomics.wait and Atomics.waitAsync. Performs all the
// validation/coercion steps, but the actual waiting is unimplemented pending
// SharedArrayBuffer support, so this currently always ends in an InternalError.
static ThrowCompletionOr<Value> do_wait(VM& vm, WaitMode mode, TypedArrayBase& typed_array, Value index_value, Value expected_value, Value timeout_value)
{
    // 1. Let iieoRecord be ? ValidateIntegerTypedArray(typedArray, true).
    // 2. Let buffer be iieoRecord.[[Object]].[[ViewedArrayBuffer]].
    // FIXME: An IIEO record is a new structure from the resizable array buffer proposal. Use it when the proposal is implemented.
    auto* buffer = TRY(validate_integer_typed_array(vm, typed_array, true));

    // 3. If IsSharedArrayBuffer(buffer) is false, throw a TypeError exception.
    if (!buffer->is_shared_array_buffer())
        return vm.throw_completion<TypeError>(ErrorType::NotASharedArrayBuffer);

    // 4. Let i be ? ValidateAtomicAccess(iieoRecord, index).
    auto index = TRY(validate_atomic_access(vm, typed_array, index_value));

    // 5. Let arrayTypeName be typedArray.[[TypedArrayName]].
    auto const& array_type_name = typed_array.element_name();

    // 6. If arrayTypeName is "BigInt64Array", let v be ? ToBigInt64(value).
    i64 value = 0;
    if (array_type_name == vm.names.BigInt64Array.as_string())
        value = TRY(expected_value.to_bigint_int64(vm));
    // 7. Else, let v be ? ToInt32(value).
    else
        value = TRY(expected_value.to_i32(vm));

    // 8. Let q be ? ToNumber(timeout).
    auto timeout_number = TRY(timeout_value.to_number(vm));

    // 9. If q is either NaN or +∞𝔽, let t be +∞; else if q is -∞𝔽, let t be 0; else let t be max(ℝ(q), 0).
    double timeout = 0;
    if (timeout_number.is_nan() || timeout_number.is_positive_infinity())
        timeout = js_infinity().as_double();
    else if (timeout_number.is_negative_infinity())
        timeout = 0.0;
    else
        timeout = max(timeout_number.as_double(), 0.0);

    // 10. If mode is sync and AgentCanSuspend() is false, throw a TypeError exception.
    if (mode == WaitMode::Sync && !agent_can_suspend())
        return vm.throw_completion<TypeError>(ErrorType::AgentCannotSuspend);

    // FIXME: Implement the remaining steps when we support SharedArrayBuffer.
    // The computed values are intentionally unused until then.
    (void)index;
    (void)value;
    (void)timeout;

    return vm.throw_completion<InternalError>(ErrorType::NotImplemented, "SharedArrayBuffer"sv);
}
// Dispatch helper for the read-modify-write Atomics functions. T is the typed array's
// element type; `operation` applies the atomic primitive (fetch_add, fetch_and, ...)
// to a pointer-to-element given the coerced operand.
template<typename T, typename AtomicFunction>
static ThrowCompletionOr<Value> perform_atomic_operation(VM& vm, TypedArrayBase& typed_array, AtomicFunction&& operation)
{
    auto index = vm.argument(1);
    auto value = vm.argument(2);

    // Adapt the typed atomic lambda to the ByteBuffer-based interface of
    // get_modify_set_value_in_buffer(). Presumably x_bytes holds the current
    // element bytes and y_bytes the operand bytes — confirm against that AO.
    auto operation_wrapper = [&, operation = forward<AtomicFunction>(operation)](ByteBuffer x_bytes, ByteBuffer y_bytes) -> ByteBuffer {
        if constexpr (IsFloatingPoint<T>) {
            // Floating-point element types are rejected by ValidateIntegerTypedArray
            // before this wrapper can run, so this branch must be dead.
            (void)operation;
            VERIFY_NOT_REACHED();
        } else {
            // ClampedU8 is operated on as plain u8 storage.
            using U = Conditional<IsSame<ClampedU8, T>, u8, T>;
            auto* x = reinterpret_cast<U*>(x_bytes.data());
            auto* y = reinterpret_cast<U*>(y_bytes.data());
            operation(x, *y);
            return x_bytes;
        }
    };

    return atomic_read_modify_write(vm, typed_array, index, value, move(operation_wrapper));
}
// The Atomics namespace object; an ordinary object whose prototype is %Object.prototype%.
AtomicsObject::AtomicsObject(Realm& realm)
    : Object(ConstructWithPrototypeTag::Tag, realm.intrinsics().object_prototype())
{
}
// Installs the Atomics functions and @@toStringTag on the namespace object.
// The numeric argument to each define_native_function is the function's `length`.
void AtomicsObject::initialize(Realm& realm)
{
    Base::initialize(realm);
    auto& vm = this->vm();

    // Built-in function properties are writable and configurable, but not enumerable.
    u8 attr = Attribute::Writable | Attribute::Configurable;
    define_native_function(realm, vm.names.add, add, 3, attr);
    define_native_function(realm, vm.names.and_, and_, 3, attr);
    define_native_function(realm, vm.names.compareExchange, compare_exchange, 4, attr);
    define_native_function(realm, vm.names.exchange, exchange, 3, attr);
    define_native_function(realm, vm.names.isLockFree, is_lock_free, 1, attr);
    define_native_function(realm, vm.names.load, load, 2, attr);
    define_native_function(realm, vm.names.or_, or_, 3, attr);
    define_native_function(realm, vm.names.store, store, 3, attr);
    define_native_function(realm, vm.names.sub, sub, 3, attr);
    define_native_function(realm, vm.names.wait, wait, 4, attr);
    define_native_function(realm, vm.names.waitAsync, wait_async, 4, attr);
    define_native_function(realm, vm.names.notify, notify, 3, attr);
    define_native_function(realm, vm.names.xor_, xor_, 3, attr);

    // 25.4.15 Atomics [ @@toStringTag ], https://tc39.es/ecma262/#sec-atomics-@@tostringtag
    define_direct_property(vm.well_known_symbol_to_string_tag(), PrimitiveString::create(vm, "Atomics"_string), Attribute::Configurable);
}
  177. // 25.4.3 Atomics.add ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.add
  178. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::add)
  179. {
  180. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  181. auto atomic_add = [](auto* storage, auto value) { return AK::atomic_fetch_add(storage, value); };
  182. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  183. if (is<ClassName>(typed_array)) \
  184. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_add)));
  185. JS_ENUMERATE_TYPED_ARRAYS
  186. #undef __JS_ENUMERATE
  187. VERIFY_NOT_REACHED();
  188. }
  189. // 25.4.4 Atomics.and ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.and
  190. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::and_)
  191. {
  192. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  193. auto atomic_and = [](auto* storage, auto value) { return AK::atomic_fetch_and(storage, value); };
  194. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  195. if (is<ClassName>(typed_array)) \
  196. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_and)));
  197. JS_ENUMERATE_TYPED_ARRAYS
  198. #undef __JS_ENUMERATE
  199. VERIFY_NOT_REACHED();
  200. }
// Implementation of 25.4.5 Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue ), https://tc39.es/ecma262/#sec-atomics.compareexchange
// Instantiated per element type T by the compare_exchange dispatcher below.
template<typename T>
static ThrowCompletionOr<Value> atomic_compare_exchange_impl(VM& vm, TypedArrayBase& typed_array)
{
    // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
    auto* buffer = TRY(validate_integer_typed_array(vm, typed_array));

    // 2. Let block be buffer.[[ArrayBufferData]].
    auto& block = buffer->buffer();

    // 3. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
    auto indexed_position = TRY(validate_atomic_access(vm, typed_array, vm.argument(1)));

    Value expected;
    Value replacement;

    // 4. If typedArray.[[ContentType]] is BigInt, then
    if (typed_array.content_type() == TypedArrayBase::ContentType::BigInt) {
        // a. Let expected be ? ToBigInt(expectedValue).
        expected = TRY(vm.argument(2).to_bigint(vm));

        // b. Let replacement be ? ToBigInt(replacementValue).
        replacement = TRY(vm.argument(3).to_bigint(vm));
    }
    // 5. Else,
    else {
        // a. Let expected be 𝔽(? ToIntegerOrInfinity(expectedValue)).
        expected = Value(TRY(vm.argument(2).to_integer_or_infinity(vm)));

        // b. Let replacement be 𝔽(? ToIntegerOrInfinity(replacementValue)).
        replacement = Value(TRY(vm.argument(3).to_integer_or_infinity(vm)));
    }

    // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
    // Must be re-checked here: the coercions above can run arbitrary user code.
    if (buffer->is_detached())
        return vm.template throw_completion<TypeError>(ErrorType::DetachedArrayBuffer);

    // 7. NOTE: The above check is not redundant with the check in ValidateIntegerTypedArray because the call to ToBigInt or ToIntegerOrInfinity on the preceding lines can have arbitrary side effects, which could cause the buffer to become detached.
    // 8. Let elementType be TypedArrayElementType(typedArray).
    // 9. Let elementSize be TypedArrayElementSize(typedArray).

    // 10. Let isLittleEndian be the value of the [[LittleEndian]] field of the surrounding agent's Agent Record.
    constexpr bool is_little_endian = AK::HostIsLittleEndian;

    // 11. Let expectedBytes be NumericToRawBytes(elementType, expected, isLittleEndian).
    auto expected_bytes = MUST(ByteBuffer::create_uninitialized(sizeof(T)));
    numeric_to_raw_bytes<T>(vm, expected, is_little_endian, expected_bytes);

    // 12. Let replacementBytes be NumericToRawBytes(elementType, replacement, isLittleEndian).
    auto replacement_bytes = MUST(ByteBuffer::create_uninitialized(sizeof(T)));
    numeric_to_raw_bytes<T>(vm, replacement, is_little_endian, replacement_bytes);

    // FIXME: Implement SharedArrayBuffer case.
    // 13. If IsSharedArrayBuffer(buffer) is true, then
    // a-i.
    // 14. Else,
    // a. Let rawBytesRead be a List of length elementSize whose elements are the sequence of elementSize bytes starting with block[indexedPosition].
    // NOTE(review): this snapshot is a separate, non-atomic read taken before the
    // CAS below, so the returned value may not be the one the CAS observed — only
    // relevant once shared (multi-agent) buffers are supported; confirm then.
    // FIXME: Propagate errors.
    auto raw_bytes_read = MUST(block.slice(indexed_position, sizeof(T)));

    // b. If ByteListEqual(rawBytesRead, expectedBytes) is true, then
    // i. Store the individual bytes of replacementBytes into block, starting at block[indexedPosition].
    if constexpr (IsFloatingPoint<T>) {
        // Floating-point element types are rejected by ValidateIntegerTypedArray above.
        VERIFY_NOT_REACHED();
    } else {
        // ClampedU8 is operated on as plain u8 storage.
        using U = Conditional<IsSame<ClampedU8, T>, u8, T>;
        auto* v = reinterpret_cast<U*>(block.span().slice(indexed_position).data());
        auto* e = reinterpret_cast<U*>(expected_bytes.data());
        auto* r = reinterpret_cast<U*>(replacement_bytes.data());
        (void)AK::atomic_compare_exchange_strong(v, *e, *r);
    }

    // 15. Return RawBytesToNumeric(elementType, rawBytesRead, isLittleEndian).
    return raw_bytes_to_numeric<T>(vm, raw_bytes_read, is_little_endian);
}
// 25.4.5 Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue ), https://tc39.es/ecma262/#sec-atomics.compareexchange
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::compare_exchange)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    // Dispatch to the implementation instantiated for the array's element type.
#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array)) \
        return TRY(atomic_compare_exchange_impl<Type>(vm, *typed_array));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}
  273. // 25.4.6 Atomics.exchange ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.exchange
  274. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::exchange)
  275. {
  276. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  277. auto atomic_exchange = [](auto* storage, auto value) { return AK::atomic_exchange(storage, value); };
  278. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  279. if (is<ClassName>(typed_array)) \
  280. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_exchange)));
  281. JS_ENUMERATE_TYPED_ARRAYS
  282. #undef __JS_ENUMERATE
  283. VERIFY_NOT_REACHED();
  284. }
  285. // 25.4.7 Atomics.isLockFree ( size ), https://tc39.es/ecma262/#sec-atomics.islockfree
  286. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::is_lock_free)
  287. {
  288. auto size = TRY(vm.argument(0).to_integer_or_infinity(vm));
  289. if (size == 1)
  290. return Value(AK::atomic_is_lock_free<u8>());
  291. if (size == 2)
  292. return Value(AK::atomic_is_lock_free<u16>());
  293. if (size == 4)
  294. return Value(true);
  295. if (size == 8)
  296. return Value(AK::atomic_is_lock_free<u64>());
  297. return Value(false);
  298. }
// 25.4.8 Atomics.load ( typedArray, index ), https://tc39.es/ecma262/#sec-atomics.load
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::load)
{
    // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    TRY(validate_integer_typed_array(vm, *typed_array));

    // 2. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
    auto indexed_position = TRY(validate_atomic_access(vm, *typed_array, vm.argument(1)));

    // 3. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
    if (typed_array->viewed_array_buffer()->is_detached())
        return vm.throw_completion<TypeError>(ErrorType::DetachedArrayBuffer);

    // 4. NOTE: The above check is not redundant with the check in ValidateIntegerTypedArray because the call to ValidateAtomicAccess on the preceding line can have arbitrary side effects, which could cause the buffer to become detached.
    // 5. Let elementType be TypedArrayElementType(typedArray).
    // 6. Return GetValueFromBuffer(buffer, indexedPosition, elementType, true, SeqCst).
    return typed_array->get_value_from_buffer(indexed_position, ArrayBuffer::Order::SeqCst, true);
}
  315. // 25.4.9 Atomics.or ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.or
  316. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::or_)
  317. {
  318. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  319. auto atomic_or = [](auto* storage, auto value) { return AK::atomic_fetch_or(storage, value); };
  320. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  321. if (is<ClassName>(typed_array)) \
  322. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_or)));
  323. JS_ENUMERATE_TYPED_ARRAYS
  324. #undef __JS_ENUMERATE
  325. VERIFY_NOT_REACHED();
  326. }
// 25.4.10 Atomics.store ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.store
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::store)
{
    // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    TRY(validate_integer_typed_array(vm, *typed_array));

    // 2. Let indexedPosition be ? ValidateAtomicAccess(typedArray, index).
    auto indexed_position = TRY(validate_atomic_access(vm, *typed_array, vm.argument(1)));

    auto value = vm.argument(2);
    Value value_to_set;

    // 3. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
    if (typed_array->content_type() == TypedArrayBase::ContentType::BigInt)
        value_to_set = TRY(value.to_bigint(vm));
    // 4. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
    else
        value_to_set = Value(TRY(value.to_integer_or_infinity(vm)));

    // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
    // Must be re-checked here: the coercions above can run arbitrary user code.
    if (typed_array->viewed_array_buffer()->is_detached())
        return vm.throw_completion<TypeError>(ErrorType::DetachedArrayBuffer);

    // 6. NOTE: The above check is not redundant with the check in ValidateIntegerTypedArray because the call to ToBigInt or ToIntegerOrInfinity on the preceding lines can have arbitrary side effects, which could cause the buffer to become detached.
    // 7. Let elementType be TypedArrayElementType(typedArray).
    // 8. Perform SetValueInBuffer(buffer, indexedPosition, elementType, v, true, SeqCst).
    typed_array->set_value_in_buffer(indexed_position, value_to_set, ArrayBuffer::Order::SeqCst, true);

    // 9. Return v.
    return value_to_set;
}
  353. // 25.4.11 Atomics.sub ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.sub
  354. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::sub)
  355. {
  356. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  357. auto atomic_sub = [](auto* storage, auto value) { return AK::atomic_fetch_sub(storage, value); };
  358. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  359. if (is<ClassName>(typed_array)) \
  360. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_sub)));
  361. JS_ENUMERATE_TYPED_ARRAYS
  362. #undef __JS_ENUMERATE
  363. VERIFY_NOT_REACHED();
  364. }
  365. // 25.4.13 Atomics.wait ( typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-atomics.wait
  366. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::wait)
  367. {
  368. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  369. auto index = vm.argument(1);
  370. auto value = vm.argument(2);
  371. auto timeout = vm.argument(3);
  372. // 1. Return ? DoWait(sync, typedArray, index, value, timeout).
  373. return TRY(do_wait(vm, WaitMode::Sync, *typed_array, index, value, timeout));
  374. }
  375. // 25.4.14 Atomics.waitAsync ( typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-atomics.waitasync
  376. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::wait_async)
  377. {
  378. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  379. auto index = vm.argument(1);
  380. auto value = vm.argument(2);
  381. auto timeout = vm.argument(3);
  382. // 1. Return ? DoWait(async, typedArray, index, value, timeout).
  383. return TRY(do_wait(vm, WaitMode::Async, *typed_array, index, value, timeout));
  384. }
// 25.4.15 Atomics.notify ( typedArray, index, count ), https://tc39.es/ecma262/#sec-atomics.notify
// Performs validation and count coercion; the actual wake-up is unimplemented
// pending SharedArrayBuffer support (non-shared buffers correctly return +0).
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::notify)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto count_value = vm.argument(2);

    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index, true).
    // FIXME: ValidateAtomicAccessOnIntegerTypedArray is a new AO from the resizable array buffer proposal. Use it when the proposal is implemented.
    TRY(validate_integer_typed_array(vm, *typed_array, true));
    auto byte_index_in_buffer = TRY(validate_atomic_access(vm, *typed_array, index));

    // 2. If count is undefined, then
    double count = 0.0;
    if (count_value.is_undefined()) {
        // a. Let c be +∞.
        count = js_infinity().as_double();
    }
    // 3. Else,
    else {
        // a. Let intCount be ? ToIntegerOrInfinity(count).
        auto int_count = TRY(count_value.to_integer_or_infinity(vm));

        // b. Let c be max(intCount, 0).
        count = max(int_count, 0.0);
    }

    // 4. Let buffer be typedArray.[[ViewedArrayBuffer]].
    auto* buffer = typed_array->viewed_array_buffer();

    // 5. Let block be buffer.[[ArrayBufferData]].
    auto& block = buffer->buffer();

    // 6. If IsSharedArrayBuffer(buffer) is false, return +0𝔽.
    if (!buffer->is_shared_array_buffer())
        return Value { 0 };

    // FIXME: Implement the remaining steps when we support SharedArrayBuffer.
    // The computed values are intentionally unused until then.
    (void)byte_index_in_buffer;
    (void)count;
    (void)block;

    return vm.throw_completion<InternalError>(ErrorType::NotImplemented, "SharedArrayBuffer"sv);
}
  421. // 25.4.14 Atomics.xor ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.xor
  422. JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::xor_)
  423. {
  424. auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
  425. auto atomic_xor = [](auto* storage, auto value) { return AK::atomic_fetch_xor(storage, value); };
  426. #define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
  427. if (is<ClassName>(typed_array)) \
  428. return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_xor)));
  429. JS_ENUMERATE_TYPED_ARRAYS
  430. #undef __JS_ENUMERATE
  431. VERIFY_NOT_REACHED();
  432. }
  433. }