AtomicsObject.cpp
/*
 * Copyright (c) 2021, Tim Flynn <trflynn89@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

// This file explicitly implements support for JS Atomics API, which can
// involve slow (non-lock-free) atomic ops.
#include <AK/Platform.h>
#ifdef AK_COMPILER_CLANG
# pragma clang diagnostic ignored "-Watomic-alignment"
#endif

#include <AK/Atomic.h>
#include <AK/ByteBuffer.h>
#include <AK/Endian.h>
#include <AK/TypeCasts.h>
#include <LibJS/Runtime/Agent.h>
#include <LibJS/Runtime/AtomicsObject.h>
#include <LibJS/Runtime/GlobalObject.h>
#include <LibJS/Runtime/TypedArray.h>
#include <LibJS/Runtime/Value.h>
#include <LibJS/Runtime/ValueInlines.h>

namespace JS {

GC_DEFINE_ALLOCATOR(AtomicsObject);

// 25.4.2.1 ValidateIntegerTypedArray ( typedArray, waitable ), https://tc39.es/ecma262/#sec-validateintegertypedarray
static ThrowCompletionOr<TypedArrayWithBufferWitness> validate_integer_typed_array(VM& vm, TypedArrayBase const& typed_array, bool waitable)
{
    // 1. Let taRecord be ? ValidateTypedArray(typedArray, unordered).
    auto typed_array_record = TRY(validate_typed_array(vm, typed_array, ArrayBuffer::Order::Unordered));

    // 2. NOTE: Bounds checking is not a synchronizing operation when typedArray's backing buffer is a growable SharedArrayBuffer.

    auto const& type_name = typed_array.element_name();

    // 3. If waitable is true, then
    if (waitable) {
        // a. If typedArray.[[TypedArrayName]] is neither "Int32Array" nor "BigInt64Array", throw a TypeError exception.
        if ((type_name != vm.names.Int32Array.as_string()) && (type_name != vm.names.BigInt64Array.as_string()))
            return vm.throw_completion<TypeError>(ErrorType::TypedArrayTypeIsNot, type_name, "Int32 or BigInt64"sv);
    }
    // 4. Else,
    else {
        // a. Let type be TypedArrayElementType(typedArray).
        // b. If IsUnclampedIntegerElementType(type) is false and IsBigIntElementType(type) is false, throw a TypeError exception.
        if (!typed_array.is_unclamped_integer_element_type() && !typed_array.is_bigint_element_type())
            return vm.throw_completion<TypeError>(ErrorType::TypedArrayTypeIsNot, type_name, "an unclamped integer or BigInt"sv);
    }

    // 5. Return taRecord.
    return typed_array_record;
}

// 25.4.2.2 ValidateAtomicAccess ( taRecord, requestIndex ), https://tc39.es/ecma262/#sec-validateatomicaccess
static ThrowCompletionOr<size_t> validate_atomic_access(VM& vm, TypedArrayWithBufferWitness const& typed_array_record, Value request_index)
{
    // 1. Let length be TypedArrayLength(taRecord).
    auto length = typed_array_length(typed_array_record);

    // 2. Let accessIndex be ? ToIndex(requestIndex).
    // 3. Assert: accessIndex ≥ 0.
    auto access_index = TRY(request_index.to_index(vm));

    // 4. If accessIndex ≥ length, throw a RangeError exception.
    if (access_index >= length)
        return vm.throw_completion<RangeError>(ErrorType::IndexOutOfRange, access_index, length);

    // 5. Let typedArray be taRecord.[[Object]].
    auto const& typed_array = *typed_array_record.object;

    // 6. Let elementSize be TypedArrayElementSize(typedArray).
    auto element_size = typed_array.element_size();

    // 7. Let offset be typedArray.[[ByteOffset]].
    auto offset = typed_array.byte_offset();

    // 8. Return (accessIndex × elementSize) + offset.
    return (access_index * element_size) + offset;
}

// 25.4.3.3 ValidateAtomicAccessOnIntegerTypedArray ( typedArray, requestIndex [ , waitable ] ), https://tc39.es/ecma262/#sec-validateatomicaccessonintegertypedarray
static ThrowCompletionOr<size_t> validate_atomic_access_on_integer_typed_array(VM& vm, TypedArrayBase const& typed_array, Value request_index, bool waitable = false)
{
    // 1. If waitable is not present, set waitable to false.
    // 2. Let taRecord be ? ValidateIntegerTypedArray(typedArray, waitable).
    auto typed_array_record = TRY(validate_integer_typed_array(vm, typed_array, waitable));

    // 3. Return ? ValidateAtomicAccess(taRecord, requestIndex).
    return TRY(validate_atomic_access(vm, typed_array_record, request_index));
}

// 25.4.3.4 RevalidateAtomicAccess ( typedArray, byteIndexInBuffer ), https://tc39.es/ecma262/#sec-revalidateatomicaccess
static ThrowCompletionOr<void> revalidate_atomic_access(VM& vm, TypedArrayBase const& typed_array, size_t byte_index_in_buffer)
{
    // 1. Let taRecord be MakeTypedArrayWithBufferWitnessRecord(typedArray, unordered).
    auto typed_array_record = make_typed_array_with_buffer_witness_record(typed_array, ArrayBuffer::Order::Unordered);

    // 2. NOTE: Bounds checking is not a synchronizing operation when typedArray's backing buffer is a growable SharedArrayBuffer.

    // 3. If IsTypedArrayOutOfBounds(taRecord) is true, throw a TypeError exception.
    if (is_typed_array_out_of_bounds(typed_array_record))
        return vm.throw_completion<TypeError>(ErrorType::BufferOutOfBounds, "TypedArray"sv);

    // 4. Assert: byteIndexInBuffer ≥ typedArray.[[ByteOffset]].
    VERIFY(byte_index_in_buffer >= typed_array.byte_offset());

    // 5. If byteIndexInBuffer ≥ taRecord.[[CachedBufferByteLength]], throw a RangeError exception.
    if (byte_index_in_buffer >= typed_array_record.cached_buffer_byte_length.length())
        return vm.throw_completion<RangeError>(ErrorType::IndexOutOfRange, byte_index_in_buffer, typed_array_record.cached_buffer_byte_length.length());

    // 6. Return unused.
    return {};
}

// 25.4.2.17 AtomicReadModifyWrite ( typedArray, index, value, op ), https://tc39.es/ecma262/#sec-atomicreadmodifywrite
static ThrowCompletionOr<Value> atomic_read_modify_write(VM& vm, TypedArrayBase& typed_array, Value index, Value value, ReadWriteModifyFunction operation)
{
    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
    auto byte_index_in_buffer = TRY(validate_atomic_access_on_integer_typed_array(vm, typed_array, index));

    Value value_to_set;

    // 2. If typedArray.[[ContentType]] is bigint, let v be ? ToBigInt(value).
    if (typed_array.content_type() == TypedArrayBase::ContentType::BigInt)
        value_to_set = TRY(value.to_bigint(vm));
    // 3. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
    else
        value_to_set = Value(TRY(value.to_integer_or_infinity(vm)));

    // 4. Perform ? RevalidateAtomicAccess(typedArray, byteIndexInBuffer).
    TRY(revalidate_atomic_access(vm, typed_array, byte_index_in_buffer));

    // 5. Let buffer be typedArray.[[ViewedArrayBuffer]].
    // 6. Let elementType be TypedArrayElementType(typedArray).
    // 7. Return GetModifySetValueInBuffer(buffer, byteIndexInBuffer, elementType, v, op).
    return typed_array.get_modify_set_value_in_buffer(byte_index_in_buffer, value_to_set, move(operation));
}
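
// WaitMode distinguishes the blocking Atomics.wait path from the Atomics.waitAsync path;
// both are routed through do_wait() below.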
enum class WaitMode {
    Sync,
    Async,
};

// 25.4.3.14 DoWait ( mode, typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-dowait
static ThrowCompletionOr<Value> do_wait(VM& vm, WaitMode mode, TypedArrayBase& typed_array, Value index_value, Value expected_value, Value timeout_value)
{
    // 1. Let taRecord be ? ValidateIntegerTypedArray(typedArray, true).
    auto typed_array_record = TRY(validate_integer_typed_array(vm, typed_array, true));

    // 2. Let buffer be taRecord.[[Object]].[[ViewedArrayBuffer]].
    auto* buffer = typed_array_record.object->viewed_array_buffer();

    // 3. If IsSharedArrayBuffer(buffer) is false, throw a TypeError exception.
    if (!buffer->is_shared_array_buffer())
        return vm.throw_completion<TypeError>(ErrorType::NotASharedArrayBuffer);

    // 4. Let i be ? ValidateAtomicAccess(taRecord, index).
    auto index = TRY(validate_atomic_access(vm, typed_array_record, index_value));

    // 5. Let arrayTypeName be typedArray.[[TypedArrayName]].
    auto const& array_type_name = typed_array.element_name();

    // 6. If arrayTypeName is "BigInt64Array", let v be ? ToBigInt64(value).
    i64 value = 0;
    if (array_type_name == vm.names.BigInt64Array.as_string())
        value = TRY(expected_value.to_bigint_int64(vm));
    // 7. Else, let v be ? ToInt32(value).
    else
        value = TRY(expected_value.to_i32(vm));

    // 8. Let q be ? ToNumber(timeout).
    auto timeout_number = TRY(timeout_value.to_number(vm));

    // 9. If q is either NaN or +∞𝔽, let t be +∞; else if q is -∞𝔽, let t be 0; else let t be max(ℝ(q), 0).
    double timeout = 0;
    if (timeout_number.is_nan() || timeout_number.is_positive_infinity())
        timeout = js_infinity().as_double();
    else if (timeout_number.is_negative_infinity())
        timeout = 0.0;
    else
        timeout = max(timeout_number.as_double(), 0.0);

    // 10. If mode is sync and AgentCanSuspend() is false, throw a TypeError exception.
    if (mode == WaitMode::Sync && !agent_can_suspend())
        return vm.throw_completion<TypeError>(ErrorType::AgentCannotSuspend);

    // FIXME: Implement the remaining steps when we support SharedArrayBuffer.
    (void)index;
    (void)value;
    (void)timeout;
    return vm.throw_completion<InternalError>(ErrorType::NotImplemented, "SharedArrayBuffer"sv);
}
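
// Helper for the read-modify-write Atomics functions (add, and, exchange, or, sub, xor):
// reads the index and value arguments, wraps the given AK atomic operation in a callback
// that reinterprets the raw element bytes as the element type T (ClampedU8 is treated as u8),
// and defers validation and conversion ordering to AtomicReadModifyWrite above. The
// floating-point branch is unreachable because ValidateIntegerTypedArray rejects
// non-integer element types.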
template<typename T, typename AtomicFunction>
static ThrowCompletionOr<Value> perform_atomic_operation(VM& vm, TypedArrayBase& typed_array, AtomicFunction&& operation)
{
    auto index = vm.argument(1);
    auto value = vm.argument(2);

    auto operation_wrapper = [&, operation = forward<AtomicFunction>(operation)](ByteBuffer x_bytes, ByteBuffer y_bytes) -> ByteBuffer {
        if constexpr (IsFloatingPoint<T>) {
            (void)operation;
            VERIFY_NOT_REACHED();
        } else {
            using U = Conditional<IsSame<ClampedU8, T>, u8, T>;

            auto* x = reinterpret_cast<U*>(x_bytes.data());
            auto* y = reinterpret_cast<U*>(y_bytes.data());
            operation(x, *y);

            return x_bytes;
        }
    };

    return atomic_read_modify_write(vm, typed_array, index, value, move(operation_wrapper));
}

AtomicsObject::AtomicsObject(Realm& realm)
    : Object(ConstructWithPrototypeTag::Tag, realm.intrinsics().object_prototype())
{
}

void AtomicsObject::initialize(Realm& realm)
{
    Base::initialize(realm);
    auto& vm = this->vm();

    u8 attr = Attribute::Writable | Attribute::Configurable;
    define_native_function(realm, vm.names.add, add, 3, attr);
    define_native_function(realm, vm.names.and_, and_, 3, attr);
    define_native_function(realm, vm.names.compareExchange, compare_exchange, 4, attr);
    define_native_function(realm, vm.names.exchange, exchange, 3, attr);
    define_native_function(realm, vm.names.isLockFree, is_lock_free, 1, attr);
    define_native_function(realm, vm.names.load, load, 2, attr);
    define_native_function(realm, vm.names.or_, or_, 3, attr);
    define_native_function(realm, vm.names.pause, pause, 0, attr);
    define_native_function(realm, vm.names.store, store, 3, attr);
    define_native_function(realm, vm.names.sub, sub, 3, attr);
    define_native_function(realm, vm.names.wait, wait, 4, attr);
    define_native_function(realm, vm.names.waitAsync, wait_async, 4, attr);
    define_native_function(realm, vm.names.notify, notify, 3, attr);
    define_native_function(realm, vm.names.xor_, xor_, 3, attr);

    // 25.4.17 Atomics [ @@toStringTag ], https://tc39.es/ecma262/#sec-atomics-@@tostringtag
    define_direct_property(vm.well_known_symbol_to_string_tag(), PrimitiveString::create(vm, "Atomics"_string), Attribute::Configurable);
}

// 25.4.4 Atomics.add ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.add
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::add)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_add = [](auto* storage, auto value) { return AK::atomic_fetch_add(storage, value); };
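
    // Dispatch on the concrete TypedArray subclass so that perform_atomic_operation() is
    // instantiated with the matching element type. The same pattern is repeated for the
    // other Atomics functions below.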
#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_add)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 25.4.5 Atomics.and ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.and
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::and_)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_and = [](auto* storage, auto value) { return AK::atomic_fetch_and(storage, value); };

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_and)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 25.4.6 Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue ), https://tc39.es/ecma262/#sec-atomics.compareexchange
template<typename T>
static ThrowCompletionOr<Value> atomic_compare_exchange_impl(VM& vm, TypedArrayBase& typed_array, Value index, Value expected_value, Value replacement_value)
{
    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
    auto byte_index_in_buffer = TRY(validate_atomic_access_on_integer_typed_array(vm, typed_array, index));

    // 2. Let buffer be typedArray.[[ViewedArrayBuffer]].
    auto* buffer = typed_array.viewed_array_buffer();
    // 3. Let block be buffer.[[ArrayBufferData]].
    auto& block = buffer->buffer();

    Value expected;
    Value replacement;

    // 4. If typedArray.[[ContentType]] is bigint, then
    if (typed_array.content_type() == TypedArrayBase::ContentType::BigInt) {
        // a. Let expected be ? ToBigInt(expectedValue).
        expected = TRY(expected_value.to_bigint(vm));
        // b. Let replacement be ? ToBigInt(replacementValue).
        replacement = TRY(replacement_value.to_bigint(vm));
    }
    // 5. Else,
    else {
        // a. Let expected be 𝔽(? ToIntegerOrInfinity(expectedValue)).
        expected = Value(TRY(expected_value.to_integer_or_infinity(vm)));
        // b. Let replacement be 𝔽(? ToIntegerOrInfinity(replacementValue)).
        replacement = Value(TRY(replacement_value.to_integer_or_infinity(vm)));
    }

    // 6. Perform ? RevalidateAtomicAccess(typedArray, byteIndexInBuffer).
    TRY(revalidate_atomic_access(vm, typed_array, byte_index_in_buffer));

    // 7. Let elementType be TypedArrayElementType(typedArray).
    // 8. Let elementSize be TypedArrayElementSize(typedArray).
    // 9. Let isLittleEndian be the value of the [[LittleEndian]] field of the surrounding agent's Agent Record.
    static constexpr bool is_little_endian = AK::HostIsLittleEndian;

    // 10. Let expectedBytes be NumericToRawBytes(elementType, expected, isLittleEndian).
    auto expected_bytes = MUST(ByteBuffer::create_uninitialized(sizeof(T)));
    numeric_to_raw_bytes<T>(vm, expected, is_little_endian, expected_bytes);

    // 11. Let replacementBytes be NumericToRawBytes(elementType, replacement, isLittleEndian).
    auto replacement_bytes = MUST(ByteBuffer::create_uninitialized(sizeof(T)));
    numeric_to_raw_bytes<T>(vm, replacement, is_little_endian, replacement_bytes);

    // FIXME: Implement SharedArrayBuffer case.
    // 12. If IsSharedArrayBuffer(buffer) is true, then
    //     a. Let rawBytesRead be AtomicCompareExchangeInSharedBlock(block, byteIndexInBuffer, elementSize, expectedBytes, replacementBytes).
    // 13. Else,
    //     a. Let rawBytesRead be a List of length elementSize whose elements are the sequence of elementSize bytes starting with block[byteIndexInBuffer].
    // FIXME: Propagate errors.
    auto raw_bytes_read = MUST(block.slice(byte_index_in_buffer, sizeof(T)));

    //     b. If ByteListEqual(rawBytesRead, expectedBytes) is true, then
    //         i. Store the individual bytes of replacementBytes into block, starting at block[byteIndexInBuffer].
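    // NOTE: Only the non-shared case is implemented for now. The compare-exchange itself is
    // still performed with an atomic primitive; its success flag is intentionally discarded,
    // since step 14 returns the previously read bytes whether or not the swap took place.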
    if constexpr (IsFloatingPoint<T>) {
        VERIFY_NOT_REACHED();
    } else {
        using U = Conditional<IsSame<ClampedU8, T>, u8, T>;

        auto* v = reinterpret_cast<U*>(block.span().slice(byte_index_in_buffer).data());
        auto* e = reinterpret_cast<U*>(expected_bytes.data());
        auto* r = reinterpret_cast<U*>(replacement_bytes.data());
        (void)AK::atomic_compare_exchange_strong(v, *e, *r);
    }

    // 14. Return RawBytesToNumeric(elementType, rawBytesRead, isLittleEndian).
    return raw_bytes_to_numeric<T>(vm, raw_bytes_read, is_little_endian);
}

// 25.4.6 Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue ), https://tc39.es/ecma262/#sec-atomics.compareexchange
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::compare_exchange)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto expected_value = vm.argument(2);
    auto replacement_value = vm.argument(3);

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(atomic_compare_exchange_impl<Type>(vm, *typed_array, index, expected_value, replacement_value));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 25.4.7 Atomics.exchange ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.exchange
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::exchange)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_exchange = [](auto* storage, auto value) { return AK::atomic_exchange(storage, value); };

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_exchange)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 25.4.8 Atomics.isLockFree ( size ), https://tc39.es/ecma262/#sec-atomics.islockfree
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::is_lock_free)
{
    auto size = TRY(vm.argument(0).to_integer_or_infinity(vm));
    if (size == 1)
        return Value(AK::atomic_is_lock_free<u8>());
    if (size == 2)
        return Value(AK::atomic_is_lock_free<u16>());
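    // The specification requires Atomics.isLockFree(4) to always return true.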
    if (size == 4)
        return Value(true);
    if (size == 8)
        return Value(AK::atomic_is_lock_free<u64>());
    return Value(false);
}

// 25.4.9 Atomics.load ( typedArray, index ), https://tc39.es/ecma262/#sec-atomics.load
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::load)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);

    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
    auto byte_index_in_buffer = TRY(validate_atomic_access_on_integer_typed_array(vm, *typed_array, index));

    // 2. Perform ? RevalidateAtomicAccess(typedArray, byteIndexInBuffer).
    TRY(revalidate_atomic_access(vm, *typed_array, byte_index_in_buffer));

    // 3. Let buffer be typedArray.[[ViewedArrayBuffer]].
    // 4. Let elementType be TypedArrayElementType(typedArray).
    // 5. Return GetValueFromBuffer(buffer, byteIndexInBuffer, elementType, true, seq-cst).
    return typed_array->get_value_from_buffer(byte_index_in_buffer, ArrayBuffer::Order::SeqCst, true);
}

// 25.4.10 Atomics.or ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.or
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::or_)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_or = [](auto* storage, auto value) { return AK::atomic_fetch_or(storage, value); };

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_or)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 1 Atomics.pause ( [ N ] ), http://tc39.es/proposal-atomics-microwait/
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::pause)
{
    // NOTE: This value is arbitrary, but intends to put an upper bound on the spin loop of between ~10-100ns on most systems.
    constexpr i32 MAXIMUM_ITERATIONS = 1000;
    // NOTE: This value is arbitrary, but intends to account for function call overhead.
    constexpr i32 DEFAULT_ITERATIONS = 100;

    // 1. If N is neither undefined nor an integral Number, throw a TypeError exception.
    auto pause = vm.argument(0);
    if (!pause.is_undefined() && !pause.is_integral_number())
        return vm.throw_completion<TypeError>(ErrorType::NotAnIntegerOrUndefined, "pause time");

    // 2. If the execution environment of the ECMAScript implementation supports signaling to the operating system or CPU that the current executing code is in a spin-wait loop, such as executing a pause CPU instruction, send that signal.
    //    When N is not undefined, it determines the number of times that signal is sent.
    u32 N = DEFAULT_ITERATIONS;
    if (!pause.is_undefined()) {
        auto integral = pause.as_i32_clamped_integral_number();
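        // Map negative requests onto [1, MAXIMUM_ITERATIONS] so that a more negative N never
        // spins longer than a less negative one: -1 maps to MAXIMUM_ITERATIONS, and anything
        // at or below -MAXIMUM_ITERATIONS maps to 1.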
        if (integral < 0)
            N = MAXIMUM_ITERATIONS + max(integral, -MAXIMUM_ITERATIONS) + 1;
        else
            // Implementation note: `N` is not required to be the _number of times_ that the signal is sent, but it seems like reasonable behaviour regardless.
            N = min(integral, MAXIMUM_ITERATIONS);
    }

    // The number of times the signal is sent for an integral Number N is less than or equal to the number of times it is sent for N + 1 if both N and N + 1 have the same sign.
    for (; N != 0; N--)
        AK::atomic_pause();

    // 3. Return undefined.
    return js_undefined();
}

// 25.4.11 Atomics.store ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.store
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::store)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto value = vm.argument(2);

    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index).
    auto byte_index_in_buffer = TRY(validate_atomic_access_on_integer_typed_array(vm, *typed_array, index));

    // 2. If typedArray.[[ContentType]] is bigint, let v be ? ToBigInt(value).
    if (typed_array->content_type() == TypedArrayBase::ContentType::BigInt)
        value = TRY(value.to_bigint(vm));
    // 3. Otherwise, let v be 𝔽(? ToIntegerOrInfinity(value)).
    else
        value = Value(TRY(value.to_integer_or_infinity(vm)));

    // 4. Perform ? RevalidateAtomicAccess(typedArray, byteIndexInBuffer).
    TRY(revalidate_atomic_access(vm, *typed_array, byte_index_in_buffer));

    // 5. Let buffer be typedArray.[[ViewedArrayBuffer]].
    // 6. Let elementType be TypedArrayElementType(typedArray).
    // 7. Perform SetValueInBuffer(buffer, byteIndexInBuffer, elementType, v, true, seq-cst).
    typed_array->set_value_in_buffer(byte_index_in_buffer, value, ArrayBuffer::Order::SeqCst, true);

    // 8. Return v.
    return value;
}

// 25.4.12 Atomics.sub ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.sub
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::sub)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_sub = [](auto* storage, auto value) { return AK::atomic_fetch_sub(storage, value); };

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_sub)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

// 25.4.13 Atomics.wait ( typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-atomics.wait
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::wait)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto value = vm.argument(2);
    auto timeout = vm.argument(3);

    // 1. Return ? DoWait(sync, typedArray, index, value, timeout).
    return TRY(do_wait(vm, WaitMode::Sync, *typed_array, index, value, timeout));
}

// 25.4.14 Atomics.waitAsync ( typedArray, index, value, timeout ), https://tc39.es/ecma262/#sec-atomics.waitasync
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::wait_async)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto value = vm.argument(2);
    auto timeout = vm.argument(3);

    // 1. Return ? DoWait(async, typedArray, index, value, timeout).
    return TRY(do_wait(vm, WaitMode::Async, *typed_array, index, value, timeout));
}

// 25.4.15 Atomics.notify ( typedArray, index, count ), https://tc39.es/ecma262/#sec-atomics.notify
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::notify)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));
    auto index = vm.argument(1);
    auto count_value = vm.argument(2);

    // 1. Let byteIndexInBuffer be ? ValidateAtomicAccessOnIntegerTypedArray(typedArray, index, true).
    auto byte_index_in_buffer = TRY(validate_atomic_access_on_integer_typed_array(vm, *typed_array, index, true));

    // 2. If count is undefined, then
    double count = 0.0;
    if (count_value.is_undefined()) {
        // a. Let c be +∞.
        count = js_infinity().as_double();
    }
    // 3. Else,
    else {
        // a. Let intCount be ? ToIntegerOrInfinity(count).
        auto int_count = TRY(count_value.to_integer_or_infinity(vm));
        // b. Let c be max(intCount, 0).
        count = max(int_count, 0.0);
    }

    // 4. Let buffer be typedArray.[[ViewedArrayBuffer]].
    auto* buffer = typed_array->viewed_array_buffer();
    // 5. Let block be buffer.[[ArrayBufferData]].
    auto& block = buffer->buffer();

    // 6. If IsSharedArrayBuffer(buffer) is false, return +0𝔽.
    if (!buffer->is_shared_array_buffer())
        return Value { 0 };

    // FIXME: Implement the remaining steps when we support SharedArrayBuffer.
    (void)byte_index_in_buffer;
    (void)count;
    (void)block;
    return vm.throw_completion<InternalError>(ErrorType::NotImplemented, "SharedArrayBuffer"sv);
}

// 25.4.16 Atomics.xor ( typedArray, index, value ), https://tc39.es/ecma262/#sec-atomics.xor
JS_DEFINE_NATIVE_FUNCTION(AtomicsObject::xor_)
{
    auto* typed_array = TRY(typed_array_from(vm, vm.argument(0)));

    auto atomic_xor = [](auto* storage, auto value) { return AK::atomic_fetch_xor(storage, value); };

#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    if (is<ClassName>(typed_array))                                                 \
        return TRY(perform_atomic_operation<Type>(vm, *typed_array, move(atomic_xor)));
    JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE

    VERIFY_NOT_REACHED();
}

}