OS     : Linux ip-172-26-2-223 5.4.0-1018-aws #18-Ubuntu SMP Wed Jun 24 01:15:00 UTC 2020 x86_64
Server : Apache
IP     : 172.26.2.223 | 3.12.123.254
DNS    : can't read [ /etc/named.conf ]
PHP    : 8.1.13
User   : www
Shell  : www.github.com/MadExploits
Current path : /usr/include/c++/9/bits/
Name                        Size       Permission
algorithmfwd.h              21.23 KB   -rw-r--r--
alloc_traits.h              20.98 KB   -rw-r--r--
allocated_ptr.h             3.22 KB    -rw-r--r--
allocator.h                 7.76 KB    -rw-r--r--
atomic_base.h               24.41 KB   -rw-r--r--
atomic_futex.h              9.28 KB    -rw-r--r--
atomic_lockfree_defines.h   2.3 KB     -rw-r--r--
basic_ios.h                 15.7 KB    -rw-r--r--
basic_ios.tcc               5.94 KB    -rw-r--r--
basic_string.h              241.28 KB  -rw-r--r--
basic_string.tcc            52.63 KB   -rw-r--r--
boost_concept_check.h       26.52 KB   -rw-r--r--
c++0x_warning.h             1.44 KB    -rw-r--r--
char_traits.h               24.25 KB   -rw-r--r--
codecvt.h                   24.83 KB   -rw-r--r--
concept_check.h             3.34 KB    -rw-r--r--
cpp_type_traits.h           9.71 KB    -rw-r--r--
cxxabi_forced.h             1.77 KB    -rw-r--r--
cxxabi_init_exception.h     2.17 KB    -rw-r--r--
deque.tcc                   33.6 KB    -rw-r--r--
enable_special_members.h    12.1 KB    -rw-r--r--
erase_if.h                  1.99 KB    -rw-r--r--
exception.h                 2.43 KB    -rw-r--r--
exception_defines.h         1.61 KB    -rw-r--r--
exception_ptr.h             5.84 KB    -rw-r--r--
forward_list.h              48.25 KB   -rw-r--r--
forward_list.tcc            13.55 KB   -rw-r--r--
fs_dir.h                    14.81 KB   -rw-r--r--
fs_fwd.h                    11.41 KB   -rw-r--r--
fs_ops.h                    9.5 KB     -rw-r--r--
fs_path.h                   36.09 KB   -rw-r--r--
fstream.tcc                 32.87 KB   -rw-r--r--
functexcept.h               3.35 KB    -rw-r--r--
functional_hash.h           8.37 KB    -rw-r--r--
gslice.h                    5.39 KB    -rw-r--r--
gslice_array.h              7.67 KB    -rw-r--r--
hash_bytes.h                2.1 KB     -rw-r--r--
hashtable.h                 73.06 KB   -rw-r--r--
hashtable_policy.h          66.51 KB   -rw-r--r--
indirect_array.h            7.68 KB    -rw-r--r--
invoke.h                    3.57 KB    -rw-r--r--
ios_base.h                  30.7 KB    -rw-r--r--
istream.tcc                 30.36 KB   -rw-r--r--
list.tcc                    16.57 KB   -rw-r--r--
locale_classes.h            24.32 KB   -rw-r--r--
locale_classes.tcc          8.18 KB    -rw-r--r--
locale_conv.h               18.36 KB   -rw-r--r--
locale_facets.h             90.17 KB   -rw-r--r--
locale_facets.tcc           38.62 KB   -rw-r--r--
locale_facets_nonio.h       67.38 KB   -rw-r--r--
locale_facets_nonio.tcc     44.22 KB   -rw-r--r--
localefwd.h                 5.8 KB     -rw-r--r--
mask_array.h                7.5 KB     -rw-r--r--
memoryfwd.h                 2.4 KB     -rw-r--r--
move.h                      6.38 KB    -rw-r--r--
nested_exception.h          4.69 KB    -rw-r--r--
node_handle.h               8.02 KB    -rw-r--r--
ostream.tcc                 12.03 KB   -rw-r--r--
ostream_insert.h            3.91 KB    -rw-r--r--
parse_numbers.h             7.76 KB    -rw-r--r--
postypes.h                  8.27 KB    -rw-r--r--
predefined_ops.h            8.87 KB    -rw-r--r--
ptr_traits.h                6.57 KB    -rw-r--r--
quoted_string.h             4.93 KB    -rw-r--r--
random.h                    173.87 KB  -rw-r--r--
random.tcc                  103.14 KB  -rw-r--r--
range_access.h              9.85 KB    -rw-r--r--
refwrap.h                   12.62 KB   -rw-r--r--
regex.h                     96.39 KB   -rw-r--r--
regex.tcc                   16.19 KB   -rw-r--r--
regex_automaton.h           10.49 KB   -rw-r--r--
regex_automaton.tcc         7.54 KB    -rw-r--r--
regex_compiler.h            16.1 KB    -rw-r--r--
regex_compiler.tcc          18.49 KB   -rw-r--r--
regex_constants.h           14.36 KB   -rw-r--r--
regex_error.h               4.79 KB    -rw-r--r--
regex_executor.h            7.31 KB    -rw-r--r--
regex_executor.tcc          18.4 KB    -rw-r--r--
regex_scanner.h             6.92 KB    -rw-r--r--
regex_scanner.tcc           14.66 KB   -rw-r--r--
shared_ptr.h                23.65 KB   -rw-r--r--
shared_ptr_atomic.h         9.55 KB    -rw-r--r--
shared_ptr_base.h           52.51 KB   -rw-r--r--
slice_array.h               9.21 KB    -rw-r--r--
specfun.h                   45.95 KB   -rw-r--r--
sstream.tcc                 9.9 KB     -rw-r--r--
std_abs.h                   3.3 KB     -rw-r--r--
std_function.h              23.01 KB   -rw-r--r--
std_mutex.h                 4.66 KB    -rw-r--r--
stl_algo.h                  210.37 KB  -rw-r--r--
stl_algobase.h              50.21 KB   -rw-r--r--
stl_bvector.h               33.09 KB   -rw-r--r--
stl_construct.h             7.22 KB    -rw-r--r--
stl_deque.h                 78.24 KB   -rw-r--r--
stl_function.h              41.3 KB    -rw-r--r--
stl_heap.h                  19.73 KB   -rw-r--r--
stl_iterator.h              41.75 KB   -rw-r--r--
stl_iterator_base_funcs.h   7.99 KB    -rw-r--r--
stl_iterator_base_types.h   8.48 KB    -rw-r--r--
stl_list.h                  66.22 KB   -rw-r--r--
stl_map.h                   52.24 KB   -rw-r--r--
stl_multimap.h              41.25 KB   -rw-r--r--
stl_multiset.h              35.63 KB   -rw-r--r--
stl_numeric.h               14.04 KB   -rw-r--r--
stl_pair.h                  18.21 KB   -rw-r--r--
stl_queue.h                 24 KB      -rw-r--r--
stl_raw_storage_iter.h      3.74 KB    -rw-r--r--
stl_relops.h                4.49 KB    -rw-r--r--
stl_set.h                   35.93 KB   -rw-r--r--
stl_stack.h                 11.94 KB   -rw-r--r--
stl_tempbuf.h               8.09 KB    -rw-r--r--
stl_tree.h                  73.26 KB   -rw-r--r--
stl_uninitialized.h         30.72 KB   -rw-r--r--
stl_vector.h                63.45 KB   -rw-r--r--
stream_iterator.h           6.71 KB    -rw-r--r--
streambuf.tcc               4.81 KB    -rw-r--r--
streambuf_iterator.h        13.56 KB   -rw-r--r--
string_view.tcc             6.54 KB    -rw-r--r--
stringfwd.h                 2.63 KB    -rw-r--r--
uniform_int_dist.h          10.01 KB   -rw-r--r--
unique_lock.h               5.96 KB    -rw-r--r--
unique_ptr.h                26.92 KB   -rw-r--r--
unordered_map.h             75.08 KB   -rw-r--r--
unordered_set.h             59.25 KB   -rw-r--r--
uses_allocator.h            6.66 KB    -rw-r--r--
valarray_after.h            22.3 KB    -rw-r--r--
valarray_array.h            20.8 KB    -rw-r--r--
valarray_array.tcc          7.08 KB    -rw-r--r--
valarray_before.h           18.69 KB   -rw-r--r--
vector.tcc                  30.15 KB   -rw-r--r--
Code Editor : atomic_base.h
// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed, memory_order_consume, memory_order_acquire,
      memory_order_release, memory_order_acq_rel, memory_order_seq_cst
    } memory_order;
#endif

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i;
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    { return __atomic_test_and_set (&_M_i, int(__m)); }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    { return __atomic_test_and_set (&_M_i, int(__m)); }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept { return load(); }
      operator __int_type() const volatile noexcept { return load(); }

      __int_type operator=(__int_type __i) noexcept
      { store(__i); return __i; }

      __int_type operator=(__int_type __i) volatile noexcept
      { store(__i); return __i; }

      __int_type operator++(int) noexcept { return fetch_add(1); }
      __int_type operator++(int) volatile noexcept { return fetch_add(1); }
      __int_type operator--(int) noexcept { return fetch_sub(1); }
      __int_type operator--(int) volatile noexcept { return fetch_sub(1); }

      __int_type operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p;

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept { return load(); }
      operator __pointer_type() const volatile noexcept { return load(); }

      __pointer_type operator=(__pointer_type __p) noexcept
      { store(__p); return __p; }

      __pointer_type operator=(__pointer_type __p) volatile noexcept
      { store(__p); return __p; }

      __pointer_type operator++(int) noexcept { return fetch_add(1); }
      __pointer_type operator++(int) volatile noexcept { return fetch_add(1); }
      __pointer_type operator--(int) noexcept { return fetch_sub(1); }
      __pointer_type operator--(int) volatile noexcept { return fetch_sub(1); }

      __pointer_type operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };

  /// @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif