/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009, 2011 Helge Bahmann
 * Copyright (c) 2009 Phil Endecott
 * Copyright (c) 2013 Tim Blechmann
 * Linux-specific code by Phil Endecott
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file atomic/detail/ops_linux_arm.hpp
 *
 * This header contains implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.

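// A minimal sketch of the mechanism described above, assuming the fixed
// "kuser helper" entry points that this file uses below (0xffff0fc0 for the
// emulated cmpxchg, 0xffff0fa0 for the memory barrier). The cmpxchg helper
// returns zero when it performed the exchange; on failure it does not report
// the value it found, so the caller has to re-load it separately:
//
//     typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int* ptr);
//     typedef void (*kuser_barrier_t)(void);
//
//     volatile int value = 1;
//     int expected = 1;
//     bool ok = ((kuser_cmpxchg_t)0xffff0fc0)(expected, 2, &value) == 0;
//     if (!ok)
//         expected = value; // separate, racy load: the value may have changed
//                           // back to 'expected' by now, which is what makes
//                           // the failure look spurious to the caller
//     ((kuser_barrier_t)0xffff0fa0)(); // full hardware memory barrier
//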
struct linux_arm_cas_base
{
    static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
    {
        // Boost's memory_order constants are defined so that the acquire and
        // release components can be tested with a bitwise AND.
        if ((order & memory_order_release) != 0)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
    {
        if (order == memory_order_seq_cst)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
    {
        if ((order & (memory_order_consume | memory_order_acquire)) != 0)
            hardware_full_fence();
    }

    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
    {
        // Invoke the kernel-provided memory barrier helper mapped at a fixed
        // address (see the comment at the top of this file).
        typedef void (*kernel_dmb_t)(void);
        ((kernel_dmb_t)0xffff0fa0)();
    }
};

template< bool Signed >
struct linux_arm_cas :
    public linux_arm_cas_base
{
    typedef typename make_storage_type< 4u, Signed >::type storage_type;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        fence_before_store(order);
        storage = v;
        fence_after_store(order);
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        // Strengthen the weak CAS below by retrying as long as the failure
        // looks spurious, i.e. as long as the re-loaded value still equals
        // the expected one. Only a genuinely different value ends the loop.
        while (true)
        {
            storage_type tmp = expected;
            if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected)
            {
                expected = tmp;
                return false;
            }
        }
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // The kernel cmpxchg helper at the fixed address 0xffff0fc0 returns
        // zero when it performed the exchange.
        typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);

        if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)
        {
            return true;
        }
        else
        {
            // The helper does not report the value it found, so load it here;
            // this separate load is what reduces the CAS to weak semantics
            // (see the comment at the top of this file).
            expected = storage;
            return false;
        }
    }

    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
    {
        return true;
    }
};

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 1u, Signed >
{
};

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 2u, Signed >
{
};

template< bool Signed >
struct operations< 4u, Signed > :
    public cas_based_operations< linux_arm_cas< Signed > >
{
};

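// The specializations above obtain the remaining operations (exchange,
// fetch_add, and so on) from cas_based_operations, and the 1- and 2-byte
// cases are widened onto the 32-bit CAS by extending_cas_based_operations
// (both included at the top of this file). A minimal sketch of the general
// technique -- not necessarily Boost's exact implementation -- is a CAS
// retry loop such as:
//
//     storage_type fetch_add_sketch(storage_type volatile& storage, storage_type v, memory_order order)
//     {
//         storage_type old_val = load(storage, memory_order_relaxed);
//         while (!compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed))
//         {
//             // old_val now holds the value that was actually found; retry
//         }
//         return old_val;
//     }
//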
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        linux_arm_cas_base::hardware_full_fence();
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        __asm__ __volatile__ ("" ::: "memory");
}

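// A brief usage sketch of these fences (standard fence-based publication,
// using hypothetical names, not code from this library): a producer publishes
// plain data with a release thread_fence, and a consumer that has observed
// the flag issues an acquire thread_fence before reading the data. On this
// target any non-relaxed thread_fence expands to the kernel barrier helper,
// while signal_fence only needs to stop compiler reordering, since a signal
// handler runs in the same thread it interrupts.
//
//     // producer:
//     payload = 42;                             // plain (non-atomic) store
//     thread_fence(memory_order_release);
//     ready.store(1, memory_order_relaxed);     // 'ready' is an atomic flag
//
//     // consumer:
//     while (ready.load(memory_order_relaxed) == 0) {}
//     thread_fence(memory_order_acquire);
//     int r = payload;                          // observes 42
//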
} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_