// If set to 0, disable the "double-checked lock pattern" (DCLP) spin reads used below.
#ifndef OIIO_THREAD_ALLOW_DCLP
#    define OIIO_THREAD_ALLOW_DCLP 1
#endif
using std::recursive_mutex;
// pause(delay): hint to the CPU that we are in a spin-wait loop, using the
// cheapest platform-appropriate pause/no-op instruction.
inline void
pause(int delay) noexcept
{
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
    for (int i = 0; i < delay; ++i)
        __asm__ __volatile__("pause;");

#elif defined(__GNUC__) && (defined(__arm__) || defined(__s390__))
    for (int i = 0; i < delay; ++i)
        __asm__ __volatile__("NOP;");

#elif defined(_MSC_VER)
    for (int i = 0; i < delay; ++i) {
        YieldProcessor();  // Windows pause/yield hint
    }

#else
    // No pause instruction on this platform; just spin.
    for (int i = 0; i < delay; ++i)
        ;
#endif
}
// atomic_backoff (fragments): pause longer on each call until exceeding m_pausemax, then yield.
        , m_pausemax(pausemax)
        // ...
        if (m_count <= m_pausemax) {
// spin_mutex::lock() (fragment): with OIIO_THREAD_ALLOW_DCLP, spin on a
// plain read of m_locked (no bus-locking test_and_set) until it looks free.
#if OIIO_THREAD_ALLOW_DCLP
        do {
            // ... back off briefly ...
        } while (*(volatile bool*)&m_locked);
#endif
// spin_mutex::unlock(): release the lock with "release" memory semantics.
        m_locked.clear(std::memory_order_release);
// spin_mutex::try_lock(): return true if we acquired the lock.
        return !m_locked.test_and_set(std::memory_order_acquire);
    std::atomic_flag m_locked = ATOMIC_FLAG_INIT;
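// A minimal usage sketch (not part of the header), assuming OpenImageIO is
// installed: guard a shared counter with a spin_mutex through its scoped
// lock_guard. "counter" and "bump" are hypothetical names for illustration.
#include <OpenImageIO/thread.h>

static OIIO::spin_mutex counter_mutex;
static int counter = 0;

void bump()
{
    // Acquires counter_mutex on construction, releases it at end of scope.
    OIIO::spin_mutex::lock_guard guard(counter_mutex);
    ++counter;
}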
// Writer path (fragment): spin until the reader count drains to zero.
#if OIIO_THREAD_ALLOW_DCLP
        while (*(volatile int*)&m_readers > 0)
            ;
#else
        while (m_readers > 0)
            ;
#endif
    // Scoped guards that hold the shared/exclusive lock for their lifetime:
    class read_lock_guard {
    // ...
    class write_lock_guard {
// spin_rw_mutex::read_lock(): optimistically bump the reader count; if a
// writer was active, back the increment out and retry (with backoff) until
// the compare-exchange succeeds with no writer present.
        int oldval = m_bits.fetch_add(1, std::memory_order_acquire);
        if (!(oldval & WRITER))
            return;
        int expected = (--m_bits) & NOTWRITER;
        if (m_bits.compare_exchange_weak(expected, expected + 1,
                                         std::memory_order_acquire))
            return;
        do {
            // ... back off briefly ...
            expected = m_bits.load() & NOTWRITER;
        } while (!m_bits.compare_exchange_weak(expected, expected + 1,
                                               std::memory_order_acquire));
// spin_rw_mutex::read_unlock(): atomically drop our reader count.
        m_bits.fetch_sub(1, std::memory_order_release);
// write_lock(): acquirable only when m_bits == 0 (no readers, no writer).
        int expected = 0;
        if (m_bits.compare_exchange_weak(expected, WRITER,
                                         std::memory_order_acquire))
            return;
        do {
            expected = 0;
        } while (!m_bits.compare_exchange_weak(expected, WRITER,
                                               std::memory_order_acquire));
// write_unlock(): clear the writer bit.
        m_bits.fetch_sub(WRITER, std::memory_order_release);
    // One word holds the reader count, with a high bit marking an active
    // writer (so at most 2^30 simultaneous readers).
    enum { WRITER = 1 << 30, NOTWRITER = WRITER - 1 };
    std::atomic<int> m_bits { 0 };
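// A minimal usage sketch (not part of the header): protect a table that is
// read often but written rarely, using the spin_rw_read_lock and
// spin_rw_write_lock scoped-guard typedefs. "table", "lookup", and "insert"
// are hypothetical names for illustration.
#include <map>
#include <string>
#include <OpenImageIO/thread.h>

static OIIO::spin_rw_mutex table_mutex;
static std::map<std::string, int> table;

int lookup(const std::string& key)
{
    OIIO::spin_rw_read_lock lock(table_mutex);   // shared (read) lock
    auto it = table.find(key);
    return it == table.end() ? -1 : it->second;
}

void insert(const std::string& key, int value)
{
    OIIO::spin_rw_write_lock lock(table_mutex);  // exclusive (write) lock
    table[key] = value;
}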
// mutex_pool: a fixed number of cache-line-aligned mutexes shared among many
// objects, selected by hashing a key.
template<class Mutex, class Key, class Hash, size_t Bins = 16>
class mutex_pool {
    // ...
    struct AlignedMutex {
    // ...
    AlignedMutex m_mutex[Bins];
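// A minimal usage sketch (not part of the header): share 64 spin mutexes
// among arbitrarily many string keys rather than allocating one mutex per
// key. The template parameters follow the fragment above; "touch" is a
// hypothetical function name.
#include <string>
#include <OpenImageIO/thread.h>

using KeyLockPool = OIIO::mutex_pool<OIIO::spin_mutex, std::string,
                                     std::hash<std::string>, 64>;
static KeyLockPool key_locks;

void touch(const std::string& key)
{
    // operator[] hashes the key into one of the 64 bins and returns that
    // bin's mutex; distinct keys may share a mutex, which is still safe.
    OIIO::spin_mutex::lock_guard lock(key_locks[key]);
    // ... update per-key state here ...
}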
// thread_group (fragments): owns a set of threads that you can create or
// adopt individually and join all at once.
        m_threads.emplace_back(t);          // add_thread(): adopt a thread

    template<typename FUNC, typename... Args>
    thread* create_thread(FUNC func, Args&&... args);   // spawn and adopt

        for (auto& t : m_threads)           // join_all(): join every thread
            // ...

        return m_threads.size();            // size(): number of threads

    mutable mutex m_mutex;
    std::vector<std::unique_ptr<thread>> m_threads;
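// A minimal usage sketch (not part of the header): spawn a few workers with
// create_thread() and wait for them with join_all(). "worker" and
// "run_workers" are hypothetical names for illustration.
#include <cstdio>
#include <OpenImageIO/thread.h>

void worker(int index) { std::printf("hello from worker %d\n", index); }

void run_workers()
{
    OIIO::thread_group group;
    for (int i = 0; i < 4; ++i)
        group.create_thread(worker, i);
    group.join_all();   // block until every thread in the group has finished
}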
// thread_pool (fragments): the public interface of the shared pool.
    // Set the number of worker threads in the pool.
    void resize(int nthreads = -1);
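// A minimal sketch (not part of the header): the shared pool returned by
// default_thread_pool() can be resized before heavy use; the count of 8 here
// is an arbitrary illustration.
#include <OpenImageIO/thread.h>

void configure_pool()
{
    OIIO::default_thread_pool()->resize(8);   // run 8 worker threads
}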
    // Submit a task: a callable whose only argument is the thread id (int).
    // Returns a future for the callable's result.
    template<typename F>
    auto push(F&& f) -> std::future<decltype(f(0))>
    {
        auto pck = std::make_shared<std::packaged_task<decltype(f(0))(int)>>(
            std::forward<F>(f));
        auto _f = new std::function<void(int id)>(
            [pck](int id) { (*pck)(id); });
        push_queue_and_notify(_f);
        return pck->get_future();
    }
    // Submit a task with extra arguments, called as f(thread_id, rest...).
    // Returns a future for the result.
    template<typename F, typename... Rest>
    auto push(F&& f, Rest&&... rest) -> std::future<decltype(f(0, rest...))>
    {
        auto pck
            = std::make_shared<std::packaged_task<decltype(f(0, rest...))(int)>>(
                std::bind(std::forward<F>(f), std::placeholders::_1,
                          std::forward<Rest>(rest)...));
        auto _f = new std::function<void(int id)>(
            [pck](int id) { (*pck)(id); });
        push_queue_and_notify(_f);
        return pck->get_future();
    }
    // Is the calling thread one of this pool's workers?
    bool this_thread_is_in_pool() const;

    // How many tasks are currently waiting in the queue?
    size_t jobs_in_queue() const;

    // Is the pool so busy that it would be unwise to push more work?
    bool very_busy() const;

private:
    std::unique_ptr<Impl> m_impl;   // hidden implementation (PIMPL)

    void push_queue_and_notify(std::function<void(int id)>* f);
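// A minimal usage sketch (not part of the header): grab the shared pool with
// default_thread_pool(), push a task whose first argument is the thread id,
// and fetch the result through the returned future. "square" and
// "compute_async" are hypothetical names for illustration.
#include <future>
#include <OpenImageIO/thread.h>

int square(int /*thread_id*/, int x) { return x * x; }

int compute_async()
{
    OIIO::thread_pool* pool = OIIO::default_thread_pool();
    std::future<int> result = pool->push(square, 7);
    return result.get();   // waits for the task, then returns 49
}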
// task_set (fragments): remember which thread created the set, and collect a
// future for each task pushed into it.
        , m_submitter_thread(std::this_thread::get_id())

        OIIO_DASSERT(std::this_thread::get_id() == submitter()
                     && "All tasks in a task_set should be added by the same thread");
        m_futures.emplace_back(std::move(f));
    // Wait for the task in the set indexed by taskindex to finish.
    void wait_for_task(size_t taskindex, bool block = false);

    // Wait for all tasks in the set to finish.
    void wait(bool block = false);
    // Debugging sanity check (fragment): after wait(), every future in the
    // set should already be ready.
        const std::chrono::milliseconds wait_time(0);
        for (auto&& f : m_futures)
            OIIO_ASSERT(f.wait_for(wait_time) == std::future_status::ready);

    std::vector<std::future<void>> m_futures;
// thread_pool notes: the int argument passed to every task is the thread
// number within the pool, or -1 if the task is being executed by a non-pool
// thread (this can happen when the whole pool is occupied and the calling
// thread contributes to running the workload).
// If you just want to "fire and forget", simply push the function and its
// arguments and ignore the returned future: pool->push(func, args...).
// The returned future lets you get the result, waiting if necessary. A
// common idiom is to fire a bunch of sub-tasks at the queue and then wait
// for them all to complete; the task_set helper class makes that easy.
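// A minimal sketch of that idiom (not part of the header): tasks pushed into
// a task_set are waited on when the set goes out of scope. "blur_row",
// "blur_all_rows", and "nrows" are hypothetical names for illustration.
#include <OpenImageIO/thread.h>

void blur_row(int /*thread_id*/, int row) { /* hypothetical per-row work */ }

void blur_all_rows(int nrows)
{
    OIIO::thread_pool* pool = OIIO::default_thread_pool();
    OIIO::task_set tasks(pool);
    for (int row = 0; row < nrows; ++row)
        tasks.push(pool->push(blur_row, row));
    // Leaving this scope destroys 'tasks', which waits for every sub-task.
}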