#ifndef NETGEN_CORE_TASKMANAGER_HPP
#define NETGEN_CORE_TASKMANAGER_HPP

/*********************************************************************/
/* File:   taskmanager.hpp                                           */
/* Author: M. Hochsterger, J. Schoeberl                              */
/* Date:   10. Mar. 2015                                             */
/*********************************************************************/

#include <atomic>
#include <functional>
#include <list>
#include <ostream>
#include <thread>

#include "array.hpp"
#include "paje_trace.hpp"
#include "taskmanager.hpp"

#ifdef USE_NUMA
#include <numa.h>
#include <sched.h>
#endif

namespace ngcore
{
  using std::atomic;
  using std::function;

  class TaskInfo
  {
  public:
    int task_nr;
    int ntasks;

    int thread_nr;
    int nthreads;

    // int node_nr;
    // int nnodes;
  };

  NGCORE_API extern class TaskManager * task_manager;

  class TaskManager
  {
    // PajeTrace *trace;

    class alignas(64) NodeData
    {
    public:
      atomic<int> start_cnt{0};
      atomic<int> participate{0};
    };

    NGCORE_API static const function<void(TaskInfo&)> * func;
    NGCORE_API static const function<void()> * startup_function;
    NGCORE_API static const function<void()> * cleanup_function;
    NGCORE_API static atomic<int> ntasks;
    NGCORE_API static Exception * ex;

    NGCORE_API static atomic<int> jobnr;

    static atomic<int> complete[8];   // max nodes
    static atomic<int> done;
    static atomic<int> active_workers;
    static atomic<int> workers_on_node[8];   // max nodes
    // Array<atomic<int>*> sync;
    NGCORE_API static int sleep_usecs;
    NGCORE_API static bool sleep;

    static NodeData *nodedata[8];

    static int num_nodes;
    NGCORE_API static int num_threads;
    NGCORE_API static int max_threads;

#ifdef WIN32   // no exported thread_local in dlls on Windows
    static thread_local int thread_id;
#else
    NGCORE_API static thread_local int thread_id;
#endif

    NGCORE_API static bool use_paje_trace;

  public:
    NGCORE_API TaskManager();
    NGCORE_API ~TaskManager();

    NGCORE_API void StartWorkers();
    NGCORE_API void StopWorkers();

    void SuspendWorkers(int asleep_usecs = 1000)
    {
      sleep_usecs = asleep_usecs;
      sleep = true;
    }
    void ResumeWorkers() { sleep = false; }

    NGCORE_API static void SetNumThreads(int amax_threads);
    static int GetMaxThreads() { return max_threads; }
    // static int GetNumThreads() { return task_manager ? task_manager->num_threads : 1; }
    static int GetNumThreads() { return num_threads; }
#ifdef WIN32
    NGCORE_API static int GetThreadId();
#else
    static int GetThreadId() { return thread_id; }
#endif

    int GetNumNodes() const { return num_nodes; }

    static void SetPajeTrace (bool use) { use_paje_trace = use; }

    NGCORE_API static bool ProcessTask();

    NGCORE_API static void CreateJob (const function<void(TaskInfo&)> & afunc,
                                      int antasks = task_manager->GetNumThreads());

    static void SetStartupFunction (const function<void()> & func) { startup_function = &func; }
    static void SetStartupFunction () { startup_function = nullptr; }
    static void SetCleanupFunction (const function<void()> & func) { cleanup_function = &func; }
    static void SetCleanupFunction () { cleanup_function = nullptr; }

    void Done() { done = true; }
    NGCORE_API void Loop(int thread_num);

    NGCORE_API static std::list<std::tuple<std::string, double>> Timing ();
  };


  NGCORE_API void RunWithTaskManager (function<void()> alg);

  // For Python context manager
  NGCORE_API int  EnterTaskManager ();
  NGCORE_API void ExitTaskManager (int num_threads);

  class RegionTaskManager
  {
    int nthreads_before;
    int nthreads;
    bool started_taskmanager;

  public:
    RegionTaskManager(int anthreads=TaskManager::GetMaxThreads())
      : nthreads(anthreads)
    {
      if(task_manager || nthreads==0)
        {
          // already running, no need to do anything
          started_taskmanager = false;
          return;
        }
      else
        {
          nthreads_before = TaskManager::GetMaxThreads();
          TaskManager::SetNumThreads(nthreads);
          nthreads = EnterTaskManager();
          started_taskmanager = true;
        }
    }

    ~RegionTaskManager()
    {
      if(started_taskmanager)
        {
          ExitTaskManager(nthreads);
          TaskManager::SetNumThreads(nthreads_before);
        }
    }
  };

  NETGEN_INLINE int TasksPerThread (int tpt)
  {
    // return task_manager ? tpt*task_manager->GetNumThreads() : 1;
    return tpt*TaskManager::GetNumThreads();
  }


  class TotalCosts
  {
    size_t cost;
  public:
    TotalCosts (size_t _cost) : cost(_cost) { ; }
    size_t operator ()() { return cost; }
  };

  template <typename TR, typename TFUNC>
  NETGEN_INLINE void ParallelFor (T_Range<TR> r, TFUNC f,
                                  int antasks = TaskManager::GetNumThreads(),
                                  TotalCosts costs = 1000)
  {
    // if (task_manager && costs() >= 1000)

    TaskManager::CreateJob
      ([r, f] (TaskInfo & ti)
       {
         auto myrange = r.Split (ti.task_nr, ti.ntasks);
         for (auto i : myrange) f(i);
       }, antasks);

    /*
    else
      for (auto i : r) f(i);
    */
  }

  /*
  template <typename TFUNC>
  NETGEN_INLINE void ParallelFor (size_t n, TFUNC f,
                                  int antasks = task_manager ? task_manager->GetNumThreads() : 0)
  {
    ParallelFor (IntRange (n), f, antasks);
  }
  */

  template <typename ...Args>
  NETGEN_INLINE void ParallelFor (size_t n, Args...args)
  {
    ParallelFor (IntRange (n), args...);
  }

  template <typename TR, typename TFUNC>
  NETGEN_INLINE void ParallelForRange (T_Range<TR> r, TFUNC f,
                                       int antasks = TaskManager::GetNumThreads(),
                                       TotalCosts costs = 1000)
  {
    // if (task_manager && costs() >= 1000)

    TaskManager::CreateJob
      ([r, f] (TaskInfo & ti)
       {
         auto myrange = r.Split (ti.task_nr, ti.ntasks);
         f(myrange);
       }, antasks);

    /*
    else
      f(r);
    */
  }

  /*
  template <typename TFUNC>
  NETGEN_INLINE void ParallelForRange (size_t n, TFUNC f,
                                       int antasks = task_manager ? task_manager->GetNumThreads() : 0)
  {
    ParallelForRange (IntRange(n), f, antasks);
  }
  */

  template <typename ...Args>
  NETGEN_INLINE void ParallelForRange (size_t n, Args...args)
  {
    ParallelForRange (IntRange(n), args...);
  }

  template <typename TFUNC>
  NETGEN_INLINE void ParallelJob (TFUNC f,
                                  int antasks = TaskManager::GetNumThreads())
  {
    TaskManager::CreateJob (f, antasks);
  }
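
  /*
    Usage sketch: ParallelFor / ParallelForRange / ParallelJob expect a running
    task manager (e.g. started via RunWithTaskManager, EnterTaskManager/ExitTaskManager,
    or a RegionTaskManager scope); 'a' and 'n' below are placeholder names.

      RunWithTaskManager ([&] ()
      {
        Array<double> a(n);

        // one call of the lambda per index, indices split into antasks chunks
        ParallelFor (a.Size(), [&] (size_t i) { a[i] = 2.0*i; });

        // one call per contiguous sub-range, amortizes per-task setup
        ParallelForRange (a.Size(), [&] (IntRange r)
                          {
                            for (auto i : r) a[i] += 1;
                          });

        // lowest level: one call per task, with explicit task/thread information
        ParallelJob ([&] (TaskInfo ti)
                     {
                       IntRange r = IntRange(a.Size()).Split(ti.task_nr, ti.ntasks);
                       for (auto i : r) a[i] *= 0.5;
                     });
      });
  */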

  /*
    Usage example:

    SharedLoop myloop(100);
    task_manager->CreateJob ([&] (TaskInfo & ti)
    {
      for (int i : myloop)
        cout << "i = " << i << endl;
    });
  */

  class SharedLoop
  {
    atomic<int> cnt;
    IntRange r;

    class SharedIterator
    {
      atomic<int> & cnt;
      int myval;
      int endval;
    public:
      SharedIterator (atomic<int> & acnt, int aendval, bool begin_iterator)
        : cnt (acnt)
      {
        endval = aendval;
        myval = begin_iterator ? cnt++ : endval;
        if (myval > endval) myval = endval;
      }

      SharedIterator & operator++ ()
      {
        myval = cnt++;
        if (myval > endval) myval = endval;
        return *this;
      }

      int operator* () const { return myval; }
      bool operator!= (const SharedIterator & it2) const { return myval != it2.myval; }
    };

  public:
    SharedLoop (IntRange ar) : r(ar) { cnt = r.begin(); }

    SharedIterator begin() { return SharedIterator (cnt, r.end(), true); }
    SharedIterator end()   { return SharedIterator (cnt, r.end(), false); }
  };
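
  /*
    Usage sketch: a SharedLoop hands out the indices of one IntRange dynamically to
    all threads iterating over it concurrently ('ProcessItem' is a placeholder):

      SharedLoop sl(IntRange(100));
      ParallelJob ([&] (TaskInfo ti)
                   {
                     for (int i : sl)      // every index is handed out exactly once
                       ProcessItem(i);
                   });
  */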

  /*
  class alignas(4096) AtomicRange
  {
    mutex lock;
    int begin;
    int end;
  public:

    void Set (IntRange r)
    {
      lock_guard<mutex> guard(lock);
      begin = r.begin();
      end = r.end();
    }

    IntRange Get()
    {
      lock_guard<mutex> guard(lock);
      return IntRange(begin, end);
    }

    bool PopFirst (int & first)
    {
      lock_guard<mutex> guard(lock);
      bool non_empty = end > begin;
      first = begin;
      if (non_empty) begin++;
      return non_empty;
    }

    bool PopHalf (IntRange & r)
    {
      lock_guard<mutex> guard(lock);
      bool non_empty = end > begin;
      if (non_empty)
        {
          int mid = (begin+end+1)/2;
          r = IntRange(begin, mid);
          begin = mid;
        }
      return non_empty;
    }
  };
  */

  // lock free popfirst
  // faster for large loops, but slower for small loops (~1000) ????
  /*
  class alignas(4096) AtomicRange
  {
    mutex lock;
    atomic<int> begin;
    int end;
  public:

    void Set (IntRange r)
    {
      lock_guard<mutex> guard(lock);
      // begin = r.begin();
      begin.store(r.begin(), std::memory_order_relaxed);
      end = r.end();
    }

    void SetNoLock (IntRange r)
    {
      begin.store(r.begin(), std::memory_order_relaxed);
      end = r.end();
    }

    // IntRange Get()
    // {
    //   lock_guard<mutex> guard(lock);
    //   return IntRange(begin, end);
    // }

    bool PopFirst (int & first)
    {
      // int oldbegin = begin;
      int oldbegin = begin.load(std::memory_order_relaxed);
      if (oldbegin >= end) return false;

      while (!begin.compare_exchange_weak (oldbegin, oldbegin+1,
                                           std::memory_order_relaxed, std::memory_order_relaxed))
        if (oldbegin >= end) return false;

      first = oldbegin;
      return true;
    }

    bool PopHalf (IntRange & r)
    {
      // int oldbegin = begin;
      int oldbegin = begin.load(std::memory_order_relaxed);
      if (oldbegin >= end) return false;

      lock_guard<mutex> guard(lock);
      while (!begin.compare_exchange_weak (oldbegin, (oldbegin+end+1)/2,
                                           std::memory_order_relaxed, std::memory_order_relaxed))
        if (oldbegin >= end) return false;

      r = IntRange(oldbegin, (oldbegin+end+1)/2);
      return true;
    }
  };

  // inline ostream & operator<< (ostream & ost, AtomicRange & r)
  // {
  //   ost << r.Get();
  //   return ost;
  // }
  */

  class alignas(4096) AtomicRange
  {
    atomic<size_t> begin;
    atomic<size_t> end;
  public:

    void Set (IntRange r)
    {
      begin.store(std::numeric_limits<size_t>::max(), std::memory_order_release);
      end.store(r.end(), std::memory_order_release);
      begin.store(r.begin(), std::memory_order_release);
    }

    void SetNoLock (IntRange r)
    {
      end.store(r.end(), std::memory_order_release);
      begin.store(r.begin(), std::memory_order_release);
    }

    // IntRange Get()
    // {
    //   lock_guard<mutex> guard(lock);
    //   return IntRange(begin, end);
    // }

    bool PopFirst (size_t & hfirst)
    {
      // first = begin++;
      // return first < end;

      size_t first = begin.load(std::memory_order_relaxed);
      size_t nextfirst = first+1;
      if (first >= end) nextfirst = std::numeric_limits<size_t>::max()-1;

      // while (!begin.compare_exchange_weak (first, nextfirst))
      while (!begin.compare_exchange_weak (first, nextfirst,
                                           std::memory_order_relaxed, std::memory_order_relaxed))
        {
          first = begin;
          nextfirst = first+1;
          if (nextfirst >= end) nextfirst = std::numeric_limits<size_t>::max()-1;
        }
      hfirst = first;
      return first < end;
    }

    bool PopHalf (IntRange & r)
    {
      /*
      // int oldbegin = begin;
      size_t oldbegin = begin.load(std::memory_order_acquire);
      size_t oldend = end.load(std::memory_order_acquire);
      if (oldbegin >= oldend) return false;

      // lock_guard<mutex> guard(lock);
      while (!begin.compare_exchange_weak (oldbegin, (oldbegin+oldend+1)/2,
                                           std::memory_order_relaxed, std::memory_order_relaxed))
        {
          oldend = end.load(std::memory_order_acquire);
          if (oldbegin >= oldend) return false;
        }

      r = IntRange(oldbegin, (oldbegin+oldend+1)/2);
      return true;
      */

      size_t oldbegin = begin;  // .load(std::memory_order_acquire);
      size_t oldend = end;      // .load(std::memory_order_acquire);
      if (oldbegin >= oldend) return false;

      size_t nextbegin = (oldbegin+oldend+1)/2;
      if (nextbegin >= oldend) nextbegin = std::numeric_limits<size_t>::max()-1;

      while (!begin.compare_exchange_weak (oldbegin, nextbegin))
        // std::memory_order_relaxed, std::memory_order_relaxed))
        {
          oldend = end;  // .load(std::memory_order_acquire);
          if (oldbegin >= oldend) return false;

          nextbegin = (oldbegin+oldend+1)/2;
          if (nextbegin >= oldend) nextbegin = std::numeric_limits<size_t>::max()-1;
        }

      r = IntRange(oldbegin, (oldbegin+oldend+1)/2);
      return true;
    }
  };
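
  /*
    Sketch of the AtomicRange protocol used by SharedLoop2 below (illustration only):

      AtomicRange r;
      r.Set (IntRange(0,8));        // publish the range [0,8)

      size_t i;
      while (r.PopFirst(i))         // the owner consumes indices one by one
        { ... }

      IntRange stolen;
      if (r.PopHalf(stolen))        // a thief removes roughly half of the remaining
        { ... }                     // range (its lower part) in one atomic step
  */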

  class SharedLoop2
  {
    Array<AtomicRange> ranges;
    atomic<size_t> processed;
    atomic<size_t> total;
    atomic<int> participants;

    class SharedIterator
    {
      FlatArray<AtomicRange> ranges;
      atomic<size_t> & processed;
      size_t total;
      size_t myval;
      size_t processed_by_me = 0;
      int me;
      int steal_from;
    public:
      SharedIterator (FlatArray<AtomicRange> _ranges, atomic<size_t> & _processed, size_t _total,
                      int _me, bool begin_it)
        : ranges(_ranges), processed(_processed), total(_total)
      {
        if (begin_it)
          {
            // me = TaskManager::GetThreadId();
            me = _me;
            steal_from = me;
            GetNext();
          }
      }

      ~SharedIterator()
      {
        if (processed_by_me)
          processed += processed_by_me;
      }

      SharedIterator & operator++ () { GetNext(); return *this;}

      void GetNext()
      {
        size_t nr;
        if (ranges[me].PopFirst(nr))
          {
            processed_by_me++;
            myval = nr;
            return;
          }
        GetNext2();
      }

      void GetNext2()
      {
        processed += processed_by_me;
        processed_by_me = 0;

        // done with my work, going to steal ...
        while (1)
          {
            if (processed >= total) return;

            steal_from++;
            if (steal_from == ranges.Size()) steal_from = 0;

            // steal half of the work reserved for 'from':
            IntRange steal;
            if (ranges[steal_from].PopHalf(steal))
              {
                myval = steal.First();
                processed_by_me++;
                if (myval+1 < steal.Next())
                  ranges[me].Set (IntRange(myval+1, steal.Next()));
                return;
              }
          }
      }

      size_t operator* () const { return myval; }
      bool operator!= (const SharedIterator & it2) const { return processed < total; }
    };

  public:
    SharedLoop2 ()
      : ranges(TaskManager::GetNumThreads())
    { ; }

    SharedLoop2 (IntRange r)
      : ranges(TaskManager::GetNumThreads())
    { Reset (r); }

    void Reset (IntRange r)
    {
      for (size_t i = 0; i < ranges.Size(); i++)
        ranges[i].SetNoLock (r.Split(i,ranges.Size()));

      total.store(r.Size(), std::memory_order_relaxed);
      participants.store(0, std::memory_order_relaxed);
      processed.store(0, std::memory_order_release);
    }

    SharedIterator begin()
    {
      /*
      int me = participants++;
      if (me < ranges.Size())
        return SharedIterator (ranges, processed, total, me, true);
      else
        // more participants than buckets. set processed to total, and the loop is terminated immediately
        return SharedIterator (ranges, total, total, me, true);
      */
      return SharedIterator (ranges, processed, total, TaskManager::GetThreadId(), true);
    }

    SharedIterator end()   { return SharedIterator (ranges, processed, total, -1, false); }
  };
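
  /*
    Usage sketch for SharedLoop2: every thread owns one bucket of the range and steals
    from other buckets once its own is exhausted ('ProcessItem' is a placeholder):

      SharedLoop2 sl(IntRange(n));
      ParallelJob ([&] (TaskInfo ti)
                   {
                     for (size_t i : sl)   // dynamic load balancing via work stealing
                       ProcessItem(i);
                   });

    Reset() allows reusing the same SharedLoop2 object for several sweeps.
  */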

  class Partitioning
  {
    Array<size_t> part;
    size_t total_costs;
  public:
    Partitioning () { ; }

    template <typename T>
    Partitioning (const Array<T> & apart) { part = apart; }

    template <typename T>
    Partitioning & operator= (const Array<T> & apart) { part = apart; return *this; }

    size_t GetTotalCosts() const { return total_costs; }

    template <typename TFUNC>
    void Calc (size_t n, TFUNC costs, int size = task_manager ? task_manager->GetNumThreads() : 1)
    {
      Array<size_t> prefix (n);

      /*
      size_t sum = 0;
      for (auto i : ngstd::Range(n))
        {
          sum += costs(i);
          prefix[i] = sum;
        }
      total_costs = sum;
      */

      Array<size_t> partial_sums(TaskManager::GetNumThreads()+1);
      partial_sums[0] = 0;
      ParallelJob ([&] (TaskInfo ti)
                   {
                     IntRange r = IntRange(n).Split(ti.task_nr, ti.ntasks);
                     size_t mysum = 0;
                     for (size_t i : r)
                       {
                         size_t c = costs(i);
                         mysum += c;
                         prefix[i] = c;
                       }
                     partial_sums[ti.task_nr+1] = mysum;
                   });
      for (size_t i = 1; i < partial_sums.Size(); i++)
        partial_sums[i] += partial_sums[i-1];
      total_costs = partial_sums.Last();
      ParallelJob ([&] (TaskInfo ti)
                   {
                     IntRange r = IntRange(n).Split(ti.task_nr, ti.ntasks);
                     size_t mysum = partial_sums[ti.task_nr];
                     for (size_t i : r)
                       {
                         mysum += prefix[i];
                         prefix[i] = mysum;
                       }
                   });

      part.SetSize (size+1);
      part[0] = 0;
      for (int i = 1; i <= size; i++)
        part[i] = BinSearch (prefix, total_costs*i/size);
    }

    size_t Size() const { return part.Size()-1; }
    IntRange operator[] (size_t i) const { return ngcore::Range(part[i], part[i+1]); }
    IntRange Range() const { return ngcore::Range(part[0], part[Size()]); }

  private:
    // binary search in the prefix-sum array for the partition boundary of value i
    template <typename Tarray>
    int BinSearch(const Tarray & v, size_t i)
    {
      int n = v.Size();
      if (n == 0) return 0;

      int first = 0;
      int last = n-1;
      if(v[0] > i) return 0;
      if(v[n-1] <= i) return n;
      while(last-first > 1)
        {
          int m = (first+last)/2;
          if(v[m] < i)
            first = m;
          else
            last = m;
        }
      return first;
    }
  };


  template <typename TFUNC>
  NETGEN_INLINE void ParallelFor (const Partitioning & part, TFUNC f, int tasks_per_thread = 1)
  {
    if (task_manager)
      {
        int ntasks = tasks_per_thread * task_manager->GetNumThreads();
        if (ntasks % part.Size() != 0)
          throw Exception ("tasks must be a multiple of part.size");

        task_manager -> CreateJob
          ([&] (TaskInfo & ti)
           {
             int tasks_per_part = ti.ntasks / part.Size();
             int mypart = ti.task_nr / tasks_per_part;
             int num_in_part = ti.task_nr % tasks_per_part;

             auto myrange = part[mypart].Split (num_in_part, tasks_per_part);
             for (auto i : myrange) f(i);
           }, ntasks);
      }
    else
      {
        for (auto i : part.Range()) f(i);
      }
  }


  template <typename TFUNC>
  NETGEN_INLINE void ParallelForRange (const Partitioning & part, TFUNC f,
                                       int tasks_per_thread = 1, TotalCosts costs = 1000)
  {
    if (task_manager && costs() >= 1000)
      {
        int ntasks = tasks_per_thread * task_manager->GetNumThreads();
        if (ntasks % part.Size() != 0)
          throw Exception ("tasks must be a multiple of part.size");

        task_manager -> CreateJob
          ([&] (TaskInfo & ti)
           {
             int tasks_per_part = ti.ntasks / part.Size();
             int mypart = ti.task_nr / tasks_per_part;
             int num_in_part = ti.task_nr % tasks_per_part;

             auto myrange = part[mypart].Split (num_in_part, tasks_per_part);
             f(myrange);
           }, ntasks);
      }
    else
      {
        f(part.Range());
      }
  }


  template <typename FUNC, typename OP, typename T>
  auto ParallelReduce (size_t n, FUNC f, OP op, T initial1)
  {
    typedef decltype (op(initial1,initial1)) TRES;
    TRES initial(initial1);
    /*
    for (size_t i = 0; i < n; i++)
      initial = op(initial, f(i));
    */
    Array<TRES> part_reduce(TaskManager::GetNumThreads());
    ParallelJob ([&] (TaskInfo ti)
                 {
                   auto r = Range(n).Split(ti.task_nr, ti.ntasks);
                   auto var = initial;
                   for (auto i : r)
                     var = op(var, f(i));
                   part_reduce[ti.task_nr] = var;
                 });
    for (auto v : part_reduce)
      initial = op(initial, v);
    return initial;
  }
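
  /*
    Usage sketches ('cost', 'Work' and 'f' are placeholders):

      // cost-balanced static partitioning, typically computed once and reused
      Partitioning part;
      part.Calc (n, [&] (size_t i) { return cost(i); });
      ParallelForRange (part, [&] (IntRange r)
                        {
                          for (auto i : r) Work(i);
                        });

      // parallel reduction: sum of f(i) for i = 0..n-1
      double sum = ParallelReduce (n,
                                   [&] (size_t i) { return f(i); },
                                   [] (double a, double b) { return a+b; },
                                   0.0);
  */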

  //
  //  some sugar for working with arrays
  //
  // template <typename T> template <typename T2>
  // const FlatArray<T> FlatArray<T>::operator= (ParallelValue<T2> val)
  // {
  //   ParallelForRange (Size(),
  //                     [this, val] (IntRange r)
  //                     {
  //                       for (auto i : r)
  //                         (*this)[i] = val;
  //                     });
  //   return *this;
  // }
  //
  // template <typename T> template <typename T2>
  // const FlatArray<T> FlatArray<T>::operator= (ParallelFunction<T2> func)
  // {
  //   ParallelForRange (Size(),
  //                     [this, func] (IntRange r)
  //                     {
  //                       for (auto i : r)
  //                         (*this)[i] = func(i);
  //                     });
  //   return *this;
  // }

  class Tasks
  {
    size_t num;
  public:
    explicit Tasks (size_t _num = TaskManager::GetNumThreads()) : num(_num) { ; }
    auto GetNum() const { return num; }
  };

  /*
    // some idea, not yet supported

  using namespace std;

  template <typename T>
  class ParallelValue
  {
    T val;
  public:
    ParallelValue (const T & _val) : val(_val) { ; }
    operator T () const { return val; }
  };

  template <typename FUNC> class ParallelFunction
  {
    FUNC f;
  public:
    ParallelFunction (const FUNC & _f) : f(_f) { ; }
    operator FUNC () const { return f; }
    auto operator() (size_t i) const { return f(i); }
  };
  */

  /* currently not used, plus causing problems on MSVC 2017

  template <typename T, typename std::enable_if<has_call_operator<T>::value, int>::type = 0>
  inline ParallelFunction<T> operator| (const T & func, Tasks tasks)
  { return func; }

  template <typename T, typename std::enable_if<!has_call_operator<T>::value, int>::type = 0>
  inline ParallelValue<T> operator| (const T & obj, Tasks tasks)
  { return obj; }

  inline Tasks operator "" _tasks_per_thread (unsigned long long n)
  { return Tasks(n * TaskManager::GetNumThreads()); }
  */

  /*
    thought to be used as:
    array = 1 | tasks

  class DefaultTasks
  {
  public:
    operator Tasks () const { return TaskManager::GetNumThreads(); }
  };
  static DefaultTasks tasks;
  */


#ifdef USE_NUMA

  template <typename T>
  class NumaInterleavedArray : public Array<T>
  {
    T * numa_ptr;
    size_t numa_size;
  public:
    NumaInterleavedArray () { numa_size = 0; numa_ptr = nullptr; }

    NumaInterleavedArray (size_t s)
      : Array<T> (s, (T*)numa_alloc_interleaved(s*sizeof(T)))
    {
      numa_ptr = this->data;
      numa_size = s;
    }

    ~NumaInterleavedArray ()
    {
      numa_free (numa_ptr, numa_size*sizeof(T));
    }

    NumaInterleavedArray & operator= (T val)
    {
      Array<T>::operator= (val);
      return *this;
    }

    NumaInterleavedArray & operator= (NumaInterleavedArray && a2)
    {
      Array<T>::operator= ((Array<T>&&)a2);
      ngcore::Swap (numa_ptr, a2.numa_ptr);
      ngcore::Swap (numa_size, a2.numa_size);
      return *this;
    }

    void Swap (NumaInterleavedArray & b)
    {
      Array<T>::Swap(b);
      ngcore::Swap (numa_ptr, b.numa_ptr);
      ngcore::Swap (numa_size, b.numa_size);
    }

    void SetSize (size_t size)
    {
      std::cerr << "************************* NumaDistArray::SetSize not overloaded" << std::endl;
      Array<T>::SetSize(size);
    }
  };

  template <typename T>
  class NumaDistributedArray : public Array<T>
  {
    T * numa_ptr;
    size_t numa_size;
  public:
    NumaDistributedArray () { numa_size = 0; numa_ptr = nullptr; }

    NumaDistributedArray (size_t s)
      : Array<T> (s, (T*)numa_alloc_local(s*sizeof(T)))
    {
      numa_ptr = this->data;
      numa_size = s;

      /* int avail = */ numa_available();   // initialize libnuma
      int num_nodes = numa_num_configured_nodes();
      size_t pagesize = numa_pagesize();

      int npages = ceil ( double(s)*sizeof(T) / pagesize );

      // cout << "size = " << numa_size << endl;
      // cout << "npages = " << npages << endl;

      for (int i = 0; i < num_nodes; i++)
        {
          int beg = (i * npages) / num_nodes;
          int end = ( (i+1) * npages) / num_nodes;
          // cout << "node " << i << " : [" << beg << "-" << end << ")" << endl;
          numa_tonode_memory(numa_ptr+beg*pagesize/sizeof(T),
                             (end-beg)*pagesize, i);
        }
    }

    ~NumaDistributedArray ()
    {
      numa_free (numa_ptr, numa_size*sizeof(T));
    }

    NumaDistributedArray & operator= (NumaDistributedArray && a2)
    {
      Array<T>::operator= ((Array<T>&&)a2);
      ngcore::Swap (numa_ptr, a2.numa_ptr);
      ngcore::Swap (numa_size, a2.numa_size);
      return *this;
    }

    void Swap (NumaDistributedArray & b)
    {
      Array<T>::Swap(b);
      ngcore::Swap (numa_ptr, b.numa_ptr);
      ngcore::Swap (numa_size, b.numa_size);
    }

    void SetSize (size_t size)
    {
      std::cerr << "************************* NumaDistArray::SetSize not overloaded" << std::endl;
      Array<T>::SetSize(size);
    }
  };
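
  /*
    Sketch of the intended page placement (illustration only):

      NumaInterleavedArray<double> a(n);   // pages interleaved round-robin over all NUMA nodes
      NumaDistributedArray<double> b(n);   // pages split block-wise across the nodes
      NumaLocalArray<double>       c(n);   // all pages on the node of the allocating thread

    Without USE_NUMA all three templates are plain aliases for Array<T>.
  */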

  template <typename T>
  class NumaLocalArray : public Array<T>
  {
    T * numa_ptr;
    size_t numa_size;
  public:
    NumaLocalArray () { numa_size = 0; numa_ptr = nullptr; }

    NumaLocalArray (size_t s)
      : Array<T> (s, (T*)numa_alloc_local(s*sizeof(T)))
    {
      numa_ptr = this->data;
      numa_size = s;
    }

    ~NumaLocalArray ()
    {
      numa_free (numa_ptr, numa_size*sizeof(T));
    }

    NumaLocalArray & operator= (T val)
    {
      Array<T>::operator= (val);
      return *this;
    }

    NumaLocalArray & operator= (NumaLocalArray && a2)
    {
      Array<T>::operator= ((Array<T>&&)a2);
      ngcore::Swap (numa_ptr, a2.numa_ptr);
      ngcore::Swap (numa_size, a2.numa_size);
      return *this;
    }

    void Swap (NumaLocalArray & b)
    {
      Array<T>::Swap(b);
      ngcore::Swap (numa_ptr, b.numa_ptr);
      ngcore::Swap (numa_size, b.numa_size);
    }

    void SetSize (size_t size)
    {
      std::cerr << "************************* NumaDistArray::SetSize not overloaded" << std::endl;
      Array<T>::SetSize(size);
    }
  };

#else // USE_NUMA

  template <typename T>
  using NumaDistributedArray = Array<T>;

  template <typename T>
  using NumaInterleavedArray = Array<T>;

  template <typename T>
  using NumaLocalArray = Array<T>;

#endif // USE_NUMA

}

#endif // NETGEN_CORE_TASKMANAGER_HPP