#include "../../../deps/riscv-opcodes/encoding.h"

#ifdef OMP_DEBUG_LEVEL
#define _OMP_PRINTF(...) \
    printf("[omp] " __VA_ARGS__);

#define OMP_PRINTF(d, ...)        \
    if (OMP_DEBUG_LEVEL >= d) {   \
        _OMP_PRINTF(__VA_ARGS__); \
    }
#else
#define OMP_PRINTF(d, ...)
#endif
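
// Usage sketch (illustration only, not part of the original header):
// OMP_PRINTF emits output only when OMP_DEBUG_LEVEL is defined and at least
// as large as its first argument; all output carries the "[omp] " prefix.
// The threshold value 2 below is an arbitrary example.
//
//     #define OMP_DEBUG_LEVEL 10   // assumed to be defined before this header
//     ...
//     OMP_PRINTF(2, "entering parallel region with %d threads\n", num_threads);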

#ifdef OMPSTATIC_NUMTHREADS
#define _OMP_T const omp_t
#define _OMP_TEAM_T const omp_team_t
#else
#define _OMP_T omp_t
#define _OMP_TEAM_T omp_team_t
#endif

#define __snrt_omp_bootstrap(core_idx)     \
    if (snrt_omp_bootstrap(core_idx)) do { \
            snrt_cluster_hw_barrier();     \
            return 0;                      \
    } while (0)

#define __snrt_omp_destroy(core_idx) \
    eu_exit(core_idx);               \
    snrt_cluster_hw_barrier();
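
// Usage sketch (assumption: every cluster core enters main(), as in typical
// snRuntime programs). Cores for which snrt_omp_bootstrap() returns non-zero
// synchronize and return from main() inside the bootstrap macro, presumably
// after being parked in the event-unit loop, so only the remaining core runs
// the sequential part; the destroy macro shuts the team down again.
//
//     int main() {
//         unsigned core_idx = snrt_cluster_core_idx();
//         __snrt_omp_bootstrap(core_idx);  // workers do not run past this line
//
//         // ... sequential and parallel work on the master core ...
//
//         __snrt_omp_destroy(core_idx);
//         return 0;
//     }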

/* omp_team_t and omp_t type definitions omitted from this listing */

#ifndef OMPSTATIC_NUMTHREADS
extern __thread omp_t volatile *omp_p;
#else
extern omp_t omp_p;
#endif

unsigned snrt_omp_bootstrap(uint32_t core_idx);
void partialParallelRegion(int32_t argc, void *data,
                           void (*fn)(void *, uint32_t), int num_threads);
void omp_print_prof(void);

#ifndef OMPSTATIC_NUMTHREADS
static inline omp_t *omp_getData() { return (omp_t *)omp_p; }
static inline omp_team_t *omp_getTeam(omp_t *_this) {
    return &_this->plainTeam;
}
#else
static inline const omp_t *omp_getData() { return &omp_p; }
static inline const omp_team_t *omp_getTeam(const omp_t *_this) {
    return &_this->plainTeam;
}
#endif

static inline unsigned omp_get_thread_num(void) {
    return snrt_cluster_core_idx();
}

static inline unsigned omp_get_num_threads(void) {
    return snrt_cluster_compute_core_num();
}

static inline void parallelRegion(int32_t argc, void *data,
                                  void (*fn)(void *, uint32_t),
                                  int num_threads) {
#ifndef OMPSTATIC_NUMTHREADS
    omp_p->plainTeam.nbThreads = num_threads;
#endif

    OMP_PRINTF(10, "num_threads=%d nbThreads=%d omp_p->numThreads=%d\n",
               num_threads, omp_p->plainTeam.nbThreads, omp_p->numThreads);

    // Dispatch fn to the worker cores through the event unit, then have the
    // calling core take part in the region as well.
    (void)eu_dispatch_push(fn, argc, data, num_threads);

    eu_run_empty(snrt_cluster_core_idx());
}
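
// Usage sketch (work_fn and args are hypothetical names; the exact argument
// marshalling between data/argc is an assumption): parallelRegion() pushes fn
// to the event unit so the worker cores execute it, and the calling core then
// enters eu_run_empty(). Inside fn, the usual SPMD split over the team can be
// expressed with omp_get_thread_num()/omp_get_num_threads().
//
//     static void work_fn(void *data, uint32_t argc) {
//         unsigned tid = omp_get_thread_num();
//         unsigned nth = omp_get_num_threads();
//         int *buf = (int *)data;
//         for (unsigned i = tid; i < 1024; i += nth)  // cyclic split of 1024 items
//             buf[i] = (int)i;
//     }
//
//     // on the core that survived __snrt_omp_bootstrap():
//     parallelRegion(1, (void *)args, work_fn, omp_get_num_threads());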