Snitch Runtime
Loading...
Searching...
No Matches
sync.h
Go to the documentation of this file.
1// Copyright 2023 ETH Zurich and University of Bologna.
2// Licensed under the Apache License, Version 2.0, see LICENSE for details.
3// SPDX-License-Identifier: Apache-2.0
4//
5// Luca Colagrande <colluca@iis.ee.ethz.ch>
6// Viviane Potocnik <vivianep@iis.ee.ethz.ch>
7
13#pragma once
14
15#include "../../deps/riscv-opcodes/encoding.h"
16
17#include <math.h>
18
// Address mask covering the cluster-index bits of a cluster's address
// space; passed to snrt_enable_multicast() to broadcast a single store
// to every cluster. NOTE(review): assumes SNRT_CLUSTER_NUM is a power
// of two so (SNRT_CLUSTER_NUM - 1) forms a contiguous bitmask — confirm.
19#define SNRT_BROADCAST_MASK ((SNRT_CLUSTER_NUM - 1) * SNRT_CLUSTER_OFFSET)
20
21//================================================================================
22// Mutex functions
23//================================================================================
24
28inline volatile uint32_t *snrt_mutex() { return &_snrt_mutex; }
29
/**
 * @brief Acquire a mutex, blocking (test-and-set spinlock).
 *
 * Repeatedly performs an atomic swap of 1 into the lock word until the
 * previous value reads back 0 (i.e. the lock was free). The `.aq`
 * ordering on the AMO makes this an acquire operation. Clobbers t0.
 *
 * @param pmtx Pointer to the lock word (0 = free, 1 = held).
 */
38inline void snrt_mutex_acquire(volatile uint32_t *pmtx) {
39 asm volatile(
40 "li t0,1 # t0 = 1\n"
41 "1:\n"
42 " amoswap.w.aq t0,t0,(%0) # t0 = oldlock & lock = 1\n"
43 " bnez t0,1b # Retry if previously set)\n"
44 : "+r"(pmtx)
45 :
46 : "t0");
47}
48
/**
 * @brief Acquire a mutex, blocking (test-and-test-and-set variant).
 *
 * First spins on a plain load until the lock word reads 0 (avoiding AMO
 * traffic while the lock is held), then attempts the atomic swap.
 * Clobbers t0.
 *
 * NOTE(review): if the swap at label 2 fails, it retries the AMO
 * directly (branch to 2b) instead of returning to the load loop at
 * label 1 — after one failed swap this degenerates to plain
 * test-and-set. Confirm this is intentional.
 *
 * @param pmtx Pointer to the lock word (0 = free, 1 = held).
 */
54inline void snrt_mutex_ttas_acquire(volatile uint32_t *pmtx) {
55 asm volatile(
56 "1:\n"
57 " lw t0, 0(%0)\n"
58 " bnez t0, 1b\n"
59 " li t0,1 # t0 = 1\n"
60 "2:\n"
61 " amoswap.w.aq t0,t0,(%0) # t0 = oldlock & lock = 1\n"
62 " bnez t0,2b # Retry if previously set)\n"
63 : "+r"(pmtx)
64 :
65 : "t0");
66}
67
/**
 * @brief Release a previously-acquired mutex.
 *
 * Atomically stores 0 into the lock word; the `.rl` ordering on the AMO
 * makes this a release operation.
 *
 * @param pmtx Pointer to the lock word to release.
 */
71inline void snrt_mutex_release(volatile uint32_t *pmtx) {
72 asm volatile("amoswap.w.rl x0,x0,(%0) # Release lock by storing 0\n"
73 : "+r"(pmtx));
74}
75
76//================================================================================
77// Barrier functions
78//================================================================================
79
/**
 * @brief Wake the selected cores in every other cluster.
 *
 * Writes `core_mask` into each remote cluster's CLINT "set" register,
 * either via a single multicast store (when SNRT_SUPPORTS_MULTICAST is
 * defined) or with one store per remote cluster.
 *
 * @param core_mask Bitmask of cores to wake within each cluster.
 */
80inline void snrt_wake_all(uint32_t core_mask) {
81#ifdef SNRT_SUPPORTS_MULTICAST
82 // Multicast cluster interrupt to every other cluster's core
83 // Note: we need to address another cluster's address space
84 // because the cluster XBAR has not been extended to support
85 // multicast yet. We address the second cluster, if we are the
86 // first cluster, and the first cluster otherwise.
87 uintptr_t addr = (uintptr_t)snrt_cluster_clint_set_ptr() -
88 SNRT_CLUSTER_OFFSET * snrt_cluster_idx();
89 if (snrt_cluster_idx() == 0) addr += SNRT_CLUSTER_OFFSET;
90 snrt_enable_multicast(SNRT_BROADCAST_MASK);
91 *((uint32_t *)addr) = core_mask;
// NOTE(review): multicast appears to stay enabled after the store in
// this view — confirm a snrt_disable_multicast() call follows here.
93#else
94 for (int i = 0; i < snrt_cluster_num(); i++) {
95 if (snrt_cluster_idx() != i) {
96 void *ptr = snrt_remote_l1_ptr(snrt_cluster_clint_set_ptr(),
97 snrt_cluster_idx(), i);
98 *((uint32_t *)ptr) = core_mask;
99 }
100 }
101
102#endif
103}
104
111 asm volatile("csrr x0, 0x7C2" ::: "memory");
112}
113
/**
 * @brief Synchronize one core per cluster across all clusters.
 *
 * Each participating core atomically increments a shared counter in
 * global memory. The last arrival resets the counter and wakes the
 * other clusters; everyone else sleeps in WFI until woken, then clears
 * the wake-up interrupt.
 *
 * NOTE(review): intended to be called by exactly one core per cluster
 * (the counter is compared against snrt_cluster_num()) — confirm
 * callers uphold this.
 */
120static inline void snrt_inter_cluster_barrier() {
121 // Everyone increments a shared counter
122 uint32_t cnt =
123 __atomic_add_fetch(&(_snrt_barrier.cnt), 1, __ATOMIC_RELAXED);
124
125 // All but the last cluster enter WFI, while the last cluster resets the
126 // counter for the next barrier and multicasts an interrupt to wake up the
127 // other clusters.
128 if (cnt == snrt_cluster_num()) {
129 _snrt_barrier.cnt = 0;
130 // Wake all clusters
131 snrt_wake_all(1 << snrt_cluster_core_idx());
132 } else {
133 snrt_wfi();
134 // Clear interrupt for next barrier
135 snrt_int_clr_mcip();
136 }
137}
138
/**
 * @brief Synchronize all Snitch cores across all clusters.
 *
 * The DM core of each cluster participates in the software
 * inter-cluster barrier; the remaining cores are synchronized with the
 * cluster hardware barrier.
 *
 * NOTE(review): the cluster HW barrier calls that should bracket this
 * body (before and after the DM-core section) are not visible in this
 * view — confirm against the full source.
 */
148inline void snrt_global_barrier() {
150
151 // Synchronize all DM cores in software
152 if (snrt_is_dm_core()) {
153 snrt_inter_cluster_barrier();
154 }
155 // Synchronize cores in a cluster with the HW barrier
157}
158
166inline void snrt_partial_barrier(snrt_barrier_t *barr, uint32_t n) {
167 // Remember previous iteration
168 uint32_t prev_it = barr->iteration;
169 uint32_t cnt = __atomic_add_fetch(&barr->cnt, 1, __ATOMIC_RELAXED);
170
171 // Increment the barrier counter
172 if (cnt == n) {
173 barr->cnt = 0;
174 __atomic_add_fetch(&barr->iteration, 1, __ATOMIC_RELAXED);
175 } else {
176 // Some threads have not reached the barrier --> Let's wait
177 while (prev_it == barr->iteration)
178 ;
179 }
180}
181
182//================================================================================
183// Reduction functions
184//================================================================================
185
/**
 * @brief Perform a global sum reduction, blocking.
 *
 * Every core contributes `value`. Cores first accumulate into a
 * per-cluster result in TCDM via an AMO; the DM cores then accumulate
 * the cluster results into a global memory location, synchronize via
 * the inter-cluster barrier, and copy the final sum back to the
 * cluster-local result, which is returned to every core.
 *
 * NOTE(review): the writeback-wait and cluster HW barrier calls between
 * these steps are not visible in this view — confirm against the full
 * source before relying on the ordering documented here.
 *
 * @param value This core's contribution to the sum.
 * @return The global sum across all cores.
 */
196inline uint32_t snrt_global_all_to_all_reduction(uint32_t value) {
197 // Reduce cores within cluster in TCDM
198 uint32_t *cluster_result = &(cls()->reduction);
199 uint32_t tmp = __atomic_fetch_add(cluster_result, value, __ATOMIC_RELAXED);
200
201 // Wait for writeback to ensure AMO is seen by all cores after barrier
204
205 // Reduce DM cores across clusters in global memory
206 if (snrt_is_dm_core()) {
207 __atomic_add_fetch(&_reduction_result, *cluster_result,
208 __ATOMIC_RELAXED);
209 snrt_inter_cluster_barrier();
210 *cluster_result = _reduction_result;
211 }
213 return *cluster_result;
214}
215
/**
 * @brief Perform a sum reduction among clusters, blocking.
 *
 * Reduces `len` doubles element-wise across clusters using a binary
 * reduction tree: at each level, half of the active clusters DMA their
 * source buffer into a peer's destination buffer, and the receivers
 * accumulate it into their source buffer, parallelized over the
 * compute cores.
 *
 * NOTE(review): the global barrier calls separating the DMA transfer,
 * the accumulation, and the next tree level are not visible in this
 * view — confirm against the full source.
 * NOTE(review): assumes `len` is a multiple of the number of compute
 * cores (the per-core split truncates) — confirm.
 *
 * @param dst_buffer Destination buffer receiving a peer's data (one per
 *                   cluster, at the same TCDM offset in every cluster).
 * @param src_buffer Source buffer holding this cluster's partial sums;
 *                   receivers accumulate into it in place.
 * @param len        Number of double elements to reduce.
 */
232inline void snrt_global_reduction_dma(double *dst_buffer, double *src_buffer,
233 size_t len) {
234 // If we have a single cluster, no reduction has to be done
235 if (snrt_cluster_num() > 1) {
236 // Iterate levels in the binary reduction tree
237 int num_levels = ceil(log2(snrt_cluster_num()));
238 for (unsigned int level = 0; level < num_levels; level++) {
239 // Determine whether the current cluster is an active cluster.
240 // An active cluster is a cluster that participates in the current
241 // level of the reduction tree. Every second cluster among the
242 // active ones is a sender.
243 uint32_t is_active = (snrt_cluster_idx() % (1 << level)) == 0;
244 uint32_t is_sender = (snrt_cluster_idx() % (1 << (level + 1))) != 0;
245
246 // If the cluster is a sender, it sends the data in its source
247 // buffer to the respective receiver's destination buffer
248 if (is_active && is_sender) {
// The DMA transfer is issued by the non-compute (DM) core only.
249 if (!snrt_is_compute_core()) {
// The receiver sits (1 << level) clusters below, so its TCDM copy
// of dst_buffer is that many cluster-offsets lower in the address map.
250 uint64_t dst = (uint64_t)dst_buffer -
251 (1 << level) * SNRT_CLUSTER_OFFSET;
252 snrt_dma_start_1d(dst, (uint64_t)src_buffer,
253 len * sizeof(double));
254 snrt_dma_wait_all();
255 }
256 }
257
258 // Synchronize senders and receivers
260
261 // Every cluster which is not a sender performs the reduction
262 if (is_active && !is_sender) {
263 // Computation is parallelized over the compute cores
264 if (snrt_is_compute_core()) {
265 uint32_t items_per_core =
266 len / snrt_cluster_compute_core_num();
267 uint32_t core_offset =
268 snrt_cluster_core_idx() * items_per_core;
269 for (uint32_t i = 0; i < items_per_core; i++) {
270 uint32_t abs_i = core_offset + i;
271 src_buffer[abs_i] += dst_buffer[abs_i];
272 }
273 }
274 }
275
276 // Synchronize compute and DM cores for next tree level
278 }
279 }
280}
281
282//================================================================================
283// Memory consistency
284//================================================================================
285
/**
 * @brief Ensure `val` has been written back to the register file.
 *
 * The dummy `mv %0, %0` creates a read-after-write dependency on `val`,
 * stalling the core until the (possibly in-flight) instruction that
 * produced `val` — e.g. an AMO — has completed its writeback.
 *
 * @param val The value whose producing instruction must have retired.
 */
292inline void snrt_wait_writeback(uint32_t val) {
293 asm volatile("mv %0, %0" : "+r"(val)::);
294}
295
296//================================================================================
297// Multicast functions
298//================================================================================
299
/**
 * @brief Enable LSU multicast.
 *
 * @param mask Multicast address mask written to CSR 0x7c4; subsequent
 *             stores are replicated according to this mask.
 */
inline void snrt_enable_multicast(uint32_t mask) {
    // A non-zero mask in CSR 0x7c4 activates multicast on the LSU.
    write_csr(0x7c4, mask);
}
308
/**
 * @brief Disable LSU multicast.
 *
 * Clears the multicast mask CSR (0x7c4) so stores go to a single
 * destination again.
 */
inline void snrt_disable_multicast() {
    write_csr(0x7c4, 0);
}
Definition sync_decls.h:9
void snrt_partial_barrier(snrt_barrier_t *barr, uint32_t n)
Generic software barrier.
Definition sync.h:166
void snrt_enable_multicast(uint32_t mask)
Enable LSU multicast.
Definition sync.h:307
void snrt_mutex_ttas_acquire(volatile uint32_t *pmtx)
Acquire a mutex, blocking.
Definition sync.h:54
void snrt_mutex_acquire(volatile uint32_t *pmtx)
Acquire a mutex, blocking.
Definition sync.h:38
volatile uint32_t * snrt_mutex()
Get a pointer to a mutex variable.
Definition sync.h:28
void snrt_wait_writeback(uint32_t val)
Ensure value is written back to the register file.
Definition sync.h:292
uint32_t snrt_global_all_to_all_reduction(uint32_t value)
Perform a global sum reduction, blocking.
Definition sync.h:196
void snrt_global_barrier()
Synchronize all Snitch cores.
Definition sync.h:148
void snrt_cluster_hw_barrier()
Synchronize cores in a cluster with a hardware barrier, blocking.
Definition sync.h:110
void snrt_mutex_release(volatile uint32_t *pmtx)
Release a previously-acquired mutex.
Definition sync.h:71
void snrt_disable_multicast()
Disable LSU multicast.
Definition sync.h:312
void snrt_global_reduction_dma(double *dst_buffer, double *src_buffer, size_t len)
Perform a sum reduction among clusters, blocking.
Definition sync.h:232