
Commit 3f62b56
util: Add mmio helper functions for riscv
Fix an issue where intrinsics-generated atomic load/store instructions caused a SEGFAULT when accessing MMIO regions on RISC-V, by adding a RISC-V specific implementation of the MMIO helpers.

Signed-off-by: Zheng Zhang <[email protected]>
1 parent 7a04b9e commit 3f62b56
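For context, this is the failure mode the patch targets: the generic fallback in util/mmio.h performs MMIO through C11 atomics (see the atomic_load_explicit() call left as context in the second hunk below), and on RISC-V the compiler may lower those accesses to instruction sequences that device memory does not tolerate. A minimal sketch of the replacement pattern, assuming a 32-bit device register; the helper name is illustrative, not part of the patch:

#include <stdint.h>

/* Sketch: a plain volatile access bracketed by fences, instead of an
 * atomic intrinsic.  The fences order the access against surrounding
 * memory operations; the volatile access itself compiles to an
 * ordinary load instruction, which MMIO regions can handle. */
static inline uint32_t riscv_mmio_read32_sketch(const void *addr)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
	uint32_t val = *(volatile const uint32_t *)addr;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	return val;
}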

1 file changed: util/mmio.h (99 additions, 2 deletions)
@@ -124,7 +124,55 @@ static inline uint8_t mmio_read8(const void *addr)
 	return res;
 }

-#else /* __s390x__ */
+#elif defined(__riscv)
+#define __MMIO_LE_TYPE(sz) __le##sz
+#define __MMIO_BE_TYPE(sz) __be##sz
+
+#define MAKE_WRITE(_NAME_, _SZ_)                                              \
+	static inline void _NAME_##_be(void *addr, __MMIO_BE_TYPE(_SZ_) val) \
+	{                                                                     \
+		__atomic_thread_fence(__ATOMIC_RELEASE);                      \
+		*(volatile uint##_SZ_##_t *)addr = (uint##_SZ_##_t)val;       \
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);                      \
+	}                                                                     \
+	static inline void _NAME_##_le(void *addr, __MMIO_LE_TYPE(_SZ_) val) \
+	{                                                                     \
+		__atomic_thread_fence(__ATOMIC_RELEASE);                      \
+		*(volatile uint##_SZ_##_t *)addr = (uint##_SZ_##_t)val;       \
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);                      \
+	}
+
+#define MAKE_READ(_NAME_, _SZ_)                                               \
+	static inline __MMIO_BE_TYPE(_SZ_) _NAME_##_be(const void *addr)     \
+	{                                                                     \
+		__atomic_thread_fence(__ATOMIC_RELEASE);                      \
+		__MMIO_BE_TYPE(_SZ_) val =                                    \
+			*(volatile const uint##_SZ_##_t *)addr;               \
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);                      \
+		return val;                                                   \
+	}                                                                     \
+	static inline __MMIO_LE_TYPE(_SZ_) _NAME_##_le(const void *addr)     \
+	{                                                                     \
+		__atomic_thread_fence(__ATOMIC_RELEASE);                      \
+		__MMIO_LE_TYPE(_SZ_) val =                                    \
+			*(volatile const uint##_SZ_##_t *)addr;               \
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);                      \
+		return val;                                                   \
+	}
+
+static inline void mmio_write8(void *addr, uint8_t val)
+{
+	__atomic_thread_fence(__ATOMIC_RELEASE);
+	*(volatile uint8_t *)addr = val;
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+static inline uint8_t mmio_read8(const void *addr)
+{
+	__atomic_thread_fence(__ATOMIC_RELEASE);
+	uint8_t val = *(volatile const uint8_t *)addr;
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+	return val;
+}
+
+#else

 #define MAKE_WRITE(_NAME_, _SZ_)                                              \
 	static inline void _NAME_##_be(void *addr, __be##_SZ_ value)          \
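To see what the new branch generates, here is the hand-expanded (illustrative) result of MAKE_READ(mmio_read16, 16) under the RISC-V definitions above; __be16 comes from __MMIO_BE_TYPE(16):

static inline __be16 mmio_read16_be(const void *addr)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
	__be16 val = *(volatile const uint16_t *)addr;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	return val;
}

The matching mmio_read16_le() is identical except for its __le16 return type; as elsewhere in this header, the value is returned as the device presents it and any byte-swapping is the caller's concern.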
@@ -161,7 +209,7 @@ static inline uint8_t mmio_read8(const void *addr)
 	return atomic_load_explicit((_Atomic(uint8_t) *)addr,
 				    memory_order_relaxed);
 }
-#endif /* __s390x__ */
+#endif

 MAKE_WRITE(mmio_write16, 16)
 MAKE_WRITE(mmio_write32, 32)
@@ -170,8 +218,10 @@ MAKE_READ(mmio_read16, 16)
 MAKE_READ(mmio_read32, 32)

 #if SIZEOF_LONG == 8
+
 MAKE_WRITE(mmio_write64, 64)
 MAKE_READ(mmio_read64, 64)
+
 #else
 void mmio_write64_be(void *addr, __be64 val);
 static inline void mmio_write64_le(void *addr, __le64 val)
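The SIZEOF_LONG == 8 guard keeps the inline 64-bit helpers out of 32-bit builds, where a 64-bit store would be torn into two 32-bit stores and the out-of-line mmio_write64_be() declared above takes over instead. A hypothetical caller on RV64 (identifiers are illustrative, not from the patch):

#include <stdint.h>

/* On RV64 this compiles to one volatile 64-bit store between fences,
 * so the doorbell value is not torn across two bus transactions. */
void ring_doorbell_sketch(void *db_reg, __le64 db_val)
{
	mmio_write64_le(db_reg, db_val);
}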
@@ -234,6 +284,53 @@ static inline void _mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
 	})
 #elif defined(__s390x__)
 void mmio_memcpy_x64(void *dst, const void *src, size_t bytecnt);
+#elif defined(__riscv)
+static inline void _mmio_memcpy_x64_64b(void *dest, const void *src)
+{
+#if defined(__riscv_vector)
+	const uint64_t *s = (const uint64_t *)src;
+	volatile uint64_t *d = (volatile uint64_t *)dest;
+	size_t n = 8;
+
+	while (n) {
+		__atomic_thread_fence(__ATOMIC_RELEASE);
+		size_t vl = vsetvl_e64m1(n);
+		vuint64m1_t v = vle64_v_u64m1(s, vl);
+		vse64_v_u64m1((uint64_t *)d, v, vl);
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);
+		s += vl;
+		d += vl;
+		n -= vl;
+	}
+#else
+	const uint64_t *s = (const uint64_t *)src;
+	volatile uint64_t *d = (volatile uint64_t *)dest;
+	__atomic_thread_fence(__ATOMIC_RELEASE);
+	for (int i = 0; i < 8; i++)
+		d[i] = s[i];
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void _mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
+{
+	const char *s = (const char *)src;
+	char *d = (char *)dest;
+	do {
+		_mmio_memcpy_x64_64b(d, s);
+		bytecnt -= 64;
+		s += 64;
+		d += 64;
+	} while (bytecnt > 0);
+}
+
+#define mmio_memcpy_x64(dest, src, bytecount)                                 \
+	({                                                                    \
+		if (__builtin_constant_p(bytecount) && (bytecount) == 64)     \
+			_mmio_memcpy_x64_64b((dest), (src));                  \
+		else                                                          \
+			_mmio_memcpy_x64((dest), (src), (bytecount));         \
+	})
 #else
 /* Transfer is some multiple of 64 bytes */
 static inline void mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
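Typical use of the new copy path, sketched with hypothetical identifiers (providers push 64-byte blocks through a write-combining doorbell page this way; the transfer must be a non-zero multiple of 64 bytes, per the comment in the fallback branch):

#include <stddef.h>

void post_wqe_sketch(void *bf_reg, const void *wqe, size_t wqe_bytes)
{
	/* A constant 64 dispatches to the single-block
	 * _mmio_memcpy_x64_64b(); any other size, or a size not known
	 * at compile time, goes through the _mmio_memcpy_x64() loop. */
	mmio_memcpy_x64(bf_reg, wqe, 64);
	mmio_memcpy_x64(bf_reg, wqe, wqe_bytes);
}

When the vector extension is enabled at build time (__riscv_vector), each 64-byte block is moved with strip-mined RVV loads and stores via vsetvl_e64m1(). Note that the patch uses the older, unprefixed intrinsic spellings (vsetvl_e64m1 rather than __riscv_vsetvl_e64m1), so this path assumes a toolchain whose <riscv_vector.h> still provides them.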
