diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2aefdecad794f1051289c6b4eb33e9280a283882..8766eb3545420406fff27acbc0adecfbcd9bc83e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -87,6 +87,14 @@ if (OS_ARCH STREQUAL "aarch64")
     add_compile_options(-mtune=cortex-a72 -fsigned-char -g -ggdb3 -march=armv8-a+crc -funwind-tables)
 elseif (OS_ARCH STREQUAL "x86_64")
     add_compile_options(-msse4.2 )
+elseif (OS_ARCH STREQUAL "riscv64")
+    option(HAS_RISCV_ZBC_EXTENSION "Build for RISC-V cores with the Zbc (carry-less multiply) extension" OFF)
+    message(STATUS "HAS_RISCV_ZBC_EXTENSION = ${HAS_RISCV_ZBC_EXTENSION}")
+    if (HAS_RISCV_ZBC_EXTENSION)
+        add_compile_options(-march=rv64gc_zbc)
+    else ()
+        add_compile_options(-march=rv64gc)
+    endif (HAS_RISCV_ZBC_EXTENSION)
 endif ()
 
 Add_Definitions(-DWSEC_COMPILE_CAC_OPENSSL -DWSEC_AES_GCM_SUPPORT -DWSEC_USE_OPENSSL_110 -DWSEC_COMPILE_SDP)
diff --git a/src/storage/gstor/gstor_handle.c b/src/storage/gstor/gstor_handle.c
index 9a5e5d6248f47da4ee1a8fe8a076467460f88740..f76a6104f211f53bc0aed4977a475ecebfc2372b 100644
--- a/src/storage/gstor/gstor_handle.c
+++ b/src/storage/gstor/gstor_handle.c
@@ -110,7 +110,7 @@ static void knl_init_new_session(knl_session_t *knl_session, cm_stack_t *stack,
     uint32 sid = g_instance->hwm;
     knl_session->id = sid;
     kernel->sessions[sid] = knl_session;
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
     CM_MFENCE;
 #endif
     g_instance->hwm++;
diff --git a/src/storage/gstor/zekernel/common/cm_atomic.h b/src/storage/gstor/zekernel/common/cm_atomic.h
index 60411502067fb32f71ced791b21dbb65f38ede70..c4c582b5569fae43f335e0b7eb115d85b060e4f5 100644
--- a/src/storage/gstor/zekernel/common/cm_atomic.h
+++ b/src/storage/gstor/zekernel/common/cm_atomic.h
@@ -85,7 +85,7 @@ static inline bool32 cm_atomic32_cas(atomic32_t *val, int32 oldval, int32 newval
 typedef volatile int32 atomic32_t;
 typedef volatile int64 atomic_t;
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
 static inline int64 cm_atomic_get(atomic_t *val)
 {
     return __atomic_load_n(val, __ATOMIC_SEQ_CST);
diff --git a/src/storage/gstor/zekernel/common/cm_checksum.c b/src/storage/gstor/zekernel/common/cm_checksum.c
index b795d6776e6937595f9e232680c7f036ef3ea4c4..9fe2acbf3509f084866925b8487c3d2957ae2fa8 100644
--- a/src/storage/gstor/zekernel/common/cm_checksum.c
+++ b/src/storage/gstor/zekernel/common/cm_checksum.c
@@ -101,6 +101,14 @@ uint32 cm_crc32c_aarch(const void *data, uint32 len, uint32 crc)
 }
 #endif
 
+#if defined(__riscv_zbc)
+uint32 cm_crc32c_riscv(const void *data, uint32 len, uint32 crc)
+{
+    return crc32_le_generic(crc, data, len, CRC32C_POLY_LE,
+                            CRC32C_POLY_QT_LE);
+}
+#endif
+
 uint32 cm_crc32c_sse42(const void *data, uint32 len, uint32 crc)
 {
 #if defined(DB_HAVE_SSE4_2)
diff --git a/src/storage/gstor/zekernel/common/cm_checksum.h b/src/storage/gstor/zekernel/common/cm_checksum.h
index 37bfde132ccfa6e81f9176d48bfd920d7f04d93a..bff89834ce8e09179f8c0be0d8036aaca95cbe71 100644
--- a/src/storage/gstor/zekernel/common/cm_checksum.h
+++ b/src/storage/gstor/zekernel/common/cm_checksum.h
@@ -43,6 +43,133 @@
 #define DB_HAVE_SSE4_2
 #include <cpuid.h>
 #define DB_HAVE__GET_CPUID
+#elif defined(__riscv_zbc)
+#if __riscv_xlen == 64
+/* Slide by XLEN bits per iteration */
+# define STEP_ORDER 3
+
+/* Each below polynomial quotient has an implicit bit for 2^XLEN */
+
+/* Polynomial quotient of (2^(XLEN+32))/CRC32C_POLY, in LE format */
+# define CRC32C_POLY_QT_LE 0xa434f61c6f5389f8
+
+static inline uint64 crc32_le_prep(uint32 crc, unsigned long const *ptr)
+{
+    return (uint64)crc ^ (uint64)(*ptr);
+}
+
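+/*
+ * Fold one XLEN-bit chunk into the CRC using Zbc carry-less multiplies
+ * (Barrett reduction): clmul by the precomputed quotient of
+ * 2^(XLEN+32)/poly, then clmulr by the polynomial itself to obtain the
+ * 32-bit remainder, i.e. the updated CRC.
+ */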
+static inline uint32 crc32_le_zbc(unsigned long s, uint32 poly, unsigned long poly_qt)
+{
+    uint32 crc;
+
+    /* We don't have a "clmulrh" insn, so use clmul + slli instead. */
+    __asm__ volatile (".option push\n"
+                      ".option arch,+zbc\n"
+                      "clmul %0, %1, %2\n"
+                      "slli %0, %0, 1\n"
+                      "xor %0, %0, %1\n"
+                      "clmulr %0, %0, %3\n"
+                      "srli %0, %0, 32\n"
+                      ".option pop\n"
+                      : "=&r" (crc)
+                      : "r" (s),
+                        "r" (poly_qt),
+                        "r" ((uint64)poly << 32)
+                      :);
+    return crc;
+}
+
+#elif __riscv_xlen == 32
+# define STEP_ORDER 2
+/* Each quotient should match the upper half of its analog in RV64 */
+# define CRC32C_POLY_QT_LE 0x6f5389f8
+
+static inline uint32 crc32_le_prep(uint32 crc, unsigned long const *ptr)
+{
+    return crc ^ (uint32)(*ptr);
+}
+
+static inline uint32 crc32_le_zbc(unsigned long s, uint32 poly, unsigned long poly_qt)
+{
+    uint32 crc;
+
+    /* We don't have a "clmulrh" insn, so use clmul + slli instead. */
+    __asm__ volatile (".option push\n"
+                      ".option arch,+zbc\n"
+                      "clmul %0, %1, %2\n"
+                      "slli %0, %0, 1\n"
+                      "xor %0, %0, %1\n"
+                      "clmulr %0, %0, %3\n"
+                      ".option pop\n"
+                      : "=&r" (crc)
+                      : "r" (s),
+                        "r" (poly_qt),
+                        "r" (poly)
+                      :);
+    return crc;
+}
+#endif
+
+#define STEP (1 << STEP_ORDER)
+#define OFFSET_MASK (STEP - 1)
+#define CRC32C_POLY_LE 0x82F63B78
+#define min(X,Y) ((X) < (Y) ? (X) : (Y))
+
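+/*
+ * Fold a sub-word chunk (1..STEP-1 bytes): right-justify the bytes at
+ * the top of an XLEN-bit register, mix in the low-order CRC bits, and
+ * reduce with crc32_le_zbc(); the CRC bits not covered by such a short
+ * input are carried across unchanged via crc_low.
+ */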
+static inline uint32 crc32_le_unaligned(uint32 crc, unsigned char const *p,
+                                        size_t len, uint32 poly,
+                                        unsigned long poly_qt)
+{
+    size_t bits = len * 8;
+    unsigned long s = 0;
+    uint32 crc_low = 0;
+
+    for (int i = 0; i < len; i++)
+        s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8);
+
+    s ^= (unsigned long)crc << (__riscv_xlen - bits);
+    if (__riscv_xlen == 32 || len < sizeof(uint32))
+        crc_low = crc >> bits;
+
+    crc = crc32_le_zbc(s, poly, poly_qt);
+    crc ^= crc_low;
+
+    return crc;
+}
+
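+/*
+ * Main entry: process the buffer in three phases - an unaligned head,
+ * a loop over naturally aligned XLEN-bit words, and a sub-word tail,
+ * all folded through the Zbc reduction above.
+ */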
+static inline uint32 crc32_le_generic(uint32 crc, unsigned char const *p,
+                                      size_t len, uint32 poly,
+                                      unsigned long poly_qt)
+{
+    size_t offset, head_len, tail_len;
+    unsigned long const *p_ul;
+    unsigned long s;
+
+    /* Handle the unaligned head. */
+    offset = (unsigned long)p & OFFSET_MASK;
+    if (offset && len) {
+        head_len = min(STEP - offset, len);
+        crc = crc32_le_unaligned(crc, p, head_len, poly, poly_qt);
+        p += head_len;
+        len -= head_len;
+    }
+
+    tail_len = len & OFFSET_MASK;
+    len = len >> STEP_ORDER;
+    p_ul = (unsigned long const *)p;
+
+    for (int i = 0; i < len; i++) {
+        s = crc32_le_prep(crc, p_ul);
+        crc = crc32_le_zbc(s, poly, poly_qt);
+        p_ul++;
+    }
+
+    /* Handle the tail bytes. */
+    p = (unsigned char const *)p_ul;
+    if (tail_len)
+        crc = crc32_le_unaligned(crc, p, tail_len, poly, poly_qt);
+
+    return crc;
+}
 #endif
 
 #ifdef __cplusplus
@@ -107,6 +234,20 @@ static inline uint32 cm_get_crc32c_aarch(const void *data, uint32 len)
 }
 #endif
 
+#if defined(__riscv_zbc)
+uint32 cm_crc32c_riscv(const void *data, uint32 len, uint32 crc);
+
+static inline uint32 cm_get_crc32c_riscv(const void *data, uint32 len)
+{
+    uint32 crc;
+
+    cm_init_crc32c(&crc);
+    crc = cm_crc32c_riscv(data, len, crc);
+    cm_final_crc32c(&crc);
+    return crc;
+}
+#endif
+
 static inline uint32 cm_get_crc32_sse42(const void *data, uint32 len)
 {
     uint32 crc;
@@ -138,6 +279,8 @@ static inline uint32 cm_get_checksum(const void *data, uint32 len)
     if (cm_crc32c_aarch_available()) {
         return cm_get_crc32c_aarch(data, len);
     }
+#elif defined(__riscv_zbc)
+    return cm_get_crc32c_riscv(data, len);
 #else
     if (cm_crc32c_sse42_available()) {
         return cm_get_crc32_sse42(data, len);
diff --git a/src/storage/gstor/zekernel/common/cm_memory.h b/src/storage/gstor/zekernel/common/cm_memory.h
index 8db1ffaaa3c9c36c13ae245f2e95c230372d180a..f165e65799ef45e54f61decb4694e7ff109efcc3 100644
--- a/src/storage/gstor/zekernel/common/cm_memory.h
+++ b/src/storage/gstor/zekernel/common/cm_memory.h
@@ -72,6 +72,12 @@ extern "C" {
         __asm__ volatile("" ::        \
                          : "memory"); \
     }
+#elif defined(__riscv)
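+/* "fence rw,rw" is a full barrier: all earlier loads/stores are ordered
+   before any later load/store. */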
+#define CM_MFENCE                         \
+    {                                     \
+        __asm__ volatile("fence rw,rw" :: \
+                         : "memory");     \
+    }
 #endif
 
 #define TEMP_POOL_SIZE_THRESHOLD (((uint64)SIZE_M(1024)) * 2) // if totoal/buf_pool_num < 2G, then use one TEMP pool
diff --git a/src/storage/gstor/zekernel/common/cm_spinlock.h b/src/storage/gstor/zekernel/common/cm_spinlock.h
index 6f708626ca5e038cf8eecf2ed973af4326374f36..5e53b8862099520d2f7e35ae62174df982d88594 100644
--- a/src/storage/gstor/zekernel/common/cm_spinlock.h
+++ b/src/storage/gstor/zekernel/common/cm_spinlock.h
@@ -36,7 +36,7 @@ extern "C" {
 typedef volatile uint32 spinlock_t;
 typedef volatile uint32 ip_spinlock_t;
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
 #define GS_INIT_SPIN_LOCK(lock) \
     {                           \
         __atomic_store_n(&lock, 0, __ATOMIC_SEQ_CST); \
@@ -69,7 +69,7 @@ typedef struct st_recursive_lock {
     uint16 r_cnt;
 } recursive_lock_t;
 
-#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch__) || defined(__riscv)
 #define fas_cpu_pause() \
     {                   \
         __asm__ volatile("nop"); \
@@ -104,7 +104,7 @@ static inline void cm_spin_sleep_ex(uint32 tick)
 
 #else
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
 static inline uint32 cm_spin_set(spinlock_t *ptr, uint32 value)
 {
     uint32 oldvalue = 0;
@@ -151,7 +151,7 @@ static inline void cm_spin_lock(spinlock_t *lock, spin_statis_t *stat)
     }
 
     for (;;) {
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
         while (__atomic_load_n(lock, __ATOMIC_SEQ_CST) != 0) {
 #else
         while (*lock != 0) {
@@ -187,7 +187,7 @@ static inline void cm_spin_lock_ex(spinlock_t *lock, spin_statis_t *stat, uint32
     }
 
     for (;;) {
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
         while (__atomic_load_n(lock, __ATOMIC_SEQ_CST) != 0) {
 #else
         while (*lock != 0) {
@@ -218,7 +218,7 @@ static inline void cm_spin_lock_ex(spinlock_t *lock, spin_statis_t *stat, uint32
     }
 }
 
-#if !defined(__arm__) && !defined(__aarch64__)
+#if !defined(__arm__) && !defined(__aarch64__) && !defined(__riscv)
 static inline void cm_spin_unlock(spinlock_t *lock)
 {
     if (SECUREC_UNLIKELY(lock == NULL)) {
@@ -231,7 +231,7 @@ static inline void cm_spin_unlock(spinlock_t *lock)
 
 static inline bool32 cm_spin_try_lock(spinlock_t *lock)
 {
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
     if (__atomic_load_n(lock, __ATOMIC_SEQ_CST) != 0) {
 #else
     if (*lock != 0) {
@@ -248,7 +248,7 @@ static inline bool32 cm_spin_timed_lock(spinlock_t *lock, uint32 timeout_ticks)
     uint32 sleep_times = 0;
 
     for (;;) {
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
         while (__atomic_load_n(lock, __ATOMIC_SEQ_CST) != 0) {
 #else
         while (*lock != 0) {
diff --git a/src/storage/gstor/zekernel/common/cm_thread.c b/src/storage/gstor/zekernel/common/cm_thread.c
index 8007d058f3f79367b5d32b796ceb7c782bfa370d..7d3eb48ad0f36bd73396f1a306f6beb38b745097 100644
--- a/src/storage/gstor/zekernel/common/cm_thread.c
+++ b/src/storage/gstor/zekernel/common/cm_thread.c
@@ -320,7 +320,7 @@ uint32 cm_get_current_thread_id()
 #define __SYS_GET_SPID 186
 #elif (defined __aarch64__)
 #define __SYS_GET_SPID 178
-#elif (defined __loongarch__)
+#elif (defined __loongarch__) || (defined __riscv)
 #include <sys/syscall.h>
 #define __SYS_GET_SPID SYS_gettid
 #endif
diff --git a/src/storage/gstor/zekernel/kernel/buffer/knl_buffer_access.c b/src/storage/gstor/zekernel/kernel/buffer/knl_buffer_access.c
index 104cc314d6a01a4ada37d67b9ba138d6ad783a31..2f12b0e0dafb062355dee559d37f23c9aa992339 100644
--- a/src/storage/gstor/zekernel/kernel/buffer/knl_buffer_access.c
+++ b/src/storage/gstor/zekernel/kernel/buffer/knl_buffer_access.c
@@ -366,7 +366,7 @@ static status_t buf_construct_group_members(knl_session_t *session, buf_ctrl_t *
         }
     }
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
     CM_MFENCE;
 #endif
 
@@ -1084,7 +1084,7 @@ static status_t buf_batch_load_pages(knl_session_t *session, char *read_buf,
         }
     }
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
     CM_MFENCE;
 #endif
 
diff --git a/src/storage/gstor/zekernel/kernel/include/knl_defs.h b/src/storage/gstor/zekernel/kernel/include/knl_defs.h
index bbfeaa39898b91205e043a0a10c75716fd456824..05941595260da2a4cf02dfe40d94c01d04511103 100644
--- a/src/storage/gstor/zekernel/kernel/include/knl_defs.h
+++ b/src/storage/gstor/zekernel/kernel/include/knl_defs.h
@@ -75,7 +75,7 @@ typedef uint64 knl_scn_t;
         (time_val)->tv_usec = (long)(((uint64)(scn)) >> 12 & 0x00000000000fffffULL); \
     } while (0)
 
-#if defined(WIN32) || defined(__arm__) || defined(__aarch64__)
+#if defined(WIN32) || defined(__arm__) || defined(__aarch64__) || defined(__riscv)
 #define KNL_GET_SCN(p_scn) ((knl_scn_t)cm_atomic_get(p_scn))
 #define KNL_SET_SCN(p_scn, scn) (cm_atomic_set((atomic_t *)p_scn, (int64)scn))
 #define KNL_INC_SCN(p_scn) ((knl_scn_t)cm_atomic_inc(p_scn))
diff --git a/src/storage/gstor/zekernel/kernel/knl_database.h b/src/storage/gstor/zekernel/kernel/knl_database.h
index 41b1d241cc11c53b9e19de8686b809d4e59a41e7..a334e8719f716b10f8945a8f97686a74aeb6931f 100644
--- a/src/storage/gstor/zekernel/kernel/knl_database.h
+++ b/src/storage/gstor/zekernel/kernel/knl_database.h
@@ -229,7 +229,7 @@ typedef struct st_trig_name_list {
 knl_scn_t db_inc_scn(knl_session_t *session);
 knl_scn_t db_next_scn(knl_session_t *session);
 
-#if defined(WIN32) || defined(__arm__) || defined(__aarch64__)
+#if defined(WIN32) || defined(__arm__) || defined(__aarch64__) || defined(__riscv)
 #define DB_GET_LSN(p_lsn) ((uint64)cm_atomic_get(p_lsn))
 #define DB_SET_LSN(p_lsn, lsn) (cm_atomic_set((atomic_t *)&p_lsn, (int64)lsn))
 #else
diff --git a/src/storage/gstor/zekernel/kernel/persist/knl_log.c b/src/storage/gstor/zekernel/kernel/persist/knl_log.c
index eb3e47e9f714bb4fad3b9541cfba296e7306812b..a0ca051d4c7bbd01d843bf4bb48142d0c7bd10a3 100644
--- a/src/storage/gstor/zekernel/kernel/persist/knl_log.c
+++ b/src/storage/gstor/zekernel/kernel/persist/knl_log.c
@@ -807,7 +807,7 @@ void log_proc(thread_t *thread)
 // important: this function ensures clean read-only after set SCN
 void log_reset_readonly(buf_ctrl_t *ctrl)
 {
-#if !defined(__arm__) && !defined(__aarch64__)
+#if !defined(__arm__) && !defined(__aarch64__) && !defined(__riscv)
     if (SECUREC_UNLIKELY(ctrl == NULL)) {
         return;
     }
@@ -838,7 +838,7 @@ void log_set_page_lsn(knl_session_t *session, uint64 lsn, uint64 lfn)
     }
 #endif
 
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
     CM_MFENCE;
 #endif
     log_reset_readonly(ctrl);
@@ -874,7 +874,7 @@ static void log_set_commit_progress(knl_session_t *begin, knl_session_t *end, lo
     for (;;) {
         next = curr->log_next;
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
         CM_MFENCE;
 #endif
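--
Reviewer note, not part of the patch: a minimal standalone oracle for the new
CRC32C path. crc32c_ref() below is a hypothetical helper; the all-ones seed
and final inversion are assumed to match the convention implemented by
cm_init_crc32c()/cm_final_crc32c(). It verifies the standard CRC-32C check
value, and on RV64 hardware built with -march=rv64gc_zbc the same reference
can be diffed against crc32_le_generic() over arbitrary buffers.

/* crc32c_check.c - build with: gcc -O2 -o crc32c_check crc32c_check.c */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bit-at-a-time CRC-32C using the reflected polynomial 0x82F63B78. */
static uint32_t crc32c_ref(uint32_t crc, const unsigned char *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1u) ? 0x82F63B78u : 0u);
    }
    return crc;
}

int main(void)
{
    /* Standard check value: CRC-32C("123456789") == 0xE3069283. */
    const unsigned char msg[] = "123456789";
    uint32_t crc = ~crc32c_ref(0xFFFFFFFFu, msg, 9);
    printf("crc32c(\"123456789\") = 0x%08X (expect 0xE3069283)\n", crc);
    return crc == 0xE3069283u ? 0 : 1;
}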