From 9b0195263cbb4ab20d2b2d6c6c019fafad8759f6 Mon Sep 17 00:00:00 2001 From: zhaoxuhui Date: Mon, 24 Mar 2025 17:23:05 +0800 Subject: [PATCH 1/3] [libcbs] fix some bugs for scheduler --- libcbs/src/scheduler/base.hbs | 6 ++++ libcbs/src/scheduler/queue.hbs | 13 +++++++- libcbs/src/scheduler/scheduler.cbs | 41 ++++++++++++++++-------- libcbs/src/scheduler/scheduler.hbs | 24 ++++++++++++-- libcbs/test/scheduler/scheduler_test.cbs | 3 +- 5 files changed, 69 insertions(+), 18 deletions(-) create mode 100644 libcbs/src/scheduler/base.hbs diff --git a/libcbs/src/scheduler/base.hbs b/libcbs/src/scheduler/base.hbs new file mode 100644 index 000000000000..16f17c8a3465 --- /dev/null +++ b/libcbs/src/scheduler/base.hbs @@ -0,0 +1,6 @@ +#ifndef BISHENG_C_SCHEDULER_BASE_H +#define BISHENG_C_SCHEDULER_BASE_H + +struct Void {}; + +#endif diff --git a/libcbs/src/scheduler/queue.hbs b/libcbs/src/scheduler/queue.hbs index c9efda5e4e3b..6d4c6dbec524 100644 --- a/libcbs/src/scheduler/queue.hbs +++ b/libcbs/src/scheduler/queue.hbs @@ -49,6 +49,17 @@ T struct Queue::pop(struct Queue *this) { return res; } +T struct Queue::peek(struct Queue *this) { + pthread_mutex_lock(&this->mutex); + if (this->count == 0) { + pthread_mutex_unlock(&this->mutex); + return NULL; + } + T res = this->buf[this->readIndex]; + pthread_mutex_unlock(&this->mutex); + return res; +} + void struct Queue::init(struct Queue *this, unsigned int capacity) { if (capacity == 0) @@ -59,7 +70,7 @@ void struct Queue::init(struct Queue *this, unsigned int capacity) this->buf = malloc(sizeof(T) * capacity); if (this->buf == NULL) { - fprintf(stderr, "Error: maloc failed, size: %lu\n", (sizeof(T) * capacity)); + fprintf(stderr, "Error: malloc failed, size: %lu\n", (sizeof(T) * capacity)); return; } int res = pthread_mutex_init(&this->mutex, NULL); diff --git a/libcbs/src/scheduler/scheduler.cbs b/libcbs/src/scheduler/scheduler.cbs index b18576536118..b8748f903a87 100644 --- a/libcbs/src/scheduler/scheduler.cbs +++ b/libcbs/src/scheduler/scheduler.cbs @@ -19,6 +19,10 @@ struct ThreadContext * getCurrentCtx() { return g_curCtx; } +struct Task *getCurrentTask() { + return (struct Task *)g_curCtx->runningTask; +} + int taskCount() { return atomic_load(&g_taskCount); } @@ -55,7 +59,7 @@ impl trait Future for struct _Yield; trait Future *yield() { struct _Yield *ptr = malloc(sizeof(struct _Yield)); - if (ptr != NULL) { + if (ptr == NULL) { fprintf(stderr, "Error: memory alloc failed\n"); exit(EXIT_FAILURE); } @@ -63,12 +67,13 @@ trait Future *yield() { return ptr; } -static void * stealTask() { - void * task = NULL; +static struct Task * stealTask() { + struct Task * task = NULL; for (unsigned int i = 1; i < S.threadCount; i++) { if (S.isInit && S.threads[i] != NULL) { - task = S.threads[i]->localQueue.pop(); - if (task) { + task = (struct Task *)S.threads[i]->localQueue.peek(); + if (task && task->state == READY) { + S.threads[i]->localQueue.pop(); return task; } } @@ -77,18 +82,20 @@ static void * stealTask() { } static void * getReadyTask() { - void * task = NULL; + struct Task * task = NULL; do { // 1.Get the task from the local queue first. if (g_curCtx->id > 0) { - task = g_curCtx->localQueue.pop(); - if (task) { + task = (struct Task *)g_curCtx->localQueue.peek(); + if (task && task->state == READY) { + g_curCtx->localQueue.pop(); break; } } // 2.If the local queue is empty, try to get the task from the global queue. 
- task = S.globalQueue->pop(); - if (task) { + task = (struct Task *)S.globalQueue->peek(); + if (task && task->state == READY) { + S.globalQueue->pop(); break; } // 3.Get thetask from other queues when the global queue is empty. @@ -115,6 +122,7 @@ static int findQueue() { void *schedule(void *arg) { struct ThreadContext *ctx = (struct ThreadContext *)arg; g_curCtx = ctx; + ++S.running_threads; while (true) { struct Task *task = (struct Task *)getReadyTask(); @@ -126,7 +134,6 @@ void *schedule(void *arg) { ctx->runningTask = NULL; if (s.isPending) { - atomic_store(&task->state, PARKED); int index = findQueue(); if (index == 0) { S.globalQueue->push(task); @@ -146,6 +153,7 @@ void *schedule(void *arg) { break; } } + --S.running_threads; return NULL; } @@ -165,7 +173,6 @@ void struct Scheduler::init(unsigned int threadCount) { g_mainThread = malloc(sizeof(struct ThreadContext)); g_taskCount = 0; - if (g_mainThread == NULL) { fprintf(stderr, "Error: main thread memory alloc failed\n"); exit(EXIT_FAILURE); @@ -175,6 +182,7 @@ void struct Scheduler::init(unsigned int threadCount) { g_curCtx = g_mainThread; S.threads[0] = g_mainThread; S.isInit = true; + S.running_threads = 0; unsigned int i; for (i = 1; i <= threadCount; i++) { @@ -227,9 +235,16 @@ struct Task *struct Scheduler::spawn(trait Future *future) { return t; } +void struct Scheduler::deinit() { + S.isInit = false; +} + void struct Scheduler::destroy() { unsigned int i; - S.isInit = false; + assert(!S.isInit); + while (S.running_threads > 0) { + // All threads must finish before deallocating + } if (S.globalQueue != NULL) { S.globalQueue->destroy(); free(S.globalQueue); diff --git a/libcbs/src/scheduler/scheduler.hbs b/libcbs/src/scheduler/scheduler.hbs index d9b06f80b35b..b6d24a37b6c9 100644 --- a/libcbs/src/scheduler/scheduler.hbs +++ b/libcbs/src/scheduler/scheduler.hbs @@ -3,11 +3,10 @@ #include #include +#include "base.hbs" #include "future.hbs" #include "queue.hbs" -struct Void {}; - enum State { READY, PARKED, @@ -32,19 +31,38 @@ struct Scheduler { struct Queue *globalQueue; unsigned int threadCount; struct ThreadContext **threads; + atomic_int running_threads; }; int taskCount(); void taskAddOne(); +// Get global scheduler struct Scheduler *getScheduler(); struct ThreadContext * getCurrentCtx(); +struct Task * getCurrentTask(); void struct Scheduler::init(unsigned int threadCount); void struct Scheduler::run(); +/** + * Place an asynchronous task (Future) in the task queue + * so that it can be executed asynchronously + */ struct Task * struct Scheduler::spawn(trait Future * future); + +/** + * Start the de initialization of the scheduler, by telling all scheduling threads to stop + */ +void struct Scheduler::deinit(); + +/** + * Destroy the scheduler so that it no longer waits for tasks + */ void struct Scheduler::destroy(); -// async void yield() +/** + * Give up execution authority for the currently executing asynchronous task + * and return control to the scheduler. 
+ */ trait Future *yield(); #endif diff --git a/libcbs/test/scheduler/scheduler_test.cbs b/libcbs/test/scheduler/scheduler_test.cbs index 1645d918a7ec..f40f38e58bf5 100644 --- a/libcbs/test/scheduler/scheduler_test.cbs +++ b/libcbs/test/scheduler/scheduler_test.cbs @@ -6,7 +6,7 @@ atomic_int g_task_num = 200; void isComplete() { atomic_fetch_sub(&g_task_num, 1); if (atomic_load(&g_task_num) == 0) { - struct Scheduler::destroy(); + struct Scheduler::deinit(); } } @@ -41,6 +41,7 @@ int main() { struct Scheduler::spawn(taskFunc(i)); } struct Scheduler::run(); + struct Scheduler::destroy(); return 0; } -- Gitee From 0ee44c2dd9dd86187f5742e6b30383004da17e72 Mon Sep 17 00:00:00 2001 From: zhaoxuhui Date: Mon, 24 Mar 2025 17:54:03 +0800 Subject: [PATCH 2/3] [libcbs] add coroutine time api and test --- clang/lib/Analysis/BSC/BSCBorrowChecker.cpp | 2 +- clang/lib/Sema/BSC/SemaBSCCoroutine.cpp | 26 ++- libcbs/src/CMakeLists.txt | 10 + libcbs/src/scheduler/queue.hbs | 1 + libcbs/src/scheduler/scheduler.cbs | 39 ++-- libcbs/src/scheduler/sync/error.hbs | 21 ++ libcbs/src/scheduler/sync/mutex.cbs | 51 +++++ libcbs/src/scheduler/sync/mutex.hbs | 25 +++ libcbs/src/scheduler/sync/notify.cbs | 102 +++++++++ libcbs/src/scheduler/sync/notify.hbs | 57 +++++ libcbs/src/scheduler/sync/semaphore.cbs | 222 ++++++++++++++++++++ libcbs/src/scheduler/sync/semaphore.hbs | 54 +++++ libcbs/src/scheduler/time/time.cbs | 92 ++++++++ libcbs/src/scheduler/time/time.hbs | 96 +++++++++ libcbs/test/scheduler/CMakeLists.txt | 16 ++ libcbs/test/scheduler/scheduler_test.cbs | 17 +- libcbs/test/scheduler/sync_test.cbs | 52 +++++ libcbs/test/scheduler/time_test.cbs | 80 +++++++ 18 files changed, 926 insertions(+), 37 deletions(-) create mode 100644 libcbs/src/scheduler/sync/error.hbs create mode 100644 libcbs/src/scheduler/sync/mutex.cbs create mode 100644 libcbs/src/scheduler/sync/mutex.hbs create mode 100644 libcbs/src/scheduler/sync/notify.cbs create mode 100644 libcbs/src/scheduler/sync/notify.hbs create mode 100644 libcbs/src/scheduler/sync/semaphore.cbs create mode 100644 libcbs/src/scheduler/sync/semaphore.hbs create mode 100644 libcbs/src/scheduler/time/time.cbs create mode 100644 libcbs/src/scheduler/time/time.hbs create mode 100644 libcbs/test/scheduler/sync_test.cbs create mode 100644 libcbs/test/scheduler/time_test.cbs diff --git a/clang/lib/Analysis/BSC/BSCBorrowChecker.cpp b/clang/lib/Analysis/BSC/BSCBorrowChecker.cpp index 4d3ff316ed87..d8658c26bebe 100644 --- a/clang/lib/Analysis/BSC/BSCBorrowChecker.cpp +++ b/clang/lib/Analysis/BSC/BSCBorrowChecker.cpp @@ -481,7 +481,7 @@ void ActionExtract::VisitDeclStmt(DeclStmt *DS) { for (Decl *D : DS->decls()) { if (VarDecl *VD = dyn_cast(D)) { if (VD->getType()->isStructureType() && IsTrackedType(VD->getType()) && - isa(VD->getInit())) { + VD->getInit() && isa(VD->getInit())) { BuildOnGet = false; Dest = std::make_unique(VD->getName().str(), VD->getType(), VD->getLocation()); diff --git a/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp b/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp index a67c1c8dfea5..559766f6c691 100644 --- a/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp +++ b/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp @@ -1974,7 +1974,7 @@ public: ResReturnVD, ResReturnVD->getType(), VK_LValue, SourceLocation()); ResExpr = ImplicitCastExpr::Create(SemaRef.Context, ResReturnVD->getType(), - CK_LValueToRValue, ResExpr, nullptr, + CK_NoOp, ResExpr, nullptr, VK_PRValue, FPOptionsOverride()); SmallVector Args; @@ -2070,6 +2070,15 @@ public: SemaRef, cast(CE->getType().getTypePtr()) 
->getPointeeType()))); } + } else { + QualType QT = AE->getType(); + IsOptimization = + !(QT.getTypePtr()->isBSCFutureType()) && + (implementedFutureType(SemaRef, QT) || + (isa(QT.getTypePtr()) && + implementedFutureType( + SemaRef, cast(QT.getTypePtr()) + ->getPointeeType()))); } Expr *ResultStmt = SemaRef @@ -2329,7 +2338,7 @@ public: Expr *AwaitResultRef = SemaRef.BuildDeclRefExpr( AwaitResultVD, AwaitResultVD->getType(), VK_LValue, SourceLocation()); AwaitResultRef = ImplicitCastExpr::Create( - SemaRef.Context, AwaitResultVD->getType(), CK_LValueToRValue, + SemaRef.Context, AwaitResultVD->getType(), CK_NoOp, AwaitResultRef, nullptr, VK_PRValue, FPOptionsOverride()); return AwaitResultRef; } @@ -2924,7 +2933,7 @@ ExprResult Sema::BuildAwaitExpr(SourceLocation AwaitLoc, Expr *E) { const RecordType *FutureType = dyn_cast(AwaitReturnTy.getDesugaredType(Context)); RecordDecl *FutureRD = FutureType->getDecl(); - + bool HasFound = false; BSCMethodDecl *PollFD = lookupBSCMethodInRecord(*this, "poll", FutureRD); if (PollFD != nullptr) { const RecordType *PollResultType = dyn_cast( @@ -2935,6 +2944,17 @@ ExprResult Sema::BuildAwaitExpr(SourceLocation AwaitLoc, Expr *E) { FieldIt != Field_end; ++FieldIt) { if (FieldIt->getDeclName().getAsString() == "res") { AwaitReturnTy = FieldIt->getType(); + HasFound = true; + break; + } + } + // When the definition and implementation of the poll function are written into .hbs and .cbs respectively, + // FieldDecl is not instantiated when parsing the header file, so the corresponding `res` cannot be found. + if (!HasFound) { + if (auto *CTSD = dyn_cast(PollResult)) { + if (CTSD->getTemplateArgs().size() == 1) { + AwaitReturnTy = CTSD->getTemplateArgs()[0].getAsType(); + } } } } diff --git a/libcbs/src/CMakeLists.txt b/libcbs/src/CMakeLists.txt index 2fbd38579c36..a6a39ac901f6 100644 --- a/libcbs/src/CMakeLists.txt +++ b/libcbs/src/CMakeLists.txt @@ -2,6 +2,10 @@ add_library( stdcbs STATIC bishengc_safety/bishengc_safety.cbs scheduler/scheduler.cbs + scheduler/sync/mutex.cbs + scheduler/sync/notify.cbs + scheduler/sync/semaphore.cbs + scheduler/time/time.cbs string/string.cbs hash/sip.cbs raw_table/raw_table.cbs @@ -16,6 +20,8 @@ target_include_directories(stdcbs raw_vec vec scheduler + scheduler/sync + scheduler/time string list hash @@ -43,6 +49,10 @@ install(FILES raw_vec/raw_vec.hbs scheduler/queue.hbs scheduler/scheduler.hbs + scheduler/sync/mutex.hbs + scheduler/sync/notify.hbs + scheduler/sync/semaphore.hbs + scheduler/time/time.hbs string/string.hbs list/list.hbs hash/sip.hbs diff --git a/libcbs/src/scheduler/queue.hbs b/libcbs/src/scheduler/queue.hbs index 6d4c6dbec524..a855b1a644d6 100644 --- a/libcbs/src/scheduler/queue.hbs +++ b/libcbs/src/scheduler/queue.hbs @@ -20,6 +20,7 @@ void struct Queue::push(struct Queue *this, T value) { pthread_mutex_lock(&this->mutex); this->buf[this->writeIndex] = value; if ((this->writeIndex + 1) % this->capacity == this->readIndex) { + printf("push a: %d %d %d\n",this->writeIndex, this->capacity, this->readIndex); this->writeIndex = this->capacity; this->readIndex = 0; this->capacity = 2 * this->capacity; diff --git a/libcbs/src/scheduler/scheduler.cbs b/libcbs/src/scheduler/scheduler.cbs index b8748f903a87..71e15c9ff9a5 100644 --- a/libcbs/src/scheduler/scheduler.cbs +++ b/libcbs/src/scheduler/scheduler.cbs @@ -59,7 +59,7 @@ impl trait Future for struct _Yield; trait Future *yield() { struct _Yield *ptr = malloc(sizeof(struct _Yield)); - if (ptr == NULL) { + if (ptr != NULL) { fprintf(stderr, "Error: memory alloc 
failed\n"); exit(EXIT_FAILURE); } @@ -67,13 +67,12 @@ trait Future *yield() { return ptr; } -static struct Task * stealTask() { - struct Task * task = NULL; +static void * stealTask() { + void * task = NULL; for (unsigned int i = 1; i < S.threadCount; i++) { if (S.isInit && S.threads[i] != NULL) { - task = (struct Task *)S.threads[i]->localQueue.peek(); - if (task && task->state == READY) { - S.threads[i]->localQueue.pop(); + task = S.threads[i]->localQueue.pop(); + if (task) { return task; } } @@ -82,20 +81,18 @@ static struct Task * stealTask() { } static void * getReadyTask() { - struct Task * task = NULL; + void * task = NULL; do { // 1.Get the task from the local queue first. if (g_curCtx->id > 0) { - task = (struct Task *)g_curCtx->localQueue.peek(); - if (task && task->state == READY) { - g_curCtx->localQueue.pop(); + task = g_curCtx->localQueue.pop(); + if (task) { break; } } // 2.If the local queue is empty, try to get the task from the global queue. - task = (struct Task *)S.globalQueue->peek(); - if (task && task->state == READY) { - S.globalQueue->pop(); + task = S.globalQueue->pop(); + if (task) { break; } // 3.Get thetask from other queues when the global queue is empty. @@ -122,7 +119,6 @@ static int findQueue() { void *schedule(void *arg) { struct ThreadContext *ctx = (struct ThreadContext *)arg; g_curCtx = ctx; - ++S.running_threads; while (true) { struct Task *task = (struct Task *)getReadyTask(); @@ -130,10 +126,13 @@ void *schedule(void *arg) { if (!task && !S.isInit) { break; } + // atomic_store(&task->state, RUNNING); struct PollResult s = task->future->poll(); ctx->runningTask = NULL; if (s.isPending) { + // if (atomic_load(&task->state) == RUNNING) + // atomic_store(&task->state, READY); int index = findQueue(); if (index == 0) { S.globalQueue->push(task); @@ -153,7 +152,6 @@ void *schedule(void *arg) { break; } } - --S.running_threads; return NULL; } @@ -173,6 +171,7 @@ void struct Scheduler::init(unsigned int threadCount) { g_mainThread = malloc(sizeof(struct ThreadContext)); g_taskCount = 0; + if (g_mainThread == NULL) { fprintf(stderr, "Error: main thread memory alloc failed\n"); exit(EXIT_FAILURE); @@ -182,7 +181,6 @@ void struct Scheduler::init(unsigned int threadCount) { g_curCtx = g_mainThread; S.threads[0] = g_mainThread; S.isInit = true; - S.running_threads = 0; unsigned int i; for (i = 1; i <= threadCount; i++) { @@ -235,16 +233,9 @@ struct Task *struct Scheduler::spawn(trait Future *future) { return t; } -void struct Scheduler::deinit() { - S.isInit = false; -} - void struct Scheduler::destroy() { unsigned int i; - assert(!S.isInit); - while (S.running_threads > 0) { - // All threads must finish before deallocating - } + S.isInit = false; if (S.globalQueue != NULL) { S.globalQueue->destroy(); free(S.globalQueue); diff --git a/libcbs/src/scheduler/sync/error.hbs b/libcbs/src/scheduler/sync/error.hbs new file mode 100644 index 000000000000..c589001fc7ff --- /dev/null +++ b/libcbs/src/scheduler/sync/error.hbs @@ -0,0 +1,21 @@ +#ifndef BISHENG_C_SYNC_ERROR_H +#define BISHENG_C_SYNC_ERROR_H + +typedef enum SemaphoreError { + CLOSED, + NOPERMITS, + LESSPERMITS +}SemaphoreError; + +typedef enum MutexError { + BLOCK, +}MutexError; + + +struct OptionalRes { + int has_error; // 表示是否有错误,1 表示有,0 表示无 + T1 err; // 存储 Error 枚举值 + T2 res; +}; + +#endif diff --git a/libcbs/src/scheduler/sync/mutex.cbs b/libcbs/src/scheduler/sync/mutex.cbs new file mode 100644 index 000000000000..79dabb4fae68 --- /dev/null +++ b/libcbs/src/scheduler/sync/mutex.cbs @@ -0,0 +1,51 @@ 
+#include "mutex.hbs" + +struct Mutex struct Mutex::new(T value) { + struct Semaphore sem = struct Semaphore::new(1); + struct Mutex mutex = {.sem = sem, .value = value}; + return mutex; +} + +// async struct MutexGuard struct Mutex::lock(This *this) { +// struct OptionalRes res = await this->sem.acquire(); +// struct MutexGuard MG = {.lock = this, .sp = res.res}; +// return MG; +// } + +// struct MutexGuard struct Mutex::blocking_lock(This *this) { + // 通过struct Scheduler::block_on()调用 +// } + +struct OptionalRes> struct Mutex::try_lock(This *this) { + struct OptionalRes> ret; + struct OptionalRes res = this->sem.try_acquire(); + if (res.has_error) { + ret.has_error = 1; + ret.err = MutexError::BLOCK; + } else { + ret.has_error = 0; + struct MutexGuard MG = {.lock = this, .sp = res.res}; + ret.res = MG; + } + return ret; +} + +T * borrow struct Mutex::get_mut(This * borrow this) { + T * borrow value= this->value; + return value; +} + +void struct MutexGuard::drop(This *this) { + this->sp.drop(); +} + +const T * borrow struct MutexGuard::deref(const This * borrow this) { + const T * borrow value = this->lock->value; + return value; +} + +T * borrow struct MutexGuard::deref_mut(This * borrow this) { + T * borrow value= this->lock->value; + return value; +} + diff --git a/libcbs/src/scheduler/sync/mutex.hbs b/libcbs/src/scheduler/sync/mutex.hbs new file mode 100644 index 000000000000..b5073bdd855a --- /dev/null +++ b/libcbs/src/scheduler/sync/mutex.hbs @@ -0,0 +1,25 @@ +#ifndef BISHENG_C_MUTEX_H +#define BISHENG_C_MUTEX_H + +#include "semaphore.hbs" + +struct Mutex { + struct Semaphore sem; + T value; +}; + +struct MutexGuard { + struct Mutex *lock; + struct SemaphorePermit sp; +}; + +struct Mutex struct Mutex::new(T value); +async struct MutexGuard struct Mutex::lock(This *this); +struct MutexGuard struct Mutex::blocking_lock(This *this); +OptionalRes> struct Mutex::try_lock(This *this); +T * borrow struct Mutex::get_mut(This * borrow this); + +void struct MutexGuard::drop(This *this); +const T * borrow struct MutexGuard::deref(const This * borrow this); +T * borrow struct MutexGuard::deref_mut(This * borrow this); +#endif diff --git a/libcbs/src/scheduler/sync/notify.cbs b/libcbs/src/scheduler/sync/notify.cbs new file mode 100644 index 000000000000..a7f0eb1ea92f --- /dev/null +++ b/libcbs/src/scheduler/sync/notify.cbs @@ -0,0 +1,102 @@ +#include "notify.hbs" +#include + +uint64_t set_state(uint64_t data, uint64_t state) { + return (data & NOTIFY_WAITERS_CALLS_MASK) | (state & STATE_MASK); +} + +uint64_t get_state(uint64_t data) { + return data & STATE_MASK; +} + +uint64_t get_num_notify_waiters_calls(uint64_t data) { + return (data & NOTIFY_WAITERS_CALLS_MASK) >> NOTIFY_WAITERS_SHIFT; +} + +uint64_t inc_num_notify_waiters_calls(uint64_t data) { + return data + (1 << NOTIFY_WAITERS_SHIFT); +} + +void atoimc_inc_num_notify_waiters_calls(atomic_int data) { + atomic_fetch_add(&data, 1 << NOTIFY_WAITERS_SHIFT); +} + +struct NotifyWaitlist struct NotifyWaitlist::new() { + + struct NotifyWaitlist waiters; + waiters.queue.init(128); + return waiters; +} + +struct Notify struct Notify::new() { + struct NotifyWaitlist waiters = struct NotifyWaitlist::new(); + struct Notify notify = {.state = EMPTY, .waiters = waiters}; + return notify; +} + +struct Notified struct Notify::notified(This * this) { + uint64_t calls_num = get_num_notify_waiters_calls(Init); + struct Notified notified = {.notify = this, .state = Init, .notify_waiters_calls = calls_num}; + return notified; +} + +void notify_locked(struct 
NotifyWaitlist waitlist, atomic_int state, int curr, enum NotifyOneStrategy strategy) { + switch (get_state(curr)) + { + case EMPTY: + case NOTIFIED: + if (atomic_compare_exchange_weak(&state, &curr, set_state(curr, NOTIFIED))) { + break; + } else { + uint64_t actual_state = get_state(state); + assert(actual_state == EMPTY || actual_state == NOTIFIED); + atomic_int actual = set_state(state, NOTIFIED); + atomic_load(&actual); + } + break; + case WAITING: + if (strategy == Fifo) { + if (!waitlist.queue.isEmpty()) { + struct NotifyWaiter *waiter = (struct NotifyWaiter *)waitlist.queue.pop(); + atomic_store(&waiter->task->state, READY); + } + } + break; + default: + break; + } +} + +void struct Notify::notify_with_strategy(This * this, enum NotifyOneStrategy strategy) { + int curr = atomic_load(&this->state); + while (get_state(curr) == EMPTY || get_state(curr) == NOTIFIED) { + uint64_t new = set_state(curr, NOTIFIED); + if (atomic_compare_exchange_weak(&this->state, &curr, new)) { + break; + } else { + curr = this->state; + } + } + pthread_mutex_lock(&this->waiters.queue.mutex); + curr = atomic_load(&this->state); + notify_locked(this->waiters, this->state, curr, strategy); + pthread_mutex_unlock(&this->waiters.queue.mutex); +} + +void struct Notify::notify_one(This * this) { + this->notify_with_strategy(Fifo); +} + +// void struct Notify::notify_waiters(This * this) { +// pthread_mutex_lock(&this->waiters.queue.mutex); +// int curr = atomic_load(&this->state); +// if (get_state(curr) == EMPTY || get_state(curr) == NOTIFIED) { +// atoimc_inc_num_notify_waiters_calls(this->state); +// pthread_mutex_unlock(&this->waiters.queue.mutex); +// return; +// } + +// int new_state = set_state(inc_num_notify_waiters_calls(curr), EMPTY); +// atomic_store(&this->state, new_state); +// pthread_mutex_unlock(&this->waiters.queue.mutex); +// } diff --git a/libcbs/src/scheduler/sync/notify.hbs b/libcbs/src/scheduler/sync/notify.hbs new file mode 100644 index 000000000000..cd4ea2321dcc --- /dev/null +++ b/libcbs/src/scheduler/sync/notify.hbs @@ -0,0 +1,57 @@ +#ifndef BISHENG_C_NOTIFY_H +#define BISHENG_C_NOTIFY_H + +#include +#include "queue.hbs" +#include "scheduler.hbs" + +const uint64_t NOTIFY_WAITERS_SHIFT = 2; +const uint64_t STATE_MASK = (1 << NOTIFY_WAITERS_SHIFT) - 1; +const uint64_t NOTIFY_WAITERS_CALLS_MASK = ~STATE_MASK; + +enum NotifyState { + EMPTY, // initial idle state + WAITING, // one or more threads are currently waiting to be notified + NOTIFIED // pending notification +}; + +enum NotifiedState { + Init, + Waiting, + Done +}; + +enum NotifyOneStrategy { + Fifo, + Lifo +}; + +struct NotifyWaiter { + struct Task * task; +}; + +struct NotifyWaitlist { + struct Queue queue; +}; + +struct Notify { + atomic_int state; + struct NotifyWaitlist waiters; +}; + +struct Notified { + struct Notify *notify; + atomic_int state; + uint64_t notify_waiters_calls; + struct NotifyWaiter waiter; +}; + +struct Notify struct Notify::new(); +/** + * Wait for a notification + */ +struct Notified struct Notify::notified(This * this); +void struct Notify::notify_one(This * this); +void struct Notify::notify_las(This * this); +void struct Notify::notify_all(This * this); +#endif diff --git a/libcbs/src/scheduler/sync/semaphore.cbs b/libcbs/src/scheduler/sync/semaphore.cbs new file mode 100644 index 000000000000..60b696274160 --- /dev/null +++ b/libcbs/src/scheduler/sync/semaphore.cbs @@ -0,0 +1,222 @@ +#include "semaphore.hbs" +#include + +pthread_mutex_t mutex; + +struct Waiter *struct Waiter::new(uint32_t permits, 
struct Task *task) { + // struct Waiter waiter = {.permits = permits, .task = task}; + struct Waiter *ptr = malloc(sizeof(struct Waiter)); + ptr->permits = permits; + ptr->task = task; + return ptr; +} + +struct Semaphore struct Semaphore::new(uint32_t permits) { + struct Semaphore sem; + sem.waiters.queue.init(128); + sem.waiters.closed = 0; + sem.permits = permits; + sem.closed = 0; + pthread_mutex_init(&sem.mutex, NULL); + return sem; +} + +struct Semaphore struct Semaphore::new_closed() { + struct Semaphore sem; + sem.waiters.queue.init(128); + sem.waiters.closed = 0; + sem.permits = 0; + sem.closed = 1; + pthread_mutex_init(&sem.mutex, NULL); + return sem; +} + +uint32_t struct Semaphore::available_permits(const This * this) { + return atomic_load(&this->permits); +} + +void struct Semaphore::add_permits(This * this, uint32_t permits) { + if (permits == 0) + return; + + struct Queue queue = this->waiters.queue; + while (permits > 0 && !queue.isEmpty()) { + struct Waiter *waiter = (struct Waiter *)queue.peek(); + uint32_t w_permits = waiter->permits; + if (w_permits > permits) + break; + queue.pop(); + permits -= w_permits; + waiter->permits = 0; + // printf("struct Semaphore::add_permits\n"); + atomic_store(&waiter->task->state, READY); + } + + if (permits > 0) { + pthread_mutex_lock(&this->mutex); + atomic_fetch_add(&this->permits, permits); + pthread_mutex_unlock(&this->mutex); + } +} + +uint32_t struct Semaphore::forget_permits(This * this, uint32_t num) { + if (num == 0) + return 0; + + int ret = num; + pthread_mutex_lock(&this->mutex); + if (this->available_permits() >= num) { + atomic_fetch_sub(&this->permits, num); + } else { + ret = this->permits; + atomic_store(&this->permits, 0); + } + pthread_mutex_unlock(&this->mutex); + return ret; +} + +struct PollResult> struct Acquire::poll(This *this) { + struct OptionalRes res = {.has_error = 0}; + if (this->node->permits == 0) + return struct PollResult>::completed(res); + + if (this->sem->closed) { + res.has_error = 1; + res.err = SemaphoreError::CLOSED; + return struct PollResult>::completed(res); + } + + pthread_mutex_lock(&this->sem->mutex); + if (this->sem->available_permits() >= this->node->permits && this->sem->waiters.queue.isEmpty()) { + // printf("struct Acquire::poll completed: %d\n", this->sem.available_permits()); + atomic_fetch_sub(&this->sem->permits, this->node->permits); + pthread_mutex_unlock(&this->sem->mutex); + return struct PollResult>::completed(res); + } else { + pthread_mutex_unlock(&this->sem->mutex); + // printf("struct Acquire::poll pending\n"); + this->sem->waiters.queue.push(this->node); + atomic_store(&this->node->task->state, PARKED); + return struct PollResult>::pending(); + } +} + +void struct Acquire::free(This *this) {} + +impl trait Future> for struct Acquire; + +struct Acquire acquire(struct Waiter *waiter, struct Semaphore *sem) { + struct Acquire acquire = {.node = waiter, .sem = sem}; + return acquire; +} + +async struct OptionalRes struct Semaphore::acquire(This *this) { + // printf("struct Semaphore::acquire\n"); + struct Task * task = getCurrentTask(); + struct Waiter *waiter = struct Waiter::new(1, task); + struct OptionalRes res = await acquire(waiter, this); + //printf("struct Semaphore::acquire: %d\n", this->available_permits()); + if (!res.has_error) { + struct SemaphorePermit SP = {.sem = *this, .permits = 1}; + res.res = SP; + } + return res; +} + +async struct OptionalRes struct Semaphore::acquire_many(This *this, uint32_t num) { + struct Task * task = getCurrentTask(); + struct 
Waiter *waiter = struct Waiter::new(num, task); + // await acquire(waiter, this); + // struct SemaphorePermit SP = {.sem = this, .permits = num}; + struct OptionalRes res = await acquire(waiter, this); + if (!res.has_error) { + struct SemaphorePermit SP = {.sem = *this, .permits = num}; + res.res = SP; + } + return res; +} + +struct OptionalRes struct Semaphore::try_acquire_many(This *this, uint32_t num) { + struct OptionalRes res; + pthread_mutex_lock(&this->mutex); + if (this->closed) { + pthread_mutex_unlock(&this->mutex); + res.has_error = 1; + res.err = SemaphoreError::CLOSED; + return res; + } + + if (this->permits < num) { + pthread_mutex_unlock(&this->mutex); + res.has_error = 1; + res.err = SemaphoreError::NOPERMITS; + return res; + } + res.has_error = 0; + atomic_fetch_sub(&this->permits, num); + struct SemaphorePermit SP = {.sem = *this, .permits = num}; + res.res = SP; + pthread_mutex_unlock(&this->mutex); + return res; +} + +struct OptionalRes struct Semaphore::try_acquire(This *this) { + return this->try_acquire_many(1); +} + +void struct Semaphore::close(This * this) { + if (this->closed) + return; + + pthread_mutex_lock(&this->mutex); + this->closed = 1; + pthread_mutex_unlock(&this->mutex); +} + +_Bool struct Semaphore::is_close(This * this) { + _Bool res; + pthread_mutex_lock(&this->mutex); + res = this->closed; + pthread_mutex_unlock(&this->mutex); + return res; +} + +void struct SemaphorePermit::drop(This *this) { + // printf("struct SemaphorePermit::drop\n"); + this->sem.add_permits(this->permits); +} + +void struct SemaphorePermit::forget(This *this) { + this->permits = 0; +} + +void struct SemaphorePermit::merge(This *this, struct SemaphorePermit *other) { + // merging permits from different semaphore instances + assert(&this->sem == &other->sem); + this->permits += other->permits; + other->permits = 0; +} + +struct OptionalRes struct SemaphorePermit::split(This *this, uint32_t num) { + struct OptionalRes res; + if (this->permits < num) { + res.has_error = 1; + res.err = SemaphoreError::LESSPERMITS; + return res; + } + + this->permits -= num; + struct SemaphorePermit SP = {.sem = this->sem, .permits = num}; + res.has_error = 0; + res.res = SP; + return res; +} + +uint32_t struct SemaphorePermit::num_permits(This *this) { + return this->permits; +} + + + + + diff --git a/libcbs/src/scheduler/sync/semaphore.hbs b/libcbs/src/scheduler/sync/semaphore.hbs new file mode 100644 index 000000000000..3e11a3b5f4fa --- /dev/null +++ b/libcbs/src/scheduler/sync/semaphore.hbs @@ -0,0 +1,54 @@ +#ifndef BISHENG_C_SEMAPHORE_H +#define BISHENG_C_SEMAPHORE_H + +#include +#include "error.hbs" +#include "queue.hbs" +#include "scheduler.hbs" + +struct Waiter { + uint32_t permits; + struct Task * task; +}; + +struct Waitlist { + struct Queue queue; + _Bool closed; +}; + +struct Semaphore { + pthread_mutex_t mutex; + struct Waitlist waiters; + atomic_int permits; + _Bool closed; +}; + +struct SemaphorePermit { + struct Semaphore sem; + uint32_t permits; +}; + +struct Acquire { + struct Waiter *node; + struct Semaphore *sem; + uint32_t permits; +}; + +struct Semaphore struct Semaphore::new(uint32_t permits); +struct Semaphore struct Semaphore::new_closed(); +uint32_t struct Semaphore::available_permits(const This * this); +void struct Semaphore::add_permits(This * this, uint32_t permits); +uint32_t struct Semaphore::forget_permits(This * this, uint32_t num); +async struct OptionalRes struct Semaphore::acquire(This * this); +async struct OptionalRes struct Semaphore::acquire_many(This * this, 
uint32_t num); +struct OptionalRes struct Semaphore::try_acquire(This *this); +struct OptionalRes struct Semaphore::try_acquire_many(This *this, uint32_t num); +void struct Semaphore::close(This * this); +_Bool struct Semaphore::is_close(This * this); + +void struct SemaphorePermit::drop(This *this); +void struct SemaphorePermit::forget(This *this); +void struct SemaphorePermit::merge(This *this, struct SemaphorePermit *other); +struct OptionalRes struct SemaphorePermit::split(This *this, uint32_t num); +uint32_t struct SemaphorePermit::num_permits(This *this); +#endif diff --git a/libcbs/src/scheduler/time/time.cbs b/libcbs/src/scheduler/time/time.cbs new file mode 100644 index 000000000000..252abc86915d --- /dev/null +++ b/libcbs/src/scheduler/time/time.cbs @@ -0,0 +1,92 @@ +#include "time.hbs" + +#include +#include + +struct PollResult struct Sleep::poll(This *this) { + clock_t curTime = clock(); + if (curTime < this->endTime) { + this->completed = 0; + return struct PollResult::pending(); + } else { + this->completed = 1; + return struct PollResult::completed((struct Void){}); + } +} + +void struct Sleep::free(This *this) {} + +impl trait Future for struct Sleep; + +struct Sleep sleep(uint64_t ms) { + clock_t endTime = clock() + (ms * CLOCKS_PER_SEC) / 1000; + struct Sleep s = {.endTime = endTime, .completed = 0}; + return s; +} + +struct Sleep sleep_until(clock_t time) { + struct Sleep s = {.endTime = time, .completed = 0}; + return s; +} + +clock_t struct Sleep::deadline(const This * borrow this) { + return this->endTime; +} + +void struct Sleep::reset(This * borrow this, clock_t time) { + this->endTime = time; +} + +_Bool struct Sleep::is_elapsed(const This * borrow this) { + return clock() > this->endTime; +} + +struct Interval interval(uint64_t ms) { + struct Interval interval; + interval.nextTick = clock() + (ms * CLOCKS_PER_SEC) / 1000; + interval.duration = ms; + return interval; +} + +struct Interval interval_at(clock_t start, uint64_t ms) { + struct Interval interval; + interval.nextTick = start + (ms * CLOCKS_PER_SEC) / 1000; + interval.duration = ms; + return interval; +} + +async void struct Interval::tick(This * borrow this) { + await sleep_until(this->nextTick); + this->nextTick += (this->duration * CLOCKS_PER_SEC) / 1000; +} + +// If the task times out and returns false +struct PollResult<_Bool> struct Timeout::poll(This *this) { + struct PollResult res1 = this->future->poll(); + struct PollResult res2 = this->delay.poll(); + if (!res1.isPending) { + return struct PollResult<_Bool>::completed(1); + } + if (!res2.isPending) { + return struct PollResult<_Bool>::completed(0); + } + return struct PollResult<_Bool>::pending(); +} + +void struct Timeout::free(This *this) {} + +impl trait Future<_Bool> for struct Timeout; + +struct Timeout timeout(uint64_t ms, trait Future *future) { + clock_t endTime = clock() + (ms * CLOCKS_PER_SEC) / 1000; + struct Sleep sleep = {.endTime = endTime, .completed = 0}; + struct Timeout t = {.future = future, .delay = sleep}; + return t; +} + +struct Timeout timeout_at(clock_t time, trait Future *future) { + struct Sleep sleep = {.endTime = time, .completed = 0}; + struct Timeout t = {.future = future, .delay = sleep}; + return t; +} + diff --git a/libcbs/src/scheduler/time/time.hbs b/libcbs/src/scheduler/time/time.hbs new file mode 100644 index 000000000000..37a748e8322f --- /dev/null +++ b/libcbs/src/scheduler/time/time.hbs @@ -0,0 +1,96 @@ +#ifndef BISHENG_C_TIME_H +#define BISHENG_C_TIME_H + +#include +#include +#include "base.hbs" 
+#include "future.hbs"
+
+struct Sleep {
+  clock_t endTime;
+  _Bool completed;
+};
+
+// The Sleep struct implements the Future trait.
+struct PollResult struct Sleep::poll(This *this);
+void struct Sleep::free(This *this);
+
+/**
+ * Pause the execution of the current asynchronous task for a period of time.
+ * The parameter is the sleep duration in milliseconds.
+ */
+struct Sleep sleep(uint64_t ms);
+
+/**
+ * Pause the execution of the current asynchronous task until the specified time point.
+ * The parameter is of type clock_t, representing the time point at which the wait ends.
+ */
+struct Sleep sleep_until(clock_t time);
+
+/**
+ * Get the deadline set for the Sleep instance.
+ */
+clock_t struct Sleep::deadline(const This * borrow this);
+
+/**
+ * Reset the deadline of the Sleep instance.
+ */
+void struct Sleep::reset(This * borrow this, clock_t time);
+
+/**
+ * Check whether the deadline set for the Sleep instance has passed.
+ * If the deadline has been reached or exceeded, the function returns true;
+ * otherwise, it returns false.
+ */
+_Bool struct Sleep::is_elapsed(const This * borrow this);
+
+struct Interval {
+  clock_t nextTick;
+  uint64_t duration;
+};
+
+/**
+ * Create a periodic timer that triggers repeatedly at the specified time interval.
+ * The parameter is the time interval in milliseconds; an instance of Interval is returned.
+ */
+struct Interval interval(uint64_t ms);
+
+/**
+ * Create a periodic timer with an explicit first trigger time.
+ * The first parameter is of type clock_t, representing the first trigger time;
+ * the second parameter is of type uint64_t, representing the time interval in milliseconds.
+ * An instance of Interval is returned.
+ */
+struct Interval interval_at(clock_t start, uint64_t ms);
+
+/**
+ * Wait for the next timer trigger.
+ */
+async void struct Interval::tick(This * borrow this);
+
+// `struct Timeout` should be generic over the wrapped future's result type.
+// However, `struct Timeout` must implement `trait Future`, and the current
+// support for generic traits is incomplete: implementing the trait for a
+// generic `struct Timeout` is not supported yet.
+// Here we temporarily assume that all task return types are void.
+// If data needs to be passed out in the future, it can be done through a channel.
+struct Timeout {
+  trait Future *future;
+  struct Sleep delay;
+};
+struct PollResult<_Bool> struct Timeout::poll(This *this);
+void struct Timeout::free(This *this);
+
+/**
+ * Requires a `Future` to complete before the specified duration has elapsed.
+ * If the future completes before the duration has elapsed, true is returned;
+ * otherwise, false is returned and the future is canceled.
+ */
+struct Timeout timeout(uint64_t ms, trait Future *future);
+/**
+ * Requires a `Future` to complete before the specified instant in time.
+ * If the future completes before the instant is reached, true is returned;
+ * otherwise, false is returned.
+ */ +struct Timeout timeout_at(clock_t time, trait Future *future); +#endif diff --git a/libcbs/test/scheduler/CMakeLists.txt b/libcbs/test/scheduler/CMakeLists.txt index be667127f819..39b553bbf2cb 100644 --- a/libcbs/test/scheduler/CMakeLists.txt +++ b/libcbs/test/scheduler/CMakeLists.txt @@ -7,3 +7,19 @@ add_libcbs_test( SRCS scheduler_test.cbs ) + +add_libcbs_test( + test_time + SUITE + libcbs-scheduler-tests + SRCS + time_test.cbs +) + +add_libcbs_test( + test_sync + SUITE + libcbs-scheduler-tests + SRCS + sync_test.cbs +) \ No newline at end of file diff --git a/libcbs/test/scheduler/scheduler_test.cbs b/libcbs/test/scheduler/scheduler_test.cbs index f40f38e58bf5..300b87107da1 100644 --- a/libcbs/test/scheduler/scheduler_test.cbs +++ b/libcbs/test/scheduler/scheduler_test.cbs @@ -1,12 +1,12 @@ #include #include "scheduler.hbs" -atomic_int g_task_num = 200; +atomic_int g_task_num = 20; void isComplete() { atomic_fetch_sub(&g_task_num, 1); if (atomic_load(&g_task_num) == 0) { - struct Scheduler::deinit(); + struct Scheduler::destroy(); } } @@ -35,13 +35,12 @@ async void taskFunc(int i) { } int main() { - struct Scheduler::init(4); - // Task allocation to the global queue - for (int i = 0; i < 100; i++) { - struct Scheduler::spawn(taskFunc(i)); - } - struct Scheduler::run(); - struct Scheduler::destroy(); + // struct Scheduler::init(4); + // // Task allocation to the global queue + // for (int i = 0; i < 10; i++) { + // struct Scheduler::spawn(taskFunc(i)); + // } + // struct Scheduler::run(); return 0; } diff --git a/libcbs/test/scheduler/sync_test.cbs b/libcbs/test/scheduler/sync_test.cbs new file mode 100644 index 000000000000..06e8a49aea32 --- /dev/null +++ b/libcbs/test/scheduler/sync_test.cbs @@ -0,0 +1,52 @@ + +#include "error.hbs" +#include "mutex.hbs" +#include "semaphore.hbs" +#include "time.hbs" + +atomic_int g_task_num = 4; + +void isComplete() { + atomic_fetch_sub(&g_task_num, 1); + if (atomic_load(&g_task_num) == 0) { + struct Scheduler::destroy(); + } +} + +async void SimulationTask(struct Semaphore sem) { + printf("SimulationTask\n"); + struct OptionalRes res = await sem.acquire(); + if (res.has_error) { + printf("has_error"); + } else { + printf("Task acquired a permit\n"); + await sleep(2000); + res.res.drop(); + printf("Task released a permit\n"); + } + + isComplete(); +} + +async void test_semaphore() { + printf("test_semaphore\n"); + struct Semaphore sem = struct Semaphore::new(2); + for (int i = 0; i < 3; i++) { + struct Scheduler::spawn(SimulationTask(sem)); + } + isComplete(); +} + +async void test_mutex() { + printf("test_mutex\n"); + struct Mutex mutex = struct Mutex::new(2); + await mutex.lock(); + struct Scheduler::destroy(); +} + +int main() { + struct Scheduler::init(4); + struct Scheduler::spawn(test_mutex()); + struct Scheduler::run(); + return 0; +} diff --git a/libcbs/test/scheduler/time_test.cbs b/libcbs/test/scheduler/time_test.cbs new file mode 100644 index 000000000000..96dcdf8707d0 --- /dev/null +++ b/libcbs/test/scheduler/time_test.cbs @@ -0,0 +1,80 @@ + +#include +#include +#include "scheduler.hbs" +#include "time.hbs" + +atomic_int g_task_num = 5; +void isComplete() { + atomic_fetch_sub(&g_task_num, 1); + if (atomic_load(&g_task_num) == 0) { + struct Scheduler::destroy(); + } +} + +async void test_sleep() { + struct Sleep s = sleep(2000); + printf("Start executing the test_sleep task!\n"); + await s; + printf("Task paused for 2 seconds before resuming execution\n"); + printf("Time is elapsed: %d\n", s.is_elapsed()); + s.reset(clock()); + 
printf("The current deadline is %ld\n",s.deadline()); + isComplete(); +} + +async void test_sleep_until() { + printf("Start executing the test_sleep_until task!\n"); + await sleep_until(clock() + 2 * CLOCKS_PER_SEC); + printf("Task paused for 2 seconds before resuming execution\n"); + isComplete(); +} + +async void test_interval() { + printf("Start executing the test_interval task!\n"); + struct Interval inter = interval(2000); + for (int i = 0; i < 5; i++) { + await inter.tick(); + printf("The %dth trigger of the interval\n", i + 1); + } + isComplete(); +} + +async void SimulationTask() { + await sleep(2000); +} + +async void test_timeout() { + printf("Start executing the test_timeout task!\n"); + struct Timeout t = timeout(1999, SimulationTask()); + _Bool res = await t; + if (res == 0) { + printf("task time out."); + } else { + printf("task completed."); + } + isComplete(); +} + +async void test_timeout_at() { + printf("Start executing the test_timeout_at task!\n"); + _Bool res = await timeout_at(clock() + 3 * CLOCKS_PER_SEC, SimulationTask()); + if (res == 0) { + printf("task time out."); + } else { + printf("task completed."); + } + isComplete(); +} + +int main() { + struct Scheduler::init(4); + trait Future *task = test_sleep(); + struct Scheduler::spawn(task); + struct Scheduler::spawn(test_sleep_until()); + struct Scheduler::spawn(test_interval()); + struct Scheduler::spawn(test_timeout()); + struct Scheduler::spawn(test_timeout_at()); + struct Scheduler::run(); + return 0; +} -- Gitee From 9722e0444974013cac9b55dc83264bc3d114c665 Mon Sep 17 00:00:00 2001 From: zhaoxuhui Date: Mon, 31 Mar 2025 09:46:13 +0800 Subject: [PATCH 3/3] [BSC] async modifies generic functions --- clang/lib/Parse/ParseDecl.cpp | 15 ++ clang/lib/Sema/BSC/SemaBSCCoroutine.cpp | 240 ++++++++++++++++++++---- clang/lib/Sema/BSC/SemaBSCTrait.cpp | 6 +- 3 files changed, 217 insertions(+), 44 deletions(-) diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index 3faa4ceba9ca..0212b44dcebd 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -1837,6 +1837,21 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context, ProhibitAttributes(DeclAttrs); SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd, DeclAttrs); +#if ENABLE_BSC + FunctionDecl *FD = dyn_cast_or_null(SingleDecl); + if (!FD) { + FunctionTemplateDecl *FTD = dyn_cast_or_null(SingleDecl); + if (FTD) + FD = dyn_cast_or_null(FTD->getTemplatedDecl()); + } + if (getLangOpts().BSC && FD) { + // If async modifies generic functions or generic member functions, + // the entry is here. 
+ SmallVector Decls = + Actions.ActOnAsyncFunctionDefinition(FD); + return Actions.BuildDeclaratorGroup(Decls); + } +#endif break; } #endif diff --git a/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp b/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp index 559766f6c691..c4fda19c0dd5 100644 --- a/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp +++ b/clang/lib/Sema/BSC/SemaBSCCoroutine.cpp @@ -560,7 +560,8 @@ static RecordDecl *buildOpaqueFutureRecordDecl(Sema &S, FunctionDecl *FD) { // build struct Future for async function static RecordDecl *buildFutureRecordDecl( Sema &S, FunctionDecl *FD, ArrayRef Args, - std::vector> LocalVarList) { + std::vector> LocalVarList, + bool IsTemplate = false) { std::vector> paramList; DeclarationName funcName = FD->getDeclName(); SourceLocation SLoc = FD->getBeginLoc(); @@ -632,10 +633,27 @@ static RecordDecl *buildFutureRecordDecl( addAsyncRecordDecl(S.Context, FutureStateName, S.Context.IntTy, SLoc, ELoc, RD); RD->completeDefinition(); - S.PushOnScopeChains(RD, S.getCurScope(), true); + if (!IsTemplate) + S.PushOnScopeChains(RD, S.getCurScope(), true); return RD; } +// build generic struct Future for async function +static ClassTemplateDecl *buildFutureClassTemplateDecl( + Sema &S, FunctionDecl *FD, ArrayRef Args, + std::vector> LocalVarList, + TemplateParameterList *TParams) { + RecordDecl *RD = buildFutureRecordDecl(S, FD, Args, LocalVarList, true); + SourceLocation SLoc = FD->getBeginLoc(); + ClassTemplateDecl *CTD = ClassTemplateDecl::Create( + S.Context, S.CurContext, SLoc, RD->getDeclName(), + TParams, RD); + S.PushOnScopeChains(CTD, S.getCurScope(), true); + RD->setDescribedClassTemplate(CTD); + CTD->setLexicalDeclContext(S.CurContext); + return CTD; +} + static std::pair generateVoidStruct(Sema &S, SourceLocation BLoc, SourceLocation ELoc) { std::string Recordname = "Void"; @@ -700,13 +718,15 @@ static VarDecl *buildVtableInitDecl(Sema &S, FunctionDecl *FD, return x; } -static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, +static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, QualType RDType, FunctionDecl *FD, - FunctionDecl *FDecl) { + FunctionDecl *FDecl, + bool IsTemplate = false) { FunctionDecl *NewFD = nullptr; SourceLocation SLoc = FD->getBeginLoc(); SourceLocation NLoc = FD->getNameInfo().getLoc(); SourceLocation ELoc = FD->getEndLoc(); + QualType FuncRetType = S.Context.getPointerType(RDType); FunctionDecl::param_const_iterator pi; if (isa(FD)) { @@ -730,8 +750,10 @@ static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, ParmVarDecls.push_back(PVD); } NewFD->setParams(ParmVarDecls); - NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); - S.PushOnScopeChains(NewFD, S.getCurScope(), true); + if (!IsTemplate) { + NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(NewFD, S.getCurScope(), true); + } // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. 
Sema::ContextRAII savedContext(S, NewFD); @@ -742,9 +764,9 @@ static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, std::string IName = "data"; VarDecl *VD = VarDecl::Create( S.Context, NewFD, SLoc, SLoc, &(S.Context.Idents).get(IName), - S.Context.getPointerType(S.Context.getRecordType(RD)), + FuncRetType, S.Context.getTrivialTypeSourceInfo( - S.Context.getPointerType(S.Context.getRecordType(RD)), SLoc), + FuncRetType, SLoc), SC_None); DeclGroupRef DataDG(VD); @@ -786,7 +808,7 @@ static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, // bsc: sizeof(struct __Futurex) Expr *SizeOfExpr = S.CreateUnaryExprOrTypeTraitExpr( - S.Context.getTrivialTypeSourceInfo(S.Context.getRecordType(RD)), + S.Context.getTrivialTypeSourceInfo(RDType), NLoc, UETT_SizeOf, SourceRange()) .get(); SmallVector Args; @@ -798,7 +820,7 @@ static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, SourceLocation()) .get(); CE = S.ImpCastExprToType( - CE, S.Context.getPointerType(S.Context.getRecordType(RD)), + CE, FuncRetType, CK_BitCast) .get(); VD->setInit(CE); @@ -921,12 +943,29 @@ static FunctionDecl *buildFutureInitFunctionDefinition(Sema &S, RecordDecl *RD, return NewFD; } +static FunctionTemplateDecl *buildFutureInitFunctionTemplateDefinition(Sema &S, RecordDecl *RD, QualType RDType, + FunctionDecl *FD, + FunctionDecl *FDecl, + TemplateParameterList *TParams) { + FunctionDecl *NewFD = buildFutureInitFunctionDefinition(S, RD, RDType, FD, FDecl, true); + SourceLocation NLoc = NewFD->getNameInfo().getLoc(); + DeclarationName funcName = NewFD->getDeclName(); + FunctionTemplateDecl *FTD= FunctionTemplateDecl::Create(S.Context, NewFD->getDeclContext(), + NLoc, + funcName, TParams, + NewFD); + FTD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(FTD, S.getCurScope(), true); + return FTD; +} + /** * Build the Future init declaration */ static FunctionDecl *buildFutureInitFunctionDeclaration(Sema &S, FunctionDecl *FD, - QualType FuncRetType) { + QualType FuncRetType, + bool IsTempalte = false) { SourceLocation SLoc = FD->getBeginLoc(); SourceLocation NLoc = FD->getNameInfo().getLoc(); SourceLocation ELoc = FD->getEndLoc(); @@ -960,19 +999,40 @@ static FunctionDecl *buildFutureInitFunctionDeclaration(Sema &S, ParmVarDecls.push_back(PVD); } NewFD->setParams(ParmVarDecls); - NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); - S.PushOnScopeChains(NewFD, S.getCurScope(), true); + if (!IsTempalte) { + NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(NewFD, S.getCurScope(), true); + } return NewFD; } +/** + * Build the Future init template declaration + */ +static FunctionTemplateDecl *buildFutureInitFunctionTemplateDeclaration(Sema &S, + FunctionDecl *FD, + QualType FuncRetType, + TemplateParameterList *TParams) { + FunctionDecl *NewFD = buildFutureInitFunctionDeclaration(S, FD, FuncRetType, true); + SourceLocation NLoc = NewFD->getNameInfo().getLoc(); + DeclarationName funcName = NewFD->getDeclName(); + FunctionTemplateDecl *FTD= FunctionTemplateDecl::Create(S.Context, NewFD->getDeclContext(), + NLoc, + funcName, TParams, + NewFD); + FTD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(FTD, S.getCurScope(), true); + return FTD; +} + // build function struct Future for async function static FunctionDecl * -buildFutureStructInitFunctionDefinition(Sema &S, RecordDecl *RD, - FunctionDecl *OriginFD) { 
+buildFutureStructInitFunctionDefinition(Sema &S, RecordDecl *RD, QualType FuncRetType, + FunctionDecl *OriginFD, bool IsTemplate = false) { SourceLocation SLoc = OriginFD->getBeginLoc(); SourceLocation NLoc = OriginFD->getNameInfo().getLoc(); SourceLocation ELoc = OriginFD->getEndLoc(); - QualType FuncRetType = S.Context.getRecordType(RD); + // QualType FuncRetType = S.Context.getRecordType(RD); SmallVector ParamTys; FunctionDecl::param_const_iterator pi; for (pi = OriginFD->param_begin(); pi != OriginFD->param_end(); pi++) { @@ -989,6 +1049,7 @@ buildFutureStructInitFunctionDefinition(Sema &S, RecordDecl *RD, S.Context, OriginFD->getDeclContext(), SLoc, NLoc, ELoc, &(S.Context.Idents).get(FuncName), FuncType, OriginFD->getTypeSourceInfo(), SC_None, BMD->getExtendedType()); + // if (BMD->getExtendedType()) } else { NewFD = buildAsyncFuncDecl( S.Context, OriginFD->getDeclContext(), SLoc, NLoc, @@ -1005,8 +1066,10 @@ buildFutureStructInitFunctionDefinition(Sema &S, RecordDecl *RD, ParmVarDecls.push_back(PVD); } NewFD->setParams(ParmVarDecls); - NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); - S.PushOnScopeChains(NewFD, S.getCurScope(), true); + if (!IsTemplate) { + NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(NewFD, S.getCurScope(), true); + } // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. @@ -1098,6 +1161,22 @@ buildFutureStructInitFunctionDefinition(Sema &S, RecordDecl *RD, return NewFD; } +// build function struct Future for async function +static FunctionTemplateDecl * +buildFutureStructInitFunctionTemplateDefinition(Sema &S, RecordDecl *RD, QualType FuncRetType, + FunctionDecl *OriginFD, TemplateParameterList *TParams) { + FunctionDecl *NewFD = buildFutureStructInitFunctionDefinition(S, RD, FuncRetType, OriginFD, true); + SourceLocation NLoc = NewFD->getNameInfo().getLoc(); + DeclarationName funcName = NewFD->getDeclName(); + FunctionTemplateDecl *FTD= FunctionTemplateDecl::Create(S.Context, NewFD->getDeclContext(), + NLoc, + funcName, TParams, + NewFD); + FTD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(FTD, S.getCurScope(), true); + return FTD; +} + static IfStmt *processAwaitExprStatus(Sema &S, int AwaitCount, RecordDecl *RD, Expr *ICE, ParmVarDecl *PVD, VarDecl *PollResultVar, @@ -2427,16 +2506,17 @@ static BSCMethodDecl *buildFreeFunctionDeclaration(Sema &S, RecordDecl *RD, return NewFD; } -static BSCMethodDecl *buildFreeFunctionDefinition(Sema &S, RecordDecl *RD, +static BSCMethodDecl *buildFreeFunctionDefinition(Sema &S, RecordDecl *RD, QualType ParamType, FunctionDecl *FD, - bool IsOptimization) { + bool IsOptimization, + bool IsTemplate = false) { SourceLocation SLoc = FD->getBeginLoc(); SourceLocation NLoc = FD->getNameInfo().getLoc(); SourceLocation ELoc = FD->getEndLoc(); std::string FName = "free"; QualType FuncRetType = S.Context.VoidTy; - QualType ParamType = S.Context.getPointerType(S.Context.getRecordType(RD)); + // QualType ParamType = S.Context.getPointerType(S.Context.getRecordType(RD)); SmallVector ParamTys; ParamTys.push_back(ParamType); @@ -2445,8 +2525,10 @@ static BSCMethodDecl *buildFreeFunctionDefinition(Sema &S, RecordDecl *RD, BSCMethodDecl *NewFD = buildAsyncBSCMethodDecl( S.Context, RD, SLoc, NLoc, ELoc, &(S.Context.Idents).get(FName), FuncType, nullptr, SC_None, RD->getTypeForDecl()->getCanonicalTypeInternal()); - NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); - 
S.PushOnScopeChains(NewFD, S.getCurScope(), true); + if (!IsTemplate) { + NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(NewFD, S.getCurScope(), true); + } S.Context.BSCDeclContextMap[RD->getTypeForDecl()] = RD; @@ -2659,6 +2741,22 @@ static BSCMethodDecl *buildFreeFunctionDefinition(Sema &S, RecordDecl *RD, return NewFD; } +static FunctionTemplateDecl *buildFreeFunctionTemplateDefinition(Sema &S, RecordDecl *RD, QualType ParamType, + FunctionDecl *FD, + bool IsOptimization, + TemplateParameterList *TParams) { + BSCMethodDecl *BD = buildFreeFunctionDefinition(S, RD, ParamType, FD, IsOptimization, true); + SourceLocation NLoc = BD->getNameInfo().getLoc(); + DeclarationName funcName = BD->getDeclName(); + FunctionTemplateDecl *FTD= FunctionTemplateDecl::Create(S.Context, BD->getDeclContext(), + NLoc, + funcName, TParams, + BD); + FTD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(FTD, S.getCurScope(), true); + return FTD; +} + static BSCMethodDecl *buildPollFunctionDeclaration(Sema &S, RecordDecl *RD, RecordDecl *PollResultRD, FunctionDecl *FD) { @@ -2719,11 +2817,12 @@ static BSCMethodDecl *buildPollFunctionDeclaration(Sema &S, RecordDecl *RD, return NewFD->isInvalidDecl() ? nullptr : NewFD; } -static BSCMethodDecl *buildPollFunctionDefinition(Sema &S, RecordDecl *RD, +static BSCMethodDecl *buildPollFunctionDefinition(Sema &S, RecordDecl *RD, QualType ParamType, RecordDecl *PollResultRD, FunctionDecl *FD, RecordDecl *FatPointerRD, - int FutureStateNumber) { + int FutureStateNumber, + bool IsTemplate = false) { SourceLocation SLoc = FD->getBeginLoc(); SourceLocation NLoc = FD->getNameInfo().getLoc(); SourceLocation ELoc = FD->getEndLoc(); @@ -2745,7 +2844,7 @@ static BSCMethodDecl *buildPollFunctionDefinition(Sema &S, RecordDecl *RD, std::string FName = "poll"; QualType FuncRetType = S.Context.getRecordType(PollResultRD); - QualType ParamType = S.Context.getPointerType(S.Context.getRecordType(RD)); + // QualType ParamType = S.Context.getPointerType(S.Context.getRecordType(RD)); SmallVector ParamTys; ParamTys.push_back(ParamType); @@ -2756,8 +2855,10 @@ static BSCMethodDecl *buildPollFunctionDefinition(Sema &S, RecordDecl *RD, S.Context, RD, SLoc, NLoc, ELoc, &(S.Context.Idents).get(FName), OriginType, nullptr, SC_None, RD->getTypeForDecl()->getCanonicalTypeInternal()); - NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); - S.PushOnScopeChains(NewFD, S.getCurScope(), true); + if (!IsTemplate) { + NewFD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(NewFD, S.getCurScope(), true); + } // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. @@ -2882,6 +2983,24 @@ static BSCMethodDecl *buildPollFunctionDefinition(Sema &S, RecordDecl *RD, return NewFD->isInvalidDecl() ? 
nullptr : NewFD; } +static FunctionTemplateDecl *buildPollFunctionTemplateDefinition(Sema &S, RecordDecl *RD, QualType ParamType, + RecordDecl *PollResultRD, + FunctionDecl *FD, + RecordDecl *FatPointerRD, + int FutureStateNumber, + TemplateParameterList *TParams) { + BSCMethodDecl *BD = buildPollFunctionDefinition(S, RD, ParamType, PollResultRD, FD, FatPointerRD, FutureStateNumber, true); + SourceLocation NLoc = BD->getNameInfo().getLoc(); + DeclarationName funcName = BD->getDeclName(); + FunctionTemplateDecl *FTD= FunctionTemplateDecl::Create(S.Context, BD->getDeclContext(), + NLoc, + funcName, TParams, + BD); + FTD->setLexicalDeclContext(S.Context.getTranslationUnitDecl()); + S.PushOnScopeChains(FTD, S.getCurScope(), true); + return FTD; +} + // BSC extensions for await keyword ExprResult Sema::BuildAwaitExpr(SourceLocation AwaitLoc, Expr *E) { assert(E && "null expression"); @@ -3109,12 +3228,35 @@ SmallVector Sema::ActOnAsyncFunctionDefinition(FunctionDecl *FD) { } ReturnTy = Context.getRecordType(std::get<0>(VoidRD)); } + + // If the return type of the function is generic, + // obtain a list of template paramenters + TemplateParameterList *TParams = nullptr; + ArrayRef ArrTArg; + QualType TemplateReturnTy = ReturnTy; + if (auto *ElabType = dyn_cast(ReturnTy.getTypePtr())) + TemplateReturnTy = ElabType->getNamedType(); + if (auto *TSType = dyn_cast(TemplateReturnTy)) { + TemplateDecl *Template = TSType->getTemplateName().getAsTemplateDecl(); + ArrTArg = TSType->template_arguments(); + if (Template) + TParams = Template->getTemplateParameters(); + } + QualType PollResultType = lookupGenericType(*this, FD->getBeginLoc(), ReturnTy, "PollResult"); if (PollResultType.isNull()) { return Decls; } RecordDecl *PollResultRD = PollResultType->getAsRecordDecl(); + if (!PollResultRD) { + if (auto *TSType = dyn_cast(PollResultType)) { + TemplateDecl *Template = TSType->getTemplateName().getAsTemplateDecl(); + ClassTemplateDecl *CTD = dyn_cast(Template); + PollResultRD = CTD->getBSCTemplatedDecl(); + } + } + QualType VtableType = lookupGenericType(*this, FD->getBeginLoc(), ReturnTy, "__Trait_Future_Vtable"); if (VtableType.isNull()) { @@ -3134,14 +3276,33 @@ SmallVector Sema::ActOnAsyncFunctionDefinition(FunctionDecl *FD) { bool IsRecursiveCall = RecursiveCallVisitor(FD).VisitStmt(FD->getBody()); bool IsOptimization = FD->isStatic() && !IsRecursiveCall; - RecordDecl *RD = buildFutureRecordDecl(*this, FD, AwaitFinder.GetAwaitExpr(), - VarFinder.GetLocalVarList()); - auto RDType = Context.getRecordType(RD); - QualType PointerStructTy = Context.getPointerType(RDType); - if (!RD) { - return Decls; + RecordDecl *RD = nullptr; + QualType RDType, PointerStructTy; + if (TParams) { + ClassTemplateDecl *CTD = buildFutureClassTemplateDecl(*this, FD, AwaitFinder.GetAwaitExpr(), + VarFinder.GetLocalVarList(), TParams); + if (!CTD) { + return Decls; + } + TemplateName TN(CTD); + RDType = Context.getTemplateSpecializationType(TN, ArrTArg); + PointerStructTy = Context.getPointerType(RDType); + Decls.push_back(CTD); + Context.BSCDesugaredMap[FD].push_back(CTD); + RD = CTD->getBSCTemplatedDecl(); + } else { + RD = buildFutureRecordDecl(*this, FD, AwaitFinder.GetAwaitExpr(), + VarFinder.GetLocalVarList()); + if (!RD) { + return Decls; + } + RDType = Context.getRecordType(RD); + PointerStructTy = Context.getPointerType(RDType); + Decls.push_back(RD); + Context.BSCDesugaredMap[FD].push_back(RD); } + // Handle declaration first. FunctionDecl *FutureInitDef = buildFutureInitFunctionDeclaration( *this, FD, IsOptimization ? 
RDType : PointerStructTy); @@ -3154,15 +3315,12 @@ SmallVector Sema::ActOnAsyncFunctionDefinition(FunctionDecl *FD) { const int FutureStateNumber = AwaitFinder.GetAwaitExprNum() + 1; - Decls.push_back(RD); - Context.BSCDesugaredMap[FD].push_back(RD); - FunctionDecl *FutureInit = nullptr; if (IsOptimization) { - FutureInit = buildFutureStructInitFunctionDefinition(*this, RD, FD); + FutureInit = buildFutureStructInitFunctionDefinition(*this, RD, RDType, FD); } else { FutureInit = - buildFutureInitFunctionDefinition(*this, RD, FD, FutureInitDef); + buildFutureInitFunctionDefinition(*this, RD, RDType, FD, FutureInitDef); } if (!FutureInit) { @@ -3172,7 +3330,7 @@ SmallVector Sema::ActOnAsyncFunctionDefinition(FunctionDecl *FD) { Context.BSCDesugaredMap[FD].push_back(FutureInit); BSCMethodDecl *FreeDecl = - buildFreeFunctionDefinition(*this, RD, FD, IsOptimization); + buildFreeFunctionDefinition(*this, RD, PointerStructTy, FD, IsOptimization); if (!FreeDecl) { return Decls; } @@ -3180,7 +3338,7 @@ SmallVector Sema::ActOnAsyncFunctionDefinition(FunctionDecl *FD) { Context.BSCDesugaredMap[FD].push_back(FreeDecl); BSCMethodDecl *PollDecl = buildPollFunctionDefinition( - *this, RD, PollResultRD, FD, FatPointerRD, FutureStateNumber); + *this, RD, PointerStructTy, PollResultRD, FD, FatPointerRD, FutureStateNumber); if (!PollDecl) { return Decls; } diff --git a/clang/lib/Sema/BSC/SemaBSCTrait.cpp b/clang/lib/Sema/BSC/SemaBSCTrait.cpp index 31210494b9c4..0bdfa86a437a 100644 --- a/clang/lib/Sema/BSC/SemaBSCTrait.cpp +++ b/clang/lib/Sema/BSC/SemaBSCTrait.cpp @@ -1298,13 +1298,13 @@ void Sema::checkBSCFunctionContainsTrait(Decl *D) { ->getPointeeType() .getCanonicalType() .getTypePtr(); - if (const FunctionProtoType *FPT = + if (const FunctionProtoType *FPT = FT->getAs()) { T = FPT->getReturnType().getTypePtr(); - } else if (const FunctionNoProtoType *FNPT = + } else if (const FunctionNoProtoType *FNPT = FT->getAs()) { T = FNPT->getReturnType().getTypePtr(); - } else + } else return; } else return; -- Gitee
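
Appendix (not part of the patches above): a standalone C sketch of the state-word encoding that patch 2 introduces for `struct Notify` in `notify.hbs`/`notify.cbs`. It mirrors the helpers `set_state`, `get_state`, `get_num_notify_waiters_calls`, and `inc_num_notify_waiters_calls`: the low `NOTIFY_WAITERS_SHIFT` bits of the word hold the `NotifyState`, and the remaining high bits count `notify_waiters()` calls. The patch stores the word in an `atomic_int` field; a plain `uint64_t` is used here to match the helper signatures, and the `main` driver is purely illustrative.

```c
/* Standalone sketch of the Notify state-word encoding (not part of the patch). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NOTIFY_WAITERS_SHIFT 2
#define STATE_MASK (((uint64_t)1 << NOTIFY_WAITERS_SHIFT) - 1)
#define NOTIFY_WAITERS_CALLS_MASK (~STATE_MASK)

enum NotifyState { EMPTY, WAITING, NOTIFIED };

/* Keep the high counter bits, replace the low state bits. */
static uint64_t set_state(uint64_t data, uint64_t state) {
  return (data & NOTIFY_WAITERS_CALLS_MASK) | (state & STATE_MASK);
}

static uint64_t get_state(uint64_t data) { return data & STATE_MASK; }

static uint64_t get_num_notify_waiters_calls(uint64_t data) {
  return (data & NOTIFY_WAITERS_CALLS_MASK) >> NOTIFY_WAITERS_SHIFT;
}

static uint64_t inc_num_notify_waiters_calls(uint64_t data) {
  return data + ((uint64_t)1 << NOTIFY_WAITERS_SHIFT);
}

int main(void) {
  uint64_t word = EMPTY;                      /* state = EMPTY, 0 calls       */
  word = set_state(word, WAITING);            /* a waiter has parked          */
  word = inc_num_notify_waiters_calls(word);  /* one notify_waiters() call    */
  word = inc_num_notify_waiters_calls(word);  /* a second call                */
  assert(get_state(word) == WAITING);
  assert(get_num_notify_waiters_calls(word) == 2);
  word = set_state(word, NOTIFIED);           /* state changes, counter kept  */
  assert(get_num_notify_waiters_calls(word) == 2);
  printf("state=%llu, notify_waiters calls=%llu\n",
         (unsigned long long)get_state(word),
         (unsigned long long)get_num_notify_waiters_calls(word));
  return 0;
}
```

Packing the state and the call counter into a single word presumably lets a `Notified` handle, which records `notify_waiters_calls` when it is created, later detect from one atomic read that a `notify_waiters()` call happened in between.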