From 49488f0e1dd39434bbf2d1b73ae3fd9b5b6d2998 Mon Sep 17 00:00:00 2001 From: Ni Cunshu Date: Wed, 30 Aug 2023 17:59:16 +0800 Subject: [PATCH 1/3] mm: gmem: add gm_dev_destroy euleros inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX --------------------------------------------- The gm_dev_destroy function is needed by some drivers' code Fixes: 848492f233ce ("mm: gmem: Introduce vm_object for gmem") Signed-off-by: Cunshu Ni --- mm/gmem.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mm/gmem.c b/mm/gmem.c index d490e84291af..73ce5b2235a4 100644 --- a/mm/gmem.c +++ b/mm/gmem.c @@ -188,6 +188,15 @@ gm_ret_t gm_dev_create(gm_mmu_t *mmu, void *dev_data, gm_dev_cap_t cap, gm_dev_t } EXPORT_SYMBOL_GPL(gm_dev_create); +// Destroy a GMEM device and reclaim the resources. +gm_ret_t gm_dev_destroy(gm_dev_t *dev) +{ + // TODO: implement it + xa_erase(&gm_dev_id_pool, dev->id); + return GM_RET_SUCCESS; +} +EXPORT_SYMBOL_GPL(gm_dev_destroy); + /* Handle the page fault triggered by a given device */ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int behavior) { -- Gitee From bb659473cc9ff43b0f32d2193905a4121df14c2e Mon Sep 17 00:00:00 2001 From: Ni Cunshu Date: Wed, 30 Aug 2023 16:19:45 +0800 Subject: [PATCH 2/3] drivers: remote_pager: introduce remote_pager module for gmem euleros inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX --------------------------------------------- Introduce remote_pager, an extension module for GMEM, which can be used for communication between the remote node and the host Signed-off-by: Cunshu Ni --- drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/remote_pager/Kconfig | 19 + drivers/remote_pager/Makefile | 11 + drivers/remote_pager/main.c | 42 + .../msg_chan/msg_layer/msg_layer.c | 271 +++++++ .../msg_chan/msg_layer/msg_layer.h | 49 ++ drivers/remote_pager/msg_handler.h | 133 ++++ drivers/remote_pager/msg_handler_comm.c | 142 ++++ 
drivers/remote_pager/msg_handler_origin.c | 476 ++++++++++++ drivers/remote_pager/msg_handler_peer.c | 731 ++++++++++++++++++ drivers/remote_pager/svm_proc_mng.c | 419 ++++++++++ drivers/remote_pager/svm_proc_mng.h | 66 ++ drivers/remote_pager/swap/device/ksymbol.c | 83 ++ drivers/remote_pager/swap/device/ksymbol.h | 35 + .../remote_pager/swap/device/swap_manager.c | 256 ++++++ .../remote_pager/swap/device/swap_manager.h | 28 + .../swap/device/swap_policy/policy_list_lru.c | 108 +++ .../swap/device/swap_policy/swap_policy.h | 16 + drivers/remote_pager/wait_station.c | 82 ++ drivers/remote_pager/wait_station.h | 31 + include/linux/remote_pager/msg_chan.h | 44 ++ 22 files changed, 3045 insertions(+) create mode 100644 drivers/remote_pager/Kconfig create mode 100644 drivers/remote_pager/Makefile create mode 100644 drivers/remote_pager/main.c create mode 100644 drivers/remote_pager/msg_chan/msg_layer/msg_layer.c create mode 100644 drivers/remote_pager/msg_chan/msg_layer/msg_layer.h create mode 100644 drivers/remote_pager/msg_handler.h create mode 100644 drivers/remote_pager/msg_handler_comm.c create mode 100644 drivers/remote_pager/msg_handler_origin.c create mode 100644 drivers/remote_pager/msg_handler_peer.c create mode 100644 drivers/remote_pager/svm_proc_mng.c create mode 100644 drivers/remote_pager/svm_proc_mng.h create mode 100644 drivers/remote_pager/swap/device/ksymbol.c create mode 100644 drivers/remote_pager/swap/device/ksymbol.h create mode 100644 drivers/remote_pager/swap/device/swap_manager.c create mode 100644 drivers/remote_pager/swap/device/swap_manager.h create mode 100644 drivers/remote_pager/swap/device/swap_policy/policy_list_lru.c create mode 100644 drivers/remote_pager/swap/device/swap_policy/swap_policy.h create mode 100644 drivers/remote_pager/wait_station.c create mode 100644 drivers/remote_pager/wait_station.h create mode 100644 include/linux/remote_pager/msg_chan.h diff --git a/drivers/Kconfig b/drivers/Kconfig index 
514ae6b24cb2..cc09f02c76d0 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -243,4 +243,6 @@ source "drivers/hte/Kconfig" source "drivers/cdx/Kconfig" +source "drivers/remote_pager/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 7241d80a7b29..45e1f63f20ad 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -195,3 +195,4 @@ obj-$(CONFIG_PECI) += peci/ obj-$(CONFIG_HTE) += hte/ obj-$(CONFIG_DRM_ACCEL) += accel/ obj-$(CONFIG_CDX_BUS) += cdx/ +obj-$(CONFIG_REMOTE_PAGER) += remote_pager/ diff --git a/drivers/remote_pager/Kconfig b/drivers/remote_pager/Kconfig new file mode 100644 index 000000000000..27fb70b017f5 --- /dev/null +++ b/drivers/remote_pager/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "remote pager device" + +config REMOTE_PAGER + tristate "remote pager" + default m + depends on GMEM + help + If unsure, say Y. + +config REMOTE_PAGER_MASTER + tristate "remote pager master" + default m + depends on REMOTE_PAGER + help + If unsure, say Y. + +endmenu diff --git a/drivers/remote_pager/Makefile b/drivers/remote_pager/Makefile new file mode 100644 index 000000000000..7d8cf1627a92 --- /dev/null +++ b/drivers/remote_pager/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_REMOTE_PAGER) += remote_pager.o + +remote_pager-$(CONFIG_REMOTE_PAGER) := main.o \ + wait_station.o \ + msg_handler_comm.o \ + msg_chan/msg_layer/msg_layer.o \ + svm_proc_mng.o + +remote_pager-$(CONFIG_REMOTE_PAGER_MASTER) += msg_handler_origin.o diff --git a/drivers/remote_pager/main.c b/drivers/remote_pager/main.c new file mode 100644 index 000000000000..66f303ce61c9 --- /dev/null +++ b/drivers/remote_pager/main.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. 
+ * Author: Chunsheng Luo + * Co-Author: Jun Chen + */ +#include +#include "msg_chan/msg_layer/msg_layer.h" +#include "msg_handler.h" + +#ifdef CONFIG_REMOTE_PAGER_SLAVE +#include "swap/device/swap_manager.h" +#endif + +static int __init remote_pager_init(void) +{ + msg_handle_init(); + +#ifdef CONFIG_REMOTE_PAGER_SLAVE + init_swap_manager("list_lru"); +#endif + return 0; +} + +static void __exit remote_pager_exit(void) +{ + /* + * If module_init() is implemented, module_exit() + * should be implemented as well. + */ +} + +module_init(remote_pager_init); +module_exit(remote_pager_exit); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("Remote-pager"); +MODULE_ALIAS("Remote-pager"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/remote_pager/msg_chan/msg_layer/msg_layer.c b/drivers/remote_pager/msg_chan/msg_layer/msg_layer.c new file mode 100644 index 000000000000..2adb2f1706f6 --- /dev/null +++ b/drivers/remote_pager/msg_chan/msg_layer/msg_layer.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. 
+ * Author: Chunsheng Luo + * Co-Author: Jiangtian Feng, Jun Chen + */ +#include +#include +#include +#include + +#include "msg_layer.h" + +#define MAX_NUM_NODES 16 +#define MSG_SLEEP_MIN 2 +#define MSG_SLEEP_MAX 3 + +/* Per-node handle */ +struct sock_handle { + int nid; + int status; + int chan_id; + struct task_struct *recv_handler; +}; + +static struct sock_handle sock_handles[MAX_NUM_NODES]; +static struct phys_channel_ops *g_phys_chan_ops; + +int msg_send(int chan_id, void *msg_data, size_t msg_len) +{ + int ret = 0; + + if (!g_phys_chan_ops) + return -ENOENT; + + ret = g_phys_chan_ops->copy_to(chan_id, msg_data, msg_len, 1); + ret |= g_phys_chan_ops->notify(chan_id); + + return ret; +} + +static inline int build_msg(int type, int from_nid, int to_nid, void *msg_data, size_t msg_len) +{ + struct rpg_kmsg_message *msg = (struct rpg_kmsg_message *)msg_data; + + msg->header.type = type; + msg->header.prio = RPG_KMSG_PRIO_NORMAL; + msg->header.size = msg_len; + msg->header.from_nid = from_nid; + msg->header.to_nid = to_nid; + + return 0; +} + +int msg_send_nid(int type, int from_nid, int to_nid, void *msg_data, size_t msg_len) +{ + struct sock_handle *sh = sock_handles + to_nid; + + build_msg(type, from_nid, to_nid, msg_data, msg_len); + + return msg_send(sh->chan_id, msg_data, msg_len); +} +EXPORT_SYMBOL(msg_send_nid); + +int msg_recv(int chan_id, void *buf, size_t len) +{ + if (!g_phys_chan_ops) + return -ENOENT; + + return g_phys_chan_ops->copy_from(chan_id, buf, len, 1); +} + +extern int handle_remote_pager_work(void *msg); +static int recv_handler(void *arg) +{ + struct sock_handle *sh = arg; + + log_info("RECV handler for %d is ready ha %ld\n", sh->nid, sizeof(struct rpg_kmsg_hdr)); + + while (!kthread_should_stop()) { + size_t len; + int ret; + size_t offset; + struct rpg_kmsg_hdr header; + char *data = NULL; + size_t msg_len = 0; + + /* compose header */ + offset = 0; + len = sizeof(header); + while (len > 0) { + ret = msg_recv(sh->chan_id, (char 
*)(&header) + offset, len); + if (ret == -ENOENT) { + pr_err("no msg chan failed\n"); + usleep_range(MSG_SLEEP_MIN, MSG_SLEEP_MAX); + break; + } + + if ((ret == -1) || kthread_should_stop()) + return 0; + + offset += ret; + len -= ret; + } + + if (ret < 0) + break; + + msg_len = header.size; + if (!msg_len) { + pr_err("msg_len is zero failed? from_nid %d prio:%d type:%d size:%ld\n", + header.from_nid, header.prio, header.type, header.size); + continue; + } + + /* compose body */ + data = kmalloc(msg_len, GFP_KERNEL); + BUG_ON(!data && "Unable to alloc a message"); + memcpy(data, &header, sizeof(header)); + + offset = sizeof(header); + len = msg_len - offset; + + while (len > 0) { + ret = msg_recv(sh->chan_id, data + offset, len); + if (ret == -1 || kthread_should_stop()) + return 0; + + offset += ret; + len -= ret; + } + + if (ret < 0) + break; + + /* Call pcn_kmsg upper layer */ + handle_remote_pager_work(data); + } + + return 0; +} + +int msg_open(int nid) +{ + int chan_id = 0; + struct sock_handle *sh = sock_handles + nid; + struct task_struct *tsk_recv; + + if (sh->status == MSG_CHAN_ENABLE) { + pr_err("node:%d msg chan is enabled\n", nid); + return 0; + } + + if (!g_phys_chan_ops) + return -ENOENT; + + chan_id = g_phys_chan_ops->open(nid); + if (chan_id < 0) { + log_err("open msg channel failed %d\n", chan_id); + return chan_id; + } + + tsk_recv = kthread_run(recv_handler, sock_handles + nid, "remote-pager-recv"); + if (IS_ERR(tsk_recv)) { + log_err("Cannot create %s handler, %ld\n", "remote-pager-recv", PTR_ERR(tsk_recv)); + return PTR_ERR(tsk_recv); + } + + sh->chan_id = chan_id; + sh->status = MSG_CHAN_ENABLE; + sh->nid = nid; + sh->recv_handler = tsk_recv; + + pr_err("%s chanid %d\n", __func__, chan_id); + + return chan_id; +} +EXPORT_SYMBOL(msg_open); + +int msg_close(int nid) +{ + struct sock_handle *sh = sock_handles + nid; + + /* TODO: Get sock_handle, then set sock_handle disable and destroy recv task */ + if (sh->status != MSG_CHAN_ENABLE) { + 
pr_err("node:%d msg chan is disabled\n", nid); + return 0; + } + + if (sh->recv_handler) { + kthread_stop(sh->recv_handler); + sh->recv_handler = NULL; + } + + if (g_phys_chan_ops) + g_phys_chan_ops->close(sh->chan_id); + + sh->chan_id = 0; + sh->status = MSG_CHAN_DISABLE; + + return 0; +} +EXPORT_SYMBOL(msg_close); + +int handle_migrate_page(void *peer_addr, struct page *local_page, size_t size, int dir) +{ + if (!g_phys_chan_ops) + return -ENOENT; + + return g_phys_chan_ops->migrate_page(peer_addr, local_page, size, dir); +} +EXPORT_SYMBOL(handle_migrate_page); + +static DEFINE_SPINLOCK(install_lock); +static int default_msg_chan_id; +int msg_layer_install_phy_ops(struct phys_channel_ops *ops, int default_chan_id) +{ + int ret = 0; + + if (!ops) { + pr_err("install NULL as msg channel\n"); + return -EINVAL; + } + + spin_lock(&install_lock); + if (g_phys_chan_ops) { + ret = -EEXIST; + pr_err("phy_ops areadly be installed\n"); + goto unlock; + } + + /* must before msg_open */ + g_phys_chan_ops = ops; + if (default_chan_id >= 0) { + ret = msg_open(default_chan_id); + if (ret) { + pr_err("can not open msg channel %d\n", default_chan_id); + g_phys_chan_ops = NULL; + goto unlock; + } + } + + default_msg_chan_id = default_chan_id; + +unlock: + spin_unlock(&install_lock); + return ret; +} +EXPORT_SYMBOL(msg_layer_install_phy_ops); + +int msg_layer_uninstall_phy_ops(struct phys_channel_ops *ops) +{ + if (!ops || ops != g_phys_chan_ops) { + pr_err("Invalid phy_ops\n"); + return -EINVAL; + } + + spin_lock(&install_lock); + if (default_msg_chan_id >= 0) + msg_close(default_msg_chan_id); + + g_phys_chan_ops = NULL; + default_msg_chan_id = -1; + spin_unlock(&install_lock); + + return 0; +} +EXPORT_SYMBOL(msg_layer_uninstall_phy_ops); +MODULE_LICENSE("GPL"); diff --git a/drivers/remote_pager/msg_chan/msg_layer/msg_layer.h b/drivers/remote_pager/msg_chan/msg_layer/msg_layer.h new file mode 100644 index 000000000000..221842e8c434 --- /dev/null +++ 
b/drivers/remote_pager/msg_chan/msg_layer/msg_layer.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. + * Author: Chunsheng Luo + * Co-Author: Jiangtian Feng + */ +#ifndef __MSG_LAYER_H__ +#define __MSG_LAYER_H__ + +#include +#include + +#define RPG_KMSG_MAX_SIZE (64UL << 10) +#define RPG_KMSG_MAX_PAYLOAD_SIZE \ + (RPG_KMSG_MAX_SIZE - sizeof(struct rpg_kmsg_hdr)) + +/* Enumerate message priority. XXX Priority is not supported yet. */ +enum rpg_kmsg_prio { + RPG_KMSG_PRIO_LOW, + RPG_KMSG_PRIO_NORMAL, + RPG_KMSG_PRIO_HIGH, +}; + +#define MSG_CHAN_DISABLE 0 +#define MSG_CHAN_ENABLE 1 + +struct rpg_kmsg_hdr { + int from_nid :6; + int to_nid :6; + enum rpg_kmsg_prio prio :2; + int type :8; + size_t size; +} __packed; + +struct rpg_kmsg_message { + struct rpg_kmsg_hdr header; + unsigned char data[RPG_KMSG_MAX_PAYLOAD_SIZE]; +} __packed; + +int msg_send_nid(int type, int from_nid, int to_nid, void *msg_data, size_t msg_len); +int msg_send(int chan_id, void *msg_data, size_t msg_len); +int msg_recv(int chan_id, void *buf, size_t len); +int msg_open(int nid); +int msg_close(int nid); +int handle_migrate_page(void *peer_addr, struct page *local_page, size_t size, int dir); + +#endif diff --git a/drivers/remote_pager/msg_handler.h b/drivers/remote_pager/msg_handler.h new file mode 100644 index 000000000000..0b24f7e97976 --- /dev/null +++ b/drivers/remote_pager/msg_handler.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. 
+ * Author: Liming Huang + * Co-Author: Jun Chen + * + */ +#ifndef _REMOTE_PAGER_MSG_HANDLER_H_ +#define _REMOTE_PAGER_MSG_HANDLER_H_ + +#include +#include + +#ifdef WITH_GMEM +#include +#endif + +#include "wait_station.h" +#include "msg_chan/msg_layer/msg_layer.h" + +#define PXD_JUDGE(pxd) (((pxd) == NULL) || (pxd##_none(*(pxd##_t *)(pxd)) != 0) || \ + (pxd##_bad(*(pxd##_t *)(pxd)) != 0)) +#define PMD_JUDGE(pmd) (((pmd) == NULL) || (pmd_none(*(pmd_t *)(pmd)) != 0) || \ + (pmd_bad(*(pmd_t *)(pmd)) != 0)) + +#define GMEM_COPY_PAGE 1 + +/* Function pointer to callback function */ +typedef int (*rpg_kmsg_cbftn)(struct rpg_kmsg_message *); + +enum rpg_kmsg_type { + /* TASK CMD */ + GMEM_TASK_PAIRING_REQUEST, + GMEM_TASK_EXIT_ORIGIN, + GMEM_TASK_EXIT_REMOTE, + + /* VMA CMD */ + GMEM_ALLOC_VMA_REQUEST, + GMEM_FREE_VMA_REQUEST, + + /* PAGE CMD */ + GMEM_ALLOC_PAGE_REQUEST, + GMEM_FREE_PAGE_REQUEST, + GMEM_PAGE_FAULT_REQUEST, + GMEM_EVICT_PAGE_REQUEST, + + /* ADVISE CMD */ + GMEM_HMADVISE_REQUEST, + GMEM_HMEMCPY_REQUEST, + + GMEM_COMMON_RESPONSE, + GMEM_MSG_MAX_ID, +}; + +enum msg_location { + MSG_ON_ORIGIN, + MSG_ON_REMOTE, +}; + +struct rpg_kmsg_work { + struct work_struct work; + void *msg; +}; + +struct msg_handler_st { + rpg_kmsg_cbftn fnt; +}; + +struct comm_msg_rsp { + struct rpg_kmsg_hdr header; + int peer_ws; + int ret; +}; + +struct gm_pair_msg_rq { + struct rpg_kmsg_hdr header; + unsigned int my_ws; + unsigned int my_pid; + unsigned int peer_nid; + unsigned int peer_pid; +}; + +struct gm_pager_msg_rq { + struct rpg_kmsg_hdr header; + unsigned int my_ws; + unsigned int peer_pid; + unsigned long va; + unsigned long dma_addr; + unsigned long size; + unsigned long prot; + unsigned long flags; + int behavior; +}; + +struct gm_evict_page_msg_rq { + struct rpg_kmsg_hdr header; + unsigned int peer_pid; + unsigned int ws; + unsigned long va; + unsigned long size; +}; + + +int gmem_register_pair_remote_task(int origin_nid, int origin_pid, int remote_nid, int remote_pid); 
+ +#ifdef WITH_GMEM +gm_dev_t *gmem_id_to_device(unsigned int id); +#endif + + +/* msg handler */ +int gmem_handle_task_pairing(struct rpg_kmsg_message *msg); +int gmem_handle_comm_msg_rsp(struct rpg_kmsg_message *msg); +int gmem_handle_alloc_vma_fixed(struct rpg_kmsg_message *msg); +int gmem_handle_free_vma(struct rpg_kmsg_message *msg); + +int gmem_handle_alloc_page(struct rpg_kmsg_message *msg); +int gmem_handle_free_page(struct rpg_kmsg_message *msg); +int gmem_handle_hmadvise(struct rpg_kmsg_message *msg); +int gmem_handle_hmemcpy(struct rpg_kmsg_message *msg); +int gmem_handle_dev_fault(struct rpg_kmsg_message *msg); +int gmem_handle_evict_page(struct rpg_kmsg_message *msg); + +int gmem_add_to_svm_proc(int my_nid, int my_pid, int peer_nid, int peer_pid); +int gmem_send_comm_msg_reply(unsigned int from_nid, unsigned int to_nid, + unsigned int peer_ws, int ret); + +int handle_remote_pager_work(void *msg); +int msg_handle_init(void); + +#endif diff --git a/drivers/remote_pager/msg_handler_comm.c b/drivers/remote_pager/msg_handler_comm.c new file mode 100644 index 000000000000..129a4247305f --- /dev/null +++ b/drivers/remote_pager/msg_handler_comm.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (C) 2023- Huawei, Inc. 
+ * Author: Chushu Ni + * Co-Author: Chunsheng Luo + */ +#include +#include +#include +#include + +#include "msg_handler.h" +#include "svm_proc_mng.h" + +static struct workqueue_struct *remote_pager_wq; + +struct msg_handler_st rpg_kmsg_cbftns[GMEM_MSG_MAX_ID] = { +#ifdef CONFIG_REMOTE_PAGER_SLAVE + /* HOST TO REMOTE */ + [GMEM_TASK_PAIRING_REQUEST] = { + gmem_handle_task_pairing + }, + [GMEM_ALLOC_VMA_REQUEST] = { + gmem_handle_alloc_vma_fixed + }, + [GMEM_FREE_VMA_REQUEST] = { + gmem_handle_free_vma + }, + [GMEM_ALLOC_PAGE_REQUEST] = { + gmem_handle_alloc_page + }, + [GMEM_FREE_PAGE_REQUEST] = { + gmem_handle_free_page + }, + [GMEM_HMADVISE_REQUEST] = { + gmem_handle_hmadvise + }, + [GMEM_HMEMCPY_REQUEST] = { + gmem_handle_hmemcpy + }, +#endif + +#ifdef CONFIG_REMOTE_PAGER_MASTER + /* REMOTE TO HOST */ + [GMEM_PAGE_FAULT_REQUEST] = { + gmem_handle_dev_fault + }, + [GMEM_EVICT_PAGE_REQUEST] = { + gmem_handle_evict_page + }, +#endif + + /* BOTH */ + [GMEM_COMMON_RESPONSE] = { + gmem_handle_comm_msg_rsp + }, +}; + +int gmem_handle_comm_msg_rsp(struct rpg_kmsg_message *msg) +{ + struct comm_msg_rsp *rsp = (struct comm_msg_rsp *)msg; + struct wait_station *my_ws = wait_station(rsp->peer_ws); + + my_ws->private = rsp; + /* must first set my_ws */ + smp_rmb(); + complete(&my_ws->pendings); + + return 0; +} + +int gmem_send_comm_msg_reply(unsigned int from_nid, unsigned int to_nid, + unsigned int peer_ws, int reply) +{ + struct comm_msg_rsp rsp; + int ret = reply; + + rsp.ret = reply; + rsp.peer_ws = peer_ws; + ret = msg_send_nid(GMEM_COMMON_RESPONSE, from_nid, + to_nid, &rsp, sizeof(struct comm_msg_rsp)); + + return ret; +} + +int gmem_add_to_svm_proc(int my_nid, int my_pid, int peer_nid, int peer_pid) +{ + struct svm_proc *peer_proc; + + peer_proc = alloc_svm_proc(my_nid, my_pid, peer_nid, peer_pid); + if (!peer_proc) + return -1; + + return 0; +} + +void process_remote_pager_work(struct work_struct *work) +{ + struct rpg_kmsg_work *w = container_of(work, struct 
rpg_kmsg_work, work); + struct rpg_kmsg_message *msg = w->msg; + rpg_kmsg_cbftn ftn; + + ftn = rpg_kmsg_cbftns[msg->header.type].fnt; + if (ftn != NULL) + ftn(msg); + else + pr_err("No callback registered for %d\n", msg->header.type); + kfree(w); +} + +int handle_remote_pager_work(void *msg) +{ + struct rpg_kmsg_work *w = kmalloc(sizeof(*w), GFP_ATOMIC); + + w->msg = msg; + + INIT_WORK(&w->work, process_remote_pager_work); + /* should firstly initialize w */ + smp_wmb(); + queue_work(remote_pager_wq, &w->work); + + return 0; +} + +int msg_handle_init(void) +{ + unsigned int flags = __WQ_LEGACY | WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; + + remote_pager_wq = alloc_workqueue("remote_wq", flags, 0); + if (!remote_pager_wq) { + pr_err("%s alloc workqueue failed %lx\n", __func__, (unsigned long)remote_pager_wq); + return -1; + } + + pr_err("%s alloc workqueue%lx\n", __func__, (unsigned long)remote_pager_wq); +#ifndef WITH_GMEM + msg_open(0); +#endif + return 0; +} diff --git a/drivers/remote_pager/msg_handler_origin.c b/drivers/remote_pager/msg_handler_origin.c new file mode 100644 index 000000000000..25a772eef031 --- /dev/null +++ b/drivers/remote_pager/msg_handler_origin.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. + * Author: Bin Wang + * Co-Author: Chunsheng Luo, Cunshu Ni + * + */ +#include +#include +#include +#include +#include +#include + +#include "msg_handler.h" +#include "wait_station.h" +#include "svm_proc_mng.h" + +#define NPU_PAGE_SIZE PAGE_SIZE +#define MAX_NR_NPU 8 +#define GMEM_DEBUG 0 + +static gm_dev_t *gm_devs[MAX_NR_NPU]; + +gm_dev_t *gmem_id_to_device(unsigned int id) +{ + if (id >= MAX_NR_NPU) { + pr_err("device id is invalid. 
(dev_id = %u)\n", id); + return NULL; + } + + return gm_devs[id]; +} + +int gmem_register_pair_remote_task(int origin_nid, int origin_pid, int remote_nid, int remote_pid) +{ + struct gm_pair_msg_rq req; + struct comm_msg_rsp *rsp; + int ret = 0; + struct wait_station *ws; + + /* open msg chan */ + pr_err("%s origin_nid %d, origin_pid %d, remote_nid %d, remote_pid %d\n", __func__, + origin_nid, origin_pid, remote_nid, remote_pid); + ret = msg_open(remote_nid); + if (ret < 0) { + pr_err("%s open msg chan failed\n", __func__); + return ret; + } + + /* start pairing */ + ws = get_wait_station(); + req.my_pid = origin_pid; + req.my_ws = ws->id; + req.peer_nid = remote_nid; + req.peer_pid = remote_pid; + + ret = msg_send_nid(GMEM_TASK_PAIRING_REQUEST, origin_nid, + remote_nid, &req, sizeof(struct gm_pair_msg_rq)); + rsp = wait_at_station(ws); + if ((long)rsp != -ETIMEDOUT) { + ret = rsp->ret; + kfree(rsp); + gmem_add_to_svm_proc(origin_nid, origin_pid, remote_nid, remote_pid); + } + + return ret; +} +EXPORT_SYMBOL(gmem_register_pair_remote_task); + +int gmem_handle_dev_fault(struct rpg_kmsg_message *msg) +{ + int ret; + struct gm_pager_msg_rq *recv = (struct gm_pager_msg_rq *)msg; + unsigned int my_pid = recv->peer_pid; + unsigned int nid = recv->header.to_nid; + unsigned int peer_nid = recv->header.from_nid; + unsigned int peer_ws = recv->my_ws; + gm_dev_t *dev = gm_devs[peer_nid]; + struct task_struct *tsk; + struct mm_struct *mm; + + tsk = find_get_task_by_vpid(my_pid); + if (!tsk) { + pr_err("svm process does not have task_struct\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto out; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("no mm\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto put_task; + } + + if (!dev) { + pr_info("gmem: device get failed, dev_id %ld\n", (unsigned long)peer_nid); + ret = -ENODEV; + goto put_mm; + } + + ret = gm_dev_fault(mm, recv->va, dev, 0); + if (ret != GM_RET_SUCCESS && ret != GM_RET_PAGE_EXIST) { + pr_info("gmem dev fault failed\n"); + ret 
= -EFAULT; + goto put_mm; + } + +put_mm: + mmput(mm); +put_task: + put_task_struct(tsk); +out: + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +gm_ret_t gmem_map(struct gm_fault_t *gmf) +{ + int ret = 0; + struct wait_station *ws; + struct comm_msg_rsp *rsp; + struct mm_struct *mm = gmf->mm; + struct svm_proc *proc = search_svm_proc_by_mm(mm); + struct gm_pager_msg_rq req = { + .peer_pid = proc->peer_pid, + .va = gmf->va, + .size = gmf->size, + .behavior = gmf->behavior + }; + + if (!proc) { + pr_err("can not find proc\n"); + return -EBUSY; + } + + ws = get_wait_station(); + req.my_ws = ws->id; + + if (gmf->copy) { + req.flags |= GMEM_COPY_PAGE; + req.dma_addr = gmf->dma_addr; + } + + ret = msg_send_nid(GMEM_ALLOC_PAGE_REQUEST, proc->nid, proc->peer_nid, + &req, sizeof(struct gm_pager_msg_rq)); + rsp = wait_at_station(ws); + if ((long)rsp == -ETIMEDOUT) + return -EBUSY; + ret |= rsp->ret; + kfree(rsp); + if (ret) { + if (ret == GM_RET_MIGRATING) { + pr_info("gmem: race with migrating\n"); + return ret; + } else { + pr_info("send alloc page message failed %d\n", ret); + return GM_RET_FAILURE_UNKNOWN; + } + } + + return GM_RET_SUCCESS; +} + +gm_ret_t gmem_unmap(struct gm_fault_t *gmf) +{ + int ret; + struct wait_station *ws; + struct comm_msg_rsp *rsp; + struct mm_struct *mm = gmf->mm; + struct svm_proc *proc = search_svm_proc_by_mm(mm); + struct gm_pager_msg_rq req = { + .peer_pid = proc->peer_pid, + .va = gmf->va, + .size = gmf->size, + }; + + if (!proc) { + pr_err("can not find proc\n"); + return -EBUSY; + } + + if (gmf->copy) { + req.flags |= GMEM_COPY_PAGE; + req.dma_addr = gmf->dma_addr; + } + + ws = get_wait_station(); + req.my_ws = ws->id; + + ret = msg_send_nid(GMEM_FREE_PAGE_REQUEST, proc->nid, proc->peer_nid, + &req, sizeof(struct gm_pager_msg_rq)); + rsp = wait_at_station(ws); + if ((long)rsp == -ETIMEDOUT) + return -EBUSY; + ret |= rsp->ret; + kfree(rsp); + if (ret) { + pr_info("send free page message failed 
%d\n", ret); + return GM_RET_FAILURE_UNKNOWN; + } + + return GM_RET_SUCCESS; +} + +gm_ret_t gmem_alloc(struct gm_fault_t *gmf) +{ + int ret = 0; + struct wait_station *ws; + struct comm_msg_rsp *rsp; + struct mm_struct *mm = gmf->mm; + struct svm_proc *proc = search_svm_proc_by_mm(mm); + struct gm_pager_msg_rq req = { + .peer_pid = proc->peer_pid, + .va = gmf->va, + .size = gmf->size, + .prot = gmf->prot, + }; + + if (!proc) { + pr_err("can not find proc\n"); + return -EBUSY; + } + + ws = get_wait_station(); + req.my_ws = ws->id; + ret = msg_send_nid(GMEM_ALLOC_VMA_REQUEST, proc->nid, proc->peer_nid, + &req, sizeof(struct gm_pager_msg_rq)); + rsp = wait_at_station(ws); + if ((long)rsp == -ETIMEDOUT) + return -EBUSY; + ret |= rsp->ret; + kfree(rsp); + if (ret) { + pr_info("send alloc vma message failed %d\n", ret); + return GM_RET_NOMEM; + } + + return GM_RET_SUCCESS; +} + +gm_ret_t gmem_free(struct gm_fault_t *gmf) +{ + int ret = 0; + struct wait_station *ws; + struct comm_msg_rsp *rsp; + struct mm_struct *mm = gmf->mm; + struct svm_proc *proc = search_svm_proc_by_mm(mm); + struct gm_pager_msg_rq req = { + .peer_pid = proc->peer_pid, + .va = gmf->va, + .size = gmf->size, + }; + + if (!proc) { + pr_err("can not find proc\n"); + return -EBUSY; + } + + ws = get_wait_station(); + req.my_ws = ws->id; + ret = msg_send_nid(GMEM_FREE_VMA_REQUEST, proc->nid, proc->peer_nid, + &req, sizeof(struct gm_pager_msg_rq)); + rsp = wait_at_station(ws); + if ((long)rsp == -ETIMEDOUT) + return -EBUSY; + ret |= rsp->ret; + kfree(rsp); + if (ret) { + pr_info("send free vma message failed %d\n", ret); + return GM_RET_FAILURE_UNKNOWN; + } + + return GM_RET_SUCCESS; +} + +int gmem_handle_evict_page(struct rpg_kmsg_message *msg) +{ + struct gm_evict_page_msg_rq *recv = (struct gm_evict_page_msg_rq *)msg; + unsigned int nid = recv->header.to_nid; + unsigned int peer_nid = recv->header.from_nid; + unsigned int peer_ws = recv->ws; + unsigned int pid = recv->peer_pid; + unsigned long size = 
recv->size; + unsigned long addr = recv->va; + struct vm_area_struct *vma; + struct page *page; + dma_addr_t dma_addr; + gm_mapping_t *gm_page; + struct device *dma_dev; + struct gm_fault_t gmf; + struct svm_proc *proc; + struct task_struct *tsk; + struct mm_struct *mm; + int ret; + struct folio *folio = NULL; + + proc = search_svm_proc_by_pid(pid); + if (!proc) { + pr_err("can not find svm_proc of task-%d\n", pid); + ret = -EINVAL; + goto response; + } + + tsk = find_get_task_by_vpid(pid); + if (!tsk) { + pr_err("can not find task of task-%d\n", pid); + ret = -EINVAL; + goto response; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("task-%d exited\n", pid); + ret = -EINTR; + goto put_task; + } + + if (mm != proc->mm) { + pr_err("miss match\n"); + ret = -EINTR; + goto put_mm; + } + + gmf.mm = mm; + gmf.va = addr; + gmf.size = size; + gmf.copy = GMEM_COPY_PAGE; + + vma = find_vma(mm, addr); + if (!vma || !vma->vm_obj) { + pr_err("evict addr %lx vma %lx vm_obj %lx, no vma or vm_obj\n", addr, + (unsigned long)vma, vma ? 
(unsigned long)vma->vm_obj : 0); + ret = -EINVAL; + goto put_mm; + } + + gm_page = vm_object_lookup(vma->vm_obj, addr); + if (!gm_page) { + pr_err("evictim gm_page is NULL\n"); + ret = -EINVAL; + goto put_mm; + } + + mutex_lock(&gm_page->lock); + if (gm_mapping_willneed(gm_page)) { + pr_info("gmem: racing with prefetch or willneed so cancel evict\n"); + clear_gm_mapping_willneed(gm_page); + ret = -EINVAL; + goto unlock; + } + + if (!gm_mapping_device(gm_page)) { + pr_info("gmem: page is not in device\n"); + ret = -EINVAL; + goto unlock; + } + + if (size == HPAGE_PMD_SIZE) { + folio = vma_alloc_folio(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, addr, true); + page = &folio->page; + } else { + page = alloc_page(GFP_KERNEL); + } + + if (!page) { + pr_err("gmem: gmem_evict_page alloc hugepage failed\n"); + ret = -ENOMEM; + goto unlock; + } + + dma_dev = gm_page->dev->dma_dev; + dma_addr = dma_map_page(dma_dev, page, 0, size, DMA_BIDIRECTIONAL); + gmf.dev = gm_page->dev; + gmf.dma_addr = dma_addr; + + ret = gmem_unmap(&gmf); + dma_unmap_page(dma_dev, dma_addr, size, DMA_BIDIRECTIONAL); + if (ret) { + pr_err("gmem_unmap failed, ret %d\n", ret); + put_page(page); + goto unlock; + } + + set_gm_mapping_host(gm_page, page); + +unlock: + mutex_unlock(&gm_page->lock); +put_mm: + mmput(mm); +put_task: + put_task_struct(tsk); +response: + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +gm_ret_t gmem_create(gm_dev_t *dev, void **pmap) +{ + return GM_RET_SUCCESS; +} + +gm_mmu_t gm_mmu = { + .peer_va_alloc_fixed = gmem_alloc, + .pmap_create = gmem_create, + .peer_va_free = gmem_free, + .peer_map = gmem_map, + .peer_unmap = gmem_unmap, +}; + +#define ASCEND910_HBM_START 0x0000000800000000 +#define ASCEND910_HBM_END 0x0000000fffffffff + +gm_ret_t mmu_dev_create(struct device *dev, int devid) +{ + gm_ret_t ret; + + ret = gm_dev_create(&gm_mmu, NULL, GM_DEV_CAP_REPLAYABLE | GM_DEV_CAP_PEER, &dev->gm_dev); + if (ret != GM_RET_SUCCESS) { + pr_err("NPU 
gmem device create failed\n"); + return ret; + } + + ret = gm_dev_register_physmem(dev->gm_dev, ASCEND910_HBM_START, ASCEND910_HBM_END); + if (ret != GM_RET_SUCCESS) { + pr_err("NPU gmem device register physical memory failed\n"); + goto free_gm_dev; + } + + dev->gm_dev->dma_dev = dev; + gm_devs[devid] = dev->gm_dev; + + pr_info("Create NPU gmem device and register HBM\n"); + return ret; +free_gm_dev: + gm_dev_destroy(dev->gm_dev); + dev->gm_dev = NULL; + return ret; +} +EXPORT_SYMBOL(mmu_dev_create); + +gm_ret_t mmu_as_attach(struct device *dev) +{ + gm_ret_t ret; + gm_dev_t *gm_dev = dev->gm_dev; + gm_context_t *gm_ctx; + + if (!gm_dev) { + pr_err("NPU device gm_dev is NULL\n"); + return GM_RET_FAILURE_UNKNOWN; + } + + if (!current->mm->gm_as) { + ret = gm_as_create(0, ULONG_MAX, GM_AS_ALLOC_DEFAULT, NPU_PAGE_SIZE, + ¤t->mm->gm_as); + if (ret != GM_RET_SUCCESS) { + pr_err("Process %d create gm_as failed\n", current->pid); + return ret; + } + } + + ret = gm_as_attach(current->mm->gm_as, gm_dev, 0, 1, &gm_ctx); + if (ret != GM_RET_SUCCESS) { + pr_err("gm_dev attach to process %d failed\n", current->pid); + return ret; + } + + pr_info("Attach gm_dev to process %d\n", current->pid); + return ret; +} +EXPORT_SYMBOL(mmu_as_attach); diff --git a/drivers/remote_pager/msg_handler_peer.c b/drivers/remote_pager/msg_handler_peer.c new file mode 100644 index 000000000000..9912f89a43c6 --- /dev/null +++ b/drivers/remote_pager/msg_handler_peer.c @@ -0,0 +1,731 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (C) 2023- Huawei, Inc. 
+ * Author: Chunsheng Luo + * Co-Author: Weixi Zhu, Jun Chen, Jiangtian Feng + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "msg_handler.h" +#include "svm_proc_mng.h" +#include "swap/device/swap_manager.h" + +#define GM_READ 0x00000001 +#define GM_WRITE 0x00000002 +#define GM_EXEC 0x00000004 + +#define MAX_RETRY_TIME 10 + +#ifndef WITH_GMEM +enum gm_ret { + GM_RET_SUCCESS = 0, + GM_RET_NOMEM, + GM_RET_PAGE_EXIST, + GM_RET_DMA_ERROR, + GM_RET_MIGRATING, + GM_RET_FAILURE_UNKNOWN, + GM_RET_UNIMPLEMENTED, +}; +#endif + +static inline vm_fault_t get_page_size(enum page_entry_size pe_size, + unsigned int *page_size, + unsigned long *addr) +{ + switch (pe_size) { + case PE_SIZE_PTE: + *page_size = PAGE_SIZE; + break; + case PE_SIZE_PMD: + *page_size = HPAGE_SIZE; + *addr = round_down(*addr, HPAGE_SIZE); + break; + default: + return VM_FAULT_FALLBACK; + } + return 0; +} + +static inline bool addr_is_mapped(unsigned long addr, pmd_t *pmd, + enum page_entry_size pe_size) +{ + pte_t *pte; + bool ret; + + if (pe_size == PE_SIZE_PMD) + return !pmd_none(*pmd); + if (pmd_none(*pmd)) + return false; + pte = pte_offset_map(pmd, addr); + ret = !pte_none(*pte); + pte_unmap(pte); + return ret; +} + +static vm_fault_t __gmem_fault(struct vm_fault *vmf, + enum page_entry_size pe_size) +{ + vm_fault_t ret = VM_FAULT_SIGBUS; + int msg_ret = GM_RET_FAILURE_UNKNOWN; + unsigned long addr = vmf->address; + unsigned int page_size; + struct gm_pager_msg_rq req = { 0 }; + struct comm_msg_rsp *rsp; + struct wait_station *ws; + struct page_info *page_info; + struct mm_struct *mm; + struct svm_proc *proc; + + ret = get_page_size(pe_size, &page_size, &addr); + if (ret) + return ret; + + mm = vmf->vma->vm_mm; + proc = search_svm_proc_by_mm(mm); + if (!proc) { + pr_err("%s: failed to get svm proc\n", __func__); + return VM_FAULT_SIGBUS; + } + + page_info = get_page_info(&proc->pager, addr, page_size, 
page_size); + if (!page_info) { + pr_err("%s: failed to get page_info\n", __func__); + return VM_FAULT_SIGBUS; + } + mutex_lock(&page_info->lock); + + if (addr_is_mapped(addr, vmf->pmd, pe_size)) + goto unlock; + + req.va = addr; + req.size = page_size; + + /* start fault */ + ws = get_wait_station(); + req.my_ws = ws->id; + req.peer_pid = proc->peer_pid; + + ret = msg_send_nid(GMEM_PAGE_FAULT_REQUEST, proc->nid, proc->peer_nid, + &req, sizeof(req)); + rsp = wait_at_station(ws); + if ((long)rsp != -ETIMEDOUT) { + msg_ret = rsp->ret; + kfree(rsp); + } + if (msg_ret == GM_RET_PAGE_EXIST) { + pr_warn("gmem: weird page exist\n"); + } else if (msg_ret != GM_RET_SUCCESS) { + ret = VM_FAULT_SIGBUS; + goto unlock; + } + + ret = VM_FAULT_NOPAGE; + +unlock: + mutex_unlock(&page_info->lock); + return ret; +} + +static vm_fault_t gmem_fault(struct vm_fault *vmf) +{ + return __gmem_fault(vmf, PE_SIZE_PTE); +} + +static vm_fault_t gmem_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size) +{ + int ret = 0; + + ret = __gmem_fault(vmf, pe_size); + + return ret; +} + +static const struct vm_operations_struct gmem_vma_ops = { + .fault = gmem_fault, + .huge_fault = gmem_huge_fault, +}; + +int gmem_handle_task_pairing(struct rpg_kmsg_message *msg) +{ + struct gm_pair_msg_rq *recv = (struct gm_pair_msg_rq *)msg; + unsigned int peer_nid = recv->header.from_nid; + unsigned int peer_pid = recv->my_pid; + unsigned int peer_ws = recv->my_ws; + unsigned int my_nid = recv->peer_nid; + unsigned int my_pid = recv->peer_pid; + int ret = 0; + + gmem_add_to_svm_proc(my_nid, my_pid, peer_nid, peer_pid); + gmem_send_comm_msg_reply(my_nid, peer_nid, peer_ws, ret); + kfree(msg); + return 0; +} + +#define VM_PEER_SHARED BIT(56) + +int vma_is_gmem(struct vm_area_struct *vma) +{ + return (vma->vm_flags & VM_PEER_SHARED) != 0; +} + +int gmem_handle_alloc_vma_fixed(struct rpg_kmsg_message *msg) +{ + struct gm_pager_msg_rq *data = (struct gm_pager_msg_rq *)msg; + unsigned long va = data->va; + 
unsigned long size = data->size; + unsigned long gmem_prot = data->prot; + unsigned int my_pid = data->peer_pid; + unsigned int peer_nid = data->header.from_nid; + unsigned int nid = data->header.to_nid; + unsigned int peer_ws = data->my_ws; + unsigned long prot = 0; + unsigned long populate; + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long addr; + struct vm_area_struct *vma; + int ret = GM_RET_SUCCESS; + + if (gmem_prot & GM_READ) + prot |= PROT_READ; + if (gmem_prot & GM_WRITE) + prot |= PROT_WRITE; + if (gmem_prot & GM_EXEC) + prot |= PROT_EXEC; + + tsk = find_get_task_by_vpid(my_pid); + if (!tsk) { + pr_err("svm process does not have task_struct\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto out; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("no mm\n"); + ret = -1; + goto put_task; + } + + mmap_write_lock(mm); + current->mm = mm; + addr = __do_mmap_mm(mm, NULL, va, size, prot, + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, 0, + 0, &populate, NULL); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto unlock; + } + + vma = find_vma(mm, addr); + if (!vma) { + ret = GM_RET_FAILURE_UNKNOWN; + goto unlock; + } + + vma->vm_ops = &gmem_vma_ops; + vma->vm_flags |= VM_HUGEPAGE; + vma->vm_flags |= VM_PEER_SHARED; + +unlock: + current->mm = NULL; + mmap_write_unlock(mm); + mmput(mm); +put_task: + put_task_struct(tsk); +out: + pr_info("%s va %lx vma message %d\n", __func__, va, ret); + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +int gmem_handle_free_vma(struct rpg_kmsg_message *msg) +{ + struct gm_pager_msg_rq *recv = (struct gm_pager_msg_rq *)msg; + unsigned long va = recv->va; + unsigned long size = recv->size; + unsigned int my_pid = recv->peer_pid; + unsigned int nid = recv->header.to_nid; + unsigned int peer_nid = recv->header.from_nid; + unsigned int peer_ws = recv->my_ws; + struct task_struct *tsk; + struct mm_struct *mm; + + int ret = 0; + + tsk = find_get_task_by_vpid(my_pid); + if (!tsk) { + 
pr_err("svm process does not have task_struct\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto out; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("no mm\n"); + ret = -1; + goto put_task; + } + + mmap_write_lock(mm); + ret = __do_munmap(mm, va, size, NULL, false); + mmap_write_unlock(mm); + + if (ret < 0) + ret = GM_RET_FAILURE_UNKNOWN; + else + ret = GM_RET_SUCCESS; + + mmput(mm); +put_task: + put_task_struct(tsk); +out: + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +pmd_t *get_huge_pmd(const struct vm_area_struct *vma, u64 va) +{ + pgd_t *pgd = NULL; + p4d_t *p4d = NULL; + pud_t *pud = NULL; + pmd_t *pmd = NULL; + + if ((vma == NULL) || (vma->vm_mm == NULL)) { + pr_err("Vm_mm none. (va=0x%llx)\n", va); + return NULL; + } + /* too much log, not print */ + pgd = pgd_offset(vma->vm_mm, va); + if (PXD_JUDGE(pgd)) + return NULL; + + p4d = p4d_offset(pgd, va); + if (PXD_JUDGE(p4d) != 0) + return NULL; + + /* if kernel version is above 4.11.0,then 5 level pt arrived. + pud_offset(pgd,va) changed to pud_offset(p4d,va) for x86 + but not changed in arm64 */ + pud = pud_offset(p4d, va); + if (PXD_JUDGE(pud) != 0) + return NULL; + + pmd = pmd_offset(pud, va); + return pmd; +} + +static inline struct page *alloc_transhuge_page_node(int nid, int zero) +{ + struct page *page; + gfp_t gfp_mask = GFP_TRANSHUGE | __GFP_THISNODE | __GFP_NOWARN; + + if (zero) + gfp_mask |= __GFP_ZERO; + + page = alloc_pages_node(nid, gfp_mask, HPAGE_PMD_ORDER); + if (!page) + return NULL; + + INIT_LIST_HEAD(&page->lru); + INIT_LIST_HEAD(page_deferred_list(page)); + set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); + + return page; +} + +int gmem_hugepage_remap_owner(struct svm_proc *svm_proc, u64 addr, + pgprot_t prot, struct page *hpage) +{ + int ret; + + ret = hugetlb_insert_hugepage_pte(svm_proc->mm, addr, prot, hpage); + if (ret != 0) { + pr_err("insert_hugepage owner fail. 
(va=0x%llx)\n", addr); + return ret; + } + + return 0; +} + +int gmem_hugepage_remap_local(struct svm_proc *svm_proc, u64 addr, + pgprot_t prot, struct page *hpage) +{ + int ret = 0; + struct local_pair_proc *item = NULL; + struct local_pair_proc *next = NULL; + + list_for_each_entry_safe(item, next, &svm_proc->tasks_list, node) { + ret = hugetlb_insert_hugepage_pte(item->mm, addr, prot, hpage); + if (ret != 0) { + pr_err("insert_hugepage local fail. (va=0x%llx)\n", addr); + return ret; + } + } + + return 0; +} + + +int gmem_hugepage_remap(struct svm_proc *svm_proc, u64 addr, pgprot_t prot, + struct page *hpage) +{ + int ret; + + ret = gmem_hugepage_remap_owner(svm_proc, addr, prot, hpage); + if (ret != 0) { + pr_err("gmem_hugepage_remap_owner fail. (va=0x%llx)\n", addr); + return ret; + } + + ret = gmem_hugepage_remap_local(svm_proc, addr, prot, hpage); + if (ret != 0) { + pr_err("gmem_hugepage_remap_local fail. (va=0x%llx)\n", addr); + return ret; + } + + return 0; +} + +int gmem_handle_alloc_page(struct rpg_kmsg_message *msg) +{ + struct gm_pager_msg_rq *recv = (struct gm_pager_msg_rq *)msg; + unsigned long addr = recv->va; + unsigned int page_size = recv->size; + unsigned int my_pid = recv->peer_pid; + unsigned int peer_ws = recv->my_ws; + int nid = recv->header.to_nid; + int peer_nid = recv->header.from_nid; + struct page_info *page_info; + struct svm_proc *proc = search_svm_proc_by_pid(my_pid); + struct page *page; + unsigned long long prot_val; + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + struct page *pgtable; + pmd_t *pmd; + spinlock_t *ptl; + int n_retry = 0; + int ret = 0; + + if (!proc) { + pr_info("can not find proc of %d\n", my_pid); + ret = -EINVAL; + goto out; + } + + page_info = get_page_info(&proc->pager, addr, page_size, page_size); + if (!page_info) { + pr_err("%s: failed to get page_info\n", __func__); + ret = -EINVAL; + goto out; + } + + if (recv->behavior == MADV_WILLNEED) { + if (!page_info->page) + goto 
new_page; + + ret = update_page(page_info->page); + if (ret) + pr_err("update_page failed, error: %d\n", ret); + + goto out; + } + +new_page: + /* TODO: How Can Know HBM node */ + page = alloc_transhuge_page_node(1, !recv->dma_addr); + if (!page) { + do_swap(); + if (n_retry++ < MAX_RETRY_TIME) { + goto new_page; + } else { + ret = -ENOMEM; + goto out; + } + } + + /* We need a condition */ + if (need_wake_up_swapd()) + wake_up_swapd(); + + if (recv->dma_addr) { + handle_migrate_page((void *)recv->dma_addr, page, page_size, + FORM_PEER); + } + + tsk = find_get_task_by_vpid(my_pid); + if (!tsk) { + pr_err("svm process does not have task_struct\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto out; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("no mm\n"); + ret = -1; + goto put_task; + } + + vma = find_vma(mm, addr); + if (vma->vm_flags & VM_WRITE) { + prot_val = (pgprot_val(PAGE_SHARED_EXEC) & (~PTE_RDONLY)) | + PTE_DIRTY; + } else { + prot_val = pgprot_val(PAGE_READONLY_EXEC); + } + + /* TODO: 9 Consider multiple processes bind */ + ret = gmem_hugepage_remap(proc, addr, __pgprot(prot_val), page); + if (ret) + goto put_mm; + + vma = find_vma(mm, addr); + if (!vma->anon_vma) + __anon_vma_prepare_symbol(vma); + __page_set_anon_rmap_symbol(page, vma, addr, 1); + add_swap_page(page); + + pmd = get_huge_pmd(vma, addr); + pgtable = alloc_pages(GFP_KERNEL | ___GFP_ZERO, 0); + ptl = pmd_lock(vma->vm_mm, pmd); + pgtable_trans_huge_deposit_symbol(vma->vm_mm, pmd, pgtable); + spin_unlock(ptl); + page_info->page = page; + +put_mm: + mmput(mm); +put_task: + put_task_struct(tsk); +out: + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +static inline void zap_clear_pmd(struct vm_area_struct *vma, u64 vaddr, + pmd_t *pmd) +{ + pmd_clear(pmd); + flush_tlb_range(vma, vaddr, vaddr + HPAGE_SIZE); +} + +void zap_vma_pmd(struct vm_area_struct *vma, u64 vaddr) +{ + pmd_t *pmd = NULL; + + pmd = get_huge_pmd(vma, vaddr); + + if (pmd == NULL) + return; 
+ + zap_clear_pmd(vma, vaddr, pmd); +} + +void gmem_hugepage_unmap_local(struct svm_proc *svm_proc, u64 addr) +{ + struct local_pair_proc *item = NULL; + struct local_pair_proc *next = NULL; + struct vm_area_struct *vma; + + list_for_each_entry_safe(item, next, &svm_proc->tasks_list, node) { + vma = find_vma(item->mm, addr); + if (!vma) + zap_vma_pmd(vma, addr); + } +} + +void gmem_unmap_hugepage(struct svm_proc *svm_proc, u64 addr) +{ + struct vm_area_struct *vma; + + vma = find_vma(svm_proc->mm, addr); + + if (!vma) + zap_vma_pmd(vma, addr); + + gmem_hugepage_unmap_local(svm_proc, addr); +} + +int gmem_handle_free_page(struct rpg_kmsg_message *msg) +{ + struct gm_pager_msg_rq *recv = (struct gm_pager_msg_rq *)msg; + unsigned long addr = recv->va; + unsigned long page_size = recv->size; + unsigned int my_pid = recv->peer_pid; + unsigned int peer_ws = recv->my_ws; + int peer_nid = recv->header.from_nid; + int nid = recv->header.to_nid; + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + pmd_t *pmd; + struct page_info *page_info; + struct svm_proc *proc = search_svm_proc_by_pid(my_pid); + struct page *page = NULL; + struct page *pgtable; + spinlock_t *ptl; + int ret = 0; + + if (!proc) { + pr_info("can not find proc of %d\n", my_pid); + ret = -EINVAL; + goto out; + } + + page_info = get_page_info(&proc->pager, addr, page_size, page_size); + if (!page_info) { + pr_err("%s: failed to get page_info\n", __func__); + ret = -EINVAL; + goto out; + } + + page = page_info->page; + if (!page) { + pr_err("%s: page reference in page_info is NULL\n", __func__); + ret = -EINVAL; + goto out; + } + + tsk = find_get_task_by_vpid(my_pid); + if (!tsk) { + pr_err("svm process does not have task_struct\n"); + ret = GM_RET_FAILURE_UNKNOWN; + goto out; + } + + mm = get_task_mm(tsk); + if (!mm) { + pr_err("no mm\n"); + ret = -1; + goto put_task; + } + + vma = find_vma(mm, addr); + pmd = get_huge_pmd(vma, addr); + ptl = pmd_lock(vma->vm_mm, pmd); + pgtable = 
pgtable_trans_huge_withdraw_symbol(proc->mm, pmd); + spin_unlock(ptl); + pte_free(mm, pgtable); + /* mm should be freed at first */ + smp_rmb(); + + del_swap_page(page); + zap_clear_pmd(vma, addr, pmd); + + mmput(mm); + + if (recv->dma_addr) + handle_migrate_page((void *)recv->dma_addr, page, page_size, + TO_PEER); + + free_page_info(&proc->pager, page_info); + put_page(page); + +put_task: + put_task_struct(tsk); +out: + gmem_send_comm_msg_reply(nid, peer_nid, peer_ws, ret); + kfree(msg); + return ret; +} + +int gmem_handle_hmadvise(struct rpg_kmsg_message *msg) +{ + kfree(msg); + return 0; +} + +int gmem_handle_hmemcpy(struct rpg_kmsg_message *msg) +{ + kfree(msg); + return 0; +} + +static int sync_gmem_vma_to_custom_process(struct svm_proc *svm_proc, + struct local_pair_proc *local_proc) +{ + struct mm_struct *mm = svm_proc->mm; + struct vm_area_struct *vma, *local_vma; + unsigned long populate; + struct mm_struct *old_mm = current->mm; + unsigned long addr; + unsigned long prot = PROT_READ; + + + mmap_write_lock(mm); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (!vma_is_gmem(vma)) + continue; + current->mm = local_proc->mm; + pr_err("%s cur %lx local %lx start %lx -- end %lx\n", __func__, + (unsigned long)current->mm, + (unsigned long)local_proc->mm, vma->vm_start, + vma->vm_end); + prot = PROT_READ; + if (vma->vm_flags & VM_WRITE) + prot |= PROT_WRITE; + addr = __do_mmap_mm(local_proc->mm, NULL, vma->vm_start, + vma->vm_end - vma->vm_start, prot, + MAP_SHARED | MAP_ANONYMOUS | + MAP_FIXED_NOREPLACE, 0, + 0, &populate, NULL); + if (IS_ERR_VALUE(addr)) { + pr_err("%s failed start %lx - end %lx ret %ld\n", + __func__, vma->vm_start, vma->vm_end, addr); + continue; + } + local_vma = find_vma(local_proc->mm, addr); + if (!local_vma) { + local_vma->vm_ops = vma->vm_ops; + local_vma->vm_flags |= VM_HUGEPAGE; + } + } + mmap_write_unlock(mm); + current->mm = old_mm; + + return 0; +} + +int gmem_register_pair_local_task(unsigned int bind_to_pid, + unsigned int 
local_pid) +{ + int ret = 0; + struct svm_proc *proc = search_svm_proc_by_pid(bind_to_pid); + struct local_pair_proc *local_proc; + + pr_debug("%s bind_to_pid %d local_pid %d\n", __func__, bind_to_pid, + local_pid); + + local_proc = insert_local_proc(proc, local_pid); + if (IS_ERR(local_proc)) { + pr_err("%s failed\n", __func__); + return PTR_ERR(local_proc); + } + + /* sync vma and vma_ops to local_pid */ + sync_gmem_vma_to_custom_process(proc, local_proc); + + return ret; +} +EXPORT_SYMBOL(gmem_register_pair_local_task); diff --git a/drivers/remote_pager/svm_proc_mng.c b/drivers/remote_pager/svm_proc_mng.c new file mode 100644 index 000000000000..4829818fd060 --- /dev/null +++ b/drivers/remote_pager/svm_proc_mng.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (C) 2023- Huawei, Inc. + * Author: Jiangtian Feng + * Co-Author: Jun Chen, Chuangchuang Fang + * + */ + +#include +#include +#include +#include +#include + +#include "svm_proc_mng.h" + +struct svm_proc_node { + struct svm_proc svm_proc; + struct hlist_node list; +}; + +static inline struct svm_proc_node *to_proc_node(struct svm_proc *proc) +{ + return list_entry(proc, struct svm_proc_node, svm_proc); +} + +#define _PROC_LIST_MAX 0x0f +#define _PROC_LIST_SHIFT 4 +static DEFINE_RWLOCK(svm_proc_hash_rwlock); +static DEFINE_HASHTABLE(svm_proc_hashtable, _PROC_LIST_SHIFT); + +static unsigned int get_hash_tag(int pid) +{ + return (unsigned int)pid % _PROC_LIST_MAX; +} + +static void add_to_hashtable(struct svm_proc *proc) +{ + struct svm_proc_node *node = to_proc_node(proc); + unsigned int tag = get_hash_tag(proc->pid); + + write_lock(&svm_proc_hash_rwlock); + hash_add(svm_proc_hashtable, &node->list, tag); + write_unlock(&svm_proc_hash_rwlock); +} + +static void del_from_hashtable(struct svm_proc *proc) +{ + struct svm_proc_node *node; + + write_lock(&svm_proc_hash_rwlock); + node = to_proc_node(proc); + hash_del(&node->list); + 
write_unlock(&svm_proc_hash_rwlock); +} + +struct svm_proc *search_svm_proc_by_mm(struct mm_struct *mm) +{ + struct svm_proc_node *node; + unsigned int tag; + + read_lock(&svm_proc_hash_rwlock); + hash_for_each(svm_proc_hashtable, tag, node, list) { + if (node->svm_proc.mm == mm) { + read_unlock(&svm_proc_hash_rwlock); + return &node->svm_proc; + } + } + read_unlock(&svm_proc_hash_rwlock); + + return search_svm_proc_by_local_mm(mm); +} + +struct svm_proc *search_svm_proc_by_local_mm(struct mm_struct *mm) +{ + struct svm_proc_node *node; + unsigned int hash_tag; + struct local_pair_proc *item = NULL; + struct local_pair_proc *next = NULL; + + read_lock(&svm_proc_hash_rwlock); + hash_for_each(svm_proc_hashtable, hash_tag, node, list) { + list_for_each_entry_safe(item, next, &node->svm_proc.tasks_list, node) { + if (item->mm == mm) { + read_unlock(&svm_proc_hash_rwlock); + return &node->svm_proc; + } + } + } + read_unlock(&svm_proc_hash_rwlock); + + return NULL; +} + +struct svm_proc *search_svm_proc_by_pid(unsigned int pid) +{ + struct svm_proc_node *node; + unsigned int tag = get_hash_tag(pid); + + read_lock(&svm_proc_hash_rwlock); + hash_for_each_possible(svm_proc_hashtable, node, list, tag) { + if (node->svm_proc.pid == pid) { + read_unlock(&svm_proc_hash_rwlock); + return &node->svm_proc; + } + } + read_unlock(&svm_proc_hash_rwlock); + + return NULL; +} + +static struct page_info *__search_page_info(struct page_mng *pager, + unsigned long va, unsigned long len) +{ + struct rb_node *node = pager->rbtree.rb_node; + struct page_info *page_info = NULL; + + while (node) { + page_info = rb_entry(node, struct page_info, node); + + if (va + len <= page_info->va) + node = node->rb_left; + else if (va >= page_info->va + page_info->len) + node = node->rb_right; + else + break; + } + + if (page_info) { + if (va < page_info->va || va + len > page_info->va + page_info->len) + return NULL; + } + return page_info; +} + +struct page_info *search_page_info(struct page_mng *pager, 
unsigned long va, unsigned long len) +{ + struct page_info *page_info; + + if (!pager) + return NULL; + + down_read(&pager->rw_sem); + page_info = __search_page_info(pager, va, len); + up_read(&pager->rw_sem); + + return page_info; +} + +static int insert_page_info(struct page_mng *pager, struct page_info *page_info) +{ + struct rb_node **new_node; + struct rb_node *parent = NULL; + struct page_info *cur = NULL; + + down_write(&pager->rw_sem); + new_node = &(pager->rbtree.rb_node); + + /* Figure out where to put new node */ + while (*new_node) { + cur = rb_entry(*new_node, struct page_info, node); + parent = *new_node; + if (page_info->va + page_info->len <= cur->va) { + new_node = &((*new_node)->rb_left); + } else if (page_info->va >= cur->va + cur->len) { + new_node = &((*new_node)->rb_right); + } else { + up_write(&pager->rw_sem); + return -EFAULT; + } + } + /* Add new node and rebalance tree. */ + rb_link_node(&page_info->node, parent, new_node); + rb_insert_color(&page_info->node, &pager->rbtree); + + up_write(&pager->rw_sem); + + return 0; +} + +static void erase_page_info(struct page_mng *pager, struct page_info *page_info) +{ + rb_erase(&page_info->node, &pager->rbtree); +} + +static struct page_info *alloc_page_info(unsigned long va, unsigned long len, + unsigned int page_size) +{ + + struct page_info *page_info; + size_t size; + + size = sizeof(struct page_info); + page_info = kzalloc(size, GFP_KERNEL); + if (!page_info) { + pr_err("alloc page_info failed: (size=%lx)\n", (unsigned long)size); + return NULL; + } + + page_info->va = va; + page_info->len = len; + mutex_init(&page_info->lock); + + return page_info; +} + +struct page_info *get_page_info(struct page_mng *pager, + unsigned long va, unsigned long len, unsigned int page_size) +{ + struct page_info *page_info = search_page_info(pager, va, len); + + if (page_info) + return page_info; + + page_info = alloc_page_info(va, len, page_size); + if (page_info) { + if (insert_page_info(pager, page_info)) { + 
kfree(page_info); + page_info = search_page_info(pager, va, len); + } + } + + return page_info; +} + +void free_page_info(struct page_mng *pager, struct page_info *page_info) +{ + down_write(&pager->rw_sem); + erase_page_info(pager, page_info); + up_write(&pager->rw_sem); + kfree(page_info); +} + +static void free_pager(struct page_mng *pager) +{ + struct page_info *page_info = NULL; + struct rb_node *node = NULL; + + down_write(&pager->rw_sem); + node = rb_first(&pager->rbtree); + while (node) { + page_info = rb_entry(node, struct page_info, node); + node = rb_next(node); + erase_page_info(pager, page_info); + kfree(page_info); + } + up_write(&pager->rw_sem); +} + +static void free_svm_proc(struct svm_proc *proc) +{ + struct local_pair_proc *item = NULL; + struct local_pair_proc *next = NULL; + struct mm_struct *mm = proc->mm; + int count; + + free_pager(&proc->pager); + del_from_hashtable(proc); + + count = atomic_read(&mm->mm_users); + if (count) { + pr_err("mm_users is %d\n", count); + mmput(mm); + } + + if (!list_empty(&proc->tasks_list)) { + list_for_each_entry_safe(item, next, &proc->tasks_list, node) + list_del(&item->node); + } + pr_err("svm proc clean up done pid %d, peer_pid %d\n", proc->pid, proc->peer_pid); +} + +static void svm_proc_mm_release(struct mmu_notifier *subscription, struct mm_struct *mm) +{ + struct svm_proc *proc = container_of(subscription, struct svm_proc, notifier); + + free_svm_proc(proc); + kfree(proc); +} + +static const struct mmu_notifier_ops svm_proc_mmu_notifier_ops = { + .release = svm_proc_mm_release, +}; + +static int svm_proc_mmu_notifier_register(struct svm_proc *proc) +{ + proc->notifier.ops = &svm_proc_mmu_notifier_ops; + + return mmu_notifier_register(&proc->notifier, proc->mm); +} + +static void local_pair_proc_mm_release(struct mmu_notifier *subscription, struct mm_struct *mm) +{ + struct local_pair_proc *local_proc = + container_of(subscription, struct local_pair_proc, notifier); + + list_del(&local_proc->node); + 
kfree(local_proc); + pr_debug("clean pair proc resources\n"); +} + +static const struct mmu_notifier_ops local_pair_proc_mmu_notifier_ops = { + .release = local_pair_proc_mm_release, +}; + +static int local_pair_proc_mmu_notifier_register(struct local_pair_proc *local_proc) +{ + local_proc->notifier.ops = &local_pair_proc_mmu_notifier_ops; + + return mmu_notifier_register(&local_proc->notifier, local_proc->mm); +} + +struct local_pair_proc *insert_local_proc(struct svm_proc *proc, unsigned int pid) +{ + int ret = 0; + struct local_pair_proc *local_proc = kzalloc(sizeof(struct local_pair_proc), GFP_KERNEL); + + if (!local_proc) + return ERR_PTR(-ENOMEM); + + local_proc->tsk = find_get_task_by_vpid(pid); + if (!local_proc->tsk) { + pr_err("can not find process by pid %d\n", pid); + ret = -EINVAL; + goto free; + } + + local_proc->pid = pid; + local_proc->mm = get_task_mm(local_proc->tsk); + /* task is exiting */ + if (!local_proc->mm) { + pr_err("can not get process[%d] mm\n", pid); + ret = -EINTR; + goto put_task; + } + + ret = local_pair_proc_mmu_notifier_register(local_proc); + if (ret) { + pr_err("register mmu notifier failed\n"); + goto put_mm; + } + + mmput(local_proc->mm); + put_task_struct(local_proc->tsk); + + list_add(&local_proc->node, &proc->tasks_list); + pr_debug("%s bind_to_pid %d local_pid %d\n", __func__, proc->pid, local_proc->pid); + + return local_proc; + +put_mm: + mmput(local_proc->mm); +put_task: + put_task_struct(local_proc->tsk); +free: + kfree(local_proc); + return ERR_PTR(ret); +} + +struct svm_proc *alloc_svm_proc(int nid, int pid, int peer_nid, int peer_pid) +{ + struct svm_proc *proc; + int ret; + + proc = kzalloc(sizeof(struct svm_proc), GFP_KERNEL); + if (!proc) + return ERR_PTR(-ENOMEM); + + proc->pager.rbtree = RB_ROOT; + init_rwsem(&proc->pager.rw_sem); + + proc->pid = pid; + proc->nid = nid; + proc->peer_nid = peer_nid; + proc->peer_pid = peer_pid; + INIT_LIST_HEAD(&proc->tasks_list); + + proc->tsk = find_get_task_by_vpid(pid); + if 
(!proc->tsk) { + pr_err("can not find process by pid %d\n", pid); + ret = -EINVAL; + goto free; + } + + proc->mm = get_task_mm(proc->tsk); + /* task is exiting */ + if (!proc->mm) { + pr_err("can not get process[%d] mm\n", pid); + ret = -EINTR; + goto put_task; + } + + ret = svm_proc_mmu_notifier_register(proc); + if (ret) { + pr_err("register mmu notifier failed\n"); + goto put_mm; + } + + /* + * destroying svm_proc depends on mmu_notifier. + * we have to put mm to make sure mmu_notifier can be called + */ + mmput(proc->mm); + put_task_struct(proc->tsk); + + add_to_hashtable(proc); + + return proc; + +put_mm: + mmput(proc->mm); +put_task: + put_task_struct(proc->tsk); +free: + kfree(proc); + return ERR_PTR(ret); +} diff --git a/drivers/remote_pager/svm_proc_mng.h b/drivers/remote_pager/svm_proc_mng.h new file mode 100644 index 000000000000..98bd0e579bbe --- /dev/null +++ b/drivers/remote_pager/svm_proc_mng.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Generalized Memory Management. + * + * Copyright (C) 2023- Huawei, Inc. 
+ * Author: Jiangtian Feng + * Co-Author: Jun Chen + */ + +#ifndef _REMOTE_PAGER_PROC_MNG_H_ +#define _REMOTE_PAGER_PROC_MNG_H_ + +#include +#include +#include +#include +#include + +struct page_info { + struct rb_node node; + unsigned long va; + unsigned long len; + struct mutex lock; + struct page *page; +}; + +struct page_mng { + struct rw_semaphore rw_sem; + struct rb_root rbtree; +}; + +struct local_pair_proc { + struct list_head node; + pid_t pid; + struct task_struct *tsk; + struct mm_struct *mm; + struct mmu_notifier notifier; +}; + +struct svm_proc { + int pid; + int nid; + int peer_pid; + int peer_nid; + struct mm_struct *mm; /* never dereference */ + struct task_struct *tsk; + struct list_head tasks_list; /* bind to svm_proc local tasks */ + struct mmu_notifier notifier; + + struct page_mng pager; +}; + +struct page_info *search_page_info(struct page_mng *pager, + unsigned long va, unsigned long len); +struct page_info *get_page_info(struct page_mng *pager, + unsigned long va, unsigned long len, unsigned int page_size); +void free_page_info(struct page_mng *pager, struct page_info *page_info); + +struct svm_proc *alloc_svm_proc(int nid, int pid, int peer_nid, int peer_pid); +struct svm_proc *search_svm_proc_by_mm(struct mm_struct *mm); +struct svm_proc *search_svm_proc_by_pid(unsigned int pid); +struct local_pair_proc *insert_local_proc(struct svm_proc *proc, + unsigned int local_pid); +struct svm_proc *search_svm_proc_by_local_mm(struct mm_struct *mm); + +#endif diff --git a/drivers/remote_pager/swap/device/ksymbol.c b/drivers/remote_pager/swap/device/ksymbol.c new file mode 100644 index 000000000000..9587cd760b27 --- /dev/null +++ b/drivers/remote_pager/swap/device/ksymbol.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. 
+ * Author: Jun Chen + * Co-Author: Cunshu Ni + */ +#include + +#include "ksymbol.h" + +/* + * Get some symbol which are not exported by kernel + * rmap_walk_anon + * __anon_vma_prepare + * __page_set_anon_rmap + * pgtable_trans_huge_deposit + * pgtable_trans_huge_withdraw + */ + +rmap_walk_anon_symbol_t rmap_walk_anon_symbol; +__anon_vma_prepare_symbol_t __anon_vma_prepare_symbol; +__page_set_anon_rmap_symbol_t __page_set_anon_rmap_symbol; +pgtable_trans_huge_deposit_symbol_t pgtable_trans_huge_deposit_symbol; +pgtable_trans_huge_withdraw_symbol_t pgtable_trans_huge_withdraw_symbol; + +static unsigned long (*kallsyms_lookup_name_symbol)(const char *name); + +static int init_kallsyms_lookup_name(void) +{ + int ret; + + struct kprobe kprobe = { + .symbol_name = "kallsyms_lookup_name", + }; + + ret = register_kprobe(&kprobe); + if (ret) + return ret; + + kallsyms_lookup_name_symbol = (void *)kprobe.addr; + + unregister_kprobe(&kprobe); + + return 0; +} + +static void *__kallsyms_lookup_name(const char *name) +{ + void *symbol; + + symbol = (void *)kallsyms_lookup_name_symbol(name); + if (!symbol) + pr_err("Can not find symbol %s\n", name); + + return symbol; +} + +int kernel_symbol_init(void) +{ + int ret; + + ret = init_kallsyms_lookup_name(); + if (ret) { + pr_err("Can not find symbol kallsyms_lookup_name\n"); + return ret; + } + + rmap_walk_anon_symbol = __kallsyms_lookup_name("rmap_walk_anon"); + __anon_vma_prepare_symbol = __kallsyms_lookup_name("__anon_vma_prepare"); + __page_set_anon_rmap_symbol = __kallsyms_lookup_name("__page_set_anon_rmap"); + pgtable_trans_huge_deposit_symbol = __kallsyms_lookup_name("pgtable_trans_huge_deposit"); + pgtable_trans_huge_withdraw_symbol = __kallsyms_lookup_name("pgtable_trans_huge_withdraw"); + + ret = !rmap_walk_anon_symbol || + !__anon_vma_prepare_symbol || + !__page_set_anon_rmap_symbol || + !pgtable_trans_huge_deposit_symbol || + !pgtable_trans_huge_withdraw_symbol; + + return ret; +} diff --git 
a/drivers/remote_pager/swap/device/ksymbol.h b/drivers/remote_pager/swap/device/ksymbol.h new file mode 100644 index 000000000000..3ea8d1c488dc --- /dev/null +++ b/drivers/remote_pager/swap/device/ksymbol.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generalized Memory Management. + * + * Copyright (c) 2023- Huawei, Inc. + * Author: Jun Chen + * Co-Author: Jiangtian Feng + */ +#ifndef __GMEM_KSYMBOL_H__ +#define __GMEM_KSYMBOL_H__ + +#include + +typedef int (*rmap_walk_anon_symbol_t)(struct page *page, + struct rmap_walk_control *rwc, bool locked); +extern rmap_walk_anon_symbol_t rmap_walk_anon_symbol; + +typedef int (*__anon_vma_prepare_symbol_t)(struct vm_area_struct *vma); +extern __anon_vma_prepare_symbol_t __anon_vma_prepare_symbol; + +typedef int (*__page_set_anon_rmap_symbol_t)(struct page *page, + struct vm_area_struct *vma, unsigned long address, int exclusive); +extern __page_set_anon_rmap_symbol_t __page_set_anon_rmap_symbol; + +typedef int (*pgtable_trans_huge_deposit_symbol_t)(struct mm_struct *mm, + pmd_t *pmdp, pgtable_t pgtable); +extern pgtable_trans_huge_deposit_symbol_t pgtable_trans_huge_deposit_symbol; + +typedef int (*pgtable_trans_huge_withdraw_symbol_t)(struct mm_struct *mm, + pmd_t *pmdp); +extern pgtable_trans_huge_withdraw_symbol_t pgtable_trans_huge_withdraw_symbol; + +int kernel_symbol_init(void); + +#endif diff --git a/drivers/remote_pager/swap/device/swap_manager.c b/drivers/remote_pager/swap/device/swap_manager.c new file mode 100644 index 000000000000..6a4429c07950 --- /dev/null +++ b/drivers/remote_pager/swap/device/swap_manager.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include "../../msg_handler.h" +#include "../../svm_proc_mng.h" + +#include "ksymbol.h" +#include "swap_policy/swap_policy.h" + +extern struct swap_policy swap_policy_list_lru; + +static struct swap_manager { + struct swap_policy *policy; +} manager; + +int add_swap_page(struct page *page) +{ + if 
(manager.policy) + return manager.policy->add_page(page); + + return -ENODEV; +} + +int del_swap_page(struct page *page) +{ + if (manager.policy) + return manager.policy->del_page(page); + + return -ENODEV; +} + +int update_page(struct page *page) +{ + if (manager.policy) + return manager.policy->update_page(page); + + return -ENODEV; +} + +static int pick_victim_pages(struct list_head *page_list, int nid, unsigned long *nr) +{ + if (manager.policy) + return manager.policy->pick_victim_pages(page_list, nid, nr); + + return -ENODEV; +} + +static bool __do_swap_one_page(struct page *page, struct vm_area_struct *vma, + unsigned long addr, void *args) +{ + struct gm_evict_page_msg_rq req; + struct svm_proc *proc; + struct wait_station *ws; + struct comm_msg_rsp *rsp; + int ret = 0; + + proc = search_svm_proc_by_mm(vma->vm_mm); + if (!proc) { + pr_err("can not find proc of mm\n"); + return 0; /* return 0 if failed */ + } + + get_page(page); + + ws = get_wait_station(); + req.peer_pid = proc->peer_pid; + req.va = addr; + req.size = PageCompound(page) ? 
HPAGE_PMD_SIZE : PAGE_SIZE; + req.ws = ws->id; + ret = msg_send_nid(GMEM_EVICT_PAGE_REQUEST, proc->nid, proc->peer_nid, &req, sizeof(req)); + if (ret) { + pr_err("send GMEM_EVICT_PAGE_REQUEST failed\n"); + put_wait_station(ws); + goto out; + } + + rsp = wait_at_station(ws); + if (IS_ERR(rsp)) { + ret = PTR_ERR(rsp); + } else { + ret = rsp->ret; + kfree(rsp); + } + + if (ret) + pr_err("GMEM_EVICT_PAGE_REQUEST receive %d\n", ret); + +out: + put_page(page); + + return !ret; /* return 1 if success */ +} + +static int do_swap_one_page(struct page *page) +{ + struct rmap_walk_control rwc = { + .rmap_one = __do_swap_one_page, + }; + + rmap_walk_anon_symbol(page, &rwc, false); + + return 0; +} + +static int do_swap_pages(struct list_head *page_list) +{ + struct list_head *list = page_list; + struct page *page, *tmp; + + list_for_each_entry_safe(page, tmp, list, lru) + do_swap_one_page(page); + + return 0; +} + +static int swap_one_page_node(int nid) +{ + LIST_HEAD(evict_pages); + unsigned long nr_to_evict = 1; + + pick_victim_pages(&evict_pages, nid, &nr_to_evict); + + do_swap_pages(&evict_pages); + + return 0; +} + +#define HBM_WATERMARK_LOW 0x8000000 /* 128M */ +#define HBM_WATERMARK_HIG (2 * HBM_WATERMARK_LOW) + +static unsigned long zone_node_page_free(int node) +{ + struct zone *zones = NODE_DATA(node)->node_zones; + int i; + unsigned long count = 0; + + for (i = 0; i < MAX_NR_ZONES; i++) + count += zone_page_state(zones + i, NR_FREE_PAGES); + + return count * PAGE_SIZE; +} + +int need_wake_up_swapd_node(int nid) +{ + return zone_node_page_free(nid) < HBM_WATERMARK_LOW; +} + +int need_wake_up_swapd(void) +{ + int nid; + + for_each_node_state(nid, N_NORMAL_MEMORY) { + if (zone_node_page_free(nid) < HBM_WATERMARK_LOW) + return 1; + } + + return 0; +} + +int do_swap_node(int nid) +{ + swap_one_page_node(nid); + + return 0; +} + +int do_swap(void) +{ + int nid; + + for_each_node_state(nid, N_NORMAL_MEMORY) { + swap_one_page_node(nid); + } + + return 0; +} + +static int 
swapd_func(void *id)
+{
+	int nid = (unsigned long)id;
+
+	while (!kthread_should_stop()) {
+		swap_one_page_node(nid);
+		if (zone_node_page_free(nid) > HBM_WATERMARK_HIG) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+		} else {
+			cond_resched();
+		}
+	}
+
+	return 0;
+}
+
+static struct task_struct *swapd_task[MAX_NUMNODES];
+
+static void init_swapd(void)
+{
+	unsigned long nid;
+
+	for_each_online_node(nid) {
+		swapd_task[nid] = kthread_run(swapd_func, (void *)nid, "swapd/%lu", nid);
+		if (IS_ERR(swapd_task[nid]))
+			/* TODO: free task */
+			swapd_task[nid] = NULL;
+	}
+}
+
+static void wake_up_swapd_node(int nid)
+{
+	struct task_struct *tsk = swapd_task[nid];
+
+	if (likely(tsk))
+		wake_up_process(tsk);
+}
+
+void wake_up_swapd(void)
+{
+	unsigned long nid;
+
+	for_each_online_node(nid)
+		wake_up_swapd_node(nid);
+}
+
+int init_swap_manager(char *policy_name)
+{
+	int ret = 0;
+
+	ret = kernel_symbol_init();
+	if (ret) {
+		pr_err("Can not get all symbol\n");
+		return ret;
+	}
+
+	if (!policy_name)
+		return -EINVAL;
+
+	if (!strncmp(policy_name, "list_lru", strlen("list_lru")))
+		manager.policy = &swap_policy_list_lru;
+
+	if (!manager.policy)
+		return -ENOENT;
+
+	if (manager.policy->init) {
+		ret = manager.policy->init();
+		if (ret)
+			return ret;
+	}
+
+	init_swapd();
+
+	return 0;
+}
+
diff --git a/drivers/remote_pager/swap/device/swap_manager.h b/drivers/remote_pager/swap/device/swap_manager.h
new file mode 100644
index 000000000000..3e97b265154b
--- /dev/null
+++ b/drivers/remote_pager/swap/device/swap_manager.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generalized Memory Management.
+ *
+ * Copyright (C) 2023- Huawei, Inc.
+ * Author: Chen Jun + * + */ +#ifndef _GMEM_SWAP_MANAGER_H_ +#define _GMEM_SWAP_MANAGER_H_ + +#include + +#include "ksymbol.h" + +int init_swap_manager(char *policy_name); + +int add_swap_page(struct page *page); +int del_swap_page(struct page *page); +int update_page(struct page *page); + +int need_wake_up_swapd_node(int nid); +int need_wake_up_swapd(void); +int do_swap_node(int nid); +int do_swap(void); +void wake_up_swapd(void); + +#endif /* _GMEM_SWAP_MANAGER_H_ */ diff --git a/drivers/remote_pager/swap/device/swap_policy/policy_list_lru.c b/drivers/remote_pager/swap/device/swap_policy/policy_list_lru.c new file mode 100644 index 000000000000..1bbe890969ed --- /dev/null +++ b/drivers/remote_pager/swap/device/swap_policy/policy_list_lru.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generalized Memory Management. + * + * Copyright (C) 2023- Huawei, Inc. + * Author: Chen Jun + * + */ +#include "swap_policy.h" + +#include +#include + +static struct list_lru swap_list_lru; + +static int add_page(struct page *page) +{ + struct list_lru *lru = &swap_list_lru; + int nid = page_to_nid(page); + struct list_head *item = &page->lru; + struct list_lru_node *nlru = &lru->node[nid]; + struct list_lru_one *l; + + spin_lock(&nlru->lock); + if (list_empty(item)) { + l = &nlru->lru; + list_move_tail(item, &l->list); + SetPageLRU(page); + spin_unlock(&nlru->lock); + return 0; + } + spin_unlock(&nlru->lock); + return -EINVAL; +} + +static int del_page(struct page *page) +{ + struct list_lru *lru = &swap_list_lru; + int nid = page_to_nid(page); + struct list_head *item = &page->lru; + struct list_lru_node *nlru = &lru->node[nid]; + struct list_lru_one *l; + + spin_lock(&nlru->lock); + if (PageLRU(page) && !list_empty(item)) { + l = &nlru->lru; + list_del_init(item); + __ClearPageLRU(page); + spin_unlock(&nlru->lock); + return 0; + } + spin_unlock(&nlru->lock); + return -EINVAL; +} + +static int update_page(struct page *page) +{ + struct list_lru *lru = 
&swap_list_lru; + int nid = page_to_nid(page); + struct list_head *item = &page->lru; + struct list_lru_node *nlru = &lru->node[nid]; + struct list_lru_one *l; + int page_isolate; + + spin_lock(&nlru->lock); + l = &nlru->lru; + list_move_tail(item, &l->list); + page_isolate = PageIsolated(page); + __ClearPageIsolated(page); + SetPageLRU(page); + spin_unlock(&nlru->lock); + + return 0; +} + +static enum lru_status lru_isolate_shrink(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *victim_list = arg; + struct page *page = list_entry(item, struct page, lru); + + list_lru_isolate_move(lru, item, victim_list); + __ClearPageLRU(page); + __SetPageIsolated(page); + return LRU_REMOVED; +} + +static int pick_victim_pages(struct list_head *page_list, int nid, unsigned long *nr) +{ + list_lru_walk_node(&swap_list_lru, nid, lru_isolate_shrink, page_list, nr); + + return 0; +} + +static int init(void) +{ + list_lru_init(&swap_list_lru); + + return 0; +} + +struct swap_policy swap_policy_list_lru = { + .init = init, + .add_page = add_page, + .del_page = del_page, + .update_page = update_page, + .pick_victim_pages = pick_victim_pages, +}; diff --git a/drivers/remote_pager/swap/device/swap_policy/swap_policy.h b/drivers/remote_pager/swap/device/swap_policy/swap_policy.h new file mode 100644 index 000000000000..1113096e8548 --- /dev/null +++ b/drivers/remote_pager/swap/device/swap_policy/swap_policy.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _GMEM_SWAP_POLICY_H +#define _GMEM_SWAP_POLICY_H + +#include + +struct swap_policy { + int (*init)(void); + int (*add_page)(struct page *page); + int (*del_page)(struct page *page); + int (*update_page)(struct page *page); + int (*pick_victim_pages)(struct list_head *page_list, int nid, unsigned long *nr); +}; + +#endif /* _GMEM_SWAP_POLICY_H */ diff --git a/drivers/remote_pager/wait_station.c b/drivers/remote_pager/wait_station.c new file mode 100644 
index 000000000000..12a10b8e6eab
--- /dev/null
+++ b/drivers/remote_pager/wait_station.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0, BSD
+/*
+ * Waiting stations allows threads to be waited for a given
+ * number of events are completed
+ *
+ * Original file developed by SSRG at Virginia Tech.
+ *
+ * author, Javier Malave, Rebecca Shapiro, Andrew Hughes,
+ * Narf Industries 2020 (modifications for upstream RFC)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include "wait_station.h"
+
+#define MAX_WAIT_STATIONS 1024
+#define MAX_WAIT_IO_TIMEOUT (300 * HZ)
+
+static struct wait_station wait_stations[MAX_WAIT_STATIONS];
+
+static DEFINE_SPINLOCK(wait_station_lock);
+static DECLARE_BITMAP(wait_station_available, MAX_WAIT_STATIONS) = { 0 };
+
+struct wait_station *get_wait_station(void)
+{
+	int id;
+	struct wait_station *ws;
+
+	spin_lock(&wait_station_lock);
+	id = find_first_zero_bit(wait_station_available, MAX_WAIT_STATIONS);
+	if (id >= MAX_WAIT_STATIONS) {
+		spin_unlock(&wait_station_lock);
+		return NULL;
+	}
+	ws = wait_stations + id;
+	set_bit(id, wait_station_available);
+	spin_unlock(&wait_station_lock);
+
+	ws->id = id;
+	ws->private = (void *)0xbad0face;
+	init_completion(&ws->pendings);
+
+	return ws;
+}
+EXPORT_SYMBOL_GPL(get_wait_station);
+
+struct wait_station *wait_station(int id)
+{
+	/* memory barrier */
+	smp_rmb();
+	return wait_stations + id;
+}
+EXPORT_SYMBOL_GPL(wait_station);
+
+void put_wait_station(struct wait_station *ws)
+{
+	int id = ws->id;
+
+	spin_lock(&wait_station_lock);
+	clear_bit(id, wait_station_available);
+	spin_unlock(&wait_station_lock);
+}
+EXPORT_SYMBOL_GPL(put_wait_station);
+
+void *wait_at_station(struct wait_station *ws)
+{
+	void *ret;
+
+	if (!try_wait_for_completion(&ws->pendings)) {
+		if (wait_for_completion_io_timeout(&ws->pendings, MAX_WAIT_IO_TIMEOUT) == 0) {
+			pr_err("%s timeout\n", __func__);
+			ret = ERR_PTR(-ETIMEDOUT);
+			goto out;
+		}
+	}
+	/* memory barrier */
+	smp_rmb();
+	ret = ws->private;
+out:
+	put_wait_station(ws);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wait_at_station);
diff
--git a/drivers/remote_pager/wait_station.h b/drivers/remote_pager/wait_station.h new file mode 100644 index 000000000000..03fcee64ecaa --- /dev/null +++ b/drivers/remote_pager/wait_station.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0, BSD + * + * /kernel/popcorn/wait_station.c + * + * Waiting stations allows threads to be waited for a given + * number of events are completed + * + * Original file developed by SSRG at Virginia Tech. + * + * author, Javier Malave, Rebecca Shapiro, Andrew Hughes, + * Narf Industries 2020 (modifications for upstream RFC) + * + */ + +#ifndef _REMOTE_PAGER_WAIT_STATION_H_ +#define _REMOTE_PAGER_WAIT_STATION_H_ + +#include +#include + +struct wait_station { + unsigned int id; + void *private; + struct completion pendings; +}; + +struct wait_station *get_wait_station(void); +struct wait_station *wait_station(int id); +void put_wait_station(struct wait_station *ws); +void *wait_at_station(struct wait_station *ws); +#endif diff --git a/include/linux/remote_pager/msg_chan.h b/include/linux/remote_pager/msg_chan.h new file mode 100644 index 000000000000..a8049def052d --- /dev/null +++ b/include/linux/remote_pager/msg_chan.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __RPG_MSG_CHAN_H__ +#define __RPG_MSG_CHAN_H__ + +#include + +/* + * struct phys_channel_ops - Channel physical layer ops + * @open: Open the communication channel of node nid and alloc physical resources, + * returns the channel ID + * @notify: Notify peer of chan_id to receive messages + * @copy_to: Copy the msg_data message from origin to peer + * @copy_from: Copy the msg_data message from peer to origin + * @close: Close channel and free physical resources + */ +struct phys_channel_ops { + char *name; + int (*open)(int nid); + int (*notify)(int chan_id); + int (*copy_to)(int chan_id, void *msg_data, size_t msg_len, int flags); + int (*copy_from)(int chan_id, void *buf, size_t len, int flags); + int (*migrate_page)(void *peer_addr, struct 
page *local_page, size_t size, int dir); + int (*close)(int chan_id); +}; + +int msg_layer_install_phy_ops(struct phys_channel_ops *ops, int default_chan_id); +int msg_layer_uninstall_phy_ops(struct phys_channel_ops *ops); + +#define log_err(fmt, ...) pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) +#define log_info(fmt, ...) pr_info("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) + +#define MSG_CMD_START 0x1 +#define MSG_CMD_IRQ_END 0x2 +#define MSG_CMD_FIFO_NO_MEM 0x3 +#define MSG_CMD_CHANN_OPEN 0x4 + +#define CHAN_STAT_ENABLE 1 +#define CHAN_STAT_DISABLE 0 + +#define TO_PEER 0 +#define FROM_PEER 1 + +#endif -- Gitee From 6f1a9fb71ce298e91c796d811d87e7a7745cb4e5 Mon Sep 17 00:00:00 2001 From: Ni Cunshu Date: Thu, 31 Aug 2023 18:41:18 +0800 Subject: [PATCH 3/3] openeuler_defconfig: Enable remote_pager euleros inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX --------------------------------------------- enable remote_pager for ARM64 and X86 Signed-off-by: Cunshu Ni --- arch/arm64/configs/openeuler_defconfig | 7 +++++++ arch/x86/configs/openeuler_defconfig | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 081a223bc65b..57a3f0fb1979 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -6593,6 +6593,13 @@ CONFIG_TEE=m # CONFIG_PECI is not set # CONFIG_HTE is not set # CONFIG_CDX_BUS is not set + +# +# remote pager device +# +CONFIG_REMOTE_PAGER=m +CONFIG_REMOTE_PAGER_MASTER=m +# end of remote pager device # end of Device Drivers # diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index f6140635690e..d08d6958cc0d 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -7838,6 +7838,13 @@ CONFIG_INTEL_TH_PTI=m # CONFIG_MOST is not set # CONFIG_PECI is not set # CONFIG_HTE is not set + +# +# remote pager 
device +# +CONFIG_REMOTE_PAGER=m +CONFIG_REMOTE_PAGER_MASTER=m +# end of remote pager device # end of Device Drivers # -- Gitee