From 3034ea1cffa33598ec97a03440a7c542fd73b936 Mon Sep 17 00:00:00 2001
From: "minjie.yu"
Date: Wed, 5 Jun 2024 16:08:54 +0800
Subject: [PATCH] feat: support hyperhold

Hyperhold manages a dedicated block device as an array of fixed-size
extents and exposes an asynchronous extent read/write interface
(hyperhold.h) built around hpio descriptors. Extent ids come from a
bitmap allocator (hp_space), inflight I/O is deduplicated through an io
table (hp_iotab), and extent data can be protected by either software
AES-256-XTS or inline block encryption (hp_device). Runtime control is
exported through the kernel/hyperhold sysctl directory.

Signed-off-by: minjie.yu
---
 drivers/Kconfig               |   2 +
 drivers/Makefile              |   3 +
 drivers/hyperhold/Kconfig     |  14 +
 drivers/hyperhold/Makefile    |   4 +
 drivers/hyperhold/hp_core.c   | 854 ++++++++++++++++++++++++++++++++++
 drivers/hyperhold/hp_device.c | 240 ++++++++++
 drivers/hyperhold/hp_device.h |  38 ++
 drivers/hyperhold/hp_iotab.c  | 271 +++++++++++
 drivers/hyperhold/hp_iotab.h  |  63 +++
 drivers/hyperhold/hp_space.c  | 122 +++++
 drivers/hyperhold/hp_space.h  |  30 ++
 drivers/hyperhold/hyperhold.h |  52 +++
 12 files changed, 1693 insertions(+)
 create mode 100644 drivers/hyperhold/Kconfig
 create mode 100644 drivers/hyperhold/Makefile
 create mode 100644 drivers/hyperhold/hp_core.c
 create mode 100644 drivers/hyperhold/hp_device.c
 create mode 100644 drivers/hyperhold/hp_device.h
 create mode 100644 drivers/hyperhold/hp_iotab.c
 create mode 100644 drivers/hyperhold/hp_iotab.h
 create mode 100644 drivers/hyperhold/hp_space.c
 create mode 100644 drivers/hyperhold/hp_space.h
 create mode 100644 drivers/hyperhold/hyperhold.h

diff --git a/drivers/Kconfig b/drivers/Kconfig
index efb66e25fa2d..9638eaeb9590 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -11,6 +11,8 @@ source "drivers/pcmcia/Kconfig"
 
 source "drivers/rapidio/Kconfig"
 
+source "drivers/hyperhold/Kconfig"
+
 source "drivers/base/Kconfig"
 
 source "drivers/bus/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 1bec7819a837..4131a349e13b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -75,6 +75,9 @@ obj-$(CONFIG_CONNECTOR)	+= connector/
 obj-$(CONFIG_FB_I810)		+= video/fbdev/i810/
 obj-$(CONFIG_FB_INTEL)		+= video/fbdev/intelfb/
 
+# Hyperhold driver
+obj-$(CONFIG_HYPERHOLD)	+= hyperhold/
+
 obj-$(CONFIG_PARPORT)		+= parport/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)	+= nvdimm/
diff --git a/drivers/hyperhold/Kconfig b/drivers/hyperhold/Kconfig
new file mode 100644
index 000000000000..4bba0efd1c3e
--- /dev/null
+++ b/drivers/hyperhold/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config HYPERHOLD
+	bool "Hyperhold driver"
+	select HYPERHOLD_ZSWAPD
+	select HYPERHOLD_MEMCG
+	default n
+	help
+	  Extent-based block-device backend with async, optionally encrypted I/O.
+
+config HYPERHOLD_DEBUG
+	bool "Debug info for Hyperhold driver"
+	depends on HYPERHOLD
+	help
+	  Expose memory overhead statistics of the Hyperhold driver.
diff --git a/drivers/hyperhold/Makefile b/drivers/hyperhold/Makefile
new file mode 100644
index 000000000000..b45a1a678466
--- /dev/null
+++ b/drivers/hyperhold/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+hyperhold-y := hp_core.o hp_device.o hp_space.o hp_iotab.o
+
+obj-$(CONFIG_HYPERHOLD) += hyperhold.o
diff --git a/drivers/hyperhold/hp_core.c b/drivers/hyperhold/hp_core.c
new file mode 100644
index 000000000000..6de2f06c9c63
--- /dev/null
+++ b/drivers/hyperhold/hp_core.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/hyperhold/hp_core.c
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#define pr_fmt(fmt) "[HYPERHOLD]" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/version.h>
+
+#include "hyperhold.h"
+#include "hp_device.h"
+#include "hp_space.h"
+#include "hp_iotab.h"
+
+#define HP_DFLT_DEVICE "/dev/by-name/hyperhold"
+#define HP_DFLT_EXT_SIZE (1 << 15)
+#define HP_DEV_NAME_LEN 256
+#define HP_STATE_LEN 10
+
+#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
+#define CHECK_BOUND(var, min, max) \
+	CHECK((var) >= (min) && (var) <= (max), \
+		"%s %u out of bounds %u ~ %u!\n", #var, (var), (min), (max))
+#define CHECK_INITED CHECK(hyperhold.inited, "hyperhold is not enabled!\n")
+#define CHECK_ENABLE (CHECK_INITED && CHECK(hyperhold.enable, "hyperhold is readonly!\n"))
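+
+/*
+ * CHECK() logs the message and evaluates to false when @cond is false,
+ * so callers can guard early returns, e.g.:
+ *
+ *	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
+ *		return NULL;
+ */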
+
+struct hyperhold {
+	bool enable;
+	bool inited;
+
+	char device_name[HP_DEV_NAME_LEN];
+	u32 extent_size;
+	u32 enable_soft_crypt;
+
+	struct hp_device dev;
+	struct hp_space spc;
+
+	struct workqueue_struct *read_wq;
+	struct workqueue_struct *write_wq;
+
+	struct mutex init_lock;
+};
+
+struct hyperhold hyperhold;
+
+atomic64_t mem_used = ATOMIC64_INIT(0);
+
+#ifdef CONFIG_HYPERHOLD_DEBUG
+/*
+ * return the memory overhead of the hyperhold module
+ */
+u64 hyperhold_memory_used(void)
+{
+	return atomic64_read(&mem_used) + hpio_memory() + space_memory();
+}
+#endif
+
+void hyperhold_disable(bool force)
+{
+	if (!CHECK_INITED)
+		return;
+	if (!force && !CHECK_ENABLE)
+		return;
+
+	mutex_lock(&hyperhold.init_lock);
+	hyperhold.enable = false;
+	if (!wait_for_space_empty(&hyperhold.spc, force))
+		goto out;
+	hyperhold.inited = false;
+	wait_for_iotab_empty();
+	destroy_workqueue(hyperhold.read_wq);
+	destroy_workqueue(hyperhold.write_wq);
+	deinit_space(&hyperhold.spc);
+	crypto_deinit(&hyperhold.dev);
+	unbind_bdev(&hyperhold.dev);
+out:
+	if (hyperhold.inited)
+		pr_info("hyperhold is disabled, now read only.\n");
+	else
+		pr_info("hyperhold is totally disabled!\n");
+	mutex_unlock(&hyperhold.init_lock);
+}
+EXPORT_SYMBOL(hyperhold_disable);
+
+void hyperhold_enable(void)
+{
+	bool enable = true;
+
+	if (hyperhold.inited)
+		goto out;
+
+	mutex_lock(&hyperhold.init_lock);
+	if (hyperhold.inited)
+		goto unlock;
+	if (!bind_bdev(&hyperhold.dev, hyperhold.device_name))
+		goto err1;
+	if (!crypto_init(&hyperhold.dev, hyperhold.enable_soft_crypt))
+		goto err2;
+	if (!init_space(&hyperhold.spc, hyperhold.dev.dev_size, hyperhold.extent_size))
+		goto err3;
+	hyperhold.read_wq = alloc_workqueue("hyperhold_read", WQ_HIGHPRI | WQ_UNBOUND, 0);
+	if (!hyperhold.read_wq)
+		goto err4;
+	hyperhold.write_wq = alloc_workqueue("hyperhold_write", 0, 0);
+	if (!hyperhold.write_wq)
+		goto err5;
+	hyperhold.inited = true;
+	goto unlock;
+err5:
+	destroy_workqueue(hyperhold.read_wq);
+err4:
+	deinit_space(&hyperhold.spc);
+err3:
+	crypto_deinit(&hyperhold.dev);
+err2:
+	unbind_bdev(&hyperhold.dev);
+err1:
+	enable = false;
+unlock:
+	mutex_unlock(&hyperhold.init_lock);
+out:
+	if (enable) {
+		hyperhold.enable = true;
+		pr_info("hyperhold is enabled.\n");
+	} else {
+		hyperhold.enable = false;
+		pr_err("hyperhold enable failed!\n");
+	}
+}
+EXPORT_SYMBOL(hyperhold_enable);
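+
+/*
+ * Read/write handler for /proc/sys/kernel/hyperhold/enable. Writing
+ * "enable", "disable" or "force_disable" switches the module state;
+ * reading reports "enable", "readonly" or "disable".
+ */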
+
+static int enable_sysctl_handler(struct ctl_table *table, int write,
+				 void *buffer, size_t *lenp, loff_t *ppos)
+{
+	const struct cred *cred = current_cred();
+
+	if (write) {
+		char *filter_buf = strstrip((char *)buffer);
+
+		if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) &&
+		    !uid_eq(cred->euid, GLOBAL_ROOT_UID)) {
+			pr_err("no permission to enable/disable hyperhold!\n");
+			return 0;
+		}
+		if (!strcmp(filter_buf, "enable"))
+			hyperhold_enable();
+		else if (!strcmp(filter_buf, "disable"))
+			hyperhold_disable(false);
+		else if (!strcmp(filter_buf, "force_disable"))
+			hyperhold_disable(true);
+	} else {
+		if (*lenp < HP_STATE_LEN || *ppos) {
+			*lenp = 0;
+			return 0;
+		}
+		if (hyperhold.enable)
+			strcpy(buffer, "enable\n");
+		else if (hyperhold.inited)
+			strcpy(buffer, "readonly\n");
+		else
+			strcpy(buffer, "disable\n");
+		*lenp = strlen(buffer);
+		*ppos += *lenp;
+#ifdef CONFIG_HYPERHOLD_DEBUG
+		pr_info("hyperhold memory overhead = %llu.\n", hyperhold_memory_used());
+#endif
+	}
+	return 0;
+}
+
+static int device_sysctl_handler(struct ctl_table *table, int write,
+				 void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&hyperhold.init_lock);
+	if (write && hyperhold.inited) {
+		pr_err("hyperhold device is busy!\n");
+		ret = -EBUSY;
+		goto unlock;
+	}
+	ret = proc_dostring(table, write, buffer, lenp, ppos);
+	if (write && !ret) {
+		hyperhold.enable_soft_crypt = 1;
+		pr_info("device changed, soft crypt enabled by default.\n");
+	}
+unlock:
+	mutex_unlock(&hyperhold.init_lock);
+
+	return ret;
+}
+
+static int extent_sysctl_handler(struct ctl_table *table, int write,
+				 void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&hyperhold.init_lock);
+	if (write && hyperhold.inited) {
+		pr_err("hyperhold device is busy!\n");
+		ret = -EBUSY;
+		goto unlock;
+	}
+	ret = proc_douintvec(table, write, buffer, lenp, ppos);
+unlock:
+	mutex_unlock(&hyperhold.init_lock);
+
+	return ret;
+}
+
+static int crypto_sysctl_handler(struct ctl_table *table, int write,
+				 void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&hyperhold.init_lock);
+	if (write && hyperhold.inited) {
+		pr_err("hyperhold device is busy!\n");
+		ret = -EBUSY;
+		goto unlock;
+	}
+	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
+unlock:
+	mutex_unlock(&hyperhold.init_lock);
+
+	return ret;
+}
+
+static struct ctl_table_header *hp_sysctl_header;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+static struct ctl_table hp_sys_table[] = {
+	{
+		.procname = "enable",
+		.mode = 0666,
+		.proc_handler = enable_sysctl_handler,
+	},
+	{
+		.procname = "device",
+		.data = &hyperhold.device_name,
+		.maxlen = sizeof(hyperhold.device_name),
+		.mode = 0644,
+		.proc_handler = device_sysctl_handler,
+	},
+	{
+		.procname = "extent_size",
+		.data = &hyperhold.extent_size,
+		.maxlen = sizeof(hyperhold.extent_size),
+		.mode = 0644,
+		.proc_handler = extent_sysctl_handler,
+	},
+	{
+		.procname = "soft_crypt",
+		.data = &hyperhold.enable_soft_crypt,
+		.maxlen = sizeof(hyperhold.enable_soft_crypt),
+		.mode = 0644,
+		.proc_handler = crypto_sysctl_handler,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{}
+};
+#else
+static struct ctl_table hp_table[] = {
+	{
+		.procname = "enable",
+		.mode = 0666,
+		.proc_handler = enable_sysctl_handler,
+	},
+	{
+		.procname = "device",
+		.data = &hyperhold.device_name,
+		.maxlen = sizeof(hyperhold.device_name),
+		.mode = 0644,
+		.proc_handler = device_sysctl_handler,
+	},
+	{
+		.procname = "extent_size",
+		.data = &hyperhold.extent_size,
+		.maxlen = sizeof(hyperhold.extent_size),
+		.mode = 0644,
+		.proc_handler = extent_sysctl_handler,
+	},
+	{
+		.procname = "soft_crypt",
+		.data = &hyperhold.enable_soft_crypt,
+		.maxlen = sizeof(hyperhold.enable_soft_crypt),
+		.mode = 0644,
+		.proc_handler = crypto_sysctl_handler,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{}
+};
+static struct ctl_table hp_kernel_table[] = {
+	{
+		.procname = "hyperhold",
+		.mode = 0555,
+		.child = hp_table,
+	},
+	{}
+};
+static struct ctl_table hp_sys_table[] = {
+	{
+		.procname = "kernel",
+		.mode = 0555,
+		.child = hp_kernel_table,
+	},
+	{}
+};
+#endif
+
+bool is_hyperhold_enable(void)
+{
+	return hyperhold.enable;
+}
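+
+/*
+ * On v6.6+ the table is registered directly at "kernel/hyperhold";
+ * older kernels build the kernel/ -> hyperhold/ directory chain via
+ * .child tables, which newer kernels no longer support.
+ */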
+
+static int __init hyperhold_init(void)
+{
+	strcpy(hyperhold.device_name, HP_DFLT_DEVICE);
+	hyperhold.extent_size = HP_DFLT_EXT_SIZE;
+	hyperhold.enable_soft_crypt = 1;
+	mutex_init(&hyperhold.init_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+	hp_sysctl_header = register_sysctl("kernel/hyperhold", hp_sys_table);
+#else
+	hp_sysctl_header = register_sysctl_table(hp_sys_table);
+#endif
+	if (!hp_sysctl_header) {
+		pr_err("register hyperhold sysctl table failed!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __exit hyperhold_exit(void)
+{
+	unregister_sysctl_table(hp_sysctl_header);
+	hyperhold_disable(true);
+}
+
+static struct hp_space *space_of(u32 eid)
+{
+	return &hyperhold.spc;
+}
+
+/* replace this func for multi devices */
+static struct hp_device *device_of(u32 eid)
+{
+	return &hyperhold.dev;
+}
+
+/* replace this func for multi devices */
+u32 hyperhold_nr_extent(void)
+{
+	if (!CHECK_INITED)
+		return 0;
+
+	return hyperhold.spc.nr_ext;
+}
+EXPORT_SYMBOL(hyperhold_nr_extent);
+
+u32 hyperhold_extent_size(u32 eid)
+{
+	struct hp_space *spc = NULL;
+
+	if (!CHECK_INITED)
+		return 0;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return 0;
+
+	return spc->ext_size;
+}
+EXPORT_SYMBOL(hyperhold_extent_size);
+
+/* replace this func for multi devices */
+long hyperhold_address(u32 eid, u32 offset)
+{
+	struct hp_space *spc = NULL;
+
+	if (!CHECK_INITED)
+		return -EINVAL;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return -EINVAL;
+	if (!CHECK_BOUND(offset, 0, spc->ext_size - 1))
+		return -EINVAL;
+
+	return (u64)eid * spc->ext_size + offset;
+}
+EXPORT_SYMBOL(hyperhold_address);
+
+/* replace this func for multi devices */
+int hyperhold_addr_extent(u64 addr)
+{
+	struct hp_space *spc = NULL;
+	u32 eid;
+
+	if (!CHECK_INITED)
+		return -EINVAL;
+	eid = div_u64(addr, hyperhold.spc.ext_size);
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return -EINVAL;
+
+	return eid;
+}
+EXPORT_SYMBOL(hyperhold_addr_extent);
+
+/* replace this func for multi devices */
+int hyperhold_addr_offset(u64 addr)
+{
+	if (!CHECK_INITED)
+		return -EINVAL;
+
+	return do_div(addr, hyperhold.spc.ext_size);
+}
+EXPORT_SYMBOL(hyperhold_addr_offset);
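+
+/*
+ * Example with the default extent size (HP_DFLT_EXT_SIZE = 1 << 15,
+ * i.e. 32 KiB): hyperhold_address(eid, offset) == eid * 32768 + offset,
+ * and hyperhold_addr_extent()/hyperhold_addr_offset() invert the mapping.
+ */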
+
+/* replace this func for multi devices */
+int hyperhold_alloc_extent(void)
+{
+	if (!CHECK_ENABLE)
+		return -EINVAL;
+
+	return alloc_eid(&hyperhold.spc);
+}
+EXPORT_SYMBOL(hyperhold_alloc_extent);
+
+void hyperhold_free_extent(u32 eid)
+{
+	struct hp_space *spc = NULL;
+
+	if (!CHECK_INITED)
+		return;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return;
+
+	free_eid(spc, eid);
+}
+EXPORT_SYMBOL(hyperhold_free_extent);
+
+void hyperhold_should_free_extent(u32 eid)
+{
+	struct hpio *hpio = NULL;
+	struct hp_space *spc = NULL;
+
+	if (!CHECK_INITED)
+		return;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return;
+
+	hpio = hpio_get(eid);
+	if (!hpio) {
+		free_eid(spc, eid);
+		return;
+	}
+	hpio->free_extent = hyperhold_free_extent;
+	hpio_put(hpio);
+}
+EXPORT_SYMBOL(hyperhold_should_free_extent);
+
+/*
+ * alloc hpio struct for r/w extent at @eid; fill it with newly allocated
+ * pages if @new_page. Returns NULL on failure.
+ */
+struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page)
+{
+	struct hpio *hpio = NULL;
+	struct hp_space *spc;
+	u32 nr_page;
+
+	if (!CHECK_ENABLE)
+		return NULL;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return NULL;
+
+	nr_page = spc->ext_size / PAGE_SIZE;
+	hpio = hpio_alloc(nr_page, gfp, op, new_page);
+	if (!hpio)
+		goto err;
+	hpio->eid = eid;
+
+	return hpio;
+err:
+	hpio_free(hpio);
+
+	return NULL;
+}
+EXPORT_SYMBOL(hyperhold_io_alloc);
+
+void hyperhold_io_free(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return;
+
+	hpio_free(hpio);
+}
+EXPORT_SYMBOL(hyperhold_io_free);
+
+/*
+ * find an existing read hpio for extent @eid in the iotab and take a
+ * reference; allocate a new hpio and insert it if none is inflight
+ */
+struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op)
+{
+	struct hp_space *spc = NULL;
+	u32 nr_page;
+
+	if (!CHECK_INITED)
+		return NULL;
+	spc = space_of(eid);
+	if (!CHECK(spc, "invalid eid %u!\n", eid))
+		return NULL;
+
+	nr_page = spc->ext_size / PAGE_SIZE;
+	return hpio_get_alloc(eid, nr_page, gfp, op);
+}
+EXPORT_SYMBOL(hyperhold_io_get);
+
+bool hyperhold_io_put(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return false;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return false;
+
+	return hpio_put(hpio);
+}
+EXPORT_SYMBOL(hyperhold_io_put);
+
+/*
+ * notify all threads waiting for this hpio
+ */
+void hyperhold_io_complete(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return;
+
+	hpio_complete(hpio);
+}
+EXPORT_SYMBOL(hyperhold_io_complete);
+
+void hyperhold_io_wait(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return;
+
+	hpio_wait(hpio);
+}
+EXPORT_SYMBOL(hyperhold_io_wait);
+
+bool hyperhold_io_success(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return false;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return false;
+
+	return hpio_get_state(hpio) == HPIO_DONE;
+}
+EXPORT_SYMBOL(hyperhold_io_success);
+
+int hyperhold_io_extent(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return -EINVAL;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return -EINVAL;
+
+	return hpio->eid;
+}
+EXPORT_SYMBOL(hyperhold_io_extent);
+
+int hyperhold_io_operate(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return -EINVAL;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return -EINVAL;
+
+	return hpio->op;
+}
+EXPORT_SYMBOL(hyperhold_io_operate);
+
+struct page *hyperhold_io_page(struct hpio *hpio, u32 index)
+{
+	if (!CHECK_INITED)
+		return NULL;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return NULL;
+	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
+		return NULL;
+
+	return hpio->pages[index];
+}
+EXPORT_SYMBOL(hyperhold_io_page);
+
+bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page)
+{
+	if (!CHECK_INITED)
+		return false;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return false;
+	if (!CHECK(page, "page is null!\n"))
+		return false;
+	if (!CHECK_BOUND(index, 0, hpio->nr_page - 1))
+		return false;
+
+	get_page(page);
+	atomic64_add(PAGE_SIZE, &mem_used);
+	BUG_ON(hpio->pages[index]);
+	hpio->pages[index] = page;
+
+	return true;
+}
+EXPORT_SYMBOL(hyperhold_io_add_page);
+
+u32 hyperhold_io_nr_page(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return 0;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return 0;
+
+	return hpio->nr_page;
+}
+EXPORT_SYMBOL(hyperhold_io_nr_page);
+
+void *hyperhold_io_private(struct hpio *hpio)
+{
+	if (!CHECK_INITED)
+		return NULL;
+	if (!CHECK(hpio, "hpio is null!\n"))
+		return NULL;
+
+	return hpio->private;
+}
+EXPORT_SYMBOL(hyperhold_io_private);
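+
+/*
+ * Typical caller flow for writing one extent (illustrative only):
+ *
+ *	eid = hyperhold_alloc_extent();
+ *	hpio = hyperhold_io_alloc(eid, GFP_NOIO, REQ_OP_WRITE, false);
+ *	hyperhold_io_add_page(hpio, i, page) for each page;
+ *	hyperhold_write_async(hpio, endio_cb, priv);
+ *
+ * endio_cb() runs once the bio completes; the caller then releases the
+ * hpio with hyperhold_io_free(). Reads instead go through
+ * hyperhold_io_get()/hyperhold_io_put(), so concurrent readers of one
+ * extent share a single inflight hpio.
+ */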
+
+static struct page *get_encrypted_page(struct hp_device *dev, struct page *page, unsigned int op)
+{
+	struct page *encrypted_page = NULL;
+
+	if (!dev->ctfm) {
+		encrypted_page = page;
+		get_page(encrypted_page);
+		goto out;
+	}
+
+	encrypted_page = alloc_page(GFP_NOIO);
+	if (!encrypted_page) {
+		pr_err("alloc encrypted page failed!\n");
+		goto out;
+	}
+	encrypted_page->index = page->index;
+
+	/* just alloc a new page for read */
+	if (!op_is_write(op))
+		goto out;
+
+	/* encrypt page for write */
+	if (soft_crypt_page(dev->ctfm, encrypted_page, page, HP_DEV_ENCRYPT)) {
+		put_page(encrypted_page);
+		encrypted_page = NULL;
+	}
+out:
+	return encrypted_page;
+}
+
+static void put_encrypted_pages(struct bio *bio)
+{
+	struct bio_vec *bv = NULL;
+	struct bvec_iter_all iter;
+
+	bio_for_each_segment_all(bv, bio, iter)
+		put_page(bv->bv_page);
+}
+
+static void hp_endio_work(struct work_struct *work)
+{
+	struct hpio *hpio = container_of(work, struct hpio, endio_work);
+	struct hp_device *dev = NULL;
+	struct bio_vec *bv = NULL;
+	struct bvec_iter_all iter;
+	struct page *page = NULL;
+	u32 ext_size;
+	sector_t sec;
+	int i;
+
+	if (op_is_write(hpio->op))
+		goto endio;
+	ext_size = space_of(hpio->eid)->ext_size;
+	dev = device_of(hpio->eid);
+	sec = hpio->eid * ext_size / dev->sec_size;
+	i = 0;
+	bio_for_each_segment_all(bv, hpio->bio, iter) {
+		page = bv->bv_page;
+		BUG_ON(i >= hpio->nr_page);
+		BUG_ON(!hpio->pages[i]);
+		if (dev->ctfm)
+			BUG_ON(soft_crypt_page(dev->ctfm, hpio->pages[i], page, HP_DEV_DECRYPT));
+		sec += PAGE_SIZE / dev->sec_size;
+		i++;
+	}
+endio:
+	put_encrypted_pages(hpio->bio);
+	bio_put(hpio->bio);
+	if (hpio->endio)
+		hpio->endio(hpio);
+}
+
+static void hpio_endio(struct bio *bio)
+{
+	struct hpio *hpio = bio->bi_private;
+	struct workqueue_struct *wq = NULL;
+
+	pr_info("hpio %p for eid %u returned %d.\n",
+		hpio, hpio->eid, bio->bi_status);
+	hpio_set_state(hpio, bio->bi_status ? HPIO_FAIL : HPIO_DONE);
+	wq = op_is_write(hpio->op) ? hyperhold.write_wq : hyperhold.read_wq;
+	queue_work(wq, &hpio->endio_work);
+	atomic64_sub(sizeof(struct bio), &mem_used);
+}
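+
+/*
+ * Note on the crypt path: for writes, get_encrypted_page() hands the
+ * bio an encrypted copy (or the original page when soft crypt is off);
+ * for reads it hands out a bounce page, which hp_endio_work() decrypts
+ * back into the caller's page.
+ */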
+
+static int hpio_submit(struct hpio *hpio)
+{
+	struct hp_device *dev = NULL;
+	struct bio *bio = NULL;
+	struct page *page = NULL;
+	u32 ext_size;
+	sector_t sec;
+	int i;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+	dev = device_of(hpio->eid);
+	bio = bio_alloc(dev->bdev, BIO_MAX_VECS, hpio->op, GFP_NOIO);
+#else
+	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+#endif
+	if (!bio) {
+		pr_err("bio alloc failed!\n");
+		return -ENOMEM;
+	}
+	atomic64_add(sizeof(struct bio), &mem_used);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+	bio->bi_opf = hpio->op;
+#else
+	dev = device_of(hpio->eid);
+	bio_set_op_attrs(bio, hpio->op, 0);
+#endif
+	bio_set_dev(bio, dev->bdev);
+
+	ext_size = space_of(hpio->eid)->ext_size;
+	sec = div_u64((u64)hpio->eid * ext_size, dev->sec_size);
+	bio->bi_iter.bi_sector = sec;
+	for (i = 0; i < hpio->nr_page; i++) {
+		if (!hpio->pages[i])
+			break;
+		hpio->pages[i]->index = sec;
+		page = get_encrypted_page(dev, hpio->pages[i], hpio->op);
+		if (!page)
+			goto err;
+		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
+			put_page(page);
+			goto err;
+		}
+		sec += PAGE_SIZE / dev->sec_size;
+	}
+
+	if (dev->blk_key)
+		inline_crypt_bio(dev->blk_key, bio);
+	bio->bi_private = hpio;
+	bio->bi_end_io = hpio_endio;
+	hpio->bio = bio;
+	submit_bio(bio);
+	pr_info("submit hpio %p for eid %u.\n", hpio, hpio->eid);
+
+	return 0;
+err:
+	put_encrypted_pages(bio);
+	bio_put(bio);
+	atomic64_sub(sizeof(struct bio), &mem_used);
+	return -EIO;
+}
+
+static int rw_extent_async(struct hpio *hpio, hp_endio endio, void *priv, unsigned int op)
+{
+	int ret = 0;
+
+	if (!hpio_change_state(hpio, HPIO_INIT, HPIO_SUBMIT))
+		return -EAGAIN;
+
+	hpio->private = priv;
+	hpio->endio = endio;
+	INIT_WORK(&hpio->endio_work, hp_endio_work);
+
+	ret = hpio_submit(hpio);
+	if (ret) {
+		hpio_set_state(hpio, HPIO_FAIL);
+		hpio_complete(hpio);
+	}
+
+	return ret;
+}
+
+int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv)
+{
+	if (!CHECK_ENABLE) {
+		hpio_set_state(hpio, HPIO_FAIL);
+		hpio_complete(hpio);
+		return -EINVAL;
+	}
+
+	BUG_ON(!op_is_write(hpio->op));
+
+	return rw_extent_async(hpio, endio, priv, REQ_OP_WRITE);
+}
+EXPORT_SYMBOL(hyperhold_write_async);
+
+int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv)
+{
+	if (!CHECK_INITED) {
+		hpio_set_state(hpio, HPIO_FAIL);
+		hpio_complete(hpio);
+		return -EINVAL;
+	}
+
+	if (op_is_write(hpio->op))
+		return -EAGAIN;
+
+	return rw_extent_async(hpio, endio, priv, REQ_OP_READ);
+}
+EXPORT_SYMBOL(hyperhold_read_async);
+
+module_init(hyperhold_init)
+module_exit(hyperhold_exit)
diff --git a/drivers/hyperhold/hp_device.c b/drivers/hyperhold/hp_device.c
new file mode 100644
index 000000000000..6669724652b8
--- /dev/null
+++ b/drivers/hyperhold/hp_device.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/hyperhold/hp_device.c
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#define pr_fmt(fmt) "[HYPERHOLD]" fmt
+
+#include <crypto/skcipher.h>
+#include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+
+#include "hp_device.h"
+
+#define HP_CIPHER_MODE BLK_ENCRYPTION_MODE_AES_256_XTS
+#define HP_CIPHER_NAME "xts(aes)"
+#define HP_KEY_SIZE (64)
+#define HP_IV_SIZE (16)
+
+union hp_iv {
+	__le64 index;
+	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+};
+
+void unbind_bdev(struct hp_device *dev)
+{
+	int ret;
+
+	if (!dev->bdev)
+		goto close;
+	if (!dev->old_block_size)
+		goto put;
+	ret = set_blocksize(dev->bdev, dev->old_block_size);
+	if (ret)
+		pr_err("set old block size %d failed, err = %d!\n",
+			dev->old_block_size, ret);
+	dev->old_block_size = 0;
+put:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+	blkdev_put(dev->bdev, dev);
+#else
+	blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+#endif
+	dev->bdev = NULL;
+close:
+	if (dev->filp)
+		filp_close(dev->filp, NULL);
+	dev->filp = NULL;
+
+	pr_info("hyperhold bdev unbound.\n");
+}
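+
+/*
+ * Resolve @name (by default /dev/by-name/hyperhold) to a block device,
+ * claim it exclusively and switch it to PAGE_SIZE blocks; the original
+ * block size is restored by unbind_bdev().
+ */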
+
+bool bind_bdev(struct hp_device *dev, const char *name)
+{
+	struct inode *inode = NULL;
+	int ret;
+
+	dev->filp = filp_open(name, O_RDWR | O_LARGEFILE, 0);
+	if (IS_ERR(dev->filp)) {
+		pr_err("open file %s failed, err = %ld!\n", name, PTR_ERR(dev->filp));
+		dev->filp = NULL;
+		goto err;
+	}
+	inode = dev->filp->f_mapping->host;
+	if (!S_ISBLK(inode->i_mode)) {
+		pr_err("%s is not a block device!\n", name);
+		goto err;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)
+	dev->bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, dev, NULL);
+#else
+	dev->bdev = blkdev_get_by_dev(inode->i_rdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, dev);
+#endif
+	if (IS_ERR(dev->bdev)) {
+		ret = PTR_ERR(dev->bdev);
+		dev->bdev = NULL;
+		pr_err("get blkdev %s failed, err = %d!\n", name, ret);
+		goto err;
+	}
+	dev->old_block_size = block_size(dev->bdev);
+	ret = set_blocksize(dev->bdev, PAGE_SIZE);
+	if (ret) {
+		pr_err("set %s block size failed, err = %d!\n", name, ret);
+		goto err;
+	}
+	dev->dev_size = (u64)i_size_read(inode);
+	dev->sec_size = SECTOR_SIZE;
+
+	pr_info("hyperhold bound bdev %s, size %llu, sector size %u.\n",
+		name, dev->dev_size, dev->sec_size);
+
+	return true;
+err:
+	unbind_bdev(dev);
+
+	return false;
+}
+
+int soft_crypt_page(struct crypto_skcipher *ctfm, struct page *dst_page,
+		    struct page *src_page, unsigned int op)
+{
+	struct skcipher_request *req = NULL;
+	DECLARE_CRYPTO_WAIT(wait);
+	struct scatterlist dst, src;
+	int ret = 0;
+	union hp_iv iv;
+
+	memset(&iv, 0, sizeof(union hp_iv));
+	iv.index = cpu_to_le64(src_page->index);
+
+	req = skcipher_request_alloc(ctfm, GFP_NOIO);
+	if (!req) {
+		pr_err("alloc skcipher request failed!\n");
+		return -ENOMEM;
+	}
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+				      crypto_req_done, &wait);
+	sg_init_table(&dst, 1);
+	sg_set_page(&dst, dst_page, PAGE_SIZE, 0);
+	sg_init_table(&src, 1);
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &iv);
+	if (op == HP_DEV_ENCRYPT)
+		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+	else if (op == HP_DEV_DECRYPT)
+		ret = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
+	else
+		BUG();
+
+	skcipher_request_free(req);
+
+	if (ret)
+		pr_err("%scrypt failed!\n", op == HP_DEV_ENCRYPT ? "en" : "de");
+
+	return ret;
+}
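+
+/*
+ * The crypto key is generated randomly at each enable and never
+ * persisted: once it is dropped, extents already written to disk are
+ * unreadable.
+ */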
"en" : "de"); + + return ret; +} + +static struct crypto_skcipher *soft_crypto_init(const u8 *key) +{ + char *cipher = HP_CIPHER_NAME; + u32 key_len = HP_KEY_SIZE; + struct crypto_skcipher *ctfm = NULL; + int ret; + + ctfm = crypto_alloc_skcipher(cipher, 0, 0); + if (IS_ERR(ctfm)) { + pr_err("alloc ctfm failed, ret = %ld!\n", PTR_ERR(ctfm)); + ctfm = NULL; + goto err; + } + crypto_skcipher_clear_flags(ctfm, ~0); + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + ret = crypto_skcipher_setkey(ctfm, key, key_len); + if (ret) { + pr_err("ctfm setkey failed, ret = %d!\n", ret); + goto err; + } + + return ctfm; +err: + if (ctfm) + crypto_free_skcipher(ctfm); + + return NULL; +} + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio) +{ + union hp_iv iv; + + memset(&iv, 0, sizeof(union hp_iv)); + iv.index = cpu_to_le64(bio->bi_iter.bi_sector); + + bio_crypt_set_ctx(bio, blk_key, iv.dun, GFP_NOIO); +} + +static struct blk_crypto_key *inline_crypto_init(const u8 *key) +{ + struct blk_crypto_key *blk_key = NULL; + u32 dun_bytes = HP_IV_SIZE - sizeof(__le64); + int ret; + + blk_key = kzalloc(sizeof(struct blk_crypto_key), GFP_KERNEL); + if (!blk_key) { + pr_err("blk key alloc failed!\n"); + goto err; + } + ret = blk_crypto_init_key(blk_key, key, HP_CIPHER_MODE, dun_bytes, PAGE_SIZE); + if (ret) { + pr_err("blk key init failed, ret = %d!\n", ret); + goto err; + } + + return blk_key; +err: + if (blk_key) + kfree_sensitive(blk_key); + + return NULL; +} +#else +void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio) {} +static struct blk_crypto_key *inline_crypto_init(const u8 *key) +{ + pr_err("CONFIG_BLK_INLINE_ENCRYPTION is not enabled!\n"); + return NULL; +} +#endif + +bool crypto_init(struct hp_device *dev, bool soft) +{ + u8 key[HP_KEY_SIZE]; + bool ret = false; + + get_random_bytes(key, HP_KEY_SIZE); + if (soft) { + dev->ctfm = soft_crypto_init(key); + ret = dev->ctfm; + } else { + dev->blk_key = inline_crypto_init(key); + ret = dev->blk_key; + if (ret) + pr_warn("soft crypt has been turned off, now apply hard crypt!\n"); + } + memzero_explicit(key, HP_KEY_SIZE); + + return ret; +} + +void crypto_deinit(struct hp_device *dev) +{ + if (dev->ctfm) { + crypto_free_skcipher(dev->ctfm); + dev->ctfm = NULL; + } + if (dev->blk_key) { + kfree_sensitive(dev->blk_key); + dev->blk_key = NULL; + } +} diff --git a/drivers/hyperhold/hp_device.h b/drivers/hyperhold/hp_device.h new file mode 100644 index 000000000000..06f007891481 --- /dev/null +++ b/drivers/hyperhold/hp_device.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * drivers/hyperhold/hp_device.h + * + * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+
+bool crypto_init(struct hp_device *dev, bool soft)
+{
+	u8 key[HP_KEY_SIZE];
+	bool ret = false;
+
+	get_random_bytes(key, HP_KEY_SIZE);
+	if (soft) {
+		dev->ctfm = soft_crypto_init(key);
+		ret = dev->ctfm;
+	} else {
+		dev->blk_key = inline_crypto_init(key);
+		ret = dev->blk_key;
+		if (ret)
+			pr_warn("soft crypt is off, applying inline crypt.\n");
+	}
+	memzero_explicit(key, HP_KEY_SIZE);
+
+	return ret;
+}
+
+void crypto_deinit(struct hp_device *dev)
+{
+	if (dev->ctfm) {
+		crypto_free_skcipher(dev->ctfm);
+		dev->ctfm = NULL;
+	}
+	if (dev->blk_key) {
+		kfree_sensitive(dev->blk_key);
+		dev->blk_key = NULL;
+	}
+}
diff --git a/drivers/hyperhold/hp_device.h b/drivers/hyperhold/hp_device.h
new file mode 100644
index 000000000000..06f007891481
--- /dev/null
+++ b/drivers/hyperhold/hp_device.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/hyperhold/hp_device.h
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef _HP_DEVICE_H_
+#define _HP_DEVICE_H_
+
+#include <crypto/skcipher.h>
+#include <linux/blk-crypto.h>
+#include <linux/blkdev.h>
+
+enum {
+	HP_DEV_ENCRYPT,
+	HP_DEV_DECRYPT,
+};
+
+struct hp_device {
+	struct file *filp;
+	struct block_device *bdev;
+	u32 old_block_size;
+	u64 dev_size;
+	u32 sec_size;
+
+	struct crypto_skcipher *ctfm;
+	struct blk_crypto_key *blk_key;
+};
+
+void unbind_bdev(struct hp_device *dev);
+bool bind_bdev(struct hp_device *dev, const char *name);
+bool crypto_init(struct hp_device *dev, bool soft);
+void crypto_deinit(struct hp_device *dev);
+int soft_crypt_page(struct crypto_skcipher *ctfm,
+		    struct page *dst_page, struct page *src_page, unsigned int op);
+void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio);
+#endif
diff --git a/drivers/hyperhold/hp_iotab.c b/drivers/hyperhold/hp_iotab.c
new file mode 100644
index 000000000000..258cb83a16c3
--- /dev/null
+++ b/drivers/hyperhold/hp_iotab.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/hyperhold/hp_iotab.c
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#define pr_fmt(fmt) "[HYPERHOLD]" fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "hp_iotab.h"
+
+atomic64_t hpio_mem = ATOMIC64_INIT(0);
+
+u64 hpio_memory(void)
+{
+	return atomic64_read(&hpio_mem);
+}
+
+struct hp_iotab {
+	struct list_head io_list;
+	rwlock_t lock;
+	u32 io_cnt;
+	wait_queue_head_t empty_wq;
+};
+
+/* store all inflight hpio in the iotab */
+struct hp_iotab iotab = {
+	.io_list = LIST_HEAD_INIT(iotab.io_list),
+	.lock = __RW_LOCK_UNLOCKED(iotab.lock),
+	.io_cnt = 0,
+	.empty_wq = __WAIT_QUEUE_HEAD_INITIALIZER(iotab.empty_wq),
+};
+
+static struct hpio *__iotab_search_get(struct hp_iotab *iotab, u32 eid)
+{
+	struct hpio *hpio = NULL;
+
+	list_for_each_entry(hpio, &iotab->io_list, list)
+		if (hpio->eid == eid && kref_get_unless_zero(&hpio->refcnt))
+			return hpio;
+
+	return NULL;
+}
+
+static struct hpio *iotab_search_get(struct hp_iotab *iotab, u32 eid)
+{
+	struct hpio *hpio = NULL;
+	unsigned long flags;
+
+	read_lock_irqsave(&iotab->lock, flags);
+	hpio = __iotab_search_get(iotab, eid);
+	read_unlock_irqrestore(&iotab->lock, flags);
+
+	pr_info("find hpio %p for eid %u.\n", hpio, eid);
+
+	return hpio;
+}
+
+/*
+ * insert @hpio into @iotab; if an hpio with the same @eid already
+ * exists, cancel the insertion, take a reference on the duplicate and
+ * return it
+ */
+static struct hpio *iotab_insert(struct hp_iotab *iotab, struct hpio *hpio)
+{
+	struct hpio *dup = NULL;
+	unsigned long flags;
+
+	write_lock_irqsave(&iotab->lock, flags);
+	dup = __iotab_search_get(iotab, hpio->eid);
+	if (dup) {
+		pr_info("find existing hpio %p for eid %u, insert hpio %p failed.\n",
+			dup, hpio->eid, hpio);
+		goto unlock;
+	}
+	list_add(&hpio->list, &iotab->io_list);
+	iotab->io_cnt++;
+	pr_info("insert new hpio %p for eid %u.\n", hpio, hpio->eid);
+unlock:
+	write_unlock_irqrestore(&iotab->lock, flags);
+
+	return dup;
+}
+
+static void iotab_delete(struct hp_iotab *iotab, struct hpio *hpio)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&iotab->lock, flags);
+	list_del(&hpio->list);
+	iotab->io_cnt--;
+	if (!iotab->io_cnt)
+		wake_up(&iotab->empty_wq);
+	write_unlock_irqrestore(&iotab->lock, flags);
+
+	pr_info("delete hpio %p for eid %u from iotab.\n", hpio, hpio->eid);
+}
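+
+/*
+ * hpio lifetime: hpio_put() drops the kref; hpio_release() then removes
+ * the hpio from the iotab, frees the extent through ->free_extent() if
+ * one was attached, and finally frees the pages and the hpio itself.
+ */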
+
+static void hpio_clear_pages(struct hpio *hpio)
+{
+	int i;
+
+	if (!hpio->pages)
+		return;
+
+	for (i = 0; i < hpio->nr_page; i++)
+		if (hpio->pages[i]) {
+			put_page(hpio->pages[i]);
+			atomic64_sub(PAGE_SIZE, &hpio_mem);
+		}
+	kfree(hpio->pages);
+	atomic64_sub(sizeof(struct page *) * hpio->nr_page, &hpio_mem);
+	hpio->nr_page = 0;
+	hpio->pages = NULL;
+}
+
+/*
+ * alloc the pages array for @hpio; fill in newly allocated pages if @new_page
+ */
+static bool hpio_fill_pages(struct hpio *hpio, u32 nr_page, gfp_t gfp, bool new_page)
+{
+	int i;
+
+	BUG_ON(hpio->pages);
+	hpio->nr_page = nr_page;
+	hpio->pages = kcalloc(hpio->nr_page, sizeof(struct page *), gfp);
+	if (!hpio->pages)
+		goto err;
+	atomic64_add(sizeof(struct page *) * hpio->nr_page, &hpio_mem);
+
+	if (!new_page)
+		goto out;
+	for (i = 0; i < hpio->nr_page; i++) {
+		hpio->pages[i] = alloc_page(gfp);
+		if (!hpio->pages[i])
+			goto err;
+		atomic64_add(PAGE_SIZE, &hpio_mem);
+	}
+out:
+	return true;
+err:
+	hpio_clear_pages(hpio);
+
+	return false;
+}
+
+void hpio_free(struct hpio *hpio)
+{
+	if (!hpio)
+		return;
+
+	pr_info("free hpio = %p.\n", hpio);
+
+	hpio_clear_pages(hpio);
+	kfree(hpio);
+	atomic64_sub(sizeof(struct hpio), &hpio_mem);
+}
+
+struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page)
+{
+	struct hpio *hpio = NULL;
+
+	hpio = kzalloc(sizeof(struct hpio), gfp);
+	if (!hpio)
+		goto err;
+	atomic64_add(sizeof(struct hpio), &hpio_mem);
+	if (!hpio_fill_pages(hpio, nr_page, gfp, new_page))
+		goto err;
+	hpio->op = op;
+	atomic_set(&hpio->state, HPIO_INIT);
+	kref_init(&hpio->refcnt);
+	init_completion(&hpio->wait);
+
+	return hpio;
+err:
+	hpio_free(hpio);
+
+	return NULL;
+}
+
+struct hpio *hpio_get(u32 eid)
+{
+	return iotab_search_get(&iotab, eid);
+}
+
+struct hpio *hpio_get_alloc(u32 eid, u32 nr_page, gfp_t gfp, unsigned int op)
+{
+	struct hpio *hpio = NULL;
+	struct hpio *dup = NULL;
+
+	hpio = iotab_search_get(&iotab, eid);
+	if (hpio) {
+		pr_info("find existing hpio %p for eid %u.\n", hpio, eid);
+		goto out;
+	}
+	hpio = hpio_alloc(nr_page, gfp, op, true);
+	if (!hpio)
+		goto out;
+	hpio->eid = eid;
+
+	pr_info("alloc hpio %p for eid %u.\n", hpio, eid);
+
+	dup = iotab_insert(&iotab, hpio);
+	if (dup) {
+		hpio_free(hpio);
+		hpio = dup;
+	}
+out:
+	return hpio;
+}
+
+static void hpio_release(struct kref *kref)
+{
+	struct hpio *hpio = container_of(kref, struct hpio, refcnt);
+
+	iotab_delete(&iotab, hpio);
+	if (hpio->free_extent)
+		hpio->free_extent(hpio->eid);
+	hpio_free(hpio);
+}
+
+bool hpio_put(struct hpio *hpio)
+{
+	pr_info("put hpio %p for eid %u, ref = %u.\n", hpio, hpio->eid, kref_read(&hpio->refcnt));
+	return kref_put(&hpio->refcnt, hpio_release);
+}
+
+void hpio_complete(struct hpio *hpio)
+{
+	pr_info("complete hpio %p for eid %u.\n", hpio, hpio->eid);
+	complete_all(&hpio->wait);
+}
+
+void hpio_wait(struct hpio *hpio)
+{
+	wait_for_completion(&hpio->wait);
+}
+
+enum hpio_state hpio_get_state(struct hpio *hpio)
+{
+	return atomic_read(&hpio->state);
+}
+
+void hpio_set_state(struct hpio *hpio, enum hpio_state state)
+{
+	atomic_set(&hpio->state, state);
+}
+
+bool hpio_change_state(struct hpio *hpio, enum hpio_state from, enum hpio_state to)
+{
+	return atomic_cmpxchg(&hpio->state, from, to) == from;
+}
+
+static void dump_iotab(struct hp_iotab *iotab)
+{
+	struct hpio *hpio = NULL;
+	unsigned long flags;
+
+	pr_info("dump inflight hpio in iotab.\n");
+	read_lock_irqsave(&iotab->lock, flags);
+	list_for_each_entry(hpio, &iotab->io_list, list)
+		pr_info("hpio %p for eid %u is inflight.\n", hpio, hpio->eid);
+	read_unlock_irqrestore(&iotab->lock, flags);
+}
+
+void wait_for_iotab_empty(void)
+{
+	dump_iotab(&iotab);
+	wait_event(iotab.empty_wq, !iotab.io_cnt);
+}
diff --git a/drivers/hyperhold/hp_iotab.h b/drivers/hyperhold/hp_iotab.h
new file mode 100644
index 000000000000..b3785f7aaad9
--- /dev/null
+++ b/drivers/hyperhold/hp_iotab.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/hyperhold/hp_iotab.h
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef _HP_IOTAB_H_
+#define _HP_IOTAB_H_
+
+#include <linux/bio.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+enum hpio_state {
+	HPIO_INIT,
+	HPIO_SUBMIT,
+	HPIO_DONE,
+	HPIO_FAIL,
+};
+
+struct hpio;
+
+typedef void (*hp_endio)(struct hpio *);
+
+struct hpio {
+	u32 eid;
+	struct page **pages;
+	u32 nr_page;
+	void *private;
+
+	unsigned int op;
+	void (*free_extent)(u32 eid);
+
+	atomic_t state;
+	struct kref refcnt;
+	struct completion wait;
+	hp_endio endio;
+	struct work_struct endio_work;
+
+	struct bio *bio;
+	struct list_head list;
+};
+
+struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page);
+void hpio_free(struct hpio *hpio);
+
+struct hpio *hpio_get(u32 eid);
+bool hpio_put(struct hpio *hpio);
+struct hpio *hpio_get_alloc(u32 eid, u32 nr_page, gfp_t gfp, unsigned int op);
+
+void hpio_complete(struct hpio *hpio);
+void hpio_wait(struct hpio *hpio);
+
+enum hpio_state hpio_get_state(struct hpio *hpio);
+void hpio_set_state(struct hpio *hpio, enum hpio_state state);
+bool hpio_change_state(struct hpio *hpio, enum hpio_state from, enum hpio_state to);
+
+void wait_for_iotab_empty(void);
+
+u64 hpio_memory(void);
+#endif
diff --git a/drivers/hyperhold/hp_space.c b/drivers/hyperhold/hp_space.c
new file mode 100644
index 000000000000..cb3d3439c5a6
--- /dev/null
+++ b/drivers/hyperhold/hp_space.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/hyperhold/hp_space.c
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#define pr_fmt(fmt) "[HYPERHOLD]" fmt
+
+#include <linux/mm.h>
+
+#include "hp_space.h"
+
+atomic64_t spc_mem = ATOMIC64_INIT(0);
+
+u64 space_memory(void)
+{
+	return atomic64_read(&spc_mem);
+}
+
+void deinit_space(struct hp_space *spc)
+{
+	kvfree(spc->bitmap);
+	atomic64_sub(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), &spc_mem);
+	spc->ext_size = 0;
+	spc->nr_ext = 0;
+	atomic_set(&spc->last_alloc_bit, 0);
+	atomic_set(&spc->nr_alloced, 0);
+
+	pr_info("hyperhold space deinited.\n");
+}
+
+bool init_space(struct hp_space *spc, u64 dev_size, u32 ext_size)
+{
+	if (ext_size & (PAGE_SIZE - 1)) {
+		pr_err("extent size %u is not aligned to page size %lu!\n", ext_size, PAGE_SIZE);
+		return false;
+	}
+	if (dev_size & (ext_size - 1)) {
+		pr_err("device size %llu is not aligned to extent size %u!\n", dev_size, ext_size);
+		return false;
+	}
+	spc->ext_size = ext_size;
+	spc->nr_ext = div_u64(dev_size, ext_size);
+	atomic_set(&spc->last_alloc_bit, 0);
+	atomic_set(&spc->nr_alloced, 0);
+	init_waitqueue_head(&spc->empty_wq);
+	spc->bitmap = kvzalloc(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), GFP_KERNEL);
+	if (!spc->bitmap) {
+		pr_err("hyperhold bitmap alloc failed.\n");
+		return false;
+	}
+	atomic64_add(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), &spc_mem);
+
+	pr_info("hyperhold space init succeeded, capacity = %u x %u.\n", ext_size, spc->nr_ext);
+
+	return true;
+}
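+
+/*
+ * Extent ids are allocated by scanning the bitmap without a lock; races
+ * between allocators are settled by test_and_set_bit(), which retries
+ * the scan when it loses.
+ */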
+
+int alloc_eid(struct hp_space *spc)
+{
+	u32 bit;
+	u32 last_bit;
+
+retry:
+	last_bit = atomic_read(&spc->last_alloc_bit);
+	bit = find_next_zero_bit(spc->bitmap, spc->nr_ext, last_bit);
+	if (bit == spc->nr_ext)
+		bit = find_next_zero_bit(spc->bitmap, spc->nr_ext, 0);
+	if (bit == spc->nr_ext)
+		goto full;
+	if (test_and_set_bit(bit, spc->bitmap))
+		goto retry;
+
+	atomic_set(&spc->last_alloc_bit, bit);
+	atomic_inc(&spc->nr_alloced);
+
+	pr_info("hyperhold alloc extent %u.\n", bit);
+
+	return bit;
+full:
+	pr_err("hyperhold space is full.\n");
+
+	return -ENOSPC;
+}
+
+void free_eid(struct hp_space *spc, u32 eid)
+{
+	if (!test_and_clear_bit(eid, spc->bitmap)) {
+		pr_err("eid %u is not allocated!\n", eid);
+		BUG();
+		return;
+	}
+	if (atomic_dec_and_test(&spc->nr_alloced)) {
+		pr_info("notify space empty.\n");
+		wake_up(&spc->empty_wq);
+	}
+	pr_info("hyperhold free extent %u.\n", eid);
+}
+
+static void dump_space(struct hp_space *spc)
+{
+	u32 i = 0;
+
+	pr_info("dump alloced extent in space.\n");
+	for (i = 0; i < spc->nr_ext; i++)
+		if (test_bit(i, spc->bitmap))
+			pr_info("alloced eid %u.\n", i);
+}
+
+bool wait_for_space_empty(struct hp_space *spc, bool force)
+{
+	if (!atomic_read(&spc->nr_alloced))
+		return true;
+	if (!force)
+		return false;
+
+	dump_space(spc);
+	wait_event(spc->empty_wq, !atomic_read(&spc->nr_alloced));
+
+	return true;
+}
diff --git a/drivers/hyperhold/hp_space.h b/drivers/hyperhold/hp_space.h
new file mode 100644
index 000000000000..caaaf92a07f7
--- /dev/null
+++ b/drivers/hyperhold/hp_space.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/hyperhold/hp_space.h
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef _HP_SPACE_H_
+#define _HP_SPACE_H_
+
+#include <linux/wait.h>
+
+struct hp_space {
+	u32 ext_size;
+	u32 nr_ext;
+	unsigned long *bitmap;
+	atomic_t last_alloc_bit;
+	atomic_t nr_alloced;
+	wait_queue_head_t empty_wq;
+};
+
+void deinit_space(struct hp_space *spc);
+bool init_space(struct hp_space *spc, u64 dev_size, u32 ext_size);
+int alloc_eid(struct hp_space *spc);
+void free_eid(struct hp_space *spc, u32 eid);
+
+bool wait_for_space_empty(struct hp_space *spc, bool force);
+
+u64 space_memory(void);
+#endif
diff --git a/drivers/hyperhold/hyperhold.h b/drivers/hyperhold/hyperhold.h
new file mode 100644
index 000000000000..b65ff5444513
--- /dev/null
+++ b/drivers/hyperhold/hyperhold.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/hyperhold/hyperhold.h
+ *
+ * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
+ */
+
+#ifndef _HYPERHOLD_H_
+#define _HYPERHOLD_H_
+
+#include <linux/types.h>
+
+struct hpio;
+
+typedef void (*hp_endio)(struct hpio *);
+
+void hyperhold_disable(bool force);
+void hyperhold_enable(void);
+bool is_hyperhold_enable(void);
+
+u32 hyperhold_nr_extent(void);
+u32 hyperhold_extent_size(u32 eid);
+long hyperhold_address(u32 eid, u32 offset);
+int hyperhold_addr_extent(u64 addr);
+int hyperhold_addr_offset(u64 addr);
+
+int hyperhold_alloc_extent(void);
+void hyperhold_free_extent(u32 eid);
+void hyperhold_should_free_extent(u32 eid);
+
+struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page);
+void hyperhold_io_free(struct hpio *hpio);
+
+struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op);
+bool hyperhold_io_put(struct hpio *hpio);
+
+void hyperhold_io_complete(struct hpio *hpio);
+void hyperhold_io_wait(struct hpio *hpio);
+
+bool hyperhold_io_success(struct hpio *hpio);
+
+int hyperhold_io_extent(struct hpio *hpio);
+int hyperhold_io_operate(struct hpio *hpio);
+struct page *hyperhold_io_page(struct hpio *hpio, u32 index);
+bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page);
+u32 hyperhold_io_nr_page(struct hpio *hpio);
+void *hyperhold_io_private(struct hpio *hpio);
+
+int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv);
+int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv);
+
+#endif
-- 
Gitee