diff --git a/.oebuild/manifest.yaml b/.oebuild/manifest.yaml index e2a9cdd6cd38e2aa0884e31ac3fbe0eb0e7020b5..984c9d0ce0356b57fde53743e045914377a8ef06 100644 --- a/.oebuild/manifest.yaml +++ b/.oebuild/manifest.yaml @@ -30,6 +30,9 @@ manifest_list: HiEdge-driver: remote_url: https://gitee.com/HiEuler/hiedge_driver.git version: 0a87fac88ab30e0f5673a40440a9468fd1a97dae + Hispark-ss928v100-gcc-sdk: + remote_url: https://gitee.com/HiSpark/ss928v100_gcc.git + version: 5cf64d39a0e74640705c2c73519fb97bef118c2d Jailhouse: remote_url: https://gitee.com/src-openeuler/Jailhouse.git version: 5fc52fe2d58fd3fd929d97d1f10dbf7b49cdb53e diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/files/0001-yocto-928-sdk-build-support.patch b/bsp/meta-hisilicon/recipes-bsp/ss928/files/0001-yocto-928-sdk-build-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..bdbdffa9387575da26f8f2604284e004366a37ae --- /dev/null +++ b/bsp/meta-hisilicon/recipes-bsp/ss928/files/0001-yocto-928-sdk-build-support.patch @@ -0,0 +1,452 @@ +From 6d1c8fd5830e8f2e967ca9ca5fe45c999116356e Mon Sep 17 00:00:00 2001 +From: oee +Date: Tue, 16 Sep 2025 14:41:51 +0800 +Subject: [PATCH] add 928 sdk build support + +Signed-off-by: oee +--- + smp/a55_linux/mpp/cfg.mak | 4 +- + .../security_subsys/cipher/v3/Makefile | 4 +- + .../mpp/sample/ss928_fb_tool/Makefile | 15 + + .../mpp/sample/ss928_fb_tool/ss928_fb_tool.c | 370 ++++++++++++++++++ + 4 files changed, 389 insertions(+), 4 deletions(-) + create mode 100644 smp/a55_linux/mpp/sample/ss928_fb_tool/Makefile + create mode 100644 smp/a55_linux/mpp/sample/ss928_fb_tool/ss928_fb_tool.c + +diff --git a/smp/a55_linux/mpp/cfg.mak b/smp/a55_linux/mpp/cfg.mak +index 7056005..0eb5388 100644 +--- a/smp/a55_linux/mpp/cfg.mak ++++ b/smp/a55_linux/mpp/cfg.mak +@@ -44,14 +44,14 @@ export CONFIG_KERNEL_VERSION=linux-6.6.y + # CONFIG_KERNEL_AARCH64_V01C01_LINUX_MUSL is not set + # CONFIG_KERNEL_AARCH64_LINUX_MUSL_LLVM1504 is not set + export CONFIG_KERNEL_AARCH64_V01C01_LINUX=y +-export CONFIG_OT_CROSS=aarch64-v01c01-linux-gnu- ++export CONFIG_OT_CROSS=aarch64-openeuler-linux- + export CONFIG_LIBC_TYPE=glibc + export CONFIG_KERNEL_BIT=KERNEL_BIT_64 + # CONFIG_USER_AARCH64_MIX210 is not set + export CONFIG_USER_AARCH64_V01C01_LINUX=y + # CONFIG_USER_AARCH64_V01C01_LINUX_MUSL is not set + # CONFIG_USER_AARCH64_LINUX_MUSL_LLVM1504 is not set +-export CONFIG_OT_CROSS_LIB=aarch64-v01c01-linux-gnu- ++export CONFIG_OT_CROSS_LIB=aarch64-openeuler-linux- + export CONFIG_USER_BIT=USER_BIT_64 + export CONFIG_LINUX_STYLE=y + # CONFIG_BOTH_STYLE is not set +diff --git a/smp/a55_linux/mpp/component/security_subsys/cipher/v3/Makefile b/smp/a55_linux/mpp/component/security_subsys/cipher/v3/Makefile +index 9ce31b0..b7cd6d5 100644 +--- a/smp/a55_linux/mpp/component/security_subsys/cipher/v3/Makefile ++++ b/smp/a55_linux/mpp/component/security_subsys/cipher/v3/Makefile +@@ -4,9 +4,9 @@ + default: + @cp -r src/common/common_check_param.c src/api/ + @cp -r src/common/common_check_param.c src/drv/drivers/ +- @cd src && make mbedtls_patch && make ++# @cd src && make mbedtls_patch && make + + clean: + @rm -rf src/api/common_check_param.c + @rm -rf src/drv/drivers/common_check_param.c +- @cd src && make mbedtls_clean && make clean ++# @cd src && make mbedtls_clean && make clean +diff --git a/smp/a55_linux/mpp/sample/ss928_fb_tool/Makefile b/smp/a55_linux/mpp/sample/ss928_fb_tool/Makefile +new file mode 100644 +index 0000000..b6d5a87 +--- /dev/null ++++ b/smp/a55_linux/mpp/sample/ss928_fb_tool/Makefile +@@ -0,0 
+1,15 @@
++#include $(SCRIPTS_DIR)/mpp/sample/Makefile.param
++include ../Makefile.param
++
++SMP_SRCS := $(wildcard *.c)
++TARGET := $(SMP_SRCS:%.c=%)
++
++MPI_LIBS += $(REL_LIB)/libss_tde.a
++
++#TARGET_PATH := $(OUTPUT_DIR)/ss928_fb_tool
++TARGET_PATH := $(PWD)
++
++CFLAGS += -DCONFIG_SUPPORT_SAMPLE_ROTATION
++
++#include $(SCRIPTS_DIR)/mpp/sample/$(ARM_ARCH)_$(OSTYPE).mak
++include $(PWD)/../$(ARM_ARCH)_$(OSTYPE).mak
+diff --git a/smp/a55_linux/mpp/sample/ss928_fb_tool/ss928_fb_tool.c b/smp/a55_linux/mpp/sample/ss928_fb_tool/ss928_fb_tool.c
+new file mode 100644
+index 0000000..937f774
+--- /dev/null
++++ b/smp/a55_linux/mpp/sample/ss928_fb_tool/ss928_fb_tool.c
+@@ -0,0 +1,370 @@
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <signal.h>
++#include <errno.h>
++#include <fcntl.h>
++#include <pthread.h>
++#include <math.h>
++#include <sys/mman.h>
++#include <sys/ioctl.h>
++
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <sys/time.h>
++#include <sys/poll.h>
++#include <sys/prctl.h>
++#include <sys/select.h>
++#include <limits.h>
++#include <linux/fb.h>
++
++#include "securec.h"
++#include "loadbmp.h"
++
++#include "gfbg.h"
++#include "ot_common_tde.h"
++#include "ss_mpi_tde.h"
++#include "sample_comm.h"
++
++#define unused __attribute__((unused))
++
++#define FILE_LENGTH_MAX 12
++#define CMAP_LENGTH_MAX 256
++#define WIDTH_1080P 1920
++#define HEIGHT_1080P 1080
++#define WIDTH_800 800
++#define HEIGHT_600 600
++
++#define SAMPLE_IMAGE_WIDTH 300
++#define SAMPLE_IMAGE_HEIGHT 150
++#define SAMPLE_IMAGE_NUM 20
++#define GFBG_RED_1555 0xFC00
++#define GFBG_RED_8888 0xFFff0000
++#define WIDTH_1920 1920
++#define HEIGHT_1080 1080
++
++#define GRAPHICS_LAYER_G0 0
++#define GRAPHICS_LAYER_G1 1
++#define GRAPHICS_LAYER_G2 2
++#define GRAPHICS_LAYER_G3 3
++
++static unused struct fb_bitfield g_r16 = {10, 5, 0};
++static unused struct fb_bitfield g_g16 = {5, 5, 0};
++static unused struct fb_bitfield g_b16 = {0, 5, 0};
++static unused struct fb_bitfield g_a16 = {15, 1, 0};
++
++static unused struct fb_bitfield g_a32 = {24, 8, 0};
++static unused struct fb_bitfield g_r32 = {16, 8, 0};
++static unused struct fb_bitfield g_g32 = {8, 8, 0};
++static unused struct fb_bitfield g_b32 = {0, 8, 0};
++
++static unused struct fb_bitfield g_a4 = {0, 0, 0};
++static unused struct fb_bitfield g_r4 = {0, 4, 0};
++static unused struct fb_bitfield g_g4 = {0, 4, 0};
++static unused struct fb_bitfield g_b4 = {0, 4, 0};
++
++ot_vo_intf_type g_vo_intf_type = OT_VO_INTF_BT1120;
++osd_color_format g_osd_color_fmt = OSD_COLOR_FORMAT_RGB1555;
++
++#if (defined(CONFIG_OT_GFBG_SUPPORT) && defined(CONFIG_OT_VO_SUPPORT))
++#define GFBG_BE_WITH_VO 1
++#else
++#define GFBG_BE_WITH_VO 0
++#endif
++
++typedef struct {
++    ot_vo_dev vo_dev;
++    ot_vo_intf_type vo_intf_type;
++}vo_device_info;
++
++typedef struct {
++    td_s32 fd; /* fb's file descriptor */
++    td_s32 layer; /* which graphic layer */
++    td_s32 ctrlkey; /* {0,1,2,3}={1buffer, 2buffer, 0buffer pan display, 0buffer refresh} */
++    td_bool compress; /* image compressed or not */
++    ot_fb_color_format color_format; /* color format. 
*/ ++} pthread_gfbg_sample_info; ++ ++ ++void sigint_handler(int signum) {} ++static td_void sample_gfbg_to_exit(td_void) ++{ ++ signal(SIGINT, sigint_handler); ++ printf("Waiting for Ctrl+C...\n"); ++ pause(); ++ ++ return; ++} ++ ++static td_s32 sample_gfbg_start_vo(vo_device_info *vo_dev_info) ++{ ++#if GFBG_BE_WITH_VO ++ ot_vo_intf_type gfbg_vo_intf_type = vo_dev_info->vo_intf_type; ++ g_vo_intf_type = vo_dev_info->vo_intf_type; ++ ot_vo_dev vo_dev = vo_dev_info->vo_dev; ++ ot_vo_pub_attr pub_attr; ++ td_u32 vo_frm_rate; ++ ot_size size; ++ td_s32 ret; ++ sample_vo_cfg vo_config = {0}; ++ ++ /* step 1(start vo): start vo device. */ ++ pub_attr.intf_type = gfbg_vo_intf_type; ++ pub_attr.intf_sync = OT_VO_OUT_1080P60; ++ pub_attr.bg_color = COLOR_RGB_WHITE; ++ ret = sample_comm_vo_get_width_height(pub_attr.intf_sync, &size.width, &size.height, &vo_frm_rate); ++ if (ret != TD_SUCCESS) { ++ sample_print("get vo width and height failed with %d!\n", ret); ++ return ret; ++ } ++ ret = sample_comm_vo_start_dev(vo_dev, &pub_attr, &vo_config.user_sync, vo_config.dev_frame_rate); ++ if (ret != TD_SUCCESS) { ++ sample_print("start vo device failed with %d!\n", ret); ++ return ret; ++ } ++ /* ++ * step 2(start vo): bind G3 to VO device. ++ * do this after VO device started. ++ * set bind relationship. ++ */ ++ ret = ss_mpi_vo_unbind_layer(OT_VO_LAYER_G3, vo_dev); ++ if (ret != TD_SUCCESS) { ++ sample_print("un_bind_graphic_layer failed with %d!\n", ret); ++ return ret; ++ } ++ ++ ret = ss_mpi_vo_bind_layer(OT_VO_LAYER_G3, vo_dev); ++ if (ret != TD_SUCCESS) { ++ sample_print("bind_graphic_layer failed with %d!\n", ret); ++ return ret; ++ } ++ /* step 3(start vo): start hdmi device. */ ++ if (gfbg_vo_intf_type == OT_VO_INTF_HDMI) { ++ sample_comm_vo_hdmi_start(pub_attr.intf_sync); ++ } ++ /* if it's displayed on bt1120, we should start bt1120 */ ++ if (gfbg_vo_intf_type == OT_VO_INTF_BT1120) { ++ sample_comm_vo_bt1120_start(vo_dev, &pub_attr); ++ } ++ return TD_SUCCESS; ++#else ++ return TD_SUCCESS; ++#endif ++} ++ ++static td_s32 sample_init_var(pthread_gfbg_sample_info *info) ++{ ++ struct fb_var_screeninfo var; ++ ++ if (ioctl(info->fd, FBIOGET_VSCREENINFO, &var) < 0) { ++ sample_print("get variable screen info failed!\n"); ++ return TD_FAILURE; ++ } ++ ++ switch (info->color_format) { ++ case OT_FB_FORMAT_ARGB8888: ++ var.transp = g_a32; ++ var.red = g_r32; ++ var.green = g_g32; ++ var.blue = g_b32; ++ var.bits_per_pixel = 32; /* 32 for 4 byte */ ++ g_osd_color_fmt = OSD_COLOR_FORMAT_RGB8888; ++ break; ++ default: ++ var.transp = g_a16; ++ var.red = g_r16; ++ var.green = g_g16; ++ var.blue = g_b16; ++ var.bits_per_pixel = 16; /* 16 for 2 byte */ ++ break; ++ } ++ ++ switch (info->ctrlkey) { ++ case 3: /* 3 mouse case */ ++ var.xres_virtual = 48; /* 48 for alg data */ ++ var.yres_virtual = 48; /* 48 for alg data */ ++ var.xres = 48; /* 48 for alg data */ ++ var.yres = 48; /* 48 for alg data */ ++ break; ++ default: ++ var.xres_virtual = WIDTH_1080P; ++ var.yres_virtual = HEIGHT_1080P * 2; /* 2 for 2buf */ ++ var.xres = WIDTH_1080P; ++ var.yres = HEIGHT_1080P; ++ } ++ var.activate = FB_ACTIVATE_NOW; ++ ++ if (ioctl(info->fd, FBIOPUT_VSCREENINFO, &var) < 0) { ++ sample_print("put variable screen info failed!\n"); ++ return TD_FAILURE; ++ } ++ return TD_SUCCESS; ++} ++ ++static td_s32 sample_init_frame_buffer(pthread_gfbg_sample_info *info, const char *input_file) ++{ ++ td_bool show; ++ ot_fb_point point = {0, 0}; ++ td_char file[PATH_MAX + 1] = {0}; ++ ++ if (strlen(input_file) > PATH_MAX || 
realpath(input_file, file) == TD_NULL) {
++        return TD_FAILURE;
++    }
++    /* step 1. open framebuffer device overlay 0 */
++    info->fd = open(file, O_RDWR, 0);
++    if (info->fd < 0) {
++        perror("Error opening file");
++        sample_print("open %s failed!\n", file);
++        return TD_FAILURE;
++    }
++
++    show = TD_FALSE;
++    if (ioctl(info->fd, FBIOPUT_SHOW_GFBG, &show) < 0) {
++        sample_print("FBIOPUT_SHOW_GFBG failed!\n");
++        close(info->fd);
++        info->fd = -1;
++        return TD_FAILURE;
++    }
++
++    printf("filename:%s\n", file);
++
++    /* step 2. set the screen original position */
++    switch (info->ctrlkey) {
++        case 3: /* 3 mouse case */
++            point.x_pos = 150; /* 150 x pos */
++            point.y_pos = 150; /* 150 y pos */
++            break;
++        default:
++            point.x_pos = 0;
++            point.y_pos = 0;
++    }
++
++    if (ioctl(info->fd, FBIOPUT_SCREEN_ORIGIN_GFBG, &point) < 0) {
++        sample_print("set screen original show position failed!\n");
++        close(info->fd);
++        info->fd = -1;
++        return TD_FAILURE;
++    }
++
++    return TD_SUCCESS;
++}
++
++// TODO: optimize this function
++static const char* sample_get_file_name(pthread_gfbg_sample_info *info)
++{
++    const char* filename_list[] = {
++        "/dev/fb0",
++        "/dev/fb1",
++        "/dev/fb2",
++        "/dev/fb2", /* G3 also maps to /dev/fb2: gfbg.ko only registers fb0-fb2 */
++    };
++    if (info->layer >= GRAPHICS_LAYER_G0 && info->layer <= GRAPHICS_LAYER_G3) {
++        return filename_list[info->layer];
++    } else {
++        return "/dev/fb0";
++    }
++}
++
++static td_void sample_gfbg_stop_vo(vo_device_info *vo_dev_info)
++{
++#if GFBG_BE_WITH_VO
++    ot_vo_intf_type gfbg_vo_intf_type = vo_dev_info->vo_intf_type;
++    ot_vo_dev vo_dev = vo_dev_info->vo_dev;
++
++    if (gfbg_vo_intf_type == OT_VO_INTF_HDMI) {
++        sample_comm_vo_hdmi_stop();
++    }
++    sample_comm_vo_stop_dev(vo_dev);
++    return;
++#else
++    return;
++#endif
++}
++
++static int sample_gfbg_pandisplay_init(pthread_gfbg_sample_info *info)
++{
++    const char* file = sample_get_file_name(info);
++
++    if (sample_init_frame_buffer(info, file) != TD_SUCCESS) {
++        return -1;
++    }
++
++    if (sample_init_var(info) != TD_SUCCESS) {
++        close(info->fd);
++        info->fd = -1;
++        return -1;
++    }
++
++    td_bool show;
++    struct fb_var_screeninfo var;
++
++    if (ioctl(info->fd, FBIOGET_VSCREENINFO, &var) < 0) {
++        sample_print("get variable screen info failed!\n");
++        return TD_FAILURE;
++    }
++
++    show = TD_TRUE;
++    if (ioctl(info->fd, FBIOPUT_SHOW_GFBG, &show) < 0) {
++        sample_print("FBIOPUT_SHOW_GFBG failed!\n");
++        return TD_FAILURE;
++    }
++
++    // close(info->fd);
++    return 0;
++}
++
++static td_s32 sample_gfbg_standard_mode(vo_device_info *vo_dev_info)
++{
++    td_s32 ret;
++    pthread_gfbg_sample_info info0;
++    ot_vo_dev vo_dev = vo_dev_info->vo_dev;
++    ot_vb_cfg vb_conf;
++
++    /* step 1: init variable */
++    if (memset_s(&vb_conf, sizeof(ot_vb_cfg), 0, sizeof(ot_vb_cfg)) != EOK) {
++        sample_print("%s:%d:memset_s failed\n", __FUNCTION__, __LINE__);
++        return TD_FAILURE;
++    }
++    /* step 2: mpp system init. */
++    ret = sample_comm_sys_init(&vb_conf);
++    if (ret != TD_SUCCESS) {
++        sample_print("system init failed with %d!\n", ret);
++        return ret;
++    }
++    /*
++     * step 3: start VO device.
++     * NOTE: step 3 is optional when VO is running on other system.
++     */
++    ret = sample_gfbg_start_vo(vo_dev_info);
++    if (ret != TD_SUCCESS) {
++        sample_print("VO device %d start failed\n", vo_dev_info->vo_dev);
++        goto sample_gfbg_standard_mode_0;
++    }
++    /* step 4: start gfbg. */
++    info0.layer = vo_dev; /* VO device number */
++    info0.fd = -1;
++    info0.ctrlkey = 2; /* 2: 0buffer pan display */
++    info0.compress = TD_FALSE; /* compress opened or not */
++    info0.color_format = OT_FB_FORMAT_ARGB8888;
++    sample_gfbg_pandisplay_init(&info0);
++
++    sample_gfbg_to_exit();
++
++    sample_gfbg_stop_vo(vo_dev_info);
++sample_gfbg_standard_mode_0:
++    sample_comm_sys_exit();
++    return ret;
++}
++
++int main()
++{
++    SDK_init();
++    vo_device_info vo_dev_info;
++    vo_dev_info.vo_dev = SAMPLE_VO_DEV_DHD0;
++    vo_dev_info.vo_intf_type = OT_VO_INTF_HDMI;
++
++    sample_gfbg_standard_mode(&vo_dev_info);
++
++    SDK_exit();
++    return 0;
++}
+-- 
+2.34.1
+
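The fb tool above installs a SIGINT handler and then parks in pause(), which is what the hieulerpi1-fb.service unit in the next file relies on: ExecStop sends SIGINT and the process exits cleanly. A minimal smoke test on the target (a sketch; it assumes the sample was installed to /root/sample by hieulerpi1-sdk-pkg):

```sh
/root/sample/ss928_fb_tool &
pid=$!
sleep 5                  # give it time to bring up VO and the framebuffer
kill -INT "$pid"         # the same signal systemd sends via ExecStop
wait "$pid" && echo "fb tool exited cleanly"
```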
diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/files/hieulerpi1-fb.service b/bsp/meta-hisilicon/recipes-bsp/ss928/files/hieulerpi1-fb.service
new file mode 100644
index 0000000000000000000000000000000000000000..5c78eb8a0bd8f8dfa289bb65063db3e48796aa42
--- /dev/null
+++ b/bsp/meta-hisilicon/recipes-bsp/ss928/files/hieulerpi1-fb.service
@@ -0,0 +1,19 @@
+[Unit]
+Description=FB Tool Service with initialization
+After=hieulerpi1-bsp.service
+StartLimitIntervalSec=500
+StartLimitBurst=5
+
+[Service]
+Type=simple
+ExecStart=/root/sample/ss928_fb_tool
+ExecStop=/bin/kill -SIGINT $MAINPID
+TimeoutStartSec=30
+TimeoutStopSec=10
+Restart=on-failure
+RestartSec=5
+RemainAfterExit=no
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/files/load_sdk_driver b/bsp/meta-hisilicon/recipes-bsp/ss928/files/load_sdk_driver
new file mode 100644
index 0000000000000000000000000000000000000000..f7ba58c721724eda0e97dbc17dfc684098fe6b05
--- /dev/null
+++ b/bsp/meta-hisilicon/recipes-bsp/ss928/files/load_sdk_driver
@@ -0,0 +1,280 @@
+#!/bin/sh
+# Usage: ./load_ss928v100 [ -r|-i|-a ] [ -sensor0~3 ]
+#     -r : rmmod all modules
+#     -i : insmod all modules
+#     -a : rmmod all modules and then insmod them
+# e.g: ./load_ss928v100 -i -sensor0 os08a20 -sensor1 os08a20 -sensor2 os08a20 -sensor3 os08a20
+
+####### select sensor type for your test ########################
+####    os08a20          ####
+####    os05a10_slave    ####
+####    imx347_slave     ####
+####    imx485           ####
+####    os04a10          ####
+####    os08b10          ####
+####################Variables Definition##########################
+
+SNS_TYPE0=os08a20;       # sensor type
+SNS_TYPE1=os08a20;       # sensor type
+SNS_TYPE2=os08a20;       # sensor type
+SNS_TYPE3=os08a20;       # sensor type
+
+#DDR start:0x40000000, IPCM(2M); DSP(62M); MCU(192M); kernel start:0x50000000, OS(512M); MMZ start:0x70000000
+mem_total=8192           # total mem size in M; overridden by total_mem= in /proc/cmdline
+mem_start=0x40000000     # phy mem start
+ipcm_mem_size=2          # 2M, ipcm mem
+dsp_mem_size=62          # 62M, dsp mem
+mcu_mem_size=192         # 192M, mcu mem
+os_mem_size=512          # 512M, os mem
+
+mmz_start=0x70000000;    # mmz start addr
+mmz_size=3328M;          # 3328M, mmz size
+
+reserve_mem_size=0       # 0M, reserve mmz size
+##################################################################
+ko_path=/ko
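+
+# Worked example of the layout computed by calc_mmz_info below, using the
+# defaults above with a 4096M total (all sizes in MB):
+#   mmz_start = mem_start + os + mcu + dsp + ipcm
+#             = 0x40000000 + (512 + 192 + 62 + 2)M = 0x70000000
+#   mmz_size  = total - os - mcu - dsp - ipcm - reserve
+#             = 4096 - 512 - 192 - 62 - 2 - 0 = 3328M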
+
+function report_error()
+{
+    echo "******* Error: There's something wrong, please check! *****"
+    exit 1
+}
+
+function insert_ko()
+{
+    # sys config
+    insmod $ko_path/sys_config.ko sensors=sns0=$SNS_TYPE0,sns1=$SNS_TYPE1,sns2=$SNS_TYPE2,sns3=$SNS_TYPE3
+    # driver load
+    insmod $ko_path/ot_osal.ko anony=1 mmz_allocator=ot mmz=anonymous,0,$mmz_start,$mmz_size$1 || report_error
+    insmod $ko_path/ot_irq.ko
+    insmod $ko_path/ot_user_proc.ko
+    insmod $ko_path/ot_base.ko
+    insmod $ko_path/ot_sys.ko
+    insmod $ko_path/ot_tde.ko
+    insmod $ko_path/ot_vo.ko
+    insmod $ko_path/svp_npu/ot_svp_npu.ko
+    # gfbg: default fb0:argb1555,3840x2160,2buf;fb1:argb8888,1920x1080,2buf;fb2:clut4,3840x2160,1buf
+    insmod $ko_path/gfbg.ko video="gfbg:vram0_size:32400,vram1_size:16200,vram3_size:4052"
+
+    insmod $ko_path/ot_hdmi.ko
+    insmod $ko_path/ot_mipi_tx.ko g_smooth=0
+    insmod $ko_path/ot_mipi_rx.ko
+}
+
+function remove_ko()
+{
+    rmmod ot_mipi_rx
+    rmmod ot_mipi_tx
+    rmmod ot_hdmi
+    rmmod gfbg
+    rmmod svp_npu/ot_svp_npu.ko
+    rmmod ot_vo
+    rmmod ot_tde
+    rmmod ot_sys
+    rmmod ot_base
+    rmmod ot_user_proc
+    rmmod ot_irq
+    rmmod ot_osal
+    rmmod sys_config
+}
+
+load_usage()
+{
+    echo "Usage: $0 [-option] [-sensor0~3] "
+    echo "options:"
+    echo "    -i                       insert modules"
+    echo "    -r                       remove modules"
+    echo "    -a                       remove modules first, then insert modules"
+    echo "    -sensor0~3 sensor_name   config sensor type [default: os08a20]"
+    echo "    -total mem_size          config total mem size [unit: M, default: 4096]"
+    echo "    -osmem os_mem_size       config os mem size [unit: M, default: 512]"
+    echo "    -h                       help information"
+    echo -e "Available sensors: os08a20 os05a10_slave imx347_slave"
+    echo -e "for example: $0 -i -sensor0 os08a20 -sensor1 os08a20 -sensor2 os08a20 -sensor3 os08a20 -total 4096 -osmem 512\n"
+}
+
+function get_mem_size_from_cmdline() {
+    input="$1"
+    default_size="$2"
+    number=$(echo "$input" | sed 's/\([0-9]\+\).*/\1/')
+    suffix=$(echo "$input" | sed 's/[^MG]//g')
+
+    if [ -z "$suffix" ]; then
+        echo "$default_size"
+        return
+    fi
+
+    if [ -z "$number" ]; then
+        echo "$default_size"
+        return
+    fi
+
+    if [ "$suffix" = "M" ]; then
+        echo "$number"
+    elif [ "$suffix" = "G" ]; then
+        result=$(($number * 1024))
+        echo "$result"
+    else
+        echo "Invalid suffix: $suffix"
+        return 1
+    fi
+}
+
+function extract_and_update_mem_sizes() {
+    # Pull the mem= and total_mem= parameters from /proc/cmdline
+    cmd_mem_size=$(cat /proc/cmdline | grep -oE 'mem=[0-9]+[MG]' | head -n 1 | sed 's/mem=//')
+    cmd_total_mem_size=$(cat /proc/cmdline | grep -oE 'total_mem=[0-9]+[MG]' | head -n 1 | sed 's/total_mem=//')
+
+    if [ -z "$cmd_total_mem_size" ]; then
+        cmd_total_mem_size=$(cat /proc/cmdline | grep -oE 'total_mem_size=[0-9]+[MG]' | head -n 1 | sed 's/total_mem_size=//')
+    fi
+
+    # Fall back to the default when cmd_mem_size is empty
+    if [ -z "$cmd_mem_size" ]; then
+        os_mem_size=$(get_mem_size_from_cmdline "" "$os_mem_size")
+    else
+        os_mem_size=$(get_mem_size_from_cmdline "$cmd_mem_size" "$os_mem_size")
+    fi
+
+    # Fall back to the default when cmd_total_mem_size is empty
+    if [ -z "$cmd_total_mem_size" ]; then
+        mem_total=$(get_mem_size_from_cmdline "" "$mem_total")
+    else
+        mem_total=$(get_mem_size_from_cmdline "$cmd_total_mem_size" "$mem_total")
+    fi
+
+    echo "os_mem_size: $os_mem_size"
+    echo "mem_total: $mem_total"
+}
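+
+# Illustrative calls (an M/G suffix is normalized to MB; no suffix keeps the default):
+#   get_mem_size_from_cmdline "2G"   512  ->  2048
+#   get_mem_size_from_cmdline "768M" 512  ->  768
+#   get_mem_size_from_cmdline ""     512  ->  512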
+
+function calc_mmz_info()
+{
+    mmz_start=`echo "$mem_start $os_mem_size $mcu_mem_size $dsp_mem_size $ipcm_mem_size" |
+    awk 'BEGIN { temp = 0; }
+    {
+        temp = strtonum($1)/1024/1024 + $2 + $3 + $4 + $5;
+    }
+    END { printf("0x%x00000\n", temp); }'`
+
+    mmz_size=`echo "$mem_total $os_mem_size $mcu_mem_size $dsp_mem_size $ipcm_mem_size $reserve_mem_size" |
+    awk 'BEGIN { temp = 0; }
+    {
+        temp = $1 - $2 - $3 - $4 - $5 - $6;
+    }
+    END { printf("%dM\n", temp); }'`
+    echo "mmz_start: $mmz_start, mmz_size: $mmz_size"
+}
+
+b_arg_insmod=0
+b_arg_remove=0
+function parse_arg()
+{
+    ######################parse arg###################################
+    b_arg_sensor0=0
+    b_arg_sensor1=0
+    b_arg_sensor2=0
+    b_arg_sensor3=0
+    b_arg_total_mem=0
+    b_arg_os_mem=0
+
+    for arg in "$@"
+    do
+        if [ $b_arg_sensor0 -eq 1 ] ; then
+            b_arg_sensor0=0;
+            SNS_TYPE0=$arg;
+        fi
+        if [ $b_arg_sensor1 -eq 1 ] ; then
+            b_arg_sensor1=0;
+            SNS_TYPE1=$arg;
+        fi
+        if [ $b_arg_sensor2 -eq 1 ] ; then
+            b_arg_sensor2=0;
+            SNS_TYPE2=$arg;
+        fi
+        if [ $b_arg_sensor3 -eq 1 ] ; then
+            b_arg_sensor3=0;
+            SNS_TYPE3=$arg;
+        fi
+
+        if [ $b_arg_total_mem -eq 1 ]; then
+            b_arg_total_mem=0;
+            mem_total=$arg;
+
+            if [ -z "$mem_total" ]; then
+                echo "[error] mem_total is null"
+                exit;
+            fi
+        fi
+
+        if [ $b_arg_os_mem -eq 1 ] ; then
+            b_arg_os_mem=0;
+            os_mem_size=$arg;
+
+            if [ -z "$os_mem_size" ]; then
+                echo "[error] os_mem_size is null"
+                exit;
+            fi
+        fi
+
+        case $arg in
+            "-i")
+                b_arg_insmod=1;
+                ;;
+            "-r")
+                b_arg_remove=1;
+                ;;
+            "-a")
+                b_arg_insmod=1;
+                b_arg_remove=1;
+                ;;
+            "-h")
+                load_usage;
+                ;;
+            "-sensor0")
+                b_arg_sensor0=1;
+                ;;
+            "-sensor")
+                b_arg_sensor0=1;
+                ;;
+            "-sensor1")
+                b_arg_sensor1=1;
+                ;;
+            "-sensor2")
+                b_arg_sensor2=1;
+                ;;
+            "-sensor3")
+                b_arg_sensor3=1;
+                ;;
+            "-osmem")
+                b_arg_os_mem=1;
+                ;;
+            "-total")
+                b_arg_total_mem=1;
+                ;;
+        esac
+    done
+
+    if [ $os_mem_size -ge $((mem_total - dsp_mem_size - reserve_mem_size)) ] ; then
+        echo "[err] os_mem[$os_mem_size], over total_mem[$mem_total] - dsp_mem[$dsp_mem_size] - reserve_mem[$reserve_mem_size]"
+        exit;
+    fi
+    #######################parse arg end########################
+}
+
+if [ $# -lt 1 ]; then
+    load_usage;
+    exit 0;
+fi
+
+parse_arg "$@"
+extract_and_update_mem_sizes;
+calc_mmz_info;
+
+#######################Action###############################
+if [ $b_arg_remove -eq 1 ]; then
+    remove_ko;
+fi
+
+if [ $b_arg_insmod -eq 1 ]; then
+    insert_ko;
+fi
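On kernel6 images the recipe below rewrites S90AutoRun so that this script runs at boot with -i; for manual bring-up the invocations from its own usage text apply, e.g.:

```sh
/ko/load_sdk_driver -i -sensor0 os08a20 -total 4096 -osmem 512   # insert all modules
/ko/load_sdk_driver -a                                           # reload: rmmod, then insmod
```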
diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-bsp-pkg.bb b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-bsp-pkg.bb
index 818613c4d64bb0a7104aa6472436de66a930f1db..fdfb050adddac36c62259242388be28f4e683d24 100644
--- a/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-bsp-pkg.bb
+++ b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-bsp-pkg.bb
@@ -4,15 +4,20 @@ LICENSE = "MIT"
 LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
 
 DEPENDS = "update-rc.d-native"
+DEPENDS += " ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'hieulerpi1-sdk-pkg', '', d)} "
+do_fetch[depends] += "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'hieulerpi1-sdk-pkg:do_deploy', '', d)}"
 
 OPENEULER_LOCAL_NAME = "HiEuler-driver"
 
 RT_SUFFIX = "${@bb.utils.contains('DISTRO_FEATURES', 'preempt-rt', '-rt', '', d)}"
-KN_SUFFIX = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', '-6.6', '', d)}"
 
 SRC_URI = " \
-    file://HiEuler-driver/drivers${KN_SUFFIX}/ko${RT_SUFFIX}.tar.gz \
-    file://HiEuler-driver/drivers${KN_SUFFIX}/ko-extra.tar.gz \
+    ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', ' \
+    file://${DEPLOY_DIR}/third_party_sdk/ko.tar.gz \
+    ', ' \
+    file://HiEuler-driver/drivers/ko${RT_SUFFIX}.tar.gz \
+    file://HiEuler-driver/drivers/ko-extra.tar.gz \
+    ', d)} \
     file://HiEuler-driver/drivers/btools \
     file://HiEuler-driver/drivers/S90AutoRun \
     file://HiEuler-driver/drivers/pinmux.sh \
@@ -21,7 +26,7 @@ SRC_URI = " \
     file://HiEuler-driver/drivers/ws73.tar.gz \
     file://HiEuler-driver/drivers/sparklink-tools.tar.gz \
     file://HiEuler-driver/mcu \
-    ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', ' file://hieulerpi1-bsp.service ', '', d)} \
+    ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', ' file://hieulerpi1-bsp.service file://hieulerpi1-fb.service ', '', d)} \
 "
 
 S = "${WORKDIR}/HiEuler-driver/drivers"
@@ -40,29 +45,51 @@ do_install () {
     ln -s /usr/bin/btools ${D}/usr/bin/bspmm
     ln -s /usr/bin/btools ${D}/usr/bin/i2c_read
     ln -s /usr/bin/btools ${D}/usr/bin/i2c_write
-    install -m 0755 ${WORKDIR}/ko-extra/pre_vo ${D}/usr/bin/
+    if [ -e ${WORKDIR}/ko-extra/pre_vo ]; then
+        install -m 0755 ${WORKDIR}/ko-extra/pre_vo ${D}/usr/bin/
+    fi
 
     cp -r ${WORKDIR}/ko ${D}/
-    # cp -f ${WORKDIR}/ko-extra/ch343.ko ${D}/ko
+    if [ -e ${WORKDIR}/ko-extra/ch343.ko ]; then
+        cp -f ${WORKDIR}/ko-extra/ch343.ko ${D}/ko
+    fi
 
     #for mipi, use load_ss928v100 from ko-extra
-    cp -f ${WORKDIR}/ko-extra/load_ss928v100 ${D}/ko
-    # optimize awk format which may not recognized
-    sed -i 's/\$1\/1024\/1024/strtonum(\$1)\/1024\/1024/' ${D}/ko/load_ss928v100*
-    chmod 755 ${D}/ko/load_ss928v100
+    if [ -e ${WORKDIR}/ko-extra/load_ss928v100 ]; then
+        cp -f ${WORKDIR}/ko-extra/load_ss928v100 ${D}/ko
+        # optimize the awk format, which may not be recognized otherwise
+        sed -i 's/\$1\/1024\/1024/strtonum(\$1)\/1024\/1024/' ${D}/ko/load_ss928v100*
+        chmod 755 ${D}/ko/load_ss928v100
+    fi
 
     # install wifi-1102a firmware
-    # cp -f ${WORKDIR}/wifi-1102a-tools/plat.ko ${D}/ko
-    # cp -f ${WORKDIR}/wifi-1102a-tools/wifi.ko ${D}/ko
-    # install -m 0755 ${WORKDIR}/wifi-1102a-tools/start_wifi ${D}/usr/bin/
-    # install -d ${D}/vendor
-    # cp -rf ${WORKDIR}/wifi-1102a-tools/vendor/* ${D}/vendor
+    if [ -e ${WORKDIR}/wifi-1102a-tools ]; then
+        cp -f ${WORKDIR}/wifi-1102a-tools/plat.ko ${D}/ko
+        cp -f ${WORKDIR}/wifi-1102a-tools/wifi.ko ${D}/ko
+        install -m 0755 ${WORKDIR}/wifi-1102a-tools/start_wifi ${D}/usr/bin/
+        install -d ${D}/vendor
+        cp -rf ${WORKDIR}/wifi-1102a-tools/vendor/* ${D}/vendor
+    fi
 
     install -m 0755 ${S}/S90AutoRun ${D}${sysconfdir}/init.d/
     install -m 0755 ${S}/pinmux.sh ${D}${sysconfdir}/init.d/
+
+    # workaround for the new 6.6 kernel: only load the SDK ko for now; the remaining ko need to be packaged later
+    if [ -e ${WORKDIR}/ko/load_sdk_driver ]; then
+        echo "#!/bin/sh" > ${D}${sysconfdir}/init.d/S90AutoRun
+        echo "/ko/load_sdk_driver -i" >> ${D}${sysconfdir}/init.d/S90AutoRun
+    fi
+
     if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
         install -d ${D}${systemd_system_unitdir}
         install -m 0644 ${WORKDIR}/hieulerpi1-bsp.service ${D}${systemd_system_unitdir}
+        install -m 0644 ${WORKDIR}/hieulerpi1-fb.service ${D}${systemd_system_unitdir}
+        if ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'true', 'false', d)}; then
+            # enable auto start
+            install -d ${D}${sysconfdir}/systemd/system/multi-user.target.wants/
+            ln -sf ${systemd_system_unitdir}/hieulerpi1-bsp.service ${D}${sysconfdir}/systemd/system/multi-user.target.wants/hieulerpi1-bsp.service
+            ln -sf ${systemd_system_unitdir}/hieulerpi1-fb.service ${D}${sysconfdir}/systemd/system/multi-user.target.wants/hieulerpi1-fb.service
+        fi
     else
         update-rc.d -r ${D} S90AutoRun start 90 5 .
         update-rc.d -r ${D} pinmux.sh start 90 5 .
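With kernel6 in DISTRO_FEATURES, do_fetch of hieulerpi1-bsp-pkg resolves ko.tar.gz out of ${DEPLOY_DIR}, so hieulerpi1-sdk-pkg must have run do_deploy first; the do_fetch[depends] line above encodes exactly that ordering. To reproduce it by hand (a sketch; a full image build orders the tasks automatically):

```sh
bitbake hieulerpi1-sdk-pkg -c deploy   # populates ${DEPLOY_DIR}/third_party_sdk/
bitbake hieulerpi1-bsp-pkg             # do_fetch now finds ko.tar.gz
```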
diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-sdk-pkg.bb b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-sdk-pkg.bb
new file mode 100644
index 0000000000000000000000000000000000000000..752821024d747aa3679c48844bbb70fa17a85be5
--- /dev/null
+++ b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-sdk-pkg.bb
@@ -0,0 +1,54 @@
+DESCRIPTION = "rebuild the SDK ko, libs and init scripts for hieulerpi1 with Yocto; kernel 6.6 only"
+SECTION = "base"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34d15ab872e1eb3db3292ffb63006766"
+
+inherit module
+
+DEPENDS = "update-rc.d-native"
+
+OPENEULER_LOCAL_NAME = "Hispark-ss928v100-gcc-sdk"
+
+SRC_URI = " \
+    file://Hispark-ss928v100-gcc-sdk \
+    file://0001-yocto-928-sdk-build-support.patch \
+    file://load_sdk_driver \
+"
+
+S = "${WORKDIR}/Hispark-ss928v100-gcc-sdk"
+
+INSANE_SKIP:${PN} += "already-stripped"
+FILES:${PN} = "/root/sample"
+
+do_compile() {
+    export KERNEL_ROOT=${STAGING_KERNEL_BUILDDIR}
+    cd ${S}/smp/a55_linux/mpp/out/obj
+    oe_runmake
+    cd -
+    cd ${S}/smp/a55_linux/mpp/sample
+    oe_runmake
+    cd -
+}
+
+do_install () {
+    cd ${S}/smp/a55_linux/mpp/out/
+    install -m 0750 ${WORKDIR}/load_sdk_driver ko/
+    tar czf ko.tar.gz ko/
+    tar czf include.tar.gz include/
+    tar czf lib.tar.gz lib/
+    cd -
+    install -d ${D}/root/sample
+    find ${S}/smp/a55_linux/mpp/sample -type f -executable ! -name "*.so*" ! -name "*.a" ! -name "*.o" ! -name "*.c" ! -name "Makefile" \
+        | xargs -I {} install -m 0755 {} ${D}/root/sample
+}
+
+do_deploy[nostamp] = "1"
+do_deploy() {
+    install -d ${DEPLOY_DIR}/third_party_sdk
+    install -m 0644 ${S}/smp/a55_linux/mpp/out/ko.tar.gz ${DEPLOY_DIR}/third_party_sdk
+    install -m 0644 ${S}/smp/a55_linux/mpp/out/lib.tar.gz ${DEPLOY_DIR}/third_party_sdk
+    install -m 0644 ${S}/smp/a55_linux/mpp/out/include.tar.gz ${DEPLOY_DIR}/third_party_sdk
+}
+
+addtask deploy after do_install
+
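After do_deploy of the new recipe above, the three SDK archives land under ${DEPLOY_DIR}/third_party_sdk, where the two consumer recipes pick them up. A quick content check (a sketch; the deploy path assumes the default tmp/deploy layout):

```sh
deploy_dir=tmp/deploy                      # adjust to your build directory
for t in ko lib include; do
    echo "== $t.tar.gz =="
    tar tzf "$deploy_dir/third_party_sdk/$t.tar.gz" | head -n 3
done
```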
diff --git a/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-user-driver_2.0.2.2.bb b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-user-driver_2.0.2.2.bb
index 20f477b376b2ee62b2e33b9d1d6e15fb6c244e29..5c6e4298c0974bf73066b7848d40586e74142363 100644
--- a/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-user-driver_2.0.2.2.bb
+++ b/bsp/meta-hisilicon/recipes-bsp/ss928/hieulerpi1-user-driver_2.0.2.2.bb
@@ -3,6 +3,9 @@ DESCRIPTION = "user lib and headers repack from SS928V100_SDK"
 HOMEPAGE = "https://gitee.com/HiEuler/hardware_driver"
 LICENSE = "CLOSED"
 
+DEPENDS += " ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'hieulerpi1-sdk-pkg', '', d)} "
+do_fetch[depends] += "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'hieulerpi1-sdk-pkg:do_deploy', '', d)}"
+
 inherit pkgconfig
 
 # This driver library is depended by many ROS packages,
@@ -24,11 +27,14 @@ roslike_libdir_set[eventmask] = "bb.event.RecipePreFinalise"
 
 OPENEULER_LOCAL_NAME = "HiEuler-driver"
 
-KN_SUFFIX = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', '-6.6', '', d)}"
-
 SRC_URI = " \
-    file://HiEuler-driver/drivers${KN_SUFFIX}/lib.tar.gz \
-    file://HiEuler-driver/drivers${KN_SUFFIX}/include.tar.gz \
+    ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', ' \
+    file://${DEPLOY_DIR}/third_party_sdk/lib.tar.gz \
+    file://${DEPLOY_DIR}/third_party_sdk/include.tar.gz \
+    ', ' \
+    file://HiEuler-driver/drivers/lib.tar.gz \
+    file://HiEuler-driver/drivers/include.tar.gz \
+    ', d)} \
     file://hieulerpi1-user-driver.pc.in \
 "
 
@@ -80,4 +86,4 @@ FILES:${PN}-staticdev += " \
 # set these as private libraries, don't try to search provider for it
 PRIVATE_LIBS = "libgraph.so libascendcl.so "
-INSANE_SKIP:${PN} += "already-stripped dev-so"
+INSANE_SKIP:${PN} += "already-stripped dev-so installed-vs-shipped"
diff --git a/bsp/meta-hisilicon/recipes-core/device_sample/fb-tool_0.0.0.bb b/bsp/meta-hisilicon/recipes-core/device_sample/fb-tool_0.0.0.bb
deleted file mode 100755
index 23d20ea25d9ebabaa2a2d70c3866aca99395dbc4..0000000000000000000000000000000000000000
--- a/bsp/meta-hisilicon/recipes-core/device_sample/fb-tool_0.0.0.bb
+++ /dev/null
@@ -1,46 +0,0 @@
-SUMMARY = "hieuler device sample for fb_tool"
-DESCRIPTION = "device samples of hieulerpi of fb_tool"
-HOMEPAGE = "https://gitee.com/HiEuler/externed_device_sample"
-LICENSE = "CLOSED"
-
-inherit pkgconfig
-
-OPENEULER_LOCAL_NAME = "HiEuler-driver"
-
-KN_SUFFIX = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', '-6.6', '', d)}"
-
-SRC_URI = " \
-    file://HiEuler-driver/drivers${KN_SUFFIX}/ss928_fb_tool \
-"
-
-RDEPENDS:${PN} = "hieulerpi1-user-driver"
-
-S = "${WORKDIR}"
-
-do_compile:prepend () {
-}
-
-TARGET_CC_ARCH += "${LDFLAGS}"
-# Makefile does not support the use of the CC environment variable,
-# so use make CC="${CC}"
-EXTRA_OEMAKE += 'CC="${CC}"'
-
-# workaround to fix error:
-# `undefined reference to `vtable for __cxxabiv1::__class_type_info'`
-LDFLAGS:remove = "-Wl,--as-needed"
-
-do_compile () {
-    export REL_DIR=${S}/externed_device_sample/mpp/out
-    # TODO
-}
-
-do_install () {
-    install -d ${D}/root/
-    install -m 755 ${S}/HiEuler-driver/drivers-6.6/ss928_fb_tool ${D}/root/
-}
-
-FILES:${PN} = " \
-    /root/ss928_fb_tool \
-"
-
-INSANE_SKIP:${PN} += "already-stripped"
diff --git a/bsp/meta-hisilicon/recipes-core/images/image-hieulerpi1.inc b/bsp/meta-hisilicon/recipes-core/images/image-hieulerpi1.inc
index 0073e20071979e5a26eda8dbcb837e11a586f4ca..78ee70723654085c4c49878f612ecf201b6d86ac 100644
--- a/bsp/meta-hisilicon/recipes-core/images/image-hieulerpi1.inc
+++ b/bsp/meta-hisilicon/recipes-core/images/image-hieulerpi1.inc
@@ -10,7 +10,7 @@ v4l-utils \
 hostapd \
 hieulerpi1-user-driver \
 bluez5 \
-${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'fb-tool', 'device-sample', d)} \
+${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'hieulerpi1-sdk-pkg', 'device-sample', d)} \
 "
 
 # add related packages for ros
diff --git a/bsp/meta-hisilicon/recipes-kernel/linux/files/patch/0001-oh_ss928-oee.patch b/bsp/meta-hisilicon/recipes-kernel/linux/files/patch/0001-oh_ss928-oee.patch
new file mode 100644
index 0000000000000000000000000000000000000000..123d33183f1ecfb340c0167466d51aa56c5523b2
--- /dev/null
+++ b/bsp/meta-hisilicon/recipes-kernel/linux/files/patch/0001-oh_ss928-oee.patch
@@ -0,0 +1,136883 @@
+From 72b2e1893d62a53a6e138cd0855fee8093a4400a Mon Sep 17 00:00:00 2001
+From: ss
+Date: Fri, 22 Aug 2025 17:28:44 +0800
+Subject: [PATCH] oh_ss928 for oee
+
+Provenance of the source patch:
+oh_ss928.patch was extracted from the kernel.patch shipped in OpenHarmony5.1_SS928V100_*.
+
+This patch is based on oh_ss928.patch, with the additional changes listed below
+(fixing non-conformant parts of the original patch and adapting it to the Yocto build):
+
+===
+1. Fix the Yocto build break caused by a non-conformant autoconf path:
+Files involved:
+arch/arm64/boot/dts/vendor/Makefile
+Change:
++HOST_EXTRACFLAGS += -include include/generated/autoconf.h
+dtb-$(CONFIG_ARCH_SS928V100) += ss928v100-demb-flash.dtb
+dtb-$(CONFIG_ARCH_SS928V100) += ss928v100-demb-emmc.dtb
+dtb-$(CONFIG_ARCH_SS927V100) += ss927v100-demb-flash.dtb
+dtb-$(CONFIG_ARCH_SS927V100) += ss927v100-demb-emmc.dtb
+
+===
+2. Comment out the non-conformant include path
+Files involved:
+arch/arm64/boot/dts/vendor/ss928v100-demb-emmc.dts
+arch/arm64/boot/dts/vendor/ss928v100-demb-flash.dts
+Commented-out content:
+//#include "../../../../../include/generated/autoconf.h"
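+
+(Illustrative check for items 1 and 2: after applying the patch, the only
+remaining references should be the Makefile flag and the //-commented includes.)
+```
+grep -rn "generated/autoconf.h" arch/arm64/boot/dts/vendor/
+```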
+
+===
+3. The USB driver cannot find the dwc3 core.h header:
+The include flag below has no actual effect (the DWC3 config is enabled); presumably
+because the Yocto build keeps the build directory separate from the source directory,
+the path is wrong:
+```
+drivers/vendor/usb/Makefile
++ccflags-y += -Idrivers/usb/dwc3/ -DUSB_KERNEL_VERSION=\"$(USB_KERNEL_VERSION)\"
+```
+Solution: change the header include paths.
+drivers/vendor/usb/proc.c
+#include <core.h> is changed to: #include "../../usb/dwc3/core.h"
+drivers/vendor/usb/wing_usb.c
+#include <core.h> is changed to: #include "../../usb/dwc3/core.h"
+
+===
+4. Fix the following build errors:
+drivers/pwm/sysfs.c: In function 'duty_cycle1_store':
+drivers/pwm/sysfs.c:140:15: error: implicit declaration of function 'pwm_apply_state'; did you mean 'pwm_apply_args'? [-Werror=implicit-function-declaration]
+  140 |         ret = pwm_apply_state(pwm, &state);
+      |               ^~~~~~~~~~~~~~~
+      |               pwm_apply_args
+  CC      mm/show_mem.o
+drivers/pwm/pwm-bsp.c:309:22: error: initialization of 'int (*)(struct pwm_chip *, struct pwm_device *, struct pwm_state *)' from incompatible pointer type 'void (*)(struct pwm_chip *, struct pwm_device *, struct pwm_state *)' [-Werror=incompatible-pointer-types]
+  309 |         .get_state = bsp_pwm_get_state,
+      |                      ^~~~~~~~~~~~~~~~~
+drivers/pwm/pwm-bsp.c:309:22: note: (near initialization for 'bsp_pwm_ops.get_state')
+drivers/pwm/pwm-bsp.c: In function 'bsp_pwm_remove':
+drivers/pwm/pwm-bsp.c:404:24: error: void value not ignored as it ought to be
+  404 |         ret |= pwmchip_remove(&pwm_chip->chip);
+      |                ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Root cause: the following code from oh_ss928.patch must not be removed (it was
+deleted by mistake in the patch?); add it back:
+-static inline int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+-{
+-	return pwm_apply_might_sleep(pwm, state);
+-}
+
+===
+5. bsp_pwm_get_state needs to return int; changed the function type accordingly.
+
+===
+6. pwmchip_remove returns void, so its return value must not be assigned to ret.
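+
+(Illustrative check for items 4-6: after applying the patch the compat shim is
+back in the header and the driver callback uses the int-returning prototype.)
+```
+grep -n "pwm_apply_state" include/linux/pwm.h
+grep -n "bsp_pwm_get_state" drivers/pwm/pwm-bsp.c
+```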
+
+===
+7. Error:
+drivers/vendor/mmc/sdhci_shaolinsword.c:739:10: error: 'struct sdhci_ops' has no member named 'platform_init'
+  739 |         .platform_init = sdhci_hl_pltfm_init,
+      |          ^~~~~~~~~~~~~
+drivers/vendor/mmc/sdhci_shaolinsword.c:739:26: error: initialization of 'void (*)(struct sdhci_host *)' from incompatible pointer type 'int (*)(struct sdhci_host *)' [-Werror=incompatible-pointer-types]
+  739 |         .platform_init = sdhci_hl_pltfm_init,
+      |                          ^~~~~~~~~~~~~~~~~~~
+
+platform_init has been removed in the newer kernel; the original callback was
+invoked at the end of the sdhci_pltfm_init call sequence, so invoke it explicitly
+right after sdhci_pltfm_init.
+Change:
+- .platform_init = sdhci_hl_pltfm_init,
+53191     host = sdhci_pltfm_init(pdev, &hl_host->sdhci_hl_pdata, sizeof(struct sdhci_hl_priv));
+53192     if (IS_ERR(host))
+53193         return PTR_ERR(host);
+53194 +   ret = sdhci_hl_pltfm_init(host);
+53195 +   if (ret)
+53196 +       goto pltfm_free;
+
+===
+8. Error:
+drivers/vendor/mmc/sdhci_shaolinsword.c:739:26: note: (near initialization for 'sdhci_hl_ops.hw_reset')
+  CC      fs/lockd/host.o
+  AR      drivers/media/platform/sunxi/sun4i-csi/built-in.a
+drivers/vendor/mmc/sdhci_shaolinsword.c:746:20: error: 'SDHCI_QUIRK2_RESET_AFTER_SET_XFER_MODE' undeclared here (not in a function); did you mean 'SDHCI_QUIRK_RESET_AFTER_REQUEST'?
+  746 |         .quirks2 = SDHCI_QUIRK2_RESET_AFTER_SET_XFER_MODE |
+      |                    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Change:
+sdhci_hl_pltfm_init itself serves huanglong devices and no matching node was found
+in the 928 dts, so just keep it compiling for now:
+replace SDHCI_QUIRK2_RESET_AFTER_SET_XFER_MODE with SDHCI_QUIRK2_PRESET_VALUE_BROKEN
+
+---
+ OAT.xml | 632 ---
+ README.OpenSource | 11 -
+ README_OpenHarmony.md | 5 -
+ arch/arm64/Kconfig | 16 +-
+ arch/arm64/Kconfig.platforms | 17 +
+ arch/arm64/Makefile | 3 +
+ arch/arm64/boot/Makefile | 25 +-
+ arch/arm64/boot/dts/Makefile | 5 +
+ arch/arm64/boot/dts/vendor/Makefile | 3 +
+ .../boot/dts/vendor/ss928v100-demb-emmc.dts | 23 +
+ .../boot/dts/vendor/ss928v100-demb-flash.dts | 23 +
+ arch/arm64/boot/dts/vendor/ss928v100-demb.dts | 349 ++
+ arch/arm64/boot/dts/vendor/ss928v100.dtsi | 1017 +++++
+ .../boot/dts/vendor/ss928v100_family_usb.dtsi | 111 +
+ arch/arm64/configs/ss928v100_defconfig | 239 ++
+ arch/arm64/configs/ss928v100_emmc_defconfig | 3386 +++++++++++++++
+ arch/arm64/configs/ss928v100_nand_defconfig | 3277 ++++++++++++++
+ arch/arm64/mm/fault.c | 2 +-
+ arch/arm64/mm/flush.c | 1 +
+ arch/arm64/net/bpf_jit_comp.c | 4 +-
+ crypto/asymmetric_keys/pkcs7_parser.h | 5 -
+ drivers/Kconfig | 8 +-
+ drivers/Makefile | 8 +-
+ drivers/accesstokenid/Kconfig | 5 -
+ drivers/accesstokenid/Makefile | 2 -
+ drivers/accesstokenid/access_tokenid.c | 397 --
+ drivers/accesstokenid/access_tokenid.h | 73 -
+ drivers/android/Kconfig | 21 -
+ drivers/android/binder.c | 326 +-
+ drivers/android/binder_internal.h | 13 -
+ drivers/base/cpu.c | 38 -
+ drivers/block/zram/Kconfig | 2 -
+ drivers/block/zram/Makefile | 5 -
+ drivers/block/zram/zram_drv.c | 173 +-
+ drivers/block/zram/zram_drv.h | 102 -
+ drivers/block/zram/zram_group/Kconfig | 24 -
+ .../block/zram/zram_group/group_writeback.c | 735 ----
+ drivers/block/zram/zram_group/zlist.c | 235 -
+ drivers/block/zram/zram_group/zlist.h | 97 -
+ drivers/block/zram/zram_group/zram_group.c | 672 ---
+ drivers/block/zram/zram_group/zram_group.h | 98 -
+ drivers/clk/Kconfig | 1 +
+ drivers/clk/Makefile | 1 +
+ drivers/clk/vendor/Kconfig | 15 +
+ drivers/clk/vendor/Makefile | 8 +
+ drivers/clk/vendor/clk.c | 316 ++
+ drivers/clk/vendor/clk.h | 147 +
+ drivers/clk/vendor/clk_ss928v100.c | 646 +++
+ drivers/clk/vendor/clkgate-separated.c | 123 +
+ drivers/clk/vendor/crg.h | 40 +
+ drivers/clk/vendor/reset.c | 159 +
+ drivers/clk/vendor/reset.h | 49 +
+ drivers/dma-buf/Kconfig | 13 -
+ drivers/dma-buf/Makefile | 2 -
+ drivers/dma-buf/dma-buf-process-info.c | 165 -
+ drivers/dma-buf/dma-buf-process-info.h | 83 -
+ drivers/dma-buf/dma-buf.c | 17 -
+ drivers/dma/Kconfig | 14 +
+ drivers/dma/Makefile | 1 +
+ drivers/dma/edmacv310.c | 1463 +++++++
+ drivers/dma/edmacv310.h | 147 +
+ drivers/edmac/Kconfig | 29 +
+ drivers/edmac/Makefile | 4 +
+ drivers/edmac/edma_ss928v100.h | 59 +
+ drivers/edmac/edmacv310.c | 950 +++++
+ drivers/edmac/edmacv310.h | 186 +
+ drivers/gpio/Makefile | 1 +
+ drivers/gpio/gpio-pl061.c | 16 +-
+ drivers/gpio/vendor/Makefile | 1 +
+ drivers/gpio/vendor/vendor_gpio.c | 92 +
+ drivers/gpio/vendor/vendor_gpio.h | 27 +
+ drivers/hck/Kconfig | 21 -
+ drivers/hck/Makefile | 4 -
+ drivers/hck/vendor_hooks.c | 17 -
+ drivers/hyperhold/Kconfig | 14 -
+ drivers/hyperhold/Makefile | 4 -
+ drivers/hyperhold/hp_core.c | 854 ----
+ drivers/hyperhold/hp_device.c | 240 --
+ drivers/hyperhold/hp_device.h | 38 -
+ drivers/hyperhold/hp_iotab.c | 271 --
+ drivers/hyperhold/hp_iotab.h | 63 -
+ drivers/hyperhold/hp_space.c | 122 -
+ drivers/hyperhold/hp_space.h | 30 -
+ drivers/hyperhold/hyperhold.h 
| 52 - + drivers/i2c/Makefile | 1 + + drivers/i2c/busses/Kconfig | 30 + + drivers/i2c/busses/Makefile | 4 + + drivers/i2c/busses/i2c-bsp.c | 1537 +++++++ + drivers/i2c/i2c-dev.c | 10 + + drivers/i2c/vendor/Makefile | 1 + + drivers/i2c/vendor/vendor_i2c_dev.c | 72 + + drivers/i2c/vendor/vendor_i2c_dev.h | 18 + + .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 15 +- + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 339 +- + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 24 + + drivers/iommu/iommu-sva.c | 10 +- + drivers/iommu/iommu.c | 174 + + drivers/mfd/Kconfig | 12 +- + drivers/mfd/Makefile | 3 + + drivers/mfd/bsp_fmc.c | 135 + + drivers/mmc/core/core.h | 4 + + drivers/mmc/core/mmc.c | 310 +- + drivers/mmc/host/cqhci-core.c | 48 +- + drivers/mmc/host/cqhci.h | 9 + + drivers/mmc/host/sdhci.c | 97 +- + drivers/mmc/host/sdhci.h | 25 +- + drivers/mtd/Makefile | 6 + + drivers/mtd/nand/Kconfig | 28 + + drivers/mtd/nand/Makefile | 4 + + drivers/mtd/nand/fmc100/Kconfig | 16 + + drivers/mtd/nand/fmc100/Makefile | 26 + + drivers/mtd/nand/fmc100/fmc100.c | 1250 ++++++ + drivers/mtd/nand/fmc100/fmc100.h | 383 ++ + drivers/mtd/nand/fmc100/fmc100_os.c | 264 ++ + drivers/mtd/nand/fmc100/fmc100_spi_general.c | 335 ++ + drivers/mtd/nand/fmc100/fmc_spi_nand_ids.c | 476 +++ + drivers/mtd/nand/fmc100_nand/Kconfig | 46 + + drivers/mtd/nand/fmc100_nand/Makefile | 26 + + drivers/mtd/nand/fmc100_nand/fmc100_nand.c | 1141 +++++ + drivers/mtd/nand/fmc100_nand/fmc100_nand.h | 185 + + drivers/mtd/nand/fmc100_nand/fmc100_nand_os.c | 194 + + drivers/mtd/nand/fmc100_nand/fmc100_nand_os.h | 70 + + .../mtd/nand/fmc100_nand/fmc_nand_spl_ids.c | 191 + + drivers/mtd/nand/raw/Makefile | 5 + + drivers/mtd/nand/raw/internals.h | 4 + + drivers/mtd/nand/raw/match_table.c | 113 + + drivers/mtd/nand/raw/match_table.h | 62 + + drivers/mtd/nand/raw/nand_base.c | 40 + + drivers/mtd/nand/raw/nand_ids.c | 3 + + drivers/mtd/nand/raw/nfc_gen.c | 237 ++ + drivers/mtd/nand/raw/nfc_gen.h | 256 ++ + drivers/mtd/nand/raw/nfc_spl_ids.c | 169 + + drivers/mtd/spi-nor/Makefile | 3 + + drivers/mtd/spi-nor/bsp-generic.c | 25 + + drivers/mtd/spi-nor/controllers/Kconfig | 41 + + drivers/mtd/spi-nor/controllers/Makefile | 1 + + drivers/mtd/spi-nor/controllers/bsp-sfc.c | 793 ++++ + drivers/mtd/spi-nor/core.c | 312 +- + drivers/mtd/spi-nor/core.h | 28 + + drivers/net/ethernet/Kconfig | 1 + + drivers/net/ethernet/Makefile | 1 + + drivers/net/ethernet/vendor/Kconfig | 22 + + drivers/net/ethernet/vendor/Makefile | 6 + + drivers/net/ethernet/vendor/gmac/Kconfig | 104 + + drivers/net/ethernet/vendor/gmac/Makefile | 11 + + .../ethernet/vendor/gmac/autoeee/autoeee.c | 136 + + .../ethernet/vendor/gmac/autoeee/autoeee.h | 49 + + .../vendor/gmac/autoeee/phy_id_table.c | 177 + + drivers/net/ethernet/vendor/gmac/gmac.c | 2630 ++++++++++++ + drivers/net/ethernet/vendor/gmac/gmac.h | 982 +++++ + .../ethernet/vendor/gmac/gmac_ethtool_ops.c | 621 +++ + .../ethernet/vendor/gmac/gmac_ethtool_ops.h | 12 + + .../ethernet/vendor/gmac/gmac_external_phy.c | 563 +++ + drivers/net/ethernet/vendor/gmac/gmac_mdio.c | 72 + + drivers/net/ethernet/vendor/gmac/gmac_mdio.h | 114 + + .../ethernet/vendor/gmac/gmac_netdev_ops.c | 785 ++++ + .../ethernet/vendor/gmac/gmac_netdev_ops.h | 14 + + .../net/ethernet/vendor/gmac/gmac_phy_fixup.c | 396 ++ + .../net/ethernet/vendor/gmac/gmac_phy_fixup.h | 31 + + drivers/net/ethernet/vendor/gmac/gmac_pm.c | 410 ++ + drivers/net/ethernet/vendor/gmac/gmac_pm.h | 56 + + drivers/net/ethernet/vendor/gmac/gmac_proc.c | 125 + + 
drivers/net/ethernet/vendor/gmac/gmac_proc.h | 22 + + drivers/net/ethernet/vendor/gmac/version.mak | 1 + + drivers/net/phy/Kconfig | 7 + + drivers/net/phy/Makefile | 1 + + drivers/net/phy/mdio_bsp_gemac.c | 222 + + drivers/net/phy/mdio_bsp_gemac.h | 27 + + drivers/pci/Kconfig | 1 + + drivers/pci/Makefile | 1 + + drivers/pci/bsp_pcie/Kconfig | 27 + + drivers/pci/bsp_pcie/Makefile | 8 + + drivers/pci/bsp_pcie/pci.h | 85 + + drivers/pci/bsp_pcie/pcie.c | 606 +++ + drivers/pci/bsp_pcie/pcie_ss928v100.c | 383 ++ + drivers/pci/bsp_pcie/pcie_ss928v100.h | 74 + + drivers/pci/of.c | 10 + + drivers/pwm/Kconfig | 9 + + drivers/pwm/Makefile | 1 + + drivers/pwm/pwm-bsp.c | 426 ++ + drivers/pwm/sysfs.c | 117 +- + drivers/spi/Makefile | 1 + + drivers/spi/spi-pl022.c | 45 + + drivers/spi/vendor/Makefile | 1 + + drivers/spi/vendor/vendor_spi.c | 216 + + drivers/spi/vendor/vendor_spi.h | 85 + + drivers/staging/Kconfig | 9 - + drivers/staging/Makefile | 5 - + drivers/staging/blackbox/Kconfig | 108 - + drivers/staging/blackbox/Makefile | 5 - + drivers/staging/blackbox/blackbox_common.c | 255 -- + drivers/staging/blackbox/blackbox_core.c | 592 --- + drivers/staging/blackbox/blackbox_storage.c | 194 - + drivers/staging/hievent/Kconfig | 12 - + drivers/staging/hievent/Makefile | 2 - + drivers/staging/hievent/hievent_driver.c | 423 -- + drivers/staging/hievent/hievent_driver.h | 22 - + drivers/staging/hievent/hiview_hievent.c | 488 --- + drivers/staging/hievent/hiview_hievent.h | 34 - + drivers/staging/hilog/Kconfig | 22 - + drivers/staging/hilog/Makefile | 5 - + drivers/staging/hilog/hilog.c | 408 -- + drivers/staging/hisysevent/Kconfig | 6 - + drivers/staging/hisysevent/Makefile | 6 - + .../staging/hisysevent/hisysevent_builder.c | 363 -- + .../staging/hisysevent/hisysevent_builder.h | 87 - + .../staging/hisysevent/hisysevent_raw_data.c | 117 - + .../staging/hisysevent/hisysevent_raw_data.h | 33 - + .../hisysevent/hisysevent_raw_data_encoder.c | 123 - + .../hisysevent/hisysevent_raw_data_encoder.h | 21 - + .../staging/hisysevent/hiview_hisysevent.c | 145 - + drivers/staging/hungtask/Kconfig | 14 - + drivers/staging/hungtask/Makefile | 3 - + drivers/staging/hungtask/hungtask_base.c | 1031 ----- + drivers/staging/hungtask/hungtask_user.c | 260 -- + drivers/staging/hungtask/hungtask_user.h | 37 - + drivers/staging/zerohung/Kconfig | 7 - + drivers/staging/zerohung/Makefile | 2 - + drivers/staging/zerohung/watchpoint/Makefile | 2 - + .../zerohung/watchpoint/hung_wp_screen.c | 299 -- + drivers/staging/zerohung/zrhung_event.c | 61 - + drivers/tee/optee/supp.c | 35 +- + drivers/thermal/thermal_core.c | 1 - + drivers/thermal/thermal_core.h | 3 - + drivers/thermal/thermal_netlink.c | 9 +- + drivers/tty/serial/amba-pl011.c | 23 + + drivers/usb/gadget/Kconfig | 7 - + drivers/usb/gadget/epautoconf.c | 14 + + drivers/usb/gadget/function/Makefile | 5 - + drivers/usb/gadget/function/f_generic.c | 3789 ----------------- + drivers/usb/gadget/function/f_mass_storage.c | 17 + + drivers/usb/gadget/function/u_generic.h | 356 -- + drivers/usb/gadget/function/uvc_v4l2.c | 30 +- + drivers/usb/serial/usb_wwan.c | 4 + + drivers/vendor/Kconfig | 10 + + drivers/vendor/Makefile | 6 + + drivers/vendor/basedrv_clk/Kconfig | 3 + + drivers/vendor/basedrv_clk/Makefile | 7 + + drivers/vendor/basedrv_clk/basedrv-clock.h | 20 + + drivers/vendor/basedrv_clk/basedrv_clk.c | 341 ++ + drivers/vendor/basedrv_clk/basedrv_clk.h | 106 + + drivers/vendor/basedrv_clk/ss928v100/Makefile | 6 + + .../basedrv_clk/ss928v100/clk_ss928v100.c | 98 + + 
.../basedrv_clk/ss928v100/clk_ss928v100.h | 20 + + .../vendor/basedrv_clk/ss928v100/clk_ups.c | 532 +++ + drivers/vendor/cma/Kconfig | 16 + + drivers/vendor/cma/Makefile | 2 + + drivers/vendor/cma/cma.c | 176 + + drivers/vendor/mmc/CMakeLists.txt | 26 + + drivers/vendor/mmc/ChangLog | 7 + + drivers/vendor/mmc/Kconfig | 64 + + drivers/vendor/mmc/Makefile | 53 + + drivers/vendor/mmc/adapter/nebula_adapter.c | 1256 ++++++ + drivers/vendor/mmc/adapter/nebula_fmea.c | 116 + + drivers/vendor/mmc/adapter/nebula_quick.c | 254 ++ + drivers/vendor/mmc/adapter/nebula_quick.h | 87 + + drivers/vendor/mmc/adapter/nebula_quirk_ids.h | 57 + + drivers/vendor/mmc/dfx/mci_proc.c | 309 ++ + drivers/vendor/mmc/dfx/mci_proc.h | 19 + + drivers/vendor/mmc/dfx/nebula_dfx.c | 531 +++ + drivers/vendor/mmc/dfx/nebula_dfx.h | 38 + + drivers/vendor/mmc/driver_obj.mk | 3 + + drivers/vendor/mmc/dtsi_usage.txt | 58 + + drivers/vendor/mmc/nebula_fmea.h | 28 + + drivers/vendor/mmc/nebula_intf.c | 100 + + drivers/vendor/mmc/nebula_intf.h | 17 + + drivers/vendor/mmc/platform/platform_comm.c | 609 +++ + drivers/vendor/mmc/platform/platform_priv.h | 224 + + drivers/vendor/mmc/platform/platform_timing.h | 74 + + .../vendor/mmc/platform/sdhci_hi3519dv500.c | 354 ++ + .../vendor/mmc/platform/sdhci_hi3751v811_c.c | 117 + + drivers/vendor/mmc/platform/sdhci_hiwing.c | 188 + + .../vendor/mmc/platform/sdhci_shaolinaxe.c | 147 + + .../vendor/mmc/platform/sdhci_shaolinfist.c | 123 + + .../vendor/mmc/platform/sdhci_shaolinspear.c | 232 + + .../mmc/platform/sdhci_shaolinsword_c.c | 118 + + drivers/vendor/mmc/platform/sdhci_ss928v100.c | 346 ++ + drivers/vendor/mmc/platform/sdhci_tianhe.c | 371 ++ + drivers/vendor/mmc/platform/sdhci_tianhe.h | 101 + + .../vendor/mmc/platform/sdhci_wudangstick.c | 511 +++ + drivers/vendor/mmc/sdhci_nebula.c | 198 + + drivers/vendor/mmc/sdhci_nebula.h | 381 ++ + drivers/vendor/mmc/sdhci_shaolinsword.c | 826 ++++ + drivers/vendor/mmc/version.mak | 1 + + drivers/vendor/npu/Kconfig | 7 + + drivers/vendor/npu/Makefile | 6 + + drivers/vendor/npu/npu_misc.c | 770 ++++ + drivers/vendor/npu/npu_svm.c | 1413 ++++++ + drivers/vendor/npu/smmu_power_on.c | 91 + + drivers/vendor/usb/Kconfig | 6 + + drivers/vendor/usb/Makefile | 12 + + drivers/vendor/usb/defconfig | 5 + + drivers/vendor/usb/driver_config.mk | 5 + + drivers/vendor/usb/driver_obj.mk | 2 + + drivers/vendor/usb/proc.c | 152 + + drivers/vendor/usb/proc.h | 18 + + drivers/vendor/usb/version.mak | 1 + + drivers/vendor/usb/wing_usb.c | 1013 +++++ + drivers/vendor/usb/wing_usb.h | 144 + + drivers/vendor/usb_phy/Kconfig | 27 + + drivers/vendor/usb_phy/Makefile | 22 + + drivers/vendor/usb_phy/common.c | 93 + + drivers/vendor/usb_phy/driver_config.mk | 7 + + drivers/vendor/usb_phy/driver_obj.mk | 2 + + drivers/vendor/usb_phy/missile.c | 235 + + drivers/vendor/usb_phy/nano.c | 957 +++++ + drivers/vendor/usb_phy/phy.c | 291 ++ + drivers/vendor/usb_phy/phy.h | 134 + + drivers/vendor/usb_phy/platform/ss626v100.c | 201 + + drivers/vendor/usb_phy/proc.c | 138 + + drivers/vendor/usb_phy/proc.h | 17 + + drivers/vendor/usb_phy/reg_common.h | 257 ++ + drivers/vendor/usb_phy/reg_default.h | 17 + + drivers/vendor/usb_phy/reg_hiwingv500.h | 17 + + drivers/vendor/usb_phy/xvp.c | 137 + + fs/Kconfig | 6 - + fs/Makefile | 6 +- + fs/epfs/Kconfig | 12 - + fs/epfs/Makefile | 3 - + fs/epfs/dentry.c | 23 - + fs/epfs/dir.c | 18 - + fs/epfs/epfs.h | 43 - + fs/epfs/file.c | 299 -- + fs/epfs/inode.c | 126 - + fs/epfs/internal.h | 38 - + fs/epfs/main.c | 44 - + fs/epfs/super.c | 127 - + 
fs/exec.c | 2 - + fs/ext4/extents.c | 9 +- + fs/ext4/super.c | 7 - + fs/f2fs/f2fs.h | 4 +- + fs/f2fs/file.c | 24 +- + fs/hmdfs/Kconfig | 40 - + fs/hmdfs/Makefile | 17 - + fs/hmdfs/authority/authentication.c | 462 -- + fs/hmdfs/authority/authentication.h | 352 -- + fs/hmdfs/authority/config.c | 377 -- + fs/hmdfs/client_writeback.c | 543 --- + fs/hmdfs/client_writeback.h | 136 - + fs/hmdfs/comm/connection.c | 1279 ------ + fs/hmdfs/comm/connection.h | 358 -- + fs/hmdfs/comm/crypto.c | 262 -- + fs/hmdfs/comm/crypto.h | 36 - + fs/hmdfs/comm/device_node.c | 1647 ------- + fs/hmdfs/comm/device_node.h | 108 - + fs/hmdfs/comm/message_verify.c | 980 ----- + fs/hmdfs/comm/message_verify.h | 27 - + fs/hmdfs/comm/node_cb.c | 73 - + fs/hmdfs/comm/node_cb.h | 43 - + fs/hmdfs/comm/protocol.h | 454 -- + fs/hmdfs/comm/socket_adapter.c | 1121 ----- + fs/hmdfs/comm/socket_adapter.h | 179 - + fs/hmdfs/comm/transport.c | 1253 ------ + fs/hmdfs/comm/transport.h | 76 - + fs/hmdfs/dentry.c | 357 -- + fs/hmdfs/file_cloud.c | 425 -- + fs/hmdfs/file_local.c | 405 -- + fs/hmdfs/file_merge.c | 841 ---- + fs/hmdfs/file_remote.c | 1063 ----- + fs/hmdfs/file_remote.h | 30 - + fs/hmdfs/file_root.c | 174 - + fs/hmdfs/hmdfs.h | 370 -- + fs/hmdfs/hmdfs_client.c | 1123 ----- + fs/hmdfs/hmdfs_client.h | 121 - + fs/hmdfs/hmdfs_dentryfile.c | 2890 ------------- + fs/hmdfs/hmdfs_dentryfile.h | 349 -- + fs/hmdfs/hmdfs_dentryfile_cloud.c | 171 - + fs/hmdfs/hmdfs_dentryfile_cloud.h | 63 - + fs/hmdfs/hmdfs_device_view.h | 263 -- + fs/hmdfs/hmdfs_merge_view.h | 241 -- + fs/hmdfs/hmdfs_server.c | 2125 --------- + fs/hmdfs/hmdfs_server.h | 79 - + fs/hmdfs/hmdfs_share.c | 349 -- + fs/hmdfs/hmdfs_share.h | 63 - + fs/hmdfs/hmdfs_trace.h | 954 ----- + fs/hmdfs/inode.c | 357 -- + fs/hmdfs/inode.h | 264 -- + fs/hmdfs/inode_cloud.c | 446 -- + fs/hmdfs/inode_cloud_merge.c | 724 ---- + fs/hmdfs/inode_local.c | 1074 ----- + fs/hmdfs/inode_merge.c | 1414 ------ + fs/hmdfs/inode_remote.c | 996 ----- + fs/hmdfs/inode_root.c | 376 -- + fs/hmdfs/main.c | 1134 ----- + fs/hmdfs/server_writeback.c | 135 - + fs/hmdfs/server_writeback.h | 40 - + fs/hmdfs/stash.c | 2226 ---------- + fs/hmdfs/stash.h | 25 - + fs/hmdfs/super.c | 187 - + fs/proc/Makefile | 1 - + fs/proc/base.c | 208 - + fs/proc/meminfo.c | 31 +- + fs/proc/task_mmu.c | 14 - + fs/pstore/Kconfig | 63 - + fs/pstore/internal.h | 3 - + fs/pstore/platform.c | 109 - + fs/pstore/ram.c | 47 +- + fs/sharefs/Kconfig | 24 - + fs/sharefs/Makefile | 12 - + fs/sharefs/authentication.c | 98 - + fs/sharefs/authentication.h | 79 - + fs/sharefs/config.c | 372 -- + fs/sharefs/dentry.c | 41 - + fs/sharefs/file.c | 269 -- + fs/sharefs/inode.c | 376 -- + fs/sharefs/lookup.c | 338 -- + fs/sharefs/main.c | 193 - + fs/sharefs/sharefs.h | 245 -- + fs/sharefs/super.c | 214 - + fs/verity/enable.c | 238 +- + fs/verity/fsverity_private.h | 16 +- + fs/verity/hash_algs.c | 2 - + fs/verity/open.c | 24 +- + fs/verity/signature.c | 47 +- + fs/verity/verify.c | 22 - + include/dfx/hiview_hisysevent.h | 67 - + include/dfx/hung_wp_screen.h | 24 - + include/dfx/hungtask_base.h | 111 - + include/dfx/zrhung.h | 12 - + include/dt-bindings/clock/basedrv-clock.h | 20 + + include/dt-bindings/clock/ss928v100_clock.h | 100 + + include/linux/blackbox.h | 84 - + include/linux/blackbox_common.h | 44 - + include/linux/blackbox_storage.h | 22 - + include/linux/bpf.h | 17 +- + include/linux/bsp_cma.h | 41 + + include/linux/code_sign.h | 92 - + include/linux/cpuhotplug.h | 3 - + include/linux/cpumask.h | 46 - + include/linux/edmac.h | 80 + + 
include/linux/fsverity.h | 37 - + include/linux/gfp.h | 14 +- + include/linux/gfp_types.h | 6 +- + include/linux/hck/lite_hck_ced.h | 50 - + include/linux/hck/lite_hck_code_sign.h | 39 - + include/linux/hck/lite_hck_hideaddr.h | 25 - + include/linux/hck/lite_hck_inet.h | 26 - + include/linux/hck/lite_hck_jit_memory.h | 41 - + include/linux/hck/lite_hck_sample.h | 36 - + include/linux/hck/lite_hck_xpm.h | 55 - + include/linux/hck/lite_vendor_hooks.h | 126 - + include/linux/highmem.h | 2 +- + include/linux/i2c.h | 4 + + include/linux/iommu.h | 4 + + include/linux/lsm_hook_defs.h | 3 - + include/linux/memcontrol.h | 49 - + include/linux/mfd/bsp_fmc.h | 479 +++ + include/linux/mm.h | 27 - + include/linux/mm_inline.h | 4 - + include/linux/mm_types.h | 13 - + include/linux/mman.h | 3 +- + include/linux/mmc/host.h | 5 + + include/linux/mmzone.h | 52 +- + include/linux/mtd/mtd.h | 42 +- + include/linux/mtd/spi-nor.h | 80 +- + include/linux/netdevice.h | 4 +- + include/linux/nmi.h | 21 - + include/linux/oid_registry.h | 3 - + include/linux/page-flags.h | 28 - + include/linux/pstore.h | 8 - + include/linux/pstore_ram.h | 1 - + include/linux/pwm.h | 8 +- + include/linux/sched.h | 124 - + include/linux/sched/core_ctl.h | 14 - + include/linux/sched/cpufreq.h | 3 - + include/linux/sched/frame_rtg.h | 75 - + include/linux/sched/isolation.h | 19 - + include/linux/sched/prio.h | 18 +- + include/linux/sched/rtg.h | 65 - + include/linux/sched/rtg_ctrl.h | 99 - + include/linux/sched/stat.h | 8 - + include/linux/sched/sysctl.h | 20 - + include/linux/securec.h | 629 +++ + include/linux/securectype.h | 585 +++ + include/linux/security.h | 9 - + include/linux/stop_machine.h | 11 - + include/linux/swap.h | 20 - + include/linux/timer.h | 7 - + include/linux/uidgid.h | 8 - + include/linux/vendor/sva_ext.h | 88 + + include/linux/vendor/vendor_i2c.h | 27 + + include/linux/vm_event_item.h | 18 - + include/trace/events/eas_sched.h | 76 - + include/trace/events/mmflags.h | 17 +- + include/trace/events/rtg.h | 146 - + include/trace/events/sched.h | 164 - + include/trace/events/vmscan.h | 30 - + include/trace/events/walt.h | 256 -- + include/uapi/asm-generic/mman-common.h | 4 - + include/uapi/linux/android/binder.h | 19 - + include/uapi/linux/fsverity.h | 22 - + include/uapi/linux/sched.h | 4 +- + include/uapi/linux/sched/types.h | 19 - + init/Kconfig | 63 - + init/init_task.c | 3 - + kernel/bpf/arraymap.c | 26 +- + kernel/bpf/core.c | 1 - + kernel/bpf/syscall.c | 7 +- + kernel/bpf/trampoline.c | 47 +- + kernel/bpf/verifier.c | 93 +- + kernel/cgroup/cgroup-v1.c | 5 - + kernel/cpu.c | 17 - + kernel/cred.c | 2 - + kernel/dma/contiguous.c | 19 + + kernel/exit.c | 15 - + kernel/fork.c | 18 +- + kernel/hung_task.c | 41 +- + kernel/irq/cpuhotplug.c | 63 +- + kernel/irq/proc.c | 6 - + kernel/nsproxy.c | 6 - + kernel/sched/Makefile | 4 - + kernel/sched/core.c | 828 +--- + kernel/sched/core_ctl.c | 1061 ----- + kernel/sched/core_ctl.h | 19 - + kernel/sched/cpufreq_schedutil.c | 48 - + kernel/sched/cpupri.c | 3 - + kernel/sched/cputime.c | 15 - + kernel/sched/deadline.c | 6 - + kernel/sched/debug.c | 24 - + kernel/sched/fair.c | 725 +--- + kernel/sched/features.h | 7 - + kernel/sched/rt.c | 294 +- + kernel/sched/rtg/Kconfig | 40 - + kernel/sched/rtg/Makefile | 3 - + kernel/sched/rtg/frame_rtg.c | 1229 ------ + kernel/sched/rtg/frame_rtg.h | 116 - + kernel/sched/rtg/rtg.c | 1258 ------ + kernel/sched/rtg/rtg.h | 64 - + kernel/sched/rtg/rtg_ctrl.c | 934 ---- + kernel/sched/rtg/rtg_ctrl.h | 90 - + kernel/sched/sched.h | 476 +-- + 
kernel/sched/sched_avg.c | 186 - + kernel/sched/stop_task.c | 6 - + kernel/sched/topology.c | 26 +- + kernel/sched/walt.c | 1862 -------- + kernel/sched/walt.h | 255 -- + kernel/smp.c | 4 +- + kernel/stop_machine.c | 4 - + kernel/sysctl.c | 52 - + kernel/time/timer.c | 77 - + kernel/watchdog.c | 37 +- + lib/Kconfig.debug | 7 - + lib/Makefile | 2 +- + lib/securec/LICENSE | 124 + + lib/securec/Makefile | 1 + + lib/securec/README.en.md | 59 + + lib/securec/README.md | 56 + + lib/securec/src/Makefile | 17 + + lib/securec/src/input.inl | 2229 ++++++++++ + lib/securec/src/memcpy_s.c | 555 +++ + lib/securec/src/memmove_s.c | 123 + + lib/securec/src/memset_s.c | 510 +++ + lib/securec/src/output.inl | 1720 ++++++++ + lib/securec/src/scanf_s.c | 51 + + lib/securec/src/secinput.h | 181 + + lib/securec/src/securecutil.c | 81 + + lib/securec/src/securecutil.h | 574 +++ + lib/securec/src/secureinput_a.c | 38 + + lib/securec/src/secureprintoutput.h | 146 + + lib/securec/src/secureprintoutput_a.c | 112 + + lib/securec/src/snprintf_s.c | 110 + + lib/securec/src/sprintf_s.c | 58 + + lib/securec/src/sscanf_s.c | 58 + + lib/securec/src/strcat_s.c | 101 + + lib/securec/src/strcpy_s.c | 353 ++ + lib/securec/src/strncat_s.c | 119 + + lib/securec/src/strncpy_s.c | 145 + + lib/securec/src/strtok_s.c | 116 + + lib/securec/src/vscanf_s.c | 63 + + lib/securec/src/vsnprintf_s.c | 138 + + lib/securec/src/vsprintf_s.c | 67 + + lib/securec/src/vsscanf_s.c | 87 + + mm/Kconfig | 68 - + mm/Makefile | 6 - + mm/compaction.c | 2 +- + mm/internal.h | 141 - + mm/memcg_control.c | 488 --- + mm/memcg_reclaim.c | 536 --- + mm/memcontrol.c | 80 +- + mm/memory.c | 40 +- + mm/memory_hotplug.c | 7 - + mm/memory_monitor.c | 58 - + mm/mm_init.c | 6 - + mm/mmap.c | 25 - + mm/mprotect.c | 9 - + mm/page_alloc.c | 43 +- + mm/purgeable.c | 348 -- + mm/purgeable_ashmem_trigger.c | 134 - + mm/rmap.c | 28 - + mm/slub.c | 138 +- + mm/swap.c | 7 - + mm/swapfile.c | 23 - + mm/vmscan.c | 325 +- + mm/vmstat.c | 31 +- + mm/workingset.c | 66 - + mm/zswapd.c | 911 ---- + mm/zswapd_control.c | 860 ---- + mm/zswapd_internal.h | 41 - + net/core/net-sysfs.c | 6 +- + net/l2tp/l2tp_core.c | 15 +- + net/l2tp/l2tp_core.h | 3 +- + net/l2tp/l2tp_netlink.c | 4 +- + net/l2tp/l2tp_ppp.c | 3 +- + samples/Kconfig | 23 - + samples/Makefile | 1 - + samples/hck/Makefile | 6 - + samples/hck/call.c | 24 - + samples/hck/register.c | 48 - + samples/hck/register_one.c | 31 - + security/Kconfig | 2 - + security/Makefile | 3 - + security/security.c | 15 - + security/selinux/hooks.c | 2 - + security/selinux/include/classmap.h | 10 - + .../selftests/bpf/prog_tests/tailcalls.c | 156 - + .../selftests/bpf/progs/tailcall_freplace.c | 23 - + .../testing/selftests/bpf/progs/tc_bpf2bpf.c | 23 - + 617 files changed, 58428 insertions(+), 67769 deletions(-) + delete mode 100644 OAT.xml + delete mode 100644 README.OpenSource + delete mode 100644 README_OpenHarmony.md + create mode 100644 arch/arm64/boot/dts/vendor/Makefile + create mode 100644 arch/arm64/boot/dts/vendor/ss928v100-demb-emmc.dts + create mode 100644 arch/arm64/boot/dts/vendor/ss928v100-demb-flash.dts + create mode 100644 arch/arm64/boot/dts/vendor/ss928v100-demb.dts + create mode 100644 arch/arm64/boot/dts/vendor/ss928v100.dtsi + create mode 100644 arch/arm64/boot/dts/vendor/ss928v100_family_usb.dtsi + create mode 100644 arch/arm64/configs/ss928v100_defconfig + create mode 100644 arch/arm64/configs/ss928v100_emmc_defconfig + create mode 100644 arch/arm64/configs/ss928v100_nand_defconfig + delete mode 100644 
drivers/accesstokenid/Kconfig + delete mode 100644 drivers/accesstokenid/Makefile + delete mode 100644 drivers/accesstokenid/access_tokenid.c + delete mode 100644 drivers/accesstokenid/access_tokenid.h + delete mode 100644 drivers/block/zram/zram_group/Kconfig + delete mode 100644 drivers/block/zram/zram_group/group_writeback.c + delete mode 100644 drivers/block/zram/zram_group/zlist.c + delete mode 100644 drivers/block/zram/zram_group/zlist.h + delete mode 100644 drivers/block/zram/zram_group/zram_group.c + delete mode 100644 drivers/block/zram/zram_group/zram_group.h + create mode 100644 drivers/clk/vendor/Kconfig + create mode 100644 drivers/clk/vendor/Makefile + create mode 100644 drivers/clk/vendor/clk.c + create mode 100644 drivers/clk/vendor/clk.h + create mode 100644 drivers/clk/vendor/clk_ss928v100.c + create mode 100644 drivers/clk/vendor/clkgate-separated.c + create mode 100644 drivers/clk/vendor/crg.h + create mode 100644 drivers/clk/vendor/reset.c + create mode 100644 drivers/clk/vendor/reset.h + delete mode 100755 drivers/dma-buf/dma-buf-process-info.c + delete mode 100755 drivers/dma-buf/dma-buf-process-info.h + create mode 100644 drivers/dma/edmacv310.c + create mode 100644 drivers/dma/edmacv310.h + create mode 100644 drivers/edmac/Kconfig + create mode 100644 drivers/edmac/Makefile + create mode 100644 drivers/edmac/edma_ss928v100.h + create mode 100644 drivers/edmac/edmacv310.c + create mode 100644 drivers/edmac/edmacv310.h + create mode 100644 drivers/gpio/vendor/Makefile + create mode 100644 drivers/gpio/vendor/vendor_gpio.c + create mode 100644 drivers/gpio/vendor/vendor_gpio.h + delete mode 100644 drivers/hck/Kconfig + delete mode 100644 drivers/hck/Makefile + delete mode 100644 drivers/hck/vendor_hooks.c + delete mode 100644 drivers/hyperhold/Kconfig + delete mode 100644 drivers/hyperhold/Makefile + delete mode 100644 drivers/hyperhold/hp_core.c + delete mode 100644 drivers/hyperhold/hp_device.c + delete mode 100644 drivers/hyperhold/hp_device.h + delete mode 100644 drivers/hyperhold/hp_iotab.c + delete mode 100644 drivers/hyperhold/hp_iotab.h + delete mode 100644 drivers/hyperhold/hp_space.c + delete mode 100644 drivers/hyperhold/hp_space.h + delete mode 100644 drivers/hyperhold/hyperhold.h + create mode 100644 drivers/i2c/busses/i2c-bsp.c + create mode 100644 drivers/i2c/vendor/Makefile + create mode 100644 drivers/i2c/vendor/vendor_i2c_dev.c + create mode 100644 drivers/i2c/vendor/vendor_i2c_dev.h + create mode 100644 drivers/mfd/bsp_fmc.c + create mode 100644 drivers/mtd/nand/fmc100/Kconfig + create mode 100644 drivers/mtd/nand/fmc100/Makefile + create mode 100644 drivers/mtd/nand/fmc100/fmc100.c + create mode 100644 drivers/mtd/nand/fmc100/fmc100.h + create mode 100644 drivers/mtd/nand/fmc100/fmc100_os.c + create mode 100644 drivers/mtd/nand/fmc100/fmc100_spi_general.c + create mode 100644 drivers/mtd/nand/fmc100/fmc_spi_nand_ids.c + create mode 100644 drivers/mtd/nand/fmc100_nand/Kconfig + create mode 100644 drivers/mtd/nand/fmc100_nand/Makefile + create mode 100644 drivers/mtd/nand/fmc100_nand/fmc100_nand.c + create mode 100644 drivers/mtd/nand/fmc100_nand/fmc100_nand.h + create mode 100644 drivers/mtd/nand/fmc100_nand/fmc100_nand_os.c + create mode 100644 drivers/mtd/nand/fmc100_nand/fmc100_nand_os.h + create mode 100644 drivers/mtd/nand/fmc100_nand/fmc_nand_spl_ids.c + create mode 100644 drivers/mtd/nand/raw/match_table.c + create mode 100644 drivers/mtd/nand/raw/match_table.h + create mode 100644 drivers/mtd/nand/raw/nfc_gen.c + create mode 100644 
drivers/mtd/nand/raw/nfc_gen.h + create mode 100644 drivers/mtd/nand/raw/nfc_spl_ids.c + create mode 100644 drivers/mtd/spi-nor/bsp-generic.c + create mode 100644 drivers/mtd/spi-nor/controllers/bsp-sfc.c + create mode 100644 drivers/net/ethernet/vendor/Kconfig + create mode 100644 drivers/net/ethernet/vendor/Makefile + create mode 100644 drivers/net/ethernet/vendor/gmac/Kconfig + create mode 100644 drivers/net/ethernet/vendor/gmac/Makefile + create mode 100644 drivers/net/ethernet/vendor/gmac/autoeee/autoeee.c + create mode 100644 drivers/net/ethernet/vendor/gmac/autoeee/autoeee.h + create mode 100644 drivers/net/ethernet/vendor/gmac/autoeee/phy_id_table.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_external_phy.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_mdio.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_mdio.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_pm.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_pm.h + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_proc.c + create mode 100644 drivers/net/ethernet/vendor/gmac/gmac_proc.h + create mode 100644 drivers/net/ethernet/vendor/gmac/version.mak + create mode 100644 drivers/net/phy/mdio_bsp_gemac.c + create mode 100644 drivers/net/phy/mdio_bsp_gemac.h + create mode 100644 drivers/pci/bsp_pcie/Kconfig + create mode 100644 drivers/pci/bsp_pcie/Makefile + create mode 100644 drivers/pci/bsp_pcie/pci.h + create mode 100644 drivers/pci/bsp_pcie/pcie.c + create mode 100644 drivers/pci/bsp_pcie/pcie_ss928v100.c + create mode 100644 drivers/pci/bsp_pcie/pcie_ss928v100.h + create mode 100644 drivers/pwm/pwm-bsp.c + create mode 100644 drivers/spi/vendor/Makefile + create mode 100644 drivers/spi/vendor/vendor_spi.c + create mode 100644 drivers/spi/vendor/vendor_spi.h + delete mode 100644 drivers/staging/blackbox/Kconfig + delete mode 100644 drivers/staging/blackbox/Makefile + delete mode 100644 drivers/staging/blackbox/blackbox_common.c + delete mode 100644 drivers/staging/blackbox/blackbox_core.c + delete mode 100644 drivers/staging/blackbox/blackbox_storage.c + delete mode 100644 drivers/staging/hievent/Kconfig + delete mode 100644 drivers/staging/hievent/Makefile + delete mode 100644 drivers/staging/hievent/hievent_driver.c + delete mode 100644 drivers/staging/hievent/hievent_driver.h + delete mode 100644 drivers/staging/hievent/hiview_hievent.c + delete mode 100644 drivers/staging/hievent/hiview_hievent.h + delete mode 100644 drivers/staging/hilog/Kconfig + delete mode 100644 drivers/staging/hilog/Makefile + delete mode 100644 drivers/staging/hilog/hilog.c + delete mode 100644 drivers/staging/hisysevent/Kconfig + delete mode 100644 drivers/staging/hisysevent/Makefile + delete mode 100644 drivers/staging/hisysevent/hisysevent_builder.c + delete mode 100644 drivers/staging/hisysevent/hisysevent_builder.h + delete mode 100644 drivers/staging/hisysevent/hisysevent_raw_data.c + delete mode 100644 
drivers/staging/hisysevent/hisysevent_raw_data.h + delete mode 100644 drivers/staging/hisysevent/hisysevent_raw_data_encoder.c + delete mode 100644 drivers/staging/hisysevent/hisysevent_raw_data_encoder.h + delete mode 100644 drivers/staging/hisysevent/hiview_hisysevent.c + delete mode 100644 drivers/staging/hungtask/Kconfig + delete mode 100644 drivers/staging/hungtask/Makefile + delete mode 100644 drivers/staging/hungtask/hungtask_base.c + delete mode 100644 drivers/staging/hungtask/hungtask_user.c + delete mode 100644 drivers/staging/hungtask/hungtask_user.h + delete mode 100644 drivers/staging/zerohung/Kconfig + delete mode 100644 drivers/staging/zerohung/Makefile + delete mode 100644 drivers/staging/zerohung/watchpoint/Makefile + delete mode 100644 drivers/staging/zerohung/watchpoint/hung_wp_screen.c + delete mode 100644 drivers/staging/zerohung/zrhung_event.c + delete mode 100644 drivers/usb/gadget/function/f_generic.c + delete mode 100644 drivers/usb/gadget/function/u_generic.h + create mode 100644 drivers/vendor/Kconfig + create mode 100644 drivers/vendor/Makefile + create mode 100644 drivers/vendor/basedrv_clk/Kconfig + create mode 100644 drivers/vendor/basedrv_clk/Makefile + create mode 100644 drivers/vendor/basedrv_clk/basedrv-clock.h + create mode 100644 drivers/vendor/basedrv_clk/basedrv_clk.c + create mode 100644 drivers/vendor/basedrv_clk/basedrv_clk.h + create mode 100644 drivers/vendor/basedrv_clk/ss928v100/Makefile + create mode 100644 drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.c + create mode 100644 drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.h + create mode 100644 drivers/vendor/basedrv_clk/ss928v100/clk_ups.c + create mode 100644 drivers/vendor/cma/Kconfig + create mode 100644 drivers/vendor/cma/Makefile + create mode 100644 drivers/vendor/cma/cma.c + create mode 100644 drivers/vendor/mmc/CMakeLists.txt + create mode 100644 drivers/vendor/mmc/ChangLog + create mode 100644 drivers/vendor/mmc/Kconfig + create mode 100644 drivers/vendor/mmc/Makefile + create mode 100644 drivers/vendor/mmc/adapter/nebula_adapter.c + create mode 100644 drivers/vendor/mmc/adapter/nebula_fmea.c + create mode 100644 drivers/vendor/mmc/adapter/nebula_quick.c + create mode 100644 drivers/vendor/mmc/adapter/nebula_quick.h + create mode 100644 drivers/vendor/mmc/adapter/nebula_quirk_ids.h + create mode 100644 drivers/vendor/mmc/dfx/mci_proc.c + create mode 100644 drivers/vendor/mmc/dfx/mci_proc.h + create mode 100644 drivers/vendor/mmc/dfx/nebula_dfx.c + create mode 100644 drivers/vendor/mmc/dfx/nebula_dfx.h + create mode 100644 drivers/vendor/mmc/driver_obj.mk + create mode 100644 drivers/vendor/mmc/dtsi_usage.txt + create mode 100644 drivers/vendor/mmc/nebula_fmea.h + create mode 100644 drivers/vendor/mmc/nebula_intf.c + create mode 100644 drivers/vendor/mmc/nebula_intf.h + create mode 100644 drivers/vendor/mmc/platform/platform_comm.c + create mode 100644 drivers/vendor/mmc/platform/platform_priv.h + create mode 100644 drivers/vendor/mmc/platform/platform_timing.h + create mode 100644 drivers/vendor/mmc/platform/sdhci_hi3519dv500.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_hi3751v811_c.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_hiwing.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_shaolinaxe.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_shaolinfist.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_shaolinspear.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_shaolinsword_c.c + create mode 100644 
drivers/vendor/mmc/platform/sdhci_ss928v100.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_tianhe.c + create mode 100644 drivers/vendor/mmc/platform/sdhci_tianhe.h + create mode 100644 drivers/vendor/mmc/platform/sdhci_wudangstick.c + create mode 100644 drivers/vendor/mmc/sdhci_nebula.c + create mode 100644 drivers/vendor/mmc/sdhci_nebula.h + create mode 100644 drivers/vendor/mmc/sdhci_shaolinsword.c + create mode 100644 drivers/vendor/mmc/version.mak + create mode 100644 drivers/vendor/npu/Kconfig + create mode 100644 drivers/vendor/npu/Makefile + create mode 100644 drivers/vendor/npu/npu_misc.c + create mode 100644 drivers/vendor/npu/npu_svm.c + create mode 100644 drivers/vendor/npu/smmu_power_on.c + create mode 100755 drivers/vendor/usb/Kconfig + create mode 100755 drivers/vendor/usb/Makefile + create mode 100755 drivers/vendor/usb/defconfig + create mode 100755 drivers/vendor/usb/driver_config.mk + create mode 100755 drivers/vendor/usb/driver_obj.mk + create mode 100644 drivers/vendor/usb/proc.c + create mode 100755 drivers/vendor/usb/proc.h + create mode 100755 drivers/vendor/usb/version.mak + create mode 100644 drivers/vendor/usb/wing_usb.c + create mode 100755 drivers/vendor/usb/wing_usb.h + create mode 100644 drivers/vendor/usb_phy/Kconfig + create mode 100644 drivers/vendor/usb_phy/Makefile + create mode 100644 drivers/vendor/usb_phy/common.c + create mode 100644 drivers/vendor/usb_phy/driver_config.mk + create mode 100644 drivers/vendor/usb_phy/driver_obj.mk + create mode 100644 drivers/vendor/usb_phy/missile.c + create mode 100644 drivers/vendor/usb_phy/nano.c + create mode 100644 drivers/vendor/usb_phy/phy.c + create mode 100644 drivers/vendor/usb_phy/phy.h + create mode 100644 drivers/vendor/usb_phy/platform/ss626v100.c + create mode 100644 drivers/vendor/usb_phy/proc.c + create mode 100644 drivers/vendor/usb_phy/proc.h + create mode 100644 drivers/vendor/usb_phy/reg_common.h + create mode 100644 drivers/vendor/usb_phy/reg_default.h + create mode 100644 drivers/vendor/usb_phy/reg_hiwingv500.h + create mode 100644 drivers/vendor/usb_phy/xvp.c + delete mode 100644 fs/epfs/Kconfig + delete mode 100644 fs/epfs/Makefile + delete mode 100644 fs/epfs/dentry.c + delete mode 100644 fs/epfs/dir.c + delete mode 100644 fs/epfs/epfs.h + delete mode 100644 fs/epfs/file.c + delete mode 100644 fs/epfs/inode.c + delete mode 100644 fs/epfs/internal.h + delete mode 100644 fs/epfs/main.c + delete mode 100644 fs/epfs/super.c + delete mode 100644 fs/hmdfs/Kconfig + delete mode 100644 fs/hmdfs/Makefile + delete mode 100644 fs/hmdfs/authority/authentication.c + delete mode 100644 fs/hmdfs/authority/authentication.h + delete mode 100644 fs/hmdfs/authority/config.c + delete mode 100644 fs/hmdfs/client_writeback.c + delete mode 100644 fs/hmdfs/client_writeback.h + delete mode 100644 fs/hmdfs/comm/connection.c + delete mode 100644 fs/hmdfs/comm/connection.h + delete mode 100644 fs/hmdfs/comm/crypto.c + delete mode 100644 fs/hmdfs/comm/crypto.h + delete mode 100644 fs/hmdfs/comm/device_node.c + delete mode 100644 fs/hmdfs/comm/device_node.h + delete mode 100644 fs/hmdfs/comm/message_verify.c + delete mode 100644 fs/hmdfs/comm/message_verify.h + delete mode 100644 fs/hmdfs/comm/node_cb.c + delete mode 100644 fs/hmdfs/comm/node_cb.h + delete mode 100644 fs/hmdfs/comm/protocol.h + delete mode 100644 fs/hmdfs/comm/socket_adapter.c + delete mode 100644 fs/hmdfs/comm/socket_adapter.h + delete mode 100644 fs/hmdfs/comm/transport.c + delete mode 100644 fs/hmdfs/comm/transport.h + delete mode 100644 
fs/hmdfs/dentry.c + delete mode 100644 fs/hmdfs/file_cloud.c + delete mode 100644 fs/hmdfs/file_local.c + delete mode 100644 fs/hmdfs/file_merge.c + delete mode 100644 fs/hmdfs/file_remote.c + delete mode 100644 fs/hmdfs/file_remote.h + delete mode 100644 fs/hmdfs/file_root.c + delete mode 100644 fs/hmdfs/hmdfs.h + delete mode 100644 fs/hmdfs/hmdfs_client.c + delete mode 100644 fs/hmdfs/hmdfs_client.h + delete mode 100644 fs/hmdfs/hmdfs_dentryfile.c + delete mode 100644 fs/hmdfs/hmdfs_dentryfile.h + delete mode 100644 fs/hmdfs/hmdfs_dentryfile_cloud.c + delete mode 100644 fs/hmdfs/hmdfs_dentryfile_cloud.h + delete mode 100644 fs/hmdfs/hmdfs_device_view.h + delete mode 100644 fs/hmdfs/hmdfs_merge_view.h + delete mode 100644 fs/hmdfs/hmdfs_server.c + delete mode 100644 fs/hmdfs/hmdfs_server.h + delete mode 100644 fs/hmdfs/hmdfs_share.c + delete mode 100644 fs/hmdfs/hmdfs_share.h + delete mode 100644 fs/hmdfs/hmdfs_trace.h + delete mode 100644 fs/hmdfs/inode.c + delete mode 100644 fs/hmdfs/inode.h + delete mode 100644 fs/hmdfs/inode_cloud.c + delete mode 100644 fs/hmdfs/inode_cloud_merge.c + delete mode 100644 fs/hmdfs/inode_local.c + delete mode 100644 fs/hmdfs/inode_merge.c + delete mode 100644 fs/hmdfs/inode_remote.c + delete mode 100644 fs/hmdfs/inode_root.c + delete mode 100644 fs/hmdfs/main.c + delete mode 100644 fs/hmdfs/server_writeback.c + delete mode 100644 fs/hmdfs/server_writeback.h + delete mode 100644 fs/hmdfs/stash.c + delete mode 100644 fs/hmdfs/stash.h + delete mode 100644 fs/hmdfs/super.c + delete mode 100644 fs/sharefs/Kconfig + delete mode 100644 fs/sharefs/Makefile + delete mode 100644 fs/sharefs/authentication.c + delete mode 100644 fs/sharefs/authentication.h + delete mode 100644 fs/sharefs/config.c + delete mode 100644 fs/sharefs/dentry.c + delete mode 100644 fs/sharefs/file.c + delete mode 100644 fs/sharefs/inode.c + delete mode 100644 fs/sharefs/lookup.c + delete mode 100644 fs/sharefs/main.c + delete mode 100644 fs/sharefs/sharefs.h + delete mode 100644 fs/sharefs/super.c + delete mode 100644 include/dfx/hiview_hisysevent.h + delete mode 100644 include/dfx/hung_wp_screen.h + delete mode 100644 include/dfx/hungtask_base.h + delete mode 100644 include/dfx/zrhung.h + create mode 100644 include/dt-bindings/clock/basedrv-clock.h + create mode 100644 include/dt-bindings/clock/ss928v100_clock.h + delete mode 100644 include/linux/blackbox.h + delete mode 100644 include/linux/blackbox_common.h + delete mode 100644 include/linux/blackbox_storage.h + create mode 100644 include/linux/bsp_cma.h + delete mode 100644 include/linux/code_sign.h + create mode 100644 include/linux/edmac.h + delete mode 100644 include/linux/hck/lite_hck_ced.h + delete mode 100644 include/linux/hck/lite_hck_code_sign.h + delete mode 100644 include/linux/hck/lite_hck_hideaddr.h + delete mode 100644 include/linux/hck/lite_hck_inet.h + delete mode 100644 include/linux/hck/lite_hck_jit_memory.h + delete mode 100644 include/linux/hck/lite_hck_sample.h + delete mode 100644 include/linux/hck/lite_hck_xpm.h + delete mode 100644 include/linux/hck/lite_vendor_hooks.h + create mode 100644 include/linux/mfd/bsp_fmc.h + delete mode 100755 include/linux/sched/core_ctl.h + delete mode 100755 include/linux/sched/frame_rtg.h + delete mode 100755 include/linux/sched/rtg.h + delete mode 100755 include/linux/sched/rtg_ctrl.h + create mode 100644 include/linux/securec.h + create mode 100644 include/linux/securectype.h + create mode 100644 include/linux/vendor/sva_ext.h + create mode 100644 include/linux/vendor/vendor_i2c.h 
+ delete mode 100755 include/trace/events/eas_sched.h + delete mode 100755 include/trace/events/rtg.h + delete mode 100755 include/trace/events/walt.h + delete mode 100755 kernel/sched/core_ctl.c + delete mode 100755 kernel/sched/core_ctl.h + delete mode 100755 kernel/sched/rtg/Kconfig + delete mode 100755 kernel/sched/rtg/Makefile + delete mode 100755 kernel/sched/rtg/frame_rtg.c + delete mode 100755 kernel/sched/rtg/frame_rtg.h + delete mode 100755 kernel/sched/rtg/rtg.c + delete mode 100755 kernel/sched/rtg/rtg.h + delete mode 100755 kernel/sched/rtg/rtg_ctrl.c + delete mode 100755 kernel/sched/rtg/rtg_ctrl.h + delete mode 100755 kernel/sched/sched_avg.c + delete mode 100755 kernel/sched/walt.c + delete mode 100755 kernel/sched/walt.h + create mode 100644 lib/securec/LICENSE + create mode 100644 lib/securec/Makefile + create mode 100644 lib/securec/README.en.md + create mode 100644 lib/securec/README.md + create mode 100644 lib/securec/src/Makefile + create mode 100644 lib/securec/src/input.inl + create mode 100644 lib/securec/src/memcpy_s.c + create mode 100644 lib/securec/src/memmove_s.c + create mode 100644 lib/securec/src/memset_s.c + create mode 100644 lib/securec/src/output.inl + create mode 100644 lib/securec/src/scanf_s.c + create mode 100644 lib/securec/src/secinput.h + create mode 100644 lib/securec/src/securecutil.c + create mode 100644 lib/securec/src/securecutil.h + create mode 100644 lib/securec/src/secureinput_a.c + create mode 100644 lib/securec/src/secureprintoutput.h + create mode 100644 lib/securec/src/secureprintoutput_a.c + create mode 100644 lib/securec/src/snprintf_s.c + create mode 100644 lib/securec/src/sprintf_s.c + create mode 100644 lib/securec/src/sscanf_s.c + create mode 100644 lib/securec/src/strcat_s.c + create mode 100644 lib/securec/src/strcpy_s.c + create mode 100644 lib/securec/src/strncat_s.c + create mode 100644 lib/securec/src/strncpy_s.c + create mode 100644 lib/securec/src/strtok_s.c + create mode 100644 lib/securec/src/vscanf_s.c + create mode 100644 lib/securec/src/vsnprintf_s.c + create mode 100644 lib/securec/src/vsprintf_s.c + create mode 100644 lib/securec/src/vsscanf_s.c + delete mode 100644 mm/memcg_control.c + delete mode 100644 mm/memcg_reclaim.c + delete mode 100644 mm/memory_monitor.c + delete mode 100644 mm/purgeable.c + delete mode 100644 mm/purgeable_ashmem_trigger.c + delete mode 100644 mm/zswapd.c + delete mode 100644 mm/zswapd_control.c + delete mode 100644 mm/zswapd_internal.h + delete mode 100644 samples/hck/Makefile + delete mode 100644 samples/hck/call.c + delete mode 100644 samples/hck/register.c + delete mode 100644 samples/hck/register_one.c + delete mode 100644 tools/testing/selftests/bpf/progs/tailcall_freplace.c + delete mode 100644 tools/testing/selftests/bpf/progs/tc_bpf2bpf.c + +diff --git a/OAT.xml b/OAT.xml +deleted file mode 100644 +index 125e60d97..000000000 +--- a/OAT.xml ++++ /dev/null +@@ -1,632 +0,0 @@ +- [632 deleted lines: the OAT.xml OSS-audit configuration; only the license-file entry "COPYING" is legible, the surrounding XML markup is not preserved]
+diff --git a/README.OpenSource b/README.OpenSource +deleted file mode 100644 +index 270e65b15..000000000 +--- a/README.OpenSource ++++ /dev/null +@@ -1,11 +0,0 @@ +-[ +- { +- "Name": "linux-6.6", +- "License": "GPL-2.0+", +- "License File": "COPYING", +- "Version Number": "6.6.86", +- "Owner": "liuyu82@huawei.com", +- "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-6.6.y", +- "Description": "linux kernel 6.6" +- } +-] +diff --git a/README_OpenHarmony.md b/README_OpenHarmony.md +deleted file mode 100644 +index bf38204cc..000000000 +--- a/README_OpenHarmony.md ++++ /dev/null +@@ -1,5 +0,0 @@ +-# kernel_linux_common_6.6 +- +-#### Introduction +-Repository purpose: the native linux-6.6 repository, containing the unmodified linux-6.6 code. +-For other general kernel notes, see the [kernel documentation](https://gitee.com/openharmony/docs/blob/master/zh-cn/device-dev/kernel/Readme-CN.md) +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 658c6a61a..7c3c124c0 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -1961,7 +1961,6 @@ config CC_HAS_BRANCH_PROT_PAC_RET + + config CC_HAS_SIGN_RETURN_ADDRESS + # GCC 7, 8 +- def_bool $(cc-option,-msign-return-address=all) + + config AS_HAS_ARMV8_3 + def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a) +@@ -2343,6 +2342,21 @@ config DMI + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + ++config BUILD_ARM64_APPENDED_DTB_IMAGE ++ bool "Build a concatenated uImage/dtb by default" ++ depends on OF ++ help ++ Enabling this option will cause a concatenated uImage and list of ++ DTBs to be built by default (instead of a standalone uImage). ++ The image will be built in arch/arm64/boot/uImage. ++ ++config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES ++ string "Default dtb names" ++ depends on BUILD_ARM64_APPENDED_DTB_IMAGE ++ help ++ Space-separated list of names of dtbs to append when ++ building a concatenated uImage. ++ + endmenu # "Boot options" + + menu "Power management options" +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms +index 606912019..eaa1aac4d 100644 +--- a/arch/arm64/Kconfig.platforms ++++ b/arch/arm64/Kconfig.platforms +@@ -153,6 +153,23 @@ config ARCH_KEEMBAY + help + This enables support for Intel Movidius SoC code-named Keem Bay.
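The two Kconfig options above are consumed by the uImage rules added to arch/arm64/boot/Makefile below. A minimal build sketch, assuming the aarch64-openeuler-linux- cross prefix used elsewhere in this patch set, the ss928v100_defconfig added by this patch, mkimage from u-boot-tools on PATH, and a dtb name (relative to arch/arm64/boot/dts) that is an assumption rather than anything the patch mandates:

    # Sketch only: the dtb name and cross prefix are assumptions.
    make ARCH=arm64 CROSS_COMPILE=aarch64-openeuler-linux- ss928v100_defconfig
    ./scripts/config --enable BUILD_ARM64_APPENDED_DTB_IMAGE \
        --set-str BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES "vendor/ss928v100-demb-flash"
    make ARCH=arm64 CROSS_COMPILE=aarch64-openeuler-linux- olddefconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-openeuler-linux- uImage   # needs mkimage

Note the dd step in the uImage rule below: it pads Image out to a 4096-byte boundary so the concatenated DTBs land at an aligned offset behind the kernel.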
+ ++config ARCH_BSP ++ bool "Vendor SoC Family" ++ select ARM_TIMER_SP804 ++ select PINCTRL ++ help ++ This enables support for the Vendor ARMv8 SoC family. ++ ++ ++config ARCH_SS928V100 ++ bool "Vendor ss928v100 family" ++ depends on ARCH_BSP ++ select ARM_TIMER_SP804 ++ select HISILICON_IRQ_MBIGEN if PCI ++ select PINCTRL ++ help ++ Support for the Vendor SS928V100 SoC family. ++ + config ARCH_MEDIATEK + bool "MediaTek SoC Family" + select ARM_GIC +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 117828607..e6d119b04 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -162,6 +162,9 @@ vmlinuz.efi: Image + Image vmlinuz.efi: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + ++uImage: Image dtbs ++ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ ++ + Image.%: Image + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile +index 1761f5972443..f4537e8af7e0 100644 +--- a/arch/arm64/boot/Makefile ++++ b/arch/arm64/boot/Makefile +@@ -16,7 +16,7 @@ + + OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S + +-targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst ++targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst uImage + + $(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) +@@ -39,6 +39,29 @@ $(obj)/Image.lzo: $(obj)/Image FORCE + $(obj)/Image.zst: $(obj)/Image FORCE + $(call if_changed,zstd) + ++TEXT_OFFSET := 0x0 ++UIMAGE_LOADADDR := $(TEXT_OFFSET) ++UIMAGE_ENTRYADDR := $(TEXT_OFFSET) ++check_for_multiple_loadaddr = \ ++if [ $(words $(UIMAGE_LOADADDR)) -ne 1 ]; then \ ++ echo 'multiple (or no) load addresses: $(UIMAGE_LOADADDR)'; \ ++ echo 'This is incompatible with uImages'; \ ++ echo 'Specify LOADADDR on the commandline to build an uImage'; \ ++ false; \ ++fi ++ ++DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) ++DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) ++DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) ++ ++$(obj)/uImage: $(obj)/Image FORCE ++ @$(check_for_multiple_loadaddr) ++ @dd if=$< of=$<.dd ibs=4096 conv=sync && mv $<.dd $< ++ $(call cmd,uimage) ++ $(if $(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),@$(kecho) ' CAT $(DTB_OBJS) to $@') ++ $(if $(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),@cat $(DTB_OBJS) >>$@,) ++ @$(kecho) ' Image $@ is ready' + + EFI_ZBOOT_PAYLOAD := Image + EFI_ZBOOT_BFD_TARGET := elf64-littleaarch64 + EFI_ZBOOT_MACH_TYPE := ARM64 +diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile +index 30dd6347a929..01f0bcf63ad1 100644 +--- a/arch/arm64/boot/dts/Makefile ++++ b/arch/arm64/boot/dts/Makefile +@@ -33,3 +33,8 @@ subdir-y += tesla + subdir-y += ti + subdir-y += toshiba + subdir-y += xilinx ++subdir-y += vendor ++ ++dtbs: $(addprefix $(obj)/, $(DTB_LIST)) ++ ++clean-files := dts/*.dtb *.dtb +diff --git a/arch/arm64/boot/dts/vendor/Makefile b/arch/arm64/boot/dts/vendor/Makefile +new file mode 100644 +index 000000000000..90df17a4ccd1 +--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/Makefile +@@ -0,0 +1,5 @@ ++HOST_EXTRACFLAGS += -include include/generated/autoconf.h ++dtb-$(CONFIG_ARCH_SS928V100) += ss928v100-demb-flash.dtb ++dtb-$(CONFIG_ARCH_SS928V100) += ss928v100-demb-emmc.dtb ++dtb-$(CONFIG_ARCH_SS927V100) += ss927v100-demb-flash.dtb ++dtb-$(CONFIG_ARCH_SS927V100) += ss927v100-demb-emmc.dtb +\ No newline at end of file +diff --git a/arch/arm64/boot/dts/vendor/ss928v100-demb-emmc.dts b/arch/arm64/boot/dts/vendor/ss928v100-demb-emmc.dts +new file mode 100644 +index 000000000..a5996206b
+--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/ss928v100-demb-emmc.dts +@@ -0,0 +1,23 @@ ++/* Copyright (c) 2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ * ++ */ ++//#include "../../../../../include/generated/autoconf.h" ++#include "ss928v100-demb.dts" ++ ++&mmc0 { ++ status = "okay"; ++}; ++ +diff --git a/arch/arm64/boot/dts/vendor/ss928v100-demb-flash.dts b/arch/arm64/boot/dts/vendor/ss928v100-demb-flash.dts +new file mode 100644 +index 000000000..b5fbc2595 +--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/ss928v100-demb-flash.dts +@@ -0,0 +1,23 @@ ++/* Copyright (c) 2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ * ++ */ ++//#include "../../../../../include/generated/autoconf.h" ++#include "ss928v100-demb.dts" ++ ++&mmc0 { ++ status = "disabled"; ++}; ++ +diff --git a/arch/arm64/boot/dts/vendor/ss928v100-demb.dts b/arch/arm64/boot/dts/vendor/ss928v100-demb.dts +new file mode 100644 +index 000000000..cc7e375a8 +--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/ss928v100-demb.dts +@@ -0,0 +1,349 @@ ++/* Copyright (c) 2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ * ++ */ ++ ++/dts-v1/; ++/* reserved for warmreset */ ++/* reserved for arm trustedfirmware */ ++/* Modify this configuration according to the system framework */ ++/memreserve/ 0x52fff000 0x01a02000; ++#include "ss928v100.dtsi" ++ ++/ { ++ model = "Vendor SS928V100 DEMO Board"; ++ compatible = "vendor,ss928v100"; ++ ++ aliases { ++ serial0 = &uart0; ++ ++ serial1 = &uart1; ++ serial2 = &uart2; ++ serial3 = &uart3; ++ serial4 = &uart4; ++ serial5 = &uart5; ++ ++ i2c0 = &i2c_bus0; ++ i2c1 = &i2c_bus1; ++ i2c2 = &i2c_bus2; ++ i2c3 = &i2c_bus3; ++ i2c4 = &i2c_bus4; ++ i2c5 = &i2c_bus5; ++ ++ spi0 = &spi_bus0; ++ spi1 = &spi_bus1; ++ spi2 = &spi_bus2; ++ spi3 = &spi_bus3; ++ ++ gpio0 = &gpio_chip0; ++ gpio1 = &gpio_chip1; ++ gpio2 = &gpio_chip2; ++ gpio3 = &gpio_chip3; ++ gpio4 = &gpio_chip4; ++ gpio5 = &gpio_chip5; ++ gpio6 = &gpio_chip6; ++ gpio7 = &gpio_chip7; ++ gpio8 = &gpio_chip8; ++ gpio9 = &gpio_chip9; ++ gpio10 = &gpio_chip10; ++ gpio11 = &gpio_chip11; ++ gpio12 = &gpio_chip12; ++ gpio13 = &gpio_chip13; ++ gpio14 = &gpio_chip14; ++ gpio15 = &gpio_chip15; ++ gpio16 = &gpio_chip16; ++ gpio17 = &gpio_chip17; ++ }; ++ ++ chosen { ++ bootargs = "earlycon=pl011,0x11040000 mem=512M console=ttyAMA0,115200 clk_ignore_unused root=/dev/mtdblock2 rootfstype=yaffs2 rw mtdparts=bspnand:1M(boot),9M(kernel),32M(rootfs),1M(this_bootargs_string_is_reserved_for_bootargs_form_uboot!!!_it_must_be_longer_than_bootargs_form_uboot!!!_this_bootargs_string_is_reserved_for_bootargs_form_uboot!!!_it_must_be_longer_than_bootargs_form_uboot!!!_this_bootargs_string_is_reserved_for_bootargs_form_uboot!!!_it_must_be_longer_than_bootargs_form_uboot!!!_this_bootargs_string_is_reserved_for_bootargs_form_uboot!!!_it_must_be_longer_than_bootargs_form_uboot!!!_this_bootargs_string_is_reserved_for_bootargs_form_uboot!!!_it_must_be_longer_than_bootargs_form_uboot!!!)"; ++ ++ linux,initrd-start = <0x60000040>; ++ linux,initrd-end = <0x61000000>; ++ }; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ compatible = "arm,cortex-a55"; ++ device_type = "cpu"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ //clock-latency = <100000>; /* From legacy driver */ ++ }; ++ ++ cpu@1 { ++ compatible = "arm,cortex-a55"; ++ device_type = "cpu"; ++ reg = <0x0 0x100>; ++ enable-method = "psci"; ++ //clock-latency = <200000>; /* From legacy driver */ ++ }; ++ ++ cpu@2 { ++ compatible = "arm,cortex-a55"; ++ device_type = "cpu"; ++ reg = <0x0 0x200>; ++ enable-method = "psci"; ++ }; ++ ++ cpu@3 { ++ compatible = "arm,cortex-a55"; ++ device_type = "cpu"; ++ reg = <0x0 0x300>; ++ enable-method = "psci"; ++ }; ++ }; ++ ++ memory { ++ device_type = "memory"; ++ reg = <0x0 0x50000000 0x1 0xf0000000>; /* system memory base */ ++ }; ++}; ++ ++&ipcm { ++ status = "okay"; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "disabled"; ++}; ++ ++&uart2 { ++ status = "disabled"; ++}; ++ ++&uart3 { ++ status = "disabled"; ++}; ++ ++&uart4 { ++ status = "disabled"; ++}; ++&uart5 { ++ status = "disabled"; ++}; ++ ++&i2c_bus0 { ++ status = "okay"; ++}; ++ ++&i2c_bus1 { ++ status = "okay"; ++}; ++ ++&i2c_bus2 { ++ status = "okay"; ++}; ++ ++&i2c_bus3 { ++ status = "okay"; ++}; ++ ++&i2c_bus4 { ++ status = "okay"; ++}; ++ ++&i2c_bus5 { ++ status = "okay"; ++}; ++ ++&spi_bus0{ ++ status = "okay"; ++ ++ spidev@0 { ++ compatible = "rohm,dh2228fv"; ++ reg = <0>; ++ pl022,interface = <0>; ++ pl022,com-mode = <0>; ++ spi-max-frequency = <25000000>; ++ }; ++}; ++ ++&spi_bus1{ ++ status = "okay"; ++ spidev@0 { ++ 
compatible = "rohm,dh2228fv"; ++ reg = <0>; ++ pl022,interface = <0>; ++ pl022,com-mode = <0>; ++ spi-max-frequency = <25000000>; ++ }; ++ spidev@1 { ++ compatible = "rohm,dh2228fv"; ++ reg = <1>; ++ pl022,interface = <0>; ++ pl022,com-mode = <0>; ++ spi-max-frequency = <25000000>; ++ }; ++}; ++ ++&spi_bus2{ ++ status = "okay"; ++ spidev@0 { ++ compatible = "rohm,dh2228fv"; ++ reg = <0>; ++ pl022,interface = <0>; ++ pl022,com-mode = <0>; ++ spi-max-frequency = <25000000>; ++ }; ++}; ++ ++&spi_bus3{ ++ status = "okay"; ++ ++ spidev@0 { ++ compatible = "rohm,dh2228fv"; ++ reg = <0>; ++ pl022,interface = <0>; ++ pl022,com-mode = <0>; ++ spi-max-frequency = <25000000>; ++ }; ++}; ++ ++&gpio_chip0 { ++ status = "okay"; ++}; ++ ++&gpio_chip1 { ++ status = "okay"; ++}; ++ ++&gpio_chip2 { ++ status = "okay"; ++}; ++ ++&gpio_chip3 { ++ status = "okay"; ++}; ++ ++&gpio_chip4 { ++ status = "okay"; ++}; ++ ++&gpio_chip5 { ++ status = "okay"; ++}; ++ ++&gpio_chip6 { ++ status = "okay"; ++}; ++ ++&gpio_chip7 { ++ status = "okay"; ++}; ++ ++&gpio_chip8 { ++ status = "okay"; ++}; ++ ++&gpio_chip9 { ++ status = "okay"; ++}; ++ ++&gpio_chip10 { ++ status = "okay"; ++}; ++ ++&gpio_chip11 { ++ status = "okay"; ++}; ++ ++&gpio_chip12 { ++ status = "okay"; ++}; ++ ++&gpio_chip13 { ++ status = "okay"; ++}; ++ ++&gpio_chip14 { ++ status = "okay"; ++}; ++ ++&gpio_chip15 { ++ status = "okay"; ++}; ++&gpio_chip16 { ++ status = "okay"; ++}; ++&gpio_chip17 { ++ status = "okay"; ++}; ++ ++&sfc { ++ sfc@0 { ++ compatible = "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <200000000>; ++ m25p,fast-read; ++ }; ++}; ++ ++&snfc { ++ nand@0 { ++ compatible = "jedec,spi-nand"; ++ reg = <0>; ++ spi-max-frequency = <200000000>; ++ }; ++}; ++ ++&nfc { ++ nand@0 { ++ compatible = "jedec,nand"; ++ reg = <0>; ++ nand-max-frequency = <200000000>; ++ }; ++}; ++ ++&mdio { ++ ethphy: ethernet-phy@1 { ++ reg = <1>; ++ }; ++}; ++ ++&mdio1 { ++ ethphy1: ethernet-phy@3 { ++ reg = <3>; ++ }; ++}; ++ ++&gmac { ++ phy-handle = <ðphy>; ++ phy-mode = "rgmii-id"; ++}; ++ ++&gmac1 { ++ phy-handle = <ðphy1>; ++ phy-mode = "rgmii-id"; ++}; ++ ++&pwm { ++ status = "okay"; ++}; ++ ++&mmc1 { ++ status = "okay"; ++}; ++ ++&mmc2 { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/vendor/ss928v100.dtsi b/arch/arm64/boot/dts/vendor/ss928v100.dtsi +new file mode 100644 +index 000000000..ef21741cf +--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/ss928v100.dtsi +@@ -0,0 +1,1017 @@ ++/* Copyright (c) 2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++/* reserved for arm trustedfirmware */ ++#include ++#include ++#include ++#include "ss928v100_family_usb.dtsi" ++ ++/ { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ interrupt-parent = <&gic>; ++ ++ gic: interrupt-controller@12400000 { ++ compatible = "arm,gic-v3"; ++ #interrupt-cells = <3>; ++ #address-cells = <0>; ++ interrupt-controller; ++ reg = <0x0 0x12400000 0x0 0x10000>, /* gic distributor base */ ++ <0x0 0x12440000 0x0 0x140000>; /* gic redistributor base */ ++ }; ++ ++ psci { ++ compatible = "arm,psci-0.2"; ++ method = "smc"; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = ; ++ }; ++ ++ clock: clock0 { ++ compatible = "vendor,ss928v100_clock", "syscon"; ++ #clock-cells = <1>; ++ #reset-cells = <2>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x0 0x11010000 0x0 0x44a0>; ++ }; ++ ++ smmu0: smmu_npu@14040000 { ++ compatible = "arm,smmu-v3"; ++ reg = <0x0 0x14040000 0x0 0x40000>; //SMMU TCU ++ interrupts = ; ++ interrupt-names = "combined"; ++ #iommu-cells = <0x1>; ++ vendor,broken-prefetch-cmd; ++ }; ++ ++ svm0: svm_npu@14020000 { ++ compatible = "vendor,svm"; ++ crg-base = <0x11010000>; ++ crg-size = <0x10000>; ++ npu_crg_6560 = <0x6680>; ++ ranges; ++ #size-cells = <0x2>; ++ #address-cells = <0x2>; ++ ++ svm_aicore { ++ reg = <0x0 0x14020000 0x0 0x10000>; ++ iommus = <&smmu0 0x1>; ++ dma-can-stall; ++ pasid-num-bits = <16>; ++ }; ++ }; ++ ++ smmu1: smmu_pqp@15410000 { ++ compatible = "arm,smmu-v3"; ++ reg = <0x0 0x15410000 0x0 0x40000>; /*SMMU TCU*/ ++ ++ interrupts = ; ++ interrupt-names = "combined"; ++ #iommu-cells = <0x1>; ++ vendor,broken-prefetch-cmd; ++ }; ++ ++ svm1: svm_pqp@15400000 { ++ compatible = "vendor,svm"; ++ ranges; ++ #size-cells = <0x2>; ++ #address-cells = <0x2>; ++ crg-base = <0x11010000>; ++ crg-size = <0x10000>; ++ pqp_crg_6592 = <0x6700>; ++ svm_aicore { ++ reg = <0x0 0x15400000 0x0 0x10000>; ++ iommus = <&smmu1 0x1>; ++ dma-can-stall; ++ pasid-num-bits = <16>; ++ }; ++ ++ svm_hwts { ++ iommus = <&smmu1 0x2>; ++ dma-can-stall; ++ pasid-bits = <0x10>; ++ vendor,smmu_bypass; ++ }; ++ }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; ++ ++ ipcm: ipcm@11031000 { ++ compatible = "vendor,ipcm-interrupt"; ++ interrupt-parent = <&gic>; ++ interrupts = , <0 27 IRQ_TYPE_LEVEL_HIGH>; ++ reg = <0x0 0x11031000 0x0 0x1000>; ++ status = "disabled"; ++ ++ }; ++ ++ soc { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "simple-bus"; ++ device_type = "soc"; ++ ranges = <0x0 0x00000000 0x0 0xffffffff>; ++ ++ clk_3m: clk_3m { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <3000000>; ++ }; ++ ++ i2c_bus0: i2c@11060000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11060000 0x1000>; ++ clocks = <&clock SS928V100_I2C0_CLK>; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ /* dmas = <&edmacv310_0 0 0>, <&edmacv310_0 1 1>; */ ++ /* dma-names = "rx","tx"; */ ++ rtc: rtc@32 { ++ compatible = "epson,rx8900"; ++ reg = <0x32>; ++ epson,vdet-disable; ++ trickle-diode-disable; ++ }; ++ }; ++ ++ ++ i2c_bus1: i2c@11061000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11061000 0x1000>; ++ clocks = <&clock SS928V100_I2C1_CLK>; ++ clock-frequency = <100000>; ++ /* dmas = <&edmacv310_0 2 2>, <&edmacv310_0 3 3>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ i2c_bus2: i2c@11062000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11062000 0x1000>; ++ clocks = <&clock 
SS928V100_I2C2_CLK>; ++ clock-frequency = <100000>; ++ /* dmas = <&edmacv310_0 4 4>, <&edmacv310_0 5 5>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ i2c_bus3: i2c@11063000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11063000 0x1000>; ++ clocks = <&clock SS928V100_I2C3_CLK>; ++ clock-frequency = <100000>; ++ /* dmas = <&edmacv310_0 6 6>, <&edmacv310_0 7 7>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ i2c_bus4: i2c@11064000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11064000 0x1000>; ++ clocks = <&clock SS928V100_I2C4_CLK>; ++ clock-frequency = <100000>; ++ /* dmas = <&edmacv310_0 8 8>, <&edmacv310_0 9 9>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ i2c_bus5: i2c@11065000 { ++ compatible = "vendor,i2c"; ++ reg = <0x11065000 0x1000>; ++ clocks = <&clock SS928V100_I2C5_CLK>; ++ clock-frequency = <100000>; ++ /* dmas = <&edmacv310_0 10 10>, <&edmacv310_0 11 11>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ amba { ++ compatible = "arm,amba-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ arm-timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ ; ++ clock-frequency = <24000000>; ++ always-on; ++ }; ++ ++ timer@11000000 { ++ compatible = "vendor,bsp_sp804"; ++ reg = <0x11000000 0x1000>, /* clocksource */ ++ <0x11001000 0x1000>, ++ <0x11002000 0x1000>, ++ <0x11003000 0x1000>, ++ <0x11004000 0x1000>; ++ ++ interrupts = , ++ , ++ , ++ ; ++ ++ clocks = <&clk_3m>; ++ clock-names = "apb_pclk"; ++ }; ++ ++ uart0: uart@11040000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11040000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART0_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 20 20>, <&edmacv310_0 21 21>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ uart1: uart@11041000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11041000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART1_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 22 22>, <&edmacv310_0 23 23>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ uart2: uart@11042000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11042000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART2_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 24 24>, <&edmacv310_0 25 25>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ uart3: uart@11043000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11043000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART3_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 26 26>, <&edmacv310_0 27 27>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ uart4: uart@11044000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11044000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART4_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 28 28>, <&edmacv310_0 29 29>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ uart5: uart@11045000 { ++ compatible = "arm,pl011", "arm,primecell"; ++ reg = <0x11045000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_UART5_CLK>; ++ clock-names = "apb_pclk"; ++ /* dmas = <&edmacv310_0 30 30>, <&edmacv310_0 31 31>; */ ++ /* dma-names = "rx","tx"; */ ++ status = "disabled"; ++ }; ++ ++ spi_bus0: spi@11070000 { ++ compatible = "arm,pl022", "arm,primecell"; ++ arm,primecell-periphid = 
<0x00800022>; ++ reg = <0x11070000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_SPI0_CLK>; ++ clock-names = "apb_pclk"; ++ #address-cells = <1>; ++ vendor,slave_mode = <0>; ++ vendor,slave_tx_disable = <0>; ++ #size-cells = <0>; ++ status = "disabled"; ++ num-cs = <1>; ++ /* dmas = <&edmacv310_0 12 12>, <&edmacv310_0 13 13>; */ ++ /* dma-names = "rx","tx"; */ ++ }; ++ ++ spi_bus1: spi@11071000 { ++ compatible = "arm,pl022", "arm,primecell"; ++ arm,primecell-periphid = <0x00800022>; ++ reg = <0x11071000 0x1000>, <0x110d2100 0x4>; ++ interrupts = ; ++ clocks = <&clock SS928V100_SPI1_CLK>; ++ clock-names = "apb_pclk"; ++ #address-cells = <1>; ++ vendor,slave_mode = <0>; ++ vendor,slave_tx_disable = <0>; ++ #size-cells = <0>; ++ status = "disabled"; ++ num-cs = <2>; ++ spi_cs_sb = <2>; ++ spi_cs_mask_bit = <0x4>; ++ /* dmas = <&edmacv310_0 14 14>, <&edmacv310_0 15 15>; */ ++ /* dma-names = "rx","tx"; */ ++ }; ++ ++ spi_bus2: spi@11073000 { ++ compatible = "arm,pl022", "arm,primecell"; ++ arm,primecell-periphid = <0x00800022>; ++ reg = <0x11073000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_SPI2_CLK>; ++ clock-names = "apb_pclk"; ++ #address-cells = <1>; ++ vendor,slave_mode = <0>; ++ vendor,slave_tx_disable = <0>; ++ #size-cells = <0>; ++ status = "disabled"; ++ num-cs = <1>; ++ /* dmas = <&edmacv310_0 16 16>, <&edmacv310_0 17 17>; */ ++ /* dma-names = "rx","tx"; */ ++ }; ++ ++ spi_bus3: spi@11074000 { ++ compatible = "arm,pl022", "arm,primecell"; ++ arm,primecell-periphid = <0x00800022>; ++ reg = <0x11074000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_SPI3_CLK>; ++ clock-names = "apb_pclk"; ++ vendor,slave_mode = <0>; ++ vendor,slave_tx_disable = <0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ num-cs = <1>; ++ /* dmas = <&edmacv310_0 18 18>, <&edmacv310_0 19 19>; */ ++ /* dma-names = "rx","tx"; */ ++ }; ++ ++ gpio_chip0: gpio_chip@11090000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11090000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip1: gpio_chip@11091000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11091000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip2: gpio_chip@11092000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11092000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip3: gpio_chip@11093000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11093000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip4: gpio_chip@11094000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11094000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip5: gpio_chip@11095000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11095000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip6: gpio_chip@11096000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11096000 0x1000>; ++ 
interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip7: gpio_chip@11097000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11097000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip8: gpio_chip@11098000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11098000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip9: gpio_chip@11099000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x11099000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip10: gpio_chip@1109A000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109A000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip11: gpio_chip@1109B000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109B000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip12: gpio_chip@1109C000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109C000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip13: gpio_chip@1109D000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109D000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip14: gpio_chip@1109E000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109E000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip15: gpio_chip@1109F000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x1109F000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip16: gpio_chip@110a0000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x110a0000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ gpio_chip17: gpio_chip@110a1000 { ++ compatible = "arm,pl061", "arm,primecell"; ++ reg = <0x110a1000 0x1000>; ++ interrupts = ; ++ #gpio-cells = <2>; ++ clocks = <&clock SS928V100_FIXED_50M>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ }; ++ ++ misc_ctrl: misc-controller@11024000 { ++ compatible = "vendor,miscctrl", "syscon"; ++ reg = <0x11024000 0x5000>; ++ }; ++ ++ ioconfig0: ioconfig0@10230000 { ++ compatible = "vendor,ioconfig", "syscon"; ++ reg = <0x10230000 0x10000>; ++ }; ++ ++ ioconfig1: ioconfig1@102f0000 { ++ compatible = "vendor,ioconfig", "syscon"; ++ reg = <0x102f0000 0x10000>; ++ }; ++ ++ /*FLASH DTS nodes*/ ++ fmc: flash-memory-controller@10000000 { ++ compatible = "vendor,fmc"; ++ reg = <0x10000000 0x1000>, <0x0f000000 0x1000000>; ++ reg-names = "control", "memory"; ++ clocks = <&clock 
SS928V100_FMC_CLK>; ++ max-dma-size = <0x2000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ sfc:spi_nor_controller { ++ compatible = "vendor,fmc-spi-nor"; ++ assigned-clocks = <&clock SS928V100_FMC_CLK>; ++ assigned-clock-rates = <24000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ snfc:spi_nand_controller { ++ compatible = "vendor,fmc-spi-nand"; ++ assigned-clocks = <&clock SS928V100_FMC_CLK>; ++ assigned-clock-rates = <24000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ nfc:parallel-nand-controller { ++ compatible = "vendor,fmc-nand"; ++ assigned-clocks = <&clock SS928V100_FMC_CLK>; ++ assigned-clock-rates = <200000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ }; ++ ++ /*ethernet DTS nodes*/ ++ mdio: mdio@102903c0 { ++ compatible = "vendor,gemac-mdio"; ++ reg = <0x102903c0 0x20>; ++ clocks = <&clock SS928V100_ETH_CLK>; ++ resets = <&clock 0x37cc 0>; ++ reset-names = "phy_reset"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ mdio1: mdio@102a03c0 { ++ compatible = "vendor,gemac-mdio"; ++ reg = <0x102a03c0 0x20>; ++ clocks = <&clock SS928V100_ETH1_CLK>; ++ resets = <&clock 0x380c 0>; ++ reset-names = "phy_reset"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ gmac: ethernet@10290000 { ++ compatible = "vendor,gmac-v5"; ++ reg = <0x10290000 0x1000>,<0x1029300c 0x4>; ++ interrupts = , , ++ , ; ++ ++ clocks = <&clock SS928V100_ETH_CLK>, ++ <&clock SS928V100_ETH_MACIF_CLK>; ++ clock-names = "gmac_clk", ++ "macif_clk"; ++ ++ resets = <&clock 0x37c4 0>, ++ <&clock 0x37c0 0>; ++ reset-names = "port_reset", ++ "macif_reset"; ++ ++ mac-address = [00 00 00 00 00 00]; ++ }; ++ ++ gmac1: ethernet@102a0000 { ++ compatible = "vendor,gmac-v5"; ++ reg = <0x102a0000 0x1000>,<0x102a300c 0x4>; ++ interrupts =, , ++ , ; ++ ++ clocks = <&clock SS928V100_ETH1_CLK>, ++ <&clock SS928V100_ETH1_MACIF_CLK>; ++ clock-names = "gmac_clk", ++ "macif_clk"; ++ ++ resets = <&clock 0x3804 0>, ++ <&clock 0x3800 0>; ++ reset-names = "port_reset", ++ "macif_reset"; ++ ++ mac-address = [00 00 00 00 00 00]; ++ }; ++ ++ mmc0: eMMC@0x10020000 { ++ compatible = "nebula,sdhci"; ++ reg = <0x10020000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_MMC0_CLK>; ++ clock-names = "mmc_clk"; ++ resets = <&clock 0x34c0 16>, <&clock 0x34c0 17>, <&clock 0x34c0 18>, <&clock 0x34c4 1>; ++ reset-names = "crg_reset", "crg_rx", "crg_tx", "dll_reset"; ++ max-frequency = <200000000>; ++ crg_regmap = <&clock>; ++ non-removable; ++ iocfg_regmap = <&ioconfig0>; ++ bus-width = <8>; ++ mmc-cmd-queue; ++ cap-mmc-highspeed; ++ mmc-hs400-1_8v; ++ mmc-hs400-enhanced-strobe; ++ cap-mmc-hw-reset; ++ no-sdio; ++ no-sd; ++ devid = <0>; ++ status = "okay"; ++ }; ++ ++ mmc1: SDIO@0x10030000 { ++ compatible = "nebula,sdhci"; ++ reg = <0x10030000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_MMC1_CLK>; ++ clock-names = "mmc_clk"; ++ resets = <&clock 0x35c0 16>, <&clock 0x35c0 17>, <&clock 0x35c0 18>, <&clock 0x35c4 1>; ++ reset-names = "crg_reset", "crg_rx", "crg_tx", "dll_reset"; ++ max-frequency = <200000000>; ++ crg_regmap = <&clock>; ++ iocfg_regmap = <&ioconfig1>; ++ bus-width = ; ++ cap-sd-highspeed; ++ sd-uhs-sdr104; ++ sd-uhs-sdr50; ++ full-pwr-cycle; ++ disable-wp; ++ no-emmc; ++ no-sdio; ++ devid = <1>; ++ status = "okay"; ++ }; ++ ++ mmc2: SDIO1@0x10040000 { ++ compatible = "nebula,sdhci"; ++ reg = <0x10040000 0x1000>; ++ interrupts = ; ++ clocks = <&clock SS928V100_MMC2_CLK>; ++ clock-names = "mmc_clk"; ++ resets = <&clock 0x36c0 16>, <&clock 0x36c0 
17>, <&clock 0x36c0 18>, <&clock 0x36c4 1>; ++ reset-names = "crg_reset", "crg_rx", "crg_tx", "dll_reset"; ++ max-frequency = <200000000>; ++ crg_regmap = <&clock>; ++ non-removable; ++ iocfg_regmap = <&ioconfig1>; ++ bus-width = <4>; ++ cap-sd-highspeed; ++ no-emmc; ++ no-sd; ++ devid = <2>; ++ status = "okay"; ++ }; ++ ++ pcie0: pcie@0x103d0000 { ++ device_type = "pcie"; ++ compatible = "vendor,pcie"; ++ #size-cells = <2>; ++ #address-cells = <3>; ++ #interrupt-cells = <1>; ++ bus-range = <0x0 0xff>; ++ reg = <0x00 0x103d0000 0x00 0x2000>; ++ ranges = <0x02000000 0x00 0x30000000 0x30000000 0x00 0x10000000>; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &gic GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH ++ 0x0 0x0 0x0 0x2 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH ++ 0x0 0x0 0x0 0x3 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH ++ 0x0 0x0 0x0 0x4 &gic GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; ++ /* msi interrupts */ ++ interrupts = ; ++ interrupt-names = "msi"; ++ ++ pcie_controller = <0>; ++ dev_mem_size = <0x10000000>; ++ dev_conf_size = <0x10000000>; ++ sys_ctrl_base = <0x11020000>; ++ pcie_dbi_base = <0x103d0000>; ++ ep_conf_base = <0x20000000>; ++ pcie_clk_rest_reg = <0x3a40>; ++ }; ++ ++ pcie_mcc: pcie_mcc@0x0 { ++ compatible = "vendor,pcie_mcc"; ++ interrupts = , /* pcie0 inta */ ++ , /* pcie0 intb */ ++ , /* pcie0 intc */ ++ , /* pcie0 intd */ ++ , /* pcie0 dma*/ ++ ; /* global soft irq */ ++ }; ++ ++ edmacv310_0: edma-controller@10280000 { ++ compatible = "vendor,edmacv310"; ++ reg = <0x10280000 0x1000>, <0x102e0024 0x4>; ++ reg-names = "dmac", "dma_peri_channel_req_sel"; ++ interrupts = ; ++ clocks = <&clock SS928V100_EDMAC_CLK>, ++ <&clock SS928V100_EDMAC_AXICLK>; ++ clock-names = "apb_pclk", "axi_aclk"; ++ #clock-cells = <2>; ++ resets = <&clock 0x2A80 0>; ++ reset-names = "dma-reset"; ++ dma-requests = <32>; ++ dma-channels = <8>; ++ devid = <0>; ++ #dma-cells = <2>; ++ status = "disabled"; ++ }; ++ ++ ++ /*SDK DTS nodes*/ ++ sys: sys@11010000 { ++ compatible = "vendor,sys"; ++ reg = <0x11014500 0xBB00>, ++ <0x11020000 0x4000>, ++ <0x11140000 0x20000>, ++ <0X11024000 0x5000>; ++ reg-names = "crg", "sys", "ddr", "misc"; ++ }; ++ ++ mipi_rx: mipi_rx@0x173c0000 { ++ compatible = "vendor,mipi_rx"; ++ reg = <0x173c0000 0x10000>; ++ reg-names = "mipi_rx"; ++ interrupts = ; ++ interrupt-names = "mipi_rx"; ++ }; ++ ++ gfbg: gfbg@0x17A00000 { ++ compatible = "vendor,gfbg"; ++ reg = <0x17A00000 0x40000>; ++ reg-names = "gfbg"; ++ interrupts = ; ++ interrupt-names = "gfbg"; ++ }; ++ ++ hdmi: hdmi@0x17B40000 { ++ compatible = "vendor,hdmi"; ++ reg = <0x17B40000 0x20000>,<0x17BC0000 0x10000>; ++ reg-names = "hdmi0","phy"; ++ interrupts = ,, ++ ; ++ interrupt-names = "tx_aon","tx_sec","tx_pwd"; ++ }; ++ ++ mipi_tx: mipi_tx@0x17A80000 { ++ compatible = "vendor,mipi_tx"; ++ reg = <0x17A80000 0x10000>; ++ reg-names = "mipi_tx"; ++ interrupts = ; ++ interrupt-names = "mipi_tx"; ++ }; ++ ++ tde: tde@0x17280000 { ++ compatible = "vendor,tde"; ++ reg = <0x17280000 0x10000>; ++ reg-names = "tde"; ++ interrupts = ; ++ interrupt-names = "tde_osr_isr"; ++ }; ++ ++ npu: npu@0x14000000 { ++ compatible = "vendor,npu"; ++ reg = <0x14000000 0x100000>, ++ <0x14100000 0x200000>,<0x14300000 0x200000>, ++ <0x17150000 0x10000>,<0x11010000 0x10000>; ++ reg-names = "npu_top","npu_htws","npu_aicore", ++ "npu_peri","crg"; ++ interrupts = ,, ++ ,, ++ ,, ++ ,, ++ ; ++ interrupt-names = "hwts_dfx","hwts_normal_s","hwts_debug_s", ++ "hwts_error_s","hwts_normal_ns","hwts_debug_ns", ++ 
"hwts_error_ns","hwts_aicpu_s","hwts_aicpu_ns"; ++ }; ++ ++ pqp: pqp@0x15000000 { ++ compatible = "vendor,pqp"; ++ reg = <0x15000000 0x10000>; ++ reg-names = "pqp"; ++ interrupts = , ; ++ interrupt-names = "pqp_ns","pqp_s"; ++ }; ++ ++ dsp: dsp@0x16110000 { ++ compatible = "vendor,dsp"; ++ reg = <0x16110000 0x20000>,<0x16310000 0x20000>; ++ reg-names = "dsp0","dsp1"; ++ }; ++ ++ avs: avs@0x17930000 { ++ compatible = "vendor,avs"; ++ reg = <0x17930000 0x10000>; ++ reg-names = "avs"; ++ interrupts = ; ++ interrupt-names = "avs"; ++ }; ++ ++ vo: vo@0x17A00000 { ++ compatible = "vendor,vo"; ++ reg = <0x17A00000 0x40000>; ++ reg-names = "vo"; ++ interrupts = ; ++ interrupt-names = "vo"; ++ }; ++ ++ svp_npu: svp_npu@0x15000000 { ++ compatible = "vendor,svp_npu"; ++ reg = <0x15000000 0x10000>; ++ reg-names = "svp_npu"; ++ interrupts = , ; ++ interrupt-names = "svp_npu_ns","svp_npu_s"; ++ }; ++ ++ cipher: cipher@0x10100000 { ++ compatible = "vendor,cipher"; ++ reg = <0x10100000 0x10000>; ++ reg-names = "cipher"; ++ interrupts = ,, ++ ,; ++ interrupt-names = "nsec_spacc","sec_spacc","nsec_pke","sec_pke"; ++ }; ++ ++ klad: klad@0x10110000 { ++ compatible = "vendor,klad"; ++ reg = <0x10110000 0x1000>; ++ reg-names = "klad"; ++ interrupts = ,, ++ ,; ++ interrupt-names = "nsec_rkp","sec_rkp","nsec_klad","sec_klad"; ++ }; ++ ++ otp: otp@0x10120000 { ++ compatible = "vendor,otp"; ++ reg = <0x10120000 0x1000>; ++ reg-names = "otp"; ++ }; ++ ++ ++ ir: ir@0x110F0000 { ++ compatible = "vendor,ir"; ++ reg = <0x110F0000 0x10000>; ++ reg-names = "ir"; ++ interrupts = ; ++ interrupt-names = "ir"; ++ }; ++ ++ irq: irq@120f0000 { ++ compatible = "vendor,ot_irq"; ++ reg = <0x17240000 0x10000>, <0x17250000 0x10000>, ++ <0x172c0000 0x10000>, ++ <0x17400000 0x200000>, ++ <0x17800000 0x40000>, ++ <0x17840000 0x40000>, ++ <0x17140000 0x10000>,<0x171c0000 0x10000>, ++ <0x17A00000 0x40000>, ++ <0x17c00000 0x10000>,<0x17c40000 0x10000>, ++ <0x17900000 0x10000>, ++ <0x17030000 0x10000>, ++ <0x17030000 0x10000>, ++ <0x15000000 0x10000>, ++ <0x17000000 0x10000>, ++ <0x17030000 0x10000>, ++ <0x17180000 0x10000>, ++ <0x17100000 0x10000>; ++ reg-names = "vgs0", "vgs1", ++ "gdc", ++ "vi_cap0", "vi_proc0", "vi_proc1", ++ "vedu0","jpge", ++ "vo", ++ "aiao","acodec", ++ "vpss0", ++ "dpu_rect", ++ "dpu_match", ++ "svp_npu", ++ "ive", ++ "mau0", ++ "jpegd", ++ "vdh_scd"; ++ interrupts = , , ++ , ++ , ,, ++ ,, ++ , ++ , ++ , ++ , ++ , ++ , , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "vgs0", "vgs1", ++ "gdc", ++ "vi_cap0", "vi_proc0", "vi_proc1", ++ "vedu0","jpge", ++ "vo", ++ "AIO","vpss0", ++ "rect", ++ "match", ++ "svp_npu_ns","svp_npu_s", ++ "ive", ++ "mau0", ++ "jpegd", ++ "vdh_bsp","vdh_pxp","scd","vdh_mdma"; ++ }; ++ ++ wdg: wdg@0x11030000 { ++ compatible = "vendor,wdg"; ++ reg = <0x11030000 0x1000>; ++ reg-names = "wdg0"; ++ interrupts = ; ++ interrupt-names = "wdg"; ++ }; ++ ++ pwm: pwm@0x1102D000 { ++ compatible = "vendor,pwm"; ++ reg = <0x110B0000 0x1000>, <0x1102D000 0x1000>; ++ reg-names = "pwm0", "pwm1"; ++ clocks = <&clock SS928V100_PWM0_CLK>, <&clock SS928V100_PWM1_CLK>; ++ clock-names = "pwm0", "pwm1"; ++ resets = <&clock 0x4588 0>, <&clock 0x4590 0>; ++ reset-names = "pwm0", "pwm1"; ++ status = "disabled"; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/vendor/ss928v100_family_usb.dtsi b/arch/arm64/boot/dts/vendor/ss928v100_family_usb.dtsi +new file mode 100644 +index 000000000..ae13bf4b0 +--- /dev/null ++++ b/arch/arm64/boot/dts/vendor/ss928v100_family_usb.dtsi +@@ -0,0 +1,111 @@ ++#include ++ ++/ { 
++ ups_clock:ups_clock { ++ compatible = "basedrv-ip,clock"; ++ reg = <0x0 0x11010000 0x0 0x10000>,<0x0 0x11020000 0x0 0x10000>; ++ reg-names = "peri_crg","peri_ctrl"; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ usb2phy0:usb2phy@10310000 { ++ compatible = "usb2phy,xvpphy"; ++ reg = <0x0 0x10310000 0x0 0x1000>,<0x0 0x11020000 0x0 0x10000>; ++ reg-names = "u2_phy","peri_ctrl"; ++ clocks = <&ups_clock PERI_CRG3632_USB2_PHY0>; ++ clock-names = "phy-clk"; ++ u2phy-trim = <0x0A33CC2B 0x00260F0F>; ++ #phy-cells = <0>; ++ status = "okay"; ++ }; ++ ++ usb2phy1:usb2phy@10330000 { ++ compatible = "usb2phy,xvpphy"; ++ reg = <0x0 0x10330000 0x0 0x1000>,<0x0 0x11020000 0x0 0x10000>; ++ reg-names = "u2_phy","peri_ctrl"; ++ clocks = <&ups_clock PERI_CRG3640_USB2_PHY1>; ++ clock-names = "phy-clk"; ++ u2phy-trim = <0x0A33CC2B 0x00260F0F>; ++ #phy-cells = <0>; ++ status = "okay"; ++ }; ++ ++ combophy0:combophy0@10220004 { ++ compatible = "combophy,common"; ++ reg = <0x0 0x10220004 0x0 0x1000>,<0x0 0x11020000 0x0 0x10000>; ++ reg-names = "combophy","peri_ctrl"; ++ clocks = <&ups_clock PERI_CRG3665_COMBPHY0_CLK>; ++ clock-names = "phy-clk"; ++ #phy-cells = <0>; ++ status = "okay"; ++ }; ++ ++ combophy1:combophy1@10220008 { ++ compatible = "combophy,common"; ++ reg = <0x0 0x10220008 0x0 0x1000>,<0x0 0x11020000 0x0 0x10000>; ++ reg-names = "combophy","peri_ctrl"; ++ clocks = <&ups_clock PERI_CRG3673_COMBPHY1_CLK>; ++ clock-names = "phy-clk"; ++ #phy-cells = <0>; ++ status = "okay"; ++ }; ++ ++ usbctr0:usbctrl@10300000 { ++ compatible = "wing-usb,host"; ++ reg = <0x0 0x10300000 0x0 0x10000>; ++ host-mode; ++ tx-thrcfg = <0x22080000>; ++ rx-thrcfg = <0x22800000>; ++ disable-suspend; ++ phys = <&usb2phy0>,<&combophy0>; ++ phy-names = "usb2-phy", "usb3-phy"; ++ clocks = <&ups_clock PERI_CRG3664_USB30_CTRL0>; ++ clock-names = "ctrl-clk"; ++ status = "okay"; ++ ranges; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ xhci0:xhci@10300000 { ++ compatible = "generic-xhci"; ++ reg = <0x0 0x10300000 0x0 0x10000>; ++ interrupts = ; ++ usb2-lpm-disable; ++ }; ++ }; ++ ++ usb30drd:usb30drd@0x10320000 { ++ compatible = "wing-usb,drd"; ++ reg = <0x0 0x10320000 0x0 0x10000>; ++ controller_id = <0>; ++ support-drd; ++ tx-thrcfg = <0x22080000>; ++ rx-thrcfg = <0x22200000>; ++ phys = <&usb2phy1>,<&combophy1>; ++ phy-names = "usb2-phy", "usb3-phy"; ++ clocks = <&ups_clock PERI_CRG3672_USB30_CTRL1>; ++ clock-names = "ctrl-clk"; ++ init_mode="device"; ++ status = "okay"; ++ ranges; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dwc3@10320000{ ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x10320000 0x0 0x10000>; ++ interrupts = ; ++ interrupt-names = "peripheral"; ++ maximum-speed = "super-speed"; ++ dr_mode = "otg"; ++ usb-role-switch; ++ snps,usb2-lpm-disable; ++ snps,usb2-gadget-lpm-disable; ++ snps,dis-u1-entry-quirk; ++ snps,dis-u2-entry-quirk; ++ snps,dis_u2_susphy_quirk; ++ snps,dis_u3_susphy_quirk; ++ linux,sysdev_is_parent; ++ extcon = <&usb30drd>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/configs/ss928v100_defconfig b/arch/arm64/configs/ss928v100_defconfig +new file mode 100644 +index 000000000..37ad67cfd +--- /dev/null ++++ b/arch/arm64/configs/ss928v100_defconfig +@@ -0,0 +1,239 @@ ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_USELIB=y ++CONFIG_BPF_SYSCALL=y ++# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_NAMESPACES=y ++CONFIG_RELAY=y ++CONFIG_EXPERT=y ++# CONFIG_FHANDLE is not set ++CONFIG_ARCH_BSP=y ++CONFIG_ARCH_SS928V100=y ++CONFIG_SCHED_MC=y 
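++# Quad-core SoC: up to 4 CPUs with hotplug, 100 Hz tick.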
++CONFIG_NR_CPUS=4 ++CONFIG_HOTPLUG_CPU=y ++CONFIG_HZ_100=y ++CONFIG_CMDLINE="mem=128M console=ttyAMA0,115200 console=ttyMTD,blackbox" ++# CONFIG_EFI is not set ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="vendor/ss928v100-demb-flash" ++# CONFIG_SUSPEND is not set ++CONFIG_PM=y ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_STAT=y ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y ++CONFIG_CPUFREQ_DT=y ++# CONFIG_SECCOMP is not set ++CONFIG_COMPAT_32BIT_TIME=y ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_CMDLINE_PARTITION=y ++# CONFIG_SWAP is not set ++# CONFIG_COMPAT_BRK is not set ++CONFIG_KSM=y ++CONFIG_CMA=y ++CONFIG_USERFAULTFD=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_SIT=m ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_EGRESS is not set ++# CONFIG_ETHTOOL_NETLINK is not set ++CONFIG_UEVENT_HELPER=y ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++# CONFIG_PREVENT_FIRMWARE_BUILD is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK2MTD=y ++CONFIG_MTD_SPI_NAND_BSP=y ++CONFIG_MTD_SPI_NAND_FMC100=y ++CONFIG_MTD_RAW_NAND=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_SPI_BSP_SFC=y ++CONFIG_MTD_UBI=y ++# CONFIG_BLK_DEV is not set ++CONFIG_SCSI=y ++CONFIG_BLK_DEV_SD=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_ALACRITECH is not set ++# CONFIG_NET_VENDOR_AMAZON is not set ++# CONFIG_NET_VENDOR_AMD is not set ++# CONFIG_NET_VENDOR_AQUANTIA is not set ++# CONFIG_NET_VENDOR_ARC is not set ++# CONFIG_NET_VENDOR_ASIX is not set ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CADENCE is not set ++# CONFIG_NET_VENDOR_CAVIUM is not set ++# CONFIG_NET_VENDOR_CORTINA is not set ++# CONFIG_NET_VENDOR_DAVICOM is not set ++# CONFIG_NET_VENDOR_ENGLEDER is not set ++# CONFIG_NET_VENDOR_EZCHIP is not set ++# CONFIG_NET_VENDOR_FUNGIBLE is not set ++# CONFIG_NET_VENDOR_GOOGLE is not set ++# CONFIG_NET_VENDOR_HISILICON is not set ++# CONFIG_NET_VENDOR_HUAWEI is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++CONFIG_ETH_GMAC=y ++# CONFIG_NET_VENDOR_ADI is not set ++# CONFIG_NET_VENDOR_LITEX is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MELLANOX is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_MICROSEMI is not set ++# CONFIG_NET_VENDOR_MICROSOFT is not set ++# CONFIG_NET_VENDOR_NI is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_NETRONOME is not set ++# CONFIG_NET_VENDOR_PENSANDO is not set ++# CONFIG_NET_VENDOR_QUALCOMM is not set ++# CONFIG_NET_VENDOR_RENESAS is not set ++# CONFIG_NET_VENDOR_ROCKER is not set ++# CONFIG_NET_VENDOR_SAMSUNG is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SOLARFLARE is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_SOCIONEXT is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_NET_VENDOR_SYNOPSYS is not set ++# CONFIG_NET_VENDOR_VERTEXCOM is not set ++# CONFIG_NET_VENDOR_VIA is not set ++# CONFIG_NET_VENDOR_WANGXUN is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++# CONFIG_NET_VENDOR_XILINX is not set ++CONFIG_MDIO_BSP_GEMAC=y ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# 
CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++# CONFIG_I2C_COMPAT is not set ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MUX=y ++# CONFIG_I2C_HELPER_AUTO is not set ++CONFIG_I2C_BSP=y ++CONFIG_SPI=y ++CONFIG_SPI_PL022=y ++CONFIG_SPI_SPIDEV=y ++CONFIG_PINCTRL_SINGLE=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_GENERIC_PLATFORM=y ++CONFIG_GPIO_PL061=y ++# CONFIG_HWMON is not set ++CONFIG_MFD_BSP_FMC=y ++CONFIG_MFD_SYSCON=y ++CONFIG_FB=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_VIRTIO_MENU is not set ++# CONFIG_VHOST_MENU is not set ++# CONFIG_SURFACE_PLATFORMS is not set ++# CONFIG_FSL_ERRATUM_A008585 is not set ++CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y ++CONFIG_IOMMU_DEFAULT_DMA_LAZY=y ++CONFIG_ARM_SMMU_V3=y ++CONFIG_ARM_SMMU_V3_SVA=y ++# CONFIG_NVMEM is not set ++CONFIG_VENDOR_NPU=y ++CONFIG_EXT3_FS=y ++CONFIG_XFS_FS=y ++CONFIG_BTRFS_FS=y ++CONFIG_QUOTA=y ++CONFIG_QFMT_V1=m ++CONFIG_QFMT_V2=m ++CONFIG_AUTOFS_FS=m ++CONFIG_FUSE_FS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_CONFIGFS_FS=y ++CONFIG_UBIFS_FS=y ++CONFIG_CRAMFS=y ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_LZO=y ++CONFIG_SQUASHFS_XZ=y ++CONFIG_NFS_FS=y ++CONFIG_NLS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=y ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_UTF8=y ++CONFIG_KEYS=y ++CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" ++CONFIG_INIT_STACK_NONE=y ++CONFIG_CRYPTO_AES=y ++CONFIG_CRYPTO_ECB=y ++CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_SEQIV=m ++CONFIG_CRYPTO_ECHAINIV=m ++CONFIG_CRYPTO_CMAC=y ++# CONFIG_RAID6_PQ_BENCHMARK is not set ++CONFIG_CRC_CCITT=y ++CONFIG_CRC_ITU_T=y ++# CONFIG_XZ_DEC_X86 is not set ++# CONFIG_XZ_DEC_POWERPC is not set ++# CONFIG_XZ_DEC_IA64 is not set ++# CONFIG_XZ_DEC_ARM is not set ++# CONFIG_XZ_DEC_ARMTHUMB is not set ++# CONFIG_XZ_DEC_SPARC is not set ++CONFIG_DMA_CMA=y ++CONFIG_CMA_SIZE_MBYTES=4 ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_PANIC_ON_OOPS=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_SCHEDSTATS=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++# CONFIG_FTRACE is not set ++# CONFIG_STRICT_DEVMEM is not set +diff --git a/arch/arm64/configs/ss928v100_emmc_defconfig b/arch/arm64/configs/ss928v100_emmc_defconfig +new file mode 100644 +index 000000000..4b0cfee2c +--- /dev/null ++++ b/arch/arm64/configs/ss928v100_emmc_defconfig +@@ -0,0 +1,3386 @@ ++# ++# Automatically generated file; DO NOT EDIT. 
++# Linux/arm64 6.6.90 Kernel Configuration ++# ++CONFIG_CC_VERSION_TEXT="clang version 15.0.4 (v050 musl1.2.3 2023-03-18 10:36:32)" ++CONFIG_GCC_VERSION=0 ++CONFIG_CC_IS_CLANG=y ++CONFIG_CLANG_VERSION=150004 ++CONFIG_AS_IS_LLVM=y ++CONFIG_AS_VERSION=150004 ++CONFIG_LD_VERSION=0 ++CONFIG_LD_IS_LLD=y ++CONFIG_LLD_VERSION=150004 ++CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y ++CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y ++CONFIG_TOOLS_SUPPORT_RELR=y ++CONFIG_CC_HAS_ASM_INLINE=y ++CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y ++CONFIG_PAHOLE_VERSION=121 ++CONFIG_IRQ_WORK=y ++CONFIG_BUILDTIME_TABLE_SORT=y ++CONFIG_THREAD_INFO_IN_TASK=y ++ ++# ++# General setup ++# ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++# CONFIG_COMPILE_TEST is not set ++# CONFIG_WERROR is not set ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_BUILD_SALT="" ++CONFIG_DEFAULT_INIT="" ++CONFIG_DEFAULT_HOSTNAME="(none)" ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++# CONFIG_WATCH_QUEUE is not set ++CONFIG_CROSS_MEMORY_ATTACH=y ++CONFIG_USELIB=y ++# CONFIG_AUDIT is not set ++CONFIG_HAVE_ARCH_AUDITSYSCALL=y ++ ++# ++# IRQ subsystem ++# ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_GENERIC_IRQ_SHOW=y ++CONFIG_GENERIC_IRQ_SHOW_LEVEL=y ++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y ++CONFIG_GENERIC_IRQ_MIGRATION=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_IRQ_DOMAIN=y ++CONFIG_IRQ_DOMAIN_HIERARCHY=y ++CONFIG_GENERIC_IRQ_IPI=y ++CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_IRQ_FORCED_THREADING=y ++CONFIG_SPARSE_IRQ=y ++# end of IRQ subsystem ++ ++CONFIG_GENERIC_TIME_VSYSCALL=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_ARCH_HAS_TICK_BROADCAST=y ++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y ++CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y ++CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y ++CONFIG_CONTEXT_TRACKING=y ++CONFIG_CONTEXT_TRACKING_IDLE=y ++ ++# ++# Timers subsystem ++# ++CONFIG_HZ_PERIODIC=y ++# CONFIG_NO_HZ_IDLE is not set ++# CONFIG_NO_HZ_FULL is not set ++# CONFIG_NO_HZ is not set ++# CONFIG_HIGH_RES_TIMERS is not set ++# end of Timers subsystem ++ ++CONFIG_BPF=y ++CONFIG_HAVE_EBPF_JIT=y ++CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y ++ ++# ++# BPF subsystem ++# ++CONFIG_BPF_SYSCALL=y ++# CONFIG_BPF_JIT is not set ++# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set ++# CONFIG_BPF_PRELOAD is not set ++# end of BPF subsystem ++ ++CONFIG_PREEMPT_NONE_BUILD=y ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++# CONFIG_PREEMPT_DYNAMIC is not set ++ ++# ++# CPU/Task time and stats accounting ++# ++CONFIG_TICK_CPU_ACCOUNTING=y ++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set ++# CONFIG_IRQ_TIME_ACCOUNTING is not set ++# CONFIG_BSD_PROCESS_ACCT is not set ++# CONFIG_TASKSTATS is not set ++# CONFIG_PSI is not set ++# end of CPU/Task time and stats accounting ++ ++CONFIG_CPU_ISOLATION=y ++ ++# ++# RCU Subsystem ++# ++CONFIG_TREE_RCU=y ++# CONFIG_RCU_EXPERT is not set ++CONFIG_TREE_SRCU=y ++CONFIG_TASKS_RCU_GENERIC=y ++CONFIG_TASKS_TRACE_RCU=y ++CONFIG_RCU_STALL_COMMON=y ++CONFIG_RCU_NEED_SEGCBLIST=y ++# end of RCU Subsystem ++ ++# CONFIG_IKCONFIG is not set ++# CONFIG_IKHEADERS is not set ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 ++CONFIG_GENERIC_SCHED_CLOCK=y ++ ++# ++# Scheduler features ++# ++# end of Scheduler features ++ ++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y ++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y ++CONFIG_CC_HAS_INT128=y ++CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough" ++CONFIG_GCC10_NO_ARRAY_BOUNDS=y ++CONFIG_ARCH_SUPPORTS_INT128=y ++# CONFIG_CGROUPS is not set ++CONFIG_NAMESPACES=y 
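++# UTS/TIME/IPC/PID/NET namespaces enabled; user namespaces stay off.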
++CONFIG_UTS_NS=y ++CONFIG_TIME_NS=y ++CONFIG_IPC_NS=y ++# CONFIG_USER_NS is not set ++CONFIG_PID_NS=y ++CONFIG_NET_NS=y ++# CONFIG_CHECKPOINT_RESTORE is not set ++# CONFIG_SCHED_AUTOGROUP is not set ++CONFIG_RELAY=y ++# CONFIG_BLK_DEV_INITRD is not set ++# CONFIG_BOOT_CONFIG is not set ++CONFIG_INITRAMFS_PRESERVE_MTIME=y ++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_LD_ORPHAN_WARN=y ++CONFIG_LD_ORPHAN_WARN_LEVEL="warn" ++CONFIG_SYSCTL=y ++CONFIG_SYSCTL_EXCEPTION_TRACE=y ++CONFIG_EXPERT=y ++CONFIG_MULTIUSER=y ++# CONFIG_SGETMASK_SYSCALL is not set ++CONFIG_SYSFS_SYSCALL=y ++# CONFIG_FHANDLE is not set ++CONFIG_POSIX_TIMERS=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_FUTEX_PI=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_IO_URING=y ++CONFIG_ADVISE_SYSCALLS=y ++CONFIG_MEMBARRIER=y ++CONFIG_KALLSYMS=y ++# CONFIG_KALLSYMS_SELFTEST is not set ++# CONFIG_KALLSYMS_ALL is not set ++CONFIG_KALLSYMS_BASE_RELATIVE=y ++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y ++# CONFIG_KCMP is not set ++CONFIG_RSEQ=y ++CONFIG_CACHESTAT_SYSCALL=y ++# CONFIG_DEBUG_RSEQ is not set ++CONFIG_HAVE_PERF_EVENTS=y ++# CONFIG_PC104 is not set ++ ++# ++# Kernel Performance Events And Counters ++# ++# CONFIG_PERF_EVENTS is not set ++# end of Kernel Performance Events And Counters ++ ++# CONFIG_PROFILING is not set ++ ++# ++# Kexec and crash features ++# ++# CONFIG_KEXEC_FILE is not set ++# CONFIG_CRASH_DUMP is not set ++# end of Kexec and crash features ++# end of General setup ++ ++CONFIG_ARM64=y ++CONFIG_CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y ++CONFIG_64BIT=y ++CONFIG_MMU=y ++CONFIG_ARM64_PAGE_SHIFT=12 ++CONFIG_ARM64_CONT_PTE_SHIFT=4 ++CONFIG_ARM64_CONT_PMD_SHIFT=4 ++CONFIG_ARCH_MMAP_RND_BITS_MIN=18 ++CONFIG_ARCH_MMAP_RND_BITS_MAX=24 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 ++CONFIG_NO_IOPORT_MAP=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_GENERIC_BUG=y ++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CSUM=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_SMP=y ++CONFIG_KERNEL_MODE_NEON=y ++CONFIG_FIX_EARLYCON_MEM=y ++CONFIG_PGTABLE_LEVELS=3 ++CONFIG_ARCH_SUPPORTS_UPROBES=y ++CONFIG_ARCH_PROC_KCORE_TEXT=y ++CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y ++ ++# ++# Platform selection ++# ++# CONFIG_ARCH_ACTIONS is not set ++# CONFIG_ARCH_SUNXI is not set ++# CONFIG_ARCH_ALPINE is not set ++# CONFIG_ARCH_APPLE is not set ++# CONFIG_ARCH_BCM is not set ++# CONFIG_ARCH_BERLIN is not set ++# CONFIG_ARCH_BITMAIN is not set ++# CONFIG_ARCH_EXYNOS is not set ++# CONFIG_ARCH_SPARX5 is not set ++# CONFIG_ARCH_K3 is not set ++# CONFIG_ARCH_LG1K is not set ++# CONFIG_ARCH_HISI is not set ++# CONFIG_ARCH_KEEMBAY is not set ++CONFIG_ARCH_BSP=y ++CONFIG_ARCH_SS928V100=y ++# CONFIG_ARCH_MEDIATEK is not set ++# CONFIG_ARCH_MESON is not set ++# CONFIG_ARCH_MVEBU is not set ++# CONFIG_ARCH_NXP is not set ++# CONFIG_ARCH_MA35 is not set ++# CONFIG_ARCH_NPCM is not set ++# CONFIG_ARCH_QCOM is not set ++# CONFIG_ARCH_REALTEK is not set ++# CONFIG_ARCH_RENESAS is not set ++# CONFIG_ARCH_ROCKCHIP is not set ++# CONFIG_ARCH_SEATTLE is not set ++# CONFIG_ARCH_INTEL_SOCFPGA is not set ++# CONFIG_ARCH_STM32 is not set ++# CONFIG_ARCH_SYNQUACER is not set ++# CONFIG_ARCH_TEGRA is not set ++# CONFIG_ARCH_SPRD is not set ++# 
CONFIG_ARCH_THUNDER is not set ++# CONFIG_ARCH_THUNDER2 is not set ++# CONFIG_ARCH_UNIPHIER is not set ++# CONFIG_ARCH_VEXPRESS is not set ++# CONFIG_ARCH_VISCONTI is not set ++# CONFIG_ARCH_XGENE is not set ++# CONFIG_ARCH_ZYNQMP is not set ++# end of Platform selection ++ ++# ++# Kernel Features ++# ++ ++# ++# ARM errata workarounds via the alternatives framework ++# ++CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y ++CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y ++CONFIG_ARM64_ERRATUM_826319=y ++CONFIG_ARM64_ERRATUM_827319=y ++CONFIG_ARM64_ERRATUM_824069=y ++CONFIG_ARM64_ERRATUM_819472=y ++CONFIG_ARM64_ERRATUM_832075=y ++CONFIG_ARM64_ERRATUM_843419=y ++CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y ++CONFIG_ARM64_ERRATUM_1024718=y ++CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y ++CONFIG_ARM64_ERRATUM_1165522=y ++CONFIG_ARM64_ERRATUM_1319367=y ++CONFIG_ARM64_ERRATUM_1530923=y ++CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y ++CONFIG_ARM64_ERRATUM_2441007=y ++CONFIG_ARM64_ERRATUM_1286807=y ++CONFIG_ARM64_ERRATUM_1463225=y ++CONFIG_ARM64_ERRATUM_1542419=y ++CONFIG_ARM64_ERRATUM_1508412=y ++CONFIG_ARM64_ERRATUM_2051678=y ++CONFIG_ARM64_ERRATUM_2077057=y ++CONFIG_ARM64_ERRATUM_2658417=y ++CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y ++CONFIG_ARM64_ERRATUM_2054223=y ++CONFIG_ARM64_ERRATUM_2067961=y ++CONFIG_ARM64_ERRATUM_2441009=y ++CONFIG_ARM64_ERRATUM_2457168=y ++CONFIG_ARM64_ERRATUM_2645198=y ++CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y ++CONFIG_ARM64_ERRATUM_2966298=y ++CONFIG_ARM64_ERRATUM_3117295=y ++CONFIG_ARM64_ERRATUM_3194386=y ++CONFIG_CAVIUM_ERRATUM_22375=y ++CONFIG_CAVIUM_ERRATUM_23154=y ++CONFIG_CAVIUM_ERRATUM_27456=y ++CONFIG_CAVIUM_ERRATUM_30115=y ++CONFIG_CAVIUM_TX2_ERRATUM_219=y ++CONFIG_FUJITSU_ERRATUM_010001=y ++CONFIG_HISILICON_ERRATUM_161600802=y ++CONFIG_QCOM_FALKOR_ERRATUM_1003=y ++CONFIG_QCOM_FALKOR_ERRATUM_1009=y ++CONFIG_QCOM_QDF2400_ERRATUM_0065=y ++CONFIG_QCOM_FALKOR_ERRATUM_E1041=y ++CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y ++CONFIG_ROCKCHIP_ERRATUM_3588001=y ++CONFIG_SOCIONEXT_SYNQUACER_PREITS=y ++# end of ARM errata workarounds via the alternatives framework ++ ++CONFIG_ARM64_4K_PAGES=y ++# CONFIG_ARM64_16K_PAGES is not set ++# CONFIG_ARM64_64K_PAGES is not set ++CONFIG_ARM64_VA_BITS_39=y ++# CONFIG_ARM64_VA_BITS_48 is not set ++CONFIG_ARM64_VA_BITS=39 ++CONFIG_ARM64_PA_BITS_48=y ++CONFIG_ARM64_PA_BITS=48 ++# CONFIG_CPU_BIG_ENDIAN is not set ++CONFIG_CPU_LITTLE_ENDIAN=y ++CONFIG_SCHED_MC=y ++# CONFIG_SCHED_CLUSTER is not set ++# CONFIG_SCHED_SMT is not set ++CONFIG_NR_CPUS=4 ++CONFIG_HOTPLUG_CPU=y ++# CONFIG_NUMA is not set ++CONFIG_HZ_100=y ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++# CONFIG_HZ_1000 is not set ++CONFIG_HZ=100 ++CONFIG_ARCH_SPARSEMEM_ENABLE=y ++CONFIG_CC_HAVE_SHADOW_CALL_STACK=y ++# CONFIG_PARAVIRT is not set ++# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set ++CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y ++CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y ++CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y ++CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y ++CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y ++# CONFIG_XEN is not set ++CONFIG_ARCH_FORCE_MAX_ORDER=10 ++CONFIG_UNMAP_KERNEL_AT_EL0=y ++CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y ++CONFIG_RODATA_FULL_DEFAULT_ENABLED=y ++# CONFIG_ARM64_SW_TTBR0_PAN is not set ++CONFIG_ARM64_TAGGED_ADDR_ABI=y ++# CONFIG_COMPAT is not set ++ ++# ++# ARMv8.1 architectural features ++# ++CONFIG_ARM64_HW_AFDBM=y ++CONFIG_ARM64_PAN=y ++CONFIG_AS_HAS_LSE_ATOMICS=y ++CONFIG_ARM64_LSE_ATOMICS=y ++CONFIG_ARM64_USE_LSE_ATOMICS=y ++# end of ARMv8.1 architectural features ++ ++# ++# ARMv8.2 
architectural features ++# ++CONFIG_AS_HAS_ARMV8_2=y ++CONFIG_AS_HAS_SHA3=y ++# CONFIG_ARM64_PMEM is not set ++CONFIG_ARM64_RAS_EXTN=y ++CONFIG_ARM64_CNP=y ++# end of ARMv8.2 architectural features ++ ++# ++# ARMv8.3 architectural features ++# ++CONFIG_ARM64_PTR_AUTH=y ++CONFIG_ARM64_PTR_AUTH_KERNEL=y ++CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y ++CONFIG_AS_HAS_ARMV8_3=y ++CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y ++CONFIG_AS_HAS_LDAPR=y ++# end of ARMv8.3 architectural features ++ ++# ++# ARMv8.4 architectural features ++# ++CONFIG_ARM64_AMU_EXTN=y ++CONFIG_AS_HAS_ARMV8_4=y ++CONFIG_ARM64_TLB_RANGE=y ++# end of ARMv8.4 architectural features ++ ++# ++# ARMv8.5 architectural features ++# ++CONFIG_AS_HAS_ARMV8_5=y ++CONFIG_ARM64_BTI=y ++CONFIG_ARM64_BTI_KERNEL=y ++CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y ++CONFIG_ARM64_E0PD=y ++CONFIG_ARM64_AS_HAS_MTE=y ++CONFIG_ARM64_MTE=y ++# end of ARMv8.5 architectural features ++ ++# ++# ARMv8.7 architectural features ++# ++CONFIG_ARM64_EPAN=y ++# end of ARMv8.7 architectural features ++ ++CONFIG_ARM64_SVE=y ++# CONFIG_ARM64_PSEUDO_NMI is not set ++CONFIG_RELOCATABLE=y ++# CONFIG_RANDOMIZE_BASE is not set ++CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y ++CONFIG_STACKPROTECTOR_PER_TASK=y ++# end of Kernel Features ++ ++# ++# Boot options ++# ++CONFIG_CMDLINE="mem=128M console=ttyAMA0,115200 console=ttyMTD,blackbox" ++CONFIG_CMDLINE_FROM_BOOTLOADER=y ++# CONFIG_CMDLINE_FORCE is not set ++# CONFIG_EFI is not set ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="vendor/ss928v100-demb-emmc" ++# end of Boot options ++ ++# ++# Power management options ++# ++# CONFIG_SUSPEND is not set ++CONFIG_PM=y ++# CONFIG_PM_DEBUG is not set ++CONFIG_PM_CLK=y ++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set ++# CONFIG_ENERGY_MODEL is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++# end of Power management options ++ ++# ++# CPU Power Management ++# ++ ++# ++# CPU Idle ++# ++# CONFIG_CPU_IDLE is not set ++# end of CPU Idle ++ ++# ++# CPU Frequency scaling ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_ATTR_SET=y ++CONFIG_CPU_FREQ_GOV_COMMON=y ++CONFIG_CPU_FREQ_STAT=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y ++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set ++ ++# ++# CPU frequency scaling drivers ++# ++CONFIG_CPUFREQ_DT=y ++CONFIG_CPUFREQ_DT_PLATDEV=y ++# end of CPU Frequency scaling ++# end of CPU Power Management ++ ++CONFIG_HAVE_KVM=y ++# CONFIG_VIRTUALIZATION is not set ++CONFIG_CPU_MITIGATIONS=y ++ ++# ++# General architecture-dependent options ++# ++CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y ++CONFIG_HOTPLUG_CORE_SYNC=y ++CONFIG_HOTPLUG_CORE_SYNC_DEAD=y ++# CONFIG_KPROBES is not set ++# CONFIG_JUMP_LABEL is not set ++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y ++CONFIG_HAVE_IOREMAP_PROT=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y ++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y ++CONFIG_HAVE_NMI=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y ++CONFIG_HAVE_ARCH_TRACEHOOK=y ++CONFIG_HAVE_DMA_CONTIGUOUS=y ++CONFIG_GENERIC_SMP_IDLE_THREAD=y 
++CONFIG_GENERIC_IDLE_POLL_SETUP=y ++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y ++CONFIG_ARCH_HAS_KEEPINITRD=y ++CONFIG_ARCH_HAS_SET_MEMORY=y ++CONFIG_ARCH_HAS_SET_DIRECT_MAP=y ++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y ++CONFIG_ARCH_WANTS_NO_INSTR=y ++CONFIG_HAVE_ASM_MODVERSIONS=y ++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y ++CONFIG_HAVE_RSEQ=y ++CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y ++CONFIG_HAVE_PERF_REGS=y ++CONFIG_HAVE_PERF_USER_STACK_DUMP=y ++CONFIG_HAVE_ARCH_JUMP_LABEL=y ++CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y ++CONFIG_MMU_GATHER_TABLE_FREE=y ++CONFIG_MMU_GATHER_RCU_TABLE_FREE=y ++CONFIG_MMU_LAZY_TLB_REFCOUNT=y ++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y ++CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y ++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y ++CONFIG_HAVE_CMPXCHG_LOCAL=y ++CONFIG_HAVE_CMPXCHG_DOUBLE=y ++CONFIG_HAVE_ARCH_SECCOMP=y ++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y ++# CONFIG_SECCOMP is not set ++CONFIG_HAVE_ARCH_STACKLEAK=y ++CONFIG_HAVE_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR_STRONG=y ++CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y ++# CONFIG_SHADOW_CALL_STACK is not set ++CONFIG_ARCH_SUPPORTS_LTO_CLANG=y ++CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y ++CONFIG_HAS_LTO_CLANG=y ++CONFIG_LTO_NONE=y ++# CONFIG_LTO_CLANG_FULL is not set ++# CONFIG_LTO_CLANG_THIN is not set ++CONFIG_ARCH_SUPPORTS_CFI_CLANG=y ++CONFIG_HAVE_CONTEXT_TRACKING_USER=y ++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y ++CONFIG_HAVE_MOVE_PUD=y ++CONFIG_HAVE_MOVE_PMD=y ++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y ++CONFIG_HAVE_ARCH_HUGE_VMAP=y ++CONFIG_HAVE_ARCH_HUGE_VMALLOC=y ++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y ++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y ++CONFIG_MODULES_USE_ELF_RELA=y ++CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y ++CONFIG_SOFTIRQ_ON_OWN_STACK=y ++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y ++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y ++CONFIG_ARCH_MMAP_RND_BITS=18 ++CONFIG_PAGE_SIZE_LESS_THAN_64KB=y ++CONFIG_PAGE_SIZE_LESS_THAN_256KB=y ++CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y ++CONFIG_CLONE_BACKWARDS=y ++CONFIG_COMPAT_32BIT_TIME=y ++CONFIG_HAVE_ARCH_VMAP_STACK=y ++CONFIG_VMAP_STACK=y ++CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y ++CONFIG_RANDOMIZE_KSTACK_OFFSET=y ++# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set ++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y ++CONFIG_STRICT_KERNEL_RWX=y ++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y ++CONFIG_STRICT_MODULE_RWX=y ++CONFIG_HAVE_ARCH_COMPILER_H=y ++CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y ++CONFIG_ARCH_HAS_RELR=y ++CONFIG_RELR=y ++CONFIG_HAVE_PREEMPT_DYNAMIC=y ++CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y ++CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y ++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y ++CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y ++CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y ++ ++# ++# GCOV-based kernel profiling ++# ++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y ++# end of GCOV-based kernel profiling ++ ++CONFIG_HAVE_GCC_PLUGINS=y ++CONFIG_FUNCTION_ALIGNMENT_4B=y ++CONFIG_FUNCTION_ALIGNMENT=4 ++# end of General architecture-dependent options ++ ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++# CONFIG_MODULE_SIG is not set ++CONFIG_MODULE_COMPRESS_NONE=y ++# CONFIG_MODULE_COMPRESS_GZIP is not set ++# CONFIG_MODULE_COMPRESS_XZ is not set ++# CONFIG_MODULE_COMPRESS_ZSTD is not set ++# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set ++CONFIG_MODPROBE_PATH="/sbin/modprobe" ++# 
CONFIG_TRIM_UNUSED_KSYMS is not set ++CONFIG_BLOCK=y ++CONFIG_BLOCK_LEGACY_AUTOLOAD=y ++CONFIG_BLK_CGROUP_PUNT_BIO=y ++CONFIG_BLK_DEV_BSG_COMMON=y ++# CONFIG_BLK_DEV_BSGLIB is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++# CONFIG_BLK_DEV_ZONED is not set ++# CONFIG_BLK_WBT is not set ++# CONFIG_BLK_SED_OPAL is not set ++# CONFIG_BLK_INLINE_ENCRYPTION is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_AIX_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++CONFIG_EFI_PARTITION=y ++# CONFIG_SYSV68_PARTITION is not set ++CONFIG_CMDLINE_PARTITION=y ++# end of Partition Types ++ ++CONFIG_BLK_PM=y ++ ++# ++# IO Schedulers ++# ++CONFIG_MQ_IOSCHED_DEADLINE=y ++CONFIG_MQ_IOSCHED_KYBER=y ++# CONFIG_IOSCHED_BFQ is not set ++# end of IO Schedulers ++ ++CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y ++CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_LOCK=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_INLINE_READ_LOCK=y ++CONFIG_ARCH_INLINE_READ_LOCK_BH=y ++CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_READ_UNLOCK=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_INLINE_WRITE_LOCK=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_SPIN_TRYLOCK=y ++CONFIG_INLINE_SPIN_TRYLOCK_BH=y ++CONFIG_INLINE_SPIN_LOCK=y ++CONFIG_INLINE_SPIN_LOCK_BH=y ++CONFIG_INLINE_SPIN_LOCK_IRQ=y ++CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y ++CONFIG_INLINE_SPIN_UNLOCK_BH=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_READ_LOCK=y ++CONFIG_INLINE_READ_LOCK_BH=y ++CONFIG_INLINE_READ_LOCK_IRQ=y ++CONFIG_INLINE_READ_LOCK_IRQSAVE=y ++CONFIG_INLINE_READ_UNLOCK=y ++CONFIG_INLINE_READ_UNLOCK_BH=y ++CONFIG_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_WRITE_LOCK=y ++CONFIG_INLINE_WRITE_LOCK_BH=y ++CONFIG_INLINE_WRITE_LOCK_IRQ=y ++CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y ++CONFIG_INLINE_WRITE_UNLOCK=y ++CONFIG_INLINE_WRITE_UNLOCK_BH=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y ++CONFIG_MUTEX_SPIN_ON_OWNER=y ++CONFIG_RWSEM_SPIN_ON_OWNER=y ++CONFIG_LOCK_SPIN_ON_OWNER=y ++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y ++CONFIG_QUEUED_SPINLOCKS=y ++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y ++CONFIG_QUEUED_RWLOCKS=y ++CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y ++CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y ++ ++# ++# Executable file formats ++# ++CONFIG_BINFMT_ELF=y 
++CONFIG_ARCH_BINFMT_ELF_STATE=y ++CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y ++CONFIG_ARCH_HAVE_ELF_PROT=y ++CONFIG_ARCH_USE_GNU_PROPERTY=y ++CONFIG_ELFCORE=y ++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y ++CONFIG_BINFMT_SCRIPT=y ++# CONFIG_BINFMT_MISC is not set ++CONFIG_COREDUMP=y ++# end of Executable file formats ++ ++# ++# Memory Management options ++# ++# CONFIG_SWAP is not set ++ ++# ++# SLAB allocator options ++# ++# CONFIG_SLAB_DEPRECATED is not set ++CONFIG_SLUB=y ++# CONFIG_SLUB_TINY is not set ++CONFIG_SLAB_MERGE_DEFAULT=y ++# CONFIG_SLAB_FREELIST_RANDOM is not set ++# CONFIG_SLAB_FREELIST_HARDENED is not set ++# CONFIG_SLUB_STATS is not set ++CONFIG_SLUB_CPU_PARTIAL=y ++# CONFIG_RANDOM_KMALLOC_CACHES is not set ++# end of SLAB allocator options ++ ++# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set ++# CONFIG_COMPAT_BRK is not set ++CONFIG_SPARSEMEM=y ++CONFIG_SPARSEMEM_EXTREME=y ++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y ++CONFIG_SPARSEMEM_VMEMMAP=y ++CONFIG_HAVE_FAST_GUP=y ++CONFIG_ARCH_KEEP_MEMBLOCK=y ++CONFIG_MEMORY_ISOLATION=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y ++# CONFIG_MEMORY_HOTPLUG is not set ++CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y ++CONFIG_COMPACTION=y ++CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 ++# CONFIG_PAGE_REPORTING is not set ++CONFIG_MIGRATION=y ++CONFIG_CONTIG_ALLOC=y ++CONFIG_PCP_BATCH_SCALE_MAX=5 ++CONFIG_PHYS_ADDR_T_64BIT=y ++CONFIG_KSM=y ++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 ++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y ++# CONFIG_MEMORY_FAILURE is not set ++CONFIG_ARCH_WANTS_THP_SWAP=y ++# CONFIG_TRANSPARENT_HUGEPAGE is not set ++CONFIG_CMA=y ++# CONFIG_CMA_DEBUG is not set ++# CONFIG_CMA_SYSFS is not set ++CONFIG_CMA_AREAS=7 ++CONFIG_GENERIC_EARLY_IOREMAP=y ++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set ++# CONFIG_IDLE_PAGE_TRACKING is not set ++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y ++CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y ++CONFIG_ARCH_HAS_PTE_DEVMAP=y ++CONFIG_ARCH_HAS_ZONE_DMA_SET=y ++CONFIG_ZONE_DMA=y ++CONFIG_ZONE_DMA32=y ++CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y ++CONFIG_ARCH_USES_PG_ARCH_X=y ++CONFIG_VM_EVENT_COUNTERS=y ++# CONFIG_PERCPU_STATS is not set ++ ++# ++# GUP_TEST needs to have DEBUG_FS enabled ++# ++# CONFIG_DMAPOOL_TEST is not set ++CONFIG_ARCH_HAS_PTE_SPECIAL=y ++CONFIG_MEMFD_CREATE=y ++CONFIG_SECRETMEM=y ++# CONFIG_ANON_VMA_NAME is not set ++CONFIG_USERFAULTFD=y ++CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y ++# CONFIG_LRU_GEN is not set ++CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y ++CONFIG_PER_VMA_LOCK=y ++CONFIG_LOCK_MM_AND_FIND_VMA=y ++ ++# ++# Data Access Monitoring ++# ++# CONFIG_DAMON is not set ++# end of Data Access Monitoring ++# end of Memory Management options ++ ++CONFIG_NET=y ++CONFIG_NET_INGRESS=y ++CONFIG_NET_EGRESS=y ++CONFIG_NET_XGRESS=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_DIAG is not set ++CONFIG_UNIX=y ++CONFIG_UNIX_SCM=y ++CONFIG_AF_UNIX_OOB=y ++# CONFIG_UNIX_DIAG is not set ++# CONFIG_TLS is not set ++# CONFIG_XFRM_USER is not set ++# CONFIG_NET_KEY is not set ++# CONFIG_XDP_SOCKETS is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_IP_ADVANCED_ROUTER is not set ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE_DEMUX is not set ++CONFIG_NET_IP_TUNNEL=m ++# CONFIG_IP_MROUTE is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_NET_IPVTI is not set ++# CONFIG_NET_FOU is not set ++# CONFIG_NET_FOU_IP_TUNNELS is not set ++# CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# 
CONFIG_INET_IPCOMP is not set ++CONFIG_INET_TABLE_PERTURB_ORDER=16 ++CONFIG_INET_TUNNEL=m ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_INET_UDP_DIAG is not set ++# CONFIG_INET_RAW_DIAG is not set ++# CONFIG_INET_DIAG_DESTROY is not set ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++# CONFIG_IPV6_ROUTE_INFO is not set ++# CONFIG_IPV6_OPTIMISTIC_DAD is not set ++# CONFIG_INET6_AH is not set ++# CONFIG_INET6_ESP is not set ++# CONFIG_INET6_IPCOMP is not set ++# CONFIG_IPV6_MIP6 is not set ++# CONFIG_IPV6_ILA is not set ++# CONFIG_IPV6_VTI is not set ++CONFIG_IPV6_SIT=m ++# CONFIG_IPV6_SIT_6RD is not set ++CONFIG_IPV6_NDISC_NODETYPE=y ++# CONFIG_IPV6_TUNNEL is not set ++# CONFIG_IPV6_MULTIPLE_TABLES is not set ++# CONFIG_IPV6_MROUTE is not set ++# CONFIG_IPV6_SEG6_LWTUNNEL is not set ++# CONFIG_IPV6_SEG6_HMAC is not set ++# CONFIG_IPV6_RPL_LWTUNNEL is not set ++# CONFIG_IPV6_IOAM6_LWTUNNEL is not set ++# CONFIG_MPTCP is not set ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NET_PTP_CLASSIFY=y ++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set ++CONFIG_NETFILTER=y ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++CONFIG_NETFILTER_INGRESS=y ++# CONFIG_NETFILTER_EGRESS is not set ++CONFIG_NETFILTER_BPF_LINK=y ++# CONFIG_NETFILTER_NETLINK_ACCT is not set ++# CONFIG_NETFILTER_NETLINK_QUEUE is not set ++# CONFIG_NETFILTER_NETLINK_LOG is not set ++# CONFIG_NETFILTER_NETLINK_OSF is not set ++# CONFIG_NF_CONNTRACK is not set ++# CONFIG_NF_LOG_SYSLOG is not set ++# CONFIG_NF_TABLES is not set ++# CONFIG_NETFILTER_XTABLES is not set ++# end of Core Netfilter Configuration ++ ++# CONFIG_IP_SET is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++# CONFIG_NF_SOCKET_IPV4 is not set ++# CONFIG_NF_TPROXY_IPV4 is not set ++# CONFIG_NF_DUP_IPV4 is not set ++# CONFIG_NF_LOG_ARP is not set ++# CONFIG_NF_LOG_IPV4 is not set ++# CONFIG_NF_REJECT_IPV4 is not set ++# CONFIG_IP_NF_IPTABLES is not set ++# CONFIG_IP_NF_ARPTABLES is not set ++# end of IP: Netfilter Configuration ++ ++# ++# IPv6: Netfilter Configuration ++# ++# CONFIG_NF_SOCKET_IPV6 is not set ++# CONFIG_NF_TPROXY_IPV6 is not set ++# CONFIG_NF_DUP_IPV6 is not set ++# CONFIG_NF_REJECT_IPV6 is not set ++# CONFIG_NF_LOG_IPV6 is not set ++# CONFIG_IP6_NF_IPTABLES is not set ++# end of IPv6: Netfilter Configuration ++ ++# CONFIG_BPFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_RDS is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_L2TP is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_LLC2 is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_PHONET is not set ++# CONFIG_6LOWPAN is not set ++# CONFIG_IEEE802154 is not set ++# CONFIG_NET_SCHED is not set ++# CONFIG_DCB is not set ++# CONFIG_DNS_RESOLVER is not set ++# CONFIG_BATMAN_ADV is not set ++# CONFIG_OPENVSWITCH is not set ++# CONFIG_VSOCKETS is not set ++# CONFIG_NETLINK_DIAG is not set ++# CONFIG_MPLS is not set ++# CONFIG_NET_NSH is not set ++# CONFIG_HSR is not set ++# CONFIG_NET_SWITCHDEV is not set ++# CONFIG_NET_L3_MASTER_DEV is not set ++# CONFIG_QRTR is not set ++# CONFIG_NET_NCSI is not set ++CONFIG_PCPU_DEV_REFCNT=y ++CONFIG_MAX_SKB_FRAGS=17 ++CONFIG_RPS=y ++CONFIG_RFS_ACCEL=y ++CONFIG_SOCK_RX_QUEUE_MAPPING=y ++CONFIG_XPS=y 
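++# RPS/RFS/XPS spread RX/TX work across cores; BQL bounds queue depth.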
++CONFIG_NET_RX_BUSY_POLL=y ++CONFIG_BQL=y ++CONFIG_NET_FLOW_LIMIT=y ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# end of Network testing ++# end of Networking options ++ ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_AF_KCM is not set ++# CONFIG_MCTP is not set ++CONFIG_WIRELESS=y ++# CONFIG_CFG80211 is not set ++ ++# ++# CFG80211 needs to be enabled for MAC80211 ++# ++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++# CONFIG_CAIF is not set ++# CONFIG_CEPH_LIB is not set ++# CONFIG_NFC is not set ++# CONFIG_PSAMPLE is not set ++# CONFIG_NET_IFE is not set ++# CONFIG_LWTUNNEL is not set ++CONFIG_DST_CACHE=y ++CONFIG_GRO_CELLS=y ++CONFIG_NET_SELFTESTS=y ++CONFIG_NET_SOCK_MSG=y ++CONFIG_PAGE_POOL=y ++# CONFIG_PAGE_POOL_STATS is not set ++# CONFIG_FAILOVER is not set ++# CONFIG_ETHTOOL_NETLINK is not set ++ ++# ++# Device Drivers ++# ++CONFIG_ARM_AMBA=y ++CONFIG_HAVE_PCI=y ++# CONFIG_PCI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER=y ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++# CONFIG_DEVTMPFS_SAFE is not set ++CONFIG_STANDALONE=y ++# CONFIG_PREVENT_FIRMWARE_BUILD is not set ++ ++# ++# Firmware loader ++# ++CONFIG_FW_LOADER=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_FW_LOADER_USER_HELPER is not set ++# CONFIG_FW_LOADER_COMPRESS is not set ++# CONFIG_FW_UPLOAD is not set ++# end of Firmware loader ++ ++CONFIG_ALLOW_DEV_COREDUMP=y ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set ++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set ++CONFIG_GENERIC_CPU_AUTOPROBE=y ++CONFIG_GENERIC_CPU_VULNERABILITIES=y ++CONFIG_SOC_BUS=y ++CONFIG_REGMAP=y ++CONFIG_REGMAP_MMIO=y ++CONFIG_GENERIC_ARCH_TOPOLOGY=y ++# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set ++# end of Generic Driver Options ++ ++# ++# Bus devices ++# ++# CONFIG_BRCMSTB_GISB_ARB is not set ++# CONFIG_MOXTET is not set ++# CONFIG_VEXPRESS_CONFIG is not set ++# CONFIG_MHI_BUS is not set ++# CONFIG_MHI_BUS_EP is not set ++# end of Bus devices ++ ++# ++# Cache Drivers ++# ++# end of Cache Drivers ++ ++# CONFIG_CONNECTOR is not set ++ ++# ++# Firmware Drivers ++# ++ ++# ++# ARM System Control and Management Interface Protocol ++# ++# CONFIG_ARM_SCMI_PROTOCOL is not set ++# end of ARM System Control and Management Interface Protocol ++ ++# CONFIG_FIRMWARE_MEMMAP is not set ++# CONFIG_ARM_FFA_TRANSPORT is not set ++# CONFIG_GOOGLE_FIRMWARE is not set ++CONFIG_ARM_PSCI_FW=y ++CONFIG_HAVE_ARM_SMCCC=y ++CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y ++CONFIG_ARM_SMCCC_SOC_ID=y ++ ++# ++# Tegra firmware driver ++# ++# end of Tegra firmware driver ++# end of Firmware Drivers ++ ++# CONFIG_GNSS is not set ++# CONFIG_MTD is not set ++CONFIG_DTC=y ++CONFIG_OF=y ++# CONFIG_OF_UNITTEST is not set ++CONFIG_OF_FLATTREE=y ++CONFIG_OF_EARLY_FLATTREE=y ++CONFIG_OF_KOBJ=y ++CONFIG_OF_ADDRESS=y ++CONFIG_OF_IRQ=y ++CONFIG_OF_RESERVED_MEM=y ++# CONFIG_OF_OVERLAY is not set ++# CONFIG_PARPORT is not set ++# CONFIG_BLK_DEV is not set ++ ++# ++# NVME Support ++# ++# CONFIG_NVME_FC is not set ++# CONFIG_NVME_TCP is not set ++# CONFIG_NVME_TARGET is not set ++# end of NVME Support ++ ++# ++# Misc devices ++# ++# CONFIG_AD525X_DPOT is not set ++# CONFIG_DUMMY_IRQ is not set ++# CONFIG_ICS932S401 is not set ++# CONFIG_ENCLOSURE_SERVICES is not set ++# CONFIG_APDS9802ALS is not set 
++# CONFIG_ISL29003 is not set ++# CONFIG_ISL29020 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++# CONFIG_SENSORS_BH1770 is not set ++# CONFIG_SENSORS_APDS990X is not set ++# CONFIG_HMC6352 is not set ++# CONFIG_DS1682 is not set ++# CONFIG_LATTICE_ECP3_CONFIG is not set ++# CONFIG_SRAM is not set ++# CONFIG_XILINX_SDFEC is not set ++# CONFIG_OPEN_DICE is not set ++# CONFIG_VCPU_STALL_DETECTOR is not set ++# CONFIG_C2PORT is not set ++ ++# ++# EEPROM support ++# ++# CONFIG_EEPROM_AT24 is not set ++# CONFIG_EEPROM_AT25 is not set ++# CONFIG_EEPROM_LEGACY is not set ++# CONFIG_EEPROM_MAX6875 is not set ++# CONFIG_EEPROM_93CX6 is not set ++# CONFIG_EEPROM_93XX46 is not set ++# CONFIG_EEPROM_IDT_89HPESX is not set ++# CONFIG_EEPROM_EE1004 is not set ++# end of EEPROM support ++ ++# ++# Texas Instruments shared transport line discipline ++# ++# CONFIG_TI_ST is not set ++# end of Texas Instruments shared transport line discipline ++ ++# CONFIG_SENSORS_LIS3_SPI is not set ++# CONFIG_SENSORS_LIS3_I2C is not set ++# CONFIG_ALTERA_STAPL is not set ++# CONFIG_ECHO is not set ++# CONFIG_MISC_RTSX_USB is not set ++# CONFIG_PVPANIC is not set ++# end of Misc devices ++ ++# ++# SCSI device support ++# ++CONFIG_SCSI_MOD=y ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI_COMMON=y ++CONFIG_SCSI=y ++CONFIG_SCSI_DMA=y ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=y ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_SG is not set ++CONFIG_BLK_DEV_BSG=y ++# CONFIG_CHR_DEV_SCH is not set ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++# CONFIG_SCSI_SCAN_ASYNC is not set ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++# end of SCSI Transports ++ ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_ISCSI_BOOT_SYSFS is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# end of SCSI device support ++ ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++# CONFIG_TARGET_CORE is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_CORE=y ++# CONFIG_BONDING is not set ++# CONFIG_DUMMY is not set ++# CONFIG_WIREGUARD is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_NET_TEAM is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_IPVLAN is not set ++# CONFIG_VXLAN is not set ++# CONFIG_GENEVE is not set ++# CONFIG_BAREUDP is not set ++# CONFIG_GTP is not set ++# CONFIG_AMT is not set ++# CONFIG_MACSEC is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_TUN is not set ++# CONFIG_TUN_VNET_CROSS_LE is not set ++# CONFIG_VETH is not set ++# CONFIG_NLMON is not set ++CONFIG_ETHERNET=y ++# CONFIG_NET_VENDOR_ALACRITECH is not set ++# CONFIG_ALTERA_TSE is not set ++# CONFIG_NET_VENDOR_AMAZON is not set ++# CONFIG_NET_VENDOR_AMD is not set ++# CONFIG_NET_VENDOR_AQUANTIA is not set ++# CONFIG_NET_VENDOR_ARC is not set ++# CONFIG_NET_VENDOR_ASIX is not set ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CADENCE is not set ++# CONFIG_NET_VENDOR_CAVIUM is not set ++# CONFIG_NET_VENDOR_CORTINA is not set ++# CONFIG_NET_VENDOR_DAVICOM is not set ++# CONFIG_DNET is not set ++# CONFIG_NET_VENDOR_ENGLEDER is not set ++# CONFIG_NET_VENDOR_EZCHIP is not set ++# CONFIG_NET_VENDOR_FUNGIBLE is not set ++# CONFIG_NET_VENDOR_GOOGLE is not set ++# CONFIG_NET_VENDOR_HISILICON is not set ++# CONFIG_NET_VENDOR_HUAWEI 
is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++CONFIG_NET_VENDOR_BSP=y ++CONFIG_ETH_GMAC=y ++# CONFIG_GMAC is not set ++# CONFIG_GMAC_HAS_INTERNAL_PHY is not set ++CONFIG_RX_FLOW_CTRL_SUPPORT=y ++CONFIG_TX_FLOW_CTRL_SUPPORT=y ++CONFIG_TX_FLOW_CTRL_PAUSE_TIME=0xFFFF ++CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL=0xFFFF ++CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD=16 ++CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD=32 ++# CONFIG_NET_VENDOR_ADI is not set ++# CONFIG_NET_VENDOR_LITEX is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MELLANOX is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_MICROSEMI is not set ++# CONFIG_NET_VENDOR_MICROSOFT is not set ++# CONFIG_NET_VENDOR_NI is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_NETRONOME is not set ++# CONFIG_ETHOC is not set ++# CONFIG_NET_VENDOR_PENSANDO is not set ++# CONFIG_NET_VENDOR_QUALCOMM is not set ++# CONFIG_NET_VENDOR_RENESAS is not set ++# CONFIG_NET_VENDOR_ROCKER is not set ++# CONFIG_NET_VENDOR_SAMSUNG is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SOLARFLARE is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_SOCIONEXT is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_NET_VENDOR_SYNOPSYS is not set ++# CONFIG_NET_VENDOR_VERTEXCOM is not set ++# CONFIG_NET_VENDOR_VIA is not set ++# CONFIG_NET_VENDOR_WANGXUN is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++# CONFIG_NET_VENDOR_XILINX is not set ++CONFIG_PHYLIB=y ++CONFIG_SWPHY=y ++CONFIG_FIXED_PHY=y ++ ++# ++# MII PHY device drivers ++# ++# CONFIG_AMD_PHY is not set ++# CONFIG_ADIN_PHY is not set ++# CONFIG_ADIN1100_PHY is not set ++# CONFIG_AQUANTIA_PHY is not set ++# CONFIG_AX88796B_PHY is not set ++# CONFIG_BROADCOM_PHY is not set ++# CONFIG_BCM54140_PHY is not set ++# CONFIG_BCM7XXX_PHY is not set ++# CONFIG_BCM84881_PHY is not set ++# CONFIG_BCM87XX_PHY is not set ++# CONFIG_CICADA_PHY is not set ++# CONFIG_CORTINA_PHY is not set ++# CONFIG_DAVICOM_PHY is not set ++# CONFIG_ICPLUS_PHY is not set ++# CONFIG_LXT_PHY is not set ++# CONFIG_INTEL_XWAY_PHY is not set ++# CONFIG_LSI_ET1011C_PHY is not set ++# CONFIG_MARVELL_PHY is not set ++# CONFIG_MARVELL_10G_PHY is not set ++# CONFIG_MARVELL_88Q2XXX_PHY is not set ++# CONFIG_MARVELL_88X2222_PHY is not set ++# CONFIG_MAXLINEAR_GPHY is not set ++# CONFIG_MEDIATEK_GE_PHY is not set ++# CONFIG_MICREL_PHY is not set ++# CONFIG_MICROCHIP_T1S_PHY is not set ++# CONFIG_MICROCHIP_PHY is not set ++# CONFIG_MICROCHIP_T1_PHY is not set ++# CONFIG_MICROSEMI_PHY is not set ++# CONFIG_MOTORCOMM_PHY is not set ++# CONFIG_NATIONAL_PHY is not set ++# CONFIG_NXP_CBTX_PHY is not set ++# CONFIG_NXP_C45_TJA11XX_PHY is not set ++# CONFIG_NCN26000_PHY is not set ++# CONFIG_QSEMI_PHY is not set ++# CONFIG_REALTEK_PHY is not set ++# CONFIG_RENESAS_PHY is not set ++# CONFIG_ROCKCHIP_PHY is not set ++# CONFIG_SMSC_PHY is not set ++# CONFIG_STE10XP is not set ++# CONFIG_TERANETICS_PHY is not set ++# CONFIG_DP83822_PHY is not set ++# CONFIG_DP83TC811_PHY is not set ++# CONFIG_DP83848_PHY is not set ++# CONFIG_DP83867_PHY is not set ++# CONFIG_DP83869_PHY is not set ++# CONFIG_DP83TD510_PHY is not set ++# CONFIG_VITESSE_PHY is not set ++# CONFIG_XILINX_GMII2RGMII is not set ++CONFIG_MDIO_BSP_GEMAC=y ++# CONFIG_MICREL_KS8995MA is not set ++# CONFIG_PSE_CONTROLLER is not set ++CONFIG_MDIO_DEVICE=y ++CONFIG_MDIO_BUS=y ++CONFIG_FWNODE_MDIO=y ++CONFIG_OF_MDIO=y ++CONFIG_MDIO_DEVRES=y ++# CONFIG_MDIO_BITBANG is not 
set ++# CONFIG_MDIO_BCM_UNIMAC is not set ++# CONFIG_MDIO_HISI_FEMAC is not set ++# CONFIG_MDIO_MVUSB is not set ++# CONFIG_MDIO_MSCC_MIIM is not set ++# CONFIG_MDIO_OCTEON is not set ++# CONFIG_MDIO_IPQ4019 is not set ++# CONFIG_MDIO_IPQ8064 is not set ++ ++# ++# MDIO Multiplexers ++# ++# CONFIG_MDIO_BUS_MUX_GPIO is not set ++# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set ++# CONFIG_MDIO_BUS_MUX_MMIOREG is not set ++ ++# ++# PCS device drivers ++# ++# end of PCS device drivers ++ ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++CONFIG_USB_NET_DRIVERS=y ++# CONFIG_USB_CATC is not set ++# CONFIG_USB_KAWETH is not set ++# CONFIG_USB_PEGASUS is not set ++# CONFIG_USB_RTL8150 is not set ++# CONFIG_USB_RTL8152 is not set ++# CONFIG_USB_LAN78XX is not set ++# CONFIG_USB_USBNET is not set ++# CONFIG_USB_IPHETH is not set ++CONFIG_WLAN=y ++CONFIG_WLAN_VENDOR_ADMTEK=y ++CONFIG_WLAN_VENDOR_ATH=y ++# CONFIG_ATH_DEBUG is not set ++CONFIG_WLAN_VENDOR_ATMEL=y ++CONFIG_WLAN_VENDOR_BROADCOM=y ++CONFIG_WLAN_VENDOR_CISCO=y ++CONFIG_WLAN_VENDOR_INTEL=y ++CONFIG_WLAN_VENDOR_INTERSIL=y ++# CONFIG_HOSTAP is not set ++CONFIG_WLAN_VENDOR_MARVELL=y ++CONFIG_WLAN_VENDOR_MEDIATEK=y ++CONFIG_WLAN_VENDOR_MICROCHIP=y ++CONFIG_WLAN_VENDOR_PURELIFI=y ++CONFIG_WLAN_VENDOR_RALINK=y ++CONFIG_WLAN_VENDOR_REALTEK=y ++CONFIG_WLAN_VENDOR_RSI=y ++CONFIG_WLAN_VENDOR_SILABS=y ++CONFIG_WLAN_VENDOR_ST=y ++CONFIG_WLAN_VENDOR_TI=y ++CONFIG_WLAN_VENDOR_ZYDAS=y ++CONFIG_WLAN_VENDOR_QUANTENNA=y ++# CONFIG_WAN is not set ++ ++# ++# Wireless WAN ++# ++# CONFIG_WWAN is not set ++# end of Wireless WAN ++ ++# CONFIG_NET_FAILOVER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_SPARSEKMAP is not set ++# CONFIG_INPUT_MATRIXKMAP is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++# CONFIG_INPUT_TOUCHSCREEN is not set ++# CONFIG_INPUT_MISC is not set ++# CONFIG_RMI4_CORE is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++# end of Hardware I/O ports ++# end of Input device support ++ ++# ++# Character devices ++# ++CONFIG_TTY=y ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_LEGACY_TIOCSTI=y ++CONFIG_LDISC_AUTOLOAD=y ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_EARLYCON=y ++# CONFIG_SERIAL_8250 is not set ++ ++# ++# Non-8250 serial port support ++# ++# CONFIG_SERIAL_AMBA_PL010 is not set ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set ++# CONFIG_SERIAL_MAX3100 is not set ++# CONFIG_SERIAL_MAX310X is not set ++# CONFIG_SERIAL_UARTLITE is not set ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++# CONFIG_SERIAL_SIFIVE is not set ++# CONFIG_SERIAL_SCCNXP is not set ++# CONFIG_SERIAL_SC16IS7XX is not set ++# CONFIG_SERIAL_ALTERA_JTAGUART is not set ++# CONFIG_SERIAL_ALTERA_UART is not set ++# CONFIG_SERIAL_XILINX_PS_UART is not set ++# CONFIG_SERIAL_ARC is not set ++# CONFIG_SERIAL_FSL_LPUART is not set ++# CONFIG_SERIAL_FSL_LINFLEXUART is not set ++# 
CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set ++# CONFIG_SERIAL_SPRD is not set ++# end of Serial drivers ++ ++# CONFIG_SERIAL_NONSTANDARD is not set ++# CONFIG_N_GSM is not set ++# CONFIG_NULL_TTY is not set ++# CONFIG_HVC_DCC is not set ++# CONFIG_SERIAL_DEV_BUS is not set ++# CONFIG_TTY_PRINTK is not set ++# CONFIG_VIRTIO_CONSOLE is not set ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_DEVMEM=y ++CONFIG_DEVPORT=y ++# CONFIG_TCG_TPM is not set ++# CONFIG_XILLYBUS is not set ++# CONFIG_XILLYUSB is not set ++# end of Character devices ++ ++# ++# I2C support ++# ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++# CONFIG_I2C_COMPAT is not set ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MUX=y ++ ++# ++# Multiplexer I2C Chip support ++# ++# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set ++# CONFIG_I2C_MUX_GPIO is not set ++# CONFIG_I2C_MUX_GPMUX is not set ++# CONFIG_I2C_MUX_LTC4306 is not set ++# CONFIG_I2C_MUX_PCA9541 is not set ++# CONFIG_I2C_MUX_PCA954x is not set ++# CONFIG_I2C_MUX_PINCTRL is not set ++# CONFIG_I2C_MUX_REG is not set ++# CONFIG_I2C_DEMUX_PINCTRL is not set ++# CONFIG_I2C_MUX_MLXCPLD is not set ++# end of Multiplexer I2C Chip support ++ ++# CONFIG_I2C_HELPER_AUTO is not set ++# CONFIG_I2C_SMBUS is not set ++ ++# ++# I2C Algorithms ++# ++# CONFIG_I2C_ALGOBIT is not set ++# CONFIG_I2C_ALGOPCF is not set ++# CONFIG_I2C_ALGOPCA is not set ++# end of I2C Algorithms ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_CADENCE is not set ++# CONFIG_I2C_CBUS_GPIO is not set ++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set ++# CONFIG_I2C_EMEV2 is not set ++# CONFIG_I2C_GPIO is not set ++CONFIG_I2C_BSP=y ++# CONFIG_I2C_HISI is not set ++# CONFIG_I2C_NOMADIK is not set ++# CONFIG_I2C_OCORES is not set ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_RK3X is not set ++# CONFIG_I2C_SIMTEC is not set ++# CONFIG_I2C_XILINX is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_DIOLAN_U2C is not set ++# CONFIG_I2C_CP2615 is not set ++# CONFIG_I2C_ROBOTFUZZ_OSIF is not set ++# CONFIG_I2C_TAOS_EVM is not set ++# CONFIG_I2C_TINY_USB is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_VIRTIO is not set ++CONFIG_DMA_MSG_MIN_LEN=5 ++CONFIG_DMA_MSG_MAX_LEN=4090 ++# end of I2C Hardware Bus support ++ ++# CONFIG_I2C_STUB is not set ++# CONFIG_I2C_SLAVE is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# end of I2C support ++ ++# CONFIG_I3C is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++CONFIG_SPI_MEM=y ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_ALTERA is not set ++# CONFIG_SPI_AXI_SPI_ENGINE is not set ++# CONFIG_SPI_BITBANG is not set ++# CONFIG_SPI_CADENCE is not set ++# CONFIG_SPI_CADENCE_QUADSPI is not set ++# CONFIG_SPI_CADENCE_XSPI is not set ++# CONFIG_SPI_DESIGNWARE is not set ++# CONFIG_SPI_GPIO is not set ++# CONFIG_SPI_FSL_SPI is not set ++# CONFIG_SPI_MICROCHIP_CORE is not set ++# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set ++# CONFIG_SPI_OC_TINY is not set ++CONFIG_SPI_PL022=y ++# CONFIG_SPI_SC18IS602 is not set ++# CONFIG_SPI_SIFIVE is not set ++# CONFIG_SPI_SN_F_OSPI is not set ++# CONFIG_SPI_MXIC is not set ++# CONFIG_SPI_XCOMM is not set ++# CONFIG_SPI_XILINX is not set ++# CONFIG_SPI_ZYNQMP_GQSPI is not set ++# CONFIG_SPI_AMD is not set ++ ++# ++# SPI Multiplexer support ++# ++# CONFIG_SPI_MUX is not set ++ ++# ++# SPI Protocol Masters ++# 
++CONFIG_SPI_SPIDEV=y ++# CONFIG_SPI_LOOPBACK_TEST is not set ++# CONFIG_SPI_TLE62X0 is not set ++# CONFIG_SPI_SLAVE is not set ++# CONFIG_SPMI is not set ++# CONFIG_HSI is not set ++CONFIG_PPS=y ++# CONFIG_PPS_DEBUG is not set ++# CONFIG_NTP_PPS is not set ++ ++# ++# PPS clients support ++# ++# CONFIG_PPS_CLIENT_KTIMER is not set ++# CONFIG_PPS_CLIENT_LDISC is not set ++# CONFIG_PPS_CLIENT_GPIO is not set ++ ++# ++# PPS generators support ++# ++ ++# ++# PTP clock support ++# ++CONFIG_PTP_1588_CLOCK=y ++CONFIG_PTP_1588_CLOCK_OPTIONAL=y ++ ++# ++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. ++# ++CONFIG_PTP_1588_CLOCK_KVM=y ++# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set ++# CONFIG_PTP_1588_CLOCK_IDTCM is not set ++# CONFIG_PTP_1588_CLOCK_MOCK is not set ++# end of PTP clock support ++ ++CONFIG_PINCTRL=y ++CONFIG_GENERIC_PINCTRL_GROUPS=y ++CONFIG_PINMUX=y ++CONFIG_GENERIC_PINMUX_FUNCTIONS=y ++CONFIG_PINCONF=y ++CONFIG_GENERIC_PINCONF=y ++# CONFIG_DEBUG_PINCTRL is not set ++# CONFIG_PINCTRL_CY8C95X0 is not set ++# CONFIG_PINCTRL_MCP23S08 is not set ++# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set ++# CONFIG_PINCTRL_OCELOT is not set ++CONFIG_PINCTRL_SINGLE=y ++# CONFIG_PINCTRL_STMFX is not set ++# CONFIG_PINCTRL_SX150X is not set ++ ++# ++# Renesas pinctrl drivers ++# ++# end of Renesas pinctrl drivers ++ ++CONFIG_GPIOLIB=y ++CONFIG_GPIOLIB_FASTPATH_LIMIT=512 ++CONFIG_OF_GPIO=y ++CONFIG_GPIOLIB_IRQCHIP=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_CDEV=y ++CONFIG_GPIO_CDEV_V1=y ++CONFIG_GPIO_GENERIC=y ++ ++# ++# Memory mapped GPIO drivers ++# ++# CONFIG_GPIO_74XX_MMIO is not set ++# CONFIG_GPIO_ALTERA is not set ++# CONFIG_GPIO_CADENCE is not set ++# CONFIG_GPIO_DWAPB is not set ++# CONFIG_GPIO_FTGPIO010 is not set ++CONFIG_GPIO_GENERIC_PLATFORM=y ++# CONFIG_GPIO_GRGPIO is not set ++# CONFIG_GPIO_HISI is not set ++# CONFIG_GPIO_HLWD is not set ++# CONFIG_GPIO_LOGICVC is not set ++# CONFIG_GPIO_MB86S7X is not set ++CONFIG_GPIO_PL061=y ++# CONFIG_GPIO_SIFIVE is not set ++# CONFIG_GPIO_SYSCON is not set ++# CONFIG_GPIO_XGENE is not set ++# CONFIG_GPIO_XILINX is not set ++# CONFIG_GPIO_AMD_FCH is not set ++# end of Memory mapped GPIO drivers ++ ++# ++# I2C GPIO expanders ++# ++# CONFIG_GPIO_ADNP is not set ++# CONFIG_GPIO_FXL6408 is not set ++# CONFIG_GPIO_DS4520 is not set ++# CONFIG_GPIO_GW_PLD is not set ++# CONFIG_GPIO_MAX7300 is not set ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCA9570 is not set ++# CONFIG_GPIO_PCF857X is not set ++# CONFIG_GPIO_TPIC2810 is not set ++# end of I2C GPIO expanders ++ ++# ++# MFD GPIO expanders ++# ++# end of MFD GPIO expanders ++ ++# ++# SPI GPIO expanders ++# ++# CONFIG_GPIO_74X164 is not set ++# CONFIG_GPIO_MAX3191X is not set ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MC33880 is not set ++# CONFIG_GPIO_PISOSR is not set ++# CONFIG_GPIO_XRA1403 is not set ++# end of SPI GPIO expanders ++ ++# ++# USB GPIO expanders ++# ++# end of USB GPIO expanders ++ ++# ++# Virtual GPIO drivers ++# ++# CONFIG_GPIO_AGGREGATOR is not set ++# CONFIG_GPIO_LATCH is not set ++# CONFIG_GPIO_MOCKUP is not set ++# CONFIG_GPIO_SIM is not set ++# end of Virtual GPIO drivers ++ ++# CONFIG_W1 is not set ++CONFIG_POWER_RESET=y ++# CONFIG_POWER_RESET_BRCMSTB is not set ++# CONFIG_POWER_RESET_GPIO is not set ++# CONFIG_POWER_RESET_GPIO_RESTART is not set ++# CONFIG_POWER_RESET_LTC2952 is not set ++# CONFIG_POWER_RESET_RESTART is not set ++# CONFIG_POWER_RESET_XGENE is not set ++# 
CONFIG_POWER_RESET_SYSCON is not set ++# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set ++# CONFIG_SYSCON_REBOOT_MODE is not set ++# CONFIG_NVMEM_REBOOT_MODE is not set ++CONFIG_POWER_SUPPLY=y ++# CONFIG_POWER_SUPPLY_DEBUG is not set ++# CONFIG_IP5XXX_POWER is not set ++# CONFIG_TEST_POWER is not set ++# CONFIG_CHARGER_ADP5061 is not set ++# CONFIG_BATTERY_CW2015 is not set ++# CONFIG_BATTERY_DS2780 is not set ++# CONFIG_BATTERY_DS2781 is not set ++# CONFIG_BATTERY_DS2782 is not set ++# CONFIG_BATTERY_SAMSUNG_SDI is not set ++# CONFIG_BATTERY_SBS is not set ++# CONFIG_CHARGER_SBS is not set ++# CONFIG_MANAGER_SBS is not set ++# CONFIG_BATTERY_BQ27XXX is not set ++# CONFIG_BATTERY_MAX17040 is not set ++# CONFIG_BATTERY_MAX17042 is not set ++# CONFIG_CHARGER_MAX8903 is not set ++# CONFIG_CHARGER_LP8727 is not set ++# CONFIG_CHARGER_GPIO is not set ++# CONFIG_CHARGER_LT3651 is not set ++# CONFIG_CHARGER_LTC4162L is not set ++# CONFIG_CHARGER_DETECTOR_MAX14656 is not set ++# CONFIG_CHARGER_MAX77976 is not set ++# CONFIG_CHARGER_BQ2415X is not set ++# CONFIG_CHARGER_BQ24190 is not set ++# CONFIG_CHARGER_BQ24257 is not set ++# CONFIG_CHARGER_BQ24735 is not set ++# CONFIG_CHARGER_BQ2515X is not set ++# CONFIG_CHARGER_BQ25890 is not set ++# CONFIG_CHARGER_BQ25980 is not set ++# CONFIG_CHARGER_BQ256XX is not set ++# CONFIG_BATTERY_GAUGE_LTC2941 is not set ++# CONFIG_BATTERY_GOLDFISH is not set ++# CONFIG_BATTERY_RT5033 is not set ++# CONFIG_CHARGER_RT9455 is not set ++# CONFIG_CHARGER_BD99954 is not set ++# CONFIG_BATTERY_UG3105 is not set ++# CONFIG_HWMON is not set ++# CONFIG_THERMAL is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++# CONFIG_SSB is not set ++CONFIG_BCMA_POSSIBLE=y ++# CONFIG_BCMA is not set ++ ++# ++# Multifunction device drivers ++# ++CONFIG_MFD_CORE=y ++# CONFIG_MFD_ACT8945A is not set ++# CONFIG_MFD_AS3711 is not set ++# CONFIG_MFD_SMPRO is not set ++# CONFIG_MFD_AS3722 is not set ++# CONFIG_PMIC_ADP5520 is not set ++# CONFIG_MFD_AAT2870_CORE is not set ++# CONFIG_MFD_ATMEL_FLEXCOM is not set ++# CONFIG_MFD_ATMEL_HLCDC is not set ++# CONFIG_MFD_BCM590XX is not set ++# CONFIG_MFD_BD9571MWV is not set ++# CONFIG_MFD_AXP20X_I2C is not set ++# CONFIG_MFD_CS42L43_I2C is not set ++# CONFIG_MFD_MADERA is not set ++# CONFIG_MFD_MAX5970 is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_DA9052_SPI is not set ++# CONFIG_MFD_DA9052_I2C is not set ++# CONFIG_MFD_DA9055 is not set ++# CONFIG_MFD_DA9062 is not set ++# CONFIG_MFD_DA9063 is not set ++# CONFIG_MFD_DA9150 is not set ++# CONFIG_MFD_DLN2 is not set ++# CONFIG_MFD_GATEWORKS_GSC is not set ++# CONFIG_MFD_MC13XXX_SPI is not set ++# CONFIG_MFD_MC13XXX_I2C is not set ++# CONFIG_MFD_MP2629 is not set ++# CONFIG_MFD_HI6421_PMIC is not set ++CONFIG_MFD_BSP_FMC=y ++# CONFIG_MFD_IQS62X is not set ++# CONFIG_MFD_KEMPLD is not set ++# CONFIG_MFD_88PM800 is not set ++# CONFIG_MFD_88PM805 is not set ++# CONFIG_MFD_88PM860X is not set ++# CONFIG_MFD_MAX14577 is not set ++# CONFIG_MFD_MAX77541 is not set ++# CONFIG_MFD_MAX77620 is not set ++# CONFIG_MFD_MAX77650 is not set ++# CONFIG_MFD_MAX77686 is not set ++# CONFIG_MFD_MAX77693 is not set ++# CONFIG_MFD_MAX77714 is not set ++# CONFIG_MFD_MAX77843 is not set ++# CONFIG_MFD_MAX8907 is not set ++# CONFIG_MFD_MAX8925 is not set ++# CONFIG_MFD_MAX8997 is not set ++# CONFIG_MFD_MAX8998 is not set ++# CONFIG_MFD_MT6360 is not set ++# CONFIG_MFD_MT6370 is not set ++# CONFIG_MFD_MT6397 is not set ++# CONFIG_MFD_MENF21BMC is not set ++# CONFIG_MFD_OCELOT is not set ++# 
CONFIG_EZX_PCAP is not set ++# CONFIG_MFD_CPCAP is not set ++# CONFIG_MFD_VIPERBOARD is not set ++# CONFIG_MFD_NTXEC is not set ++# CONFIG_MFD_RETU is not set ++# CONFIG_MFD_PCF50633 is not set ++# CONFIG_MFD_SY7636A is not set ++# CONFIG_MFD_RT4831 is not set ++# CONFIG_MFD_RT5033 is not set ++# CONFIG_MFD_RT5120 is not set ++# CONFIG_MFD_RC5T583 is not set ++# CONFIG_MFD_RK8XX_I2C is not set ++# CONFIG_MFD_RK8XX_SPI is not set ++# CONFIG_MFD_RN5T618 is not set ++# CONFIG_MFD_SEC_CORE is not set ++# CONFIG_MFD_SI476X_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_SKY81452 is not set ++# CONFIG_MFD_STMPE is not set ++CONFIG_MFD_SYSCON=y ++# CONFIG_MFD_LP3943 is not set ++# CONFIG_MFD_LP8788 is not set ++# CONFIG_MFD_TI_LMU is not set ++# CONFIG_MFD_PALMAS is not set ++# CONFIG_TPS6105X is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_TPS6507X is not set ++# CONFIG_MFD_TPS65086 is not set ++# CONFIG_MFD_TPS65090 is not set ++# CONFIG_MFD_TPS65217 is not set ++# CONFIG_MFD_TI_LP873X is not set ++# CONFIG_MFD_TI_LP87565 is not set ++# CONFIG_MFD_TPS65218 is not set ++# CONFIG_MFD_TPS65219 is not set ++# CONFIG_MFD_TPS6586X is not set ++# CONFIG_MFD_TPS65910 is not set ++# CONFIG_MFD_TPS65912_I2C is not set ++# CONFIG_MFD_TPS65912_SPI is not set ++# CONFIG_MFD_TPS6594_I2C is not set ++# CONFIG_MFD_TPS6594_SPI is not set ++# CONFIG_TWL4030_CORE is not set ++# CONFIG_TWL6040_CORE is not set ++# CONFIG_MFD_WL1273_CORE is not set ++# CONFIG_MFD_LM3533 is not set ++# CONFIG_MFD_TC3589X is not set ++# CONFIG_MFD_TQMX86 is not set ++# CONFIG_MFD_LOCHNAGAR is not set ++# CONFIG_MFD_ARIZONA_I2C is not set ++# CONFIG_MFD_ARIZONA_SPI is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM831X_I2C is not set ++# CONFIG_MFD_WM831X_SPI is not set ++# CONFIG_MFD_WM8350_I2C is not set ++# CONFIG_MFD_WM8994 is not set ++# CONFIG_MFD_ROHM_BD718XX is not set ++# CONFIG_MFD_ROHM_BD71828 is not set ++# CONFIG_MFD_ROHM_BD957XMUF is not set ++# CONFIG_MFD_STPMIC1 is not set ++# CONFIG_MFD_STMFX is not set ++# CONFIG_MFD_ATC260X_I2C is not set ++# CONFIG_MFD_QCOM_PM8008 is not set ++# CONFIG_MFD_INTEL_M10_BMC_SPI is not set ++# CONFIG_MFD_RSMU_I2C is not set ++# CONFIG_MFD_RSMU_SPI is not set ++# end of Multifunction device drivers ++ ++# CONFIG_REGULATOR is not set ++# CONFIG_RC_CORE is not set ++ ++# ++# CEC support ++# ++# CONFIG_MEDIA_CEC_SUPPORT is not set ++# end of CEC support ++ ++# CONFIG_MEDIA_SUPPORT is not set ++ ++# ++# Graphics support ++# ++CONFIG_VIDEO_CMDLINE=y ++# CONFIG_AUXDISPLAY is not set ++# CONFIG_DRM is not set ++# CONFIG_DRM_DEBUG_MODESET_LOCK is not set ++ ++# ++# Frame buffer Devices ++# ++CONFIG_FB=y ++# CONFIG_FB_ARMCLCD is not set ++# CONFIG_FB_OPENCORES is not set ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_SMSCUFX is not set ++# CONFIG_FB_UDL is not set ++# CONFIG_FB_IBM_GXT4500 is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_SIMPLE is not set ++# CONFIG_FB_SSD1307 is not set ++CONFIG_FB_CORE=y ++CONFIG_FB_NOTIFY=y ++# CONFIG_FIRMWARE_EDID is not set ++CONFIG_FB_DEVICE=y ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++CONFIG_FB_IOMEM_FOPS=y ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++# end of Frame buffer Devices ++ ++# ++# Backlight & LCD device support ++# ++# CONFIG_LCD_CLASS_DEVICE is not set ++# CONFIG_BACKLIGHT_CLASS_DEVICE is not set ++# end of Backlight & LCD device support ++ ++# ++# Console display driver support ++# ++CONFIG_DUMMY_CONSOLE=y 
++CONFIG_DUMMY_CONSOLE_COLUMNS=80 ++CONFIG_DUMMY_CONSOLE_ROWS=25 ++# CONFIG_FRAMEBUFFER_CONSOLE is not set ++# end of Console display driver support ++ ++# CONFIG_LOGO is not set ++# end of Graphics support ++ ++# CONFIG_SOUND is not set ++# CONFIG_HID_SUPPORT is not set ++CONFIG_USB_OHCI_LITTLE_ENDIAN=y ++CONFIG_USB_SUPPORT=y ++CONFIG_USB_COMMON=y ++# CONFIG_USB_ULPI_BUS is not set ++# CONFIG_USB_CONN_GPIO is not set ++CONFIG_USB_ARCH_HAS_HCD=y ++CONFIG_USB=y ++# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set ++ ++# ++# Miscellaneous USB options ++# ++CONFIG_USB_DEFAULT_PERSIST=y ++# CONFIG_USB_FEW_INIT_RETRIES is not set ++# CONFIG_USB_DYNAMIC_MINORS is not set ++# CONFIG_USB_OTG is not set ++# CONFIG_USB_OTG_PRODUCTLIST is not set ++# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set ++CONFIG_USB_AUTOSUSPEND_DELAY=2 ++# CONFIG_USB_MON is not set ++ ++# ++# USB Host Controller Drivers ++# ++# CONFIG_USB_C67X00_HCD is not set ++CONFIG_USB_XHCI_HCD=y ++# CONFIG_USB_XHCI_DBGCAP is not set ++# CONFIG_USB_XHCI_PCI_RENESAS is not set ++CONFIG_USB_XHCI_PLATFORM=y ++# CONFIG_USB_EHCI_HCD is not set ++# CONFIG_USB_OXU210HP_HCD is not set ++# CONFIG_USB_ISP116X_HCD is not set ++# CONFIG_USB_MAX3421_HCD is not set ++# CONFIG_USB_OHCI_HCD is not set ++# CONFIG_USB_SL811_HCD is not set ++# CONFIG_USB_R8A66597_HCD is not set ++# CONFIG_USB_HCD_TEST_MODE is not set ++ ++# ++# USB Device Class drivers ++# ++# CONFIG_USB_ACM is not set ++# CONFIG_USB_PRINTER is not set ++# CONFIG_USB_WDM is not set ++# CONFIG_USB_TMC is not set ++ ++# ++# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may ++# ++ ++# ++# also be needed; see USB_STORAGE Help for more info ++# ++CONFIG_USB_STORAGE=y ++# CONFIG_USB_STORAGE_DEBUG is not set ++# CONFIG_USB_STORAGE_REALTEK is not set ++# CONFIG_USB_STORAGE_DATAFAB is not set ++# CONFIG_USB_STORAGE_FREECOM is not set ++# CONFIG_USB_STORAGE_ISD200 is not set ++# CONFIG_USB_STORAGE_USBAT is not set ++# CONFIG_USB_STORAGE_SDDR09 is not set ++# CONFIG_USB_STORAGE_SDDR55 is not set ++# CONFIG_USB_STORAGE_JUMPSHOT is not set ++# CONFIG_USB_STORAGE_ALAUDA is not set ++# CONFIG_USB_STORAGE_ONETOUCH is not set ++# CONFIG_USB_STORAGE_KARMA is not set ++# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set ++# CONFIG_USB_STORAGE_ENE_UB6250 is not set ++# CONFIG_USB_UAS is not set ++ ++# ++# USB Imaging devices ++# ++# CONFIG_USB_MDC800 is not set ++# CONFIG_USB_MICROTEK is not set ++# CONFIG_USBIP_CORE is not set ++ ++# ++# USB dual-mode controller drivers ++# ++# CONFIG_USB_CDNS_SUPPORT is not set ++# CONFIG_USB_MUSB_HDRC is not set ++CONFIG_USB_DWC3=y ++CONFIG_USB_DWC3_HOST=y ++# CONFIG_USB_DWC3_GADGET is not set ++# CONFIG_USB_DWC3_DUAL_ROLE is not set ++ ++# ++# Platform Glue Driver Support ++# ++# CONFIG_USB_DWC3_OF_SIMPLE is not set ++# CONFIG_USB_DWC2 is not set ++# CONFIG_USB_CHIPIDEA is not set ++# CONFIG_USB_ISP1760 is not set ++ ++# ++# USB port drivers ++# ++# CONFIG_USB_SERIAL is not set ++ ++# ++# USB Miscellaneous drivers ++# ++# CONFIG_USB_EMI62 is not set ++# CONFIG_USB_EMI26 is not set ++# CONFIG_USB_ADUTUX is not set ++# CONFIG_USB_SEVSEG is not set ++# CONFIG_USB_LEGOTOWER is not set ++# CONFIG_USB_LCD is not set ++# CONFIG_USB_CYPRESS_CY7C63 is not set ++# CONFIG_USB_CYTHERM is not set ++# CONFIG_USB_IDMOUSE is not set ++# CONFIG_USB_APPLEDISPLAY is not set ++# CONFIG_APPLE_MFI_FASTCHARGE is not set ++# CONFIG_USB_LD is not set ++# CONFIG_USB_TRANCEVIBRATOR is not set ++# CONFIG_USB_IOWARRIOR is not set ++# CONFIG_USB_TEST is not set ++# CONFIG_USB_EHSET_TEST_FIXTURE is not set ++# 
CONFIG_USB_ISIGHTFW is not set ++# CONFIG_USB_YUREX is not set ++# CONFIG_USB_EZUSB_FX2 is not set ++# CONFIG_USB_HUB_USB251XB is not set ++# CONFIG_USB_HSIC_USB3503 is not set ++# CONFIG_USB_HSIC_USB4604 is not set ++# CONFIG_USB_LINK_LAYER_TEST is not set ++# CONFIG_USB_ONBOARD_HUB is not set ++ ++# ++# USB Physical Layer drivers ++# ++# CONFIG_NOP_USB_XCEIV is not set ++# CONFIG_USB_GPIO_VBUS is not set ++# CONFIG_USB_ISP1301 is not set ++# CONFIG_USB_ULPI is not set ++# end of USB Physical Layer drivers ++ ++CONFIG_USB_GADGET=y ++# CONFIG_USB_GADGET_DEBUG is not set ++# CONFIG_USB_GADGET_DEBUG_FILES is not set ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 ++# CONFIG_U_SERIAL_CONSOLE is not set ++ ++# ++# USB Peripheral Controller ++# ++# CONFIG_USB_GR_UDC is not set ++# CONFIG_USB_R8A66597 is not set ++# CONFIG_USB_PXA27X is not set ++# CONFIG_USB_MV_UDC is not set ++# CONFIG_USB_MV_U3D is not set ++# CONFIG_USB_SNP_UDC_PLAT is not set ++# CONFIG_USB_M66592 is not set ++# CONFIG_USB_BDC_UDC is not set ++# CONFIG_USB_NET2272 is not set ++# CONFIG_USB_GADGET_XILINX is not set ++# CONFIG_USB_MAX3420_UDC is not set ++# CONFIG_USB_DUMMY_HCD is not set ++# end of USB Peripheral Controller ++ ++CONFIG_USB_LIBCOMPOSITE=y ++CONFIG_USB_F_ACM=y ++CONFIG_USB_U_SERIAL=y ++CONFIG_USB_F_MASS_STORAGE=y ++CONFIG_USB_CONFIGFS=y ++# CONFIG_USB_CONFIGFS_SERIAL is not set ++CONFIG_USB_CONFIGFS_ACM=y ++# CONFIG_USB_CONFIGFS_OBEX is not set ++# CONFIG_USB_CONFIGFS_NCM is not set ++# CONFIG_USB_CONFIGFS_ECM is not set ++# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set ++# CONFIG_USB_CONFIGFS_RNDIS is not set ++# CONFIG_USB_CONFIGFS_EEM is not set ++CONFIG_USB_CONFIGFS_MASS_STORAGE=y ++# CONFIG_USB_CONFIGFS_F_LB_SS is not set ++# CONFIG_USB_CONFIGFS_F_FS is not set ++# CONFIG_USB_CONFIGFS_F_HID is not set ++# CONFIG_USB_CONFIGFS_F_PRINTER is not set ++ ++# ++# USB Gadget precomposed configurations ++# ++# CONFIG_USB_ZERO is not set ++# CONFIG_USB_ETH is not set ++# CONFIG_USB_G_NCM is not set ++# CONFIG_USB_GADGETFS is not set ++# CONFIG_USB_FUNCTIONFS is not set ++# CONFIG_USB_MASS_STORAGE is not set ++# CONFIG_USB_G_SERIAL is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++# CONFIG_USB_G_ACM_MS is not set ++# CONFIG_USB_G_MULTI is not set ++# CONFIG_USB_G_HID is not set ++# CONFIG_USB_G_DBGP is not set ++# CONFIG_USB_RAW_GADGET is not set ++# end of USB Gadget precomposed configurations ++ ++# CONFIG_TYPEC is not set ++# CONFIG_USB_ROLE_SWITCH is not set ++CONFIG_MMC=y ++CONFIG_PWRSEQ_EMMC=y ++CONFIG_PWRSEQ_SIMPLE=y ++CONFIG_MMC_BLOCK=y ++CONFIG_MMC_BLOCK_MINORS=8 ++# CONFIG_SDIO_UART is not set ++# CONFIG_MMC_TEST is not set ++ ++# ++# MMC/SD/SDIO Host Controller Drivers ++# ++# CONFIG_MMC_DEBUG is not set ++# CONFIG_MMC_ARMMMCI is not set ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PLTFM=y ++# CONFIG_MMC_SDHCI_OF_ARASAN is not set ++# CONFIG_MMC_SDHCI_OF_AT91 is not set ++# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set ++# CONFIG_MMC_SDHCI_CADENCE is not set ++# CONFIG_MMC_SDHCI_F_SDH30 is not set ++# CONFIG_MMC_SDHCI_MILBEAUT is not set ++# CONFIG_MMC_SPI is not set ++# CONFIG_MMC_DW is not set ++# CONFIG_MMC_VUB300 is not set ++# CONFIG_MMC_USHC is not set ++# CONFIG_MMC_USDHI6ROL0 is not set ++CONFIG_MMC_CQHCI=y ++# CONFIG_MMC_HSQ is not set ++# CONFIG_MMC_MTK is not set ++# CONFIG_MMC_SDHCI_XENON is not set ++# CONFIG_SCSI_UFSHCD is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++# 
CONFIG_INFINIBAND is not set ++CONFIG_EDAC_SUPPORT=y ++# CONFIG_RTC_CLASS is not set ++# CONFIG_DMADEVICES is not set ++ ++# ++# DMABUF options ++# ++# CONFIG_SYNC_FILE is not set ++# CONFIG_DMABUF_HEAPS is not set ++# end of DMABUF options ++ ++# CONFIG_UIO is not set ++# CONFIG_VFIO is not set ++# CONFIG_VIRT_DRIVERS is not set ++# CONFIG_VIRTIO_MENU is not set ++# CONFIG_VDPA is not set ++# CONFIG_VHOST_MENU is not set ++ ++# ++# Microsoft Hyper-V guest support ++# ++# end of Microsoft Hyper-V guest support ++ ++# CONFIG_GREYBUS is not set ++# CONFIG_COMEDI is not set ++# CONFIG_STAGING is not set ++# CONFIG_GOLDFISH is not set ++# CONFIG_CHROME_PLATFORMS is not set ++# CONFIG_MELLANOX_PLATFORM is not set ++# CONFIG_SURFACE_PLATFORMS is not set ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_CLK_PREPARE=y ++CONFIG_COMMON_CLK=y ++ ++# ++# Clock driver for ARM Reference designs ++# ++# CONFIG_CLK_ICST is not set ++# CONFIG_CLK_SP810 is not set ++# end of Clock driver for ARM Reference designs ++ ++# CONFIG_LMK04832 is not set ++# CONFIG_COMMON_CLK_MAX9485 is not set ++# CONFIG_COMMON_CLK_SI5341 is not set ++# CONFIG_COMMON_CLK_SI5351 is not set ++# CONFIG_COMMON_CLK_SI514 is not set ++# CONFIG_COMMON_CLK_SI544 is not set ++# CONFIG_COMMON_CLK_SI570 is not set ++# CONFIG_COMMON_CLK_CDCE706 is not set ++# CONFIG_COMMON_CLK_CDCE925 is not set ++# CONFIG_COMMON_CLK_CS2000_CP is not set ++# CONFIG_COMMON_CLK_AXI_CLKGEN is not set ++# CONFIG_COMMON_CLK_XGENE is not set ++# CONFIG_COMMON_CLK_RS9_PCIE is not set ++# CONFIG_COMMON_CLK_SI521XX is not set ++# CONFIG_COMMON_CLK_VC3 is not set ++# CONFIG_COMMON_CLK_VC5 is not set ++# CONFIG_COMMON_CLK_VC7 is not set ++# CONFIG_COMMON_CLK_FIXED_MMIO is not set ++# CONFIG_XILINX_VCU is not set ++# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set ++CONFIG_COMMON_CLK_SS928V100=y ++CONFIG_RESET_BSP=y ++# CONFIG_HWSPINLOCK is not set ++ ++# ++# Clock Source drivers ++# ++CONFIG_TIMER_OF=y ++CONFIG_TIMER_PROBE=y ++CONFIG_CLKSRC_MMIO=y ++CONFIG_ARM_ARCH_TIMER=y ++CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y ++CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y ++# CONFIG_FSL_ERRATUM_A008585 is not set ++CONFIG_HISILICON_ERRATUM_161010101=y ++CONFIG_ARM64_ERRATUM_858921=y ++CONFIG_ARM_TIMER_SP804=y ++# end of Clock Source drivers ++ ++# CONFIG_MAILBOX is not set ++# CONFIG_IOMMU_SUPPORT is not set ++ ++# ++# Remoteproc drivers ++# ++# CONFIG_REMOTEPROC is not set ++# end of Remoteproc drivers ++ ++# ++# Rpmsg drivers ++# ++# CONFIG_RPMSG_VIRTIO is not set ++# end of Rpmsg drivers ++ ++# CONFIG_SOUNDWIRE is not set ++ ++# ++# SOC (System On Chip) specific Drivers ++# ++ ++# ++# Amlogic SoC drivers ++# ++# end of Amlogic SoC drivers ++ ++# ++# Broadcom SoC drivers ++# ++# CONFIG_SOC_BRCMSTB is not set ++# end of Broadcom SoC drivers ++ ++# ++# NXP/Freescale QorIQ SoC drivers ++# ++# CONFIG_QUICC_ENGINE is not set ++# end of NXP/Freescale QorIQ SoC drivers ++ ++# ++# fujitsu SoC drivers ++# ++# end of fujitsu SoC drivers ++ ++# ++# i.MX SoC drivers ++# ++# end of i.MX SoC drivers ++ ++# ++# Enable LiteX SoC Builder specific drivers ++# ++# CONFIG_LITEX_SOC_CONTROLLER is not set ++# end of Enable LiteX SoC Builder specific drivers ++ ++# CONFIG_WPCM450_SOC is not set ++ ++# ++# Qualcomm SoC drivers ++# ++# end of Qualcomm SoC drivers ++ ++# CONFIG_SOC_TI is not set ++ ++# ++# Xilinx SoC drivers ++# ++# end of Xilinx SoC drivers ++# end of SOC (System On Chip) specific Drivers ++ ++# CONFIG_PM_DEVFREQ is not set ++CONFIG_EXTCON=y ++ ++# ++# Extcon Device Drivers ++# ++# CONFIG_EXTCON_FSA9480 is not set ++# 
CONFIG_EXTCON_GPIO is not set ++# CONFIG_EXTCON_MAX3355 is not set ++# CONFIG_EXTCON_PTN5150 is not set ++# CONFIG_EXTCON_RT8973A is not set ++# CONFIG_EXTCON_SM5502 is not set ++# CONFIG_EXTCON_USB_GPIO is not set ++# CONFIG_MEMORY is not set ++# CONFIG_IIO is not set ++# CONFIG_PWM is not set ++ ++# ++# IRQ chip support ++# ++CONFIG_IRQCHIP=y ++CONFIG_ARM_GIC=y ++CONFIG_ARM_GIC_MAX_NR=1 ++CONFIG_ARM_GIC_V3=y ++CONFIG_ARM_GIC_V3_ITS=y ++# CONFIG_AL_FIC is not set ++# CONFIG_XILINX_INTC is not set ++CONFIG_PARTITION_PERCPU=y ++# end of IRQ chip support ++ ++# CONFIG_IPACK_BUS is not set ++CONFIG_RESET_CONTROLLER=y ++# CONFIG_RESET_SIMPLE is not set ++# CONFIG_RESET_TI_SYSCON is not set ++# CONFIG_RESET_TI_TPS380X is not set ++ ++# ++# PHY Subsystem ++# ++CONFIG_GENERIC_PHY=y ++# CONFIG_PHY_CAN_TRANSCEIVER is not set ++ ++# ++# PHY drivers for Broadcom platforms ++# ++# CONFIG_BCM_KONA_USB2_PHY is not set ++# end of PHY drivers for Broadcom platforms ++ ++# CONFIG_PHY_CADENCE_TORRENT is not set ++# CONFIG_PHY_CADENCE_DPHY is not set ++# CONFIG_PHY_CADENCE_DPHY_RX is not set ++# CONFIG_PHY_CADENCE_SIERRA is not set ++# CONFIG_PHY_CADENCE_SALVO is not set ++# CONFIG_PHY_PXA_28NM_HSIC is not set ++# CONFIG_PHY_PXA_28NM_USB2 is not set ++# CONFIG_PHY_LAN966X_SERDES is not set ++# CONFIG_PHY_MAPPHONE_MDM6600 is not set ++# CONFIG_PHY_OCELOT_SERDES is not set ++# end of PHY Subsystem ++ ++# CONFIG_POWERCAP is not set ++# CONFIG_MCB is not set ++# CONFIG_RAS is not set ++ ++# ++# Android ++# ++# CONFIG_ANDROID_BINDER_IPC is not set ++# end of Android ++ ++# CONFIG_DAX is not set ++# CONFIG_NVMEM is not set ++ ++# ++# HW tracing support ++# ++# CONFIG_STM is not set ++# CONFIG_INTEL_TH is not set ++# end of HW tracing support ++ ++# CONFIG_FPGA is not set ++# CONFIG_FSI is not set ++# CONFIG_TEE is not set ++CONFIG_PM_OPP=y ++# CONFIG_SIOX is not set ++# CONFIG_SLIMBUS is not set ++# CONFIG_INTERCONNECT is not set ++# CONFIG_COUNTER is not set ++# CONFIG_MOST is not set ++# CONFIG_EDMAC is not set ++# CONFIG_PECI is not set ++# CONFIG_HTE is not set ++# CONFIG_CDX_BUS is not set ++ ++# ++# Vendor driver support ++# ++CONFIG_USB_WING=y ++ ++# ++# Wing UPS Phy ++# ++CONFIG_WING_UPS_PHY=y ++CONFIG_WING_UPS_XVP_PHY=y ++CONFIG_WING_UPS_NANO_PHY=y ++# CONFIG_WING_UPS_MISSILE_PHY is not set ++# end of Wing UPS Phy ++ ++CONFIG_BASEDRV_CLK=y ++CONFIG_MMC_SDHCI_NEBULA=y ++# CONFIG_MMC_QUICKBOOT is not set ++# CONFIG_MMC_CARD_INFO is not set ++# CONFIG_MMC_SDHCI_ANT is not set ++# CONFIG_CMA_MEM_SHARED is not set ++# CONFIG_CMA_ADVANCE_SHARE is not set ++# end of Vendor driver support ++# end of Device Drivers ++ ++# ++# File systems ++# ++CONFIG_DCACHE_WORD_ACCESS=y ++# CONFIG_VALIDATE_FS_PARSER is not set ++CONFIG_FS_IOMAP=y ++CONFIG_BUFFER_HEAD=y ++# CONFIG_EXT2_FS is not set ++CONFIG_EXT3_FS=y ++# CONFIG_EXT3_FS_POSIX_ACL is not set ++# CONFIG_EXT3_FS_SECURITY is not set ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_USE_FOR_EXT2=y ++# CONFIG_EXT4_FS_POSIX_ACL is not set ++# CONFIG_EXT4_FS_SECURITY is not set ++# CONFIG_EXT4_DEBUG is not set ++CONFIG_JBD2=y ++# CONFIG_JBD2_DEBUG is not set ++CONFIG_FS_MBCACHE=y ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++CONFIG_XFS_FS=y ++CONFIG_XFS_SUPPORT_V4=y ++CONFIG_XFS_SUPPORT_ASCII_CI=y ++# CONFIG_XFS_QUOTA is not set ++# CONFIG_XFS_POSIX_ACL is not set ++# CONFIG_XFS_RT is not set ++# CONFIG_XFS_ONLINE_SCRUB is not set ++# CONFIG_XFS_WARN is not set ++# CONFIG_XFS_DEBUG is not set ++# CONFIG_GFS2_FS is not set ++# CONFIG_OCFS2_FS is not set 
++CONFIG_BTRFS_FS=y ++# CONFIG_BTRFS_FS_POSIX_ACL is not set ++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set ++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set ++# CONFIG_BTRFS_DEBUG is not set ++# CONFIG_BTRFS_ASSERT is not set ++# CONFIG_BTRFS_FS_REF_VERIFY is not set ++# CONFIG_NILFS2_FS is not set ++# CONFIG_F2FS_FS is not set ++CONFIG_FS_POSIX_ACL=y ++CONFIG_EXPORTFS=y ++# CONFIG_EXPORTFS_BLOCK_OPS is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_FS_ENCRYPTION is not set ++# CONFIG_FS_VERITY is not set ++CONFIG_FSNOTIFY=y ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY_USER=y ++# CONFIG_FANOTIFY is not set ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++# CONFIG_QUOTA_DEBUG is not set ++CONFIG_QUOTA_TREE=m ++CONFIG_QFMT_V1=m ++CONFIG_QFMT_V2=m ++CONFIG_QUOTACTL=y ++CONFIG_AUTOFS_FS=m ++CONFIG_FUSE_FS=y ++# CONFIG_CUSE is not set ++# CONFIG_VIRTIO_FS is not set ++# CONFIG_OVERLAY_FS is not set ++ ++# ++# Caches ++# ++# CONFIG_FSCACHE is not set ++# end of Caches ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++# end of CD-ROM/DVD Filesystems ++ ++# ++# DOS/FAT/EXFAT/NT Filesystems ++# ++# CONFIG_MSDOS_FS is not set ++# CONFIG_VFAT_FS is not set ++# CONFIG_EXFAT_FS is not set ++# CONFIG_NTFS_FS is not set ++# CONFIG_NTFS3_FS is not set ++# end of DOS/FAT/EXFAT/NT Filesystems ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++# CONFIG_PROC_CHILDREN is not set ++CONFIG_KERNFS=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_TMPFS_XATTR=y ++# CONFIG_TMPFS_INODE64 is not set ++# CONFIG_TMPFS_QUOTA is not set ++CONFIG_ARCH_SUPPORTS_HUGETLBFS=y ++# CONFIG_HUGETLBFS is not set ++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y ++CONFIG_CONFIGFS_FS=y ++# end of Pseudo filesystems ++ ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ORANGEFS_FS is not set ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_ECRYPT_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++CONFIG_CRAMFS=y ++CONFIG_CRAMFS_BLOCKDEV=y ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_FILE_CACHE=y ++# CONFIG_SQUASHFS_FILE_DIRECT is not set ++CONFIG_SQUASHFS_DECOMP_SINGLE=y ++# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set ++CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y ++# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set ++# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set ++# CONFIG_SQUASHFS_XATTR is not set ++CONFIG_SQUASHFS_ZLIB=y ++# CONFIG_SQUASHFS_LZ4 is not set ++CONFIG_SQUASHFS_LZO=y ++CONFIG_SQUASHFS_XZ=y ++# CONFIG_SQUASHFS_ZSTD is not set ++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set ++# CONFIG_SQUASHFS_EMBEDDED is not set ++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_QNX6FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_PSTORE is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++# CONFIG_EROFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++# CONFIG_NFS_FS is not set ++# CONFIG_NFSD is not set ++# CONFIG_CEPH_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_SMB_SERVER is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m 
++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=y ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++# CONFIG_NLS_MAC_ROMAN is not set ++# CONFIG_NLS_MAC_CELTIC is not set ++# CONFIG_NLS_MAC_CENTEURO is not set ++# CONFIG_NLS_MAC_CROATIAN is not set ++# CONFIG_NLS_MAC_CYRILLIC is not set ++# CONFIG_NLS_MAC_GAELIC is not set ++# CONFIG_NLS_MAC_GREEK is not set ++# CONFIG_NLS_MAC_ICELAND is not set ++# CONFIG_NLS_MAC_INUIT is not set ++# CONFIG_NLS_MAC_ROMANIAN is not set ++# CONFIG_NLS_MAC_TURKISH is not set ++CONFIG_NLS_UTF8=y ++# CONFIG_DLM is not set ++# CONFIG_UNICODE is not set ++CONFIG_IO_WQ=y ++# end of File systems ++ ++# ++# Security options ++# ++CONFIG_KEYS=y ++# CONFIG_KEYS_REQUEST_CACHE is not set ++# CONFIG_PERSISTENT_KEYRINGS is not set ++# CONFIG_TRUSTED_KEYS is not set ++# CONFIG_ENCRYPTED_KEYS is not set ++# CONFIG_KEY_DH_OPERATIONS is not set ++# CONFIG_SECURITY_DMESG_RESTRICT is not set ++CONFIG_PROC_MEM_ALWAYS_FORCE=y ++# CONFIG_PROC_MEM_FORCE_PTRACE is not set ++# CONFIG_PROC_MEM_NO_FORCE is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITYFS is not set ++# CONFIG_HARDENED_USERCOPY is not set ++# CONFIG_FORTIFY_SOURCE is not set ++# CONFIG_STATIC_USERMODEHELPER is not set ++CONFIG_DEFAULT_SECURITY_DAC=y ++CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" ++ ++# ++# Kernel hardening options ++# ++ ++# ++# Memory initialization ++# ++CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y ++CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER=y ++CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y ++CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_STACK_ALL_PATTERN is not set ++# CONFIG_INIT_STACK_ALL_ZERO is not set ++# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set ++# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set ++# end of Memory initialization ++ ++# ++# Hardening of kernel data structures ++# ++# CONFIG_LIST_HARDENED is not set ++# CONFIG_BUG_ON_DATA_CORRUPTION is not set ++# end of Hardening of kernel data structures ++ ++CONFIG_RANDSTRUCT_NONE=y ++# end of Kernel hardening options ++# end of Security options ++ ++CONFIG_XOR_BLOCKS=y ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD=m ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_SIG2=y ++CONFIG_CRYPTO_SKCIPHER=y ++CONFIG_CRYPTO_SKCIPHER2=y ++CONFIG_CRYPTO_HASH=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG=m ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_RNG_DEFAULT=m ++CONFIG_CRYPTO_AKCIPHER2=y ++CONFIG_CRYPTO_KPP2=y ++CONFIG_CRYPTO_ACOMP2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_USER is not set ++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y ++CONFIG_CRYPTO_NULL=m ++CONFIG_CRYPTO_NULL2=m ++# CONFIG_CRYPTO_PCRYPT is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC 
is not set ++# CONFIG_CRYPTO_TEST is not set ++# end of Crypto core or helper ++ ++# ++# Public-key cryptography ++# ++# CONFIG_CRYPTO_RSA is not set ++# CONFIG_CRYPTO_DH is not set ++# CONFIG_CRYPTO_ECDH is not set ++# CONFIG_CRYPTO_ECDSA is not set ++# CONFIG_CRYPTO_ECRDSA is not set ++# CONFIG_CRYPTO_SM2 is not set ++# CONFIG_CRYPTO_CURVE25519 is not set ++# end of Public-key cryptography ++ ++# ++# Block ciphers ++# ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_AES_TI is not set ++# CONFIG_CRYPTO_ARIA is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# CONFIG_CRYPTO_DES is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_SM4_GENERIC is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++# end of Block ciphers ++ ++# ++# Length-preserving ciphers and modes ++# ++# CONFIG_CRYPTO_ADIANTUM is not set ++# CONFIG_CRYPTO_CHACHA20 is not set ++# CONFIG_CRYPTO_CBC is not set ++# CONFIG_CRYPTO_CFB is not set ++CONFIG_CRYPTO_CTR=m ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=y ++# CONFIG_CRYPTO_HCTR2 is not set ++# CONFIG_CRYPTO_KEYWRAP is not set ++# CONFIG_CRYPTO_LRW is not set ++# CONFIG_CRYPTO_OFB is not set ++# CONFIG_CRYPTO_PCBC is not set ++# CONFIG_CRYPTO_XTS is not set ++# end of Length-preserving ciphers and modes ++ ++# ++# AEAD (authenticated encryption with associated data) ciphers ++# ++# CONFIG_CRYPTO_AEGIS128 is not set ++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set ++CONFIG_CRYPTO_CCM=m ++# CONFIG_CRYPTO_GCM is not set ++CONFIG_CRYPTO_GENIV=m ++CONFIG_CRYPTO_SEQIV=m ++CONFIG_CRYPTO_ECHAINIV=m ++# CONFIG_CRYPTO_ESSIV is not set ++# end of AEAD (authenticated encryption with associated data) ciphers ++ ++# ++# Hashes, digests, and MACs ++# ++CONFIG_CRYPTO_BLAKE2B=y ++CONFIG_CRYPTO_CMAC=y ++# CONFIG_CRYPTO_GHASH is not set ++CONFIG_CRYPTO_HMAC=m ++# CONFIG_CRYPTO_MD4 is not set ++# CONFIG_CRYPTO_MD5 is not set ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_POLY1305 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++CONFIG_CRYPTO_SHA256=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_SHA3=m ++# CONFIG_CRYPTO_SM3_GENERIC is not set ++# CONFIG_CRYPTO_STREEBOG is not set ++# CONFIG_CRYPTO_VMAC is not set ++# CONFIG_CRYPTO_WP512 is not set ++# CONFIG_CRYPTO_XCBC is not set ++CONFIG_CRYPTO_XXHASH=y ++# end of Hashes, digests, and MACs ++ ++# ++# CRCs (cyclic redundancy checks) ++# ++CONFIG_CRYPTO_CRC32C=y ++# CONFIG_CRYPTO_CRC32 is not set ++# CONFIG_CRYPTO_CRCT10DIF is not set ++# end of CRCs (cyclic redundancy checks) ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++# CONFIG_CRYPTO_842 is not set ++# CONFIG_CRYPTO_LZ4 is not set ++# CONFIG_CRYPTO_LZ4HC is not set ++CONFIG_CRYPTO_ZSTD=y ++# end of Compression ++ ++# ++# Random number generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DRBG_MENU=m ++CONFIG_CRYPTO_DRBG_HMAC=y ++# CONFIG_CRYPTO_DRBG_HASH is not set ++# CONFIG_CRYPTO_DRBG_CTR is not set ++CONFIG_CRYPTO_DRBG=m ++CONFIG_CRYPTO_JITTERENTROPY=m ++# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set ++# end of Random number generation ++ ++# ++# Userspace interface ++# ++# CONFIG_CRYPTO_USER_API_HASH is not set ++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set ++# CONFIG_CRYPTO_USER_API_RNG is not set ++# CONFIG_CRYPTO_USER_API_AEAD is not set ++# end of Userspace interface ++ ++# CONFIG_CRYPTO_NHPOLY1305_NEON is not set ++# 
CONFIG_CRYPTO_CHACHA20_NEON is not set ++ ++# ++# Accelerated Cryptographic Algorithms for CPU (arm64) ++# ++# CONFIG_CRYPTO_GHASH_ARM64_CE is not set ++# CONFIG_CRYPTO_POLY1305_NEON is not set ++# CONFIG_CRYPTO_SHA1_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA256_ARM64 is not set ++# CONFIG_CRYPTO_SHA2_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA512_ARM64 is not set ++# CONFIG_CRYPTO_SHA512_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA3_ARM64 is not set ++# CONFIG_CRYPTO_SM3_NEON is not set ++# CONFIG_CRYPTO_SM3_ARM64_CE is not set ++# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set ++# CONFIG_CRYPTO_AES_ARM64 is not set ++# CONFIG_CRYPTO_AES_ARM64_CE is not set ++# CONFIG_CRYPTO_AES_ARM64_CE_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_BS is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set ++# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_CE_CCM is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_CCM is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_GCM is not set ++# end of Accelerated Cryptographic Algorithms for CPU (arm64) ++ ++CONFIG_CRYPTO_HW=y ++# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set ++# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set ++# CONFIG_CRYPTO_DEV_CCP is not set ++# CONFIG_CRYPTO_DEV_SAFEXCEL is not set ++# CONFIG_CRYPTO_DEV_CCREE is not set ++# CONFIG_CRYPTO_DEV_HISI_SEC is not set ++# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set ++# CONFIG_ASYMMETRIC_KEY_TYPE is not set ++ ++# ++# Certificates for signature checking ++# ++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set ++# end of Certificates for signature checking ++ ++CONFIG_BINARY_PRINTF=y ++ ++# ++# Library routines ++# ++CONFIG_RAID6_PQ=y ++# CONFIG_RAID6_PQ_BENCHMARK is not set ++# CONFIG_PACKING is not set ++CONFIG_BITREVERSE=y ++CONFIG_HAVE_ARCH_BITREVERSE=y ++CONFIG_GENERIC_STRNCPY_FROM_USER=y ++CONFIG_GENERIC_STRNLEN_USER=y ++CONFIG_GENERIC_NET_UTILS=y ++# CONFIG_CORDIC is not set ++# CONFIG_PRIME_NUMBERS is not set ++CONFIG_RATIONAL=y ++CONFIG_GENERIC_PCI_IOMAP=y ++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y ++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y ++CONFIG_ARCH_USE_SYM_ANNOTATIONS=y ++# CONFIG_INDIRECT_PIO is not set ++ ++# ++# Crypto library routines ++# ++CONFIG_CRYPTO_LIB_UTILS=y ++CONFIG_CRYPTO_LIB_AES=y ++CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y ++# CONFIG_CRYPTO_LIB_CHACHA is not set ++# CONFIG_CRYPTO_LIB_CURVE25519 is not set ++CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 ++# CONFIG_CRYPTO_LIB_POLY1305 is not set ++# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set ++CONFIG_CRYPTO_LIB_SHA1=y ++CONFIG_CRYPTO_LIB_SHA256=y ++# end of Crypto library routines ++ ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC64_ROCKSOFT is not set ++CONFIG_CRC_ITU_T=y ++CONFIG_CRC32=y ++# CONFIG_CRC32_SELFTEST is not set ++CONFIG_CRC32_SLICEBY8=y ++# CONFIG_CRC32_SLICEBY4 is not set ++# CONFIG_CRC32_SARWATE is not set ++# CONFIG_CRC32_BIT is not set ++# CONFIG_CRC64 is not set ++# CONFIG_CRC4 is not set ++# CONFIG_CRC7 is not set ++CONFIG_LIBCRC32C=y ++# CONFIG_CRC8 is not set ++CONFIG_XXHASH=y ++CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y ++# CONFIG_RANDOM32_SELFTEST is not set ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_ZSTD_COMMON=y ++CONFIG_ZSTD_COMPRESS=y ++CONFIG_ZSTD_DECOMPRESS=y ++CONFIG_XZ_DEC=y ++# CONFIG_XZ_DEC_X86 is not set ++# CONFIG_XZ_DEC_POWERPC is not set ++# CONFIG_XZ_DEC_IA64 is not set ++# CONFIG_XZ_DEC_ARM is not set ++# CONFIG_XZ_DEC_ARMTHUMB is not set ++# 
CONFIG_XZ_DEC_SPARC is not set ++# CONFIG_XZ_DEC_MICROLZMA is not set ++# CONFIG_XZ_DEC_TEST is not set ++CONFIG_GENERIC_ALLOCATOR=y ++CONFIG_ASSOCIATIVE_ARRAY=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y ++CONFIG_NEED_SG_DMA_LENGTH=y ++CONFIG_NEED_DMA_MAP_STATE=y ++CONFIG_ARCH_DMA_ADDR_T_64BIT=y ++CONFIG_DMA_DECLARE_COHERENT=y ++CONFIG_ARCH_HAS_SETUP_DMA_OPS=y ++CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y ++CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y ++CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y ++CONFIG_SWIOTLB=y ++# CONFIG_SWIOTLB_DYNAMIC is not set ++CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y ++# CONFIG_DMA_RESTRICTED_POOL is not set ++CONFIG_DMA_NONCOHERENT_MMAP=y ++CONFIG_DMA_COHERENT_POOL=y ++CONFIG_DMA_DIRECT_REMAP=y ++CONFIG_DMA_CMA=y ++ ++# ++# Default contiguous memory area size: ++# ++CONFIG_CMA_SIZE_MBYTES=4 ++CONFIG_CMA_SIZE_SEL_MBYTES=y ++# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set ++# CONFIG_CMA_SIZE_SEL_MIN is not set ++# CONFIG_CMA_SIZE_SEL_MAX is not set ++CONFIG_CMA_ALIGNMENT=8 ++# CONFIG_DMA_API_DEBUG is not set ++CONFIG_SGL_ALLOC=y ++CONFIG_CPU_RMAP=y ++CONFIG_DQL=y ++CONFIG_GLOB=y ++# CONFIG_GLOB_SELFTEST is not set ++CONFIG_NLATTR=y ++# CONFIG_IRQ_POLL is not set ++CONFIG_LIBFDT=y ++CONFIG_HAVE_GENERIC_VDSO=y ++CONFIG_GENERIC_GETTIMEOFDAY=y ++CONFIG_GENERIC_VDSO_TIME_NS=y ++CONFIG_SG_POOL=y ++CONFIG_ARCH_STACKWALK=y ++CONFIG_STACKDEPOT=y ++CONFIG_SBITMAP=y ++# end of Library routines ++ ++CONFIG_GENERIC_IOREMAP=y ++CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y ++ ++# ++# Kernel hacking ++# ++ ++# ++# printk and dmesg options ++# ++# CONFIG_PRINTK_TIME is not set ++# CONFIG_PRINTK_CALLER is not set ++# CONFIG_STACKTRACE_BUILD_ID is not set ++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 ++CONFIG_CONSOLE_LOGLEVEL_QUIET=4 ++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_DYNAMIC_DEBUG is not set ++# CONFIG_DYNAMIC_DEBUG_CORE is not set ++CONFIG_SYMBOLIC_ERRNAME=y ++CONFIG_DEBUG_BUGVERBOSE=y ++# end of printk and dmesg options ++ ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_MISC=y ++ ++# ++# Compile-time checks and compiler options ++# ++CONFIG_AS_HAS_NON_CONST_LEB128=y ++CONFIG_DEBUG_INFO_NONE=y ++# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set ++# CONFIG_DEBUG_INFO_DWARF4 is not set ++# CONFIG_DEBUG_INFO_DWARF5 is not set ++CONFIG_FRAME_WARN=2048 ++# CONFIG_STRIP_ASM_SYMS is not set ++# CONFIG_HEADERS_INSTALL is not set ++CONFIG_SECTION_MISMATCH_WARN_ONLY=y ++# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set ++CONFIG_ARCH_WANT_FRAME_POINTERS=y ++CONFIG_FRAME_POINTER=y ++# CONFIG_VMLINUX_MAP is not set ++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set ++# end of Compile-time checks and compiler options ++ ++# ++# Generic Kernel Debugging Instruments ++# ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 ++CONFIG_MAGIC_SYSRQ_SERIAL=y ++CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" ++# CONFIG_DEBUG_FS is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y ++# CONFIG_UBSAN is not set ++CONFIG_HAVE_ARCH_KCSAN=y ++CONFIG_HAVE_KCSAN_COMPILER=y ++# CONFIG_KCSAN is not set ++# end of Generic Kernel Debugging Instruments ++ ++# ++# Networking Debugging ++# ++# CONFIG_NET_DEV_REFCNT_TRACKER is not set ++# CONFIG_NET_NS_REFCNT_TRACKER is not set ++# CONFIG_DEBUG_NET is not set ++# end of Networking Debugging ++ ++# ++# Memory Debugging ++# ++# CONFIG_PAGE_EXTENSION is not set ++# CONFIG_DEBUG_PAGEALLOC is not set ++CONFIG_SLUB_DEBUG=y ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_PAGE_OWNER is not set ++# 
CONFIG_PAGE_POISONING is not set ++# CONFIG_DEBUG_RODATA_TEST is not set ++CONFIG_ARCH_HAS_DEBUG_WX=y ++# CONFIG_DEBUG_WX is not set ++CONFIG_GENERIC_PTDUMP=y ++CONFIG_HAVE_DEBUG_KMEMLEAK=y ++# CONFIG_DEBUG_KMEMLEAK is not set ++# CONFIG_PER_VMA_LOCK_STATS is not set ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_SCHED_STACK_END_CHECK is not set ++CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_VM_PGTABLE is not set ++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y ++# CONFIG_DEBUG_VIRTUAL is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_PER_CPU_MAPS is not set ++CONFIG_HAVE_ARCH_KASAN=y ++CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y ++CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y ++CONFIG_HAVE_ARCH_KASAN_VMALLOC=y ++CONFIG_CC_HAS_KASAN_GENERIC=y ++CONFIG_CC_HAS_KASAN_SW_TAGS=y ++CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y ++# CONFIG_KASAN is not set ++CONFIG_HAVE_ARCH_KFENCE=y ++# CONFIG_KFENCE is not set ++# end of Memory Debugging ++ ++# CONFIG_DEBUG_SHIRQ is not set ++ ++# ++# Debug Oops, Lockups and Hangs ++# ++CONFIG_PANIC_ON_OOPS=y ++CONFIG_PANIC_ON_OOPS_VALUE=1 ++CONFIG_PANIC_TIMEOUT=0 ++# CONFIG_SOFTLOCKUP_DETECTOR is not set ++CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y ++# CONFIG_HARDLOCKUP_DETECTOR is not set ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 ++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set ++# CONFIG_WQ_WATCHDOG is not set ++# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set ++# CONFIG_TEST_LOCKUP is not set ++# end of Debug Oops, Lockups and Hangs ++ ++# ++# Scheduler Debugging ++# ++CONFIG_SCHED_INFO=y ++CONFIG_SCHEDSTATS=y ++# end of Scheduler Debugging ++ ++# CONFIG_DEBUG_TIMEKEEPING is not set ++ ++# ++# Lock Debugging (spinlocks, mutexes, etc...) ++# ++CONFIG_LOCK_DEBUGGING_SUPPORT=y ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++# CONFIG_DEBUG_MUTEXES is not set ++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set ++# CONFIG_DEBUG_RWSEMS is not set ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_DEBUG_ATOMIC_SLEEP is not set ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_LOCK_TORTURE_TEST is not set ++# CONFIG_WW_MUTEX_SELFTEST is not set ++# CONFIG_SCF_TORTURE_TEST is not set ++# CONFIG_CSD_LOCK_WAIT_DEBUG is not set ++# end of Lock Debugging (spinlocks, mutexes, etc...) 
++ ++# CONFIG_DEBUG_IRQFLAGS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set ++# CONFIG_DEBUG_KOBJECT is not set ++ ++# ++# Debug kernel data structures ++# ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_PLIST is not set ++# CONFIG_DEBUG_SG is not set ++# CONFIG_DEBUG_NOTIFIERS is not set ++# CONFIG_DEBUG_MAPLE_TREE is not set ++# end of Debug kernel data structures ++ ++# ++# RCU Debugging ++# ++# CONFIG_RCU_SCALE_TEST is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_REF_SCALE_TEST is not set ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 ++# CONFIG_RCU_CPU_STALL_CPUTIME is not set ++CONFIG_RCU_TRACE=y ++# CONFIG_RCU_EQS_DEBUG is not set ++# end of RCU Debugging ++ ++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set ++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_HAVE_FUNCTION_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y ++CONFIG_HAVE_DYNAMIC_FTRACE=y ++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y ++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y ++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y ++CONFIG_HAVE_C_RECORDMCOUNT=y ++CONFIG_TRACE_CLOCK=y ++CONFIG_TRACING_SUPPORT=y ++# CONFIG_FTRACE is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y ++CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y ++# CONFIG_STRICT_DEVMEM is not set ++ ++# ++# arm64 Debugging ++# ++# CONFIG_PID_IN_CONTEXTIDR is not set ++# CONFIG_ARM64_RELOC_TEST is not set ++# CONFIG_CORESIGHT is not set ++# end of arm64 Debugging ++ ++# ++# Kernel Testing and Coverage ++# ++# CONFIG_KUNIT is not set ++# CONFIG_NOTIFIER_ERROR_INJECTION is not set ++# CONFIG_FAULT_INJECTION is not set ++CONFIG_ARCH_HAS_KCOV=y ++CONFIG_CC_HAS_SANCOV_TRACE_PC=y ++# CONFIG_KCOV is not set ++CONFIG_RUNTIME_TESTING_MENU=y ++# CONFIG_TEST_DHRY is not set ++# CONFIG_TEST_MIN_HEAP is not set ++# CONFIG_TEST_DIV64 is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_TEST_REF_TRACKER is not set ++# CONFIG_RBTREE_TEST is not set ++# CONFIG_REED_SOLOMON_TEST is not set ++# CONFIG_INTERVAL_TREE_TEST is not set ++# CONFIG_PERCPU_TEST is not set ++# CONFIG_ATOMIC64_SELFTEST is not set ++# CONFIG_TEST_HEXDUMP is not set ++# CONFIG_STRING_SELFTEST is not set ++# CONFIG_TEST_STRING_HELPERS is not set ++# CONFIG_TEST_KSTRTOX is not set ++# CONFIG_TEST_PRINTF is not set ++# CONFIG_TEST_SCANF is not set ++# CONFIG_TEST_BITMAP is not set ++# CONFIG_TEST_UUID is not set ++# CONFIG_TEST_XARRAY is not set ++# CONFIG_TEST_MAPLE_TREE is not set ++# CONFIG_TEST_RHASHTABLE is not set ++# CONFIG_TEST_IDA is not set ++# CONFIG_TEST_LKM is not set ++# CONFIG_TEST_BITOPS is not set ++# CONFIG_TEST_VMALLOC is not set ++# CONFIG_TEST_USER_COPY is not set ++# CONFIG_TEST_BPF is not set ++# CONFIG_TEST_BLACKHOLE_DEV is not set ++# CONFIG_FIND_BIT_BENCHMARK is not set ++# CONFIG_TEST_FIRMWARE is not set ++# CONFIG_TEST_SYSCTL is not set ++# CONFIG_TEST_UDELAY is not set ++# CONFIG_TEST_STATIC_KEYS is not set ++# CONFIG_TEST_KMOD is not set ++# CONFIG_TEST_MEMCAT_P is not set ++# CONFIG_TEST_MEMINIT is not set ++# CONFIG_TEST_FREE_PAGES is not set ++CONFIG_ARCH_USE_MEMTEST=y ++# CONFIG_MEMTEST is not set ++# end of Kernel Testing and Coverage ++ ++# ++# Rust hacking ++# ++# end of Rust hacking ++# end of Kernel hacking +diff --git a/arch/arm64/configs/ss928v100_nand_defconfig b/arch/arm64/configs/ss928v100_nand_defconfig +new file mode 100644 +index 000000000..fb677b107 +--- /dev/null ++++ b/arch/arm64/configs/ss928v100_nand_defconfig +@@ 
-0,0 +1,3277 @@ ++# ++# Automatically generated file; DO NOT EDIT. ++# Linux/arm64 6.6.90 Kernel Configuration ++# ++CONFIG_CC_VERSION_TEXT="clang version 15.0.4 (v050 musl1.2.3 2023-03-18 10:36:32)" ++CONFIG_GCC_VERSION=0 ++CONFIG_CC_IS_CLANG=y ++CONFIG_CLANG_VERSION=150004 ++CONFIG_AS_IS_LLVM=y ++CONFIG_AS_VERSION=150004 ++CONFIG_LD_VERSION=0 ++CONFIG_LD_IS_LLD=y ++CONFIG_LLD_VERSION=150004 ++CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y ++CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y ++CONFIG_TOOLS_SUPPORT_RELR=y ++CONFIG_CC_HAS_ASM_INLINE=y ++CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y ++CONFIG_PAHOLE_VERSION=121 ++CONFIG_IRQ_WORK=y ++CONFIG_BUILDTIME_TABLE_SORT=y ++CONFIG_THREAD_INFO_IN_TASK=y ++ ++# ++# General setup ++# ++CONFIG_INIT_ENV_ARG_LIMIT=32 ++# CONFIG_COMPILE_TEST is not set ++# CONFIG_WERROR is not set ++CONFIG_LOCALVERSION="" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_BUILD_SALT="" ++CONFIG_DEFAULT_INIT="" ++CONFIG_DEFAULT_HOSTNAME="(none)" ++CONFIG_SYSVIPC=y ++CONFIG_SYSVIPC_SYSCTL=y ++# CONFIG_POSIX_MQUEUE is not set ++# CONFIG_WATCH_QUEUE is not set ++CONFIG_CROSS_MEMORY_ATTACH=y ++CONFIG_USELIB=y ++# CONFIG_AUDIT is not set ++CONFIG_HAVE_ARCH_AUDITSYSCALL=y ++ ++# ++# IRQ subsystem ++# ++CONFIG_GENERIC_IRQ_PROBE=y ++CONFIG_GENERIC_IRQ_SHOW=y ++CONFIG_GENERIC_IRQ_SHOW_LEVEL=y ++CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y ++CONFIG_GENERIC_IRQ_MIGRATION=y ++CONFIG_HARDIRQS_SW_RESEND=y ++CONFIG_IRQ_DOMAIN=y ++CONFIG_IRQ_DOMAIN_HIERARCHY=y ++CONFIG_GENERIC_IRQ_IPI=y ++CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_IRQ_FORCED_THREADING=y ++CONFIG_SPARSE_IRQ=y ++# end of IRQ subsystem ++ ++CONFIG_GENERIC_TIME_VSYSCALL=y ++CONFIG_GENERIC_CLOCKEVENTS=y ++CONFIG_ARCH_HAS_TICK_BROADCAST=y ++CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y ++CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y ++CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y ++CONFIG_CONTEXT_TRACKING=y ++CONFIG_CONTEXT_TRACKING_IDLE=y ++ ++# ++# Timers subsystem ++# ++CONFIG_HZ_PERIODIC=y ++# CONFIG_NO_HZ_IDLE is not set ++# CONFIG_NO_HZ_FULL is not set ++# CONFIG_NO_HZ is not set ++# CONFIG_HIGH_RES_TIMERS is not set ++# end of Timers subsystem ++ ++CONFIG_BPF=y ++CONFIG_HAVE_EBPF_JIT=y ++CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y ++ ++# ++# BPF subsystem ++# ++CONFIG_BPF_SYSCALL=y ++# CONFIG_BPF_JIT is not set ++# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set ++# CONFIG_BPF_PRELOAD is not set ++# end of BPF subsystem ++ ++CONFIG_PREEMPT_NONE_BUILD=y ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set ++# CONFIG_PREEMPT is not set ++# CONFIG_PREEMPT_DYNAMIC is not set ++ ++# ++# CPU/Task time and stats accounting ++# ++CONFIG_TICK_CPU_ACCOUNTING=y ++# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set ++# CONFIG_IRQ_TIME_ACCOUNTING is not set ++# CONFIG_BSD_PROCESS_ACCT is not set ++# CONFIG_TASKSTATS is not set ++# CONFIG_PSI is not set ++# end of CPU/Task time and stats accounting ++ ++CONFIG_CPU_ISOLATION=y ++ ++# ++# RCU Subsystem ++# ++CONFIG_TREE_RCU=y ++# CONFIG_RCU_EXPERT is not set ++CONFIG_TREE_SRCU=y ++CONFIG_TASKS_RCU_GENERIC=y ++CONFIG_TASKS_TRACE_RCU=y ++CONFIG_RCU_STALL_COMMON=y ++CONFIG_RCU_NEED_SEGCBLIST=y ++# end of RCU Subsystem ++ ++# CONFIG_IKCONFIG is not set ++# CONFIG_IKHEADERS is not set ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 ++CONFIG_GENERIC_SCHED_CLOCK=y ++ ++# ++# Scheduler features ++# ++# end of Scheduler features ++ ++CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y ++CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y ++CONFIG_CC_HAS_INT128=y ++CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough" ++CONFIG_GCC10_NO_ARRAY_BOUNDS=y ++CONFIG_ARCH_SUPPORTS_INT128=y 
++# CONFIG_CGROUPS is not set ++CONFIG_NAMESPACES=y ++CONFIG_UTS_NS=y ++CONFIG_TIME_NS=y ++CONFIG_IPC_NS=y ++# CONFIG_USER_NS is not set ++CONFIG_PID_NS=y ++CONFIG_NET_NS=y ++# CONFIG_CHECKPOINT_RESTORE is not set ++# CONFIG_SCHED_AUTOGROUP is not set ++CONFIG_RELAY=y ++# CONFIG_BLK_DEV_INITRD is not set ++# CONFIG_BOOT_CONFIG is not set ++CONFIG_INITRAMFS_PRESERVE_MTIME=y ++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_LD_ORPHAN_WARN=y ++CONFIG_LD_ORPHAN_WARN_LEVEL="warn" ++CONFIG_SYSCTL=y ++CONFIG_SYSCTL_EXCEPTION_TRACE=y ++CONFIG_EXPERT=y ++CONFIG_MULTIUSER=y ++# CONFIG_SGETMASK_SYSCALL is not set ++CONFIG_SYSFS_SYSCALL=y ++# CONFIG_FHANDLE is not set ++CONFIG_POSIX_TIMERS=y ++CONFIG_PRINTK=y ++CONFIG_BUG=y ++CONFIG_ELF_CORE=y ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_FUTEX_PI=y ++CONFIG_EPOLL=y ++CONFIG_SIGNALFD=y ++CONFIG_TIMERFD=y ++CONFIG_EVENTFD=y ++CONFIG_SHMEM=y ++CONFIG_AIO=y ++CONFIG_IO_URING=y ++CONFIG_ADVISE_SYSCALLS=y ++CONFIG_MEMBARRIER=y ++CONFIG_KALLSYMS=y ++# CONFIG_KALLSYMS_SELFTEST is not set ++# CONFIG_KALLSYMS_ALL is not set ++CONFIG_KALLSYMS_BASE_RELATIVE=y ++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y ++# CONFIG_KCMP is not set ++CONFIG_RSEQ=y ++CONFIG_CACHESTAT_SYSCALL=y ++# CONFIG_DEBUG_RSEQ is not set ++CONFIG_HAVE_PERF_EVENTS=y ++# CONFIG_PC104 is not set ++ ++# ++# Kernel Performance Events And Counters ++# ++# CONFIG_PERF_EVENTS is not set ++# end of Kernel Performance Events And Counters ++ ++# CONFIG_PROFILING is not set ++ ++# ++# Kexec and crash features ++# ++# CONFIG_KEXEC_FILE is not set ++# CONFIG_CRASH_DUMP is not set ++# end of Kexec and crash features ++# end of General setup ++ ++CONFIG_ARM64=y ++CONFIG_CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y ++CONFIG_64BIT=y ++CONFIG_MMU=y ++CONFIG_ARM64_PAGE_SHIFT=12 ++CONFIG_ARM64_CONT_PTE_SHIFT=4 ++CONFIG_ARM64_CONT_PMD_SHIFT=4 ++CONFIG_ARCH_MMAP_RND_BITS_MIN=18 ++CONFIG_ARCH_MMAP_RND_BITS_MAX=24 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 ++CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 ++CONFIG_NO_IOPORT_MAP=y ++CONFIG_STACKTRACE_SUPPORT=y ++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 ++CONFIG_LOCKDEP_SUPPORT=y ++CONFIG_GENERIC_BUG=y ++CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y ++CONFIG_GENERIC_HWEIGHT=y ++CONFIG_GENERIC_CSUM=y ++CONFIG_GENERIC_CALIBRATE_DELAY=y ++CONFIG_SMP=y ++CONFIG_KERNEL_MODE_NEON=y ++CONFIG_FIX_EARLYCON_MEM=y ++CONFIG_PGTABLE_LEVELS=3 ++CONFIG_ARCH_SUPPORTS_UPROBES=y ++CONFIG_ARCH_PROC_KCORE_TEXT=y ++CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y ++ ++# ++# Platform selection ++# ++# CONFIG_ARCH_ACTIONS is not set ++# CONFIG_ARCH_SUNXI is not set ++# CONFIG_ARCH_ALPINE is not set ++# CONFIG_ARCH_APPLE is not set ++# CONFIG_ARCH_BCM is not set ++# CONFIG_ARCH_BERLIN is not set ++# CONFIG_ARCH_BITMAIN is not set ++# CONFIG_ARCH_EXYNOS is not set ++# CONFIG_ARCH_SPARX5 is not set ++# CONFIG_ARCH_K3 is not set ++# CONFIG_ARCH_LG1K is not set ++# CONFIG_ARCH_HISI is not set ++# CONFIG_ARCH_KEEMBAY is not set ++CONFIG_ARCH_BSP=y ++CONFIG_ARCH_SS928V100=y ++# CONFIG_ARCH_MEDIATEK is not set ++# CONFIG_ARCH_MESON is not set ++# CONFIG_ARCH_MVEBU is not set ++# CONFIG_ARCH_NXP is not set ++# CONFIG_ARCH_MA35 is not set ++# CONFIG_ARCH_NPCM is not set ++# CONFIG_ARCH_QCOM is not set ++# CONFIG_ARCH_REALTEK is not set ++# CONFIG_ARCH_RENESAS is not set ++# CONFIG_ARCH_ROCKCHIP is not set ++# CONFIG_ARCH_SEATTLE is not set ++# CONFIG_ARCH_INTEL_SOCFPGA is not set ++# CONFIG_ARCH_STM32 is not set ++# CONFIG_ARCH_SYNQUACER is not set ++# CONFIG_ARCH_TEGRA is not set 
++# CONFIG_ARCH_SPRD is not set ++# CONFIG_ARCH_THUNDER is not set ++# CONFIG_ARCH_THUNDER2 is not set ++# CONFIG_ARCH_UNIPHIER is not set ++# CONFIG_ARCH_VEXPRESS is not set ++# CONFIG_ARCH_VISCONTI is not set ++# CONFIG_ARCH_XGENE is not set ++# CONFIG_ARCH_ZYNQMP is not set ++# end of Platform selection ++ ++# ++# Kernel Features ++# ++ ++# ++# ARM errata workarounds via the alternatives framework ++# ++CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y ++CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y ++CONFIG_ARM64_ERRATUM_826319=y ++CONFIG_ARM64_ERRATUM_827319=y ++CONFIG_ARM64_ERRATUM_824069=y ++CONFIG_ARM64_ERRATUM_819472=y ++CONFIG_ARM64_ERRATUM_832075=y ++CONFIG_ARM64_ERRATUM_843419=y ++CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y ++CONFIG_ARM64_ERRATUM_1024718=y ++CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y ++CONFIG_ARM64_ERRATUM_1165522=y ++CONFIG_ARM64_ERRATUM_1319367=y ++CONFIG_ARM64_ERRATUM_1530923=y ++CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y ++CONFIG_ARM64_ERRATUM_2441007=y ++CONFIG_ARM64_ERRATUM_1286807=y ++CONFIG_ARM64_ERRATUM_1463225=y ++CONFIG_ARM64_ERRATUM_1542419=y ++CONFIG_ARM64_ERRATUM_1508412=y ++CONFIG_ARM64_ERRATUM_2051678=y ++CONFIG_ARM64_ERRATUM_2077057=y ++CONFIG_ARM64_ERRATUM_2658417=y ++CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y ++CONFIG_ARM64_ERRATUM_2054223=y ++CONFIG_ARM64_ERRATUM_2067961=y ++CONFIG_ARM64_ERRATUM_2441009=y ++CONFIG_ARM64_ERRATUM_2457168=y ++CONFIG_ARM64_ERRATUM_2645198=y ++CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y ++CONFIG_ARM64_ERRATUM_2966298=y ++CONFIG_ARM64_ERRATUM_3117295=y ++CONFIG_ARM64_ERRATUM_3194386=y ++CONFIG_CAVIUM_ERRATUM_22375=y ++CONFIG_CAVIUM_ERRATUM_23154=y ++CONFIG_CAVIUM_ERRATUM_27456=y ++CONFIG_CAVIUM_ERRATUM_30115=y ++CONFIG_CAVIUM_TX2_ERRATUM_219=y ++CONFIG_FUJITSU_ERRATUM_010001=y ++CONFIG_HISILICON_ERRATUM_161600802=y ++CONFIG_QCOM_FALKOR_ERRATUM_1003=y ++CONFIG_QCOM_FALKOR_ERRATUM_1009=y ++CONFIG_QCOM_QDF2400_ERRATUM_0065=y ++CONFIG_QCOM_FALKOR_ERRATUM_E1041=y ++CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y ++CONFIG_ROCKCHIP_ERRATUM_3588001=y ++CONFIG_SOCIONEXT_SYNQUACER_PREITS=y ++# end of ARM errata workarounds via the alternatives framework ++ ++CONFIG_ARM64_4K_PAGES=y ++# CONFIG_ARM64_16K_PAGES is not set ++# CONFIG_ARM64_64K_PAGES is not set ++CONFIG_ARM64_VA_BITS_39=y ++# CONFIG_ARM64_VA_BITS_48 is not set ++CONFIG_ARM64_VA_BITS=39 ++CONFIG_ARM64_PA_BITS_48=y ++CONFIG_ARM64_PA_BITS=48 ++# CONFIG_CPU_BIG_ENDIAN is not set ++CONFIG_CPU_LITTLE_ENDIAN=y ++CONFIG_SCHED_MC=y ++# CONFIG_SCHED_CLUSTER is not set ++# CONFIG_SCHED_SMT is not set ++CONFIG_NR_CPUS=4 ++CONFIG_HOTPLUG_CPU=y ++# CONFIG_NUMA is not set ++CONFIG_HZ_100=y ++# CONFIG_HZ_250 is not set ++# CONFIG_HZ_300 is not set ++# CONFIG_HZ_1000 is not set ++CONFIG_HZ=100 ++CONFIG_ARCH_SPARSEMEM_ENABLE=y ++CONFIG_CC_HAVE_SHADOW_CALL_STACK=y ++# CONFIG_PARAVIRT is not set ++# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set ++CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y ++CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y ++CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y ++CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y ++CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y ++# CONFIG_XEN is not set ++CONFIG_ARCH_FORCE_MAX_ORDER=10 ++CONFIG_UNMAP_KERNEL_AT_EL0=y ++CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y ++CONFIG_RODATA_FULL_DEFAULT_ENABLED=y ++# CONFIG_ARM64_SW_TTBR0_PAN is not set ++CONFIG_ARM64_TAGGED_ADDR_ABI=y ++# CONFIG_COMPAT is not set ++ ++# ++# ARMv8.1 architectural features ++# ++CONFIG_ARM64_HW_AFDBM=y ++CONFIG_ARM64_PAN=y ++CONFIG_AS_HAS_LSE_ATOMICS=y ++CONFIG_ARM64_LSE_ATOMICS=y ++CONFIG_ARM64_USE_LSE_ATOMICS=y ++# end of ARMv8.1 
architectural features ++ ++# ++# ARMv8.2 architectural features ++# ++CONFIG_AS_HAS_ARMV8_2=y ++CONFIG_AS_HAS_SHA3=y ++# CONFIG_ARM64_PMEM is not set ++CONFIG_ARM64_RAS_EXTN=y ++CONFIG_ARM64_CNP=y ++# end of ARMv8.2 architectural features ++ ++# ++# ARMv8.3 architectural features ++# ++CONFIG_ARM64_PTR_AUTH=y ++CONFIG_ARM64_PTR_AUTH_KERNEL=y ++CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y ++CONFIG_AS_HAS_ARMV8_3=y ++CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y ++CONFIG_AS_HAS_LDAPR=y ++# end of ARMv8.3 architectural features ++ ++# ++# ARMv8.4 architectural features ++# ++CONFIG_ARM64_AMU_EXTN=y ++CONFIG_AS_HAS_ARMV8_4=y ++CONFIG_ARM64_TLB_RANGE=y ++# end of ARMv8.4 architectural features ++ ++# ++# ARMv8.5 architectural features ++# ++CONFIG_AS_HAS_ARMV8_5=y ++CONFIG_ARM64_BTI=y ++CONFIG_ARM64_BTI_KERNEL=y ++CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y ++CONFIG_ARM64_E0PD=y ++CONFIG_ARM64_AS_HAS_MTE=y ++CONFIG_ARM64_MTE=y ++# end of ARMv8.5 architectural features ++ ++# ++# ARMv8.7 architectural features ++# ++CONFIG_ARM64_EPAN=y ++# end of ARMv8.7 architectural features ++ ++CONFIG_ARM64_SVE=y ++# CONFIG_ARM64_PSEUDO_NMI is not set ++CONFIG_RELOCATABLE=y ++# CONFIG_RANDOMIZE_BASE is not set ++CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y ++CONFIG_STACKPROTECTOR_PER_TASK=y ++# end of Kernel Features ++ ++# ++# Boot options ++# ++CONFIG_CMDLINE="mem=128M console=ttyAMA0,115200 console=ttyMTD,blackbox" ++CONFIG_CMDLINE_FROM_BOOTLOADER=y ++# CONFIG_CMDLINE_FORCE is not set ++# CONFIG_EFI is not set ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y ++CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="vendor/ss928v100-demb-flash" ++# end of Boot options ++ ++# ++# Power management options ++# ++# CONFIG_SUSPEND is not set ++CONFIG_PM=y ++# CONFIG_PM_DEBUG is not set ++CONFIG_PM_CLK=y ++# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set ++# CONFIG_ENERGY_MODEL is not set ++CONFIG_ARCH_SUSPEND_POSSIBLE=y ++# end of Power management options ++ ++# ++# CPU Power Management ++# ++ ++# ++# CPU Idle ++# ++# CONFIG_CPU_IDLE is not set ++# end of CPU Idle ++ ++# ++# CPU Frequency scaling ++# ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_ATTR_SET=y ++CONFIG_CPU_FREQ_GOV_COMMON=y ++CONFIG_CPU_FREQ_STAT=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set ++# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y ++# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set ++ ++# ++# CPU frequency scaling drivers ++# ++CONFIG_CPUFREQ_DT=y ++CONFIG_CPUFREQ_DT_PLATDEV=y ++# end of CPU Frequency scaling ++# end of CPU Power Management ++ ++CONFIG_HAVE_KVM=y ++# CONFIG_VIRTUALIZATION is not set ++CONFIG_CPU_MITIGATIONS=y ++ ++# ++# General architecture-dependent options ++# ++CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y ++CONFIG_HOTPLUG_CORE_SYNC=y ++CONFIG_HOTPLUG_CORE_SYNC_DEAD=y ++# CONFIG_KPROBES is not set ++# CONFIG_JUMP_LABEL is not set ++CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y ++CONFIG_HAVE_IOREMAP_PROT=y ++CONFIG_HAVE_KPROBES=y ++CONFIG_HAVE_KRETPROBES=y ++CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y ++CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y ++CONFIG_HAVE_NMI=y ++CONFIG_TRACE_IRQFLAGS_SUPPORT=y ++CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y ++CONFIG_HAVE_ARCH_TRACEHOOK=y ++CONFIG_HAVE_DMA_CONTIGUOUS=y 
++CONFIG_GENERIC_SMP_IDLE_THREAD=y ++CONFIG_GENERIC_IDLE_POLL_SETUP=y ++CONFIG_ARCH_HAS_FORTIFY_SOURCE=y ++CONFIG_ARCH_HAS_KEEPINITRD=y ++CONFIG_ARCH_HAS_SET_MEMORY=y ++CONFIG_ARCH_HAS_SET_DIRECT_MAP=y ++CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y ++CONFIG_ARCH_WANTS_NO_INSTR=y ++CONFIG_HAVE_ASM_MODVERSIONS=y ++CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y ++CONFIG_HAVE_RSEQ=y ++CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y ++CONFIG_HAVE_PERF_REGS=y ++CONFIG_HAVE_PERF_USER_STACK_DUMP=y ++CONFIG_HAVE_ARCH_JUMP_LABEL=y ++CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y ++CONFIG_MMU_GATHER_TABLE_FREE=y ++CONFIG_MMU_GATHER_RCU_TABLE_FREE=y ++CONFIG_MMU_LAZY_TLB_REFCOUNT=y ++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y ++CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y ++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y ++CONFIG_HAVE_CMPXCHG_LOCAL=y ++CONFIG_HAVE_CMPXCHG_DOUBLE=y ++CONFIG_HAVE_ARCH_SECCOMP=y ++CONFIG_HAVE_ARCH_SECCOMP_FILTER=y ++# CONFIG_SECCOMP is not set ++CONFIG_HAVE_ARCH_STACKLEAK=y ++CONFIG_HAVE_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR=y ++CONFIG_STACKPROTECTOR_STRONG=y ++CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y ++# CONFIG_SHADOW_CALL_STACK is not set ++CONFIG_ARCH_SUPPORTS_LTO_CLANG=y ++CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y ++CONFIG_HAS_LTO_CLANG=y ++CONFIG_LTO_NONE=y ++# CONFIG_LTO_CLANG_FULL is not set ++# CONFIG_LTO_CLANG_THIN is not set ++CONFIG_ARCH_SUPPORTS_CFI_CLANG=y ++CONFIG_HAVE_CONTEXT_TRACKING_USER=y ++CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y ++CONFIG_HAVE_MOVE_PUD=y ++CONFIG_HAVE_MOVE_PMD=y ++CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y ++CONFIG_HAVE_ARCH_HUGE_VMAP=y ++CONFIG_HAVE_ARCH_HUGE_VMALLOC=y ++CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y ++CONFIG_HAVE_MOD_ARCH_SPECIFIC=y ++CONFIG_MODULES_USE_ELF_RELA=y ++CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y ++CONFIG_SOFTIRQ_ON_OWN_STACK=y ++CONFIG_ARCH_HAS_ELF_RANDOMIZE=y ++CONFIG_HAVE_ARCH_MMAP_RND_BITS=y ++CONFIG_ARCH_MMAP_RND_BITS=18 ++CONFIG_PAGE_SIZE_LESS_THAN_64KB=y ++CONFIG_PAGE_SIZE_LESS_THAN_256KB=y ++CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y ++CONFIG_CLONE_BACKWARDS=y ++CONFIG_COMPAT_32BIT_TIME=y ++CONFIG_HAVE_ARCH_VMAP_STACK=y ++CONFIG_VMAP_STACK=y ++CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y ++CONFIG_RANDOMIZE_KSTACK_OFFSET=y ++# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set ++CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y ++CONFIG_STRICT_KERNEL_RWX=y ++CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y ++CONFIG_STRICT_MODULE_RWX=y ++CONFIG_HAVE_ARCH_COMPILER_H=y ++CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y ++CONFIG_ARCH_HAS_RELR=y ++CONFIG_RELR=y ++CONFIG_HAVE_PREEMPT_DYNAMIC=y ++CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y ++CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y ++CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y ++CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y ++CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y ++ ++# ++# GCOV-based kernel profiling ++# ++CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y ++# end of GCOV-based kernel profiling ++ ++CONFIG_HAVE_GCC_PLUGINS=y ++CONFIG_FUNCTION_ALIGNMENT_4B=y ++CONFIG_FUNCTION_ALIGNMENT=4 ++# end of General architecture-dependent options ++ ++CONFIG_RT_MUTEXES=y ++CONFIG_BASE_SMALL=0 ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set ++# CONFIG_MODVERSIONS is not set ++# CONFIG_MODULE_SRCVERSION_ALL is not set ++# CONFIG_MODULE_SIG is not set ++CONFIG_MODULE_COMPRESS_NONE=y ++# CONFIG_MODULE_COMPRESS_GZIP is not set ++# CONFIG_MODULE_COMPRESS_XZ is not set ++# CONFIG_MODULE_COMPRESS_ZSTD is not set ++# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set 
++CONFIG_MODPROBE_PATH="/sbin/modprobe" ++# CONFIG_TRIM_UNUSED_KSYMS is not set ++CONFIG_BLOCK=y ++CONFIG_BLOCK_LEGACY_AUTOLOAD=y ++CONFIG_BLK_CGROUP_PUNT_BIO=y ++CONFIG_BLK_DEV_BSG_COMMON=y ++# CONFIG_BLK_DEV_BSGLIB is not set ++# CONFIG_BLK_DEV_INTEGRITY is not set ++# CONFIG_BLK_DEV_ZONED is not set ++# CONFIG_BLK_WBT is not set ++# CONFIG_BLK_SED_OPAL is not set ++# CONFIG_BLK_INLINE_ENCRYPTION is not set ++ ++# ++# Partition Types ++# ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_ACORN_PARTITION is not set ++# CONFIG_AIX_PARTITION is not set ++# CONFIG_OSF_PARTITION is not set ++# CONFIG_AMIGA_PARTITION is not set ++# CONFIG_ATARI_PARTITION is not set ++# CONFIG_MAC_PARTITION is not set ++CONFIG_MSDOS_PARTITION=y ++# CONFIG_BSD_DISKLABEL is not set ++# CONFIG_MINIX_SUBPARTITION is not set ++# CONFIG_SOLARIS_X86_PARTITION is not set ++# CONFIG_UNIXWARE_DISKLABEL is not set ++# CONFIG_LDM_PARTITION is not set ++# CONFIG_SGI_PARTITION is not set ++# CONFIG_ULTRIX_PARTITION is not set ++# CONFIG_SUN_PARTITION is not set ++# CONFIG_KARMA_PARTITION is not set ++CONFIG_EFI_PARTITION=y ++# CONFIG_SYSV68_PARTITION is not set ++CONFIG_CMDLINE_PARTITION=y ++# end of Partition Types ++ ++CONFIG_BLK_PM=y ++ ++# ++# IO Schedulers ++# ++CONFIG_MQ_IOSCHED_DEADLINE=y ++CONFIG_MQ_IOSCHED_KYBER=y ++# CONFIG_IOSCHED_BFQ is not set ++# end of IO Schedulers ++ ++CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y ++CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_LOCK=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_INLINE_READ_LOCK=y ++CONFIG_ARCH_INLINE_READ_LOCK_BH=y ++CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_READ_UNLOCK=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_INLINE_WRITE_LOCK=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y ++CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_SPIN_TRYLOCK=y ++CONFIG_INLINE_SPIN_TRYLOCK_BH=y ++CONFIG_INLINE_SPIN_LOCK=y ++CONFIG_INLINE_SPIN_LOCK_BH=y ++CONFIG_INLINE_SPIN_LOCK_IRQ=y ++CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y ++CONFIG_INLINE_SPIN_UNLOCK_BH=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQ=y ++CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_READ_LOCK=y ++CONFIG_INLINE_READ_LOCK_BH=y ++CONFIG_INLINE_READ_LOCK_IRQ=y ++CONFIG_INLINE_READ_LOCK_IRQSAVE=y ++CONFIG_INLINE_READ_UNLOCK=y ++CONFIG_INLINE_READ_UNLOCK_BH=y ++CONFIG_INLINE_READ_UNLOCK_IRQ=y ++CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y ++CONFIG_INLINE_WRITE_LOCK=y ++CONFIG_INLINE_WRITE_LOCK_BH=y ++CONFIG_INLINE_WRITE_LOCK_IRQ=y ++CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y ++CONFIG_INLINE_WRITE_UNLOCK=y ++CONFIG_INLINE_WRITE_UNLOCK_BH=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQ=y ++CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y ++CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y ++CONFIG_MUTEX_SPIN_ON_OWNER=y ++CONFIG_RWSEM_SPIN_ON_OWNER=y ++CONFIG_LOCK_SPIN_ON_OWNER=y ++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y ++CONFIG_QUEUED_SPINLOCKS=y ++CONFIG_ARCH_USE_QUEUED_RWLOCKS=y ++CONFIG_QUEUED_RWLOCKS=y ++CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y ++CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y ++ ++# ++# 
Executable file formats ++# ++CONFIG_BINFMT_ELF=y ++CONFIG_ARCH_BINFMT_ELF_STATE=y ++CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y ++CONFIG_ARCH_HAVE_ELF_PROT=y ++CONFIG_ARCH_USE_GNU_PROPERTY=y ++CONFIG_ELFCORE=y ++CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y ++CONFIG_BINFMT_SCRIPT=y ++# CONFIG_BINFMT_MISC is not set ++CONFIG_COREDUMP=y ++# end of Executable file formats ++ ++# ++# Memory Management options ++# ++# CONFIG_SWAP is not set ++ ++# ++# SLAB allocator options ++# ++# CONFIG_SLAB_DEPRECATED is not set ++CONFIG_SLUB=y ++# CONFIG_SLUB_TINY is not set ++CONFIG_SLAB_MERGE_DEFAULT=y ++# CONFIG_SLAB_FREELIST_RANDOM is not set ++# CONFIG_SLAB_FREELIST_HARDENED is not set ++# CONFIG_SLUB_STATS is not set ++CONFIG_SLUB_CPU_PARTIAL=y ++# CONFIG_RANDOM_KMALLOC_CACHES is not set ++# end of SLAB allocator options ++ ++# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set ++# CONFIG_COMPAT_BRK is not set ++CONFIG_SPARSEMEM=y ++CONFIG_SPARSEMEM_EXTREME=y ++CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y ++CONFIG_SPARSEMEM_VMEMMAP=y ++CONFIG_HAVE_FAST_GUP=y ++CONFIG_ARCH_KEEP_MEMBLOCK=y ++CONFIG_MEMORY_ISOLATION=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y ++CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y ++# CONFIG_MEMORY_HOTPLUG is not set ++CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y ++CONFIG_SPLIT_PTLOCK_CPUS=4 ++CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y ++CONFIG_COMPACTION=y ++CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 ++# CONFIG_PAGE_REPORTING is not set ++CONFIG_MIGRATION=y ++CONFIG_CONTIG_ALLOC=y ++CONFIG_PCP_BATCH_SCALE_MAX=5 ++CONFIG_PHYS_ADDR_T_64BIT=y ++CONFIG_KSM=y ++CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 ++CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y ++# CONFIG_MEMORY_FAILURE is not set ++CONFIG_ARCH_WANTS_THP_SWAP=y ++# CONFIG_TRANSPARENT_HUGEPAGE is not set ++CONFIG_CMA=y ++# CONFIG_CMA_DEBUG is not set ++# CONFIG_CMA_SYSFS is not set ++CONFIG_CMA_AREAS=7 ++CONFIG_GENERIC_EARLY_IOREMAP=y ++# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set ++# CONFIG_IDLE_PAGE_TRACKING is not set ++CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y ++CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y ++CONFIG_ARCH_HAS_PTE_DEVMAP=y ++CONFIG_ARCH_HAS_ZONE_DMA_SET=y ++CONFIG_ZONE_DMA=y ++CONFIG_ZONE_DMA32=y ++CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y ++CONFIG_ARCH_USES_PG_ARCH_X=y ++CONFIG_VM_EVENT_COUNTERS=y ++# CONFIG_PERCPU_STATS is not set ++ ++# ++# GUP_TEST needs to have DEBUG_FS enabled ++# ++# CONFIG_DMAPOOL_TEST is not set ++CONFIG_ARCH_HAS_PTE_SPECIAL=y ++CONFIG_MEMFD_CREATE=y ++CONFIG_SECRETMEM=y ++# CONFIG_ANON_VMA_NAME is not set ++CONFIG_USERFAULTFD=y ++CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y ++# CONFIG_LRU_GEN is not set ++CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y ++CONFIG_PER_VMA_LOCK=y ++CONFIG_LOCK_MM_AND_FIND_VMA=y ++ ++# ++# Data Access Monitoring ++# ++# CONFIG_DAMON is not set ++# end of Data Access Monitoring ++# end of Memory Management options ++ ++CONFIG_NET=y ++CONFIG_NET_INGRESS=y ++CONFIG_NET_EGRESS=y ++CONFIG_NET_XGRESS=y ++ ++# ++# Networking options ++# ++CONFIG_PACKET=y ++# CONFIG_PACKET_DIAG is not set ++CONFIG_UNIX=y ++CONFIG_UNIX_SCM=y ++CONFIG_AF_UNIX_OOB=y ++# CONFIG_UNIX_DIAG is not set ++# CONFIG_TLS is not set ++# CONFIG_XFRM_USER is not set ++# CONFIG_NET_KEY is not set ++# CONFIG_XDP_SOCKETS is not set ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++# CONFIG_IP_ADVANCED_ROUTER is not set ++# CONFIG_IP_PNP is not set ++# CONFIG_NET_IPIP is not set ++# CONFIG_NET_IPGRE_DEMUX is not set ++CONFIG_NET_IP_TUNNEL=m ++# CONFIG_IP_MROUTE is not set ++# CONFIG_SYN_COOKIES is not set ++# CONFIG_NET_IPVTI is not set ++# CONFIG_NET_FOU is not set ++# CONFIG_NET_FOU_IP_TUNNELS is not set ++# 
CONFIG_INET_AH is not set ++# CONFIG_INET_ESP is not set ++# CONFIG_INET_IPCOMP is not set ++CONFIG_INET_TABLE_PERTURB_ORDER=16 ++CONFIG_INET_TUNNEL=m ++CONFIG_INET_DIAG=y ++CONFIG_INET_TCP_DIAG=y ++# CONFIG_INET_UDP_DIAG is not set ++# CONFIG_INET_RAW_DIAG is not set ++# CONFIG_INET_DIAG_DESTROY is not set ++# CONFIG_TCP_CONG_ADVANCED is not set ++CONFIG_TCP_CONG_CUBIC=y ++CONFIG_DEFAULT_TCP_CONG="cubic" ++# CONFIG_TCP_MD5SIG is not set ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++# CONFIG_IPV6_ROUTE_INFO is not set ++# CONFIG_IPV6_OPTIMISTIC_DAD is not set ++# CONFIG_INET6_AH is not set ++# CONFIG_INET6_ESP is not set ++# CONFIG_INET6_IPCOMP is not set ++# CONFIG_IPV6_MIP6 is not set ++# CONFIG_IPV6_ILA is not set ++# CONFIG_IPV6_VTI is not set ++CONFIG_IPV6_SIT=m ++# CONFIG_IPV6_SIT_6RD is not set ++CONFIG_IPV6_NDISC_NODETYPE=y ++# CONFIG_IPV6_TUNNEL is not set ++# CONFIG_IPV6_MULTIPLE_TABLES is not set ++# CONFIG_IPV6_MROUTE is not set ++# CONFIG_IPV6_SEG6_LWTUNNEL is not set ++# CONFIG_IPV6_SEG6_HMAC is not set ++# CONFIG_IPV6_RPL_LWTUNNEL is not set ++# CONFIG_IPV6_IOAM6_LWTUNNEL is not set ++# CONFIG_MPTCP is not set ++# CONFIG_NETWORK_SECMARK is not set ++CONFIG_NET_PTP_CLASSIFY=y ++# CONFIG_NETWORK_PHY_TIMESTAMPING is not set ++CONFIG_NETFILTER=y ++CONFIG_NETFILTER_ADVANCED=y ++ ++# ++# Core Netfilter Configuration ++# ++CONFIG_NETFILTER_INGRESS=y ++# CONFIG_NETFILTER_EGRESS is not set ++CONFIG_NETFILTER_BPF_LINK=y ++# CONFIG_NETFILTER_NETLINK_ACCT is not set ++# CONFIG_NETFILTER_NETLINK_QUEUE is not set ++# CONFIG_NETFILTER_NETLINK_LOG is not set ++# CONFIG_NETFILTER_NETLINK_OSF is not set ++# CONFIG_NF_CONNTRACK is not set ++# CONFIG_NF_LOG_SYSLOG is not set ++# CONFIG_NF_TABLES is not set ++# CONFIG_NETFILTER_XTABLES is not set ++# end of Core Netfilter Configuration ++ ++# CONFIG_IP_SET is not set ++# CONFIG_IP_VS is not set ++ ++# ++# IP: Netfilter Configuration ++# ++# CONFIG_NF_SOCKET_IPV4 is not set ++# CONFIG_NF_TPROXY_IPV4 is not set ++# CONFIG_NF_DUP_IPV4 is not set ++# CONFIG_NF_LOG_ARP is not set ++# CONFIG_NF_LOG_IPV4 is not set ++# CONFIG_NF_REJECT_IPV4 is not set ++# CONFIG_IP_NF_IPTABLES is not set ++# CONFIG_IP_NF_ARPTABLES is not set ++# end of IP: Netfilter Configuration ++ ++# ++# IPv6: Netfilter Configuration ++# ++# CONFIG_NF_SOCKET_IPV6 is not set ++# CONFIG_NF_TPROXY_IPV6 is not set ++# CONFIG_NF_DUP_IPV6 is not set ++# CONFIG_NF_REJECT_IPV6 is not set ++# CONFIG_NF_LOG_IPV6 is not set ++# CONFIG_IP6_NF_IPTABLES is not set ++# end of IPv6: Netfilter Configuration ++ ++# CONFIG_BPFILTER is not set ++# CONFIG_IP_DCCP is not set ++# CONFIG_IP_SCTP is not set ++# CONFIG_RDS is not set ++# CONFIG_TIPC is not set ++# CONFIG_ATM is not set ++# CONFIG_L2TP is not set ++# CONFIG_BRIDGE is not set ++# CONFIG_NET_DSA is not set ++# CONFIG_VLAN_8021Q is not set ++# CONFIG_LLC2 is not set ++# CONFIG_ATALK is not set ++# CONFIG_X25 is not set ++# CONFIG_LAPB is not set ++# CONFIG_PHONET is not set ++# CONFIG_6LOWPAN is not set ++# CONFIG_IEEE802154 is not set ++# CONFIG_NET_SCHED is not set ++# CONFIG_DCB is not set ++# CONFIG_DNS_RESOLVER is not set ++# CONFIG_BATMAN_ADV is not set ++# CONFIG_OPENVSWITCH is not set ++# CONFIG_VSOCKETS is not set ++# CONFIG_NETLINK_DIAG is not set ++# CONFIG_MPLS is not set ++# CONFIG_NET_NSH is not set ++# CONFIG_HSR is not set ++# CONFIG_NET_SWITCHDEV is not set ++# CONFIG_NET_L3_MASTER_DEV is not set ++# CONFIG_QRTR is not set ++# CONFIG_NET_NCSI is not set ++CONFIG_PCPU_DEV_REFCNT=y ++CONFIG_MAX_SKB_FRAGS=17 ++CONFIG_RPS=y 
++CONFIG_RFS_ACCEL=y ++CONFIG_SOCK_RX_QUEUE_MAPPING=y ++CONFIG_XPS=y ++CONFIG_NET_RX_BUSY_POLL=y ++CONFIG_BQL=y ++CONFIG_NET_FLOW_LIMIT=y ++ ++# ++# Network testing ++# ++# CONFIG_NET_PKTGEN is not set ++# end of Network testing ++# end of Networking options ++ ++# CONFIG_HAMRADIO is not set ++# CONFIG_CAN is not set ++# CONFIG_BT is not set ++# CONFIG_AF_RXRPC is not set ++# CONFIG_AF_KCM is not set ++# CONFIG_MCTP is not set ++CONFIG_WIRELESS=y ++# CONFIG_CFG80211 is not set ++ ++# ++# CFG80211 needs to be enabled for MAC80211 ++# ++CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 ++# CONFIG_RFKILL is not set ++# CONFIG_NET_9P is not set ++# CONFIG_CAIF is not set ++# CONFIG_CEPH_LIB is not set ++# CONFIG_NFC is not set ++# CONFIG_PSAMPLE is not set ++# CONFIG_NET_IFE is not set ++# CONFIG_LWTUNNEL is not set ++CONFIG_DST_CACHE=y ++CONFIG_GRO_CELLS=y ++CONFIG_NET_SELFTESTS=y ++CONFIG_NET_SOCK_MSG=y ++CONFIG_PAGE_POOL=y ++# CONFIG_PAGE_POOL_STATS is not set ++# CONFIG_FAILOVER is not set ++# CONFIG_ETHTOOL_NETLINK is not set ++ ++# ++# Device Drivers ++# ++CONFIG_ARM_AMBA=y ++CONFIG_HAVE_PCI=y ++# CONFIG_PCI is not set ++# CONFIG_PCCARD is not set ++ ++# ++# Generic Driver Options ++# ++CONFIG_UEVENT_HELPER=y ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++# CONFIG_DEVTMPFS_SAFE is not set ++CONFIG_STANDALONE=y ++# CONFIG_PREVENT_FIRMWARE_BUILD is not set ++ ++# ++# Firmware loader ++# ++CONFIG_FW_LOADER=y ++CONFIG_EXTRA_FIRMWARE="" ++# CONFIG_FW_LOADER_USER_HELPER is not set ++# CONFIG_FW_LOADER_COMPRESS is not set ++# CONFIG_FW_UPLOAD is not set ++# end of Firmware loader ++ ++CONFIG_ALLOW_DEV_COREDUMP=y ++# CONFIG_DEBUG_DRIVER is not set ++# CONFIG_DEBUG_DEVRES is not set ++# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set ++# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set ++CONFIG_GENERIC_CPU_AUTOPROBE=y ++CONFIG_GENERIC_CPU_VULNERABILITIES=y ++CONFIG_SOC_BUS=y ++CONFIG_REGMAP=y ++CONFIG_REGMAP_MMIO=y ++CONFIG_GENERIC_ARCH_TOPOLOGY=y ++# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set ++# end of Generic Driver Options ++ ++# ++# Bus devices ++# ++# CONFIG_BRCMSTB_GISB_ARB is not set ++# CONFIG_MOXTET is not set ++# CONFIG_VEXPRESS_CONFIG is not set ++# CONFIG_MHI_BUS is not set ++# CONFIG_MHI_BUS_EP is not set ++# end of Bus devices ++ ++# ++# Cache Drivers ++# ++# end of Cache Drivers ++ ++# CONFIG_CONNECTOR is not set ++ ++# ++# Firmware Drivers ++# ++ ++# ++# ARM System Control and Management Interface Protocol ++# ++# CONFIG_ARM_SCMI_PROTOCOL is not set ++# end of ARM System Control and Management Interface Protocol ++ ++# CONFIG_FIRMWARE_MEMMAP is not set ++# CONFIG_ARM_FFA_TRANSPORT is not set ++# CONFIG_GOOGLE_FIRMWARE is not set ++CONFIG_ARM_PSCI_FW=y ++CONFIG_HAVE_ARM_SMCCC=y ++CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y ++CONFIG_ARM_SMCCC_SOC_ID=y ++ ++# ++# Tegra firmware driver ++# ++# end of Tegra firmware driver ++# end of Firmware Drivers ++ ++# CONFIG_GNSS is not set ++CONFIG_MTD=y ++# CONFIG_MTD_TESTS is not set ++ ++# ++# Partition parsers ++# ++# CONFIG_MTD_AR7_PARTS is not set ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_OF_PARTS=y ++# CONFIG_MTD_AFS_PARTS is not set ++# CONFIG_MTD_REDBOOT_PARTS is not set ++# end of Partition parsers ++ ++# ++# User Modules And Translation Layers ++# ++CONFIG_MTD_BLKDEVS=y ++CONFIG_MTD_BLOCK=y ++ ++# ++# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
++# ++# CONFIG_FTL is not set ++# CONFIG_NFTL is not set ++# CONFIG_INFTL is not set ++# CONFIG_RFD_FTL is not set ++# CONFIG_SSFDC is not set ++# CONFIG_SM_FTL is not set ++# CONFIG_MTD_OOPS is not set ++# CONFIG_MTD_PARTITIONED_MASTER is not set ++ ++# ++# RAM/ROM/Flash chip drivers ++# ++# CONFIG_MTD_CFI is not set ++# CONFIG_MTD_JEDECPROBE is not set ++CONFIG_MTD_MAP_BANK_WIDTH_1=y ++CONFIG_MTD_MAP_BANK_WIDTH_2=y ++CONFIG_MTD_MAP_BANK_WIDTH_4=y ++CONFIG_MTD_CFI_I1=y ++CONFIG_MTD_CFI_I2=y ++# CONFIG_MTD_RAM is not set ++# CONFIG_MTD_ROM is not set ++# CONFIG_MTD_ABSENT is not set ++# end of RAM/ROM/Flash chip drivers ++ ++# ++# Mapping drivers for chip access ++# ++# CONFIG_MTD_COMPLEX_MAPPINGS is not set ++# CONFIG_MTD_PLATRAM is not set ++# end of Mapping drivers for chip access ++ ++# ++# Self-contained MTD device drivers ++# ++# CONFIG_MTD_DATAFLASH is not set ++# CONFIG_MTD_MCHP23K256 is not set ++# CONFIG_MTD_MCHP48L640 is not set ++# CONFIG_MTD_SST25L is not set ++# CONFIG_MTD_SLRAM is not set ++# CONFIG_MTD_PHRAM is not set ++# CONFIG_MTD_MTDRAM is not set ++CONFIG_MTD_BLOCK2MTD=y ++ ++# ++# Disk-On-Chip Device Drivers ++# ++# CONFIG_MTD_DOCG3 is not set ++# end of Self-contained MTD device drivers ++ ++# ++# NAND ++# ++CONFIG_MTD_NAND_CORE=y ++# CONFIG_MTD_ONENAND is not set ++CONFIG_MTD_SPI_NAND_BSP=y ++# CONFIG_BSP_NAND_ECC_STATUS_REPORT is not set ++# CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 is not set ++# CONFIG_MTD_SPI_NAND_FMC100 is not set ++CONFIG_MTD_RAW_NAND=y ++ ++# ++# Raw/parallel NAND flash controllers ++# ++# CONFIG_MTD_NAND_DENALI_DT is not set ++# CONFIG_MTD_NAND_BRCMNAND is not set ++# CONFIG_MTD_NAND_MXIC is not set ++# CONFIG_MTD_NAND_GPIO is not set ++# CONFIG_MTD_NAND_PLATFORM is not set ++# CONFIG_MTD_NAND_CADENCE is not set ++# CONFIG_MTD_NAND_ARASAN is not set ++# CONFIG_MTD_NAND_INTEL_LGM is not set ++ ++# ++# Misc ++# ++# CONFIG_MTD_NAND_NANDSIM is not set ++# CONFIG_MTD_NAND_DISKONCHIP is not set ++# CONFIG_MTD_SPI_NAND is not set ++ ++# ++# ECC engine support ++# ++CONFIG_MTD_NAND_ECC=y ++CONFIG_MTD_NAND_ECC_SW_HAMMING=y ++# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set ++# CONFIG_MTD_NAND_ECC_SW_BCH is not set ++# CONFIG_MTD_NAND_ECC_MXIC is not set ++# end of ECC engine support ++# end of NAND ++ ++# ++# LPDDR & LPDDR2 PCM memory drivers ++# ++# CONFIG_MTD_LPDDR is not set ++# end of LPDDR & LPDDR2 PCM memory drivers ++ ++CONFIG_MTD_SPI_NOR=y ++CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y ++# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set ++CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y ++# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set ++CONFIG_SPI_BSP_SFC=y ++# CONFIG_MTD_SPI_IDS is not set ++CONFIG_CLOSE_SPI_8PIN_4IO=y ++CONFIG_BSP_SPI_BLOCK_PROTECT=y ++CONFIG_MTD_UBI=y ++CONFIG_MTD_UBI_WL_THRESHOLD=4096 ++CONFIG_MTD_UBI_BEB_LIMIT=20 ++# CONFIG_MTD_UBI_FASTMAP is not set ++# CONFIG_MTD_UBI_GLUEBI is not set ++# CONFIG_MTD_UBI_BLOCK is not set ++# CONFIG_MTD_HYPERBUS is not set ++CONFIG_DTC=y ++CONFIG_OF=y ++# CONFIG_OF_UNITTEST is not set ++CONFIG_OF_FLATTREE=y ++CONFIG_OF_EARLY_FLATTREE=y ++CONFIG_OF_KOBJ=y ++CONFIG_OF_ADDRESS=y ++CONFIG_OF_IRQ=y ++CONFIG_OF_RESERVED_MEM=y ++# CONFIG_OF_OVERLAY is not set ++# CONFIG_PARPORT is not set ++# CONFIG_BLK_DEV is not set ++ ++# ++# NVME Support ++# ++# CONFIG_NVME_FC is not set ++# CONFIG_NVME_TCP is not set ++# CONFIG_NVME_TARGET is not set ++# end of NVME Support ++ ++# ++# Misc devices ++# ++# CONFIG_AD525X_DPOT is not set ++# CONFIG_DUMMY_IRQ is not set ++# CONFIG_ICS932S401 is not set ++# CONFIG_ENCLOSURE_SERVICES is not set 
++# CONFIG_APDS9802ALS is not set ++# CONFIG_ISL29003 is not set ++# CONFIG_ISL29020 is not set ++# CONFIG_SENSORS_TSL2550 is not set ++# CONFIG_SENSORS_BH1770 is not set ++# CONFIG_SENSORS_APDS990X is not set ++# CONFIG_HMC6352 is not set ++# CONFIG_DS1682 is not set ++# CONFIG_LATTICE_ECP3_CONFIG is not set ++# CONFIG_SRAM is not set ++# CONFIG_XILINX_SDFEC is not set ++# CONFIG_OPEN_DICE is not set ++# CONFIG_VCPU_STALL_DETECTOR is not set ++# CONFIG_C2PORT is not set ++ ++# ++# EEPROM support ++# ++# CONFIG_EEPROM_AT24 is not set ++# CONFIG_EEPROM_AT25 is not set ++# CONFIG_EEPROM_LEGACY is not set ++# CONFIG_EEPROM_MAX6875 is not set ++# CONFIG_EEPROM_93CX6 is not set ++# CONFIG_EEPROM_93XX46 is not set ++# CONFIG_EEPROM_IDT_89HPESX is not set ++# CONFIG_EEPROM_EE1004 is not set ++# end of EEPROM support ++ ++# ++# Texas Instruments shared transport line discipline ++# ++# CONFIG_TI_ST is not set ++# end of Texas Instruments shared transport line discipline ++ ++# CONFIG_SENSORS_LIS3_SPI is not set ++# CONFIG_SENSORS_LIS3_I2C is not set ++# CONFIG_ALTERA_STAPL is not set ++# CONFIG_ECHO is not set ++# CONFIG_PVPANIC is not set ++# end of Misc devices ++ ++# ++# SCSI device support ++# ++CONFIG_SCSI_MOD=y ++# CONFIG_RAID_ATTRS is not set ++CONFIG_SCSI_COMMON=y ++CONFIG_SCSI=y ++CONFIG_SCSI_DMA=y ++CONFIG_SCSI_PROC_FS=y ++ ++# ++# SCSI support type (disk, tape, CD-ROM) ++# ++CONFIG_BLK_DEV_SD=y ++# CONFIG_CHR_DEV_ST is not set ++# CONFIG_CHR_DEV_SG is not set ++CONFIG_BLK_DEV_BSG=y ++# CONFIG_CHR_DEV_SCH is not set ++# CONFIG_SCSI_CONSTANTS is not set ++# CONFIG_SCSI_LOGGING is not set ++# CONFIG_SCSI_SCAN_ASYNC is not set ++ ++# ++# SCSI Transports ++# ++# CONFIG_SCSI_SPI_ATTRS is not set ++# CONFIG_SCSI_FC_ATTRS is not set ++# CONFIG_SCSI_ISCSI_ATTRS is not set ++# CONFIG_SCSI_SAS_ATTRS is not set ++# CONFIG_SCSI_SAS_LIBSAS is not set ++# CONFIG_SCSI_SRP_ATTRS is not set ++# end of SCSI Transports ++ ++CONFIG_SCSI_LOWLEVEL=y ++# CONFIG_ISCSI_TCP is not set ++# CONFIG_ISCSI_BOOT_SYSFS is not set ++# CONFIG_SCSI_DEBUG is not set ++# CONFIG_SCSI_DH is not set ++# end of SCSI device support ++ ++# CONFIG_ATA is not set ++# CONFIG_MD is not set ++# CONFIG_TARGET_CORE is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_CORE=y ++# CONFIG_BONDING is not set ++# CONFIG_DUMMY is not set ++# CONFIG_WIREGUARD is not set ++# CONFIG_EQUALIZER is not set ++# CONFIG_NET_TEAM is not set ++# CONFIG_MACVLAN is not set ++# CONFIG_IPVLAN is not set ++# CONFIG_VXLAN is not set ++# CONFIG_GENEVE is not set ++# CONFIG_BAREUDP is not set ++# CONFIG_GTP is not set ++# CONFIG_AMT is not set ++# CONFIG_MACSEC is not set ++# CONFIG_NETCONSOLE is not set ++# CONFIG_TUN is not set ++# CONFIG_TUN_VNET_CROSS_LE is not set ++# CONFIG_VETH is not set ++# CONFIG_NLMON is not set ++CONFIG_ETHERNET=y ++# CONFIG_NET_VENDOR_ALACRITECH is not set ++# CONFIG_ALTERA_TSE is not set ++# CONFIG_NET_VENDOR_AMAZON is not set ++# CONFIG_NET_VENDOR_AMD is not set ++# CONFIG_NET_VENDOR_AQUANTIA is not set ++# CONFIG_NET_VENDOR_ARC is not set ++# CONFIG_NET_VENDOR_ASIX is not set ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CADENCE is not set ++# CONFIG_NET_VENDOR_CAVIUM is not set ++# CONFIG_NET_VENDOR_CORTINA is not set ++# CONFIG_NET_VENDOR_DAVICOM is not set ++# CONFIG_DNET is not set ++# CONFIG_NET_VENDOR_ENGLEDER is not set ++# CONFIG_NET_VENDOR_EZCHIP is not set ++# CONFIG_NET_VENDOR_FUNGIBLE is not set ++# CONFIG_NET_VENDOR_GOOGLE is not set ++# CONFIG_NET_VENDOR_HISILICON is not set ++# CONFIG_NET_VENDOR_HUAWEI is 
not set ++# CONFIG_NET_VENDOR_INTEL is not set ++CONFIG_NET_VENDOR_BSP=y ++CONFIG_ETH_GMAC=y ++# CONFIG_GMAC is not set ++# CONFIG_GMAC_HAS_INTERNAL_PHY is not set ++CONFIG_RX_FLOW_CTRL_SUPPORT=y ++CONFIG_TX_FLOW_CTRL_SUPPORT=y ++CONFIG_TX_FLOW_CTRL_PAUSE_TIME=0xFFFF ++CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL=0xFFFF ++CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD=16 ++CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD=32 ++# CONFIG_NET_VENDOR_ADI is not set ++# CONFIG_NET_VENDOR_LITEX is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MELLANOX is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_MICROSEMI is not set ++# CONFIG_NET_VENDOR_MICROSOFT is not set ++# CONFIG_NET_VENDOR_NI is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_NETRONOME is not set ++# CONFIG_ETHOC is not set ++# CONFIG_NET_VENDOR_PENSANDO is not set ++# CONFIG_NET_VENDOR_QUALCOMM is not set ++# CONFIG_NET_VENDOR_RENESAS is not set ++# CONFIG_NET_VENDOR_ROCKER is not set ++# CONFIG_NET_VENDOR_SAMSUNG is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SOLARFLARE is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_SOCIONEXT is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_NET_VENDOR_SYNOPSYS is not set ++# CONFIG_NET_VENDOR_VERTEXCOM is not set ++# CONFIG_NET_VENDOR_VIA is not set ++# CONFIG_NET_VENDOR_WANGXUN is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++# CONFIG_NET_VENDOR_XILINX is not set ++CONFIG_PHYLIB=y ++CONFIG_SWPHY=y ++CONFIG_FIXED_PHY=y ++ ++# ++# MII PHY device drivers ++# ++# CONFIG_AMD_PHY is not set ++# CONFIG_ADIN_PHY is not set ++# CONFIG_ADIN1100_PHY is not set ++# CONFIG_AQUANTIA_PHY is not set ++# CONFIG_AX88796B_PHY is not set ++# CONFIG_BROADCOM_PHY is not set ++# CONFIG_BCM54140_PHY is not set ++# CONFIG_BCM7XXX_PHY is not set ++# CONFIG_BCM84881_PHY is not set ++# CONFIG_BCM87XX_PHY is not set ++# CONFIG_CICADA_PHY is not set ++# CONFIG_CORTINA_PHY is not set ++# CONFIG_DAVICOM_PHY is not set ++# CONFIG_ICPLUS_PHY is not set ++# CONFIG_LXT_PHY is not set ++# CONFIG_INTEL_XWAY_PHY is not set ++# CONFIG_LSI_ET1011C_PHY is not set ++# CONFIG_MARVELL_PHY is not set ++# CONFIG_MARVELL_10G_PHY is not set ++# CONFIG_MARVELL_88Q2XXX_PHY is not set ++# CONFIG_MARVELL_88X2222_PHY is not set ++# CONFIG_MAXLINEAR_GPHY is not set ++# CONFIG_MEDIATEK_GE_PHY is not set ++# CONFIG_MICREL_PHY is not set ++# CONFIG_MICROCHIP_T1S_PHY is not set ++# CONFIG_MICROCHIP_PHY is not set ++# CONFIG_MICROCHIP_T1_PHY is not set ++# CONFIG_MICROSEMI_PHY is not set ++# CONFIG_MOTORCOMM_PHY is not set ++# CONFIG_NATIONAL_PHY is not set ++# CONFIG_NXP_CBTX_PHY is not set ++# CONFIG_NXP_C45_TJA11XX_PHY is not set ++# CONFIG_NCN26000_PHY is not set ++# CONFIG_QSEMI_PHY is not set ++# CONFIG_REALTEK_PHY is not set ++# CONFIG_RENESAS_PHY is not set ++# CONFIG_ROCKCHIP_PHY is not set ++# CONFIG_SMSC_PHY is not set ++# CONFIG_STE10XP is not set ++# CONFIG_TERANETICS_PHY is not set ++# CONFIG_DP83822_PHY is not set ++# CONFIG_DP83TC811_PHY is not set ++# CONFIG_DP83848_PHY is not set ++# CONFIG_DP83867_PHY is not set ++# CONFIG_DP83869_PHY is not set ++# CONFIG_DP83TD510_PHY is not set ++# CONFIG_VITESSE_PHY is not set ++# CONFIG_XILINX_GMII2RGMII is not set ++CONFIG_MDIO_BSP_GEMAC=y ++# CONFIG_MICREL_KS8995MA is not set ++# CONFIG_PSE_CONTROLLER is not set ++CONFIG_MDIO_DEVICE=y ++CONFIG_MDIO_BUS=y ++CONFIG_FWNODE_MDIO=y ++CONFIG_OF_MDIO=y ++CONFIG_MDIO_DEVRES=y ++# CONFIG_MDIO_BITBANG is not 
set ++# CONFIG_MDIO_BCM_UNIMAC is not set ++# CONFIG_MDIO_HISI_FEMAC is not set ++# CONFIG_MDIO_MSCC_MIIM is not set ++# CONFIG_MDIO_OCTEON is not set ++# CONFIG_MDIO_IPQ4019 is not set ++# CONFIG_MDIO_IPQ8064 is not set ++ ++# ++# MDIO Multiplexers ++# ++# CONFIG_MDIO_BUS_MUX_GPIO is not set ++# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set ++# CONFIG_MDIO_BUS_MUX_MMIOREG is not set ++ ++# ++# PCS device drivers ++# ++# end of PCS device drivers ++ ++# CONFIG_PPP is not set ++# CONFIG_SLIP is not set ++ ++# ++# Host-side USB support is needed for USB Network Adapter support ++# ++CONFIG_WLAN=y ++CONFIG_WLAN_VENDOR_ADMTEK=y ++CONFIG_WLAN_VENDOR_ATH=y ++# CONFIG_ATH_DEBUG is not set ++CONFIG_WLAN_VENDOR_ATMEL=y ++CONFIG_WLAN_VENDOR_BROADCOM=y ++CONFIG_WLAN_VENDOR_CISCO=y ++CONFIG_WLAN_VENDOR_INTEL=y ++CONFIG_WLAN_VENDOR_INTERSIL=y ++# CONFIG_HOSTAP is not set ++CONFIG_WLAN_VENDOR_MARVELL=y ++CONFIG_WLAN_VENDOR_MEDIATEK=y ++CONFIG_WLAN_VENDOR_MICROCHIP=y ++CONFIG_WLAN_VENDOR_PURELIFI=y ++CONFIG_WLAN_VENDOR_RALINK=y ++CONFIG_WLAN_VENDOR_REALTEK=y ++CONFIG_WLAN_VENDOR_RSI=y ++CONFIG_WLAN_VENDOR_SILABS=y ++CONFIG_WLAN_VENDOR_ST=y ++CONFIG_WLAN_VENDOR_TI=y ++CONFIG_WLAN_VENDOR_ZYDAS=y ++CONFIG_WLAN_VENDOR_QUANTENNA=y ++# CONFIG_WAN is not set ++ ++# ++# Wireless WAN ++# ++# CONFIG_WWAN is not set ++# end of Wireless WAN ++ ++# CONFIG_NET_FAILOVER is not set ++# CONFIG_ISDN is not set ++ ++# ++# Input device support ++# ++CONFIG_INPUT=y ++# CONFIG_INPUT_FF_MEMLESS is not set ++# CONFIG_INPUT_SPARSEKMAP is not set ++# CONFIG_INPUT_MATRIXKMAP is not set ++ ++# ++# Userland interfaces ++# ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_JOYDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_EVBUG is not set ++ ++# ++# Input Device Drivers ++# ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_INPUT_JOYSTICK is not set ++# CONFIG_INPUT_TABLET is not set ++# CONFIG_INPUT_TOUCHSCREEN is not set ++# CONFIG_INPUT_MISC is not set ++# CONFIG_RMI4_CORE is not set ++ ++# ++# Hardware I/O ports ++# ++# CONFIG_SERIO is not set ++# CONFIG_GAMEPORT is not set ++# end of Hardware I/O ports ++# end of Input device support ++ ++# ++# Character devices ++# ++CONFIG_TTY=y ++CONFIG_VT=y ++CONFIG_CONSOLE_TRANSLATIONS=y ++CONFIG_VT_CONSOLE=y ++CONFIG_HW_CONSOLE=y ++# CONFIG_VT_HW_CONSOLE_BINDING is not set ++CONFIG_UNIX98_PTYS=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_LEGACY_TIOCSTI=y ++CONFIG_LDISC_AUTOLOAD=y ++ ++# ++# Serial drivers ++# ++CONFIG_SERIAL_EARLYCON=y ++# CONFIG_SERIAL_8250 is not set ++ ++# ++# Non-8250 serial port support ++# ++# CONFIG_SERIAL_AMBA_PL010 is not set ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set ++# CONFIG_SERIAL_MAX3100 is not set ++# CONFIG_SERIAL_MAX310X is not set ++# CONFIG_SERIAL_UARTLITE is not set ++CONFIG_SERIAL_CORE=y ++CONFIG_SERIAL_CORE_CONSOLE=y ++# CONFIG_SERIAL_SIFIVE is not set ++# CONFIG_SERIAL_SCCNXP is not set ++# CONFIG_SERIAL_SC16IS7XX is not set ++# CONFIG_SERIAL_ALTERA_JTAGUART is not set ++# CONFIG_SERIAL_ALTERA_UART is not set ++# CONFIG_SERIAL_XILINX_PS_UART is not set ++# CONFIG_SERIAL_ARC is not set ++# CONFIG_SERIAL_FSL_LPUART is not set ++# CONFIG_SERIAL_FSL_LINFLEXUART is not set ++# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set ++# CONFIG_SERIAL_SPRD is not set ++# end of Serial drivers ++ ++# CONFIG_SERIAL_NONSTANDARD is not set ++# CONFIG_N_GSM is not set ++# CONFIG_NULL_TTY is not set ++# CONFIG_HVC_DCC is not set ++# CONFIG_SERIAL_DEV_BUS is not set 
++# CONFIG_TTY_PRINTK is not set ++# CONFIG_VIRTIO_CONSOLE is not set ++# CONFIG_IPMI_HANDLER is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_DEVMEM=y ++CONFIG_DEVPORT=y ++# CONFIG_TCG_TPM is not set ++# CONFIG_XILLYBUS is not set ++# end of Character devices ++ ++# ++# I2C support ++# ++CONFIG_I2C=y ++CONFIG_I2C_BOARDINFO=y ++# CONFIG_I2C_COMPAT is not set ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MUX=y ++ ++# ++# Multiplexer I2C Chip support ++# ++# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set ++# CONFIG_I2C_MUX_GPIO is not set ++# CONFIG_I2C_MUX_GPMUX is not set ++# CONFIG_I2C_MUX_LTC4306 is not set ++# CONFIG_I2C_MUX_PCA9541 is not set ++# CONFIG_I2C_MUX_PCA954x is not set ++# CONFIG_I2C_MUX_PINCTRL is not set ++# CONFIG_I2C_MUX_REG is not set ++# CONFIG_I2C_DEMUX_PINCTRL is not set ++# CONFIG_I2C_MUX_MLXCPLD is not set ++# end of Multiplexer I2C Chip support ++ ++# CONFIG_I2C_HELPER_AUTO is not set ++# CONFIG_I2C_SMBUS is not set ++ ++# ++# I2C Algorithms ++# ++# CONFIG_I2C_ALGOBIT is not set ++# CONFIG_I2C_ALGOPCF is not set ++# CONFIG_I2C_ALGOPCA is not set ++# end of I2C Algorithms ++ ++# ++# I2C Hardware Bus support ++# ++ ++# ++# I2C system bus drivers (mostly embedded / system-on-chip) ++# ++# CONFIG_I2C_CADENCE is not set ++# CONFIG_I2C_CBUS_GPIO is not set ++# CONFIG_I2C_DESIGNWARE_PLATFORM is not set ++# CONFIG_I2C_EMEV2 is not set ++# CONFIG_I2C_GPIO is not set ++CONFIG_I2C_BSP=y ++# CONFIG_I2C_HISI is not set ++# CONFIG_I2C_NOMADIK is not set ++# CONFIG_I2C_OCORES is not set ++# CONFIG_I2C_PCA_PLATFORM is not set ++# CONFIG_I2C_RK3X is not set ++# CONFIG_I2C_SIMTEC is not set ++# CONFIG_I2C_XILINX is not set ++ ++# ++# External I2C/SMBus adapter drivers ++# ++# CONFIG_I2C_TAOS_EVM is not set ++ ++# ++# Other I2C/SMBus bus drivers ++# ++# CONFIG_I2C_VIRTIO is not set ++CONFIG_DMA_MSG_MIN_LEN=5 ++CONFIG_DMA_MSG_MAX_LEN=4090 ++# end of I2C Hardware Bus support ++ ++# CONFIG_I2C_STUB is not set ++# CONFIG_I2C_SLAVE is not set ++# CONFIG_I2C_DEBUG_CORE is not set ++# CONFIG_I2C_DEBUG_ALGO is not set ++# CONFIG_I2C_DEBUG_BUS is not set ++# end of I2C support ++ ++# CONFIG_I3C is not set ++CONFIG_SPI=y ++# CONFIG_SPI_DEBUG is not set ++CONFIG_SPI_MASTER=y ++CONFIG_SPI_MEM=y ++ ++# ++# SPI Master Controller Drivers ++# ++# CONFIG_SPI_ALTERA is not set ++# CONFIG_SPI_AXI_SPI_ENGINE is not set ++# CONFIG_SPI_BITBANG is not set ++# CONFIG_SPI_CADENCE is not set ++# CONFIG_SPI_CADENCE_QUADSPI is not set ++# CONFIG_SPI_CADENCE_XSPI is not set ++# CONFIG_SPI_DESIGNWARE is not set ++# CONFIG_SPI_GPIO is not set ++# CONFIG_SPI_FSL_SPI is not set ++# CONFIG_SPI_MICROCHIP_CORE is not set ++# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set ++# CONFIG_SPI_OC_TINY is not set ++CONFIG_SPI_PL022=y ++# CONFIG_SPI_SC18IS602 is not set ++# CONFIG_SPI_SIFIVE is not set ++# CONFIG_SPI_SN_F_OSPI is not set ++# CONFIG_SPI_MXIC is not set ++# CONFIG_SPI_XCOMM is not set ++# CONFIG_SPI_XILINX is not set ++# CONFIG_SPI_ZYNQMP_GQSPI is not set ++# CONFIG_SPI_AMD is not set ++ ++# ++# SPI Multiplexer support ++# ++# CONFIG_SPI_MUX is not set ++ ++# ++# SPI Protocol Masters ++# ++CONFIG_SPI_SPIDEV=y ++# CONFIG_SPI_LOOPBACK_TEST is not set ++# CONFIG_SPI_TLE62X0 is not set ++# CONFIG_SPI_SLAVE is not set ++# CONFIG_SPMI is not set ++# CONFIG_HSI is not set ++CONFIG_PPS=y ++# CONFIG_PPS_DEBUG is not set ++# CONFIG_NTP_PPS is not set ++ ++# ++# PPS clients support ++# ++# CONFIG_PPS_CLIENT_KTIMER is not set ++# CONFIG_PPS_CLIENT_LDISC is not set ++# CONFIG_PPS_CLIENT_GPIO is not set ++ ++# ++# PPS generators support ++# ++ 
++# ++# PTP clock support ++# ++CONFIG_PTP_1588_CLOCK=y ++CONFIG_PTP_1588_CLOCK_OPTIONAL=y ++ ++# ++# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. ++# ++CONFIG_PTP_1588_CLOCK_KVM=y ++# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set ++# CONFIG_PTP_1588_CLOCK_IDTCM is not set ++# CONFIG_PTP_1588_CLOCK_MOCK is not set ++# end of PTP clock support ++ ++CONFIG_PINCTRL=y ++CONFIG_GENERIC_PINCTRL_GROUPS=y ++CONFIG_PINMUX=y ++CONFIG_GENERIC_PINMUX_FUNCTIONS=y ++CONFIG_PINCONF=y ++CONFIG_GENERIC_PINCONF=y ++# CONFIG_DEBUG_PINCTRL is not set ++# CONFIG_PINCTRL_CY8C95X0 is not set ++# CONFIG_PINCTRL_MCP23S08 is not set ++# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set ++# CONFIG_PINCTRL_OCELOT is not set ++CONFIG_PINCTRL_SINGLE=y ++# CONFIG_PINCTRL_STMFX is not set ++# CONFIG_PINCTRL_SX150X is not set ++ ++# ++# Renesas pinctrl drivers ++# ++# end of Renesas pinctrl drivers ++ ++CONFIG_GPIOLIB=y ++CONFIG_GPIOLIB_FASTPATH_LIMIT=512 ++CONFIG_OF_GPIO=y ++CONFIG_GPIOLIB_IRQCHIP=y ++# CONFIG_DEBUG_GPIO is not set ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_CDEV=y ++CONFIG_GPIO_CDEV_V1=y ++CONFIG_GPIO_GENERIC=y ++ ++# ++# Memory mapped GPIO drivers ++# ++# CONFIG_GPIO_74XX_MMIO is not set ++# CONFIG_GPIO_ALTERA is not set ++# CONFIG_GPIO_CADENCE is not set ++# CONFIG_GPIO_DWAPB is not set ++# CONFIG_GPIO_FTGPIO010 is not set ++CONFIG_GPIO_GENERIC_PLATFORM=y ++# CONFIG_GPIO_GRGPIO is not set ++# CONFIG_GPIO_HISI is not set ++# CONFIG_GPIO_HLWD is not set ++# CONFIG_GPIO_LOGICVC is not set ++# CONFIG_GPIO_MB86S7X is not set ++CONFIG_GPIO_PL061=y ++# CONFIG_GPIO_SIFIVE is not set ++# CONFIG_GPIO_SYSCON is not set ++# CONFIG_GPIO_XGENE is not set ++# CONFIG_GPIO_XILINX is not set ++# CONFIG_GPIO_AMD_FCH is not set ++# end of Memory mapped GPIO drivers ++ ++# ++# I2C GPIO expanders ++# ++# CONFIG_GPIO_ADNP is not set ++# CONFIG_GPIO_FXL6408 is not set ++# CONFIG_GPIO_DS4520 is not set ++# CONFIG_GPIO_GW_PLD is not set ++# CONFIG_GPIO_MAX7300 is not set ++# CONFIG_GPIO_MAX732X is not set ++# CONFIG_GPIO_PCA953X is not set ++# CONFIG_GPIO_PCA9570 is not set ++# CONFIG_GPIO_PCF857X is not set ++# CONFIG_GPIO_TPIC2810 is not set ++# end of I2C GPIO expanders ++ ++# ++# MFD GPIO expanders ++# ++# end of MFD GPIO expanders ++ ++# ++# SPI GPIO expanders ++# ++# CONFIG_GPIO_74X164 is not set ++# CONFIG_GPIO_MAX3191X is not set ++# CONFIG_GPIO_MAX7301 is not set ++# CONFIG_GPIO_MC33880 is not set ++# CONFIG_GPIO_PISOSR is not set ++# CONFIG_GPIO_XRA1403 is not set ++# end of SPI GPIO expanders ++ ++# ++# Virtual GPIO drivers ++# ++# CONFIG_GPIO_AGGREGATOR is not set ++# CONFIG_GPIO_LATCH is not set ++# CONFIG_GPIO_MOCKUP is not set ++# CONFIG_GPIO_SIM is not set ++# end of Virtual GPIO drivers ++ ++# CONFIG_W1 is not set ++CONFIG_POWER_RESET=y ++# CONFIG_POWER_RESET_BRCMSTB is not set ++# CONFIG_POWER_RESET_GPIO is not set ++# CONFIG_POWER_RESET_GPIO_RESTART is not set ++# CONFIG_POWER_RESET_LTC2952 is not set ++# CONFIG_POWER_RESET_RESTART is not set ++# CONFIG_POWER_RESET_XGENE is not set ++# CONFIG_POWER_RESET_SYSCON is not set ++# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set ++# CONFIG_SYSCON_REBOOT_MODE is not set ++# CONFIG_NVMEM_REBOOT_MODE is not set ++CONFIG_POWER_SUPPLY=y ++# CONFIG_POWER_SUPPLY_DEBUG is not set ++# CONFIG_IP5XXX_POWER is not set ++# CONFIG_TEST_POWER is not set ++# CONFIG_CHARGER_ADP5061 is not set ++# CONFIG_BATTERY_CW2015 is not set ++# CONFIG_BATTERY_DS2780 is not set ++# CONFIG_BATTERY_DS2781 is not set ++# CONFIG_BATTERY_DS2782 is not set ++# CONFIG_BATTERY_SAMSUNG_SDI is not 
set ++# CONFIG_BATTERY_SBS is not set ++# CONFIG_CHARGER_SBS is not set ++# CONFIG_MANAGER_SBS is not set ++# CONFIG_BATTERY_BQ27XXX is not set ++# CONFIG_BATTERY_MAX17040 is not set ++# CONFIG_BATTERY_MAX17042 is not set ++# CONFIG_CHARGER_MAX8903 is not set ++# CONFIG_CHARGER_LP8727 is not set ++# CONFIG_CHARGER_GPIO is not set ++# CONFIG_CHARGER_LT3651 is not set ++# CONFIG_CHARGER_LTC4162L is not set ++# CONFIG_CHARGER_DETECTOR_MAX14656 is not set ++# CONFIG_CHARGER_MAX77976 is not set ++# CONFIG_CHARGER_BQ2415X is not set ++# CONFIG_CHARGER_BQ24190 is not set ++# CONFIG_CHARGER_BQ24257 is not set ++# CONFIG_CHARGER_BQ24735 is not set ++# CONFIG_CHARGER_BQ2515X is not set ++# CONFIG_CHARGER_BQ25890 is not set ++# CONFIG_CHARGER_BQ25980 is not set ++# CONFIG_CHARGER_BQ256XX is not set ++# CONFIG_BATTERY_GAUGE_LTC2941 is not set ++# CONFIG_BATTERY_GOLDFISH is not set ++# CONFIG_BATTERY_RT5033 is not set ++# CONFIG_CHARGER_RT9455 is not set ++# CONFIG_CHARGER_BD99954 is not set ++# CONFIG_BATTERY_UG3105 is not set ++# CONFIG_HWMON is not set ++# CONFIG_THERMAL is not set ++# CONFIG_WATCHDOG is not set ++CONFIG_SSB_POSSIBLE=y ++# CONFIG_SSB is not set ++CONFIG_BCMA_POSSIBLE=y ++# CONFIG_BCMA is not set ++ ++# ++# Multifunction device drivers ++# ++CONFIG_MFD_CORE=y ++# CONFIG_MFD_ACT8945A is not set ++# CONFIG_MFD_AS3711 is not set ++# CONFIG_MFD_SMPRO is not set ++# CONFIG_MFD_AS3722 is not set ++# CONFIG_PMIC_ADP5520 is not set ++# CONFIG_MFD_AAT2870_CORE is not set ++# CONFIG_MFD_ATMEL_FLEXCOM is not set ++# CONFIG_MFD_ATMEL_HLCDC is not set ++# CONFIG_MFD_BCM590XX is not set ++# CONFIG_MFD_BD9571MWV is not set ++# CONFIG_MFD_AXP20X_I2C is not set ++# CONFIG_MFD_CS42L43_I2C is not set ++# CONFIG_MFD_MADERA is not set ++# CONFIG_MFD_MAX5970 is not set ++# CONFIG_PMIC_DA903X is not set ++# CONFIG_MFD_DA9052_SPI is not set ++# CONFIG_MFD_DA9052_I2C is not set ++# CONFIG_MFD_DA9055 is not set ++# CONFIG_MFD_DA9062 is not set ++# CONFIG_MFD_DA9063 is not set ++# CONFIG_MFD_DA9150 is not set ++# CONFIG_MFD_GATEWORKS_GSC is not set ++# CONFIG_MFD_MC13XXX_SPI is not set ++# CONFIG_MFD_MC13XXX_I2C is not set ++# CONFIG_MFD_MP2629 is not set ++# CONFIG_MFD_HI6421_PMIC is not set ++CONFIG_MFD_BSP_FMC=y ++# CONFIG_MFD_IQS62X is not set ++# CONFIG_MFD_KEMPLD is not set ++# CONFIG_MFD_88PM800 is not set ++# CONFIG_MFD_88PM805 is not set ++# CONFIG_MFD_88PM860X is not set ++# CONFIG_MFD_MAX14577 is not set ++# CONFIG_MFD_MAX77541 is not set ++# CONFIG_MFD_MAX77620 is not set ++# CONFIG_MFD_MAX77650 is not set ++# CONFIG_MFD_MAX77686 is not set ++# CONFIG_MFD_MAX77693 is not set ++# CONFIG_MFD_MAX77714 is not set ++# CONFIG_MFD_MAX77843 is not set ++# CONFIG_MFD_MAX8907 is not set ++# CONFIG_MFD_MAX8925 is not set ++# CONFIG_MFD_MAX8997 is not set ++# CONFIG_MFD_MAX8998 is not set ++# CONFIG_MFD_MT6360 is not set ++# CONFIG_MFD_MT6370 is not set ++# CONFIG_MFD_MT6397 is not set ++# CONFIG_MFD_MENF21BMC is not set ++# CONFIG_MFD_OCELOT is not set ++# CONFIG_EZX_PCAP is not set ++# CONFIG_MFD_CPCAP is not set ++# CONFIG_MFD_NTXEC is not set ++# CONFIG_MFD_RETU is not set ++# CONFIG_MFD_PCF50633 is not set ++# CONFIG_MFD_SY7636A is not set ++# CONFIG_MFD_RT4831 is not set ++# CONFIG_MFD_RT5033 is not set ++# CONFIG_MFD_RT5120 is not set ++# CONFIG_MFD_RC5T583 is not set ++# CONFIG_MFD_RK8XX_I2C is not set ++# CONFIG_MFD_RK8XX_SPI is not set ++# CONFIG_MFD_RN5T618 is not set ++# CONFIG_MFD_SEC_CORE is not set ++# CONFIG_MFD_SI476X_CORE is not set ++# CONFIG_MFD_SM501 is not set ++# CONFIG_MFD_SKY81452 is 
not set ++# CONFIG_MFD_STMPE is not set ++CONFIG_MFD_SYSCON=y ++# CONFIG_MFD_LP3943 is not set ++# CONFIG_MFD_LP8788 is not set ++# CONFIG_MFD_TI_LMU is not set ++# CONFIG_MFD_PALMAS is not set ++# CONFIG_TPS6105X is not set ++# CONFIG_TPS65010 is not set ++# CONFIG_TPS6507X is not set ++# CONFIG_MFD_TPS65086 is not set ++# CONFIG_MFD_TPS65090 is not set ++# CONFIG_MFD_TPS65217 is not set ++# CONFIG_MFD_TI_LP873X is not set ++# CONFIG_MFD_TI_LP87565 is not set ++# CONFIG_MFD_TPS65218 is not set ++# CONFIG_MFD_TPS65219 is not set ++# CONFIG_MFD_TPS6586X is not set ++# CONFIG_MFD_TPS65910 is not set ++# CONFIG_MFD_TPS65912_I2C is not set ++# CONFIG_MFD_TPS65912_SPI is not set ++# CONFIG_MFD_TPS6594_I2C is not set ++# CONFIG_MFD_TPS6594_SPI is not set ++# CONFIG_TWL4030_CORE is not set ++# CONFIG_TWL6040_CORE is not set ++# CONFIG_MFD_WL1273_CORE is not set ++# CONFIG_MFD_LM3533 is not set ++# CONFIG_MFD_TC3589X is not set ++# CONFIG_MFD_TQMX86 is not set ++# CONFIG_MFD_LOCHNAGAR is not set ++# CONFIG_MFD_ARIZONA_I2C is not set ++# CONFIG_MFD_ARIZONA_SPI is not set ++# CONFIG_MFD_WM8400 is not set ++# CONFIG_MFD_WM831X_I2C is not set ++# CONFIG_MFD_WM831X_SPI is not set ++# CONFIG_MFD_WM8350_I2C is not set ++# CONFIG_MFD_WM8994 is not set ++# CONFIG_MFD_ROHM_BD718XX is not set ++# CONFIG_MFD_ROHM_BD71828 is not set ++# CONFIG_MFD_ROHM_BD957XMUF is not set ++# CONFIG_MFD_STPMIC1 is not set ++# CONFIG_MFD_STMFX is not set ++# CONFIG_MFD_ATC260X_I2C is not set ++# CONFIG_MFD_QCOM_PM8008 is not set ++# CONFIG_MFD_INTEL_M10_BMC_SPI is not set ++# CONFIG_MFD_RSMU_I2C is not set ++# CONFIG_MFD_RSMU_SPI is not set ++# end of Multifunction device drivers ++ ++# CONFIG_REGULATOR is not set ++# CONFIG_RC_CORE is not set ++ ++# ++# CEC support ++# ++# CONFIG_MEDIA_CEC_SUPPORT is not set ++# end of CEC support ++ ++# CONFIG_MEDIA_SUPPORT is not set ++ ++# ++# Graphics support ++# ++CONFIG_VIDEO_CMDLINE=y ++# CONFIG_AUXDISPLAY is not set ++# CONFIG_DRM is not set ++# CONFIG_DRM_DEBUG_MODESET_LOCK is not set ++ ++# ++# Frame buffer Devices ++# ++CONFIG_FB=y ++# CONFIG_FB_ARMCLCD is not set ++# CONFIG_FB_OPENCORES is not set ++# CONFIG_FB_S1D13XXX is not set ++# CONFIG_FB_IBM_GXT4500 is not set ++# CONFIG_FB_VIRTUAL is not set ++# CONFIG_FB_METRONOME is not set ++# CONFIG_FB_SIMPLE is not set ++# CONFIG_FB_SSD1307 is not set ++CONFIG_FB_CORE=y ++CONFIG_FB_NOTIFY=y ++# CONFIG_FIRMWARE_EDID is not set ++CONFIG_FB_DEVICE=y ++# CONFIG_FB_FOREIGN_ENDIAN is not set ++CONFIG_FB_IOMEM_FOPS=y ++# CONFIG_FB_MODE_HELPERS is not set ++# CONFIG_FB_TILEBLITTING is not set ++# end of Frame buffer Devices ++ ++# ++# Backlight & LCD device support ++# ++# CONFIG_LCD_CLASS_DEVICE is not set ++# CONFIG_BACKLIGHT_CLASS_DEVICE is not set ++# end of Backlight & LCD device support ++ ++# ++# Console display driver support ++# ++CONFIG_DUMMY_CONSOLE=y ++CONFIG_DUMMY_CONSOLE_COLUMNS=80 ++CONFIG_DUMMY_CONSOLE_ROWS=25 ++# CONFIG_FRAMEBUFFER_CONSOLE is not set ++# end of Console display driver support ++ ++# CONFIG_LOGO is not set ++# end of Graphics support ++ ++# CONFIG_SOUND is not set ++# CONFIG_HID_SUPPORT is not set ++CONFIG_USB_OHCI_LITTLE_ENDIAN=y ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_MMC is not set ++# CONFIG_SCSI_UFSHCD is not set ++# CONFIG_MEMSTICK is not set ++# CONFIG_NEW_LEDS is not set ++# CONFIG_ACCESSIBILITY is not set ++# CONFIG_INFINIBAND is not set ++CONFIG_EDAC_SUPPORT=y ++# CONFIG_RTC_CLASS is not set ++# CONFIG_DMADEVICES is not set ++ ++# ++# DMABUF options ++# ++# CONFIG_SYNC_FILE is not set ++# 
CONFIG_DMABUF_HEAPS is not set ++# end of DMABUF options ++ ++# CONFIG_UIO is not set ++# CONFIG_VFIO is not set ++# CONFIG_VIRT_DRIVERS is not set ++# CONFIG_VIRTIO_MENU is not set ++# CONFIG_VDPA is not set ++# CONFIG_VHOST_MENU is not set ++ ++# ++# Microsoft Hyper-V guest support ++# ++# end of Microsoft Hyper-V guest support ++ ++# CONFIG_GREYBUS is not set ++# CONFIG_COMEDI is not set ++# CONFIG_STAGING is not set ++# CONFIG_GOLDFISH is not set ++# CONFIG_CHROME_PLATFORMS is not set ++# CONFIG_MELLANOX_PLATFORM is not set ++# CONFIG_SURFACE_PLATFORMS is not set ++CONFIG_HAVE_CLK=y ++CONFIG_HAVE_CLK_PREPARE=y ++CONFIG_COMMON_CLK=y ++ ++# ++# Clock driver for ARM Reference designs ++# ++# CONFIG_CLK_ICST is not set ++# CONFIG_CLK_SP810 is not set ++# end of Clock driver for ARM Reference designs ++ ++# CONFIG_LMK04832 is not set ++# CONFIG_COMMON_CLK_MAX9485 is not set ++# CONFIG_COMMON_CLK_SI5341 is not set ++# CONFIG_COMMON_CLK_SI5351 is not set ++# CONFIG_COMMON_CLK_SI514 is not set ++# CONFIG_COMMON_CLK_SI544 is not set ++# CONFIG_COMMON_CLK_SI570 is not set ++# CONFIG_COMMON_CLK_CDCE706 is not set ++# CONFIG_COMMON_CLK_CDCE925 is not set ++# CONFIG_COMMON_CLK_CS2000_CP is not set ++# CONFIG_COMMON_CLK_AXI_CLKGEN is not set ++# CONFIG_COMMON_CLK_XGENE is not set ++# CONFIG_COMMON_CLK_RS9_PCIE is not set ++# CONFIG_COMMON_CLK_SI521XX is not set ++# CONFIG_COMMON_CLK_VC3 is not set ++# CONFIG_COMMON_CLK_VC5 is not set ++# CONFIG_COMMON_CLK_VC7 is not set ++# CONFIG_COMMON_CLK_FIXED_MMIO is not set ++# CONFIG_XILINX_VCU is not set ++# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set ++CONFIG_COMMON_CLK_SS928V100=y ++CONFIG_RESET_BSP=y ++# CONFIG_HWSPINLOCK is not set ++ ++# ++# Clock Source drivers ++# ++CONFIG_TIMER_OF=y ++CONFIG_TIMER_PROBE=y ++CONFIG_CLKSRC_MMIO=y ++CONFIG_ARM_ARCH_TIMER=y ++CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y ++CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y ++# CONFIG_FSL_ERRATUM_A008585 is not set ++CONFIG_HISILICON_ERRATUM_161010101=y ++CONFIG_ARM64_ERRATUM_858921=y ++CONFIG_ARM_TIMER_SP804=y ++# end of Clock Source drivers ++ ++# CONFIG_MAILBOX is not set ++# CONFIG_IOMMU_SUPPORT is not set ++ ++# ++# Remoteproc drivers ++# ++# CONFIG_REMOTEPROC is not set ++# end of Remoteproc drivers ++ ++# ++# Rpmsg drivers ++# ++# CONFIG_RPMSG_VIRTIO is not set ++# end of Rpmsg drivers ++ ++# CONFIG_SOUNDWIRE is not set ++ ++# ++# SOC (System On Chip) specific Drivers ++# ++ ++# ++# Amlogic SoC drivers ++# ++# end of Amlogic SoC drivers ++ ++# ++# Broadcom SoC drivers ++# ++# CONFIG_SOC_BRCMSTB is not set ++# end of Broadcom SoC drivers ++ ++# ++# NXP/Freescale QorIQ SoC drivers ++# ++# CONFIG_QUICC_ENGINE is not set ++# end of NXP/Freescale QorIQ SoC drivers ++ ++# ++# fujitsu SoC drivers ++# ++# end of fujitsu SoC drivers ++ ++# ++# i.MX SoC drivers ++# ++# end of i.MX SoC drivers ++ ++# ++# Enable LiteX SoC Builder specific drivers ++# ++# CONFIG_LITEX_SOC_CONTROLLER is not set ++# end of Enable LiteX SoC Builder specific drivers ++ ++# CONFIG_WPCM450_SOC is not set ++ ++# ++# Qualcomm SoC drivers ++# ++# end of Qualcomm SoC drivers ++ ++# CONFIG_SOC_TI is not set ++ ++# ++# Xilinx SoC drivers ++# ++# end of Xilinx SoC drivers ++# end of SOC (System On Chip) specific Drivers ++ ++# CONFIG_PM_DEVFREQ is not set ++CONFIG_EXTCON=y ++ ++# ++# Extcon Device Drivers ++# ++# CONFIG_EXTCON_FSA9480 is not set ++# CONFIG_EXTCON_GPIO is not set ++# CONFIG_EXTCON_MAX3355 is not set ++# CONFIG_EXTCON_PTN5150 is not set ++# CONFIG_EXTCON_RT8973A is not set ++# CONFIG_EXTCON_SM5502 is not set ++# 
CONFIG_EXTCON_USB_GPIO is not set ++# CONFIG_MEMORY is not set ++# CONFIG_IIO is not set ++# CONFIG_PWM is not set ++ ++# ++# IRQ chip support ++# ++CONFIG_IRQCHIP=y ++CONFIG_ARM_GIC=y ++CONFIG_ARM_GIC_MAX_NR=1 ++CONFIG_ARM_GIC_V3=y ++CONFIG_ARM_GIC_V3_ITS=y ++# CONFIG_AL_FIC is not set ++# CONFIG_XILINX_INTC is not set ++CONFIG_PARTITION_PERCPU=y ++# end of IRQ chip support ++ ++# CONFIG_IPACK_BUS is not set ++CONFIG_RESET_CONTROLLER=y ++# CONFIG_RESET_SIMPLE is not set ++# CONFIG_RESET_TI_SYSCON is not set ++# CONFIG_RESET_TI_TPS380X is not set ++ ++# ++# PHY Subsystem ++# ++CONFIG_GENERIC_PHY=y ++# CONFIG_PHY_CAN_TRANSCEIVER is not set ++ ++# ++# PHY drivers for Broadcom platforms ++# ++# CONFIG_BCM_KONA_USB2_PHY is not set ++# end of PHY drivers for Broadcom platforms ++ ++# CONFIG_PHY_CADENCE_TORRENT is not set ++# CONFIG_PHY_CADENCE_DPHY is not set ++# CONFIG_PHY_CADENCE_DPHY_RX is not set ++# CONFIG_PHY_CADENCE_SIERRA is not set ++# CONFIG_PHY_CADENCE_SALVO is not set ++# CONFIG_PHY_PXA_28NM_HSIC is not set ++# CONFIG_PHY_PXA_28NM_USB2 is not set ++# CONFIG_PHY_LAN966X_SERDES is not set ++# CONFIG_PHY_OCELOT_SERDES is not set ++# end of PHY Subsystem ++ ++# CONFIG_POWERCAP is not set ++# CONFIG_MCB is not set ++# CONFIG_RAS is not set ++ ++# ++# Android ++# ++# CONFIG_ANDROID_BINDER_IPC is not set ++# end of Android ++ ++# CONFIG_DAX is not set ++# CONFIG_NVMEM is not set ++ ++# ++# HW tracing support ++# ++# CONFIG_STM is not set ++# CONFIG_INTEL_TH is not set ++# end of HW tracing support ++ ++# CONFIG_FPGA is not set ++# CONFIG_FSI is not set ++# CONFIG_TEE is not set ++CONFIG_PM_OPP=y ++# CONFIG_SIOX is not set ++# CONFIG_SLIMBUS is not set ++# CONFIG_INTERCONNECT is not set ++# CONFIG_COUNTER is not set ++# CONFIG_MOST is not set ++# CONFIG_EDMAC is not set ++# CONFIG_PECI is not set ++# CONFIG_HTE is not set ++# CONFIG_CDX_BUS is not set ++ ++# ++# Vendor driver support ++# ++CONFIG_USB_WING=y ++ ++# ++# Wing UPS Phy ++# ++CONFIG_WING_UPS_PHY=y ++CONFIG_WING_UPS_XVP_PHY=y ++CONFIG_WING_UPS_NANO_PHY=y ++# CONFIG_WING_UPS_MISSILE_PHY is not set ++# end of Wing UPS Phy ++ ++CONFIG_BASEDRV_CLK=y ++# CONFIG_CMA_MEM_SHARED is not set ++# CONFIG_CMA_ADVANCE_SHARE is not set ++# end of Vendor driver support ++# end of Device Drivers ++ ++# ++# File systems ++# ++CONFIG_DCACHE_WORD_ACCESS=y ++# CONFIG_VALIDATE_FS_PARSER is not set ++CONFIG_FS_IOMAP=y ++CONFIG_BUFFER_HEAD=y ++# CONFIG_EXT2_FS is not set ++CONFIG_EXT3_FS=y ++# CONFIG_EXT3_FS_POSIX_ACL is not set ++# CONFIG_EXT3_FS_SECURITY is not set ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_USE_FOR_EXT2=y ++# CONFIG_EXT4_FS_POSIX_ACL is not set ++# CONFIG_EXT4_FS_SECURITY is not set ++# CONFIG_EXT4_DEBUG is not set ++CONFIG_JBD2=y ++# CONFIG_JBD2_DEBUG is not set ++CONFIG_FS_MBCACHE=y ++# CONFIG_REISERFS_FS is not set ++# CONFIG_JFS_FS is not set ++CONFIG_XFS_FS=y ++CONFIG_XFS_SUPPORT_V4=y ++CONFIG_XFS_SUPPORT_ASCII_CI=y ++# CONFIG_XFS_QUOTA is not set ++# CONFIG_XFS_POSIX_ACL is not set ++# CONFIG_XFS_RT is not set ++# CONFIG_XFS_ONLINE_SCRUB is not set ++# CONFIG_XFS_WARN is not set ++# CONFIG_XFS_DEBUG is not set ++# CONFIG_GFS2_FS is not set ++# CONFIG_OCFS2_FS is not set ++CONFIG_BTRFS_FS=y ++# CONFIG_BTRFS_FS_POSIX_ACL is not set ++# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set ++# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set ++# CONFIG_BTRFS_DEBUG is not set ++# CONFIG_BTRFS_ASSERT is not set ++# CONFIG_BTRFS_FS_REF_VERIFY is not set ++# CONFIG_NILFS2_FS is not set ++# CONFIG_F2FS_FS is not set ++CONFIG_FS_POSIX_ACL=y ++CONFIG_EXPORTFS=y ++# 
CONFIG_EXPORTFS_BLOCK_OPS is not set ++CONFIG_FILE_LOCKING=y ++# CONFIG_FS_ENCRYPTION is not set ++# CONFIG_FS_VERITY is not set ++CONFIG_FSNOTIFY=y ++CONFIG_DNOTIFY=y ++CONFIG_INOTIFY_USER=y ++# CONFIG_FANOTIFY is not set ++CONFIG_QUOTA=y ++# CONFIG_QUOTA_NETLINK_INTERFACE is not set ++# CONFIG_QUOTA_DEBUG is not set ++CONFIG_QUOTA_TREE=m ++CONFIG_QFMT_V1=m ++CONFIG_QFMT_V2=m ++CONFIG_QUOTACTL=y ++CONFIG_AUTOFS_FS=m ++CONFIG_FUSE_FS=y ++# CONFIG_CUSE is not set ++# CONFIG_VIRTIO_FS is not set ++# CONFIG_OVERLAY_FS is not set ++ ++# ++# Caches ++# ++# CONFIG_FSCACHE is not set ++# end of Caches ++ ++# ++# CD-ROM/DVD Filesystems ++# ++# CONFIG_ISO9660_FS is not set ++# CONFIG_UDF_FS is not set ++# end of CD-ROM/DVD Filesystems ++ ++# ++# DOS/FAT/EXFAT/NT Filesystems ++# ++# CONFIG_MSDOS_FS is not set ++# CONFIG_VFAT_FS is not set ++# CONFIG_EXFAT_FS is not set ++# CONFIG_NTFS_FS is not set ++# CONFIG_NTFS3_FS is not set ++# end of DOS/FAT/EXFAT/NT Filesystems ++ ++# ++# Pseudo filesystems ++# ++CONFIG_PROC_FS=y ++# CONFIG_PROC_KCORE is not set ++CONFIG_PROC_SYSCTL=y ++CONFIG_PROC_PAGE_MONITOR=y ++# CONFIG_PROC_CHILDREN is not set ++CONFIG_KERNFS=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_TMPFS_XATTR=y ++# CONFIG_TMPFS_INODE64 is not set ++# CONFIG_TMPFS_QUOTA is not set ++CONFIG_ARCH_SUPPORTS_HUGETLBFS=y ++# CONFIG_HUGETLBFS is not set ++CONFIG_ARCH_HAS_GIGANTIC_PAGE=y ++CONFIG_CONFIGFS_FS=y ++# end of Pseudo filesystems ++ ++CONFIG_MISC_FILESYSTEMS=y ++# CONFIG_ORANGEFS_FS is not set ++# CONFIG_ADFS_FS is not set ++# CONFIG_AFFS_FS is not set ++# CONFIG_ECRYPT_FS is not set ++# CONFIG_HFS_FS is not set ++# CONFIG_HFSPLUS_FS is not set ++# CONFIG_BEFS_FS is not set ++# CONFIG_BFS_FS is not set ++# CONFIG_EFS_FS is not set ++# CONFIG_JFFS2_FS is not set ++CONFIG_UBIFS_FS=y ++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set ++CONFIG_UBIFS_FS_LZO=y ++CONFIG_UBIFS_FS_ZLIB=y ++CONFIG_UBIFS_FS_ZSTD=y ++# CONFIG_UBIFS_ATIME_SUPPORT is not set ++CONFIG_UBIFS_FS_XATTR=y ++CONFIG_UBIFS_FS_SECURITY=y ++# CONFIG_UBIFS_FS_AUTHENTICATION is not set ++CONFIG_CRAMFS=y ++CONFIG_CRAMFS_BLOCKDEV=y ++# CONFIG_CRAMFS_MTD is not set ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_FILE_CACHE=y ++# CONFIG_SQUASHFS_FILE_DIRECT is not set ++CONFIG_SQUASHFS_DECOMP_SINGLE=y ++# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set ++CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y ++# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set ++# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set ++# CONFIG_SQUASHFS_XATTR is not set ++CONFIG_SQUASHFS_ZLIB=y ++# CONFIG_SQUASHFS_LZ4 is not set ++CONFIG_SQUASHFS_LZO=y ++CONFIG_SQUASHFS_XZ=y ++# CONFIG_SQUASHFS_ZSTD is not set ++# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set ++# CONFIG_SQUASHFS_EMBEDDED is not set ++CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 ++# CONFIG_VXFS_FS is not set ++# CONFIG_MINIX_FS is not set ++# CONFIG_OMFS_FS is not set ++# CONFIG_HPFS_FS is not set ++# CONFIG_QNX4FS_FS is not set ++# CONFIG_QNX6FS_FS is not set ++# CONFIG_ROMFS_FS is not set ++# CONFIG_PSTORE is not set ++# CONFIG_SYSV_FS is not set ++# CONFIG_UFS_FS is not set ++# CONFIG_EROFS_FS is not set ++CONFIG_NETWORK_FILESYSTEMS=y ++# CONFIG_NFS_FS is not set ++# CONFIG_NFSD is not set ++# CONFIG_CEPH_FS is not set ++# CONFIG_CIFS is not set ++# CONFIG_SMB_SERVER is not set ++# CONFIG_CODA_FS is not set ++# CONFIG_AFS_FS is not set ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="iso8859-1" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m 
++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=y ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++# CONFIG_NLS_MAC_ROMAN is not set ++# CONFIG_NLS_MAC_CELTIC is not set ++# CONFIG_NLS_MAC_CENTEURO is not set ++# CONFIG_NLS_MAC_CROATIAN is not set ++# CONFIG_NLS_MAC_CYRILLIC is not set ++# CONFIG_NLS_MAC_GAELIC is not set ++# CONFIG_NLS_MAC_GREEK is not set ++# CONFIG_NLS_MAC_ICELAND is not set ++# CONFIG_NLS_MAC_INUIT is not set ++# CONFIG_NLS_MAC_ROMANIAN is not set ++# CONFIG_NLS_MAC_TURKISH is not set ++CONFIG_NLS_UTF8=y ++# CONFIG_DLM is not set ++# CONFIG_UNICODE is not set ++CONFIG_IO_WQ=y ++# end of File systems ++ ++# ++# Security options ++# ++CONFIG_KEYS=y ++# CONFIG_KEYS_REQUEST_CACHE is not set ++# CONFIG_PERSISTENT_KEYRINGS is not set ++# CONFIG_TRUSTED_KEYS is not set ++# CONFIG_ENCRYPTED_KEYS is not set ++# CONFIG_KEY_DH_OPERATIONS is not set ++# CONFIG_SECURITY_DMESG_RESTRICT is not set ++CONFIG_PROC_MEM_ALWAYS_FORCE=y ++# CONFIG_PROC_MEM_FORCE_PTRACE is not set ++# CONFIG_PROC_MEM_NO_FORCE is not set ++# CONFIG_SECURITY is not set ++# CONFIG_SECURITYFS is not set ++# CONFIG_HARDENED_USERCOPY is not set ++# CONFIG_FORTIFY_SOURCE is not set ++# CONFIG_STATIC_USERMODEHELPER is not set ++CONFIG_DEFAULT_SECURITY_DAC=y ++CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" ++ ++# ++# Kernel hardening options ++# ++ ++# ++# Memory initialization ++# ++CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y ++CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER=y ++CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y ++CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_STACK_ALL_PATTERN is not set ++# CONFIG_INIT_STACK_ALL_ZERO is not set ++# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set ++# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set ++# end of Memory initialization ++ ++# ++# Hardening of kernel data structures ++# ++# CONFIG_LIST_HARDENED is not set ++# CONFIG_BUG_ON_DATA_CORRUPTION is not set ++# end of Hardening of kernel data structures ++ ++CONFIG_RANDSTRUCT_NONE=y ++# end of Kernel hardening options ++# end of Security options ++ ++CONFIG_XOR_BLOCKS=y ++CONFIG_CRYPTO=y ++ ++# ++# Crypto core or helper ++# ++CONFIG_CRYPTO_ALGAPI=y ++CONFIG_CRYPTO_ALGAPI2=y ++CONFIG_CRYPTO_AEAD=m ++CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_SIG2=y ++CONFIG_CRYPTO_SKCIPHER=y ++CONFIG_CRYPTO_SKCIPHER2=y ++CONFIG_CRYPTO_HASH=y ++CONFIG_CRYPTO_HASH2=y ++CONFIG_CRYPTO_RNG=m ++CONFIG_CRYPTO_RNG2=y ++CONFIG_CRYPTO_RNG_DEFAULT=m ++CONFIG_CRYPTO_AKCIPHER2=y ++CONFIG_CRYPTO_KPP2=y ++CONFIG_CRYPTO_ACOMP2=y ++CONFIG_CRYPTO_MANAGER=y ++CONFIG_CRYPTO_MANAGER2=y ++# CONFIG_CRYPTO_USER is not set ++CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y ++CONFIG_CRYPTO_NULL=m ++CONFIG_CRYPTO_NULL2=m ++# CONFIG_CRYPTO_PCRYPT is not set ++# CONFIG_CRYPTO_CRYPTD is not set ++# CONFIG_CRYPTO_AUTHENC is not set ++# 
CONFIG_CRYPTO_TEST is not set ++# end of Crypto core or helper ++ ++# ++# Public-key cryptography ++# ++# CONFIG_CRYPTO_RSA is not set ++# CONFIG_CRYPTO_DH is not set ++# CONFIG_CRYPTO_ECDH is not set ++# CONFIG_CRYPTO_ECDSA is not set ++# CONFIG_CRYPTO_ECRDSA is not set ++# CONFIG_CRYPTO_SM2 is not set ++# CONFIG_CRYPTO_CURVE25519 is not set ++# end of Public-key cryptography ++ ++# ++# Block ciphers ++# ++CONFIG_CRYPTO_AES=y ++# CONFIG_CRYPTO_AES_TI is not set ++# CONFIG_CRYPTO_ARIA is not set ++# CONFIG_CRYPTO_BLOWFISH is not set ++# CONFIG_CRYPTO_CAMELLIA is not set ++# CONFIG_CRYPTO_CAST5 is not set ++# CONFIG_CRYPTO_CAST6 is not set ++# CONFIG_CRYPTO_DES is not set ++# CONFIG_CRYPTO_FCRYPT is not set ++# CONFIG_CRYPTO_SERPENT is not set ++# CONFIG_CRYPTO_SM4_GENERIC is not set ++# CONFIG_CRYPTO_TWOFISH is not set ++# end of Block ciphers ++ ++# ++# Length-preserving ciphers and modes ++# ++# CONFIG_CRYPTO_ADIANTUM is not set ++# CONFIG_CRYPTO_CHACHA20 is not set ++# CONFIG_CRYPTO_CBC is not set ++# CONFIG_CRYPTO_CFB is not set ++CONFIG_CRYPTO_CTR=m ++# CONFIG_CRYPTO_CTS is not set ++CONFIG_CRYPTO_ECB=y ++# CONFIG_CRYPTO_HCTR2 is not set ++# CONFIG_CRYPTO_KEYWRAP is not set ++# CONFIG_CRYPTO_LRW is not set ++# CONFIG_CRYPTO_OFB is not set ++# CONFIG_CRYPTO_PCBC is not set ++# CONFIG_CRYPTO_XTS is not set ++# end of Length-preserving ciphers and modes ++ ++# ++# AEAD (authenticated encryption with associated data) ciphers ++# ++# CONFIG_CRYPTO_AEGIS128 is not set ++# CONFIG_CRYPTO_CHACHA20POLY1305 is not set ++CONFIG_CRYPTO_CCM=m ++# CONFIG_CRYPTO_GCM is not set ++CONFIG_CRYPTO_GENIV=m ++CONFIG_CRYPTO_SEQIV=m ++CONFIG_CRYPTO_ECHAINIV=m ++# CONFIG_CRYPTO_ESSIV is not set ++# end of AEAD (authenticated encryption with associated data) ciphers ++ ++# ++# Hashes, digests, and MACs ++# ++CONFIG_CRYPTO_BLAKE2B=y ++CONFIG_CRYPTO_CMAC=y ++# CONFIG_CRYPTO_GHASH is not set ++CONFIG_CRYPTO_HMAC=m ++# CONFIG_CRYPTO_MD4 is not set ++# CONFIG_CRYPTO_MD5 is not set ++# CONFIG_CRYPTO_MICHAEL_MIC is not set ++# CONFIG_CRYPTO_POLY1305 is not set ++# CONFIG_CRYPTO_RMD160 is not set ++# CONFIG_CRYPTO_SHA1 is not set ++CONFIG_CRYPTO_SHA256=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_SHA3=m ++# CONFIG_CRYPTO_SM3_GENERIC is not set ++# CONFIG_CRYPTO_STREEBOG is not set ++# CONFIG_CRYPTO_VMAC is not set ++# CONFIG_CRYPTO_WP512 is not set ++# CONFIG_CRYPTO_XCBC is not set ++CONFIG_CRYPTO_XXHASH=y ++# end of Hashes, digests, and MACs ++ ++# ++# CRCs (cyclic redundancy checks) ++# ++CONFIG_CRYPTO_CRC32C=y ++# CONFIG_CRYPTO_CRC32 is not set ++# CONFIG_CRYPTO_CRCT10DIF is not set ++# end of CRCs (cyclic redundancy checks) ++ ++# ++# Compression ++# ++CONFIG_CRYPTO_DEFLATE=y ++CONFIG_CRYPTO_LZO=y ++# CONFIG_CRYPTO_842 is not set ++# CONFIG_CRYPTO_LZ4 is not set ++# CONFIG_CRYPTO_LZ4HC is not set ++CONFIG_CRYPTO_ZSTD=y ++# end of Compression ++ ++# ++# Random number generation ++# ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DRBG_MENU=m ++CONFIG_CRYPTO_DRBG_HMAC=y ++# CONFIG_CRYPTO_DRBG_HASH is not set ++# CONFIG_CRYPTO_DRBG_CTR is not set ++CONFIG_CRYPTO_DRBG=m ++CONFIG_CRYPTO_JITTERENTROPY=m ++# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set ++# end of Random number generation ++ ++# ++# Userspace interface ++# ++# CONFIG_CRYPTO_USER_API_HASH is not set ++# CONFIG_CRYPTO_USER_API_SKCIPHER is not set ++# CONFIG_CRYPTO_USER_API_RNG is not set ++# CONFIG_CRYPTO_USER_API_AEAD is not set ++# end of Userspace interface ++ ++CONFIG_CRYPTO_HASH_INFO=y ++# CONFIG_CRYPTO_NHPOLY1305_NEON is not set ++# 
CONFIG_CRYPTO_CHACHA20_NEON is not set ++ ++# ++# Accelerated Cryptographic Algorithms for CPU (arm64) ++# ++# CONFIG_CRYPTO_GHASH_ARM64_CE is not set ++# CONFIG_CRYPTO_POLY1305_NEON is not set ++# CONFIG_CRYPTO_SHA1_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA256_ARM64 is not set ++# CONFIG_CRYPTO_SHA2_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA512_ARM64 is not set ++# CONFIG_CRYPTO_SHA512_ARM64_CE is not set ++# CONFIG_CRYPTO_SHA3_ARM64 is not set ++# CONFIG_CRYPTO_SM3_NEON is not set ++# CONFIG_CRYPTO_SM3_ARM64_CE is not set ++# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set ++# CONFIG_CRYPTO_AES_ARM64 is not set ++# CONFIG_CRYPTO_AES_ARM64_CE is not set ++# CONFIG_CRYPTO_AES_ARM64_CE_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_BS is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set ++# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set ++# CONFIG_CRYPTO_AES_ARM64_CE_CCM is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_CCM is not set ++# CONFIG_CRYPTO_SM4_ARM64_CE_GCM is not set ++# end of Accelerated Cryptographic Algorithms for CPU (arm64) ++ ++CONFIG_CRYPTO_HW=y ++# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set ++# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set ++# CONFIG_CRYPTO_DEV_CCP is not set ++# CONFIG_CRYPTO_DEV_SAFEXCEL is not set ++# CONFIG_CRYPTO_DEV_CCREE is not set ++# CONFIG_CRYPTO_DEV_HISI_SEC is not set ++# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set ++# CONFIG_ASYMMETRIC_KEY_TYPE is not set ++ ++# ++# Certificates for signature checking ++# ++# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set ++# end of Certificates for signature checking ++ ++CONFIG_BINARY_PRINTF=y ++ ++# ++# Library routines ++# ++CONFIG_RAID6_PQ=y ++# CONFIG_RAID6_PQ_BENCHMARK is not set ++# CONFIG_PACKING is not set ++CONFIG_BITREVERSE=y ++CONFIG_HAVE_ARCH_BITREVERSE=y ++CONFIG_GENERIC_STRNCPY_FROM_USER=y ++CONFIG_GENERIC_STRNLEN_USER=y ++CONFIG_GENERIC_NET_UTILS=y ++# CONFIG_CORDIC is not set ++# CONFIG_PRIME_NUMBERS is not set ++CONFIG_RATIONAL=y ++CONFIG_GENERIC_PCI_IOMAP=y ++CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y ++CONFIG_ARCH_HAS_FAST_MULTIPLIER=y ++CONFIG_ARCH_USE_SYM_ANNOTATIONS=y ++# CONFIG_INDIRECT_PIO is not set ++ ++# ++# Crypto library routines ++# ++CONFIG_CRYPTO_LIB_UTILS=y ++CONFIG_CRYPTO_LIB_AES=y ++CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y ++# CONFIG_CRYPTO_LIB_CHACHA is not set ++# CONFIG_CRYPTO_LIB_CURVE25519 is not set ++CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 ++# CONFIG_CRYPTO_LIB_POLY1305 is not set ++# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set ++CONFIG_CRYPTO_LIB_SHA1=y ++CONFIG_CRYPTO_LIB_SHA256=y ++# end of Crypto library routines ++ ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++# CONFIG_CRC_T10DIF is not set ++# CONFIG_CRC64_ROCKSOFT is not set ++CONFIG_CRC_ITU_T=y ++CONFIG_CRC32=y ++# CONFIG_CRC32_SELFTEST is not set ++CONFIG_CRC32_SLICEBY8=y ++# CONFIG_CRC32_SLICEBY4 is not set ++# CONFIG_CRC32_SARWATE is not set ++# CONFIG_CRC32_BIT is not set ++# CONFIG_CRC64 is not set ++# CONFIG_CRC4 is not set ++# CONFIG_CRC7 is not set ++CONFIG_LIBCRC32C=y ++# CONFIG_CRC8 is not set ++CONFIG_XXHASH=y ++CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y ++# CONFIG_RANDOM32_SELFTEST is not set ++CONFIG_ZLIB_INFLATE=y ++CONFIG_ZLIB_DEFLATE=y ++CONFIG_LZO_COMPRESS=y ++CONFIG_LZO_DECOMPRESS=y ++CONFIG_ZSTD_COMMON=y ++CONFIG_ZSTD_COMPRESS=y ++CONFIG_ZSTD_DECOMPRESS=y ++CONFIG_XZ_DEC=y ++# CONFIG_XZ_DEC_X86 is not set ++# CONFIG_XZ_DEC_POWERPC is not set ++# CONFIG_XZ_DEC_IA64 is not set ++# CONFIG_XZ_DEC_ARM is not set ++# CONFIG_XZ_DEC_ARMTHUMB is not set ++# 
CONFIG_XZ_DEC_SPARC is not set ++# CONFIG_XZ_DEC_MICROLZMA is not set ++# CONFIG_XZ_DEC_TEST is not set ++CONFIG_GENERIC_ALLOCATOR=y ++CONFIG_ASSOCIATIVE_ARRAY=y ++CONFIG_HAS_IOMEM=y ++CONFIG_HAS_IOPORT=y ++CONFIG_HAS_DMA=y ++CONFIG_NEED_SG_DMA_LENGTH=y ++CONFIG_NEED_DMA_MAP_STATE=y ++CONFIG_ARCH_DMA_ADDR_T_64BIT=y ++CONFIG_DMA_DECLARE_COHERENT=y ++CONFIG_ARCH_HAS_SETUP_DMA_OPS=y ++CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y ++CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y ++CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y ++CONFIG_SWIOTLB=y ++# CONFIG_SWIOTLB_DYNAMIC is not set ++CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y ++# CONFIG_DMA_RESTRICTED_POOL is not set ++CONFIG_DMA_NONCOHERENT_MMAP=y ++CONFIG_DMA_COHERENT_POOL=y ++CONFIG_DMA_DIRECT_REMAP=y ++CONFIG_DMA_CMA=y ++ ++# ++# Default contiguous memory area size: ++# ++CONFIG_CMA_SIZE_MBYTES=4 ++CONFIG_CMA_SIZE_SEL_MBYTES=y ++# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set ++# CONFIG_CMA_SIZE_SEL_MIN is not set ++# CONFIG_CMA_SIZE_SEL_MAX is not set ++CONFIG_CMA_ALIGNMENT=8 ++# CONFIG_DMA_API_DEBUG is not set ++CONFIG_SGL_ALLOC=y ++CONFIG_CPU_RMAP=y ++CONFIG_DQL=y ++CONFIG_GLOB=y ++# CONFIG_GLOB_SELFTEST is not set ++CONFIG_NLATTR=y ++# CONFIG_IRQ_POLL is not set ++CONFIG_LIBFDT=y ++CONFIG_HAVE_GENERIC_VDSO=y ++CONFIG_GENERIC_GETTIMEOFDAY=y ++CONFIG_GENERIC_VDSO_TIME_NS=y ++CONFIG_SG_POOL=y ++CONFIG_ARCH_STACKWALK=y ++CONFIG_STACKDEPOT=y ++CONFIG_SBITMAP=y ++# end of Library routines ++ ++CONFIG_GENERIC_IOREMAP=y ++CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y ++ ++# ++# Kernel hacking ++# ++ ++# ++# printk and dmesg options ++# ++# CONFIG_PRINTK_TIME is not set ++# CONFIG_PRINTK_CALLER is not set ++# CONFIG_STACKTRACE_BUILD_ID is not set ++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 ++CONFIG_CONSOLE_LOGLEVEL_QUIET=4 ++CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 ++# CONFIG_BOOT_PRINTK_DELAY is not set ++# CONFIG_DYNAMIC_DEBUG is not set ++# CONFIG_DYNAMIC_DEBUG_CORE is not set ++CONFIG_SYMBOLIC_ERRNAME=y ++CONFIG_DEBUG_BUGVERBOSE=y ++# end of printk and dmesg options ++ ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_MISC=y ++ ++# ++# Compile-time checks and compiler options ++# ++CONFIG_AS_HAS_NON_CONST_LEB128=y ++CONFIG_DEBUG_INFO_NONE=y ++# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set ++# CONFIG_DEBUG_INFO_DWARF4 is not set ++# CONFIG_DEBUG_INFO_DWARF5 is not set ++CONFIG_FRAME_WARN=2048 ++# CONFIG_STRIP_ASM_SYMS is not set ++# CONFIG_HEADERS_INSTALL is not set ++CONFIG_SECTION_MISMATCH_WARN_ONLY=y ++# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set ++CONFIG_ARCH_WANT_FRAME_POINTERS=y ++CONFIG_FRAME_POINTER=y ++# CONFIG_VMLINUX_MAP is not set ++# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set ++# end of Compile-time checks and compiler options ++ ++# ++# Generic Kernel Debugging Instruments ++# ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 ++CONFIG_MAGIC_SYSRQ_SERIAL=y ++CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" ++# CONFIG_DEBUG_FS is not set ++CONFIG_HAVE_ARCH_KGDB=y ++# CONFIG_KGDB is not set ++CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y ++# CONFIG_UBSAN is not set ++CONFIG_HAVE_ARCH_KCSAN=y ++CONFIG_HAVE_KCSAN_COMPILER=y ++# CONFIG_KCSAN is not set ++# end of Generic Kernel Debugging Instruments ++ ++# ++# Networking Debugging ++# ++# CONFIG_NET_DEV_REFCNT_TRACKER is not set ++# CONFIG_NET_NS_REFCNT_TRACKER is not set ++# CONFIG_DEBUG_NET is not set ++# end of Networking Debugging ++ ++# ++# Memory Debugging ++# ++# CONFIG_PAGE_EXTENSION is not set ++# CONFIG_DEBUG_PAGEALLOC is not set ++CONFIG_SLUB_DEBUG=y ++# CONFIG_SLUB_DEBUG_ON is not set ++# CONFIG_PAGE_OWNER is not set ++# 
CONFIG_PAGE_POISONING is not set ++# CONFIG_DEBUG_RODATA_TEST is not set ++CONFIG_ARCH_HAS_DEBUG_WX=y ++# CONFIG_DEBUG_WX is not set ++CONFIG_GENERIC_PTDUMP=y ++CONFIG_HAVE_DEBUG_KMEMLEAK=y ++# CONFIG_DEBUG_KMEMLEAK is not set ++# CONFIG_PER_VMA_LOCK_STATS is not set ++# CONFIG_DEBUG_OBJECTS is not set ++# CONFIG_DEBUG_STACK_USAGE is not set ++# CONFIG_SCHED_STACK_END_CHECK is not set ++CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y ++# CONFIG_DEBUG_VM is not set ++# CONFIG_DEBUG_VM_PGTABLE is not set ++CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y ++# CONFIG_DEBUG_VIRTUAL is not set ++# CONFIG_DEBUG_MEMORY_INIT is not set ++# CONFIG_DEBUG_PER_CPU_MAPS is not set ++CONFIG_HAVE_ARCH_KASAN=y ++CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y ++CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y ++CONFIG_HAVE_ARCH_KASAN_VMALLOC=y ++CONFIG_CC_HAS_KASAN_GENERIC=y ++CONFIG_CC_HAS_KASAN_SW_TAGS=y ++CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y ++# CONFIG_KASAN is not set ++CONFIG_HAVE_ARCH_KFENCE=y ++# CONFIG_KFENCE is not set ++# end of Memory Debugging ++ ++# CONFIG_DEBUG_SHIRQ is not set ++ ++# ++# Debug Oops, Lockups and Hangs ++# ++CONFIG_PANIC_ON_OOPS=y ++CONFIG_PANIC_ON_OOPS_VALUE=1 ++CONFIG_PANIC_TIMEOUT=0 ++# CONFIG_SOFTLOCKUP_DETECTOR is not set ++CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y ++# CONFIG_HARDLOCKUP_DETECTOR is not set ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 ++# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set ++# CONFIG_WQ_WATCHDOG is not set ++# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set ++# CONFIG_TEST_LOCKUP is not set ++# end of Debug Oops, Lockups and Hangs ++ ++# ++# Scheduler Debugging ++# ++CONFIG_SCHED_INFO=y ++CONFIG_SCHEDSTATS=y ++# end of Scheduler Debugging ++ ++# CONFIG_DEBUG_TIMEKEEPING is not set ++ ++# ++# Lock Debugging (spinlocks, mutexes, etc...) ++# ++CONFIG_LOCK_DEBUGGING_SUPPORT=y ++# CONFIG_PROVE_LOCKING is not set ++# CONFIG_LOCK_STAT is not set ++# CONFIG_DEBUG_RT_MUTEXES is not set ++# CONFIG_DEBUG_SPINLOCK is not set ++# CONFIG_DEBUG_MUTEXES is not set ++# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set ++# CONFIG_DEBUG_RWSEMS is not set ++# CONFIG_DEBUG_LOCK_ALLOC is not set ++# CONFIG_DEBUG_ATOMIC_SLEEP is not set ++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set ++# CONFIG_LOCK_TORTURE_TEST is not set ++# CONFIG_WW_MUTEX_SELFTEST is not set ++# CONFIG_SCF_TORTURE_TEST is not set ++# CONFIG_CSD_LOCK_WAIT_DEBUG is not set ++# end of Lock Debugging (spinlocks, mutexes, etc...) 
++ ++# CONFIG_DEBUG_IRQFLAGS is not set ++CONFIG_STACKTRACE=y ++# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set ++# CONFIG_DEBUG_KOBJECT is not set ++ ++# ++# Debug kernel data structures ++# ++# CONFIG_DEBUG_LIST is not set ++# CONFIG_DEBUG_PLIST is not set ++# CONFIG_DEBUG_SG is not set ++# CONFIG_DEBUG_NOTIFIERS is not set ++# CONFIG_DEBUG_MAPLE_TREE is not set ++# end of Debug kernel data structures ++ ++# ++# RCU Debugging ++# ++# CONFIG_RCU_SCALE_TEST is not set ++# CONFIG_RCU_TORTURE_TEST is not set ++# CONFIG_RCU_REF_SCALE_TEST is not set ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 ++# CONFIG_RCU_CPU_STALL_CPUTIME is not set ++CONFIG_RCU_TRACE=y ++# CONFIG_RCU_EQS_DEBUG is not set ++# end of RCU Debugging ++ ++# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set ++# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set ++# CONFIG_LATENCYTOP is not set ++CONFIG_HAVE_FUNCTION_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y ++CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y ++CONFIG_HAVE_DYNAMIC_FTRACE=y ++CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y ++CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y ++CONFIG_HAVE_SYSCALL_TRACEPOINTS=y ++CONFIG_HAVE_C_RECORDMCOUNT=y ++CONFIG_TRACE_CLOCK=y ++CONFIG_TRACING_SUPPORT=y ++# CONFIG_FTRACE is not set ++# CONFIG_SAMPLES is not set ++CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y ++CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y ++# CONFIG_STRICT_DEVMEM is not set ++ ++# ++# arm64 Debugging ++# ++# CONFIG_PID_IN_CONTEXTIDR is not set ++# CONFIG_ARM64_RELOC_TEST is not set ++# CONFIG_CORESIGHT is not set ++# end of arm64 Debugging ++ ++# ++# Kernel Testing and Coverage ++# ++# CONFIG_KUNIT is not set ++# CONFIG_NOTIFIER_ERROR_INJECTION is not set ++# CONFIG_FAULT_INJECTION is not set ++CONFIG_ARCH_HAS_KCOV=y ++CONFIG_CC_HAS_SANCOV_TRACE_PC=y ++# CONFIG_KCOV is not set ++CONFIG_RUNTIME_TESTING_MENU=y ++# CONFIG_TEST_DHRY is not set ++# CONFIG_TEST_MIN_HEAP is not set ++# CONFIG_TEST_DIV64 is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_TEST_REF_TRACKER is not set ++# CONFIG_RBTREE_TEST is not set ++# CONFIG_REED_SOLOMON_TEST is not set ++# CONFIG_INTERVAL_TREE_TEST is not set ++# CONFIG_PERCPU_TEST is not set ++# CONFIG_ATOMIC64_SELFTEST is not set ++# CONFIG_TEST_HEXDUMP is not set ++# CONFIG_STRING_SELFTEST is not set ++# CONFIG_TEST_STRING_HELPERS is not set ++# CONFIG_TEST_KSTRTOX is not set ++# CONFIG_TEST_PRINTF is not set ++# CONFIG_TEST_SCANF is not set ++# CONFIG_TEST_BITMAP is not set ++# CONFIG_TEST_UUID is not set ++# CONFIG_TEST_XARRAY is not set ++# CONFIG_TEST_MAPLE_TREE is not set ++# CONFIG_TEST_RHASHTABLE is not set ++# CONFIG_TEST_IDA is not set ++# CONFIG_TEST_LKM is not set ++# CONFIG_TEST_BITOPS is not set ++# CONFIG_TEST_VMALLOC is not set ++# CONFIG_TEST_USER_COPY is not set ++# CONFIG_TEST_BPF is not set ++# CONFIG_TEST_BLACKHOLE_DEV is not set ++# CONFIG_FIND_BIT_BENCHMARK is not set ++# CONFIG_TEST_FIRMWARE is not set ++# CONFIG_TEST_SYSCTL is not set ++# CONFIG_TEST_UDELAY is not set ++# CONFIG_TEST_STATIC_KEYS is not set ++# CONFIG_TEST_KMOD is not set ++# CONFIG_TEST_MEMCAT_P is not set ++# CONFIG_TEST_MEMINIT is not set ++# CONFIG_TEST_FREE_PAGES is not set ++CONFIG_ARCH_USE_MEMTEST=y ++# CONFIG_MEMTEST is not set ++# end of Kernel Testing and Coverage ++ ++# ++# Rust hacking ++# ++# end of Rust hacking ++# end of Kernel hacking +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c +index bfb5065fb..2e5d1e238 100644 +--- a/arch/arm64/mm/fault.c ++++ b/arch/arm64/mm/fault.c +@@ -940,7 +940,7 @@ NOKPROBE_SYMBOL(do_debug_exception); + struct 
folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, + unsigned long vaddr) + { +- gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA; ++ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO; + + /* + * If the page is mapped with PROT_MTE, initialise the tags at the +diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c +index 013eead9b..9d9233b2b 100644 +--- a/arch/arm64/mm/flush.c ++++ b/arch/arm64/mm/flush.c +@@ -84,6 +84,7 @@ EXPORT_SYMBOL(flush_dcache_page); + * Additional functions defined in assembly. + */ + EXPORT_SYMBOL(caches_clean_inval_pou); ++EXPORT_SYMBOL(dcache_clean_inval_poc); + + #ifdef CONFIG_ARCH_HAS_PMEM_API + void arch_wb_cache_pmem(void *addr, size_t size) +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c +index 7c5156e7d..5074bd1d3 100644 +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -1952,7 +1952,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); + + if (flags & BPF_TRAMP_F_CALL_ORIG) { +- emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); ++ emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_call((const u64)__bpf_tramp_enter, ctx); + } + +@@ -1996,7 +1996,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = ctx->image + ctx->idx; +- emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); ++ emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_call((const u64)__bpf_tramp_exit, ctx); + } + +diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h +index e7c7736e5..e17f7ce4f 100644 +--- a/crypto/asymmetric_keys/pkcs7_parser.h ++++ b/crypto/asymmetric_keys/pkcs7_parser.h +@@ -37,11 +37,6 @@ struct pkcs7_signed_info { + #define sinfo_has_ms_statement_type 5 + time64_t signing_time; + +-#ifdef CONFIG_SECURITY_CODE_SIGN +- const char *ownerid; +- unsigned ownerid_len; +-#endif /* CONFIG_SECURITY_CODE_SIGN */ +- + /* Message signature. 
+ * + * This contains the generated digest of _either_ the Content Data or +diff --git a/drivers/Kconfig b/drivers/Kconfig +index 4600b6ba5..6063f335f 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -11,8 +11,6 @@ source "drivers/pcmcia/Kconfig" + source "drivers/rapidio/Kconfig" + + +-source "drivers/hyperhold/Kconfig" +- + source "drivers/base/Kconfig" + + source "drivers/bus/Kconfig" +@@ -239,14 +237,14 @@ source "drivers/counter/Kconfig" + + source "drivers/most/Kconfig" + ++source "drivers/edmac/Kconfig" ++ + source "drivers/peci/Kconfig" + + source "drivers/hte/Kconfig" + + source "drivers/cdx/Kconfig" + +-source "drivers/accesstokenid/Kconfig" +- +-source "drivers/hck/Kconfig" ++source "drivers/vendor/Kconfig" + + endmenu +diff --git a/drivers/Makefile b/drivers/Makefile +index 34963a7f5..ef5e86ac1 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -75,9 +75,6 @@ obj-$(CONFIG_CONNECTOR) += connector/ + obj-$(CONFIG_FB_I810) += video/fbdev/i810/ + obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ + +-# Hyperhold driver +-obj-$(CONFIG_HYPERHOLD) += hyperhold/ +- + obj-$(CONFIG_PARPORT) += parport/ + obj-y += base/ block/ misc/ mfd/ nfc/ + obj-$(CONFIG_LIBNVDIMM) += nvdimm/ +@@ -202,6 +199,5 @@ obj-$(CONFIG_DRM_ACCEL) += accel/ + obj-$(CONFIG_CDX_BUS) += cdx/ + + obj-$(CONFIG_S390) += s390/ +- +-obj-$(CONFIG_ACCESS_TOKENID) += accesstokenid/ +-obj-$(CONFIG_HCK_VENDOR_HOOKS) += hck/ ++obj-$(CONFIG_EDMAC) += edmac/ ++obj-$(CONFIG_ARCH_BSP) += vendor/ +diff --git a/drivers/accesstokenid/Kconfig b/drivers/accesstokenid/Kconfig +deleted file mode 100644 +index 30d2957a1..000000000 +--- a/drivers/accesstokenid/Kconfig ++++ /dev/null +@@ -1,5 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config ACCESS_TOKENID +- bool "Access task's token" +- default n +- +diff --git a/drivers/accesstokenid/Makefile b/drivers/accesstokenid/Makefile +deleted file mode 100644 +index 738a550f8..000000000 +--- a/drivers/accesstokenid/Makefile ++++ /dev/null +@@ -1,2 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0-only +-obj-$(CONFIG_ACCESS_TOKENID) += access_tokenid.o +diff --git a/drivers/accesstokenid/access_tokenid.c b/drivers/accesstokenid/access_tokenid.c +deleted file mode 100644 +index 33a61ef16..000000000 +--- a/drivers/accesstokenid/access_tokenid.c ++++ /dev/null +@@ -1,397 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * access_tokenid.c +- * +- * Copyright (C) 2022-2023 Huawei Technologies Co., Ltd. All rights reserved. +- * +- */ +- +-#define pr_fmt(fmt) "access_token_id: " fmt +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "access_tokenid.h" +- +-DEFINE_RWLOCK(token_rwlock); +-#define ACCESS_TOKEN_UID KUIDT_INIT(3020) +-#define MAX_NODE_NUM 500 +-#define UINT32_T_BITS 32 +- +-static struct kmem_cache *g_cache = NULL; +-static struct token_perm_node *g_token_perm_root = NULL; +-static size_t g_total_node_num = 0; +- +-int access_tokenid_get_tokenid(struct file *file, void __user *uarg) +-{ +- return copy_to_user(uarg, ¤t->token, +- sizeof(current->token)) ? 
-EFAULT : 0; +-} +- +-static bool check_permission_for_set_tokenid(struct file *file, unsigned long long tokenid) +-{ +- kuid_t uid = current_uid(); +- struct inode *inode = file->f_inode; +- access_tokenid_inner *tokenid_inner = (access_tokenid_inner *)&tokenid; +- +- if (inode == NULL) { +- pr_err("%s: file inode is null\n", __func__); +- return false; +- } +- +- if (uid_eq(uid, GLOBAL_ROOT_UID) || +- uid_eq(uid, inode->i_uid)) { +- return true; +- } else if (uid_eq(uid, NWEBSPAWN_UID) && (tokenid_inner->render_flag == 1)) { +- return true; +- } +- +- return false; +-} +- +-int access_tokenid_set_tokenid(struct file *file, void __user *uarg) +-{ +- unsigned long long tmp = 0; +- +- if (copy_from_user(&tmp, uarg, sizeof(tmp))) +- return -EFAULT; +- +- if (!check_permission_for_set_tokenid(file, tmp)) +- return -EPERM; +- +- current->token = tmp; +- return 0; +-} +- +-static bool check_permission_for_ftokenid(struct file *file) +-{ +- int i; +- struct group_info *group_info; +- kuid_t uid = current_uid(); +- struct inode *inode = file->f_inode; +- +- if (inode == NULL) { +- pr_err("%s: file inode is null\n", __func__); +- return false; +- } +- +- if (uid_eq(uid, GLOBAL_ROOT_UID)) +- return true; +- +- group_info = get_current_groups(); +- for (i = 0; i < group_info->ngroups; i++) { +- kgid_t gid = group_info->gid[i]; +- +- if (gid_eq(gid, inode->i_gid)) { +- put_group_info(group_info); +- return true; +- } +- } +- +- put_group_info(group_info); +- return false; +-} +- +-int access_tokenid_get_ftokenid(struct file *file, void __user *uarg) +-{ +- if (!check_permission_for_ftokenid(file)) +- return -EPERM; +- +- return copy_to_user(uarg, ¤t->ftoken, +- sizeof(current->ftoken)) ? -EFAULT : 0; +-} +- +-int access_tokenid_set_ftokenid(struct file *file, void __user *uarg) +-{ +- unsigned long long tmp = 0; +- +- if (!check_permission_for_ftokenid(file)) +- return -EPERM; +- +- if (copy_from_user(&tmp, uarg, sizeof(tmp))) +- return -EFAULT; +- +- current->ftoken = tmp; +- return 0; +-} +- +-static bool check_permission_for_set_token_permission(void) +-{ +- kuid_t uid = current_uid(); +- return uid_eq(uid, ACCESS_TOKEN_UID); +-} +- +-static void add_node_to_left_tree_tail(struct token_perm_node *root_node, struct token_perm_node *node) +-{ +- if ((root_node == NULL) || (node == NULL)) +- return; +- +- struct token_perm_node *current_node = root_node; +- while (true) { +- if (current_node->left == NULL) { +- current_node->left = node; +- break; +- } +- current_node = current_node->left; +- } +-} +- +-static void find_node_by_token(struct token_perm_node *root_node, uint32_t token, +- struct token_perm_node **target_node, struct token_perm_node **parent_node) +-{ +- *target_node = NULL; +- *parent_node = NULL; +- struct token_perm_node *current_node = root_node; +- while (current_node != NULL) { +- if (current_node->perm_data.token == token) { +- *target_node = current_node; +- break; +- } +- *parent_node = current_node; +- if (current_node->perm_data.token > token) { +- current_node = current_node->left; +- } else { +- current_node = current_node->right; +- } +- } +-} +- +-static int add_node_to_tree(struct token_perm_node *root_node, struct token_perm_node *node) +-{ +- struct token_perm_node *target_node = NULL; +- struct token_perm_node *parent_node = NULL; +- find_node_by_token(root_node, node->perm_data.token, &target_node, &parent_node); +- if (target_node != NULL) { +- target_node->perm_data = node->perm_data; +- return 0; +- } +- if (g_total_node_num >= MAX_NODE_NUM) { +- pr_err("%s: the 
number of token nodes is exceeded.\n", __func__); +- return -EDQUOT; +- } +- if (parent_node == NULL) { +- g_token_perm_root = node; +- } else if (parent_node->perm_data.token > node->perm_data.token) { +- parent_node->left = node; +- } else { +- parent_node->right = node; +- } +- g_total_node_num++; +- return 1; +-} +- +-static struct token_perm_node *remove_node_by_token(struct token_perm_node *root_node, uint32_t token) +-{ +- struct token_perm_node *target_node = NULL; +- struct token_perm_node *parent_node = NULL; +- find_node_by_token(root_node, token, &target_node, &parent_node); +- if (target_node == NULL) { +- pr_err("%s: target token to be removed not found.\n", __func__); +- return NULL; +- } +- +- struct token_perm_node **new_node_addr = NULL; +- if (parent_node == NULL) { +- new_node_addr = &root_node; +- } else if (parent_node->perm_data.token > token) { +- new_node_addr = &(parent_node->left); +- } else { +- new_node_addr = &(parent_node->right); +- } +- if (target_node->right != NULL) { +- *new_node_addr = target_node->right; +- add_node_to_left_tree_tail(target_node->right, target_node->left); +- } else { +- *new_node_addr = target_node->left; +- } +- g_total_node_num--; +- return target_node; +-} +- +-int access_tokenid_add_permission(struct file *file, void __user *uarg) +-{ +- if (!check_permission_for_set_token_permission()) +- return -EPERM; +- +- struct token_perm_node *node = kmem_cache_zalloc(g_cache, GFP_KERNEL); +- if (node == NULL) +- return -ENOMEM; +- if (copy_from_user(&(node->perm_data), uarg, sizeof(ioctl_add_perm_data))) { +- kmem_cache_free(g_cache, node); +- return -EFAULT; +- } +- +- write_lock(&token_rwlock); +- int ret = add_node_to_tree(g_token_perm_root, node); +- write_unlock(&token_rwlock); +- if (ret <= 0) { +- kmem_cache_free(g_cache, node); +- return ret; +- } +- return 0; +-} +- +-int access_tokenid_remove_permission(struct file *file, void __user *uarg) +-{ +- if (!check_permission_for_set_token_permission()) +- return -EPERM; +- +- uint32_t token = 0; +- if (copy_from_user(&token, uarg, sizeof(token))) +- return -EFAULT; +- +- write_lock(&token_rwlock); +- struct token_perm_node *target_node = remove_node_by_token(g_token_perm_root, token); +- write_unlock(&token_rwlock); +- +- if (target_node != NULL) +- kmem_cache_free(g_cache, target_node); +- +- return 0; +-} +- +-int access_tokenid_set_permission(struct file *file, void __user *uarg) +-{ +- if (!check_permission_for_set_token_permission()) +- return -EPERM; +- +- ioctl_set_get_perm_data set_perm_data; +- if (copy_from_user(&set_perm_data, uarg, sizeof(set_perm_data))) +- return -EFAULT; +- +- uint32_t idx = set_perm_data.op_code / UINT32_T_BITS; +- if (idx >= MAX_PERM_GROUP_NUM) { +- pr_err("%s: invalid op_code.\n", __func__); +- return -EINVAL; +- } +- +- struct token_perm_node *target_node = NULL; +- struct token_perm_node *parent_node = NULL; +- write_lock(&token_rwlock); +- find_node_by_token(g_token_perm_root, set_perm_data.token, &target_node, &parent_node); +- if (target_node == NULL) { +- write_unlock(&token_rwlock); +- pr_err("%s: token not found.\n", __func__); +- return -ENODATA; +- } +- uint32_t bit_idx = set_perm_data.op_code % UINT32_T_BITS; +- if (set_perm_data.is_granted) { +- target_node->perm_data.perm[idx] |= (uint32_t)0x01 << bit_idx; +- } else { +- target_node->perm_data.perm[idx] &= ~((uint32_t)0x01 << bit_idx); +- } +- write_unlock(&token_rwlock); +- return 0; +-} +- +-int access_tokenid_get_permission(struct file *file, void __user *uarg) +-{ +- 
ioctl_set_get_perm_data get_perm_data; +- if (copy_from_user(&get_perm_data, uarg, sizeof(get_perm_data))) +- return -EFAULT; +- +- uint32_t idx = get_perm_data.op_code / UINT32_T_BITS; +- if (idx >= MAX_PERM_GROUP_NUM) { +- pr_err("%s: invalid op_code.\n", __func__); +- return -EINVAL; +- } +- +- struct token_perm_node *target_node = NULL; +- struct token_perm_node *parent_node = NULL; +- read_lock(&token_rwlock); +- find_node_by_token(g_token_perm_root, get_perm_data.token, &target_node, &parent_node); +- read_unlock(&token_rwlock); +- if (target_node == NULL) +- return -ENODATA; +- +- uint32_t bit_idx = get_perm_data.op_code % UINT32_T_BITS; +- return (target_node->perm_data.perm[idx] & ((uint32_t)0x01 << bit_idx)) >> bit_idx; +-} +- +-typedef int (*access_token_id_func)(struct file *file, void __user *arg); +- +-static access_token_id_func g_func_array[ACCESS_TOKENID_MAX_NR] = { +- NULL, /* reserved */ +- access_tokenid_get_tokenid, +- access_tokenid_set_tokenid, +- access_tokenid_get_ftokenid, +- access_tokenid_set_ftokenid, +- access_tokenid_add_permission, +- access_tokenid_remove_permission, +- access_tokenid_get_permission, +- access_tokenid_set_permission, +-}; +- +-static long access_tokenid_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- void __user *uarg = (void __user *)arg; +- unsigned int func_cmd = _IOC_NR(cmd); +- +- if (uarg == NULL) { +- pr_err("%s: invalid user uarg\n", __func__); +- return -EINVAL; +- } +- +- if (_IOC_TYPE(cmd) != ACCESS_TOKEN_ID_IOCTL_BASE) { +- pr_err("%s: access tokenid magic fail, TYPE=%d\n", +- __func__, _IOC_TYPE(cmd)); +- return -EINVAL; +- } +- +- if (func_cmd >= ACCESS_TOKENID_MAX_NR) { +- pr_err("%s: access tokenid cmd error, cmd:%d\n", +- __func__, func_cmd); +- return -EINVAL; +- } +- +- if (g_func_array[func_cmd]) +- return (*g_func_array[func_cmd])(file, uarg); +- +- return -EINVAL; +-} +- +-static const struct file_operations access_tokenid_fops = { +- .owner = THIS_MODULE, +- .unlocked_ioctl = access_tokenid_ioctl, +- .compat_ioctl = access_tokenid_ioctl, +-}; +- +-static struct miscdevice access_tokenid_device = { +- .minor = MISC_DYNAMIC_MINOR, +- .name = "access_token_id", +- .fops = &access_tokenid_fops, +-}; +- +-static int access_tokenid_init_module(void) +-{ +- int err; +- +- err = misc_register(&access_tokenid_device); +- if (err < 0) { +- pr_err("access_tokenid register failed\n"); +- return err; +- } +- +- g_cache = kmem_cache_create("access_token_node", sizeof(struct token_perm_node), 0, SLAB_HWCACHE_ALIGN, NULL); +- if (g_cache == NULL) { +- pr_err("access_tokenid kmem_cache create failed\n"); +- return -ENOMEM; +- } +- pr_info("access_tokenid init success\n"); +- return 0; +-} +- +-static void access_tokenid_exit_module(void) +-{ +- kmem_cache_destroy(g_cache); +- misc_deregister(&access_tokenid_device); +-} +- +-/* module entry points */ +-module_init(access_tokenid_init_module); +-module_exit(access_tokenid_exit_module); +diff --git a/drivers/accesstokenid/access_tokenid.h b/drivers/accesstokenid/access_tokenid.h +deleted file mode 100644 +index 7eb3119ef..000000000 +--- a/drivers/accesstokenid/access_tokenid.h ++++ /dev/null +@@ -1,73 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * access_tokenid.h +- * +- * Copyright (C) 2022-2023 Huawei Technologies Co., Ltd. All rights reserved. 
+- * +- */ +- +-#ifndef _ACCESS_TOKEN_ID_H +-#define _ACCESS_TOKEN_ID_H +- +-#include +-#include +- +-#define ACCESS_TOKEN_ID_IOCTL_BASE 'A' +-#define MAX_PERM_GROUP_NUM 64 +- +-enum { +- GET_TOKEN_ID = 1, +- SET_TOKEN_ID, +- GET_FTOKEN_ID, +- SET_FTOKEN_ID, +- ADD_PERMISSIONS, +- REMOVE_PERMISSIONS, +- GET_PERMISSION, +- SET_PERMISSION, +- ACCESS_TOKENID_MAX_NR +-}; +- +-typedef struct { +- unsigned int token_uniqueid : 20; +- unsigned int res : 5; +- unsigned int render_flag : 1; +- unsigned int dlp_flag : 1; +- unsigned int type : 2; +- unsigned int version : 3; +-} access_tokenid_inner; +- +-typedef struct { +- uint32_t token; +- uint32_t op_code; +- bool is_granted; +-} ioctl_set_get_perm_data; +- +-typedef struct { +- uint32_t token; +- uint32_t perm[MAX_PERM_GROUP_NUM]; +-} ioctl_add_perm_data; +- +-struct token_perm_node { +- ioctl_add_perm_data perm_data; +- struct token_perm_node *left; +- struct token_perm_node *right; +-}; +- +-#define ACCESS_TOKENID_GET_TOKENID \ +- _IOR(ACCESS_TOKEN_ID_IOCTL_BASE, GET_TOKEN_ID, unsigned long long) +-#define ACCESS_TOKENID_SET_TOKENID \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, SET_TOKEN_ID, unsigned long long) +-#define ACCESS_TOKENID_GET_FTOKENID \ +- _IOR(ACCESS_TOKEN_ID_IOCTL_BASE, GET_FTOKEN_ID, unsigned long long) +-#define ACCESS_TOKENID_SET_FTOKENID \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, SET_FTOKEN_ID, unsigned long long) +-#define ACCESS_TOKENID_ADD_PERMISSIONS \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, ADD_PERMISSIONS, ioctl_add_perm_data) +-#define ACCESS_TOKENID_REMOVE_PERMISSIONS \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, REMOVE_PERMISSIONS, uint32_t) +-#define ACCESS_TOKENID_GET_PERMISSION \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, GET_PERMISSION, ioctl_set_get_perm_data) +-#define ACCESS_TOKENID_SET_PERMISSION \ +- _IOW(ACCESS_TOKEN_ID_IOCTL_BASE, SET_PERMISSION, ioctl_set_get_perm_data) +- +-#endif /* _ACCESS_TOKEN_ID_H */ +diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig +index 19fd0a23b..07aa8ae0a 100644 +--- a/drivers/android/Kconfig ++++ b/drivers/android/Kconfig +@@ -1,13 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + menu "Android" + +-config ANDROID +- bool "Android Drivers" +- help +- Enable support for various drivers needed on the Android platform +- +-if ANDROID +- + config ANDROID_BINDER_IPC + bool "Android Binder IPC Driver" + depends on MMU +@@ -54,18 +47,4 @@ config ANDROID_BINDER_IPC_SELFTEST + exhaustively with combinations of various buffer sizes and + alignments. 
+ +-config BINDER_TRANSACTION_PROC_BRIEF +- bool "Brief debug info for binder transaction and proc" +- depends on ANDROID_BINDER_IPC +- default n +- help +- +- Enable binder optimization +- +-config BINDER_SENDER_INFO +- bool "Get binder sender info" +- default y +- +-endif # if ANDROID +- + endmenu +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 53f27f379..94f10c6eb 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -67,10 +67,6 @@ + #include + #include + #include +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +-#include +-#include +-#endif + + #include + +@@ -96,30 +92,8 @@ static atomic_t binder_last_id; + static int proc_show(struct seq_file *m, void *unused); + DEFINE_SHOW_ATTRIBUTE(proc); + +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +-static int binder_transaction_proc_show(struct seq_file *m, void *unused); +-DEFINE_PROC_SHOW_ATTRIBUTE(binder_transaction_proc); +-#endif +- + #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) + +-#ifdef CONFIG_ACCESS_TOKENID +-#define ENABLE_ACCESS_TOKENID 1 +-#else +-#define ENABLE_ACCESS_TOKENID 0 +-#endif /* CONFIG_ACCESS_TOKENID */ +- +-#ifdef CONFIG_BINDER_SENDER_INFO +-#define ENABLE_BINDER_SENDER_INFO 1 +-#else +-#define ENABLE_BINDER_SENDER_INFO 0 +-#endif /* CONFIG_BINDER_SENDER_INFO */ +- +-#define ACCESS_TOKENID_FEATURE_VALUE (ENABLE_ACCESS_TOKENID << 0) +-#define BINDER_SENDER_INFO_FEATURE_VALUE (ENABLE_BINDER_SENDER_INFO << 2) +- +-#define BINDER_CURRENT_FEATURE_SET (ACCESS_TOKENID_FEATURE_VALUE | BINDER_SENDER_INFO_FEATURE_VALUE) +- + enum { + BINDER_DEBUG_USER_ERROR = 1U << 0, + BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, +@@ -573,16 +547,6 @@ static void binder_free_thread(struct binder_thread *thread); + static void binder_free_proc(struct binder_proc *proc); + static void binder_inc_node_tmpref_ilocked(struct binder_node *node); + +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +-static inline u64 binder_clock(void) +-{ +-#ifdef CONFIG_TRACE_CLOCK +- return trace_clock_local(); +-#endif +- return 0; +-} +-#endif +- + static bool binder_has_work_ilocked(struct binder_thread *thread, + bool do_proc_work) + { +@@ -3228,25 +3192,13 @@ static void binder_transaction(struct binder_proc *proc, + (u64)tr->data_size, (u64)tr->offsets_size, + (u64)extra_buffers_size); + +- if (!reply && !(tr->flags & TF_ONE_WAY)) { ++ if (!reply && !(tr->flags & TF_ONE_WAY)) + t->from = thread; +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->from_pid = -1; +- t->from_tid = -1; +-#endif +- } else { ++ else + t->from = NULL; +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->from_pid = thread->proc->pid; +- t->from_tid = thread->pid; +-#endif +- } +- ++ t->from_pid = proc->pid; ++ t->from_tid = thread->pid; + t->sender_euid = task_euid(proc->tsk); +-#ifdef CONFIG_ACCESS_TOKENID +- t->sender_tokenid = current->token; +- t->first_tokenid = current->ftoken; +-#endif /* CONFIG_ACCESS_TOKENID */ + t->to_proc = target_proc; + t->to_thread = target_thread; + t->code = tr->code; +@@ -3661,9 +3613,6 @@ static void binder_transaction(struct binder_proc *proc, + goto err_dead_proc_or_thread; + } + BUG_ON(t->buffer->async_transaction != 0); +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->timestamp = in_reply_to->timestamp; +-#endif + binder_pop_transaction_ilocked(target_thread, in_reply_to); + binder_enqueue_thread_work_ilocked(target_thread, &t->work); + target_proc->outstanding_txns++; +@@ -3684,9 +3633,6 @@ static void binder_transaction(struct binder_proc *proc, + t->need_reply = 1; + t->from_parent = 
thread->transaction_stack; + thread->transaction_stack = t; +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->timestamp = binder_clock(); +-#endif + binder_inner_proc_unlock(proc); + return_error = binder_proc_transaction(t, + target_proc, target_thread); +@@ -3699,9 +3645,6 @@ static void binder_transaction(struct binder_proc *proc, + } else { + BUG_ON(target_node == NULL); + BUG_ON(t->buffer->async_transaction != 1); +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->timestamp = binder_clock(); +-#endif + return_error = binder_proc_transaction(t, target_proc, NULL); + /* + * Let the caller know when async transaction reaches a frozen +@@ -4732,18 +4675,8 @@ static int binder_thread_read(struct binder_proc *proc, + trd->sender_pid = + task_tgid_nr_ns(sender, + task_active_pid_ns(current)); +-#ifdef CONFIG_BINDER_SENDER_INFO +- binder_inner_proc_lock(thread->proc); +- thread->sender_pid_nr = task_tgid_nr(sender); +- binder_inner_proc_unlock(thread->proc); +-#endif + } else { + trd->sender_pid = 0; +-#ifdef CONFIG_BINDER_SENDER_INFO +- binder_inner_proc_lock(thread->proc); +- thread->sender_pid_nr = 0; +- binder_inner_proc_unlock(thread->proc); +-#endif + } + + ret = binder_apply_fd_fixups(proc, t); +@@ -4824,12 +4757,6 @@ static int binder_thread_read(struct binder_proc *proc, + if (t_from) + binder_thread_dec_tmpref(t_from); + t->buffer->allow_user_free = 1; +-#ifdef CONFIG_ACCESS_TOKENID +- binder_inner_proc_lock(thread->proc); +- thread->tokens.sender_tokenid = t->sender_tokenid; +- thread->tokens.first_tokenid = t->first_tokenid; +- binder_inner_proc_unlock(thread->proc); +-#endif /* CONFIG_ACCESS_TOKENID */ + if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { + binder_inner_proc_lock(thread->proc); + t->to_parent = thread->transaction_stack; +@@ -5067,10 +4994,6 @@ static int binder_thread_release(struct binder_proc *proc, + t = t->to_parent; + } else if (t->from == thread) { + t->from = NULL; +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- t->from_pid = -1; +- t->from_tid = -1; +-#endif + t = t->from_parent; + } else + BUG(); +@@ -5134,21 +5057,14 @@ static __poll_t binder_poll(struct file *filp, + return 0; + } + +-static int binder_ioctl_write_read(struct file *filp, +- unsigned int cmd, unsigned long arg, ++static int binder_ioctl_write_read(struct file *filp, unsigned long arg, + struct binder_thread *thread) + { + int ret = 0; + struct binder_proc *proc = filp->private_data; +- unsigned int size = _IOC_SIZE(cmd); + void __user *ubuf = (void __user *)arg; + struct binder_write_read bwr; + +- if (size != sizeof(struct binder_write_read)) { +- ret = -EINVAL; +- goto out; +- } +- + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { + ret = -EFAULT; + goto out; +@@ -5425,7 +5341,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + int ret; + struct binder_proc *proc = filp->private_data; + struct binder_thread *thread; +- unsigned int size = _IOC_SIZE(cmd); + void __user *ubuf = (void __user *)arg; + + /*pr_info("binder_ioctl: %d:%d %x %lx\n", +@@ -5447,7 +5362,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + + switch (cmd) { + case BINDER_WRITE_READ: +- ret = binder_ioctl_write_read(filp, cmd, arg, thread); ++ ret = binder_ioctl_write_read(filp, arg, thread); + if (ret) + goto err; + break; +@@ -5490,10 +5405,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + case BINDER_VERSION: { + struct binder_version __user *ver = ubuf; + +- if (size != sizeof(struct binder_version)) { 
+- ret = -EINVAL; +- goto err; +- } + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, + &ver->protocol_version)) { + ret = -EINVAL; +@@ -5633,76 +5544,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + if (ret < 0) + goto err; + break; +- case BINDER_FEATURE_SET: { +- struct binder_feature_set __user *features = ubuf; +- +- if (size != sizeof(struct binder_feature_set)) { +- ret = -EINVAL; +- goto err; +- } +- if (put_user(BINDER_CURRENT_FEATURE_SET, &features->feature_set)) { +- ret = -EINVAL; +- goto err; +- } +- break; +- } +-#ifdef CONFIG_ACCESS_TOKENID +- case BINDER_GET_ACCESS_TOKEN: { +- struct access_token __user *tokens = ubuf; +- u64 token, ftoken; +- +- if (size != sizeof(struct access_token)) { +- ret = -EINVAL; +- goto err; +- } +- binder_inner_proc_lock(proc); +- token = thread->tokens.sender_tokenid; +- ftoken = thread->tokens.first_tokenid; +- binder_inner_proc_unlock(proc); +- if (put_user(token, &tokens->sender_tokenid)) { +- ret = -EINVAL; +- goto err; +- } +- if (put_user(ftoken, &tokens->first_tokenid)) { +- ret = -EINVAL; +- goto err; +- } +- break; +- } +-#endif /* CONFIG_ACCESS_TOKENID */ +- +-#ifdef CONFIG_BINDER_SENDER_INFO +- case BINDER_GET_SENDER_INFO: { +- struct binder_sender_info __user *sender = ubuf; +- u64 token, ftoken, sender_pid_nr; +- if (size != sizeof(struct binder_sender_info)) { +- ret = -EINVAL; +- goto err; +- } +- binder_inner_proc_lock(proc); +-#ifdef CONFIG_ACCESS_TOKENID +- token = thread->tokens.sender_tokenid; +- ftoken = thread->tokens.first_tokenid; +-#endif /*CONFIG_ACCESS_TOKENID*/ +- sender_pid_nr = thread->sender_pid_nr; +- binder_inner_proc_unlock(proc); +-#ifdef CONFIG_ACCESS_TOKENID +- if (put_user(token, &sender->tokens.sender_tokenid)) { +- ret = -EFAULT; +- goto err; +- } +- if (put_user(ftoken, &sender->tokens.first_tokenid)) { +- ret = -EFAULT; +- goto err; +- } +-#endif /*CONFIG_ACCESS_TOKENID*/ +- if (put_user(sender_pid_nr, &sender->sender_pid_nr)) { +- ret = -EFAULT; +- goto err; +- } +- break; +- } +-#endif /* CONFIG_BINDER_SENDER_INFO */ + default: + ret = -EINVAL; + goto err; +@@ -6693,151 +6534,6 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = { + {} /* terminator */ + }; + +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +-static void print_binder_transaction_brief_ilocked( +- struct seq_file *m, +- const char *prefix, struct binder_transaction *t, +- u64 timestamp) +-{ +- struct binder_proc *to_proc = NULL; +- int from_pid = 0; +- int from_tid = 0; +- int to_pid = 0; +- u64 sec; +- u32 nsec; +- +- spin_lock(&t->lock); +- to_proc = t->to_proc; +- from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->from_pid; +- from_tid = t->from ? t->from->pid : t->from_tid; +- to_pid = to_proc ? to_proc->pid : 0; +- sec = div_u64_rem((timestamp - t->timestamp), 1000000000, &nsec); +- +- seq_printf(m, +- "%s%d:%d to %d:%d code %x wait:%llu.%u s\n", +- prefix, +- from_pid, from_tid, +- to_pid, t->to_thread ? t->to_thread->pid : 0, +- t->code, +- timestamp > t->timestamp ? sec : 0, +- timestamp > t->timestamp ? 
nsec : 0); +- spin_unlock(&t->lock); +-} +- +-static void print_binder_work_transaction_nilocked(struct seq_file *m, +- const char *prefix, struct binder_work *w, +- u64 timestamp) +-{ +- struct binder_transaction *t = NULL; +- +- switch (w->type) { +- case BINDER_WORK_TRANSACTION: +- t = container_of(w, struct binder_transaction, work); +- print_binder_transaction_brief_ilocked(m, prefix, t, timestamp); +- break; +- +- default: +- break; +- } +-} +- +-static void print_binder_transaction_brief(struct seq_file *m, +- struct binder_proc *proc, +- u64 timestamp) +-{ +- struct binder_work *w = NULL; +- struct rb_node *n = NULL; +- struct binder_node *last_node = NULL; +- size_t start_pos = m->count; +- size_t header_pos = m->count; +- +- /* sync binder / not one way */ +- binder_inner_proc_lock(proc); +- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { +- struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); +- struct binder_transaction *t = thread->transaction_stack; +- while (t) { +- if (t->from == thread) { +- print_binder_transaction_brief_ilocked(m, "\t", t, timestamp); +- t = t->from_parent; +- } else if (t->to_thread == thread) { +- t = t->to_parent; +- } else { +- t = NULL; +- } +- } +- } +- +- /* async binder / one way */ +- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { +- struct binder_node *node = rb_entry(n, struct binder_node, rb_node); +- /* +- * take a temporary reference on the node so it +- * survives and isn't removed from the tree +- * while we print it. +- */ +- binder_inc_node_tmpref_ilocked(node); +- /* Need to drop inner lock to take node lock */ +- binder_inner_proc_unlock(proc); +- if (last_node) +- binder_put_node(last_node); +- binder_node_inner_lock(node); +- list_for_each_entry(w, &node->async_todo, entry) +- print_binder_work_transaction_nilocked(m, "async\t", w, timestamp); +- binder_node_inner_unlock(node); +- last_node = node; +- binder_inner_proc_lock(proc); +- } +- binder_inner_proc_unlock(proc); +- +- if (last_node) +- binder_put_node(last_node); +- +- if (m->count == header_pos) +- m->count = start_pos; +-} +- +-static void print_binder_proc_brief(struct seq_file *m, +- struct binder_proc *proc) +-{ +- struct binder_thread *thread = NULL; +- int ready_threads = 0; +- size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc); +- +- seq_printf(m, "%d\t", proc->pid); +- seq_printf(m, "%s\t", proc->context->name); +- +- binder_inner_proc_lock(proc); +- list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) +- ready_threads++; +- +- seq_printf(m, "%d\t%d\t%d\t%d" +- "\t%zd\n", proc->requested_threads, +- proc->requested_threads_started, proc->max_threads, +- ready_threads, +- free_async_space); +- binder_inner_proc_unlock(proc); +-} +- +-static int binder_transaction_proc_show(struct seq_file *m, void *unused) +-{ +- struct binder_proc *proc = NULL; +- u64 now = 0; +- +- mutex_lock(&binder_procs_lock); +- now = binder_clock(); +- hlist_for_each_entry(proc, &binder_procs, proc_node) +- print_binder_transaction_brief(m, proc, now); +- +- seq_printf(m, "\npid\tcontext\t\trequest\tstarted\tmax\tready\tfree_async_space\n"); +- hlist_for_each_entry(proc, &binder_procs, proc_node) +- print_binder_proc_brief(m, proc); +- mutex_unlock(&binder_procs_lock); +- +- return 0; +-} +-#endif +- + static int __init init_binder_device(const char *name) + { + int ret; +@@ -6895,16 +6591,6 @@ static int __init binder_init(void) + binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", + 
binder_debugfs_dir_entry_root); + +- if (binder_debugfs_dir_entry_root) { +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- proc_create_data("transaction_proc", +- S_IRUGO, +- NULL, +- &binder_transaction_proc_proc_ops, +- NULL); +-#endif +- } +- + if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && + strcmp(binder_devices_param, "") != 0) { + /* +diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h +index df7d0e2eb..5b7c80b99 100644 +--- a/drivers/android/binder_internal.h ++++ b/drivers/android/binder_internal.h +@@ -490,12 +490,6 @@ struct binder_thread { + struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; +-#ifdef CONFIG_ACCESS_TOKENID +- struct access_token tokens; +-#endif /* CONFIG_ACCESS_TOKENID */ +-#ifdef CONFIG_BINDER_SENDER_INFO +- __u64 sender_pid_nr; +-#endif /* CONFIG_BINDER_SENDER_INFO */ + }; + + /** +@@ -523,9 +517,6 @@ struct binder_transaction { + struct binder_thread *from; + pid_t from_pid; + pid_t from_tid; +-#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF +- u64 timestamp; +-#endif + struct binder_transaction *from_parent; + struct binder_proc *to_proc; + struct binder_thread *to_thread; +@@ -549,10 +540,6 @@ struct binder_transaction { + * during thread teardown + */ + spinlock_t lock; +-#ifdef CONFIG_ACCESS_TOKENID +- u64 sender_tokenid; +- u64 first_tokenid; +-#endif /* CONFIG_ACCESS_TOKENID */ + }; + + /** +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 774b46cdf..ef427ee78 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -188,38 +188,9 @@ static const struct attribute_group crash_note_cpu_attr_group = { + }; + #endif + +-#ifdef CONFIG_CPU_ISOLATION_OPT +-static ssize_t isolate_show(struct device *dev, +- struct device_attribute *attr, char *buf) +-{ +- struct cpu *cpu = container_of(dev, struct cpu, dev); +- ssize_t rc; +- int cpuid = cpu->dev.id; +- unsigned int isolated = cpu_isolated(cpuid); +- +- rc = sysfs_emit(buf, "%d\n", isolated); +- +- return rc; +-} +- +-static DEVICE_ATTR_RO(isolate); +- +-static struct attribute *cpu_isolated_attrs[] = { +- &dev_attr_isolate.attr, +- NULL +-}; +- +-static struct attribute_group cpu_isolated_attr_group = { +- .attrs = cpu_isolated_attrs, +-}; +-#endif +- + static const struct attribute_group *common_cpu_attr_groups[] = { + #ifdef CONFIG_KEXEC_CORE + &crash_note_cpu_attr_group, +-#endif +-#ifdef CONFIG_CPU_ISOLATION_OPT +- &cpu_isolated_attr_group, + #endif + NULL + }; +@@ -227,9 +198,6 @@ static const struct attribute_group *common_cpu_attr_groups[] = { + static const struct attribute_group *hotplugable_cpu_attr_groups[] = { + #ifdef CONFIG_KEXEC_CORE + &crash_note_cpu_attr_group, +-#endif +-#ifdef CONFIG_CPU_ISOLATION_OPT +- &cpu_isolated_attr_group, + #endif + NULL + }; +@@ -260,9 +228,6 @@ static struct cpu_attr cpu_attrs[] = { + _CPU_ATTR(online, &__cpu_online_mask), + _CPU_ATTR(possible, &__cpu_possible_mask), + _CPU_ATTR(present, &__cpu_present_mask), +-#ifdef CONFIG_CPU_ISOLATION_OPT +- _CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask), +-#endif + }; + + /* +@@ -527,9 +492,6 @@ static struct attribute *cpu_root_attrs[] = { + &cpu_attrs[0].attr.attr, + &cpu_attrs[1].attr.attr, + &cpu_attrs[2].attr.attr, +-#ifdef CONFIG_CPU_ISOLATION_OPT +- &cpu_attrs[3].attr.attr, +-#endif + &dev_attr_kernel_max.attr, + &dev_attr_offline.attr, + &dev_attr_isolated.attr, +diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig +index 31b23ce92..af201392e 100644 +--- a/drivers/block/zram/Kconfig ++++ b/drivers/block/zram/Kconfig +@@ -96,5 +96,3 @@ config 
ZRAM_MULTI_COMP + re-compress pages using a potentially slower but more effective + compression algorithm. Note, that IDLE page recompression + requires ZRAM_TRACK_ENTRY_ACTIME. +- +-source "drivers/block/zram/zram_group/Kconfig" +diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile +index a8947f7fa..de9e45790 100644 +--- a/drivers/block/zram/Makefile ++++ b/drivers/block/zram/Makefile +@@ -1,9 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only + zram-y := zcomp.o zram_drv.o + +-zram-$(CONFIG_ZRAM_GROUP) += zram_group/zram_group.o zram_group/zlist.o zram_group/group_writeback.o +- + obj-$(CONFIG_ZRAM) += zram.o +- +-ccflags-$(CONFIG_ZRAM_GROUP) += -I$(srctree)/drivers/block/zram/zram_group/ +-ccflags-$(CONFIG_HYPERHOLD) += -I$(srctree)/drivers/hyperhold/ +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index 33011ea3c..44cf0e51d 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -34,10 +34,6 @@ + #include + #include + +-#ifdef CONFIG_ZRAM_GROUP +-#include +-#endif +- + #include "zram_drv.h" + + static DEFINE_IDR(zram_index_idr); +@@ -61,6 +57,21 @@ static void zram_free_page(struct zram *zram, size_t index); + static int zram_read_page(struct zram *zram, struct page *page, u32 index, + struct bio *parent); + ++static int zram_slot_trylock(struct zram *zram, u32 index) ++{ ++ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); ++} ++ ++static void zram_slot_lock(struct zram *zram, u32 index) ++{ ++ bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); ++} ++ ++static void zram_slot_unlock(struct zram *zram, u32 index) ++{ ++ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); ++} ++ + static inline bool init_done(struct zram *zram) + { + return zram->disksize; +@@ -71,6 +82,35 @@ static inline struct zram *dev_to_zram(struct device *dev) + return (struct zram *)dev_to_disk(dev)->private_data; + } + ++static unsigned long zram_get_handle(struct zram *zram, u32 index) ++{ ++ return zram->table[index].handle; ++} ++ ++static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) ++{ ++ zram->table[index].handle = handle; ++} ++ ++/* flag operations require table entry bit_spin_lock() being held */ ++static bool zram_test_flag(struct zram *zram, u32 index, ++ enum zram_pageflags flag) ++{ ++ return zram->table[index].flags & BIT(flag); ++} ++ ++static void zram_set_flag(struct zram *zram, u32 index, ++ enum zram_pageflags flag) ++{ ++ zram->table[index].flags |= BIT(flag); ++} ++ ++static void zram_clear_flag(struct zram *zram, u32 index, ++ enum zram_pageflags flag) ++{ ++ zram->table[index].flags &= ~BIT(flag); ++} ++ + static inline void zram_set_element(struct zram *zram, u32 index, + unsigned long element) + { +@@ -82,6 +122,19 @@ static unsigned long zram_get_element(struct zram *zram, u32 index) + return zram->table[index].element; + } + ++static size_t zram_get_obj_size(struct zram *zram, u32 index) ++{ ++ return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); ++} ++ ++static void zram_set_obj_size(struct zram *zram, ++ u32 index, size_t size) ++{ ++ unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; ++ ++ zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; ++} ++ + static inline bool zram_allocated(struct zram *zram, u32 index) + { + return zram_get_obj_size(zram, index) || +@@ -563,6 +616,9 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, + submit_bio(bio); + } + ++#define PAGE_WB_SIG "page_index=" ++ ++#define 
PAGE_WRITEBACK 0 + #define HUGE_WRITEBACK (1<<0) + #define IDLE_WRITEBACK (1<<1) + #define INCOMPRESSIBLE_WRITEBACK (1<<2) +@@ -588,8 +644,17 @@ static ssize_t writeback_store(struct device *dev, + mode = IDLE_WRITEBACK | HUGE_WRITEBACK; + else if (sysfs_streq(buf, "incompressible")) + mode = INCOMPRESSIBLE_WRITEBACK; +- else +- return -EINVAL; ++ else { ++ if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) ++ return -EINVAL; ++ ++ if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || ++ index >= nr_pages) ++ return -EINVAL; ++ ++ nr_pages = 1; ++ mode = PAGE_WRITEBACK; ++ } + + down_read(&zram->init_lock); + if (!init_done(zram)) { +@@ -608,7 +673,7 @@ static ssize_t writeback_store(struct device *dev, + goto release_init_lock; + } + +- for (index = 0; index < nr_pages; index++) { ++ for (; nr_pages != 0; index++, nr_pages--) { + spin_lock(&zram->wb_limit_lock); + if (zram->wb_limit_enable && !zram->bd_wb_limit) { + spin_unlock(&zram->wb_limit_lock); +@@ -1168,66 +1233,6 @@ static DEVICE_ATTR_RO(bd_stat); + #endif + static DEVICE_ATTR_RO(debug_stat); + +-#ifdef CONFIG_ZRAM_GROUP +-static ssize_t group_show(struct device *dev, struct device_attribute *attr, char *buf) +-{ +- struct zram *zram = dev_to_zram(dev); +- int ret = 0; +- +- down_read(&zram->init_lock); +- if (zram->zgrp_ctrl == ZGRP_NONE) +- ret = snprintf(buf, PAGE_SIZE - 1, "disable\n"); +- else if (zram->zgrp_ctrl == ZGRP_TRACK) +- ret = snprintf(buf, PAGE_SIZE - 1, "readonly\n"); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- else if (zram->zgrp_ctrl == ZGRP_WRITE) +- ret = snprintf(buf, PAGE_SIZE - 1, "readwrite\n"); +-#endif +- up_read(&zram->init_lock); +- +- return ret; +-} +- +-static ssize_t group_store(struct device *dev, struct device_attribute *attr, +- const char *buf, size_t len) +-{ +- struct zram *zram = dev_to_zram(dev); +- int ret; +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +- u32 op, gid, index; +- +- ret = sscanf(buf, "%u %u %u", &op, &index, &gid); +- if (ret == 3) { +- pr_info("op[%u] index[%u] gid[%u].\n", op, index, gid); +- group_debug(zram, op, index, gid); +- return len; +- } +-#endif +- +- ret = len; +- down_write(&zram->init_lock); +- if (init_done(zram)) { +- pr_info("Can't setup group ctrl for initialized device!\n"); +- ret = -EBUSY; +- goto out; +- } +- if (!strcmp(buf, "disable\n")) +- zram->zgrp_ctrl = ZGRP_NONE; +- else if (!strcmp(buf, "readonly\n")) +- zram->zgrp_ctrl = ZGRP_TRACK; +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- else if (!strcmp(buf, "readwrite\n")) +- zram->zgrp_ctrl = ZGRP_WRITE; +-#endif +- else +- ret = -EINVAL; +-out: +- up_write(&zram->init_lock); +- +- return ret; +-} +-#endif +- + static void zram_meta_free(struct zram *zram, u64 disksize) + { + size_t num_pages = disksize >> PAGE_SHIFT; +@@ -1243,9 +1248,6 @@ static void zram_meta_free(struct zram *zram, u64 disksize) + zs_destroy_pool(zram->mem_pool); + vfree(zram->table); + zram->table = NULL; +-#ifdef CONFIG_ZRAM_GROUP +- zram_group_deinit(zram); +-#endif + } + + static bool zram_meta_alloc(struct zram *zram, u64 disksize) +@@ -1266,10 +1268,6 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) + + if (!huge_class_size) + huge_class_size = zs_huge_class_size(zram->mem_pool); +-#ifdef CONFIG_ZRAM_GROUP +- zram_group_init(zram, num_pages); +-#endif +- + return true; + } + +@@ -1282,10 +1280,6 @@ static void zram_free_page(struct zram *zram, size_t index) + { + unsigned long handle; + +-#ifdef CONFIG_ZRAM_GROUP +- zram_group_untrack_obj(zram, index); +-#endif +- + #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME + 
zram->table[index].ac_time = 0; + #endif +@@ -1389,20 +1383,6 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index, + int ret; + + zram_slot_lock(zram, index); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- if (!parent) { +- ret = zram_group_fault_obj(zram, index); +- if (ret) { +- zram_slot_unlock(zram, index); +- return ret; +- } +- } +- +- if (zram_test_flag(zram, index, ZRAM_GWB)) { +- zram_slot_unlock(zram, index); +- return -EIO; +- } +-#endif + if (!zram_test_flag(zram, index, ZRAM_WB)) { + /* Slot should be locked through out the function call */ + ret = zram_read_from_zspool(zram, page, index); +@@ -1570,9 +1550,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) + zram_set_handle(zram, index, handle); + zram_set_obj_size(zram, index, comp_len); + } +-#ifdef CONFIG_ZRAM_GROUP +- zram_group_track_obj(zram, index, page_memcg(page)); +-#endif + zram_slot_unlock(zram, index); + + /* Update stats */ +@@ -2198,9 +2175,6 @@ static DEVICE_ATTR_RW(writeback_limit_enable); + static DEVICE_ATTR_RW(recomp_algorithm); + static DEVICE_ATTR_WO(recompress); + #endif +-#ifdef CONFIG_ZRAM_GROUP +-static DEVICE_ATTR_RW(group); +-#endif + + static struct attribute *zram_disk_attrs[] = { + &dev_attr_disksize.attr, +@@ -2227,9 +2201,6 @@ static struct attribute *zram_disk_attrs[] = { + #ifdef CONFIG_ZRAM_MULTI_COMP + &dev_attr_recomp_algorithm.attr, + &dev_attr_recompress.attr, +-#endif +-#ifdef CONFIG_ZRAM_GROUP +- &dev_attr_group.attr, + #endif + NULL, + }; +diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h +index 8b491f82f..35e322144 100644 +--- a/drivers/block/zram/zram_drv.h ++++ b/drivers/block/zram/zram_drv.h +@@ -21,10 +21,6 @@ + + #include "zcomp.h" + +-#ifdef CONFIG_ZRAM_GROUP +-#include "zram_group.h" +-#endif +- + #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) + #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) + #define ZRAM_LOGICAL_BLOCK_SHIFT 12 +@@ -42,15 +38,7 @@ + * + * We use BUILD_BUG_ON() to make sure that zram pageflags don't overflow. 
+ */ +-#ifdef CONFIG_ZRAM_GROUP +-/* reserve 16 bits for group id */ +-#define ZRAM_SIZE_SHIFT 24 +-#define ZRAM_GRPID_SHIFT 16 +-#define ZRAM_GRPID_MASK (((1UL << ZRAM_GRPID_SHIFT) - 1) << ZRAM_SIZE_SHIFT) +-#define ZRAM_FLAG_SHIFT (ZRAM_SIZE_SHIFT + ZRAM_GRPID_SHIFT) +-#else + #define ZRAM_FLAG_SHIFT (PAGE_SHIFT + 1) +-#endif + + /* Only 2 bits are allowed for comp priority index */ + #define ZRAM_COMP_PRIORITY_MASK 0x3 +@@ -64,10 +52,6 @@ enum zram_pageflags { + ZRAM_UNDER_WB, /* page is under writeback */ + ZRAM_HUGE, /* Incompressible page */ + ZRAM_IDLE, /* not accessed page since last idle marking */ +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- ZRAM_GWB, /* obj is group writeback*/ +- ZRAM_FAULT, /* obj is needed by a pagefault req */ +-#endif + ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */ + + ZRAM_COMP_PRIORITY_BIT1, /* First bit of comp priority index */ +@@ -121,10 +105,6 @@ struct zram_stats { + + struct zram { + struct zram_table_entry *table; +-#ifdef CONFIG_ZRAM_GROUP +- struct zram_group *zgrp; +- unsigned int zgrp_ctrl; +-#endif + struct zs_pool *mem_pool; + struct zcomp *comps[ZRAM_MAX_COMPS]; + struct gendisk *disk; +@@ -160,86 +140,4 @@ struct zram { + struct dentry *debugfs_dir; + #endif + }; +- +-static inline int zram_slot_trylock(struct zram *zram, u32 index) +-{ +- return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); +-} +- +-static inline void zram_slot_lock(struct zram *zram, u32 index) +-{ +- bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags); +-} +- +-static inline void zram_slot_unlock(struct zram *zram, u32 index) +-{ +- bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); +-} +- +-static inline unsigned long zram_get_handle(struct zram *zram, u32 index) +-{ +- return zram->table[index].handle; +-} +- +-static inline void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) +-{ +- zram->table[index].handle = handle; +-} +- +-/* flag operations require table entry bit_spin_lock() being held */ +-static inline bool zram_test_flag(struct zram *zram, u32 index, +- enum zram_pageflags flag) +-{ +- return zram->table[index].flags & BIT(flag); +-} +- +-static inline void zram_set_flag(struct zram *zram, u32 index, +- enum zram_pageflags flag) +-{ +- zram->table[index].flags |= BIT(flag); +-} +- +-static inline void zram_clear_flag(struct zram *zram, u32 index, +- enum zram_pageflags flag) +-{ +- zram->table[index].flags &= ~BIT(flag); +-} +-#ifdef CONFIG_ZRAM_GROUP +-static inline size_t zram_get_obj_size(struct zram *zram, u32 index) +-{ +- return zram->table[index].flags & (BIT(ZRAM_SIZE_SHIFT) - 1); +-} +- +-static inline void zram_set_obj_size(struct zram *zram, u32 index, size_t size) +-{ +- unsigned long flags = zram->table[index].flags >> ZRAM_SIZE_SHIFT; +- +- zram->table[index].flags = (flags << ZRAM_SIZE_SHIFT) | size; +-} +- +-void zram_group_init(struct zram *zram, u32 nr_obj); +-void zram_group_deinit(struct zram *zram); +-void zram_group_track_obj(struct zram *zram, u32 index, struct mem_cgroup *memcg); +-void zram_group_untrack_obj(struct zram *zram, u32 index); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-int zram_group_fault_obj(struct zram *zram, u32 index); +-#endif +- +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +-void group_debug(struct zram *zram, u32 op, u32 index, u32 gid); +-#endif +- +-#else +-static inline size_t zram_get_obj_size(struct zram *zram, u32 index) +-{ +- return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); +-} +- +-static inline void zram_set_obj_size(struct zram *zram, u32 index, size_t 
size) +-{ +- unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; +- +- zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; +-} +-#endif + #endif +diff --git a/drivers/block/zram/zram_group/Kconfig b/drivers/block/zram/zram_group/Kconfig +deleted file mode 100644 +index 0eacf79fb..000000000 +--- a/drivers/block/zram/zram_group/Kconfig ++++ /dev/null +@@ -1,24 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config ZRAM_GROUP +- bool "Manage Zram objs with mem_cgroup" +- depends on ZRAM && MEMCG +- help +- Manage Zram objs with mem_cgroup. +- +-config ZRAM_GROUP_DEBUG +- bool "Debug info for zram group" +- depends on ZRAM_GROUP +- help +- Debug info for ZRAM_GROUP. +- +-config ZLIST_DEBUG +- bool "Debug info for zram group list" +- depends on ZRAM_GROUP +- help +- Debug info for zram group list. +- +-config ZRAM_GROUP_WRITEBACK +- bool "Write back grouped zram objs to Hyperhold driver" +- depends on ZRAM_GROUP && HYPERHOLD +- help +- Write back grouped zram objs to hyperhold. +diff --git a/drivers/block/zram/zram_group/group_writeback.c b/drivers/block/zram/zram_group/group_writeback.c +deleted file mode 100644 +index 0956a2eb9..000000000 +--- a/drivers/block/zram/zram_group/group_writeback.c ++++ /dev/null +@@ -1,735 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/block/zram/zram_group/group_writeback.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +- +-#include "../zram_drv.h" +-#include "zram_group.h" +- +-#ifdef CONFIG_HYPERHOLD +-#include "hyperhold.h" +-#endif +- +-#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false)) +-#define CHECK_BOUND(var, min, max) \ +- CHECK((var) >= (min) && (var) <= (max), \ +- "%s %u out of bounds %u ~ %u!\n", \ +- #var, (var), (min), (max)) +- +-static u16 zram_get_memcg_id(struct zram *zram, u32 index) +-{ +- return (zram->table[index].flags & ZRAM_GRPID_MASK) >> ZRAM_SIZE_SHIFT; +-} +- +-static void zram_set_memcg_id(struct zram *zram, u32 index, u16 gid) +-{ +- unsigned long old = zram->table[index].flags & (~ZRAM_GRPID_MASK); +- +- zram->table[index].flags = old | ((u64)gid << ZRAM_SIZE_SHIFT); +-} +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-static bool obj_can_wb(struct zram *zram, u32 index, u16 gid) +-{ +- /* overwrited obj, just skip */ +- if (zram_get_memcg_id(zram, index) != gid) { +- pr_debug("obj %u is from group %u instead of group %u.\n", +- index, zram_get_memcg_id(zram, index), gid); +- return false; +- } +- if (!zgrp_obj_is_isolated(zram->zgrp, index)) { +- pr_debug("obj %u is not isolated.\n", index); +- return false; +- } +- /* need not to writeback, put back the obj as HOTEST */ +- if (zram_test_flag(zram, index, ZRAM_SAME)) { +- pr_debug("obj %u is filled with same element.\n", index); +- goto insert; +- } +- if (zram_test_flag(zram, index, ZRAM_WB)) { +- pr_debug("obj %u is writeback.\n", index); +- goto insert; +- } +- /* obj is needed by a pagefault req, do not writeback it. 
*/ +- if (zram_test_flag(zram, index, ZRAM_FAULT)) { +- pr_debug("obj %u is needed by a pagefault request.\n", index); +- goto insert; +- } +- /* should never happen */ +- if (zram_test_flag(zram, index, ZRAM_GWB)) { +- pr_debug("obj %u is group writeback.\n", index); +- BUG(); +- return false; +- } +- +- return true; +-insert: +- zgrp_obj_insert(zram->zgrp, index, gid); +- +- return false; +-} +- +-static void copy_obj(struct hpio *hpio, u32 offset, char *obj, u32 size, bool to) +-{ +- u32 page_id, start; +- char *buf = NULL; +- +- page_id = offset / PAGE_SIZE; +- start = offset % PAGE_SIZE; +- if (size + start <= PAGE_SIZE) { +- buf = page_to_virt(hyperhold_io_page(hpio, page_id)); +- if (to) +- memcpy(buf + start, obj, size); +- else +- memcpy(obj, buf + start, size); +- +- return; +- } +- buf = page_to_virt(hyperhold_io_page(hpio, page_id)); +- if (to) +- memcpy(buf + start, obj, PAGE_SIZE - start); +- else +- memcpy(obj, buf + start, PAGE_SIZE - start); +- buf = page_to_virt(hyperhold_io_page(hpio, page_id + 1)); +- if (to) +- memcpy(buf, obj + PAGE_SIZE - start, size + start - PAGE_SIZE); +- else +- memcpy(obj + PAGE_SIZE - start, buf, size + start - PAGE_SIZE); +-} +- +-static u32 move_obj_to_hpio(struct zram *zram, u32 index, u16 gid, +- struct hpio *hpio, u32 offset) +-{ +- u32 size = 0; +- unsigned long handle; +- char *src = NULL; +- u32 ext_size; +- u32 eid; +- +- eid = hyperhold_io_extent(hpio); +- ext_size = hyperhold_extent_size(eid); +- +- zram_slot_lock(zram, index); +- if (!obj_can_wb(zram, index, gid)) +- goto unlock; +- size = zram_get_obj_size(zram, index); +- /* no space, put back the obj as COLDEST */ +- if (size + offset > ext_size) { +- pr_debug("obj %u size is %u, but ext %u only %u space left.\n", +- index, size, eid, ext_size - offset); +- zgrp_obj_putback(zram->zgrp, index, gid); +- size = 0; +- goto unlock; +- } +- handle = zram_get_handle(zram, index); +- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); +- copy_obj(hpio, offset, src, size, true); +- zs_unmap_object(zram->mem_pool, handle); +- zs_free(zram->mem_pool, handle); +- zram_set_handle(zram, index, hyperhold_address(eid, offset)); +- zram_set_flag(zram, index, ZRAM_GWB); +- wbgrp_obj_insert(zram->zgrp, index, eid); +- wbgrp_obj_stats_inc(zram->zgrp, gid, eid, size); +- zgrp_obj_stats_dec(zram->zgrp, gid, size); +- pr_debug("move obj %u of group %u to hpio %p of eid %u, size = %u, offset = %u\n", +- index, gid, hpio, eid, size, offset); +-unlock: +- zram_slot_unlock(zram, index); +- +- return size; +-} +- +-static void move_obj_from_hpio(struct zram *zram, int index, struct hpio *hpio) +-{ +- u32 size = 0; +- unsigned long handle = 0; +- u32 eid, offset; +- u64 addr; +- char *dst = NULL; +- u16 gid; +- +- eid = hyperhold_io_extent(hpio); +-retry: +- zram_slot_lock(zram, index); +- if (!zram_test_flag(zram, index, ZRAM_GWB)) +- goto unlock; +- addr = zram_get_handle(zram, index); +- if (hyperhold_addr_extent(addr) != eid) +- goto unlock; +- size = zram_get_obj_size(zram, index); +- if (handle) +- goto move; +- handle = zs_malloc(zram->mem_pool, size, GFP_NOWAIT); +- if (handle) +- goto move; +- zram_slot_unlock(zram, index); +- handle = zs_malloc(zram->mem_pool, size, GFP_NOIO | __GFP_NOFAIL); +- if (handle) +- goto retry; +- BUG(); +- +- return; +-move: +- offset = hyperhold_addr_offset(addr); +- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); +- copy_obj(hpio, offset, dst, size, false); +- zs_unmap_object(zram->mem_pool, handle); +- zram_set_handle(zram, index, handle); +- 
zram_clear_flag(zram, index, ZRAM_GWB); +- gid = zram_get_memcg_id(zram, index); +- zgrp_obj_insert(zram->zgrp, index, gid); +- wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size); +- zgrp_obj_stats_inc(zram->zgrp, gid, size); +- pr_debug("move obj %u of group %u from hpio %p of eid %u, size = %u, offset = %u\n", +- index, gid, hpio, eid, size, offset); +-unlock: +- zram_slot_unlock(zram, index); +-} +- +- +-#define NR_ISOLATE 32 +-static bool move_extent_from_hpio(struct zram *zram, struct hpio *hpio) +-{ +- u32 idxs[NR_ISOLATE]; +- u32 eid; +- u32 nr; +- int i; +- bool last = false; +- +- eid = hyperhold_io_extent(hpio); +-repeat: +- nr = wbgrp_isolate_objs(zram->zgrp, eid, idxs, NR_ISOLATE, &last); +- for (i = 0; i < nr; i++) +- move_obj_from_hpio(zram, idxs[i], hpio); +- if (last) +- return true; +- if (nr) +- goto repeat; +- +- return false; +-} +- +-struct hpio_priv { +- struct zram *zram; +- u16 gid; +-}; +- +-static void write_endio(struct hpio *hpio) +-{ +- struct hpio_priv *priv = hyperhold_io_private(hpio); +- struct zram *zram = priv->zram; +- u16 gid = priv->gid; +- u32 eid = hyperhold_io_extent(hpio); +- +- if (hyperhold_io_success(hpio)) +- goto out; +- if (move_extent_from_hpio(zram, hpio)) { +- zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +-out: +- hyperhold_io_complete(hpio); +- hyperhold_io_put(hpio); +- kfree(priv); +-} +- +-static u32 collect_objs(struct zram *zram, u16 gid, struct hpio *hpio, u32 ext_size) +-{ +- u32 offset = 0; +- u32 last_offset; +- u32 nr; +- u32 idxs[NR_ISOLATE]; +- int i; +- +-more: +- last_offset = offset; +- nr = zgrp_isolate_objs(zram->zgrp, gid, idxs, NR_ISOLATE, NULL); +- for (i = 0; i < nr; i++) +- offset += move_obj_to_hpio(zram, idxs[i], gid, hpio, offset); +- pr_debug("%u data attached, offset = %u.\n", offset - last_offset, offset); +- if (offset < ext_size && offset != last_offset) +- goto more; +- +- return offset; +-} +- +-static u64 write_one_extent(struct zram *zram, u16 gid) +-{ +- int eid; +- struct hpio *hpio = NULL; +- struct hpio_priv *priv = NULL; +- u32 size = 0; +- int ret; +- +- priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO); +- if (!priv) +- return 0; +- priv->gid = gid; +- priv->zram = zram; +- eid = hyperhold_alloc_extent(); +- if (eid < 0) +- goto err; +- hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_WRITE); +- if (!hpio) +- goto free_extent; +- +- zgrp_get_ext(zram->zgrp, eid); +- size = collect_objs(zram, gid, hpio, hyperhold_extent_size(eid)); +- if (size == 0) { +- pr_err("group %u has no data in zram.\n", gid); +- zgrp_put_ext(zram->zgrp, eid); +- goto put_hpio; +- } +- zgrp_ext_insert(zram->zgrp, eid, gid); +- if (zgrp_put_ext(zram->zgrp, eid)) { +- zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +- +- ret = hyperhold_write_async(hpio, write_endio, priv); +- if (ret) +- goto move_back; +- +- return size; +-move_back: +- if (move_extent_from_hpio(zram, hpio)) { +- zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +- eid = -EINVAL; +-put_hpio: +- hyperhold_io_put(hpio); +-free_extent: +- if (eid >= 0) +- hyperhold_free_extent(eid); +-err: +- kfree(priv); +- +- return 0; +-} +- +-static void read_endio(struct hpio *hpio) +-{ +- struct hpio_priv *priv = hyperhold_io_private(hpio); +- struct zram *zram = priv->zram; +- u16 gid = priv->gid; +- u32 eid = hyperhold_io_extent(hpio); +- +- if (!hyperhold_io_success(hpio)) { +- BUG(); +- goto out; +- } +- if (move_extent_from_hpio(zram, hpio)) { +- 
zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +-out: +- hyperhold_io_complete(hpio); +- hyperhold_io_put(hpio); +- kfree(priv); +-} +- +-static u64 read_one_extent(struct zram *zram, u32 eid, u16 gid) +-{ +- struct hpio *hpio = NULL; +- u32 ext_size = 0; +- int ret; +- struct hpio_priv *priv = NULL; +- +- priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO); +- if (!priv) +- goto err; +- priv->gid = gid; +- priv->zram = zram; +- hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ); +- if (!hpio) +- goto err; +- ext_size = hyperhold_extent_size(eid); +- ret = hyperhold_read_async(hpio, read_endio, priv); +- if (ret) +- goto err; +- +- return ext_size; +-err: +- hyperhold_io_put(hpio); +- kfree(priv); +- +- return 0; +-} +- +-static void sync_read_endio(struct hpio *hpio) +-{ +- hyperhold_io_complete(hpio); +-} +- +-static int read_one_obj_sync(struct zram *zram, u32 index) +-{ +- struct hpio *hpio = NULL; +- int ret; +- u32 eid; +- u16 gid; +- u32 size; +- +- if (!zram_test_flag(zram, index, ZRAM_GWB)) +- return 0; +- +- pr_debug("read obj %u.\n", index); +- +- gid = zram_get_memcg_id(zram, index); +- eid = hyperhold_addr_extent(zram_get_handle(zram, index)); +- size = zram_get_obj_size(zram, index); +- wbgrp_fault_stats_inc(zram->zgrp, gid, eid, size); +-check: +- if (!zram_test_flag(zram, index, ZRAM_GWB)) +- return 0; +- if (!zram_test_flag(zram, index, ZRAM_FAULT)) +- goto read; +- zram_slot_unlock(zram, index); +- wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT)); +- zram_slot_lock(zram, index); +- goto check; +-read: +- zram_set_flag(zram, index, ZRAM_FAULT); +- zram_slot_unlock(zram, index); +- +- hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ); +- if (!hpio) { +- ret = -ENOMEM; +- goto out; +- } +- ret = hyperhold_read_async(hpio, sync_read_endio, NULL); +- /* io submit error */ +- if (ret && ret != -EAGAIN) +- goto out; +- +- hyperhold_io_wait(hpio); +- +- /* if not reset to zero, will return err sometimes and cause SIG_BUS error */ +- ret = 0; +- +- /* get a write io, data is ready, copy the pages even write failed */ +- if (op_is_write(hyperhold_io_operate(hpio))) +- goto move; +- /* read io failed, return -EIO */ +- if (!hyperhold_io_success(hpio)) { +- ret = -EIO; +- goto out; +- } +- /* success, copy the data and free extent */ +-move: +- if (move_extent_from_hpio(zram, hpio)) { +- zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +- move_obj_from_hpio(zram, index, hpio); +-out: +- hyperhold_io_put(hpio); +- zram_slot_lock(zram, index); +- zram_clear_flag(zram, index, ZRAM_FAULT); +- wake_up(&zram->zgrp->wbgrp.fault_wq); +- +- return ret; +-} +- +-u64 read_group_objs(struct zram *zram, u16 gid, u64 req_size) +-{ +- u32 eid; +- u64 read_size = 0; +- u32 nr; +- +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return 0; +- } +- if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1)) +- return 0; +- +- pr_debug("read %llu data of group %u.\n", req_size, gid); +- +- while (!req_size || req_size > read_size) { +- nr = zgrp_isolate_exts(zram->zgrp, gid, &eid, 1, NULL); +- if (!nr) +- break; +- read_size += read_one_extent(zram, eid, gid); +- } +- +- return read_size; +-} +- +-u64 write_group_objs(struct zram *zram, u16 gid, u64 req_size) +-{ +- u64 write_size = 0; +- u64 size = 0; +- +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return 0; +- } +- if (!CHECK(zram->zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return 0; +- if 
(!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1)) +- return 0; +- +- pr_debug("write %llu data of group %u.\n", req_size, gid); +- +- while (!req_size || req_size > write_size) { +- size = write_one_extent(zram, gid); +- if (!size) +- break; +- write_size += size; +- } +- +- atomic64_add(write_size, &zram->zgrp->stats[0].write_size); +- atomic64_add(write_size, &zram->zgrp->stats[gid].write_size); +- return write_size; +-} +-#endif +- +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +-#include +-#define ZGRP_TEST_MAX_GRP 101 +-#endif +- +-int zram_group_fault_obj(struct zram *zram, u32 index) +-{ +- u16 gid; +- u32 size; +- +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return 0; +- } +- if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1)) +- return 0; +- +- gid = zram_get_memcg_id(zram, index); +- size = zram_get_obj_size(zram, index); +- zgrp_fault_stats_inc(zram->zgrp, gid, size); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- return read_one_obj_sync(zram, index); +-#else +- return 0; +-#endif +-} +- +-void zram_group_track_obj(struct zram *zram, u32 index, struct mem_cgroup *memcg) +-{ +- u16 gid; +- +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return; +- } +- if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1)) +- return; +- if (!CHECK(memcg || !memcg->id.id, "obj %u has no memcg!\n", index)) +- return; +- gid = zram_get_memcg_id(zram, index); +- if (!CHECK(!gid, "obj %u has gid %u.\n", index, gid)) +- BUG(); +- +- gid = memcg->id.id; +- zram_set_memcg_id(zram, index, gid); +- zgrp_obj_insert(zram->zgrp, index, gid); +- zgrp_obj_stats_inc(zram->zgrp, gid, zram_get_obj_size(zram, index)); +-} +- +-void zram_group_untrack_obj(struct zram *zram, u32 index) +-{ +- u16 gid; +- u32 size; +- +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return; +- } +- if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1)) +- return; +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-check: +- if (!zram_test_flag(zram, index, ZRAM_FAULT)) +- goto clear; +- zram_slot_unlock(zram, index); +- wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT)); +- zram_slot_lock(zram, index); +- goto check; +-clear: +-#endif +- gid = zram_get_memcg_id(zram, index); +- size = zram_get_obj_size(zram, index); +- if (!gid) +- return; +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- if (zram_test_flag(zram, index, ZRAM_GWB)) { +- u32 eid = hyperhold_addr_extent(zram_get_handle(zram, index)); +- +- if (wbgrp_obj_delete(zram->zgrp, index, eid)) { +- zgrp_ext_delete(zram->zgrp, eid, gid); +- hyperhold_should_free_extent(eid); +- } +- zram_clear_flag(zram, index, ZRAM_GWB); +- zram_set_memcg_id(zram, index, 0); +- wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size); +- zram_set_handle(zram, index, 0); +- return; +- } +-#endif +- zgrp_obj_delete(zram->zgrp, index, gid); +- zram_set_memcg_id(zram, index, 0); +- zgrp_obj_stats_dec(zram->zgrp, gid, size); +-} +- +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +-void group_debug(struct zram *zram, u32 op, u32 index, u32 gid) +-{ +- if (op == 0) +- zram_group_dump(zram->zgrp, gid, index); +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- if (op == 22) +- read_group_objs(zram, gid, index); +- if (op == 23) +- write_group_objs(zram, gid, index); +- if (op == 20) { +- if (index) +- zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent()); +- else +- zram_group_remove_writeback(zram->zgrp); +- } +-#endif +-} +-#endif +- +-static u64 group_obj_stats(struct zram *zram, u16 gid, int type) +-{ +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- 
return 0; +- } +- if (!CHECK_BOUND(gid, 0, zram->zgrp->nr_grp - 1)) +- return 0; +- +- if (type == CACHE_SIZE) +- return atomic64_read(&zram->zgrp->stats[gid].zram_size); +- else if (type == CACHE_PAGE) +- return atomic_read(&zram->zgrp->stats[gid].zram_pages); +- else if (type == CACHE_FAULT) +- return atomic64_read(&zram->zgrp->stats[gid].zram_fault); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- else if (type == SWAP_SIZE) +- return atomic64_read(&zram->zgrp->stats[gid].wb_size); +- else if (type == SWAP_PAGE) +- return atomic_read(&zram->zgrp->stats[gid].wb_pages); +- else if (type == READ_SIZE) +- return atomic64_read(&zram->zgrp->stats[gid].read_size); +- else if (type == WRITE_SIZE) +- return atomic64_read(&zram->zgrp->stats[gid].write_size); +- else if (type == SWAP_FAULT) +- return atomic64_read(&zram->zgrp->stats[gid].wb_fault); +- BUG(); +-#endif +- +- return 0; +-} +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-static u64 zram_group_read(u16 gid, u64 req_size, void *priv) +-{ +- if (!CHECK(priv, "priv is NULL!\n")) +- return 0; +- +- return read_group_objs((struct zram *)priv, gid, req_size); +-} +- +-static u64 zram_group_write(u16 gid, u64 req_size, void *priv) +-{ +- if (!CHECK(priv, "priv is NULL!\n")) +- return 0; +- +- return write_group_objs((struct zram *)priv, gid, req_size); +-} +-#else +-static u64 zram_group_read(u16 gid, u64 req_size, void *priv) +-{ +- return 0; +-} +-static u64 zram_group_write(u16 gid, u64 req_size, void *priv) +-{ +- return 0; +-} +-#endif +- +- +-static u64 zram_group_data_size(u16 gid, int type, void *priv) +-{ +- if (!CHECK(priv, "priv is NULL!\n")) +- return 0; +- +- return group_obj_stats((struct zram *)priv, gid, type); +-} +- +-struct group_swap_ops zram_group_ops = { +- .group_read = zram_group_read, +- .group_write = zram_group_write, +- .group_data_size = zram_group_data_size, +-}; +- +-static int register_zram_group(struct zram *zram) +-{ +- if (!CHECK(zram, "zram is NULL!\n")) +- return -EINVAL; +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return -EINVAL; +- } +- +- zram->zgrp->gsdev = register_group_swap(&zram_group_ops, zram); +- if (!zram->zgrp->gsdev) { +- pr_err("register zram group failed!\n"); +- return -ENOMEM; +- } +- +- return 0; +-} +- +-static void unregister_zram_group(struct zram *zram) +-{ +- if (!CHECK(zram, "zram is NULL!\n")) +- return; +- if (!(zram->zgrp)) { +- pr_debug("zram group is not enable!\n"); +- return; +- } +- +- unregister_group_swap(zram->zgrp->gsdev); +- zram->zgrp->gsdev = NULL; +-} +- +-void zram_group_init(struct zram *zram, u32 nr_obj) +-{ +- unsigned int ctrl = zram->zgrp_ctrl; +- +- if (ctrl == ZGRP_NONE) +- return; +- zram->zgrp = zram_group_meta_alloc(nr_obj, ZGRP_MAX_GRP - 1); +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- if (ctrl == ZGRP_WRITE) +- zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent()); +-#endif +- register_zram_group(zram); +-} +- +-void zram_group_deinit(struct zram *zram) +-{ +- unregister_zram_group(zram); +- zram_group_meta_free(zram->zgrp); +- zram->zgrp = NULL; +-} +diff --git a/drivers/block/zram/zram_group/zlist.c b/drivers/block/zram/zram_group/zlist.c +deleted file mode 100644 +index fd8295eca..000000000 +--- a/drivers/block/zram/zram_group/zlist.c ++++ /dev/null +@@ -1,235 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/block/zram/zram_group/zlist.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#define pr_fmt(fmt) "[ZLIST]" fmt +- +-#include +-#include +-#include +- +-#include "zlist.h" +- +-#define assert(expr) \ +- do { \ +- if (expr) \ +- break; \ +- pr_err("assertion [%s] failed: in func<%s> at %s:%d\n", \ +- #expr, __func__, __FILE__, __LINE__); \ +- BUG(); \ +- } while (0) +- +-static inline void zlist_node_lock(struct zlist_node *node) +-{ +- bit_spin_lock(ZLIST_LOCK_BIT, (unsigned long *)node); +-} +- +-static inline void zlist_node_unlock(struct zlist_node *node) +-{ +- bit_spin_unlock(ZLIST_LOCK_BIT, (unsigned long *)node); +-} +- +-#ifdef CONFIG_ZLIST_DEBUG +-static inline void zlist_before_add_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) +-{ +- assert(idx2node(prev->next, tab) == next); +- assert(idx2node(next->prev, tab) == prev); +- assert(idx2node(node->prev, tab) == node); +- assert(idx2node(node->next, tab) == node); +-} +- +-static inline void zlist_after_add_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) +-{ +- assert(idx2node(prev->next, tab) == node); +- assert(idx2node(next->prev, tab) == node); +- assert(idx2node(node->prev, tab) == prev); +- assert(idx2node(node->next, tab) == next); +-} +- +-static inline void zlist_before_del_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) +-{ +- assert(idx2node(prev->next, tab) == node); +- assert(idx2node(next->prev, tab) == node); +- assert(idx2node(node->prev, tab) == prev); +- assert(idx2node(node->next, tab) == next); +-} +- +-static inline void zlist_after_del_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) +-{ +- assert(idx2node(prev->next, tab) == next); +- assert(idx2node(next->prev, tab) == prev); +- assert(idx2node(node->prev, tab) == node); +- assert(idx2node(node->next, tab) == node); +-} +-#else +-static inline void zlist_before_add_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) {}; +-static inline void zlist_after_add_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) {}; +-static inline void zlist_before_del_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) {}; +-static inline void zlist_after_del_check(struct zlist_table *tab, +- struct zlist_node *prev, struct zlist_node *node, +- struct zlist_node *next) {}; +-#endif +- +-struct zlist_table *zlist_table_alloc(struct zlist_node *(*i2n)(u32, void*), +- void *private, gfp_t gfp) +-{ +- struct zlist_table *tab = kmalloc(sizeof(struct zlist_table), gfp); +- +- if (!tab) +- return NULL; +- tab->idx2node = i2n; +- tab->private = private; +- +- return tab; +-} +- +-void zlist_lock(u32 idx, struct zlist_table *tab) +-{ +- zlist_node_lock(idx2node(idx, tab)); +-} +- +-void zlist_unlock(u32 idx, struct zlist_table *tab) +-{ +- zlist_node_unlock(idx2node(idx, tab)); +-} +- +-void zlist_add_nolock(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- struct zlist_node *head = idx2node(hid, tab); +- u32 nid = head->next; +- struct zlist_node *next = idx2node(nid, tab); +- +- zlist_before_add_check(tab, head, node, next); +- if (idx != hid) +- zlist_node_lock(node); +- node->prev = hid; +- node->next = nid; +- if (idx != hid) +- zlist_node_unlock(node); +- head->next = 
idx; +- if (nid != hid) +- zlist_node_lock(next); +- next->prev = idx; +- if (nid != hid) +- zlist_node_unlock(next); +- zlist_after_add_check(tab, head, node, next); +-} +- +-void zlist_add_tail_nolock(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- struct zlist_node *head = idx2node(hid, tab); +- u32 tid = head->prev; +- struct zlist_node *tail = idx2node(tid, tab); +- +- zlist_before_add_check(tab, tail, node, head); +- if (idx != hid) +- zlist_node_lock(node); +- node->prev = tid; +- node->next = hid; +- if (idx != hid) +- zlist_node_unlock(node); +- head->prev = idx; +- if (tid != hid) +- zlist_node_lock(tail); +- tail->next = idx; +- if (tid != hid) +- zlist_node_unlock(tail); +- zlist_after_add_check(tab, tail, node, head); +-} +- +-bool zlist_del_nolock(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- u32 pid = node->prev; +- u32 nid = node->next; +- struct zlist_node *prev = idx2node(pid, tab); +- struct zlist_node *next = idx2node(nid, tab); +- +- zlist_before_del_check(tab, prev, node, next); +- if (idx != hid) +- zlist_node_lock(node); +- node->prev = idx; +- node->next = idx; +- if (idx != hid) +- zlist_node_unlock(node); +- if (pid != hid) +- zlist_node_lock(prev); +- prev->next = nid; +- if (pid != hid) +- zlist_node_unlock(prev); +- if (nid != hid) +- zlist_node_lock(next); +- next->prev = pid; +- if (nid != hid) +- zlist_node_unlock(next); +- zlist_after_del_check(tab, prev, node, next); +- +- return zlist_is_isolated_nolock(hid, tab); +-} +- +-bool zlist_is_isolated_nolock(u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- +- return (node->prev == idx) && (node->next == idx); +-} +- +-bool zlist_set_priv(u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- bool ret = false; +- +- zlist_node_lock(node); +- ret = !test_and_set_bit(ZLIST_PRIV_BIT, (unsigned long *)node); +- zlist_node_unlock(node); +- +- return ret; +-} +- +-bool zlist_clr_priv_nolock(u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- bool ret = false; +- +- ret = !test_and_clear_bit(ZLIST_PRIV_BIT, (unsigned long *)node); +- +- return ret; +-} +- +-bool zlist_test_priv_nolock(u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- bool ret = false; +- +- ret = test_bit(ZLIST_PRIV_BIT, (unsigned long *)node); +- +- return ret; +-} +- +-void zlist_node_init(u32 idx, struct zlist_table *tab) +-{ +- struct zlist_node *node = idx2node(idx, tab); +- +- memset(node, 0, sizeof(struct zlist_node)); +- node->prev = idx; +- node->next = idx; +-} +diff --git a/drivers/block/zram/zram_group/zlist.h b/drivers/block/zram/zram_group/zlist.h +deleted file mode 100644 +index a7cbf3750..000000000 +--- a/drivers/block/zram/zram_group/zlist.h ++++ /dev/null +@@ -1,97 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/block/zram/zram_group/zlist.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#ifndef _ZLIST_H_ +-#define _ZLIST_H_ +- +-#define ZLIST_IDX_SHIFT 30 +-#define ZLIST_LOCK_BIT ZLIST_IDX_SHIFT +-#define ZLIST_PRIV_BIT ((ZLIST_IDX_SHIFT << 1) + 1) +- +-#define ZLIST_IDX_MAX (1 << ZLIST_IDX_SHIFT) +- +-struct zlist_node { +- u64 prev : ZLIST_IDX_SHIFT; +- u64 lock : 1; +- u64 next : ZLIST_IDX_SHIFT; +- u64 priv : 1; +-}; +- +-struct zlist_table { +- struct zlist_node *(*idx2node)(u32 idx, void *priv); +- void *private; +-}; +- +-static inline struct zlist_node *idx2node(u32 idx, struct zlist_table *tab) +-{ +- return tab->idx2node(idx, tab->private); +-} +- +-static inline u32 next_idx(u32 idx, struct zlist_table *tab) +-{ +- return idx2node(idx, tab)->next; +-} +- +-static inline u32 prev_idx(u32 idx, struct zlist_table *tab) +-{ +- return idx2node(idx, tab)->prev; +-} +- +-static inline void zlist_table_free(struct zlist_table *tab) +-{ +- kfree(tab); +-} +- +-struct zlist_table *zlist_table_alloc(struct zlist_node *(*i2n)(u32, void*), +- void *private, gfp_t gfp); +- +-void zlist_lock(u32 idx, struct zlist_table *tab); +-void zlist_unlock(u32 idx, struct zlist_table *tab); +- +-void zlist_add_nolock(u32 hid, u32 idx, struct zlist_table *tab); +-void zlist_add_tail_nolock(u32 hid, u32 idx, struct zlist_table *tab); +-bool zlist_del_nolock(u32 hid, u32 idx, struct zlist_table *tab); +-bool zlist_is_isolated_nolock(u32 idx, struct zlist_table *tab); +- +-static inline void zlist_add(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- zlist_lock(hid, tab); +- zlist_add_nolock(hid, idx, tab); +- zlist_unlock(hid, tab); +-} +- +-static inline void zlist_add_tail(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- zlist_lock(hid, tab); +- zlist_add_tail_nolock(hid, idx, tab); +- zlist_unlock(hid, tab); +-} +- +-static inline bool zlist_del(u32 hid, u32 idx, struct zlist_table *tab) +-{ +- bool ret = false; +- +- zlist_lock(hid, tab); +- ret = zlist_del_nolock(hid, idx, tab); +- zlist_unlock(hid, tab); +- +- return ret; +-} +- +-bool zlist_set_priv(u32 idx, struct zlist_table *tab); +-bool zlist_clr_priv_nolock(u32 idx, struct zlist_table *tab); +-bool zlist_test_priv_nolock(u32 idx, struct zlist_table *tab); +- +-void zlist_node_init(u32 idx, struct zlist_table *tab); +- +-#define zlist_for_each_entry(idx, hid, tab) \ +- for ((idx) = next_idx(hid, tab); (idx) != (hid); \ +- (idx) = next_idx(idx, tab)) +-#define zlist_for_each_entry_reverse(idx, hid, tab) \ +- for ((idx) = prev_idx(hid, tab); (idx) != (hid); \ +- (idx) = prev_idx(idx, tab)) +-#endif +diff --git a/drivers/block/zram/zram_group/zram_group.c b/drivers/block/zram/zram_group/zram_group.c +deleted file mode 100644 +index 9a023e77d..000000000 +--- a/drivers/block/zram/zram_group/zram_group.c ++++ /dev/null +@@ -1,672 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/block/zram/zram_group/zram_group.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#define pr_fmt(fmt) "[ZRAM_GROUP]" fmt +- +-#include +-#include +-#include "zram_group.h" +- +-#define CHECK(cond, ...) 
((cond) || (pr_err(__VA_ARGS__), false)) +-#define CHECK_BOUND(var, min, max) \ +- CHECK((var) >= (min) && (var) <= (max), \ +- "%s %u out of bounds %u ~ %u!\n", \ +- #var, (var), (min), (max)) +- +-/* +- * idx2node for obj table +- */ +-static struct zlist_node *get_obj(u32 index, void *private) +-{ +- struct zram_group *zgrp = private; +- +- if (index < zgrp->nr_obj) +- return &zgrp->obj[index]; +- +- index -= zgrp->nr_obj; +- BUG_ON(!index); +- if (index < zgrp->nr_grp) +- return &zgrp->grp_obj_head[index]; +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- index -= zgrp->nr_grp; +- BUG_ON(index >= zgrp->wbgrp.nr_ext); +- return &zgrp->wbgrp.ext_obj_head[index]; +-#endif +- BUG(); +-} +- +-void zram_group_meta_free(struct zram_group *zgrp) +-{ +- if (!CHECK(zgrp, "zram group is not enable!\n")) +- return; +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- zram_group_remove_writeback(zgrp); +-#endif +- vfree(zgrp->grp_obj_head); +- vfree(zgrp->obj); +- zlist_table_free(zgrp->obj_tab); +- vfree(zgrp->stats); +- kfree(zgrp); +- +- pr_info("zram group freed.\n"); +-} +- +-struct zram_group *zram_group_meta_alloc(u32 nr_obj, u32 nr_grp) +-{ +- struct zram_group *zgrp = NULL; +- u32 i; +- +- if (!CHECK_BOUND(nr_grp, 1, ZGRP_MAX_GRP - 1)) +- return NULL; +- +- /* reserve gid 0 */ +- nr_grp++; +- if (!CHECK_BOUND(nr_obj, 1, ZGRP_MAX_OBJ)) +- return NULL; +- zgrp = kzalloc(sizeof(struct zram_group), GFP_KERNEL); +- if (!zgrp) +- goto err; +- zgrp->nr_obj = nr_obj; +- zgrp->nr_grp = nr_grp; +- zgrp->grp_obj_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp); +- if (!zgrp->grp_obj_head) +- goto err; +- zgrp->obj = vmalloc(sizeof(struct zlist_node) * zgrp->nr_obj); +- if (!zgrp->obj) +- goto err; +- zgrp->obj_tab = zlist_table_alloc(get_obj, zgrp, GFP_KERNEL); +- if (!zgrp->obj_tab) +- goto err; +- zgrp->stats = vzalloc(sizeof(struct zram_group_stats) * zgrp->nr_grp); +- if (!zgrp->stats) +- goto err; +- zgrp->gsdev = NULL; +- +- for (i = 0; i < zgrp->nr_obj; i++) +- zlist_node_init(i, zgrp->obj_tab); +- for (i = 1; i < zgrp->nr_grp; i++) +- zlist_node_init(i + zgrp->nr_obj, zgrp->obj_tab); +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- zgrp->wbgrp.enable = false; +- mutex_init(&zgrp->wbgrp.init_lock); +-#endif +- pr_info("zram_group alloc succ.\n"); +- return zgrp; +-err: +- pr_err("zram_group alloc failed!\n"); +- zram_group_meta_free(zgrp); +- +- return NULL; +-} +- +-/* +- * insert obj at @index into group @gid as the HOTTEST obj +- */ +-void zgrp_obj_insert(struct zram_group *zgrp, u32 index, u16 gid) +-{ +- u32 hid; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- hid = gid + zgrp->nr_obj; +- zlist_add(hid, index, zgrp->obj_tab); +- pr_debug("insert obj %u to group %u\n", index, gid); +-} +- +-/* +- * remove obj at @index from group @gid +- */ +-bool zgrp_obj_delete(struct zram_group *zgrp, u32 index, u16 gid) +-{ +- u32 hid; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return false; +- } +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return false; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return false; +- pr_debug("delete obj %u from group %u\n", index, gid); +- hid = gid + zgrp->nr_obj; +- +- return zlist_del(hid, index, zgrp->obj_tab); +-} +- +-/* +- * try to isolate the last @nr objs of @gid, store their indexes in array @idxs +- * and @return the obj cnt actually isolated. isolate all objs if nr is 0. 
+- */ +-u32 zgrp_isolate_objs(struct zram_group *zgrp, u16 gid, u32 *idxs, u32 nr, bool *last) +-{ +- u32 hid, idx; +- u32 cnt = 0; +- u32 i; +- +- if (last) +- *last = false; +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return 0; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return 0; +- if (!CHECK(idxs, "return array idxs is null!\n")) +- return 0; +- hid = gid + zgrp->nr_obj; +- zlist_lock(hid, zgrp->obj_tab); +- zlist_for_each_entry_reverse(idx, hid, zgrp->obj_tab) { +- idxs[cnt++] = idx; +- if (nr && cnt == nr) +- break; +- } +- for (i = 0; i < cnt; i++) +- zlist_del_nolock(hid, idxs[i], zgrp->obj_tab); +- if (last) +- *last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab); +- zlist_unlock(hid, zgrp->obj_tab); +- +- pr_debug("isolated %u objs from group %u.\n", cnt, gid); +- +- return cnt; +-} +- +-/* +- * check if the obj at @index is isolate from zram groups +- */ +-bool zgrp_obj_is_isolated(struct zram_group *zgrp, u32 index) +-{ +- bool ret = false; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return false; +- } +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return false; +- +- zlist_lock(index, zgrp->obj_tab); +- ret = zlist_is_isolated_nolock(index, zgrp->obj_tab); +- zlist_unlock(index, zgrp->obj_tab); +- +- return ret; +-} +-/* +- * insert obj at @index into group @gid as the COLDEST obj +- */ +-void zgrp_obj_putback(struct zram_group *zgrp, u32 index, u16 gid) +-{ +- u32 hid; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- hid = gid + zgrp->nr_obj; +- zlist_add_tail(hid, index, zgrp->obj_tab); +- pr_debug("putback obj %u to group %u\n", index, gid); +-} +- +-void zgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- +- atomic_inc(&zgrp->stats[gid].zram_pages); +- atomic64_add(size, &zgrp->stats[gid].zram_size); +- atomic_inc(&zgrp->stats[0].zram_pages); +- atomic64_add(size, &zgrp->stats[0].zram_size); +-} +- +-void zgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- +- atomic_dec(&zgrp->stats[gid].zram_pages); +- atomic64_sub(size, &zgrp->stats[gid].zram_size); +- atomic_dec(&zgrp->stats[0].zram_pages); +- atomic64_sub(size, &zgrp->stats[0].zram_size); +-} +- +-void zgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- +- atomic64_inc(&zgrp->stats[gid].zram_fault); +- atomic64_inc(&zgrp->stats[0].zram_fault); +-} +- +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +-void zram_group_dump(struct zram_group *zgrp, u16 gid, u32 index) +-{ +- u32 hid, idx; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- hid = gid + zgrp->nr_obj; +- if (gid == 0) { +- struct zlist_node *node = NULL; +- +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return; +- node = idx2node(index, zgrp->obj_tab); +- pr_err("dump index %u = %u %u %u %u\n", index, +- node->prev, node->next, +- node->lock, node->priv); +- } else { +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- pr_err("dump index of group %u\n", gid); +- 
zlist_for_each_entry(idx, hid, zgrp->obj_tab) +- pr_err("%u\n", idx); +- } +-} +-#endif +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-/* +- * idx2node for ext table +- */ +-static struct zlist_node *get_ext(u32 index, void *private) +-{ +- struct zram_group *zgrp = private; +- +- if (index < zgrp->wbgrp.nr_ext) +- return &zgrp->wbgrp.ext[index]; +- +- index -= zgrp->wbgrp.nr_ext; +- BUG_ON(!index); +- return &zgrp->wbgrp.grp_ext_head[index]; +-} +- +-/* +- * disable writeback for zram group @zgrp +- */ +-void zram_group_remove_writeback(struct zram_group *zgrp) +-{ +- if (!CHECK(zgrp, "zram group is not enable!\n")) +- return; +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return; +- zgrp->wbgrp.enable = false; +- vfree(zgrp->wbgrp.grp_ext_head); +- vfree(zgrp->wbgrp.ext); +- zlist_table_free(zgrp->wbgrp.ext_tab); +- vfree(zgrp->wbgrp.ext_obj_head); +- pr_info("zram group writeback is removed.\n"); +-} +- +-/* +- * init & enable writeback on exist zram group @zgrp with a backing device of +- * @nr_ext extents. +- */ +-int zram_group_apply_writeback(struct zram_group *zgrp, u32 nr_ext) +-{ +- struct writeback_group *wbgrp = NULL; +- u32 i; +- int ret = 0; +- +- if (!CHECK(zgrp, "zram group is not enable!\n")) +- return -EINVAL; +- +- mutex_lock(&zgrp->wbgrp.init_lock); +- if (!CHECK(!zgrp->wbgrp.enable, "zram group writeback is already enable!\n")) +- goto out; +- if (!CHECK_BOUND(nr_ext, 1, ZGRP_MAX_EXT)) { +- ret = -EINVAL; +- goto out; +- } +- wbgrp = &zgrp->wbgrp; +- wbgrp->nr_ext = nr_ext; +- wbgrp->grp_ext_head = vmalloc(sizeof(struct zlist_node) * zgrp->nr_grp); +- if (!wbgrp->grp_ext_head) { +- ret = -ENOMEM; +- goto out; +- } +- wbgrp->ext = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext); +- if (!wbgrp->ext) { +- ret = -ENOMEM; +- goto out; +- } +- wbgrp->ext_obj_head = vmalloc(sizeof(struct zlist_node) * wbgrp->nr_ext); +- if (!wbgrp->ext_obj_head) { +- ret = -ENOMEM; +- goto out; +- } +- +- wbgrp->ext_tab = zlist_table_alloc(get_ext, zgrp, GFP_KERNEL); +- if (!wbgrp->ext_tab) { +- ret = -ENOMEM; +- goto out; +- } +- +- for (i = 0; i < wbgrp->nr_ext; i++) +- zlist_node_init(i, wbgrp->ext_tab); +- for (i = 1; i < zgrp->nr_grp; i++) +- zlist_node_init(i + wbgrp->nr_ext, wbgrp->ext_tab); +- +- for (i = 0; i < wbgrp->nr_ext; i++) +- zlist_node_init(i + zgrp->nr_obj + zgrp->nr_grp, zgrp->obj_tab); +- +- init_waitqueue_head(&wbgrp->fault_wq); +- wbgrp->enable = true; +- pr_info("zram group writeback is enabled.\n"); +-out: +- mutex_unlock(&zgrp->wbgrp.init_lock); +- +- if (ret) { +- zram_group_remove_writeback(zgrp); +- pr_err("zram group writeback enable failed!\n"); +- } +- +- return ret; +-} +- +-/* +- * attach extent at @eid to group @gid as the HOTTEST extent +- */ +-void zgrp_ext_insert(struct zram_group *zgrp, u32 eid, u16 gid) +-{ +- u32 hid; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- hid = gid + zgrp->wbgrp.nr_ext; +- zlist_add(hid, eid, zgrp->wbgrp.ext_tab); +- pr_debug("insert extent %u to group %u\n", eid, gid); +-} +- +-/* +- * remove extent at @eid from group @gid +- */ +-bool zgrp_ext_delete(struct zram_group *zgrp, u32 eid, u16 gid) +-{ +- u32 hid; +- bool isolated = false; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return false; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram 
group writeback is not enable!\n")) +- return false; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return false; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return false; +- +- zlist_lock(eid, zgrp->wbgrp.ext_tab); +- isolated = zlist_is_isolated_nolock(eid, zgrp->wbgrp.ext_tab); +- zlist_unlock(eid, zgrp->wbgrp.ext_tab); +- if (isolated) { +- pr_debug("extent %u is already isolated, skip delete.\n", eid); +- return false; +- } +- +- pr_debug("delete extent %u from group %u\n", eid, gid); +- hid = gid + zgrp->wbgrp.nr_ext; +- return zlist_del(hid, eid, zgrp->wbgrp.ext_tab); +-} +- +-/* +- * try to isolate the first @nr exts of @gid, store their eids in array @eids +- * and @return the cnt actually isolated. isolate all exts if nr is 0. +- */ +-u32 zgrp_isolate_exts(struct zram_group *zgrp, u16 gid, u32 *eids, u32 nr, bool *last) +-{ +- u32 hid, idx; +- u32 cnt = 0; +- u32 i; +- +- if (last) +- *last = false; +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return 0; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return 0; +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return 0; +- if (!CHECK(eids, "return array eids is null!\n")) +- return 0; +- hid = gid + zgrp->wbgrp.nr_ext; +- zlist_lock(hid, zgrp->wbgrp.ext_tab); +- zlist_for_each_entry_reverse(idx, hid, zgrp->wbgrp.ext_tab) { +- eids[cnt++] = idx; +- if (nr && cnt == nr) +- break; +- } +- for (i = 0; i < cnt; i++) +- zlist_del_nolock(hid, eids[i], zgrp->wbgrp.ext_tab); +- if (last) +- *last = cnt && zlist_is_isolated_nolock(hid, zgrp->wbgrp.ext_tab); +- zlist_unlock(hid, zgrp->wbgrp.ext_tab); +- +- pr_debug("isolated %u exts from group %u.\n", cnt, gid); +- +- return cnt; +-} +- +-void zgrp_get_ext(struct zram_group *zgrp, u32 eid) +-{ +- u32 hid; +- +- if (!CHECK(zgrp, "zram group is not enable!\n")) +- return; +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- +- hid = eid + zgrp->nr_obj + zgrp->nr_grp; +- zlist_set_priv(hid, zgrp->obj_tab); +- pr_info("get extent %u\n", eid); +-} +- +-bool zgrp_put_ext(struct zram_group *zgrp, u32 eid) +-{ +- u32 hid; +- bool ret = false; +- +- if (!CHECK(zgrp, "zram group is not enable!\n")) +- return false; +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return false; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return false; +- +- hid = eid + zgrp->nr_obj + zgrp->nr_grp; +- zlist_lock(hid, zgrp->obj_tab); +- zlist_clr_priv_nolock(hid, zgrp->obj_tab); +- ret = zlist_is_isolated_nolock(hid, zgrp->obj_tab); +- zlist_unlock(hid, zgrp->obj_tab); +- +- pr_info("put extent %u, ret = %d\n", eid, ret); +- +- return ret; +-} +- +-/* +- * insert obj at @index into extent @eid +- */ +-void wbgrp_obj_insert(struct zram_group *zgrp, u32 index, u32 eid) +-{ +- u32 hid; +- +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return; +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- hid = eid + zgrp->nr_obj + zgrp->nr_grp; +- zlist_add_tail(hid, index, zgrp->obj_tab); +- pr_debug("insert obj %u to extent %u\n", index, eid); +-} +- +-/* +- * remove obj at @index from extent @eid +- */ +-bool wbgrp_obj_delete(struct zram_group *zgrp, u32 index, u32 eid) +-{ +- u32 hid; +- bool ret = false; +- +- if (!zgrp) { +- pr_debug("zram group 
is not enable!"); +- return false; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return false; +- if (!CHECK_BOUND(index, 0, zgrp->nr_obj - 1)) +- return false; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return false; +- pr_debug("delete obj %u from extent %u\n", index, eid); +- hid = eid + zgrp->nr_obj + zgrp->nr_grp; +- +- zlist_lock(hid, zgrp->obj_tab); +- ret = zlist_del_nolock(hid, index, zgrp->obj_tab) +- && !zlist_test_priv_nolock(hid, zgrp->obj_tab); +- zlist_unlock(hid, zgrp->obj_tab); +- +- return ret; +-} +- +-/* +- * try to isolate the first @nr writeback objs of @eid, store their indexes in +- * array @idxs and @return the obj cnt actually isolated. isolate all objs if +- * @nr is 0. +- */ +-u32 wbgrp_isolate_objs(struct zram_group *zgrp, u32 eid, u32 *idxs, u32 nr, bool *last) +-{ +- u32 hid, idx; +- u32 cnt = 0; +- u32 i; +- +- if (last) +- *last = false; +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return 0; +- } +- if (!CHECK(zgrp->wbgrp.enable, "zram group writeback is not enable!\n")) +- return 0; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return 0; +- if (!CHECK(idxs, "return array idxs is null!\n")) +- return 0; +- hid = eid + zgrp->nr_obj + zgrp->nr_grp; +- zlist_lock(hid, zgrp->obj_tab); +- zlist_for_each_entry(idx, hid, zgrp->obj_tab) { +- idxs[cnt++] = idx; +- if (nr && cnt == nr) +- break; +- } +- for (i = 0; i < cnt; i++) +- zlist_del_nolock(hid, idxs[i], zgrp->obj_tab); +- if (last) +- *last = cnt && zlist_is_isolated_nolock(hid, zgrp->obj_tab) +- && !zlist_test_priv_nolock(hid, zgrp->obj_tab); +- zlist_unlock(hid, zgrp->obj_tab); +- +- pr_debug("isolated %u objs from extent %u.\n", cnt, eid); +- +- return cnt; +-} +- +-void wbgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- +- atomic_inc(&zgrp->stats[gid].wb_pages); +- atomic64_add(size, &zgrp->stats[gid].wb_size); +- atomic_inc(&zgrp->stats[0].wb_pages); +- atomic64_add(size, &zgrp->stats[0].wb_size); +-} +- +-void wbgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 eid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- +- atomic_dec(&zgrp->stats[gid].wb_pages); +- atomic64_sub(size, &zgrp->stats[gid].wb_size); +- atomic_dec(&zgrp->stats[0].wb_pages); +- atomic64_sub(size, &zgrp->stats[0].wb_size); +-} +- +-void wbgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size) +-{ +- if (!zgrp) { +- pr_debug("zram group is not enable!"); +- return; +- } +- if (!CHECK_BOUND(gid, 1, zgrp->nr_grp - 1)) +- return; +- if (!CHECK_BOUND(eid, 0, zgrp->wbgrp.nr_ext - 1)) +- return; +- +- atomic64_inc(&zgrp->stats[gid].wb_fault); +- atomic64_inc(&zgrp->stats[0].wb_fault); +-} +-#endif +diff --git a/drivers/block/zram/zram_group/zram_group.h b/drivers/block/zram/zram_group/zram_group.h +deleted file mode 100644 +index 9b184b7bd..000000000 +--- a/drivers/block/zram/zram_group/zram_group.h ++++ /dev/null +@@ -1,98 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/block/zram/zram_group/zram_group.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#ifndef _ZRAM_GROUP_H_ +-#define _ZRAM_GROUP_H_ +- +-#include +-#include +- +-#include "zlist.h" +- +-#define ZGRP_MAX_GRP USHRT_MAX +-#define ZGRP_MAX_OBJ (1 << 30) +- +-enum { +- ZGRP_NONE = 0, +- ZGRP_TRACK, +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- ZGRP_WRITE, +-#endif +-}; +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-#define ZGRP_MAX_EXT (ZLIST_IDX_MAX - ZGRP_MAX_GRP - ZGRP_MAX_OBJ) +-struct writeback_group { +- bool enable; +- u32 nr_ext; +- struct zlist_node *grp_ext_head; +- struct zlist_node *ext; +- struct zlist_table *ext_tab; +- struct zlist_node *ext_obj_head; +- struct mutex init_lock; +- wait_queue_head_t fault_wq; +-}; +-#endif +- +-struct zram_group_stats { +- atomic64_t zram_size; +- atomic_t zram_pages; +- atomic64_t zram_fault; +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- atomic64_t wb_size; +- atomic_t wb_pages; +- atomic64_t wb_fault; +- atomic_t wb_exts; +- atomic64_t write_size; +- atomic64_t read_size; +-#endif +-}; +- +-struct zram_group { +- u32 nr_obj; +- u32 nr_grp; +- struct zlist_node *grp_obj_head; +- struct zlist_node *obj; +- struct zlist_table *obj_tab; +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +- struct writeback_group wbgrp; +-#endif +- struct group_swap_device *gsdev; +- struct zram_group_stats *stats; +-}; +- +-void zram_group_meta_free(struct zram_group *zgrp); +-struct zram_group *zram_group_meta_alloc(u32 nr_obj, u32 nr_grp); +-void zgrp_obj_insert(struct zram_group *zgrp, u32 index, u16 gid); +-bool zgrp_obj_delete(struct zram_group *zgrp, u32 index, u16 gid); +-u32 zgrp_isolate_objs(struct zram_group *zgrp, u16 gid, u32 *idxs, u32 nr, bool *last); +-bool zgrp_obj_is_isolated(struct zram_group *zgrp, u32 index); +-void zgrp_obj_putback(struct zram_group *zgrp, u32 index, u16 gid); +-void zgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 size); +-void zgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 size); +-void zgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 size); +- +-#ifdef CONFIG_ZRAM_GROUP_DEBUG +-void zram_group_dump(struct zram_group *zgrp, u16 gid, u32 index); +-#endif +- +-#ifdef CONFIG_ZRAM_GROUP_WRITEBACK +-void zram_group_remove_writeback(struct zram_group *zgrp); +-int zram_group_apply_writeback(struct zram_group *zgrp, u32 nr_ext); +-void zgrp_ext_insert(struct zram_group *zgrp, u32 eid, u16 gid); +-bool zgrp_ext_delete(struct zram_group *zgrp, u32 eid, u16 gid); +-u32 zgrp_isolate_exts(struct zram_group *zgrp, u16 gid, u32 *eids, u32 nr, bool *last); +-void zgrp_get_ext(struct zram_group *zgrp, u32 eid); +-bool zgrp_put_ext(struct zram_group *zgrp, u32 eid); +-void wbgrp_obj_insert(struct zram_group *zgrp, u32 index, u32 eid); +-bool wbgrp_obj_delete(struct zram_group *zgrp, u32 index, u32 eid); +-u32 wbgrp_isolate_objs(struct zram_group *zgrp, u32 eid, u32 *idxs, u32 nr, bool *last); +-void wbgrp_obj_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size); +-void wbgrp_obj_stats_dec(struct zram_group *zgrp, u16 gid, u32 eid, u32 size); +-void wbgrp_fault_stats_inc(struct zram_group *zgrp, u16 gid, u32 eid, u32 size); +-#endif +-#endif +diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig +index c30099866..0b04c1eb5 100644 +--- a/drivers/clk/Kconfig ++++ b/drivers/clk/Kconfig +@@ -501,6 +501,7 @@ source "drivers/clk/visconti/Kconfig" + source "drivers/clk/x86/Kconfig" + source "drivers/clk/xilinx/Kconfig" + source "drivers/clk/zynqmp/Kconfig" ++source "drivers/clk/vendor/Kconfig" + + # Kunit test cases + config CLK_KUNIT_TEST +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +index 
18969cbd4..98116bcbd 100644 +--- a/drivers/clk/Makefile ++++ b/drivers/clk/Makefile +@@ -136,3 +136,4 @@ endif + obj-y += xilinx/ + obj-$(CONFIG_ARCH_ZYNQ) += zynq/ + obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/ ++obj-$(CONFIG_ARCH_BSP) += vendor/ +diff --git a/drivers/clk/vendor/Kconfig b/drivers/clk/vendor/Kconfig +new file mode 100644 +index 000000000..c4cf956c6 +--- /dev/null ++++ b/drivers/clk/vendor/Kconfig +@@ -0,0 +1,15 @@ ++config COMMON_CLK_SS928V100 ++ tristate "SS928V100 Clock Driver" ++ depends on ARCH_SS928V100 || ARCH_SS927V100 || COMPILE_TEST ++ select RESET_BSP ++ default ARCH_BSP ++ help ++ Build the clock driver for ss928v100. ++ ++config RESET_BSP ++ bool "Vendor Reset Controller Driver" ++ depends on ARCH_BSP || COMPILE_TEST ++ select RESET_CONTROLLER ++ help ++ Build the reset controller driver for Vendor device chipsets. ++ +diff --git a/drivers/clk/vendor/Makefile b/drivers/clk/vendor/Makefile +new file mode 100644 +index 000000000..42a7a001e +--- /dev/null ++++ b/drivers/clk/vendor/Makefile +@@ -0,0 +1,8 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++ ++obj-y += clk.o clkgate-separated.o ++ ++obj-$(CONFIG_COMMON_CLK_SS928V100) += clk_ss928v100.o ++obj-$(CONFIG_RESET_BSP) += reset.o ++ +diff --git a/drivers/clk/vendor/clk.c b/drivers/clk/vendor/clk.c +new file mode 100644 +index 000000000..edfdd68e9 +--- /dev/null ++++ b/drivers/clk/vendor/clk.c +@@ -0,0 +1,316 @@ ++/* ++ * Copyright (c) 2012-2013 Shenshu Technologies Co., Ltd. ++ * Copyright (c) 2012-2013 Linaro Limited. ++ * ++ * Author: Haojian Zhuang ++ * Xin Li ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk.h" ++ ++static DEFINE_SPINLOCK(bsp_clk_lock); ++ ++struct bsp_clock_data *bsp_clk_alloc(struct platform_device *pdev, ++ unsigned int nr_clks) ++{ ++ struct bsp_clock_data *clk_data; ++ struct resource *res; ++ struct clk **clk_table; ++ ++ clk_data = devm_kmalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL); ++ if (!clk_data) ++ return NULL; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ goto clk_data_free; ++ clk_data->base = devm_ioremap(&pdev->dev, ++ res->start, resource_size(res)); ++ if (!clk_data->base) ++ goto clk_data_free; ++ ++ clk_table = devm_kmalloc_array(&pdev->dev, nr_clks, ++ sizeof(*clk_table), ++ GFP_KERNEL); ++ if (!clk_table) ++ goto clk_data_base_unmap; ++ ++ clk_data->clk_data.clks = clk_table; ++ clk_data->clk_data.clk_num = nr_clks; ++ ++ return clk_data; ++ ++clk_data_base_unmap: ++ if (clk_data->base != NULL) ++ devm_iounmap(&pdev->dev, clk_data->base); ++clk_data_free: ++ if (clk_data != NULL) { ++ devm_kfree(&pdev->dev, clk_data); ++ clk_data = NULL; ++ } ++ return NULL; ++} ++EXPORT_SYMBOL_GPL(bsp_clk_alloc); ++ ++struct bsp_clock_data *bsp_clk_init(struct device_node *np, ++ unsigned int nr_clks) ++{ ++ struct bsp_clock_data *clk_data; ++ struct clk **clk_table; ++ void __iomem *base; ++ ++ base = of_iomap(np, 0); ++ if (!base) { ++ pr_err("%s: failed to map clock registers\n", __func__); ++ goto err; ++ } ++ ++ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); ++ if (!clk_data) ++ goto err; ++ ++ clk_data->base = base; ++ clk_table = kcalloc(nr_clks, sizeof(*clk_table), GFP_KERNEL); ++ if (!clk_table) ++ goto err_data; ++ ++ clk_data->clk_data.clks = clk_table; ++ clk_data->clk_data.clk_num = nr_clks; ++ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data); ++ return clk_data; ++err_data: ++ if (base) { ++ iounmap(base); ++ base = NULL; ++ } ++ kfree(clk_data); ++err: ++ return NULL; ++} ++EXPORT_SYMBOL_GPL(bsp_clk_init); ++ ++long bsp_clk_register_fixed_rate(const struct bsp_fixed_rate_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ clk = clk_register_fixed_rate(NULL, clks[i].name, ++ clks[i].parent_name, ++ clks[i].flags, ++ clks[i].fixed_rate); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ goto err; ++ } ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++ ++ return 0; ++ ++err: ++ while (i--) ++ clk_unregister_fixed_rate(data->clk_data.clks[clks[i].id]); ++ ++ return PTR_ERR(clk); ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_fixed_rate); ++ ++long bsp_clk_register_fixed_factor(const struct bsp_fixed_factor_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ clk = clk_register_fixed_factor(NULL, clks[i].name, ++ clks[i].parent_name, ++ clks[i].flags, clks[i].mult, ++ clks[i].div); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ goto err; ++ } ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++ ++ return 0; ++ ++err: ++ while (i--) ++ clk_unregister_fixed_factor(data->clk_data.clks[clks[i].id]); ++ ++ return PTR_ERR(clk); ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_fixed_factor); ++ ++long bsp_clk_register_mux(const struct bsp_mux_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ void __iomem *base = 
data->base; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ u32 mask = BIT(clks[i].width) - 1; ++ ++ clk = clk_register_mux_table(NULL, clks[i].name, ++ clks[i].parent_names, ++ clks[i].num_parents, clks[i].flags, ++ base + clks[i].offset, clks[i].shift, ++ mask, clks[i].mux_flags, ++ clks[i].table, &bsp_clk_lock); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ goto err; ++ } ++ ++ if (clks[i].alias) ++ clk_register_clkdev(clk, clks[i].alias, NULL); ++ ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++ ++ return 0; ++ ++err: ++ while (i--) ++ clk_unregister_mux(data->clk_data.clks[clks[i].id]); ++ ++ return PTR_ERR(clk); ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_mux); ++ ++long bsp_clk_register_divider(const struct bsp_divider_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ void __iomem *base = data->base; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ clk = clk_register_divider_table(NULL, clks[i].name, ++ clks[i].parent_name, ++ clks[i].flags, ++ base + clks[i].offset, ++ clks[i].shift, clks[i].width, ++ clks[i].div_flags, ++ clks[i].table, ++ &bsp_clk_lock); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ goto err; ++ } ++ ++ if (clks[i].alias) ++ clk_register_clkdev(clk, clks[i].alias, NULL); ++ ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++ ++ return 0; ++ ++err: ++ while (i--) ++ clk_unregister_divider(data->clk_data.clks[clks[i].id]); ++ ++ return PTR_ERR(clk); ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_divider); ++ ++long bsp_clk_register_gate(const struct bsp_gate_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ void __iomem *base = data->base; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ clk = clk_register_gate(NULL, clks[i].name, ++ clks[i].parent_name, ++ clks[i].flags, ++ base + clks[i].offset, ++ clks[i].bit_idx, ++ clks[i].gate_flags, ++ &bsp_clk_lock); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ goto err; ++ } ++ ++ if (clks[i].alias) ++ clk_register_clkdev(clk, clks[i].alias, NULL); ++ ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++ ++ return 0; ++ ++err: ++ while (i--) ++ clk_unregister_gate(data->clk_data.clks[clks[i].id]); ++ ++ return PTR_ERR(clk); ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_gate); ++ ++void bsp_clk_register_gate_sep(const struct bsp_gate_clock *clks, ++ int nums, struct bsp_clock_data *data) ++{ ++ struct clk *clk; ++ void __iomem *base = data->base; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ clk = bsp_register_clkgate_sep(NULL, clks[i].name, ++ clks[i].parent_name, ++ clks[i].flags, ++ base + clks[i].offset, ++ clks[i].bit_idx, ++ clks[i].gate_flags, ++ &bsp_clk_lock); ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ continue; ++ } ++ ++ if (clks[i].alias) ++ clk_register_clkdev(clk, clks[i].alias, NULL); ++ ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++} ++EXPORT_SYMBOL_GPL(bsp_clk_register_gate_sep); ++ +diff --git a/drivers/clk/vendor/clk.h b/drivers/clk/vendor/clk.h +new file mode 100644 +index 000000000..1d2905777 +--- /dev/null ++++ b/drivers/clk/vendor/clk.h +@@ -0,0 +1,147 @@ ++/* ++ * Copyright (c) 2012-2013 Shenshu Technologies Co., Ltd. ++ * Copyright (c) 2012-2013 Linaro Limited. 
++ * ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ */ ++ ++#ifndef __BSP_CLK_H ++#define __BSP_CLK_H ++ ++#include ++#include ++#include ++ ++struct platform_device; ++ ++struct bsp_clock_data { ++ struct clk_onecell_data clk_data; ++ void __iomem *base; ++}; ++ ++struct bsp_fixed_rate_clock { ++ unsigned int id; ++ char *name; ++ const char *parent_name; ++ unsigned long flags; ++ unsigned long fixed_rate; ++}; ++ ++struct bsp_fixed_factor_clock { ++ unsigned int id; ++ char *name; ++ const char *parent_name; ++ unsigned long mult; ++ unsigned long div; ++ unsigned long flags; ++}; ++ ++struct bsp_mux_clock { ++ unsigned int id; ++ const char *name; ++ const char *const *parent_names; ++ u8 num_parents; ++ unsigned long flags; ++ unsigned long offset; ++ u8 shift; ++ u8 width; ++ u8 mux_flags; ++ u32 *table; ++ const char *alias; ++}; ++ ++struct bsp_phase_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_names; ++ unsigned long flags; ++ unsigned long offset; ++ u8 shift; ++ u8 width; ++ u32 *phase_degrees; ++ u32 *phase_regvals; ++ u8 phase_num; ++}; ++ ++struct bsp_divider_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_name; ++ unsigned long flags; ++ unsigned long offset; ++ u8 shift; ++ u8 width; ++ u8 div_flags; ++ struct clk_div_table *table; ++ const char *alias; ++}; ++ ++struct bsp_gate_clock { ++ unsigned int id; ++ const char *name; ++ const char *parent_name; ++ unsigned long flags; ++ unsigned long offset; ++ u8 bit_idx; ++ u8 gate_flags; ++ const char *alias; ++}; ++ ++struct clk *bsp_register_clkgate_sep(struct device *dev, const char *name, ++ const char *parent_name, ++ unsigned long flags, ++ void __iomem *reg, u8 bit_idx, ++ u8 clk_gate_flags, spinlock_t *lock); ++ ++struct bsp_clock_data *bsp_clk_alloc(struct platform_device *pdev, ++ unsigned int nr_clks); ++struct bsp_clock_data *bsp_clk_init(struct device_node *np, ++ unsigned int nr_clks); ++long bsp_clk_register_fixed_rate(const struct bsp_fixed_rate_clock *clks, ++ int nums, struct bsp_clock_data *data); ++long bsp_clk_register_fixed_factor(const struct bsp_fixed_factor_clock *clks, ++ int nums, struct bsp_clock_data *data); ++long bsp_clk_register_mux(const struct bsp_mux_clock *clks, ++ int nums, struct bsp_clock_data *data); ++long bsp_clk_register_divider(const struct bsp_divider_clock *clks, ++ int nums, struct bsp_clock_data *data); ++long bsp_clk_register_gate(const struct bsp_gate_clock *clks, ++ int nums, struct bsp_clock_data *data); ++void bsp_clk_register_gate_sep(const struct bsp_gate_clock *clks, ++ int nums, struct bsp_clock_data *data); ++ ++#define bsp_clk_unregister(type) \ ++static inline \ ++void bsp_clk_unregister_##type(const struct bsp_##type##_clock *clks, \ ++ int nums, struct bsp_clock_data *data) \ ++{ \ ++ struct clk **clocks = 
data->clk_data.clks; \ ++ int i; \ ++ for (i = 0; i < nums; i++) { \ ++ unsigned int id = clks[i].id; \ ++ if (clocks[id]) \ ++ clk_unregister_##type(clocks[id]); \ ++ } \ ++} ++ ++bsp_clk_unregister(fixed_rate) ++bsp_clk_unregister(fixed_factor) ++bsp_clk_unregister(mux) ++bsp_clk_unregister(divider) ++bsp_clk_unregister(gate) ++ ++#endif /* __BSP_CLK_H */ +diff --git a/drivers/clk/vendor/clk_ss928v100.c b/drivers/clk/vendor/clk_ss928v100.c +new file mode 100644 +index 000000000..b62e60ac4 +--- /dev/null ++++ b/drivers/clk/vendor/clk_ss928v100.c +@@ -0,0 +1,646 @@ ++/* ++ * SS928V100 Clock Driver ++ * ++ * Copyright (c) 2016-2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk.h" ++#include "crg.h" ++#include "reset.h" ++ ++struct ss928v100_pll_clock { ++ u32 id; ++ const char *name; ++ const char *parent_name; ++ u32 ctrl_reg1; ++ u8 frac_shift; ++ u8 frac_width; ++ u8 postdiv1_shift; ++ u8 postdiv1_width; ++ u8 postdiv2_shift; ++ u8 postdiv2_width; ++ u32 ctrl_reg2; ++ u8 fbdiv_shift; ++ u8 fbdiv_width; ++ u8 refdiv_shift; ++ u8 refdiv_width; ++}; ++ ++struct ss928v100_clk_pll { ++ struct clk_hw hw; ++ u32 id; ++ void __iomem *ctrl_reg1; ++ u8 frac_shift; ++ u8 frac_width; ++ u8 postdiv1_shift; ++ u8 postdiv1_width; ++ u8 postdiv2_shift; ++ u8 postdiv2_width; ++ void __iomem *ctrl_reg2; ++ u8 fbdiv_shift; ++ u8 fbdiv_width; ++ u8 refdiv_shift; ++ u8 refdiv_width; ++}; ++ ++/* soc clk config */ ++static const struct bsp_fixed_rate_clock ss928v100_fixed_rate_clks_crg[] = { ++ { SS928V100_FIXED_396M, "396m", NULL, 0, 396000000, }, ++ { SS928V100_FIXED_297M, "297m", NULL, 0, 297000000, }, ++ { SS928V100_FIXED_250M, "250m", NULL, 0, 250000000, }, ++ { SS928V100_FIXED_200M, "200m", NULL, 0, 200000000, }, ++ { SS928V100_FIXED_198M, "198m", NULL, 0, 198000000, }, ++ { SS928V100_FIXED_187P_5M, "187p5m", NULL, 0, 187500000, }, ++ { SS928V100_FIXED_150M, "150m", NULL, 0, 150000000, }, ++ { SS928V100_FIXED_148P_5M, "148p5m", NULL, 0, 148500000, }, ++ { SS928V100_FIXED_100M, "100m", NULL, 0, 100000000, }, ++ { SS928V100_FIXED_99M, "99m", NULL, 0, 99000000, }, ++ { SS928V100_FIXED_50M, "50m", NULL, 0, 50000000, }, ++ { SS928V100_FIXED_25M, "25m", NULL, 0, 25000000, }, ++ { SS928V100_FIXED_24M, "24m", NULL, 0, 24000000, }, ++ { SS928V100_FIXED_3M, "3m", NULL, 0, 3000000, }, ++ { SS928V100_FIXED_400K, "400k", NULL, 0, 400000, }, ++}; ++ ++ ++static const char *fmc_mux_p[] __initdata = { ++ "24m", "99m", "148p5m", "198m", "250m", "297m", "396m" ++}; ++static u32 fmc_mux_table[] = {0, 1, 2, 3, 4, 5, 6}; ++ ++static const char *mmc_mux_p[] __initdata = { ++ "400k", "25m", "50m", "100m", "150m", "187p5m", "200m" ++}; ++static u32 mmc_mux_table[] = {0, 1, 2, 3, 4, 5, 6}; ++ ++static const char *sdio0_mux_p[] __initdata = { ++ "400k", "25m", "50m", "100m", "150m", "187p5m", "200m" ++}; ++static 
u32 sdio0_mux_table[] = {0, 1, 2, 3, 4, 5, 6}; ++ ++static const char *sdio1_mux_p[] __initdata = { ++ "400k", "25m", "50m", "100m", "150m", "187p5m", "200m" ++}; ++static u32 sdio1_mux_table[] = {0, 1, 2, 3, 4, 5, 6}; ++ ++static const char *uart_mux_p[] __initdata = {"50m", "24m", "3m", "100m"}; ++static u32 uart_mux_table[] = {0, 1, 2, 3}; ++ ++static const char *i2c_mux_p[] __initdata = { ++ "50m", "100m" ++}; ++static u32 i2c_mux_table[] = {0, 1}; ++ ++static const char * pwm0_mux_p[] __initdata = {"200m"}; ++static u32 pwm0_mux_table[] = {0}; ++ ++static const char * pwm1_mux_p[] __initdata = {"200m"}; ++static u32 pwm1_mux_table[] = {0}; ++ ++static struct bsp_mux_clock ss928v100_mux_clks_crg[] __initdata = { ++ { ++ SS928V100_FMC_MUX, "fmc_mux", ++ fmc_mux_p, ARRAY_SIZE(fmc_mux_p), ++ CLK_SET_RATE_PARENT, 0x3f40, 12, 3, 0, fmc_mux_table, ++ }, ++ { ++ SS928V100_MMC0_MUX, "mmc0_mux", ++ mmc_mux_p, ARRAY_SIZE(mmc_mux_p), ++ CLK_SET_RATE_PARENT, 0x34c0, 24, 3, 0, mmc_mux_table, ++ }, ++ { ++ SS928V100_MMC1_MUX, "mmc1_mux", ++ sdio0_mux_p, ARRAY_SIZE(sdio0_mux_p), ++ CLK_SET_RATE_PARENT, 0x35c0, 24, 3, 0, sdio0_mux_table, ++ }, ++ { ++ SS928V100_MMC2_MUX, "mmc2_mux", ++ sdio1_mux_p, ARRAY_SIZE(sdio1_mux_p), ++ CLK_SET_RATE_PARENT, 0x36c0, 24, 3, 0, sdio1_mux_table, ++ }, ++ { ++ SS928V100_UART0_MUX, "uart0_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x4180, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_UART1_MUX, "uart1_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x4188, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_UART2_MUX, "uart2_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x4190, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_UART3_MUX, "uart3_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x4198, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_UART4_MUX, "uart4_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x41a0, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_UART5_MUX, "uart5_mux", ++ uart_mux_p, ARRAY_SIZE(uart_mux_p), ++ CLK_SET_RATE_PARENT, 0x41a8, 12, 2, 0, uart_mux_table ++ }, ++ { ++ SS928V100_I2C0_MUX, "i2c0_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x4280, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_I2C1_MUX, "i2c1_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x4288, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_I2C2_MUX, "i2c2_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x4290, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_I2C3_MUX, "i2c3_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x4298, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_I2C4_MUX, "i2c4_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x42a0, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_I2C5_MUX, "i2c5_mux", ++ i2c_mux_p, ARRAY_SIZE(i2c_mux_p), ++ CLK_SET_RATE_PARENT, 0x42a8, 12, 1, 0, i2c_mux_table ++ }, ++ { ++ SS928V100_PWM0_MUX, "pwm0_mux", ++ pwm0_mux_p, ARRAY_SIZE(pwm0_mux_p), ++ CLK_SET_RATE_PARENT, 0x4588, 12, 2, 0, pwm0_mux_table ++ }, ++ { ++ SS928V100_PWM1_MUX, "pwm1_mux", ++ pwm1_mux_p, ARRAY_SIZE(pwm1_mux_p), ++ CLK_SET_RATE_PARENT, 0x4590, 12, 2, 0, pwm1_mux_table ++ }, ++}; ++ ++static struct bsp_fixed_factor_clock ++ ss928v100_fixed_factor_clks[] __initdata = { ++}; ++ ++static struct bsp_gate_clock ss928v100_gate_clks[] __initdata = { ++ { ++ SS928V100_FMC_CLK, "clk_fmc", "fmc_mux", ++ CLK_SET_RATE_PARENT, 0x3f40, 4, 0, ++ }, ++ { ++ 
SS928V100_MMC0_CLK, "clk_mmc0", "mmc0_mux", ++ CLK_SET_RATE_PARENT, 0x34c0, 0, 0, ++ }, ++ { ++ SS928V100_MMC1_CLK, "clk_mmc1", "mmc1_mux", ++ CLK_SET_RATE_PARENT, 0x35c0, 0, 0, ++ }, ++ { ++ SS928V100_MMC2_CLK, "clk_mmc2", "mmc2_mux", ++ CLK_SET_RATE_PARENT, 0x36c0, 0, 0, ++ }, ++ { ++ SS928V100_UART0_CLK, "clk_uart0", "uart0_mux", ++ CLK_SET_RATE_PARENT, 0x4180, 4, 0, ++ }, ++ { ++ SS928V100_UART1_CLK, "clk_uart1", "uart1_mux", ++ CLK_SET_RATE_PARENT, 0x4188, 4, 0, ++ }, ++ { ++ SS928V100_UART2_CLK, "clk_uart2", "uart2_mux", ++ CLK_SET_RATE_PARENT, 0x4190, 4, 0, ++ }, ++ { ++ SS928V100_UART3_CLK, "clk_uart3", "uart3_mux", ++ CLK_SET_RATE_PARENT, 0x4198, 4, 0, ++ }, ++ { ++ SS928V100_UART4_CLK, "clk_uart4", "uart4_mux", ++ CLK_SET_RATE_PARENT, 0x41A0, 4, 0, ++ }, ++ { ++ SS928V100_UART5_CLK, "clk_uart5", "uart5_mux", ++ CLK_SET_RATE_PARENT, 0x41a8, 4, 0, ++ }, ++ /* ethernet mac */ ++ { ++ SS928V100_ETH_CLK, "clk_eth", NULL, ++ CLK_SET_RATE_PARENT, 0x37c4, 4, 0, ++ }, ++ { ++ SS928V100_ETH_MACIF_CLK, "clk_eth_macif", NULL, ++ CLK_SET_RATE_PARENT, 0x37c0, 4, 0, ++ }, ++ { ++ SS928V100_ETH1_CLK, "clk_eth1", NULL, ++ CLK_SET_RATE_PARENT, 0x3804, 4, 0, ++ }, ++ { ++ SS928V100_ETH1_MACIF_CLK, "clk_eth1_macif", NULL, ++ CLK_SET_RATE_PARENT, 0x3800, 4, 0, ++ }, ++ /* i2c */ ++ { ++ SS928V100_I2C0_CLK, "clk_i2c0", "i2c0_mux", ++ CLK_SET_RATE_PARENT, 0x4280, 4, 0, ++ }, ++ { ++ SS928V100_I2C1_CLK, "clk_i2c1", "i2c1_mux", ++ CLK_SET_RATE_PARENT, 0x4288, 4, 0, ++ }, ++ { ++ SS928V100_I2C2_CLK, "clk_i2c2", "i2c2_mux", ++ CLK_SET_RATE_PARENT, 0x4290, 4, 0, ++ }, ++ { ++ SS928V100_I2C3_CLK, "clk_i2c3", "i2c3_mux", ++ CLK_SET_RATE_PARENT, 0x4298, 4, 0, ++ }, ++ { ++ SS928V100_I2C4_CLK, "clk_i2c4", "i2c4_mux", ++ CLK_SET_RATE_PARENT, 0x42a0, 4, 0, ++ }, ++ { SS928V100_I2C5_CLK, "clk_i2c5", "i2c5_mux", ++ CLK_SET_RATE_PARENT, 0x42a8, 4, 0, ++ }, ++ /* spi */ ++ { ++ SS928V100_SPI0_CLK, "clk_spi0", "100m", ++ CLK_SET_RATE_PARENT, 0x4480, 4, 0, ++ }, ++ { ++ SS928V100_SPI1_CLK, "clk_spi1", "100m", ++ CLK_SET_RATE_PARENT, 0x4488, 4, 0, ++ }, ++ { ++ SS928V100_SPI2_CLK, "clk_spi2", "100m", ++ CLK_SET_RATE_PARENT, 0x4490, 4, 0, ++ }, ++ { ++ SS928V100_SPI3_CLK, "clk_spi3", "100m", ++ CLK_SET_RATE_PARENT, 0x4498, 4, 0, ++ }, ++ { ++ SS928V100_EDMAC_AXICLK, "axi_clk_edmac", NULL, ++ CLK_SET_RATE_PARENT, 0x2a80, 5, 0, ++ }, ++ { ++ SS928V100_EDMAC_CLK, "clk_edmac", NULL, ++ CLK_SET_RATE_PARENT, 0x2a80, 4, 0, ++ }, ++ /* pwm0 */ ++ { ++ SS928V100_PWM0_CLK, "clk_pwm0", "pwm0_mux", ++ CLK_SET_RATE_PARENT, 0x4588, 4, 0, ++ }, ++ /* pwm1 */ ++ { ++ SS928V100_PWM1_CLK, "clk_pwm1", "pwm1_mux", ++ CLK_SET_RATE_PARENT, 0x4590, 4, 0, ++ }, ++}; ++ ++static struct ss928v100_pll_clock ss928v100_pll_clks[] __initdata = { ++ { ++ SS928V100_APLL_CLK, "apll", NULL, 0x0, 0, 24, 24, 3, 28, 3, ++ 0x4, 0, 12, 12, 6 ++ }, ++}; ++ ++static inline struct ss928v100_clk_pll *to_pll_clk(struct clk_hw *_hw) ++{ ++ return container_of(_hw, struct ss928v100_clk_pll, hw); ++} ++ ++static void ss928v100_calc_pll(u32 *frac_val, u32 *fbdiv_val, ++ u32 *refdiv_val, u64 rate) ++{ ++ u64 rem; ++ *frac_val = 0; ++ /* Frequency divided by 1000000 can be converted from Hz to MHz. */ ++ rem = do_div(rate, 1000000); ++ /* rate/24 is the integral part of the frequency multiplication coefficient. */ ++ *fbdiv_val = rate / 24; ++ *refdiv_val = 1; ++ /* 2 to the 24th power */ ++ rem = rem * (1 << 24); ++ /* Frequency divided by 1000000 can be converted from Hz to MHz. 
*/ ++ do_div(rem, 1000000); ++ *frac_val = rem; ++} ++ ++static int clk_pll_set_rate(struct clk_hw *hw, ++ unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct ss928v100_clk_pll *clk = to_pll_clk(hw); ++ u32 frac_val, postdiv1_val, postdiv2_val, fbdiv_val, refdiv_val; ++ u32 val; ++ ++ /* FIXME: ignore the postdivs for now because the apll does not use them */ ++ postdiv1_val = postdiv2_val = 0; ++ ++ ss928v100_calc_pll(&frac_val, &fbdiv_val, &refdiv_val, (u64)rate); ++ ++ val = readl_relaxed(clk->ctrl_reg1); ++ val &= ~(((1 << clk->frac_width) - 1) << clk->frac_shift); ++ val &= ~(((1 << clk->postdiv1_width) - 1) << clk->postdiv1_shift); ++ val &= ~(((1 << clk->postdiv2_width) - 1) << clk->postdiv2_shift); ++ ++ val |= frac_val << clk->frac_shift; ++ val |= postdiv1_val << clk->postdiv1_shift; ++ val |= postdiv2_val << clk->postdiv2_shift; ++ writel_relaxed(val, clk->ctrl_reg1); ++ ++ val = readl_relaxed(clk->ctrl_reg2); ++ val &= ~(((1 << clk->fbdiv_width) - 1) << clk->fbdiv_shift); ++ val &= ~(((1 << clk->refdiv_width) - 1) << clk->refdiv_shift); ++ ++ val |= fbdiv_val << clk->fbdiv_shift; ++ val |= refdiv_val << clk->refdiv_shift; ++ writel_relaxed(val, clk->ctrl_reg2); ++ ++ return 0; ++} ++ ++static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct ss928v100_clk_pll *clk = to_pll_clk(hw); ++ u64 fbdiv_val, refdiv_val; ++ u32 val; ++ u64 tmp, rate; ++ ++ val = readl_relaxed(clk->ctrl_reg2); ++ val = val >> clk->fbdiv_shift; ++ val &= ((1 << clk->fbdiv_width) - 1); ++ fbdiv_val = val; ++ ++ val = readl_relaxed(clk->ctrl_reg2); ++ val = val >> clk->refdiv_shift; ++ val &= ((1 << clk->refdiv_width) - 1); ++ refdiv_val = val; ++ ++ /* rate = 24000000 * (fbdiv + frac / (1 << 24)) / refdiv; only the integer fbdiv part is applied here */ ++ tmp = 24000000 * fbdiv_val; ++ rate = tmp; ++ do_div(rate, refdiv_val); ++ ++ return rate; ++} ++ ++static int clk_pll_determine_rate(struct clk_hw *hw, ++ struct clk_rate_request *req) ++{ ++ return req->rate; /* accept the requested rate unchanged */ ++} ++ ++static const struct clk_ops clk_pll_ops = { ++ .set_rate = clk_pll_set_rate, ++ .determine_rate = clk_pll_determine_rate, ++ .recalc_rate = clk_pll_recalc_rate, ++}; ++ ++void clk_register_pll(const struct ss928v100_pll_clock *clks, ++ int nums, const struct bsp_clock_data *data) ++{ ++ void __iomem *base = data->base; ++ int i; ++ ++ for (i = 0; i < nums; i++) { ++ struct ss928v100_clk_pll *p_clk = NULL; ++ struct clk *clk = NULL; ++ struct clk_init_data init = {}; ++ ++ p_clk = kzalloc(sizeof(*p_clk), GFP_KERNEL); ++ if (p_clk == NULL) ++ return; ++ ++ init.name = clks[i].name; ++ init.flags = 0; ++ init.parent_names = ++ (clks[i].parent_name ? &clks[i].parent_name : NULL); ++ init.num_parents = (clks[i].parent_name ? 
1 : 0); ++ init.ops = &clk_pll_ops; ++ ++ p_clk->ctrl_reg1 = base + clks[i].ctrl_reg1; ++ p_clk->frac_shift = clks[i].frac_shift; ++ p_clk->frac_width = clks[i].frac_width; ++ p_clk->postdiv1_shift = clks[i].postdiv1_shift; ++ p_clk->postdiv1_width = clks[i].postdiv1_width; ++ p_clk->postdiv2_shift = clks[i].postdiv2_shift; ++ p_clk->postdiv2_width = clks[i].postdiv2_width; ++ ++ p_clk->ctrl_reg2 = base + clks[i].ctrl_reg2; ++ p_clk->fbdiv_shift = clks[i].fbdiv_shift; ++ p_clk->fbdiv_width = clks[i].fbdiv_width; ++ p_clk->refdiv_shift = clks[i].refdiv_shift; ++ p_clk->refdiv_width = clks[i].refdiv_width; ++ p_clk->hw.init = &init; ++ ++ clk = clk_register(NULL, &p_clk->hw); ++ if (IS_ERR(clk)) { ++ kfree(p_clk); ++ pr_err("%s: failed to register clock %s\n", ++ __func__, clks[i].name); ++ continue; ++ } ++ ++ data->clk_data.clks[clks[i].id] = clk; ++ } ++} ++ ++static __init struct bsp_clock_data *ss928v100_clk_register( ++ struct platform_device *pdev) ++{ ++ struct bsp_clock_data *clk_data = NULL; ++ int ret; ++ ++ clk_data = bsp_clk_alloc(pdev, SS928V100_CRG_NR_CLKS); ++ if (clk_data == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ ret = bsp_clk_register_fixed_rate(ss928v100_fixed_rate_clks_crg, ++ ARRAY_SIZE(ss928v100_fixed_rate_clks_crg), clk_data); ++ if (ret) ++ return ERR_PTR(ret); ++ ++ clk_register_pll(ss928v100_pll_clks, ++ ARRAY_SIZE(ss928v100_pll_clks), clk_data); ++ ++ ret = bsp_clk_register_mux(ss928v100_mux_clks_crg, ++ ARRAY_SIZE(ss928v100_mux_clks_crg), ++ clk_data); ++ if (ret) ++ goto unregister_fixed_rate; ++ ++ ret = bsp_clk_register_fixed_factor(ss928v100_fixed_factor_clks, ++ ARRAY_SIZE(ss928v100_fixed_factor_clks), clk_data); ++ if (ret) ++ goto unregister_mux; ++ ++ ret = bsp_clk_register_gate(ss928v100_gate_clks, ++ ARRAY_SIZE(ss928v100_gate_clks), ++ clk_data); ++ if (ret) ++ goto unregister_factor; ++ ++ ret = of_clk_add_provider(pdev->dev.of_node, ++ of_clk_src_onecell_get, &clk_data->clk_data); ++ if (ret) ++ goto unregister_gate; ++ ++ return clk_data; ++ ++unregister_gate: ++ bsp_clk_unregister_gate(ss928v100_gate_clks, ++ ARRAY_SIZE(ss928v100_gate_clks), clk_data); ++unregister_factor: ++ bsp_clk_unregister_fixed_factor(ss928v100_fixed_factor_clks, ++ ARRAY_SIZE(ss928v100_fixed_factor_clks), clk_data); ++unregister_mux: ++ bsp_clk_unregister_mux(ss928v100_mux_clks_crg, ++ ARRAY_SIZE(ss928v100_mux_clks_crg), ++ clk_data); ++unregister_fixed_rate: ++ bsp_clk_unregister_fixed_rate(ss928v100_fixed_rate_clks_crg, ++ ARRAY_SIZE(ss928v100_fixed_rate_clks_crg), clk_data); ++ return ERR_PTR(ret); ++} ++ ++static __init void ss928v100_clk_unregister(const struct platform_device *pdev) ++{ ++ struct bsp_crg_dev *crg = platform_get_drvdata(pdev); ++ ++ of_clk_del_provider(pdev->dev.of_node); ++ ++ bsp_clk_unregister_gate(ss928v100_gate_clks, ++ ARRAY_SIZE(ss928v100_gate_clks), crg->clk_data); ++ bsp_clk_unregister_mux(ss928v100_mux_clks_crg, ++ ARRAY_SIZE(ss928v100_mux_clks_crg), crg->clk_data); ++ bsp_clk_unregister_fixed_factor(ss928v100_fixed_factor_clks, ++ ARRAY_SIZE(ss928v100_fixed_factor_clks), crg->clk_data); ++ bsp_clk_unregister_fixed_rate(ss928v100_fixed_rate_clks_crg, ++ ARRAY_SIZE(ss928v100_fixed_rate_clks_crg), crg->clk_data); ++} ++ ++static const struct bsp_crg_funcs ss928v100_crg_funcs = { ++ .register_clks = ss928v100_clk_register, ++ .unregister_clks = ss928v100_clk_unregister, ++}; ++ ++ ++static const struct of_device_id ss928v100_crg_match_table[] = { ++ { ++ .compatible = "vendor,ss928v100_clock", ++ .data = &ss928v100_crg_funcs ++ }, ++ { } ++}; 
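All of the clocks registered above are published through of_clk_add_provider() with of_clk_src_onecell_get, so peripheral drivers never touch the CRG registers directly; they look clocks up by index through the common clk framework. The sketch below is a minimal illustration of that consumer side — the "uart0" clock-names entry and the 100 MHz target are hypothetical, while devm_clk_get()/clk_prepare_enable()/clk_set_rate() are the standard kernel consumer API:

    /* Hypothetical consumer of a CRG-provided clock; error handling trimmed. */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct clk *clk;
            int ret;

            /* Resolved via of_clk_src_onecell_get() in the CRG provider. */
            clk = devm_clk_get(&pdev->dev, "uart0");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* Ungates the bit programmed by bsp_clk_register_gate(). */
            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;

            /* CLK_SET_RATE_PARENT lets this request propagate upward. */
            clk_set_rate(clk, 100000000);
            return 0;
    }

Because the gate and mux clocks are registered with CLK_SET_RATE_PARENT, the clk_set_rate() call walks up the tree and selects the closest fixed-rate parent rather than dividing locally.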
++MODULE_DEVICE_TABLE(of, ss928v100_crg_match_table); ++ ++static int ss928v100_crg_probe(struct platform_device *pdev) ++{ ++ struct bsp_crg_dev *crg = NULL; ++ int ret = -1; ++ ++ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL); ++ if (crg == NULL) ++ return -ENOMEM; ++ ++ crg->funcs = of_device_get_match_data(&pdev->dev); ++ if (crg->funcs == NULL) { ++ ret = -ENOENT; ++ goto crg_free; ++ } ++ ++ crg->rstc = vendor_reset_init(pdev); ++ if (crg->rstc == NULL) { ++ ret = -ENOENT; ++ goto crg_free; ++ } ++ ++ crg->clk_data = crg->funcs->register_clks(pdev); ++ if (IS_ERR(crg->clk_data)) { ++ bsp_reset_exit(crg->rstc); ++ ret = PTR_ERR(crg->clk_data); ++ goto crg_free; ++ } ++ ++ platform_set_drvdata(pdev, crg); ++ return 0; ++ ++crg_free: ++ if (crg != NULL) { ++ devm_kfree(&pdev->dev, crg); ++ crg = NULL; ++ } ++ return ret; ++} ++ ++static int ss928v100_crg_remove(struct platform_device *pdev) ++{ ++ struct bsp_crg_dev *crg = platform_get_drvdata(pdev); ++ ++ bsp_reset_exit(crg->rstc); ++ crg->funcs->unregister_clks(pdev); ++ return 0; ++} ++ ++static struct platform_driver ss928v100_crg_driver = { ++ .probe = ss928v100_crg_probe, ++ .remove = ss928v100_crg_remove, ++ .driver = { ++ .name = "ss928v100_clock", ++ .of_match_table = ss928v100_crg_match_table, ++ }, ++}; ++ ++static int __init ss928v100_crg_init(void) ++{ ++ return platform_driver_register(&ss928v100_crg_driver); ++} ++core_initcall(ss928v100_crg_init); ++ ++static void __exit ss928v100_crg_exit(void) ++{ ++ platform_driver_unregister(&ss928v100_crg_driver); ++} ++module_exit(ss928v100_crg_exit); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("SS928V100 CRG Driver"); +diff --git a/drivers/clk/vendor/clkgate-separated.c b/drivers/clk/vendor/clkgate-separated.c +new file mode 100644 +index 000000000..07c4b334e +--- /dev/null ++++ b/drivers/clk/vendor/clkgate-separated.c +@@ -0,0 +1,123 @@ ++/* ++ * Copyright (c) 2012-2013 Shenshu Technologies Co., Ltd. ++ * Copyright (c) 2012-2013 Linaro Limited. ++ * ++ ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "clk.h" ++ ++/* clock separated gate register offset */ ++#define CLKGATE_SEPERATED_ENABLE 0x0 ++#define CLKGATE_SEPERATED_DISABLE 0x4 ++#define CLKGATE_SEPERATED_STATUS 0x8 ++ ++struct clkgate_separated { ++ struct clk_hw hw; ++ void __iomem *enable; /* enable register */ ++ u8 bit_idx; /* bits in enable/disable register */ ++ u8 flags; ++ spinlock_t *lock; ++}; ++ ++static int clkgate_separated_enable(struct clk_hw *hw) ++{ ++ struct clkgate_separated *sclk; ++ unsigned long flags = 0; ++ u32 reg; ++ ++ sclk = container_of(hw, struct clkgate_separated, hw); ++ if (sclk->lock) ++ spin_lock_irqsave(sclk->lock, flags); ++ reg = BIT(sclk->bit_idx); ++ writel_relaxed(reg, sclk->enable); ++ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS); ++ if (sclk->lock) ++ spin_unlock_irqrestore(sclk->lock, flags); ++ return 0; ++} ++ ++static void clkgate_separated_disable(struct clk_hw *hw) ++{ ++ struct clkgate_separated *sclk; ++ unsigned long flags = 0; ++ u32 reg; ++ ++ sclk = container_of(hw, struct clkgate_separated, hw); ++ if (sclk->lock) ++ spin_lock_irqsave(sclk->lock, flags); ++ reg = BIT(sclk->bit_idx); ++ writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE); ++ readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS); ++ if (sclk->lock) ++ spin_unlock_irqrestore(sclk->lock, flags); ++} ++ ++static int clkgate_separated_is_enabled(struct clk_hw *hw) ++{ ++ struct clkgate_separated *sclk; ++ u32 reg; ++ ++ sclk = container_of(hw, struct clkgate_separated, hw); ++ reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS); ++ reg &= BIT(sclk->bit_idx); ++ ++ return reg != 0 ? 1 : 0; ++} ++ ++static const struct clk_ops clkgate_separated_ops = { ++ .enable = clkgate_separated_enable, ++ .disable = clkgate_separated_disable, ++ .is_enabled = clkgate_separated_is_enabled, ++}; ++ ++struct clk *bsp_register_clkgate_sep(struct device *dev, const char *name, ++ const char *parent_name, ++ unsigned long flags, ++ void __iomem *reg, u8 bit_idx, ++ u8 clk_gate_flags, spinlock_t *lock) ++{ ++ struct clkgate_separated *sclk; ++ struct clk *clk; ++ struct clk_init_data init; ++ ++ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL); ++ if (!sclk) ++ return ERR_PTR(-ENOMEM); ++ ++ init.name = name; ++ init.ops = &clkgate_separated_ops; ++ init.flags = flags; ++ init.parent_names = (parent_name ? &parent_name : NULL); ++ init.num_parents = (parent_name ? 1 : 0); ++ ++ sclk->enable = reg + CLKGATE_SEPERATED_ENABLE; ++ sclk->bit_idx = bit_idx; ++ sclk->flags = clk_gate_flags; ++ sclk->hw.init = &init; ++ sclk->lock = lock; ++ ++ clk = clk_register(dev, &sclk->hw); ++ if (IS_ERR(clk)) ++ kfree(sclk); ++ return clk; ++} +diff --git a/drivers/clk/vendor/crg.h b/drivers/clk/vendor/crg.h +new file mode 100644 +index 000000000..07b420f52 +--- /dev/null ++++ b/drivers/clk/vendor/crg.h +@@ -0,0 +1,40 @@ ++/* ++ * Vendor Clock and Reset Driver Header ++ * ++ * Copyright (c) 2016-2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ * ++ */ ++ ++#ifndef __BSP_CRG_H ++#define __BSP_CRG_H ++ ++#include ++ ++struct bsp_clock_data; ++struct bsp_reset_controller; ++ ++struct bsp_crg_funcs { ++ struct bsp_clock_data* (*register_clks)(struct platform_device *pdev); ++ void (*unregister_clks)(const struct platform_device *pdev); ++}; ++ ++struct bsp_crg_dev { ++ struct bsp_clock_data *clk_data; ++ struct bsp_reset_controller *rstc; ++ const struct bsp_crg_funcs *funcs; ++}; ++ ++#endif /* __BSP_CRG_H */ +diff --git a/drivers/clk/vendor/reset.c b/drivers/clk/vendor/reset.c +new file mode 100644 +index 000000000..ff39055f1 +--- /dev/null ++++ b/drivers/clk/vendor/reset.c +@@ -0,0 +1,159 @@ ++/* ++ * Copyright (c) 2015-2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "reset.h" ++ ++#define BSP_RESET_BIT_MASK 0x1f ++#define BSP_RESET_OFFSET_SHIFT 8 ++#define BSP_RESET_OFFSET_MASK 0xffff00 ++ ++ ++static inline struct bsp_reset_controller *to_bsp_reset_controller(struct reset_controller_dev *rcdev) ++{ ++ return container_of(rcdev, struct bsp_reset_controller, rcdev); ++} ++ ++static int bsp_reset_of_xlate(struct reset_controller_dev *rcdev, ++ const struct of_phandle_args *reset_spec) ++{ ++ u32 offset; ++ u8 bit; ++ ++ offset = (reset_spec->args[0] << BSP_RESET_OFFSET_SHIFT) ++ & BSP_RESET_OFFSET_MASK; ++ bit = reset_spec->args[1] & BSP_RESET_BIT_MASK; ++ ++ return (offset | bit); ++} ++ ++static int bsp_reset_assert(struct reset_controller_dev *rcdev, ++ unsigned long id) ++{ ++ struct bsp_reset_controller *rstc = to_bsp_reset_controller(rcdev); ++ unsigned long flags; ++ u32 offset, reg; ++ u8 bit; ++ ++ offset = (id & BSP_RESET_OFFSET_MASK) >> BSP_RESET_OFFSET_SHIFT; ++ bit = id & BSP_RESET_BIT_MASK; ++ ++ spin_lock_irqsave(&rstc->lock, flags); ++ ++ reg = readl(rstc->membase + offset); ++ writel(reg | BIT(bit), rstc->membase + offset); ++ ++ spin_unlock_irqrestore(&rstc->lock, flags); ++ ++ return 0; ++} ++ ++static int bsp_reset_deassert(struct reset_controller_dev *rcdev, ++ unsigned long id) ++{ ++ struct bsp_reset_controller *rstc = to_bsp_reset_controller(rcdev); ++ unsigned long flags; ++ u32 offset, reg; ++ u8 bit; ++ ++ offset = (id & BSP_RESET_OFFSET_MASK) >> BSP_RESET_OFFSET_SHIFT; ++ bit = id & BSP_RESET_BIT_MASK; ++ ++ spin_lock_irqsave(&rstc->lock, flags); ++ ++ reg = readl(rstc->membase + offset); ++ writel(reg & ~BIT(bit), rstc->membase + offset); ++ ++ spin_unlock_irqrestore(&rstc->lock, flags); ++ ++ return 0; ++} ++ ++static const struct reset_control_ops bsp_reset_ops = { ++ .assert = bsp_reset_assert, ++ .deassert = bsp_reset_deassert, ++}; ++ ++#ifdef CONFIG_ARCH_BSP ++int bsp_reset_init(struct device_node *np, ++ int nr_rsts) ++{ ++ struct bsp_reset_controller *rstc; ++ ++ rstc = 
kzalloc(sizeof(*rstc), GFP_KERNEL); ++ if (!rstc) ++ return -ENOMEM; ++ ++ rstc->membase = of_iomap(np, 0); ++ if (!rstc->membase) { ++ kfree(rstc); ++ return -EINVAL; ++ } ++ ++ spin_lock_init(&rstc->lock); ++ ++ rstc->rcdev.owner = THIS_MODULE; ++ rstc->rcdev.nr_resets = nr_rsts; ++ rstc->rcdev.ops = &bsp_reset_ops; ++ rstc->rcdev.of_node = np; ++ rstc->rcdev.of_reset_n_cells = 2; ++ rstc->rcdev.of_xlate = bsp_reset_of_xlate; ++ ++ return reset_controller_register(&rstc->rcdev); ++} ++EXPORT_SYMBOL_GPL(bsp_reset_init); ++#endif ++ ++struct bsp_reset_controller *vendor_reset_init(struct platform_device *pdev) ++{ ++ struct bsp_reset_controller *rstc; ++ struct resource *res; ++ ++ rstc = devm_kmalloc(&pdev->dev, sizeof(*rstc), GFP_KERNEL); ++ if (!rstc) ++ return NULL; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ rstc->membase = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(rstc->membase)) { ++ devm_kfree(&pdev->dev, rstc); ++ rstc = NULL; ++ return NULL; ++ } ++ ++ spin_lock_init(&rstc->lock); ++ rstc->rcdev.owner = THIS_MODULE; ++ rstc->rcdev.ops = &bsp_reset_ops; ++ rstc->rcdev.of_node = pdev->dev.of_node; ++ rstc->rcdev.of_reset_n_cells = 2; ++ rstc->rcdev.of_xlate = bsp_reset_of_xlate; ++ reset_controller_register(&rstc->rcdev); ++ ++ return rstc; ++} ++EXPORT_SYMBOL_GPL(vendor_reset_init); ++ ++void bsp_reset_exit(struct bsp_reset_controller *rstc) ++{ ++ reset_controller_unregister(&rstc->rcdev); ++} ++EXPORT_SYMBOL_GPL(bsp_reset_exit); +diff --git a/drivers/clk/vendor/reset.h b/drivers/clk/vendor/reset.h +new file mode 100644 +index 000000000..2b8a48c4e +--- /dev/null ++++ b/drivers/clk/vendor/reset.h +@@ -0,0 +1,49 @@ ++/* ++ * Copyright (c) 2015 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
++ */ ++ ++#ifndef __BSP_RESET_H ++#define __BSP_RESET_H ++ ++#include ++#include ++#include ++#include ++ ++struct bsp_reset_controller { ++ spinlock_t lock; ++ void __iomem *membase; ++ struct reset_controller_dev rcdev; ++}; ++ ++#ifdef CONFIG_RESET_CONTROLLER ++#include ++struct bsp_reset_controller *vendor_reset_init(struct platform_device *pdev); ++#ifdef CONFIG_ARCH_BSP ++int bsp_reset_init(struct device_node *np, int nr_rsts); ++#endif ++void bsp_reset_exit(struct bsp_reset_controller *rstc); ++#else ++static inline ++struct bsp_reset_controller *vendor_reset_init(struct platform_device *pdev) ++{ ++ return NULL; ++} ++static inline void bsp_reset_exit(struct bsp_reset_controller *rstc) ++{} ++#endif ++ ++#endif /* __BSP_RESET_H */ +diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig +index c86a4756a..e4dc53a36 100644 +--- a/drivers/dma-buf/Kconfig ++++ b/drivers/dma-buf/Kconfig +@@ -65,19 +65,6 @@ config DMABUF_SELFTESTS + default n + depends on DMA_SHARED_BUFFER + +-config DMABUF_PROCESS_INFO +- bool "Show dmabuf usage of all processes" +- default n +- depends on DMA_SHARED_BUFFER +- depends on PROC_FS || DEBUG_FS +- help +- Choose this option to show dmabuf objects usage of all processes. +- Firstly, with this option, when a process creates a dmabuf object, +- its pid and task_comm will be recorded in the dmabuf. +- Secondly, this option creates dma_buf/process_bufinfo file in +- debugfs (if DEBUG_FS enabled) and process_dmabuf_info file in procfs +- (if PROC_FS enabled) to show dmabuf objects usage of all processes. +- + menuconfig DMABUF_HEAPS + bool "DMA-BUF Userland Memory Heaps" + select DMA_SHARED_BUFFER +diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile +index cdb3bb049..70ec901ed 100644 +--- a/drivers/dma-buf/Makefile ++++ b/drivers/dma-buf/Makefile +@@ -16,5 +16,3 @@ dmabuf_selftests-y := \ + st-dma-resv.o + + obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o +- +-obj-$(CONFIG_DMABUF_PROCESS_INFO) += dma-buf-process-info.o +diff --git a/drivers/dma-buf/dma-buf-process-info.c b/drivers/dma-buf/dma-buf-process-info.c +deleted file mode 100755 +index ff528d0ca..000000000 +--- a/drivers/dma-buf/dma-buf-process-info.c ++++ /dev/null +@@ -1,165 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * DMA-BUF: dmabuf usage of all processes statistics. +- * +- * Copyright (c) 2022 Huawei Device Co., Ltd. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +- +-#include "dma-buf-process-info.h" +- +-static struct proc_dir_entry *proc_dmabuf_entry; +- +-struct dmabuf_task_info_args { +- struct seq_file *seq; +- struct task_struct *tsk; +- size_t tsk_dmabuf_bytes; +-}; +- +-void init_dma_buf_task_info(struct dma_buf *buf) +-{ +- struct task_struct *tsk = NULL; +- +- if (IS_ERR_OR_NULL(buf)) +- return; +- +- get_task_struct(current->group_leader); +- task_lock(current->group_leader); +- tsk = current->group_leader; +- buf->exp_pid = task_pid_nr(tsk); +- if (tsk->flags & PF_KTHREAD) +- tsk = NULL; +- task_unlock(current->group_leader); +- put_task_struct(current->group_leader); +- +- if (tsk) +- get_task_comm(buf->exp_task_comm, tsk); +- else /* kernel task */ +- strncpy(buf->exp_task_comm, "[kernel task]", +- sizeof(buf->exp_task_comm)); +-} +- +-pid_t dma_buf_exp_pid(const struct dma_buf *buf) +-{ +- if (IS_ERR_OR_NULL(buf)) +- return 0; +- +- return buf->exp_pid; +-} +- +-const char *dma_buf_exp_task_comm(const struct dma_buf *buf) +-{ +- if (IS_ERR_OR_NULL(buf)) +- return NULL; +- +- return buf->exp_task_comm; +-} +- +-static int dma_buf_single_file_show(const void *data, struct file *f, +- unsigned int fd) +-{ +- struct dmabuf_task_info_args *tsk_info = NULL; +- struct task_struct *tsk = NULL; +- struct dma_buf *buf = NULL; +- +- tsk_info = (struct dmabuf_task_info_args *)data; +- if (IS_ERR_OR_NULL(tsk_info) || IS_ERR_OR_NULL(tsk_info->seq)) +- return 0; +- +- tsk = tsk_info->tsk; +- buf = get_dma_buf_from_file(f); +- if (IS_ERR_OR_NULL(tsk) || IS_ERR_OR_NULL(buf)) +- return 0; +- +- tsk_info->tsk_dmabuf_bytes += buf->size; +- +- spin_lock(&buf->name_lock); +- seq_printf(tsk_info->seq, +- "%-16s %-16d %-16u %-16zu %-16lu %-16d %-16s %s \t %s\n", +- tsk->comm, +- tsk->pid, +- fd, +- buf->size, +- file_inode(buf->file)->i_ino, +- buf->exp_pid, +- buf->exp_task_comm, +- buf->name ?: "NULL", +- buf->exp_name ?: "NULL"); +- spin_unlock(&buf->name_lock); +- +- return 0; +-} +- +-static int dma_buf_process_info_show(struct seq_file *s, void *unused) +-{ +- struct dmabuf_task_info_args task_info = { NULL, NULL, 0 }; +- struct task_struct *tsk = NULL; +- +- seq_puts(s, "Dma-buf objects usage of processes:\n"); +- seq_printf(s, "%-16s %-16s %-16s %-16s %-16s %-16s %-16s %s \t %s\n", +- "Process", "pid", "fd", "size_bytes", "ino", "exp_pid", +- "exp_task_comm", "buf_name", "exp_name"); +- +- task_info.seq = s; +- +- rcu_read_lock(); +- for_each_process(tsk) { +- task_info.tsk = tsk; +- task_info.tsk_dmabuf_bytes = 0; +- +- task_lock(tsk); +- iterate_fd(tsk->files, 0, dma_buf_single_file_show, +- (void *)&task_info); +- if (task_info.tsk_dmabuf_bytes) +- seq_printf(s, "Total dmabuf size of %s: %zu bytes\n", +- tsk->comm, task_info.tsk_dmabuf_bytes); +- task_unlock(tsk); +- } +- rcu_read_unlock(); +- +- return 0; +-} +- +-void dma_buf_process_info_init_procfs(void) +-{ +- proc_dmabuf_entry = proc_create_single("process_dmabuf_info", 0444, +- NULL, +- dma_buf_process_info_show); +- if (!proc_dmabuf_entry) +- pr_err("%s: create node /proc/process_dmabuf_info failed\n", +- __func__); +-} +- +-void dma_buf_process_info_uninit_procfs(void) +-{ +- if (!proc_dmabuf_entry) +- return; +- +- proc_remove(proc_dmabuf_entry); +-} +- +-DEFINE_SHOW_ATTRIBUTE(dma_buf_process_info); +- +-int dma_buf_process_info_init_debugfs(struct dentry *parent) +-{ +- struct dentry *debugfs_file = NULL; +- int err = 0; +- +- if (IS_ERR_OR_NULL(parent)) +- return -EINVAL; +- +- debugfs_file = 
debugfs_create_file("process_bufinfo", 0444, +- parent, NULL, +- &dma_buf_process_info_fops); +- if (IS_ERR(debugfs_file)) { +- pr_err("dma_buf: debugfs: create process_bufinfo failed\n"); +- err = PTR_ERR(debugfs_file); +- } +- +- return err; +-} +diff --git a/drivers/dma-buf/dma-buf-process-info.h b/drivers/dma-buf/dma-buf-process-info.h +deleted file mode 100755 +index 1275c1c7e..000000000 +--- a/drivers/dma-buf/dma-buf-process-info.h ++++ /dev/null +@@ -1,83 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * DMA-BUF: dmabuf usage of all processes statistics. +- * +- * Copyright (c) 2022 Huawei Device Co., Ltd. +- */ +- +-#ifndef __DMA_BUF_PROCESS_INFO_H +-#define __DMA_BUF_PROCESS_INFO_H +- +-#ifdef CONFIG_DMABUF_PROCESS_INFO +-/** +- * init_dma_buf_task_info - init exp_pid and exp_task_comm of dma_buf +- * @buf: [in] pointer to struct dma_buf. If @buf IS_ERR_OR_NULL, +- * return with doing nothing. +- */ +-void init_dma_buf_task_info(struct dma_buf *buf); +- +-/** +- * dma_buf_exp_pid - return exp_pid of @buf +- * @buf: [in] pointer to struct dma_buf +- * +- * Return 0 if @buf IS_ERR_OR_NULL, else return buf->exp_pid +- */ +-pid_t dma_buf_exp_pid(const struct dma_buf *buf); +- +-/** +- * dma_buf_exp_task_comm - return exp_task_comm of @buf +- * @buf: [in] pointer to struct dma_buf +- * +- * Return NULL if @buf IS_ERR_OR_NULL, else return buf->exp_task_comm +- */ +-const char *dma_buf_exp_task_comm(const struct dma_buf *buf); +- +-/** +- * dma_buf_process_info_init_procfs - module init: create node in procfs +- */ +-void dma_buf_process_info_init_procfs(void); +- +-/** +- * dma_buf_process_info_uninit_procfs - module exit: remove node in procfs +- */ +-void dma_buf_process_info_uninit_procfs(void); +- +-/** +- * dma_buf_process_info_init_debugfs - create debug node under @parent +- * in debugfs. +- * @parent: [in] pointer to struct dentry. If @parent IS_ERR_OR_NULL, +- * return -EINVAL +- * +- * Return 0 if success, otherwise return errno. +- * +- * Note that there is no related uninit function, since the debug node will +- * be removed in dma_buf_uninit_debugfs() when dma_buf_deinit() called. 
+- */
+-int dma_buf_process_info_init_debugfs(struct dentry *parent);
+-
+-#else /* CONFIG_DMABUF_PROCESS_INFO */
+-
+-static inline void init_dma_buf_task_info(struct dma_buf *buf) {}
+-
+-static inline pid_t dma_buf_exp_pid(const struct dma_buf *buf)
+-{
+-	return 0;
+-}
+-
+-static inline const char *dma_buf_exp_task_comm(const struct dma_buf *buf)
+-{
+-	return NULL;
+-}
+-
+-static inline void dma_buf_process_info_init_procfs(void) {}
+-
+-static inline void dma_buf_process_info_uninit_procfs(void) {}
+-
+-static inline int
+-dma_buf_process_info_init_debugfs(struct dentry *parent)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_DMABUF_PROCESS_INFO */
+-#endif /* __DMA_BUF_PROCESS_INFO_H */
+-
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index b5ec6592e..21916bba7 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -32,7 +32,6 @@
+ #include
+
+ #include "dma-buf-sysfs-stats.h"
+-#include "dma-buf-process-info.h"
+
+ static inline int is_dma_buf_file(struct file *);
+
+@@ -1690,7 +1689,6 @@ static int dma_buf_init_debugfs(void)
+ 		err = PTR_ERR(d);
+ 	}
+
+-	dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir);
+ 	return err;
+ }
+
+@@ -1708,19 +1706,6 @@ static inline void dma_buf_uninit_debugfs(void)
+ }
+ #endif
+
+-#ifdef CONFIG_DMABUF_PROCESS_INFO
+-struct dma_buf *get_dma_buf_from_file(struct file *f)
+-{
+-	if (IS_ERR_OR_NULL(f))
+-		return NULL;
+-
+-	if (!is_dma_buf_file(f))
+-		return NULL;
+-
+-	return f->private_data;
+-}
+-#endif /* CONFIG_DMABUF_PROCESS_INFO */
+-
+ static int __init dma_buf_init(void)
+ {
+ 	int ret;
+@@ -1736,7 +1721,6 @@ static int __init dma_buf_init(void)
+ 	mutex_init(&db_list.lock);
+ 	INIT_LIST_HEAD(&db_list.head);
+ 	dma_buf_init_debugfs();
+-	dma_buf_process_info_init_procfs();
+ 	return 0;
+ }
+ subsys_initcall(dma_buf_init);
+@@ -1746,6 +1730,5 @@ static void __exit dma_buf_deinit(void)
+ 	dma_buf_uninit_debugfs();
+ 	kern_unmount(dma_buf_mnt);
+ 	dma_buf_uninit_sysfs_statistics();
+-	dma_buf_process_info_uninit_procfs();
+ }
+ __exitcall(dma_buf_deinit);
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index e36506471..4976e1717 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -369,6 +369,20 @@ config K3_DMA
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
+
++config EDMACV310
++	tristate "Vendor EDMAC Controller support"
++	depends on ARCH_BSP
++	select DMA_ENGINE
++	select DMA_VIRTUAL_CHANNELS
++	help
++	  Direct Memory Access (EDMA) is a high-speed data transfer
++	  operation. It supports data read/write between peripherals and
++	  memories without using the CPU.
++	  The Vendor EDMA Controller (EDMAC) directly transfers data between
++	  a memory and a peripheral, between peripherals, or between memories.
++	  This avoids CPU intervention and reduces the interrupt handling
++	  overhead of the CPU.
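For orientation: the EDMACV310 driver added in this patch registers with the generic dmaengine framework, so peripheral drivers reach it only through the standard client API. Below is a minimal, hypothetical consumer sketch; the "mydev" naming, the "tx" dma-names entry, and the FIFO address are illustrative assumptions, not part of this patch.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical client: stream one mapped buffer to a peripheral FIFO. */
static int mydev_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			  dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,	/* matches EDMAC_MAX_BURST_WIDTH */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* resolved via edma_of_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* lands in edmac_config() */
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}
	dmaengine_submit(desc);		/* queued on the virtual channel */
	dma_async_issue_pending(chan);	/* triggers edmac_issue_pending() */
	return 0;
out:
	dma_release_channel(chan);
	return ret;
}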
++ + config LPC18XX_DMAMUX + bool "NXP LPC18xx/43xx DMA MUX for PL080" + depends on ARCH_LPC18XX || COMPILE_TEST +diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile +index 83553a97a..39d3c0b95 100644 +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -83,6 +83,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o + obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/ + obj-$(CONFIG_INTEL_LDMA) += lgm/ + ++obj-$(CONFIG_EDMACV310) += edmacv310.o + obj-y += mediatek/ + obj-y += qcom/ + obj-y += ti/ +diff --git a/drivers/dma/edmacv310.c b/drivers/dma/edmacv310.c +new file mode 100644 +index 000000000..b2ce681f3 +--- /dev/null ++++ b/drivers/dma/edmacv310.c +@@ -0,0 +1,1463 @@ ++/* ++ * ++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "edmacv310.h" ++#include "dmaengine.h" ++#include "virt-dma.h" ++#include ++ ++#define DRIVER_NAME "edmacv310" ++ ++#define MAX_TSFR_LLIS 512 ++#define EDMACV300_LLI_WORDS 64 ++#define EDMACV300_POOL_ALIGN 64 ++#define BITS_PER_HALF_WORD 32 ++#define ERR_STATUS_REG_NUM 3 ++ ++typedef struct edmac_lli { ++ u64 next_lli; ++ u32 reserved[5]; ++ u32 count; ++ u64 src_addr; ++ u64 dest_addr; ++ u32 config; ++ u32 pad[3]; ++} edmac_lli; ++ ++struct edmac_sg { ++ dma_addr_t src_addr; ++ dma_addr_t dst_addr; ++ size_t len; ++ struct list_head node; ++}; ++ ++struct transfer_desc { ++ struct virt_dma_desc virt_desc; ++ dma_addr_t llis_busaddr; ++ u64 *llis_vaddr; ++ u32 ccfg; ++ size_t size; ++ bool done; ++ bool cyclic; ++}; ++ ++enum edmac_dma_chan_state { ++ EDMAC_CHAN_IDLE, ++ EDMAC_CHAN_RUNNING, ++ EDMAC_CHAN_PAUSED, ++ EDMAC_CHAN_WAITING, ++}; ++ ++struct edmacv310_dma_chan { ++ bool slave; ++ int signal; ++ int id; ++ struct virt_dma_chan virt_chan; ++ struct edmacv310_phy_chan *phychan; ++ struct dma_slave_config cfg; ++ struct transfer_desc *at; ++ struct edmacv310_driver_data *host; ++ enum edmac_dma_chan_state state; ++}; ++ ++struct edmacv310_phy_chan { ++ unsigned int id; ++ void __iomem *base; ++ spinlock_t lock; ++ struct edmacv310_dma_chan *serving; ++}; ++ ++struct edmacv310_driver_data { ++ struct platform_device *dev; ++ struct dma_device slave; ++ struct dma_device memcpy; ++ void __iomem *base; ++ struct regmap *misc_regmap; ++ void __iomem *crg_ctrl; ++ struct edmacv310_phy_chan *phy_chans; ++ struct dma_pool *pool; ++ unsigned int misc_ctrl_base; ++ int irq; ++ unsigned int id; ++ struct clk *clk; ++ struct clk *axi_clk; ++ struct reset_control *rstc; ++ unsigned int channels; ++ unsigned int slave_requests; ++ unsigned int max_transfer_size; ++}; ++ ++#ifdef DEBUG_EDMAC ++void dump_lli(const u64 *llis_vaddr, unsigned int num) ++{ ++ edmac_lli *plli = (edmac_lli *)llis_vaddr; ++ unsigned int i; ++ ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "lli num = 0%d\n", num); ++ for (i = 0; i < num; i++) { ++ printk("lli%d:lli_L: 
0x%llx\n", i, plli[i].next_lli & 0xffffffff); ++ printk("lli%d:lli_H: 0x%llx\n", i, (plli[i].next_lli >> BITS_PER_HALF_WORD) & 0xffffffff); ++ printk("lli%d:count: 0x%x\n", i, plli[i].count); ++ printk("lli%d:src_addr_L: 0x%llx\n", i, plli[i].src_addr & 0xffffffff); ++ printk("lli%d:src_addr_H: 0x%llx\n", i, (plli[i].src_addr >> BITS_PER_HALF_WORD) & 0xffffffff); ++ printk("lli%d:dst_addr_L: 0x%llx\n", i, plli[i].dest_addr & 0xffffffff); ++ printk("lli%d:dst_addr_H: 0x%llx\n", i, (plli[i].dest_addr >> BITS_PER_HALF_WORD) & 0xffffffff); ++ printk("lli%d:CONFIG: 0x%x\n", i, plli[i].config); ++ } ++} ++ ++#else ++void dump_lli(const u64 *llis_vaddr, unsigned int num) ++{ ++} ++#endif ++ ++static inline struct edmacv310_dma_chan *to_edamc_chan(const struct dma_chan *chan) ++{ ++ return container_of(chan, struct edmacv310_dma_chan, virt_chan.chan); ++} ++ ++static inline struct transfer_desc *to_edmac_transfer_desc( ++ const struct dma_async_tx_descriptor *tx) ++{ ++ return container_of(tx, struct transfer_desc, virt_desc.tx); ++} ++ ++static struct dma_chan *edmac_find_chan_id( ++ const struct edmacv310_driver_data *edmac, ++ int request_num) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = NULL; ++ ++ list_for_each_entry(edmac_dma_chan, &edmac->slave.channels, ++ virt_chan.chan.device_node) { ++ if (edmac_dma_chan->id == request_num) ++ return &edmac_dma_chan->virt_chan.chan; ++ } ++ return NULL; ++} ++ ++static struct dma_chan *edma_of_xlate(struct of_phandle_args *dma_spec, ++ struct of_dma *ofdma) ++{ ++ struct edmacv310_driver_data *edmac = ofdma->of_dma_data; ++ struct edmacv310_dma_chan *edmac_dma_chan = NULL; ++ struct dma_chan *dma_chan = NULL; ++ struct regmap *misc = NULL; ++ unsigned int signal, request_num; ++ unsigned int reg = 0; ++ unsigned int offset = 0; ++ ++ if (!edmac) ++ return NULL; ++ ++ misc = edmac->misc_regmap; ++ ++ if (dma_spec->args_count != 2) { /* check num of dts node args */ ++ edmacv310_error("args count not true!\n"); ++ return NULL; ++ } ++ ++ request_num = dma_spec->args[0]; ++ signal = dma_spec->args[1]; ++ ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "host->id = %d,signal = %d, request_num = %d\n", ++ edmac->id, signal, request_num); ++ ++ if (misc != NULL) { ++#ifdef CONFIG_ACCESS_M7_DEV ++ offset = edmac->misc_ctrl_base; ++ reg = 0xc0; ++ regmap_write(misc, offset, reg); ++#else ++ offset = edmac->misc_ctrl_base + (request_num & (~0x3)); ++ regmap_read(misc, offset, ®); ++ /* set misc for signal line */ ++ reg &= ~(0x3f << ((request_num & 0x3) << 3)); ++ reg |= signal << ((request_num & 0x3) << 3); ++ regmap_write(misc, offset, reg); ++#endif ++ } ++ ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "offset = 0x%x, reg = 0x%x\n", offset, reg); ++ ++ dma_chan = edmac_find_chan_id(edmac, request_num); ++ if (!dma_chan) { ++ edmacv310_error("DMA slave channel is not found!\n"); ++ return NULL; ++ } ++ ++ edmac_dma_chan = to_edamc_chan(dma_chan); ++ edmac_dma_chan->signal = request_num; ++ return dma_get_slave_channel(dma_chan); ++} ++ ++static int edmacv310_devm_get(struct edmacv310_driver_data *edmac) ++{ ++ struct platform_device *platdev = edmac->dev; ++ struct resource *res = NULL; ++ ++ edmac->clk = devm_clk_get(&(platdev->dev), "apb_pclk"); ++ if (IS_ERR(edmac->clk)) ++ return PTR_ERR(edmac->clk); ++ ++ edmac->axi_clk = devm_clk_get(&(platdev->dev), "axi_aclk"); ++ if (IS_ERR(edmac->axi_clk)) ++ return PTR_ERR(edmac->axi_clk); ++ ++ edmac->irq = platform_get_irq(platdev, 0); ++ if (unlikely(edmac->irq < 0)) ++ return -ENODEV; ++ ++ edmac->rstc 
= devm_reset_control_get(&(platdev->dev), "dma-reset");
++	if (IS_ERR(edmac->rstc))
++		return PTR_ERR(edmac->rstc);
++
++	res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		edmacv310_error("no reg resource\n");
++		return -ENODEV;
++	}
++
++	edmac->base = devm_ioremap_resource(&(platdev->dev), res);
++	if (IS_ERR(edmac->base))
++		return PTR_ERR(edmac->base);
++
++	res = platform_get_resource_byname(platdev, IORESOURCE_MEM, "dma_peri_channel_req_sel");
++	if (res) {
++		void __iomem *dma_peri_channel_req_sel = ioremap(res->start, resource_size(res));
++		if (!dma_peri_channel_req_sel)
++			return -ENOMEM;
++		writel(0xffffffff, dma_peri_channel_req_sel);
++		iounmap(dma_peri_channel_req_sel);
++	}
++	return 0;
++}
++
++static int edmacv310_of_property_read(struct edmacv310_driver_data *edmac)
++{
++	struct platform_device *platdev = edmac->dev;
++	struct device_node *np = platdev->dev.of_node;
++	int ret;
++
++	if (!of_find_property(np, "misc_regmap", NULL) ||
++	    !of_find_property(np, "misc_ctrl_base", NULL)) {
++		edmac->misc_regmap = NULL;
++	} else {
++		edmac->misc_regmap = syscon_regmap_lookup_by_phandle(np, "misc_regmap");
++		if (IS_ERR(edmac->misc_regmap))
++			return PTR_ERR(edmac->misc_regmap);
++
++		ret = of_property_read_u32(np, "misc_ctrl_base", &(edmac->misc_ctrl_base));
++		if (ret) {
++			edmacv310_error("get dma-misc_ctrl_base fail\n");
++			return -ENODEV;
++		}
++	}
++	ret = of_property_read_u32(np, "devid", &(edmac->id));
++	if (ret) {
++		edmacv310_error("get edmac id fail\n");
++		return -ENODEV;
++	}
++	ret = of_property_read_u32(np, "dma-channels", &(edmac->channels));
++	if (ret) {
++		edmacv310_error("get dma-channels fail\n");
++		return -ENODEV;
++	}
++	ret = of_property_read_u32(np, "dma-requests", &(edmac->slave_requests));
++	if (ret) {
++		edmacv310_error("get dma-requests fail\n");
++		return -ENODEV;
++	}
++	edmacv310_trace(EDMACV310_REG_TRACE_LEVEL, "dma-channels = %d, dma-requests = %d\n",
++			edmac->channels, edmac->slave_requests);
++	return 0;
++}
++
++static int get_of_probe(struct edmacv310_driver_data *edmac)
++{
++	struct platform_device *platdev = edmac->dev;
++	int ret;
++
++	ret = edmacv310_devm_get(edmac);
++	if (ret)
++		return ret;
++
++	ret = edmacv310_of_property_read(edmac);
++	if (ret)
++		return ret;
++
++	return of_dma_controller_register(platdev->dev.of_node,
++					  edma_of_xlate, edmac);
++}
++
++static void edmac_free_chan_resources(struct dma_chan *chan)
++{
++	vchan_free_chan_resources(to_virt_chan(chan));
++}
++
++static size_t read_residue_from_phychan(
++	const struct edmacv310_dma_chan *edmac_dma_chan,
++	const struct transfer_desc *tsf_desc)
++{
++	size_t bytes;
++	u64 next_lli;
++	struct edmacv310_phy_chan *phychan = edmac_dma_chan->phychan;
++	unsigned int i, index;
++	struct edmacv310_driver_data *edmac = edmac_dma_chan->host;
++	edmac_lli *plli = NULL;
++
++	next_lli = (edmacv310_readl(edmac->base + edmac_cx_lli_l(phychan->id)) &
++		    (~(EDMAC_LLI_ALIGN - 1)));
++	next_lli |= ((u64)(edmacv310_readl(edmac->base + edmac_cx_lli_h(
++		phychan->id)) & 0xffffffff) << BITS_PER_HALF_WORD);
++	bytes = edmacv310_readl(edmac->base + edmac_cx_curr_cnt0(
++		phychan->id));
++	if (next_lli != 0) {
++		/* It means lli mode */
++		bytes += tsf_desc->size;
++		index = (next_lli - tsf_desc->llis_busaddr) / sizeof(*plli);
++		plli = (edmac_lli *)(tsf_desc->llis_vaddr);
++
++		if (index > MAX_TSFR_LLIS)
++			return 0;
++
++		for (i = 0; i < index; i++)
++			bytes -= plli[i].count;
++	}
++	return bytes;
++}
++
++static enum dma_status
edmac_tx_status(struct dma_chan *chan, ++ dma_cookie_t cookie, ++ struct dma_tx_state *txstate) ++{ ++ enum dma_status ret; ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ struct virt_dma_desc *vd = NULL; ++ struct transfer_desc *tsf_desc = NULL; ++ unsigned long flags; ++ size_t bytes; ++ ++ ret = dma_cookie_status(chan, cookie, txstate); ++ if (ret == DMA_COMPLETE) ++ return ret; ++ ++ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); ++ vd = vchan_find_desc(&edmac_dma_chan->virt_chan, cookie); ++ if (vd) { ++ /* no been trasfer */ ++ tsf_desc = to_edmac_transfer_desc(&vd->tx); ++ bytes = tsf_desc->size; ++ } else { ++ /* trasfering */ ++ tsf_desc = edmac_dma_chan->at; ++ ++ if (!(edmac_dma_chan->phychan) || !tsf_desc) { ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ return ret; ++ } ++ bytes = read_residue_from_phychan(edmac_dma_chan, tsf_desc); ++ } ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ dma_set_residue(txstate, bytes); ++ ++ if (edmac_dma_chan->state == EDMAC_CHAN_PAUSED && ret == DMA_IN_PROGRESS) ++ ret = DMA_PAUSED; ++ ++ return ret; ++} ++ ++static struct edmacv310_phy_chan *edmac_get_phy_channel( ++ const struct edmacv310_driver_data *edmac, ++ struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ struct edmacv310_phy_chan *ch = NULL; ++ unsigned long flags; ++ int i; ++ ++ for (i = 0; i < edmac->channels; i++) { ++ ch = &edmac->phy_chans[i]; ++ ++ spin_lock_irqsave(&ch->lock, flags); ++ ++ if (!ch->serving) { ++ ch->serving = edmac_dma_chan; ++ spin_unlock_irqrestore(&ch->lock, flags); ++ break; ++ } ++ spin_unlock_irqrestore(&ch->lock, flags); ++ } ++ ++ if (i == edmac->channels) ++ return NULL; ++ ++ return ch; ++} ++ ++static void edmac_write_lli(const struct edmacv310_driver_data *edmac, ++ const struct edmacv310_phy_chan *phychan, ++ const struct transfer_desc *tsf_desc) ++{ ++ edmac_lli *plli = (edmac_lli *)tsf_desc->llis_vaddr; ++ ++ if (plli->next_lli != 0x0) ++ edmacv310_writel((plli->next_lli & 0xffffffff) | EDMAC_LLI_ENABLE, ++ edmac->base + edmac_cx_lli_l(phychan->id)); ++ else ++ edmacv310_writel((plli->next_lli & 0xffffffff), ++ edmac->base + edmac_cx_lli_l(phychan->id)); ++ ++ edmacv310_writel(((plli->next_lli >> 32) & 0xffffffff), ++ edmac->base + edmac_cx_lli_h(phychan->id)); ++ edmacv310_writel(plli->count, edmac->base + edmac_cx_cnt0(phychan->id)); ++ edmacv310_writel(plli->src_addr & 0xffffffff, ++ edmac->base + edmac_cx_src_addr_l(phychan->id)); ++ edmacv310_writel((plli->src_addr >> 32) & 0xffffffff, ++ edmac->base + edmac_cx_src_addr_h(phychan->id)); ++ edmacv310_writel(plli->dest_addr & 0xffffffff, ++ edmac->base + edmac_cx_dest_addr_l(phychan->id)); ++ edmacv310_writel((plli->dest_addr >> 32) & 0xffffffff, ++ edmac->base + edmac_cx_dest_addr_h(phychan->id)); ++ edmacv310_writel(plli->config, ++ edmac->base + edmac_cx_config(phychan->id)); ++} ++ ++static void edmac_start_next_txd(struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct edmacv310_phy_chan *phychan = edmac_dma_chan->phychan; ++ struct virt_dma_desc *vd = vchan_next_desc(&edmac_dma_chan->virt_chan); ++ struct transfer_desc *tsf_desc = to_edmac_transfer_desc(&vd->tx); ++ unsigned int val; ++ list_del(&tsf_desc->virt_desc.node); ++ edmac_dma_chan->at = tsf_desc; ++ edmac_write_lli(edmac, phychan, tsf_desc); ++ val = edmacv310_readl(edmac->base + edmac_cx_config(phychan->id)); ++ edmacv310_trace(EDMACV310_REG_TRACE_LEVEL, " EDMAC_Cx_CONFIG = 0x%x\n", 
val); ++ edmacv310_writel(val | EDMAC_CXCONFIG_LLI_START, ++ edmac->base + edmac_cx_config(phychan->id)); ++} ++ ++static void edmac_start(struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct edmacv310_phy_chan *ch; ++ ch = edmac_get_phy_channel(edmac, edmac_dma_chan); ++ if (!ch) { ++ edmacv310_error("no phy channel available !\n"); ++ edmac_dma_chan->state = EDMAC_CHAN_WAITING; ++ return; ++ } ++ edmac_dma_chan->phychan = ch; ++ edmac_dma_chan->state = EDMAC_CHAN_RUNNING; ++ edmac_start_next_txd(edmac_dma_chan); ++} ++ ++static void edmac_issue_pending(struct dma_chan *chan) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ unsigned long flags; ++ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); ++ if (vchan_issue_pending(&edmac_dma_chan->virt_chan)) { ++ if (!edmac_dma_chan->phychan && edmac_dma_chan->state != EDMAC_CHAN_WAITING) ++ edmac_start(edmac_dma_chan); ++ } ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++} ++ ++static void edmac_free_txd_list(struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ LIST_HEAD(head); ++ vchan_get_all_descriptors(&edmac_dma_chan->virt_chan, &head); ++ vchan_dma_desc_free_list(&edmac_dma_chan->virt_chan, &head); ++} ++ ++static int edmac_config(struct dma_chan *chan, ++ struct dma_slave_config *config) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ if (!edmac_dma_chan->slave) { ++ edmacv310_error("slave is null!"); ++ return -EINVAL; ++ } ++ edmac_dma_chan->cfg = *config; ++ return 0; ++} ++ ++static void edmac_pause_phy_chan(const struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct edmacv310_phy_chan *phychan = edmac_dma_chan->phychan; ++ unsigned int val; ++ int timeout; ++ ++ val = edmacv310_readl(edmac->base + edmac_cx_config(phychan->id)); ++ val &= ~CCFG_EN; ++ edmacv310_writel(val, edmac->base + edmac_cx_config(phychan->id)); ++ /* Wait for channel inactive */ ++ for (timeout = 2000; timeout > 0; timeout--) { ++ if (!((0x1 << phychan->id) & edmacv310_readl(edmac->base + EDMAC_CH_STAT))) ++ break; ++ edmacv310_writel(val, edmac->base + edmac_cx_config(phychan->id)); ++ udelay(1); ++ } ++ if (timeout == 0) ++ edmacv310_error(":channel%u timeout waiting for pause, timeout:%d\n", ++ phychan->id, timeout); ++} ++ ++static int edmac_pause(struct dma_chan *chan) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); ++ if (!edmac_dma_chan->phychan) { ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ return 0; ++ } ++ edmac_pause_phy_chan(edmac_dma_chan); ++ edmac_dma_chan->state = EDMAC_CHAN_PAUSED; ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ return 0; ++} ++ ++static void edmac_resume_phy_chan(const struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct edmacv310_phy_chan *phychan = edmac_dma_chan->phychan; ++ unsigned int val; ++ val = edmacv310_readl(edmac->base + edmac_cx_config(phychan->id)); ++ val |= CCFG_EN; ++ edmacv310_writel(val, edmac->base + edmac_cx_config(phychan->id)); ++} ++ ++static int edmac_resume(struct dma_chan *chan) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); ++ ++ if 
(!edmac_dma_chan->phychan) { ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ return 0; ++ } ++ ++ edmac_resume_phy_chan(edmac_dma_chan); ++ edmac_dma_chan->state = EDMAC_CHAN_RUNNING; ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ ++ return 0; ++} ++ ++void edmac_phy_free(struct edmacv310_dma_chan *chan); ++static void edmac_desc_free(struct virt_dma_desc *vd); ++static int edmac_terminate_all(struct dma_chan *chan) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&edmac_dma_chan->virt_chan.lock, flags); ++ if (!edmac_dma_chan->phychan && !edmac_dma_chan->at) { ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ return 0; ++ } ++ ++ edmac_dma_chan->state = EDMAC_CHAN_IDLE; ++ ++ if (edmac_dma_chan->phychan) ++ edmac_phy_free(edmac_dma_chan); ++ if (edmac_dma_chan->at) { ++ edmac_desc_free(&edmac_dma_chan->at->virt_desc); ++ edmac_dma_chan->at = NULL; ++ } ++ edmac_free_txd_list(edmac_dma_chan); ++ spin_unlock_irqrestore(&edmac_dma_chan->virt_chan.lock, flags); ++ ++ return 0; ++} ++ ++static u32 get_width(enum dma_slave_buswidth width) ++{ ++ switch (width) { ++ case DMA_SLAVE_BUSWIDTH_1_BYTE: ++ return EDMAC_WIDTH_8BIT; ++ case DMA_SLAVE_BUSWIDTH_2_BYTES: ++ return EDMAC_WIDTH_16BIT; ++ case DMA_SLAVE_BUSWIDTH_4_BYTES: ++ return EDMAC_WIDTH_32BIT; ++ case DMA_SLAVE_BUSWIDTH_8_BYTES: ++ return EDMAC_WIDTH_64BIT; ++ default: ++ edmacv310_error("check here, width warning!\n"); ++ return ~0; ++ } ++} ++ ++static unsigned int edmac_set_config_value(enum dma_transfer_direction direction, ++ unsigned int addr_width, ++ unsigned int burst, ++ unsigned int signal) ++{ ++ unsigned int config, width; ++ ++ if (direction == DMA_MEM_TO_DEV) ++ config = EDMAC_CONFIG_SRC_INC; ++ else ++ config = EDMAC_CONFIG_DST_INC; ++ ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "addr_width = 0x%x\n", addr_width); ++ width = get_width(addr_width); ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "width = 0x%x\n", width); ++ config |= width << EDMAC_CONFIG_SRC_WIDTH_SHIFT; ++ config |= width << EDMAC_CONFIG_DST_WIDTH_SHIFT; ++ edmacv310_trace(EDMACV310_REG_TRACE_LEVEL, "tsf_desc->ccfg = 0x%x\n", config); ++ edmacv310_trace(EDMACV310_CONFIG_TRACE_LEVEL, "burst = 0x%x\n", burst); ++ config |= burst << EDMAC_CONFIG_SRC_BURST_SHIFT; ++ config |= burst << EDMAC_CONFIG_DST_BURST_SHIFT; ++ if (signal >= 0) { ++ edmacv310_trace(EDMACV310_REG_TRACE_LEVEL, "edmac_dma_chan->signal = %d\n", signal); ++ config |= (unsigned int)signal << EDMAC_CXCONFIG_SIGNAL_SHIFT; ++ } ++ config |= EDMAC_CXCONFIG_DEV_MEM_TYPE << EDMAC_CXCONFIG_TSF_TYPE_SHIFT; ++ return config; ++} ++ ++struct transfer_desc *edmac_init_tsf_desc(const struct dma_chan *chan, ++ enum dma_transfer_direction direction, ++ dma_addr_t *slave_addr) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ struct transfer_desc *tsf_desc; ++ unsigned int burst = 0; ++ unsigned int addr_width = 0; ++ unsigned int maxburst = 0; ++ tsf_desc = kzalloc(sizeof(*tsf_desc), GFP_NOWAIT); ++ if (!tsf_desc) ++ return NULL; ++ if (direction == DMA_MEM_TO_DEV) { ++ *slave_addr = edmac_dma_chan->cfg.dst_addr; ++ addr_width = edmac_dma_chan->cfg.dst_addr_width; ++ maxburst = edmac_dma_chan->cfg.dst_maxburst; ++ } else if (direction == DMA_DEV_TO_MEM) { ++ *slave_addr = edmac_dma_chan->cfg.src_addr; ++ addr_width = edmac_dma_chan->cfg.src_addr_width; ++ maxburst = edmac_dma_chan->cfg.src_maxburst; ++ } else { ++ kfree(tsf_desc); ++ 
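++		/* Neither MEM_TO_DEV nor DEV_TO_MEM: drop the half-built descriptor. */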
edmacv310_error("direction unsupported!\n"); ++ return NULL; ++ } ++ ++ if (maxburst > (EDMAC_MAX_BURST_WIDTH)) ++ burst |= (EDMAC_MAX_BURST_WIDTH - 1); ++ else if (maxburst == 0) ++ burst |= EDMAC_MIN_BURST_WIDTH; ++ else ++ burst |= (maxburst - 1); ++ ++ tsf_desc->ccfg = edmac_set_config_value(direction, addr_width, ++ burst, edmac_dma_chan->signal); ++ edmacv310_trace(EDMACV310_REG_TRACE_LEVEL, "tsf_desc->ccfg = 0x%x\n", tsf_desc->ccfg); ++ return tsf_desc; ++} ++ ++static int edmac_fill_desc(const struct edmac_sg *dsg, ++ struct transfer_desc *tsf_desc, ++ unsigned int length, unsigned int num) ++{ ++ edmac_lli *plli = NULL; ++ ++ if (num >= MAX_TSFR_LLIS) { ++ edmacv310_error("lli out of range. \n"); ++ return -ENOMEM; ++ } ++ ++ plli = (edmac_lli *)(tsf_desc->llis_vaddr); ++ (void)memset_s(&plli[num], sizeof(edmac_lli), 0x0, sizeof(*plli)); ++ ++ plli[num].src_addr = dsg->src_addr; ++ plli[num].dest_addr = dsg->dst_addr; ++ plli[num].config = tsf_desc->ccfg; ++ plli[num].count = length; ++ tsf_desc->size += length; ++ ++ if (num > 0) { ++ plli[num - 1].next_lli = (tsf_desc->llis_busaddr + (num) * sizeof( ++ *plli)) & (~(EDMAC_LLI_ALIGN - 1)); ++ plli[num - 1].next_lli |= EDMAC_LLI_ENABLE; ++ } ++ return 0; ++} ++ ++static void free_dsg(struct list_head *dsg_head) ++{ ++ struct edmac_sg *dsg = NULL; ++ struct edmac_sg *_dsg = NULL; ++ ++ list_for_each_entry_safe(dsg, _dsg, dsg_head, node) { ++ list_del(&dsg->node); ++ kfree(dsg); ++ } ++} ++ ++static int edmac_add_sg(struct list_head *sg_head, ++ dma_addr_t dst, dma_addr_t src, ++ size_t len) ++{ ++ struct edmac_sg *dsg = NULL; ++ ++ if (len == 0) { ++ free_dsg(sg_head); ++ edmacv310_error("Transfer length is 0. \n"); ++ return -ENOMEM; ++ } ++ ++ dsg = (struct edmac_sg *)kzalloc(sizeof(*dsg), GFP_NOWAIT); ++ if (!dsg) { ++ free_dsg(sg_head); ++ edmacv310_error("alloc memory for dsg fail.\n"); ++ return -ENOMEM; ++ } ++ ++ list_add_tail(&dsg->node, sg_head); ++ dsg->src_addr = src; ++ dsg->dst_addr = dst; ++ dsg->len = len; ++ return 0; ++} ++ ++static int edmac_add_sg_slave(struct list_head *sg_head, ++ dma_addr_t slave_addr, dma_addr_t addr, ++ size_t length, ++ enum dma_transfer_direction direction) ++{ ++ dma_addr_t src = 0; ++ dma_addr_t dst = 0; ++ if (direction == DMA_MEM_TO_DEV) { ++ src = addr; ++ dst = slave_addr; ++ } else if (direction == DMA_DEV_TO_MEM) { ++ src = slave_addr; ++ dst = addr; ++ } else { ++ edmacv310_error("invali dma_transfer_direction.\n"); ++ return -ENOMEM; ++ } ++ return edmac_add_sg(sg_head, dst, src, length); ++} ++ ++static int edmac_fill_sg_for_slave(struct list_head *sg_head, ++ dma_addr_t slave_addr, ++ struct scatterlist *sgl, ++ unsigned int sg_len, ++ enum dma_transfer_direction direction) ++{ ++ struct scatterlist *sg = NULL; ++ int tmp, ret; ++ size_t length; ++ dma_addr_t addr; ++ if (sgl == NULL) { ++ edmacv310_error("sgl is null!\n"); ++ return -ENOMEM; ++ } ++ ++ for_each_sg(sgl, sg, sg_len, tmp) { ++ addr = sg_dma_address(sg); ++ length = sg_dma_len(sg); ++ ret = edmac_add_sg_slave(sg_head, slave_addr, addr, length, direction); ++ if (ret) ++ break; ++ } ++ return ret; ++} ++ ++static inline int edmac_fill_sg_for_m2m_copy(struct list_head *sg_head, ++ dma_addr_t dst, dma_addr_t src, ++ size_t len) ++{ ++ return edmac_add_sg(sg_head, dst, src, len); ++} ++ ++struct edmac_cyclic_args { ++ dma_addr_t slave_addr; ++ dma_addr_t buf_addr; ++ size_t buf_len; ++ size_t period_len; ++ enum dma_transfer_direction direction; ++}; ++ ++static int edmac_fill_sg_for_cyclic(struct list_head *sg_head, 
++ struct edmac_cyclic_args args) ++{ ++ size_t count_in_sg = 0; ++ size_t trans_bytes; ++ int ret; ++ while (count_in_sg < args.buf_len) { ++ trans_bytes = min(args.period_len, args.buf_len - count_in_sg); ++ count_in_sg += trans_bytes; ++ ret = edmac_add_sg_slave(sg_head, args.slave_addr, args.buf_addr + count_in_sg, count_in_sg, args.direction); ++ if (ret) ++ return ret; ++ } ++ return 0; ++} ++ ++static unsigned short get_max_width(dma_addr_t ccfg) ++{ ++ unsigned short src_width = (ccfg & EDMAC_CONTROL_SRC_WIDTH_MASK) >> ++ EDMAC_CONFIG_SRC_WIDTH_SHIFT; ++ unsigned short dst_width = (ccfg & EDMAC_CONTROL_DST_WIDTH_MASK) >> ++ EDMAC_CONFIG_DST_WIDTH_SHIFT; ++ return 1 << max(src_width, dst_width); /* to byte */ ++} ++ ++static int edmac_fill_asg_lli_for_desc(struct edmac_sg *dsg, ++ struct transfer_desc *tsf_desc, ++ unsigned int *lli_count) ++{ ++ int ret; ++ unsigned short width = get_max_width(tsf_desc->ccfg); ++ ++ while (dsg->len != 0) { ++ size_t lli_len = MAX_TRANSFER_BYTES; ++ lli_len = (lli_len / width) * width; /* bus width align */ ++ lli_len = min(lli_len, dsg->len); ++ ret = edmac_fill_desc(dsg, tsf_desc, lli_len, *lli_count); ++ if (ret) ++ return ret; ++ ++ if (tsf_desc->ccfg & EDMAC_CONFIG_SRC_INC) ++ dsg->src_addr += lli_len; ++ if (tsf_desc->ccfg & EDMAC_CONFIG_DST_INC) ++ dsg->dst_addr += lli_len; ++ dsg->len -= lli_len; ++ (*lli_count)++; ++ } ++ return 0; ++} ++ ++static int edmac_fill_lli_for_desc(const struct list_head *sg_head, ++ struct transfer_desc *tsf_desc) ++{ ++ struct edmac_sg *dsg = NULL; ++ struct edmac_lli *last_plli = NULL; ++ unsigned int lli_count = 0; ++ int ret; ++ ++ list_for_each_entry(dsg, sg_head, node) { ++ ret = edmac_fill_asg_lli_for_desc(dsg, tsf_desc, &lli_count); ++ if (ret) ++ return ret; ++ } ++ ++ if (tsf_desc->cyclic) { ++ last_plli = (edmac_lli *)((uintptr_t)tsf_desc->llis_vaddr + ++ (lli_count - 1) * sizeof(*last_plli)); ++ last_plli->next_lli = tsf_desc->llis_busaddr | EDMAC_LLI_ENABLE; ++ } else { ++ last_plli = (edmac_lli *)((uintptr_t)tsf_desc->llis_vaddr + ++ (lli_count - 1) * sizeof(*last_plli)); ++ last_plli->next_lli = 0; ++ } ++ dump_lli(tsf_desc->llis_vaddr, lli_count); ++ return 0; ++} ++ ++static struct dma_async_tx_descriptor *edmac_prep_slave_sg( ++ struct dma_chan *chan, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction direction, ++ unsigned long flags, void *context) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct transfer_desc *tsf_desc = NULL; ++ dma_addr_t slave_addr = 0; ++ int ret; ++ LIST_HEAD(sg_head); ++ if (sgl == NULL) { ++ edmacv310_error("sgl is null!\n"); ++ return NULL; ++ } ++ ++ tsf_desc = edmac_init_tsf_desc(chan, direction, &slave_addr); ++ if (!tsf_desc) ++ return NULL; ++ ++ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, ++ &tsf_desc->llis_busaddr); ++ if (!tsf_desc->llis_vaddr) { ++ edmacv310_error("malloc memory from pool fail !\n"); ++ goto err_alloc_lli; ++ } ++ ++ ret = edmac_fill_sg_for_slave(&sg_head, slave_addr, sgl, sg_len, direction); ++ if (ret) ++ goto err_fill_sg; ++ ret = edmac_fill_lli_for_desc(&sg_head, tsf_desc); ++ free_dsg(&sg_head); ++ if (ret) ++ goto err_fill_sg; ++ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags); ++ ++err_fill_sg: ++ dma_pool_free(edmac->pool, tsf_desc->llis_vaddr, tsf_desc->llis_busaddr); ++err_alloc_lli: ++ kfree(tsf_desc); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor 
*edmac_prep_dma_m2m_copy( ++ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, ++ size_t len, unsigned long flags) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct transfer_desc *tsf_desc = NULL; ++ LIST_HEAD(sg_head); ++ u32 config = 0; ++ int ret; ++ ++ if (!len) ++ return NULL; ++ ++ tsf_desc = kzalloc(sizeof(*tsf_desc), GFP_NOWAIT); ++ if (tsf_desc == NULL) { ++ edmacv310_error("get tsf desc fail!\n"); ++ return NULL; ++ } ++ ++ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, ++ &tsf_desc->llis_busaddr); ++ if (!tsf_desc->llis_vaddr) { ++ edmacv310_error("malloc memory from pool fail !\n"); ++ goto err_alloc_lli; ++ } ++ ++ config |= EDMAC_CONFIG_SRC_INC | EDMAC_CONFIG_DST_INC; ++ config |= EDMAC_CXCONFIG_MEM_TYPE << EDMAC_CXCONFIG_TSF_TYPE_SHIFT; ++ /* max burst width is 16 ,but reg value set 0xf */ ++ config |= (EDMAC_MAX_BURST_WIDTH - 1) << EDMAC_CONFIG_SRC_BURST_SHIFT; ++ config |= (EDMAC_MAX_BURST_WIDTH - 1) << EDMAC_CONFIG_DST_BURST_SHIFT; ++ config |= EDMAC_MEM_BIT_WIDTH << EDMAC_CONFIG_SRC_WIDTH_SHIFT; ++ config |= EDMAC_MEM_BIT_WIDTH << EDMAC_CONFIG_DST_WIDTH_SHIFT; ++ tsf_desc->ccfg = config; ++ ret = edmac_fill_sg_for_m2m_copy(&sg_head, dst, src, len); ++ if (ret) ++ goto err_fill_sg; ++ ret = edmac_fill_lli_for_desc(&sg_head, tsf_desc); ++ free_dsg(&sg_head); ++ if (ret) ++ goto err_fill_sg; ++ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags); ++ ++err_fill_sg: ++ dma_pool_free(edmac->pool, tsf_desc->llis_vaddr, tsf_desc->llis_busaddr); ++err_alloc_lli: ++ kfree(tsf_desc); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor *edmac_prep_dma_cyclic( ++ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, ++ size_t period_len, enum dma_transfer_direction direction, ++ unsigned long flags) ++{ ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(chan); ++ struct edmacv310_driver_data *edmac = edmac_dma_chan->host; ++ struct transfer_desc *tsf_desc = NULL; ++ struct edmac_cyclic_args args = { ++ .slave_addr = 0, ++ .buf_addr = buf_addr, ++ .buf_len = buf_len, ++ .period_len = period_len, ++ .direction = direction ++ }; ++ LIST_HEAD(sg_head); ++ int ret; ++ ++ tsf_desc = edmac_init_tsf_desc(chan, direction, &(args.slave_addr)); ++ if (!tsf_desc) ++ return NULL; ++ ++ tsf_desc->llis_vaddr = dma_pool_alloc(edmac->pool, GFP_NOWAIT, ++ &tsf_desc->llis_busaddr); ++ if (!tsf_desc->llis_vaddr) { ++ edmacv310_error("malloc memory from pool fail !\n"); ++ goto err_alloc_lli; ++ } ++ ++ tsf_desc->cyclic = true; ++ ret = edmac_fill_sg_for_cyclic(&sg_head, args); ++ if (ret) ++ goto err_fill_sg; ++ ret = edmac_fill_lli_for_desc(&sg_head, tsf_desc); ++ free_dsg(&sg_head); ++ if (ret) ++ goto err_fill_sg; ++ return vchan_tx_prep(&edmac_dma_chan->virt_chan, &tsf_desc->virt_desc, flags); ++ ++err_fill_sg: ++ dma_pool_free(edmac->pool, tsf_desc->llis_vaddr, tsf_desc->llis_busaddr); ++err_alloc_lli: ++ kfree(tsf_desc); ++ return NULL; ++} ++ ++static void edmac_phy_reassign(struct edmacv310_phy_chan *phy_chan, ++ struct edmacv310_dma_chan *chan) ++{ ++ phy_chan->serving = chan; ++ chan->phychan = phy_chan; ++ chan->state = EDMAC_CHAN_RUNNING; ++ ++ edmac_start_next_txd(chan); ++} ++ ++static void edmac_terminate_phy_chan(const struct edmacv310_driver_data *edmac, ++ const struct edmacv310_dma_chan *edmac_dma_chan) ++{ ++ unsigned int val; ++ struct edmacv310_phy_chan *phychan = edmac_dma_chan->phychan; ++ 
edmac_pause_phy_chan(edmac_dma_chan); ++ val = 0x1 << phychan->id; ++ edmacv310_writel(val, edmac->base + EDMAC_INT_TC1_RAW); ++ edmacv310_writel(val, edmac->base + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(val, edmac->base + EDMAC_INT_ERR2_RAW); ++} ++ ++void edmac_phy_free(struct edmacv310_dma_chan *chan) ++{ ++ struct edmacv310_driver_data *edmac = chan->host; ++ struct edmacv310_dma_chan *p = NULL; ++ struct edmacv310_dma_chan *next = NULL; ++ ++ list_for_each_entry(p, &edmac->memcpy.channels, virt_chan.chan.device_node) { ++ if (p->state == EDMAC_CHAN_WAITING) { ++ next = p; ++ break; ++ } ++ } ++ ++ if (!next) { ++ list_for_each_entry(p, &edmac->slave.channels, virt_chan.chan.device_node) { ++ if (p->state == EDMAC_CHAN_WAITING) { ++ next = p; ++ break; ++ } ++ } ++ } ++ edmac_terminate_phy_chan(edmac, chan); ++ ++ if (next) { ++ spin_lock(&next->virt_chan.lock); ++ edmac_phy_reassign(chan->phychan, next); ++ spin_unlock(&next->virt_chan.lock); ++ } else { ++ chan->phychan->serving = NULL; ++ } ++ ++ chan->phychan = NULL; ++ chan->state = EDMAC_CHAN_IDLE; ++} ++ ++#define DMA_CFG_ERR 0 ++#define DMA_TRANS_ERR 1 ++#define DMA_LLI_ERR 2 ++ ++bool handle_irq(const struct edmacv310_driver_data *edmac, int chan_id) ++{ ++ struct edmacv310_dma_chan *chan = NULL; ++ struct edmacv310_phy_chan *phy_chan = NULL; ++ struct transfer_desc *tsf_desc = NULL; ++ unsigned int channel_tc_status, channel_err_status[ERR_STATUS_REG_NUM]; ++ ++ phy_chan = &edmac->phy_chans[chan_id]; ++ chan = phy_chan->serving; ++ if (!chan) { ++ edmacv310_error("error interrupt on chan: %d!\n", chan_id); ++ return 0; ++ } ++ tsf_desc = chan->at; ++ ++ channel_tc_status = edmacv310_readl(edmac->base + EDMAC_INT_TC1_RAW); ++ channel_tc_status = (channel_tc_status >> chan_id) & 0x01; ++ if (channel_tc_status) ++ edmacv310_writel(channel_tc_status << chan_id, edmac->base + EDMAC_INT_TC1_RAW); ++ ++ channel_tc_status = edmacv310_readl(edmac->base + EDMAC_INT_TC2); ++ channel_tc_status = (channel_tc_status >> chan_id) & 0x01; ++ if (channel_tc_status) ++ edmacv310_writel(channel_tc_status << chan_id, edmac->base + EDMAC_INT_TC2_RAW); ++ ++ channel_err_status[DMA_CFG_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR1); ++ channel_err_status[DMA_TRANS_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR2); ++ channel_err_status[DMA_LLI_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR3); ++ if ((channel_err_status[DMA_CFG_ERR] | ++ channel_err_status[DMA_TRANS_ERR] | ++ channel_err_status[DMA_LLI_ERR]) & (1 << chan_id)) { ++ edmacv310_error("Error in edmac %d!,ERR1 = 0x%x,ERR2 = 0x%x,ERR3 = 0x%x\n", ++ chan_id, channel_err_status[DMA_CFG_ERR], ++ channel_err_status[DMA_TRANS_ERR], ++ channel_err_status[DMA_LLI_ERR]); ++ edmacv310_writel(1 << chan_id, edmac->base + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(1 << chan_id, edmac->base + EDMAC_INT_ERR2_RAW); ++ edmacv310_writel(1 << chan_id, edmac->base + EDMAC_INT_ERR3_RAW); ++ } ++ ++ spin_lock(&chan->virt_chan.lock); ++ ++ if (tsf_desc->cyclic) { ++ vchan_cyclic_callback(&tsf_desc->virt_desc); ++ spin_unlock(&chan->virt_chan.lock); ++ return 1; ++ } ++ chan->at = NULL; ++ tsf_desc->done = true; ++ vchan_cookie_complete(&tsf_desc->virt_desc); ++ ++ if (vchan_next_desc(&chan->virt_chan)) ++ edmac_start_next_txd(chan); ++ else ++ edmac_phy_free(chan); ++ spin_unlock(&chan->virt_chan.lock); ++ return 1; ++} ++ ++static irqreturn_t emdacv310_irq(int irq, void *dev) ++{ ++ struct edmacv310_driver_data *edmac = (struct edmacv310_driver_data *)dev; ++ u32 mask = 0; ++ unsigned int 
channel_status, temp, i; ++ ++ channel_status = edmacv310_readl(edmac->base + EDMAC_INT_STAT); ++ if (!channel_status) { ++ edmacv310_error("channel_status = 0x%x\n", channel_status); ++ return IRQ_NONE; ++ } ++ ++ for (i = 0; i < edmac->channels; i++) { ++ temp = (channel_status >> i) & 0x1; ++ if (temp) ++ mask |= handle_irq(edmac, i) << i; ++ } ++ return mask ? IRQ_HANDLED : IRQ_NONE; ++} ++ ++static inline void edmac_dma_slave_init(struct edmacv310_dma_chan *chan) ++{ ++ chan->slave = true; ++} ++ ++static void edmac_desc_free(struct virt_dma_desc *vd) ++{ ++ struct transfer_desc *tsf_desc = to_edmac_transfer_desc(&vd->tx); ++ struct edmacv310_dma_chan *edmac_dma_chan = to_edamc_chan(vd->tx.chan); ++ dma_descriptor_unmap(&vd->tx); ++ dma_pool_free(edmac_dma_chan->host->pool, tsf_desc->llis_vaddr, tsf_desc->llis_busaddr); ++ kfree(tsf_desc); ++} ++ ++static int edmac_init_virt_channels(struct edmacv310_driver_data *edmac, ++ struct dma_device *dmadev, ++ unsigned int channels, bool slave) ++{ ++ struct edmacv310_dma_chan *chan = NULL; ++ int i; ++ INIT_LIST_HEAD(&dmadev->channels); ++ ++ for (i = 0; i < channels; i++) { ++ chan = kzalloc(sizeof(struct edmacv310_dma_chan), GFP_KERNEL); ++ if (!chan) { ++ edmacv310_error("fail to allocate memory for virt channels!"); ++ return -1; ++ } ++ ++ chan->host = edmac; ++ chan->state = EDMAC_CHAN_IDLE; ++ chan->signal = -1; ++ ++ if (slave) { ++ chan->id = i; ++ edmac_dma_slave_init(chan); ++ } ++ chan->virt_chan.desc_free = edmac_desc_free; ++ vchan_init(&chan->virt_chan, dmadev); ++ } ++ return 0; ++} ++ ++void edmac_free_virt_channels(struct dma_device *dmadev) ++{ ++ struct edmacv310_dma_chan *chan = NULL; ++ struct edmacv310_dma_chan *next = NULL; ++ ++ list_for_each_entry_safe(chan, next, &dmadev->channels, virt_chan.chan.device_node) { ++ list_del(&chan->virt_chan.chan.device_node); ++ kfree(chan); ++ } ++} ++ ++static void edmacv310_prep_dma_device(struct platform_device *pdev, ++ struct edmacv310_driver_data *edmac) ++{ ++ dma_cap_set(DMA_MEMCPY, edmac->memcpy.cap_mask); ++ edmac->memcpy.dev = &pdev->dev; ++ edmac->memcpy.device_free_chan_resources = edmac_free_chan_resources; ++ edmac->memcpy.device_prep_dma_memcpy = edmac_prep_dma_m2m_copy; ++ edmac->memcpy.device_tx_status = edmac_tx_status; ++ edmac->memcpy.device_issue_pending = edmac_issue_pending; ++ edmac->memcpy.device_config = edmac_config; ++ edmac->memcpy.device_pause = edmac_pause; ++ edmac->memcpy.device_resume = edmac_resume; ++ edmac->memcpy.device_terminate_all = edmac_terminate_all; ++ edmac->memcpy.directions = BIT(DMA_MEM_TO_MEM); ++ edmac->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; ++ ++ dma_cap_set(DMA_SLAVE, edmac->slave.cap_mask); ++ dma_cap_set(DMA_CYCLIC, edmac->slave.cap_mask); ++ edmac->slave.dev = &pdev->dev; ++ edmac->slave.device_free_chan_resources = edmac_free_chan_resources; ++ edmac->slave.device_tx_status = edmac_tx_status; ++ edmac->slave.device_issue_pending = edmac_issue_pending; ++ edmac->slave.device_prep_slave_sg = edmac_prep_slave_sg; ++ edmac->slave.device_prep_dma_cyclic = edmac_prep_dma_cyclic; ++ edmac->slave.device_config = edmac_config; ++ edmac->slave.device_resume = edmac_resume; ++ edmac->slave.device_pause = edmac_pause; ++ edmac->slave.device_terminate_all = edmac_terminate_all; ++ edmac->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ++ edmac->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; ++} ++ ++static int edmacv310_init_chan(struct edmacv310_driver_data *edmac) ++{ ++ int i, ret; ++ 
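++	/*
++	 * One edmacv310_phy_chan per hardware channel; the memcpy and slave
++	 * virtual channels created below are multiplexed onto these by
++	 * edmac_get_phy_channel()/edmac_phy_free().
++	 */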
edmac->phy_chans = kzalloc((edmac->channels * sizeof( ++ struct edmacv310_phy_chan)), ++ GFP_KERNEL); ++ if (!edmac->phy_chans) { ++ edmacv310_error("malloc for phy chans fail!"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < edmac->channels; i++) { ++ struct edmacv310_phy_chan *phy_ch = &edmac->phy_chans[i]; ++ phy_ch->id = i; ++ phy_ch->base = edmac->base + edmac_cx_base(i); ++ spin_lock_init(&phy_ch->lock); ++ phy_ch->serving = NULL; ++ } ++ ++ ret = edmac_init_virt_channels(edmac, &edmac->memcpy, edmac->channels, ++ false); ++ if (ret) { ++ edmacv310_error("fail to init memory virt channels!"); ++ goto free_phychans; ++ } ++ ++ ret = edmac_init_virt_channels(edmac, &edmac->slave, edmac->slave_requests, ++ true); ++ if (ret) { ++ edmacv310_error("fail to init slave virt channels!"); ++ goto free_memory_virt_channels; ++ } ++ return 0; ++ ++free_memory_virt_channels: ++ edmac_free_virt_channels(&edmac->memcpy); ++free_phychans: ++ kfree(edmac->phy_chans); ++ return -ENOMEM; ++} ++ ++static void edmacv310_free_chan(struct edmacv310_driver_data *edmac) ++{ ++ edmac_free_virt_channels(&edmac->slave); ++ edmac_free_virt_channels(&edmac->memcpy); ++ kfree(edmac->phy_chans); ++} ++ ++static void edmacv310_prep_phy_device(const struct edmacv310_driver_data *edmac) ++{ ++ clk_prepare_enable(edmac->clk); ++ clk_prepare_enable(edmac->axi_clk); ++ reset_control_deassert(edmac->rstc); ++ ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC1_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC2_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR2_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR3_RAW); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_TC1_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_TC2_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR1_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR2_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR3_MASK); ++} ++ ++static struct edmacv310_driver_data *edmacv310_prep_edmac_device(struct platform_device *pdev) ++{ ++ int ret; ++ struct edmacv310_driver_data *edmac = NULL; ++ ssize_t trasfer_size; ++ ++ ret = dma_set_mask_and_coherent(&(pdev->dev), DMA_BIT_MASK(64)); ++ if (ret) ++ return NULL; ++ ++ edmac = kzalloc(sizeof(*edmac), GFP_KERNEL); ++ if (!edmac) { ++ edmacv310_error("malloc for edmac fail!"); ++ return NULL; ++ } ++ ++ edmac->dev = pdev; ++ ++ ret = get_of_probe(edmac); ++ if (ret) { ++ edmacv310_error("get dts info fail!"); ++ goto free_edmac; ++ } ++ ++ edmacv310_prep_dma_device(pdev, edmac); ++ edmac->max_transfer_size = MAX_TRANSFER_BYTES; ++ trasfer_size = MAX_TSFR_LLIS * EDMACV300_LLI_WORDS * sizeof(u32); ++ ++ edmac->pool = dma_pool_create(DRIVER_NAME, &(pdev->dev), ++ trasfer_size, EDMACV300_POOL_ALIGN, 0); ++ if (!edmac->pool) { ++ edmacv310_error("create pool fail!"); ++ goto free_edmac; ++ } ++ ++ ret = edmacv310_init_chan(edmac); ++ if (ret) ++ goto free_pool; ++ ++ return edmac; ++ ++free_pool: ++ dma_pool_destroy(edmac->pool); ++free_edmac: ++ kfree(edmac); ++ return NULL; ++} ++ ++static void free_edmac_device(struct edmacv310_driver_data *edmac) ++{ ++ edmacv310_free_chan(edmac); ++ dma_pool_destroy(edmac->pool); ++ kfree(edmac); ++} ++ ++static int __init edmacv310_probe(struct platform_device *pdev) ++{ 
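++	/*
++	 * Probe order: parse DT and allocate state (edmacv310_prep_edmac_device),
++	 * hook the shared IRQ, enable clocks and deassert reset
++	 * (edmacv310_prep_phy_device), then register the memcpy and slave
++	 * dmaengine devices.
++	 */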
++ int ret; ++ struct edmacv310_driver_data *edmac = NULL; ++ ++ edmac = edmacv310_prep_edmac_device(pdev); ++ if (edmac == NULL) ++ return -ENOMEM; ++ ++ ret = request_irq(edmac->irq, emdacv310_irq, 0, DRIVER_NAME, edmac); ++ if (ret) { ++ edmacv310_error("fail to request irq"); ++ goto free_edmac; ++ } ++ edmacv310_prep_phy_device(edmac); ++ ret = dma_async_device_register(&edmac->memcpy); ++ if (ret) { ++ edmacv310_error("%s failed to register memcpy as an async device - %d\n", __func__, ret); ++ goto free_irq_res; ++ } ++ ++ ret = dma_async_device_register(&edmac->slave); ++ if (ret) { ++ edmacv310_error("%s failed to register slave as an async device - %d\n", __func__, ret); ++ goto free_memcpy_device; ++ } ++ return 0; ++ ++free_memcpy_device: ++ dma_async_device_unregister(&edmac->memcpy); ++free_irq_res: ++ free_irq(edmac->irq, edmac); ++free_edmac: ++ free_edmac_device(edmac); ++ return -ENOMEM; ++} ++ ++static int emda_remove(struct platform_device *pdev) ++{ ++ int err = 0; ++ return err; ++} ++ ++static const struct of_device_id edmacv310_match[] = { ++ { .compatible = "vendor,edmacv310" }, ++ {}, ++}; ++ ++static struct platform_driver edmacv310_driver = { ++ .remove = emda_remove, ++ .driver = { ++ .name = "edmacv310", ++ .of_match_table = edmacv310_match, ++ }, ++}; ++ ++static int __init edmacv310_init(void) ++{ ++ return platform_driver_probe(&edmacv310_driver, edmacv310_probe); ++} ++subsys_initcall(edmacv310_init); ++ ++static void __exit edmacv310_exit(void) ++{ ++ platform_driver_unregister(&edmacv310_driver); ++} ++module_exit(edmacv310_exit); ++ ++MODULE_LICENSE("GPL"); +diff --git a/drivers/dma/edmacv310.h b/drivers/dma/edmacv310.h +new file mode 100644 +index 000000000..2a266f3a0 +--- /dev/null ++++ b/drivers/dma/edmacv310.h +@@ -0,0 +1,147 @@ ++/* ++ * ++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#ifndef __EDMACV310_H__ ++#define __EDMACV310_H__ ++ ++/* debug control */ ++#define EDMACV310_CONFIG_TRACE_LEVEL 3 ++#define EDMACV310_TRACE_LEVEL 0 ++#define EDMACV310_REG_TRACE_LEVEL 3 ++#define EDMACV310_TRACE_FMT KERN_INFO ++ ++#ifdef DEBUG_EDMAC ++#define edmacv310_trace(level, msg...) do { \ ++ if ((level) >= EDMACV310_TRACE_LEVEL) { \ ++ printk(EDMACV310_TRACE_FMT"%s:%d: ", __func__, __LINE__); \ ++ printk(msg); \ ++ printk("\n"); \ ++ } \ ++} while (0) ++ ++ ++#define edmacv310_assert(cond) do { \ ++ if (!(cond)) { \ ++ printk(KERN_ERR "Assert:edmacv310:%s:%d\n", \ ++ __func__, \ ++ __LINE__); \ ++ BUG(); \ ++ } \ ++} while (0) ++ ++#define edmacv310_error(s...) do { \ ++ printk(KERN_ERR "edmacv310:%s:%d: ", __func__, __LINE__); \ ++ printk(s); \ ++ printk("\n"); \ ++} while (0) ++ ++#else ++ ++#define edmacv310_trace(level, msg...) ++#define edmacv310_assert(level, msg...) ++#define edmacv310_error(level, msg...) 
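++/*
++ * With DEBUG_EDMAC unset, the trace/assert/error macros above compile to
++ * nothing, so edmacv310_error() reports only appear in debug builds.
++ */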
++
++#endif
++
++#define edmacv310_readl(addr) ((unsigned int)readl((void *)(addr)))
++
++#define edmacv310_writel(v, addr) do { writel(v, (void *)(addr)); \
++} while (0)
++
++
++#define MAX_TRANSFER_BYTES 0xffff
++
++/* reg offset */
++#define EDMAC_INT_STAT 0x0
++#define EDMAC_INT_TC1 0x4
++#define EDMAC_INT_TC2 0x8
++#define EDMAC_INT_ERR1 0xc
++#define EDMAC_INT_ERR2 0x10
++#define EDMAC_INT_ERR3 0x14
++
++#define EDMAC_INT_TC1_MASK 0x18
++#define EDMAC_INT_TC2_MASK 0x1c
++#define EDMAC_INT_ERR1_MASK 0x20
++#define EDMAC_INT_ERR2_MASK 0x24
++#define EDMAC_INT_ERR3_MASK 0x28
++
++#define EDMAC_INT_TC1_RAW 0x600
++#define EDMAC_INT_TC2_RAW 0x608
++#define EDMAC_INT_ERR1_RAW 0x610
++#define EDMAC_INT_ERR2_RAW 0x618
++#define EDMAC_INT_ERR3_RAW 0x620
++
++#define edmac_cx_curr_cnt0(cn) (0x404 + (cn) * 0x20)
++#define edmac_cx_curr_src_addr_l(cn) (0x408 + (cn) * 0x20)
++#define edmac_cx_curr_src_addr_h(cn) (0x40c + (cn) * 0x20)
++#define edmac_cx_curr_dest_addr_l(cn) (0x410 + (cn) * 0x20)
++#define edmac_cx_curr_dest_addr_h(cn) (0x414 + (cn) * 0x20)
++
++#define EDMAC_CH_PRI 0x688
++#define EDMAC_CH_STAT 0x690
++#define EDMAC_DMA_CTRL 0x698
++
++#define edmac_cx_base(cn) (0x800 + (cn) * 0x40)
++#define edmac_cx_lli_l(cn) (0x800 + (cn) * 0x40)
++#define edmac_cx_lli_h(cn) (0x804 + (cn) * 0x40)
++#define edmac_cx_cnt0(cn) (0x81c + (cn) * 0x40)
++#define edmac_cx_src_addr_l(cn) (0x820 + (cn) * 0x40)
++#define edmac_cx_src_addr_h(cn) (0x824 + (cn) * 0x40)
++#define edmac_cx_dest_addr_l(cn) (0x828 + (cn) * 0x40)
++#define edmac_cx_dest_addr_h(cn) (0x82c + (cn) * 0x40)
++#define edmac_cx_config(cn) (0x830 + (cn) * 0x40)
++
++#define EDMAC_ALL_CHAN_CLR 0xff
++#define EDMAC_INT_ENABLE_ALL_CHAN 0xff
++
++
++#define EDMAC_CONFIG_SRC_INC (1 << 31)
++#define EDMAC_CONFIG_DST_INC (1 << 30)
++
++#define EDMAC_CONFIG_SRC_WIDTH_SHIFT 16
++#define EDMAC_CONFIG_DST_WIDTH_SHIFT 12
++#define EDMAC_WIDTH_8BIT 0b0
++#define EDMAC_WIDTH_16BIT 0b1
++#define EDMAC_WIDTH_32BIT 0b10
++#define EDMAC_WIDTH_64BIT 0b11
++#ifdef CONFIG_64BIT
++#define EDMAC_MEM_BIT_WIDTH EDMAC_WIDTH_64BIT
++#else
++#define EDMAC_MEM_BIT_WIDTH EDMAC_WIDTH_32BIT
++#endif
++
++#define EDMAC_MAX_BURST_WIDTH 16
++#define EDMAC_MIN_BURST_WIDTH 1
++#define EDMAC_CONFIG_SRC_BURST_SHIFT 24
++#define EDMAC_CONFIG_DST_BURST_SHIFT 20
++
++#define EDMAC_LLI_ALIGN 0x40
++#define EDMAC_LLI_DISABLE 0x0
++#define EDMAC_LLI_ENABLE 0x2
++
++#define EDMAC_CXCONFIG_SIGNAL_SHIFT 0x4
++#define EDMAC_CXCONFIG_MEM_TYPE 0x0
++#define EDMAC_CXCONFIG_DEV_MEM_TYPE 0x1
++#define EDMAC_CXCONFIG_TSF_TYPE_SHIFT 0x2
++#define EDMAC_CXCONFIG_LLI_START 0x1
++
++#define EDMAC_CXCONFIG_ITC_EN 0x1
++#define EDMAC_CXCONFIG_ITC_EN_SHIFT 0x1
++
++#define CCFG_EN 0x1
++
++#define EDMAC_CONTROL_SRC_WIDTH_MASK GENMASK(18, 16)
++#define EDMAC_CONTROL_DST_WIDTH_MASK GENMASK(14, 12)
++#endif
+diff --git a/drivers/edmac/Kconfig b/drivers/edmac/Kconfig
+new file mode 100644
+index 000000000..ad77f151d
+--- /dev/null
++++ b/drivers/edmac/Kconfig
+@@ -0,0 +1,29 @@
++#
++# EDMAC device configuration
++#
++
++config EDMAC
++	tristate "Vendor EDMAC Controller support"
++	depends on (ARCH_BSP && !EDMACV310)
++	help
++	  Direct Memory Access (EDMA) is a high-speed data transfer
++	  operation. It supports data read/write between peripherals and
++	  memories without using the CPU.
++	  The Vendor EDMA Controller (EDMAC) directly transfers data between
++	  a memory and a peripheral, between peripherals, or between memories.
++	  This avoids CPU intervention and reduces the interrupt handling
++	  overhead of the CPU.
++
++if EDMAC
++
++config EDMAC_CHANNEL_NUM
++	int "edmac channel num"
++	default "8"
++
++config EDMAC_INTERRUPT
++	bool "Vendor EDMAC Controller interrupt mode support"
++	depends on EDMAC
++	help
++	  Enable interrupt mode for the Vendor EDMAC Controller.
++
++endif
+diff --git a/drivers/edmac/Makefile b/drivers/edmac/Makefile
+new file mode 100644
+index 000000000..dda2aaa97
+--- /dev/null
++++ b/drivers/edmac/Makefile
+@@ -0,0 +1,4 @@
++#
++# Makefile for the edmac drivers.
++#
++obj-$(CONFIG_EDMAC) += edmacv310.o
+diff --git a/drivers/edmac/edma_ss928v100.h b/drivers/edmac/edma_ss928v100.h
+new file mode 100644
+index 000000000..47f7bc854
+--- /dev/null
++++ b/drivers/edmac/edma_ss928v100.h
+@@ -0,0 +1,59 @@
++/*
++ *
++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef __EDMA_SS928V100_H__
++#define __EDMA_SS928V100_H__
++
++#include "edmacv310.h"
++#define EDMAC_MAX_PERIPHERALS 32
++#define EDMAC_CHANNEL_NUM 8
++
++#define UART0_REG_BASE 0x11040000
++#define UART1_REG_BASE 0x11041000
++#define UART2_REG_BASE 0x11042000
++#define UART3_REG_BASE 0x11043000
++#define UART4_REG_BASE 0x11044000
++#define UART5_REG_BASE 0x11045000
++#define UART6_REG_BASE 0x11046000
++#define UART7_REG_BASE 0x11047000
++
++#define UART0_DR (UART0_REG_BASE + 0x0)
++#define UART1_DR (UART1_REG_BASE + 0x0)
++#define UART2_DR (UART2_REG_BASE + 0x0)
++#define UART3_DR (UART3_REG_BASE + 0x0)
++#define UART4_DR (UART4_REG_BASE + 0x0)
++#define UART5_DR (UART5_REG_BASE + 0x0)
++#define UART6_DR (UART6_REG_BASE + 0x0)
++#define UART7_DR (UART7_REG_BASE + 0x0)
++
++#define I2C0_REG_BASE 0x11060000
++#define I2C1_REG_BASE 0x11061000
++
++#define I2C0_TX_FIFO (I2C0_REG_BASE + 0x20)
++#define I2C0_RX_FIFO (I2C0_REG_BASE + 0x24)
++
++#define I2C1_TX_FIFO (I2C1_REG_BASE + 0x20)
++#define I2C1_RX_FIFO (I2C1_REG_BASE + 0x24)
++
++#define EDMAC_TX 1
++#define EDMAC_RX 0
++
++edmac_peripheral g_peripheral[EDMAC_MAX_PERIPHERALS] = {
++	{0, I2C0_RX_FIFO, DMAC_HOST1, (0x40000004), PERI_8BIT_MODE, 0},
++	{1, I2C0_TX_FIFO, DMAC_HOST1, (0x80000004), PERI_8BIT_MODE, 0},
++	{2, I2C1_RX_FIFO, DMAC_HOST1, (0x40000004), PERI_8BIT_MODE, 0},
++	{3, I2C1_TX_FIFO, DMAC_HOST1, (0x80000004), PERI_8BIT_MODE, 0},
++};
++#endif
+diff --git a/drivers/edmac/edmacv310.c b/drivers/edmac/edmacv310.c
+new file mode 100644
+index 000000000..b837da1c2
+--- /dev/null
++++ b/drivers/edmac/edmacv310.c
+@@ -0,0 +1,950 @@
++/*
++ *
++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "edmacv310.h" ++#include ++ ++#ifdef CONFIG_ARCH_SS928V100 ++#include "edma_ss928v100.h" ++#endif ++ ++int g_channel_status[EDMAC_CHANNEL_NUM]; ++DMAC_ISR *function[EDMAC_CHANNEL_NUM]; ++unsigned long pllihead[2] = {0, 0}; ++void __iomem *dma_regbase; ++int edmacv310_trace_level_n = EDMACV310_TRACE_LEVEL; ++ ++struct edmac_host { ++ struct platform_device *pdev; ++ void __iomem *base; ++ struct regmap *misc_regmap; ++ unsigned int misc_ctrl_base; ++ void __iomem *crg_ctrl; ++ unsigned int id; ++ struct clk *clk; ++ struct clk *axi_clk; ++ unsigned int irq; ++ struct reset_control *rstc; ++ unsigned int channels; ++ unsigned int slave_requests; ++}; ++ ++#define DRIVER_NAME "edmacv310" ++ ++#define DMA_CFG_ERR 0 ++#define DMA_TRANS_ERR 1 ++#define DMA_LLI_ERR 2 ++ ++int dmac_channel_allocate(void) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < EDMAC_CHANNEL_NUM; i++) { ++ if (g_channel_status[i] == DMAC_CHN_VACANCY) { ++ g_channel_status[i] = DMAC_CHN_ALLOCAT; ++ return i; ++ } ++ } ++ ++ edmacv310_error("no to alloc\n"); ++ return -1; ++} ++EXPORT_SYMBOL(dmac_channel_allocate); ++ ++static void edmac_read_err_status(unsigned int *channel_err_status, ++ const unsigned int err_status_len) ++{ ++ if (err_status_len < EDMAC_ERR_REG_NUM) { ++ edmacv310_error("channel_err_status size err.\n"); ++ return; ++ } ++ channel_err_status[DMA_CFG_ERR] = edmacv310_readl(dma_regbase + EDMAC_INT_ERR1); ++ channel_err_status[DMA_TRANS_ERR] = edmacv310_readl(dma_regbase + EDMAC_INT_ERR2); ++ channel_err_status[DMA_LLI_ERR] = edmacv310_readl(dma_regbase + EDMAC_INT_ERR3); ++} ++ ++static void edmac_err_status_filter(unsigned int *channel_err_status, ++ const unsigned int err_status_len, ++ unsigned int curr_channel) ++{ ++ if (err_status_len < EDMAC_ERR_REG_NUM) { ++ edmacv310_error("channel_err_status size err.\n"); ++ return; ++ } ++ channel_err_status[DMA_CFG_ERR] = (channel_err_status[DMA_CFG_ERR] >> curr_channel) & 0x01; ++ channel_err_status[DMA_TRANS_ERR] = (channel_err_status[DMA_TRANS_ERR] >> curr_channel) & 0x01; ++ channel_err_status[DMA_LLI_ERR] = (channel_err_status[DMA_LLI_ERR] >> curr_channel) & 0x01; ++} ++ ++static void edmac_clear_err_status(unsigned int curr_channel) ++{ ++ edmacv310_writel(1 << curr_channel, dma_regbase + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(1 << curr_channel, dma_regbase + EDMAC_INT_ERR2_RAW); ++ edmacv310_writel(1 << curr_channel, dma_regbase + EDMAC_INT_ERR3_RAW); ++} ++ ++/* ++ * update the state of channels ++ */ ++static int edmac_update_status(unsigned int channel) ++{ ++ unsigned int channel_status; ++ unsigned int channel_tc_status; ++ unsigned int channel_err_status[EDMAC_ERR_REG_NUM]; ++ unsigned int i = channel; ++ unsigned long update_jiffies_timeout; ++ ++ update_jiffies_timeout = jiffies + EDMAC_UPDATE_TIMEOUT; ++ while (1) { ++ channel_status = edmacv310_readl(dma_regbase + EDMAC_INT_STAT); ++ channel_status = (channel_status >> i) & 0x01; ++ if (channel_status) { ++ channel_tc_status = edmacv310_readl(dma_regbase + EDMAC_INT_TC1); ++ channel_tc_status = (channel_tc_status >> i) & 0x01; ++ if (channel_tc_status) { ++ edmacv310_writel(1 << i, dma_regbase + EDMAC_INT_TC1_RAW); ++ g_channel_status[i] = DMAC_CHN_SUCCESS; ++ break; ++ } ++ ++ channel_tc_status = edmacv310_readl(dma_regbase + EDMAC_INT_TC2); ++ 
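++			/*
++			 * TC2 is the second transfer-complete line; as with
++			 * TC1 above, bit i reports channel i and is cleared
++			 * by writing the RAW register.
++			 */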
channel_tc_status = (channel_tc_status >> i) & 0x01;
++			if (channel_tc_status) {
++				edmacv310_writel(1 << i, dma_regbase + EDMAC_INT_TC2_RAW);
++				g_channel_status[i] = DMAC_CHN_SUCCESS;
++				break;
++			}
++
++			edmac_read_err_status(channel_err_status, EDMAC_ERR_REG_NUM);
++			edmac_err_status_filter(channel_err_status, EDMAC_ERR_REG_NUM, i);
++
++			if (channel_err_status[DMA_CFG_ERR] |
++			    channel_err_status[DMA_TRANS_ERR] |
++			    channel_err_status[DMA_LLI_ERR]) {
++				edmacv310_error("Error in EDMAC %d finish!\n", i);
++				edmac_read_err_status(channel_err_status, EDMAC_ERR_REG_NUM);
++				edmac_clear_err_status(i);
++				g_channel_status[i] = -DMAC_CHN_ERROR;
++				break;
++			}
++		}
++
++		if (!time_before(jiffies, update_jiffies_timeout)) {
++			edmacv310_error("Timeout in DMAC %d!\n", i);
++			g_channel_status[i] = -DMAC_CHN_TIMEOUT;
++			break;
++		}
++	}
++	return g_channel_status[i];
++}
++
++/*
++ * register a user's callback for the channel
++ */
++int dmac_register_isr(unsigned int channel, void *pisr)
++{
++	/* channel is unsigned, so only the upper bound needs checking */
++	if (channel >= EDMAC_CHANNEL_NUM) {
++		edmacv310_error("invalid channel, channel=%d\n", channel);
++		return -EINVAL;
++	}
++
++	function[channel] = (void *)pisr;
++
++	return 0;
++}
++EXPORT_SYMBOL(dmac_register_isr);
++
++/*
++ * free a channel
++ */
++int dmac_channel_free(unsigned int channel)
++{
++	if (channel < EDMAC_CHANNEL_NUM)
++		g_channel_status[channel] = DMAC_CHN_VACANCY;
++
++	return 0;
++}
++EXPORT_SYMBOL(dmac_channel_free);
++
++#define CHANNELS_PER_PERIPHERAL 2
++/* returns the peripheral index, or -1 when the address matches no entry */
++static int dmac_check_request(unsigned int peripheral_addr,
++			      int direction)
++{
++	int i;
++
++	for (i = direction; i < EDMAC_MAX_PERIPHERALS; i += CHANNELS_PER_PERIPHERAL) {
++		if (g_peripheral[i].peri_addr == peripheral_addr)
++			return i;
++	}
++	edmacv310_error("Invalid devaddr\n");
++	return -1;
++}
++
++void edmac_channel_free(int channel)
++{
++	if ((channel >= 0) && (channel < EDMAC_CHANNEL_NUM))
++		g_channel_status[channel] = DMAC_CHN_VACANCY;
++}
++
++/*
++ * wait for the transfer to end
++ */
++int dmac_wait(int channel)
++{
++	int ret_result;
++	int ret = 0;
++
++	if (channel < 0)
++		return -1;
++
++	while (1) {
++		ret_result = edmac_update_status(channel);
++		if (ret_result == -DMAC_CHN_ERROR) {
++			edmacv310_error("Transfer Error.\n");
++			ret = -1;
++			goto end;
++		} else if (ret_result == DMAC_NOT_FINISHED) {
++			udelay(DMAC_FINISHED_WAIT_TIME);
++		} else if (ret_result == DMAC_CHN_SUCCESS) {
++			ret = DMAC_CHN_SUCCESS;
++			goto end;
++		} else if (ret_result == DMAC_CHN_VACANCY) {
++			ret = DMAC_CHN_SUCCESS;
++			goto end;
++		} else if (ret_result == -DMAC_CHN_TIMEOUT) {
++			edmacv310_error("Timeout.\n");
++			edmacv310_writel(EDMAC_CX_DISABLE,
++					 dma_regbase + edmac_cx_config(channel));
++			g_channel_status[channel] = DMAC_CHN_VACANCY;
++			ret = -1;
++			return ret;
++		}
++	}
++end:
++	edmacv310_writel(EDMAC_CX_DISABLE,
++			 dma_regbase + edmac_cx_config(channel));
++	edmac_channel_free(channel);
++	return ret;
++}
++EXPORT_SYMBOL(dmac_wait);
++
++enum {
++	M2P_TRANSFER,
++	P2M_TRANSFER
++};
++
++static int __dmac_m2p_transfer(unsigned long long memaddr, unsigned int uwperipheralid,
++			       unsigned int length, int direction)
++{
++	int ulchnn;
++	unsigned int uwwidth, temp;
++	unsigned long long src_addr, dst_addr, peri_addr;
++
++	ulchnn = dmac_channel_allocate();
++	if (ulchnn == -1)
++		return -1;
++
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "ulchnn = %d\n", ulchnn);
++	uwwidth = g_peripheral[uwperipheralid].transfer_width;
++	if ((length >> uwwidth) >= EDMAC_TRANS_MAXSIZE) {
++		edmacv310_error("The length is more than 64k!\n");
++		return -1;
++	}
++	peri_addr = g_peripheral[uwperipheralid].peri_addr & 0xffffffff;
++	src_addr = direction == M2P_TRANSFER ? memaddr : peri_addr;
++	dst_addr = direction == M2P_TRANSFER ? peri_addr : memaddr;
++
++	edmacv310_writel(dst_addr & 0xffffffff,
++			 dma_regbase + edmac_cx_dest_addr_l(ulchnn));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((dst_addr >> EDMACV310_32BIT) & 0xffffffff,
++			 dma_regbase + edmac_cx_dest_addr_h(ulchnn));
++#endif
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_DEST_ADDR_L = 0x%x\n",
++			edmacv310_readl(dma_regbase + edmac_cx_dest_addr_l(ulchnn)));
++
++	edmacv310_writel(src_addr & 0xffffffff,
++			 dma_regbase + edmac_cx_src_addr_l(ulchnn));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((src_addr >> EDMACV310_32BIT) & 0xffffffff,
++			 dma_regbase + edmac_cx_src_addr_h(ulchnn));
++#endif
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_SRC_ADDR_L = 0x%x\n",
++			edmacv310_readl(dma_regbase + edmac_cx_src_addr_l(ulchnn)));
++
++	edmacv310_writel(0, dma_regbase + edmac_cx_lli_l(ulchnn));
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_LLI_L = 0x%x\n",
++			edmacv310_readl(dma_regbase + edmac_cx_lli_l(ulchnn)));
++
++	edmacv310_writel(length, dma_regbase + edmac_cx_cnt0(ulchnn));
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_CNT0 = 0x%x\n",
++			edmacv310_readl(dma_regbase + edmac_cx_cnt0(ulchnn)));
++
++	temp = g_peripheral[uwperipheralid].transfer_cfg | (uwwidth << EDMA_SRC_WIDTH_OFFSET) |
++	       (uwperipheralid << PERI_ID_OFFSET) | EDMA_CH_ENABLE;
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_CONFIG = 0x%x\n", temp);
++	edmacv310_writel(temp, dma_regbase + edmac_cx_config(ulchnn));
++	return ulchnn;
++}
++
++/*
++ * execute a memory to peripheral dma transfer without LLI
++ */
++int dmac_m2p_transfer(unsigned long long memaddr, unsigned int uwperipheralid,
++		      unsigned int length)
++{
++	return __dmac_m2p_transfer(memaddr, uwperipheralid, length, M2P_TRANSFER);
++}
++
++/*
++ * execute a peripheral to memory dma transfer without LLI
++ */
++int dmac_p2m_transfer(unsigned long memaddr, unsigned int uwperipheralid,
++		      unsigned int length)
++{
++	return __dmac_m2p_transfer(memaddr, uwperipheralid, length, P2M_TRANSFER);
++}
++
++int do_dma_m2p(unsigned long long memaddr, unsigned int peripheral_addr,
++	       unsigned int length)
++{
++	int ret;
++	int uwperipheralid;
++
++	uwperipheralid = dmac_check_request(peripheral_addr, EDMAC_TX);
++	if (uwperipheralid < 0) {
++		edmacv310_error("m2p:Invalid devaddr\n");
++		return -1;
++	}
++
++	ret = dmac_m2p_transfer(memaddr, uwperipheralid, length);
++	if (ret == -1) {
++		edmacv310_error("m2p:trans err\n");
++		return -1;
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL(do_dma_m2p);
++
++int do_dma_p2m(unsigned long memaddr, unsigned int peripheral_addr,
++	       unsigned int length)
++{
++	int ret;
++	int uwperipheralid;
++
++	uwperipheralid = dmac_check_request(peripheral_addr, EDMAC_RX);
++	if (uwperipheralid < 0) {
++		edmacv310_error("p2m:Invalid devaddr.\n");
++		return -1;
++	}
++
++	ret = dmac_p2m_transfer(memaddr, uwperipheralid, length);
++	if (ret == -1) {
++		edmacv310_error("p2m:trans err\n");
++		return -1;
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL(do_dma_p2m);
++
++/*
++ * build the LLI chain for a memory to memory DMA transfer
++ */
++int dmac_buildllim2m(const unsigned long *ppheadlli,
++		     unsigned long psource,
++		     unsigned long pdest,
++		     unsigned int totaltransfersize,
++		     unsigned int uwnumtransfers)
++{
++	int lli_num;
++	unsigned long phy_address;
++	int j;
++	dmac_lli *plli = NULL;
++
++	if (uwnumtransfers == 0)
++		return -EINVAL;
++
++	lli_num = (totaltransfersize / uwnumtransfers);
++	if ((totaltransfersize % uwnumtransfers) != 0)
++		lli_num++;
++
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "lli_num:%d\n", lli_num);
++
++	phy_address = ppheadlli[0];
++	plli = (dmac_lli *)ppheadlli[1];
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "phy_address: 0x%lx\n", phy_address);
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "address: 0x%p\n", plli);
++	for (j = 0; j < lli_num; j++) {
++		(void)memset_s(plli, sizeof(dmac_lli), 0x0, sizeof(dmac_lli));
++		/*
++		 * for the last transfer, chain_en should be set to 0x0;
++		 * for every other transfer, chain_en should be set to 0x2;
++		 */
++		plli->next_lli = (phy_address + (j + 1) * sizeof(dmac_lli)) &
++				 (~(EDMAC_LLI_ALIGN - 1));
++		if (j < lli_num - 1) {
++			plli->next_lli |= EDMAC_LLI_ENABLE;
++			plli->count = uwnumtransfers;
++		} else {
++			plli->next_lli |= EDMAC_LLI_DISABLE;
++			/* last chunk: the remainder, or a full chunk when the
++			 * total size divides evenly */
++			plli->count = (totaltransfersize % uwnumtransfers) ?
++				      (totaltransfersize % uwnumtransfers) : uwnumtransfers;
++		}
++
++		plli->src_addr = psource;
++		plli->dest_addr = pdest;
++		plli->config = EDMAC_CXCONFIG_M2M_LLI;
++
++		psource += uwnumtransfers;
++		pdest += uwnumtransfers;
++		plli++;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(dmac_buildllim2m);
++
++/*
++ * load the configuration of the first LLI for memory to memory
++ */
++int dmac_start_llim2m(unsigned int channel, const unsigned long *pfirst_lli)
++{
++	unsigned int i = channel;
++	dmac_lli *plli;
++
++	plli = (dmac_lli *)pfirst_lli[1];
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "plli.src_addr: 0x%lx\n", plli->src_addr);
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "plli.dst_addr: 0x%lx\n", plli->dest_addr);
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "plli.next_lli: 0x%lx\n", plli->next_lli);
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "plli.count: %u\n", plli->count);
++
++	/* the LLI registers hold the pointer to the next descriptor; the
++	 * first descriptor itself is loaded into the channel registers */
++	edmacv310_writel(plli->next_lli & 0xffffffff,
++			 dma_regbase + edmac_cx_lli_l(i));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((plli->next_lli >> EDMACV310_32BIT) & 0xffffffff,
++			 dma_regbase + edmac_cx_lli_h(i));
++#endif
++	edmacv310_writel(plli->count, dma_regbase + edmac_cx_cnt0(i));
++
++	edmacv310_writel(plli->src_addr & 0xffffffff,
++			 dma_regbase + edmac_cx_src_addr_l(i));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((plli->src_addr >> EDMACV310_32BIT) & 0xffffffff,
++			 dma_regbase + edmac_cx_src_addr_h(i));
++#endif
++	edmacv310_writel(plli->dest_addr & 0xffffffff,
++			 dma_regbase + edmac_cx_dest_addr_l(i));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((plli->dest_addr >> EDMACV310_32BIT) & 0xffffffff,
++			 dma_regbase + edmac_cx_dest_addr_h(i));
++#endif
++	edmacv310_writel(plli->config | EDMA_CH_ENABLE,
++			 dma_regbase + edmac_cx_config(i));
++
++	return 0;
++}
++EXPORT_SYMBOL(dmac_start_llim2m);
++
++/*
++ * config registers for a memory to memory DMA transfer without LLI
++ */
++int dmac_start_m2m(unsigned int channel, unsigned long psource,
++		   unsigned long pdest, unsigned int uwnumtransfers)
++{
++	unsigned int i = channel;
++
++	if (uwnumtransfers > EDMAC_TRANS_MAXSIZE || uwnumtransfers == 0) {
++		edmacv310_error("Invalid transfer size, size=%x\n", uwnumtransfers);
++		return -EINVAL;
++	}
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "channel[%d],source=0x%lx,dest=0x%lx,length=%d\n",
++			channel, psource, pdest, uwnumtransfers);
++
++	edmacv310_writel(psource & 0xffffffff,
++			 dma_regbase + edmac_cx_src_addr_l(i));
++	edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_SRC_ADDR_L = 0x%x\n",
++			edmacv310_readl(dma_regbase + edmac_cx_src_addr_l(i)));
++#ifdef CONFIG_ARM64
++	edmacv310_writel((psource >>
EDMACV310_32BIT) & 0xffffffff, ++ dma_regbase + edmac_cx_src_addr_h(i)); ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_SRC_ADDR_H = 0x%x\n", ++ edmacv310_readl(dma_regbase + edmac_cx_src_addr_h(i))); ++#endif ++ edmacv310_writel(pdest & 0xffffffff, dma_regbase + edmac_cx_dest_addr_l(i)); ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_DEST_ADDR_L = 0x%x\n", ++ edmacv310_readl(dma_regbase + edmac_cx_dest_addr_l(i))); ++#ifdef CONFIG_ARM64 ++ edmacv310_writel((pdest >> EDMACV310_32BIT) & 0xffffffff, ++ dma_regbase + edmac_cx_dest_addr_h(i)); ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "EDMAC_Cx_DEST_ADDR_H = 0x%x\n", ++ edmacv310_readl(dma_regbase + edmac_cx_dest_addr_h(i))); ++#endif ++ edmacv310_writel(0, dma_regbase + edmac_cx_lli_l(i)); ++ ++ edmacv310_writel(uwnumtransfers, dma_regbase + edmac_cx_cnt0(i)); ++ ++ edmacv310_writel(EDMAC_CXCONFIG_M2M | EDMA_CH_ENABLE, ++ dma_regbase + edmac_cx_config(i)); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dmac_start_m2m); ++ ++/* ++ * execute memory to memory dma transfer without LLI ++ */ ++int dmac_m2m_transfer(unsigned long source, unsigned long dest, ++ unsigned int length) ++{ ++ unsigned int dma_size; ++ unsigned int ulchnn, dma_count, left_size; ++ ++ left_size = length; ++ dma_count = 0; ++ ulchnn = dmac_channel_allocate(); ++ if (ulchnn < 0) ++ return -EINVAL; ++ ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_DEBUG, "using channel[%d],source=0x%lx,dest=0x%lx,length=%d\n", ++ ulchnn, source, dest, length); ++ ++ while (left_size) { ++ if (left_size >= EDMAC_TRANS_MAXSIZE) ++ dma_size = EDMAC_TRANS_MAXSIZE; ++ else ++ dma_size = left_size; ++ if (dmac_start_m2m(ulchnn, source + dma_count * dma_size, ++ dest + dma_count * dma_size, dma_size)) { ++ edmacv310_error("dma transfer error...\n"); ++ return -1; ++ } ++ ++ if (dmac_wait(ulchnn) != DMAC_CHN_SUCCESS) { ++ edmacv310_error("dma transfer error...\n"); ++ return -1; ++ } ++ left_size -= dma_size; ++ dma_count++; ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "left_size is %d.\n", left_size); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(dmac_m2m_transfer); ++ ++/* ++ * memory to memory dma transfer with LLI ++ * ++ * @source ++ * @dest ++ * @length ++ * */ ++int do_dma_llim2m(unsigned long source, ++ unsigned long dest, ++ unsigned long length) ++{ ++ int ret = 0; ++ unsigned chnn; ++ ++ chnn = dmac_channel_allocate(); ++ if (chnn < 0) { ++ ret = -1; ++ goto end; ++ } ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "chnn:%d,src:%lx,dst:%lx,len:%ld.\n", chnn, source, dest, ++ length); ++ ++ if (pllihead[0] == 0) { ++ edmacv310_error("ppheadlli[0] is NULL.\n"); ++ ret = -ENOMEM; ++ goto end; ++ } ++ ++ ret = dmac_buildllim2m(pllihead, source, dest, length, EDMAC_TRANS_MAXSIZE); ++ if (ret) { ++ edmacv310_error("build lli error...\n"); ++ ret = -EIO; ++ goto end; ++ } ++ ret = dmac_start_llim2m(chnn, pllihead); ++ if (ret) { ++ edmacv310_error("start lli error...\n"); ++ ret = -EIO; ++ goto end; ++ } ++ ++end: ++ return ret; ++} ++EXPORT_SYMBOL(do_dma_llim2m); ++ ++/* ++ * alloc_dma_lli_space ++ * output: ++ * ppheadlli[0]: memory physics address ++ * ppheadlli[1]: virtual address ++ * ++ */ ++int allocate_dmalli_space(struct device *dev, unsigned long *ppheadlli, ++ unsigned int page_num) ++{ ++ dma_addr_t dma_phys; ++ void *dma_virt; ++ ++ dma_virt = dma_alloc_coherent(dev, page_num * PAGE_SIZE, ++ &dma_phys, GFP_DMA); ++ if (dma_virt == NULL) { ++ edmacv310_error("can't get dma mem from system\n"); ++ return -1; ++ } ++ ++ ppheadlli[0] = (unsigned long)(dma_phys); ++ ppheadlli[1] = 
(unsigned long)(dma_virt); ++ ++ if (dma_phys & (EDMAC_LLI_ALIGN - 1)) ++ return -1; ++ ++ return 0; ++} ++EXPORT_SYMBOL(allocate_dmalli_space); ++ ++#define MISC_REG_WIDTH 4 /* byte */ ++#define DMA_REQ_SEL_MASK 0x3f ++#define DMA_REQ_PER_MISC_REG 4 ++#define BIT_PER_DMA_REQ 8 ++ ++static int edmac_priv_init(struct edmac_host *edmac, ++ edmac_peripheral *peripheral_info) ++{ ++ struct regmap *misc = edmac->misc_regmap; ++ int i; ++ unsigned int count = 0; ++ unsigned int miscreg_addr_of_dma_req_line, dma_req_line_offset_in_miscreg; ++ unsigned ctrl = 0; ++ ++ for (i = 0; i < EDMAC_MAX_PERIPHERALS; i++) { ++ if (peripheral_info[i].host_sel == edmac->id) { ++ if (misc != NULL) { ++ dma_req_line_offset_in_miscreg = count % DMA_REQ_PER_MISC_REG; ++ miscreg_addr_of_dma_req_line = edmac->misc_ctrl_base + count - count % MISC_REG_WIDTH; ++ regmap_read(misc, miscreg_addr_of_dma_req_line, &ctrl); ++ ctrl &= ~(DMA_REQ_SEL_MASK << (dma_req_line_offset_in_miscreg * BIT_PER_DMA_REQ)); ++ ctrl |= peripheral_info[i].peri_id << (dma_req_line_offset_in_miscreg * BIT_PER_DMA_REQ); ++ regmap_write(misc, miscreg_addr_of_dma_req_line, ctrl); ++ } ++ peripheral_info[i].dynamic_periphery_num = count; ++ count++; ++ } ++ } ++ ++ return 0; ++} ++ ++static int of_probe_read(struct edmac_host *edmac, struct device_node *np) ++{ ++ int ret; ++ ++ ret = of_property_read_u32(np, ++ "devid", &(edmac->id)); ++ if (ret) { ++ edmacv310_error("get edmac id fail\n"); ++ return -ENODEV; ++ } ++ ++ if (!of_find_property(np, "misc_regmap", NULL) || ++ !of_find_property(np, "misc_ctrl_base", NULL)) { ++ edmac->misc_regmap = 0; ++ } else { ++ edmac->misc_regmap = syscon_regmap_lookup_by_phandle(np, "misc_regmap"); ++ if (IS_ERR(edmac->misc_regmap)) { ++ edmacv310_error("get edmac misc fail\n"); ++ return PTR_ERR(edmac->misc_regmap); ++ } ++ ++ ret = of_property_read_u32(np, ++ "misc_ctrl_base", &(edmac->misc_ctrl_base)); ++ if (ret) { ++ edmacv310_error("get dma-misc_ctrl_base fail\n"); ++ return -ENODEV; ++ } ++ } ++ ret = of_property_read_u32(np, ++ "dma-channels", &(edmac->channels)); ++ if (ret) { ++ edmacv310_error("get dma-channels fail\n"); ++ return -ENODEV; ++ } ++ ret = of_property_read_u32(np, ++ "dma-requests", &(edmac->slave_requests)); ++ if (ret) { ++ edmacv310_error("get dma-requests fail\n"); ++ return -ENODEV; ++ } ++ edmacv310_trace(EDMACV310_TRACE_LEVEL_INFO, "dma-channels = %d, dma-requests = %d\n", ++ edmac->channels, edmac->slave_requests); ++ return 0; ++} ++ ++static int of_probe_get_resource(struct edmac_host *edmac, ++ struct platform_device *platdev) ++{ ++ struct resource *res = NULL; ++ edmac->clk = devm_clk_get(&(platdev->dev), "apb_pclk"); ++ if (IS_ERR(edmac->clk)) { ++ edmacv310_error("get edmac clk fail\n"); ++ return PTR_ERR(edmac->clk); ++ } ++ ++ edmac->axi_clk = devm_clk_get(&(platdev->dev), "axi_aclk"); ++ if (IS_ERR(edmac->axi_clk)) { ++ edmacv310_error("get edmac axi clk fail\n"); ++ return PTR_ERR(edmac->axi_clk); ++ } ++ ++ edmac->rstc = devm_reset_control_get(&(platdev->dev), "dma-reset"); ++ if (IS_ERR(edmac->rstc)) { ++ edmacv310_error("get edmac rstc fail\n"); ++ return PTR_ERR(edmac->rstc); ++ } ++ ++ res = platform_get_resource(platdev, IORESOURCE_MEM, 0); ++ if (res == NULL) { ++ edmacv310_error("no reg resource\n"); ++ return -ENODEV; ++ } ++ ++ edmac->base = devm_ioremap_resource(&(platdev->dev), res); ++ if (IS_ERR(edmac->base)) { ++ edmacv310_error("get edmac base fail\n"); ++ return PTR_ERR(edmac->base); ++ } ++ ++ res = platform_get_resource_byname(platdev, IORESOURCE_MEM, 
"dma_peri_channel_req_sel"); ++ if (res != NULL) { ++ void *dma_peri_channel_req_sel = ioremap(res->start, res->end - res->start); ++ if (IS_ERR(dma_peri_channel_req_sel)) ++ return PTR_ERR(dma_peri_channel_req_sel); ++ writel(0xffffffff, dma_peri_channel_req_sel); ++ iounmap(dma_peri_channel_req_sel); ++ } ++ return 0; ++} ++ ++static int get_of_probe(struct edmac_host *edmac) ++{ ++ struct platform_device *platdev = edmac->pdev; ++ struct device_node *np = platdev->dev.of_node; ++ int ret; ++ ++ ret = of_probe_read(edmac, np); ++ if (ret) ++ return ret; ++ ++ ret = of_probe_get_resource(edmac, platdev); ++ if (ret) ++ return ret; ++ ++ edmac->irq = platform_get_irq(platdev, 0); ++ if (unlikely(edmac->irq < 0)) ++ return -ENODEV; ++ ++ edmac_priv_init(edmac, (edmac_peripheral *)&g_peripheral); ++ return 0; ++} ++ ++/* Don't need irq mode now */ ++#if defined(CONFIG_EDMAC_INTERRUPT) ++static irqreturn_t emdacv310_irq(int irq, void *dev) ++{ ++ struct edmac_host *edmac = (struct edmac_host *)dev; ++ unsigned int channel_err_status[EDMAC_ERR_REG_NUM]; ++ unsigned int channel_tc_status, channel_status; ++ int i; ++ unsigned int mask = 0; ++ ++ channel_status = edmacv310_readl(edmac->base + EDMAC_INT_STAT); ++ if (!channel_status) { ++ edmacv310_error("channel_status = 0x%x\n", channel_status); ++ return IRQ_NONE; ++ } ++ ++ for (i = 0; i < edmac->channels; i++) { ++ channel_status = (channel_status >> i) & 0x1; ++ if (channel_status) { ++ channel_tc_status = edmacv310_readl(edmac->base + EDMAC_INT_TC1_RAW); ++ channel_tc_status = (channel_tc_status >> i) & 0x01; ++ if (channel_tc_status) ++ edmacv310_writel(channel_tc_status << i, edmac->base + EDMAC_INT_TC1_RAW); ++ ++ channel_tc_status = edmacv310_readl(edmac->base + EDMAC_INT_TC2); ++ channel_tc_status = (channel_tc_status >> i) & 0x01; ++ if (channel_tc_status) ++ edmacv310_writel(channel_tc_status << i, edmac->base + EDMAC_INT_TC2_RAW); ++ ++ channel_err_status[DMA_CFG_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR1); ++ channel_err_status[DMA_CFG_ERR] = (channel_err_status[DMA_CFG_ERR] >> i) & 0x01; ++ channel_err_status[DMA_TRANS_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR2); ++ channel_err_status[DMA_TRANS_ERR] = (channel_err_status[DMA_TRANS_ERR] >> i) & 0x01; ++ channel_err_status[DMA_LLI_ERR] = edmacv310_readl(edmac->base + EDMAC_INT_ERR3); ++ channel_err_status[DMA_LLI_ERR] = (channel_err_status[DMA_LLI_ERR] >> i) & 0x01; ++ ++ if (channel_err_status[DMA_CFG_ERR] | ++ channel_err_status[DMA_TRANS_ERR] | ++ channel_err_status[DMA_LLI_ERR]) { ++ edmacv310_error("Error in edmac %d finish!,ERR1 = 0x%x,ERR2 = 0x%x,ERR3 = 0x%x\n", ++ i, channel_err_status[DMA_CFG_ERR], ++ channel_err_status[DMA_TRANS_ERR], ++ channel_err_status[DMA_LLI_ERR]); ++ edmacv310_writel(1 << i, edmac->base + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(1 << i, edmac->base + EDMAC_INT_ERR2_RAW); ++ edmacv310_writel(1 << i, edmac->base + EDMAC_INT_ERR3_RAW); ++ } ++ if ((function[i]) != NULL) ++ function[i](i, g_channel_status[i]); ++ ++ mask |= (1 << i); ++ edmacv310_writel(EDMAC_CX_DISABLE, dma_regbase + edmac_cx_config(i)); ++ edmac_channel_free(i); ++ } ++ } ++ ++ return mask ? 
IRQ_HANDLED : IRQ_NONE; ++} ++#endif ++ ++static void edmac310_dev_init(const struct edmac_host *edmac) ++{ ++ clk_prepare_enable(edmac->clk); ++ clk_prepare_enable(edmac->axi_clk); ++ ++ reset_control_deassert(edmac->rstc); ++ ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC1_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_TC2_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR1_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR2_RAW); ++ edmacv310_writel(EDMAC_ALL_CHAN_CLR, edmac->base + EDMAC_INT_ERR3_RAW); ++ ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_TC1_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_TC2_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR1_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR2_MASK); ++ edmacv310_writel(EDMAC_INT_ENABLE_ALL_CHAN, ++ edmac->base + EDMAC_INT_ERR3_MASK); ++} ++ ++static int __init edmacv310_probe(struct platform_device *pdev) ++{ ++ int ret = 0; ++ int i = 0; ++ struct edmac_host *edmac = NULL; ++ ++ edmac = kzalloc(sizeof(*edmac), GFP_KERNEL); ++ if (!edmac) { ++ edmacv310_error("malloc for edmac fail!"); ++ ret = -ENOMEM; ++ return ret; ++ } ++ edmac->pdev = pdev; ++ ++ ret = get_of_probe(edmac); ++ if (ret) { ++ edmacv310_error("get dts info fail!"); ++ goto free_edmac; ++ } ++ ++ for (i = 0; i < EDMAC_CHANNEL_NUM; i++) ++ g_channel_status[i] = DMAC_CHN_VACANCY; ++ ++ dma_regbase = edmac->base; ++ ++ ret = allocate_dmalli_space(&(edmac->pdev->dev), pllihead, ++ EDMAC_LLI_PAGE_NUM); ++ if (ret < 0) ++ goto free_edmac; ++ ++#if defined(CONFIG_EDMAC_INTERRUPT) ++ /* register irq if necessary! */ ++ ret = request_irq(edmac->irq, emdacv310_irq, 0, DRIVER_NAME, edmac); ++ if (ret) { ++ edmacv310_error("fail to request irq"); ++ goto free_edmac; ++ } ++#endif ++ edmac310_dev_init(edmac); ++ return 0; ++ ++free_edmac: ++ kfree(edmac); ++ ++ return ret; ++} ++ ++static int emda_remove(struct platform_device *pdev) ++{ ++ int err = 0; ++ return err; ++} ++ ++static const struct of_device_id edmacv310_match[] = { ++ { .compatible = "vendor,edmacv310_n" }, ++ {}, ++}; ++ ++static struct platform_driver edmacv310_driver = { ++ .remove = emda_remove, ++ .driver = { ++ .name = "edmacv310_n", ++ .of_match_table = edmacv310_match, ++ }, ++}; ++ ++static int __init edmacv310_init(void) ++{ ++ return platform_driver_probe(&edmacv310_driver, edmacv310_probe); ++} ++subsys_initcall(edmacv310_init); ++ ++static void __exit edmacv310_exit(void) ++{ ++ platform_driver_unregister(&edmacv310_driver); ++} ++module_exit(edmacv310_exit); ++ ++MODULE_LICENSE("GPL"); +diff --git a/drivers/edmac/edmacv310.h b/drivers/edmac/edmacv310.h +new file mode 100644 +index 000000000..b01e5f614 +--- /dev/null ++++ b/drivers/edmac/edmacv310.h +@@ -0,0 +1,186 @@ ++/* ++ * ++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#ifndef __EDMACV310_H__ ++#define __EDMACV310_H__ ++ ++/* debug control */ ++extern int edmacv310_trace_level_n; ++#define EDMACV310_TRACE_LEVEL 5 ++#define EDMACV310_TRACE_LEVEL_INFO 4 ++#define EDMACV310_TRACE_LEVEL_DEBUG 6 ++ ++#define EDMACV310_TRACE_FMT KERN_INFO ++ ++typedef void DMAC_ISR(unsigned int channel, int status); ++ ++#define EDMACV310_32BIT 32 ++#define EDMAC_UPDATE_TIMEOUT (30 * HZ) ++#define EDMAC_TRANS_MAXSIZE (64 * 1024 - 1) ++ ++#ifdef DEBUG_EDMAC ++ ++#define edmacv310_trace(level, msg...) do { \ ++ if ((level) >= edmacv310_trace_level_n) { \ ++ printk(EDMACV310_TRACE_FMT"%s:%d: ", __func__, __LINE__); \ ++ printk(msg); \ ++ printk("\n"); \ ++ } \ ++} while (0) ++ ++ ++#define edmacv310_assert(cond) do { \ ++ if (!(cond)) { \ ++ printk(KERN_ERR "Assert:edmacv310:%s:%d\n", \ ++ __func__, \ ++ __LINE__); \ ++ BUG(); \ ++ } \ ++} while (0) ++ ++#define edmacv310_error(s...) do { \ ++ printk(KERN_ERR "edmacv310:%s:%d: ", __func__, __LINE__); \ ++ printk(s); \ ++ printk("\n"); \ ++} while (0) ++ ++#else ++ ++#define edmacv310_trace(level, msg...) do { } while (0) ++#define edmacv310_assert(level, msg...) do { } while (0) ++#define edmacv310_error(level, msg...) do { } while (0) ++ ++#endif ++ ++#define edmacv310_readl(addr) ((unsigned int)readl((void *)(addr))) ++ ++#define edmacv310_writel(v, addr) do { writel(v, (void *)(addr)); \ ++} while (0) ++ ++ ++#define MAX_TRANSFER_BYTES 0xffff ++ ++#define EDMAC_ERR_REG_NUM 3 ++#define DMAC_FINISHED_WAIT_TIME 10 ++ ++/* reg offset */ ++#define EDMAC_INT_STAT 0x0 ++#define EDMAC_INT_TC1 0x4 ++#define EDMAC_INT_TC2 0x8 ++#define EDMAC_INT_ERR1 0xc ++#define EDMAC_INT_ERR2 0x10 ++#define EDMAC_INT_ERR3 0x14 ++#define EDMAC_INT_TC1_MASK 0x18 ++#define EDMAC_INT_TC2_MASK 0x1c ++#define EDMAC_INT_ERR1_MASK 0x20 ++#define EDMAC_INT_ERR2_MASK 0x24 ++#define EDMAC_INT_ERR3_MASK 0x28 ++ ++#define EDMAC_INT_TC1_RAW 0x600 ++#define EDMAC_INT_TC2_RAW 0x608 ++#define EDMAC_INT_ERR1_RAW 0x610 ++#define EDMAC_INT_ERR2_RAW 0x618 ++#define EDMAC_INT_ERR3_RAW 0x620 ++ ++#define edmac_cx_curr_cnt0(cn) (0x404 + (cn) * 0x20) ++#define edmac_cx_curr_src_addr_l(cn) (0x408 + (cn) * 0x20) ++#define edmac_cx_curr_src_addr_h(cn) (0x40c + (cn) * 0x20) ++#define edmac_cx_curr_dest_addr_l(cn) (0x410 + (cn) * 0x20) ++#define edmac_cx_curr_dest_addr_h(cn) (0x414 + (cn) * 0x20) ++ ++#define EDMAC_CH_PRI 0x688 ++#define EDMAC_CH_STAT 0x690 ++#define EDMAC_DMA_CTRL 0x698 ++ ++#define edmac_cx_base(cn) (0x800 + (cn) * 0x40) ++#define edmac_cx_lli_l(cn) (0x800 + (cn) * 0x40) ++#define edmac_cx_lli_h(cn) (0x804 + (cn) * 0x40) ++#define edmac_cx_cnt0(cn) (0x81c + (cn) * 0x40) ++#define edmac_cx_src_addr_l(cn) (0x820 + (cn) * 0x40) ++#define edmac_cx_src_addr_h(cn) (0x824 + (cn) * 0x40) ++#define edmac_cx_dest_addr_l(cn) (0x828 + (cn) * 0x40) ++#define edmac_cx_dest_addr_h(cn) (0x82c + (cn) * 0x40) ++#define edmac_cx_config(cn) (0x830 + (cn) * 0x40) ++ ++#define EDMAC_CXCONFIG_M2M 0xCFF33000 ++#define EDMAC_CXCONFIG_M2M_LLI 0xCFF00000 ++#define EDMAC_CXCONFIG_CHN_START 0x1 ++#define EDMAC_CX_DISABLE 0x0 ++ ++#define EDMAC_ALL_CHAN_CLR 0xff ++#define EDMAC_INT_ENABLE_ALL_CHAN 0xff ++ ++ ++#define EDMAC_CONFIG_SRC_INC (1 << 31) ++#define EDMAC_CONFIG_DST_INC (1 << 30) ++ ++#define EDMAC_CONFIG_SRC_WIDTH_SHIFT 16 ++#define EDMAC_CONFIG_DST_WIDTH_SHIFT 12 ++#define EDMAC_WIDTH_8BIT 0x0 ++#define EDMAC_WIDTH_16BIT 0x1 ++#define EDMAC_WIDTH_32BIT 0x10 ++#define EDMAC_WIDTH_64BIT 0x11 ++ ++#define EDMAC_MAX_BURST_WIDTH 16 ++#define EDMAC_MIN_BURST_WIDTH 1 
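For reference, a minimal sketch of how kernel code might drive the exported memory-to-memory interface above; it only assumes the signatures in this patch (dmac_channel_allocate(), dmac_start_m2m(), dmac_wait(), dmac_channel_free()), and the function name, error codes, and the idea that src_phys/dst_phys are DMA-addressable physical addresses obtained elsewhere (e.g. from dma_alloc_coherent()) are illustrative assumptions, not part of the patch:

	/* Sketch: one m2m copy through EDMAC, assuming src_phys/dst_phys
	 * are physical addresses the controller can reach. */
	static int edmac_m2m_copy_once(unsigned long src_phys,
				       unsigned long dst_phys,
				       unsigned int len)
	{
		int chan;

		if (len == 0 || len > EDMAC_TRANS_MAXSIZE)
			return -EINVAL;

		chan = dmac_channel_allocate();	/* -1 when all channels are busy */
		if (chan < 0)
			return -EBUSY;

		if (dmac_start_m2m(chan, src_phys, dst_phys, len)) {
			dmac_channel_free(chan);
			return -EIO;
		}

		/* dmac_wait() polls the channel state and releases the
		 * channel on completion, error, or timeout */
		return (dmac_wait(chan) == DMAC_CHN_SUCCESS) ? 0 : -EIO;
	}

dmac_m2m_transfer() in the patch wraps this same allocate/start/wait sequence with chunking for lengths above EDMAC_TRANS_MAXSIZE.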
++#define EDMAC_CONFIG_SRC_BURST_SHIFT 24
++#define EDMAC_CONFIG_DST_BURST_SHIFT 20
++
++#define EDMAC_LLI_ALIGN 0x40
++#define EDMAC_LLI_DISABLE 0x0
++#define EDMAC_LLI_ENABLE 0x2
++
++#define EDMAC_CXCONFIG_SIGNAL_SHIFT 0x4
++#define EDMAC_CXCONFIG_MEM_TYPE 0x0
++#define EDMAC_CXCONFIG_DEV_MEM_TYPE 0x1
++#define EDMAC_CXCONFIG_TSF_TYPE_SHIFT 0x2
++#define EDMAC_CXCONFIG_LLI_START 0x1
++
++#define EDMAC_CXCONFIG_ITC_EN 0x1
++#define EDMAC_CXCONFIG_ITC_EN_SHIFT 0x1
++
++#define CCFG_EN 0x1
++
++/* DMAC peripheral structure */
++typedef struct edmac_peripheral {
++	/* peripheral ID */
++	unsigned int peri_id;
++	/* peripheral data register address */
++	unsigned long peri_addr;
++	/* config request (DMAC host selection) */
++	int host_sel;
++#define DMAC_HOST0 0
++#define DMAC_HOST1 1
++#define DMAC_NOT_USE (-1)
++	/* default channel configuration word */
++	unsigned int transfer_cfg;
++	/* default transfer width */
++	unsigned int transfer_width;
++	unsigned int dynamic_periphery_num;
++} edmac_peripheral;
++
++
++#define PERI_ID_OFFSET 4
++#define EDMA_SRC_WIDTH_OFFSET 16
++#define EDMA_DST_WIDTH_OFFSET 12
++#define EDMA_CH_ENABLE 1
++
++#define PERI_8BIT_MODE 0
++#define PERI_16BIT_MODE 1
++#define PERI_32BIT_MODE 2
++#define PERI_64BIT_MODE 3
++
++#define EDMAC_LLI_PAGE_NUM 0x4
++#endif
+diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
+index eb73b5d63..f89d3a813 100644
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -195,3 +195,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
+ obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
+ obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
+ obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o
++obj-$(CONFIG_ARCH_BSP) += vendor/
+diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
+index 9fc1f3dd4..87b017ee2 100644
+--- a/drivers/gpio/gpio-pl061.c
++++ b/drivers/gpio/gpio-pl061.c
+@@ -25,6 +25,10 @@
+ #include
+ #include
+ #include
++#ifdef CONFIG_ARCH_BSP
++#include "vendor/vendor_gpio.h"
++#endif
++
+
+ #define GPIODIR 0x400
+ #define GPIOIS 0x404
+@@ -354,11 +358,19 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
+ 	girq->parents[0] = irq;
+ 	girq->default_type = IRQ_TYPE_NONE;
+ 	girq->handler = handle_bad_irq;
+-
++#ifdef CONFIG_ARCH_BSP
++	ret = vendor_gpio_init_clk_and_base(adev, &pl061->gc, pl061->base);
++	if (ret)
++		return ret;
++#endif
+ 	ret = devm_gpiochip_add_data(dev, &pl061->gc, pl061);
+ 	if (ret)
+ 		return ret;
+-
++#ifdef CONFIG_ARCH_BSP
++	ret = vendor_gpio_init_irq(adev, &pl061->gc, pl061->base);
++	if (ret)
++		return ret;
++#endif
+ 	amba_set_drvdata(adev, pl061);
+ 	dev_info(dev, "PL061 GPIO chip registered\n");
+
+diff --git a/drivers/gpio/vendor/Makefile b/drivers/gpio/vendor/Makefile
+new file mode 100644
+index 000000000..a46d67937
+--- /dev/null
++++ b/drivers/gpio/vendor/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_ARCH_BSP) += vendor_gpio.o
+diff --git a/drivers/gpio/vendor/vendor_gpio.c b/drivers/gpio/vendor/vendor_gpio.c
+new file mode 100644
+index 000000000..ab8d357c2
+--- /dev/null
++++ b/drivers/gpio/vendor/vendor_gpio.c
+@@ -0,0 +1,92 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */ ++#include "vendor_gpio.h" ++ ++struct gpio_vendor_irq_data { ++ void __iomem *base; ++ struct gpio_chip *gc; ++}; ++ ++irqreturn_t vendor_gpio_irq_handler(int irq, void *data) ++{ ++ unsigned long pending; ++ int offset; ++ struct gpio_vendor_irq_data *vendor_irq_data = data; ++ struct gpio_chip *gc = vendor_irq_data->gc; ++ ++ pending = readb(vendor_irq_data->base + VENDOR_GPIOMIS); ++ writeb(pending, vendor_irq_data->base + VENDOR_GPIOIC); ++ if (pending) { ++ for_each_set_bit(offset, &pending, VENDOR_GPIO_NR) ++ generic_handle_irq(irq_find_mapping(gc->irq.domain, ++ offset)); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++int vendor_gpio_init_clk_and_base(struct amba_device *adev, struct gpio_chip *gc, ++ void __iomem *base) ++{ ++ int ret, gpio_idx; ++ struct clk *clk; ++ struct device *dev = &adev->dev; ++ struct gpio_irq_chip *girq = &gc->irq; ++ ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_warn(dev, "The GPIO clock automatic enable not support\n"); ++ } else { ++ ret = clk_prepare_enable(clk); ++ if (ret) { ++ dev_warn(dev, "The GPIO clock request failed\n"); ++ return ret; ++ } ++ } ++ ++ if (dev->of_node) { ++ gpio_idx = of_alias_get_id(dev->of_node, "gpio"); ++ if (gpio_idx < 0) ++ return -ENOMEM; ++ gc->base = gpio_idx * VENDOR_GPIO_NR; ++ } ++ ++ if (gc->base < 0) ++ gc->base = -1; ++ ++ writeb(0, base + VENDOR_GPIOIE); /* disable irqs */ ++ ++ girq->parent_handler = (irq_flow_handler_t)vendor_gpio_irq_handler; ++ devm_kfree(dev, girq->parents); ++ girq->num_parents = 0; ++ ++ return 0; ++} ++ ++int vendor_gpio_init_irq(struct amba_device *adev, struct gpio_chip *gc, ++ void __iomem *base) ++{ ++ int ret, gpio_idx; ++ struct device *dev = &adev->dev; ++ struct gpio_vendor_irq_data *vendor_irq_data = NULL; ++ ++ vendor_irq_data = devm_kzalloc(dev, sizeof(struct gpio_vendor_irq_data), GFP_KERNEL); ++ if (vendor_irq_data == NULL) ++ return -ENOMEM; ++ ++ vendor_irq_data->base = base; ++ vendor_irq_data->gc = gc; ++ ++ ret = devm_request_irq(dev, adev->irq[0], vendor_gpio_irq_handler, IRQF_SHARED, ++ dev_name(dev), vendor_irq_data); ++ if (ret) { ++ dev_info(dev, "request irq failed: %d\n", ret); ++ return ret; ++ } ++ ++ for (gpio_idx = 0; gpio_idx < gc->ngpio; gpio_idx++) ++ irq_set_parent(irq_find_mapping(gc->irq.domain, gpio_idx), adev->irq[0]); ++ ++ return 0; ++} +diff --git a/drivers/gpio/vendor/vendor_gpio.h b/drivers/gpio/vendor/vendor_gpio.h +new file mode 100644 +index 000000000..119a5c6bf +--- /dev/null ++++ b/drivers/gpio/vendor/vendor_gpio.h +@@ -0,0 +1,27 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
++ */ ++#ifndef __VENDOR_LINUX_GPIO_H ++#define __VENDOR_LINUX_GPIO_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define VENDOR_GPIO_NR 8 ++#define VENDOR_GPIOIE 0x410 ++#define VENDOR_GPIOMIS 0x418 ++#define VENDOR_GPIOIC 0x41C ++ ++int vendor_gpio_init_clk_and_base(struct amba_device *adev, struct gpio_chip *gc, ++ void __iomem *base); ++int vendor_gpio_init_irq(struct amba_device *adev, struct gpio_chip *gc, ++ void __iomem *base); ++ ++#endif /* __VENDOR_LINUX_GPIO_H */ +diff --git a/drivers/hck/Kconfig b/drivers/hck/Kconfig +deleted file mode 100644 +index 1028c52a3..000000000 +--- a/drivers/hck/Kconfig ++++ /dev/null +@@ -1,21 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0-only +-menu "Hck" +- +-config HCK +- bool "Hck Drivers" +- help +- Enable support for various drivers needed on the OpenHarmony Common Kernel +- +-if HCK +- +-config HCK_VENDOR_HOOKS +- bool "Hck Vendor Hooks" +- help +- Enable vendor hooks implemented as tracepoints +- +- Allow vendor modules to attach to tracepoint "hooks" defined via +- DECLARE_HCK_HOOK DECLARE_HCK_RESTRICTED_HOOK +- +-endif # if HCK +- +-endmenu +diff --git a/drivers/hck/Makefile b/drivers/hck/Makefile +deleted file mode 100644 +index 93dc6acc7..000000000 +--- a/drivers/hck/Makefile ++++ /dev/null +@@ -1,4 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0-only +-ccflags-y += -I$(src) +- +-obj-$(CONFIG_HCK_VENDOR_HOOKS) += vendor_hooks.o +\ No newline at end of file +diff --git a/drivers/hck/vendor_hooks.c b/drivers/hck/vendor_hooks.c +deleted file mode 100644 +index 6dce54016..000000000 +--- a/drivers/hck/vendor_hooks.c ++++ /dev/null +@@ -1,17 +0,0 @@ +-//SPDX-License-Identifier: GPL-2.0-only +-/*vendor_hooks.c +- * +- *OpenHarmony Common Kernel Vendor Hook Support +- * +- */ +- +-/* lite vendor hook */ +-#define CREATE_LITE_VENDOR_HOOK +-/* add your lite vendor hook header file here */ +-#include +-#include +-#include +-#include +-#include +-#include +-#include +diff --git a/drivers/hyperhold/Kconfig b/drivers/hyperhold/Kconfig +deleted file mode 100644 +index 4bba0efd1..000000000 +--- a/drivers/hyperhold/Kconfig ++++ /dev/null +@@ -1,14 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config HYPERHOLD +- bool "Hyperhold driver" +- select HYPERHOLD_ZSWAPD +- select HYPERHOLD_MEMCG +- default n +- help +- Hyperhold driver. +- +-config HYPERHOLD_DEBUG +- bool "Debug info for Hyperhold driver" +- depends on HYPERHOLD +- help +- Debug info for Hyperhold driver. +diff --git a/drivers/hyperhold/Makefile b/drivers/hyperhold/Makefile +deleted file mode 100644 +index b45a1a678..000000000 +--- a/drivers/hyperhold/Makefile ++++ /dev/null +@@ -1,4 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-hyperhold-y := hp_core.o hp_device.o hp_space.o hp_iotab.o +- +-obj-$(CONFIG_HYPERHOLD) += hyperhold.o +diff --git a/drivers/hyperhold/hp_core.c b/drivers/hyperhold/hp_core.c +deleted file mode 100644 +index 6de2f06c9..000000000 +--- a/drivers/hyperhold/hp_core.c ++++ /dev/null +@@ -1,854 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/hyperhold/hp_core.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +- #define pr_fmt(fmt) "[HYPERHOLD]" fmt +- +-#include +-#include +-#include +-#include +- +-#include "hyperhold.h" +-#include "hp_device.h" +-#include "hp_space.h" +-#include "hp_iotab.h" +- +-#define HP_DFLT_DEVICE "/dev/by-name/hyperhold" +-#define HP_DFLT_EXT_SIZE (1 << 15) +-#define HP_DEV_NAME_LEN 256 +-#define HP_STATE_LEN 10 +- +-#define CHECK(cond, ...) 
((cond) || (pr_err(__VA_ARGS__), false)) +-#define CHECK_BOUND(var, min, max) \ +- CHECK((var) >= (min) && (var) <= (max), \ +- "%s %u out of bounds %u ~ %u!\n", #var, (var), (min), (max)) +-#define CHECK_INITED CHECK(hyperhold.inited, "hyperhold is not enable!\n") +-#define CHECK_ENABLE (CHECK_INITED && CHECK(hyperhold.enable, "hyperhold is readonly!\n")) +- +-struct hyperhold { +- bool enable; +- bool inited; +- +- char device_name[HP_DEV_NAME_LEN]; +- u32 extent_size; +- u32 enable_soft_crypt; +- +- struct hp_device dev; +- struct hp_space spc; +- +- struct workqueue_struct *read_wq; +- struct workqueue_struct *write_wq; +- +- struct mutex init_lock; +-}; +- +-struct hyperhold hyperhold; +- +-atomic64_t mem_used = ATOMIC64_INIT(0); +-#ifdef CONFIG_HYPERHOLD_DEBUG +-/* +- * return the memory overhead of hyperhold module +- */ +-u64 hyperhold_memory_used(void) +-{ +- return atomic64_read(&mem_used) + hpio_memory() + space_memory(); +-} +-#endif +- +-void hyperhold_disable(bool force) +-{ +- if (!CHECK_INITED) +- return; +- if (!force && !CHECK_ENABLE) +- return; +- +- mutex_lock(&hyperhold.init_lock); +- hyperhold.enable = false; +- if (!wait_for_space_empty(&hyperhold.spc, force)) +- goto out; +- hyperhold.inited = false; +- wait_for_iotab_empty(); +- destroy_workqueue(hyperhold.read_wq); +- destroy_workqueue(hyperhold.write_wq); +- deinit_space(&hyperhold.spc); +- crypto_deinit(&hyperhold.dev); +- unbind_bdev(&hyperhold.dev); +-out: +- if (hyperhold.inited) +- pr_info("hyperhold is disabled, read only.\n"); +- else +- pr_info("hyperhold is totally disabled!\n"); +- mutex_unlock(&hyperhold.init_lock); +-} +-EXPORT_SYMBOL(hyperhold_disable); +- +-void hyperhold_enable(void) +-{ +- bool enable = true; +- +- if (hyperhold.inited) +- goto out; +- +- mutex_lock(&hyperhold.init_lock); +- if (hyperhold.inited) +- goto unlock; +- if (!bind_bdev(&hyperhold.dev, hyperhold.device_name)) +- goto err1; +- if (!crypto_init(&hyperhold.dev, hyperhold.enable_soft_crypt)) +- goto err2; +- if (!init_space(&hyperhold.spc, hyperhold.dev.dev_size, hyperhold.extent_size)) +- goto err3; +- hyperhold.read_wq = alloc_workqueue("hyperhold_read", WQ_HIGHPRI | WQ_UNBOUND, 0); +- if (!hyperhold.read_wq) +- goto err4; +- hyperhold.write_wq = alloc_workqueue("hyperhold_write", 0, 0); +- if (!hyperhold.write_wq) +- goto err5; +- hyperhold.inited = true; +- goto unlock; +-err5: +- destroy_workqueue(hyperhold.read_wq); +-err4: +- deinit_space(&hyperhold.spc); +-err3: +- crypto_deinit(&hyperhold.dev); +-err2: +- unbind_bdev(&hyperhold.dev); +-err1: +- enable = false; +-unlock: +- mutex_unlock(&hyperhold.init_lock); +-out: +- if (enable) { +- hyperhold.enable = true; +- pr_info("hyperhold is enabled.\n"); +- } else { +- hyperhold.enable = false; +- pr_err("hyperhold enable failed!\n"); +- } +-} +-EXPORT_SYMBOL(hyperhold_enable); +- +-static int enable_sysctl_handler(struct ctl_table *table, int write, +- void *buffer, size_t *lenp, loff_t *ppos) +-{ +- const struct cred *cred = current_cred(); +- char *filter_buf; +- +- filter_buf = strstrip((char *)buffer); +- if (write) { +- if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && +- !uid_eq(cred->euid, GLOBAL_ROOT_UID)) { +- pr_err("no permission to enable/disable eswap!\n"); +- return 0; +- } +- if (!strcmp(filter_buf, "enable")) +- hyperhold_enable(); +- else if (!strcmp(filter_buf, "disable")) +- hyperhold_disable(false); +- else if (!strcmp(filter_buf, "force_disable")) +- hyperhold_disable(true); +- } else { +- if (*lenp < HP_STATE_LEN || *ppos) { +- *lenp = 0; +- return 0; +- 
} +- if (hyperhold.enable) +- strcpy(buffer, "enable\n"); +- else if (hyperhold.inited) +- strcpy(buffer, "readonly\n"); +- else +- strcpy(buffer, "disable\n"); +- *lenp = strlen(buffer); +- *ppos += *lenp; +-#ifdef CONFIG_HYPERHOLD_DEBUG +- pr_info("hyperhold memory overhead = %llu.\n", hyperhold_memory_used()); +-#endif +- } +- return 0; +-} +- +-static int device_sysctl_handler(struct ctl_table *table, int write, +- void *buffer, size_t *lenp, loff_t *ppos) +-{ +- int ret; +- +- mutex_lock(&hyperhold.init_lock); +- if (write && hyperhold.inited) { +- pr_err("hyperhold device is busy!\n"); +- ret = -EBUSY; +- goto unlock; +- } +- ret = proc_dostring(table, write, buffer, lenp, ppos); +- if (write && !ret) { +- hyperhold.enable_soft_crypt = 1; +- pr_info("device changed, default enable soft crypt.\n"); +- } +-unlock: +- mutex_unlock(&hyperhold.init_lock); +- +- return ret; +-} +- +-static int extent_sysctl_handler(struct ctl_table *table, int write, +- void *buffer, size_t *lenp, loff_t *ppos) +-{ +- int ret; +- +- mutex_lock(&hyperhold.init_lock); +- if (write && hyperhold.inited) { +- pr_err("hyperhold device is busy!\n"); +- ret = -EBUSY; +- goto unlock; +- } +- ret = proc_douintvec(table, write, buffer, lenp, ppos); +-unlock: +- mutex_unlock(&hyperhold.init_lock); +- +- return ret; +-} +- +-static int crypto_sysctl_handler(struct ctl_table *table, int write, +- void *buffer, size_t *lenp, loff_t *ppos) +-{ +- int ret; +- +- mutex_lock(&hyperhold.init_lock); +- if (write && hyperhold.inited) { +- pr_err("hyperhold device is busy!\n"); +- ret = -EBUSY; +- goto unlock; +- } +- ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos); +-unlock: +- mutex_unlock(&hyperhold.init_lock); +- +- return ret; +-} +- +-static struct ctl_table_header *hp_sysctl_header; +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +-static struct ctl_table hp_sys_table[] = { +- { +- .procname = "enable", +- .mode = 0666, +- .proc_handler = enable_sysctl_handler, +- }, +- { +- .procname = "device", +- .data = &hyperhold.device_name, +- .maxlen = sizeof(hyperhold.device_name), +- .mode = 0644, +- .proc_handler = device_sysctl_handler, +- }, +- { +- .procname = "extent_size", +- .data = &hyperhold.extent_size, +- .maxlen = sizeof(hyperhold.extent_size), +- .mode = 0644, +- .proc_handler = extent_sysctl_handler, +- }, +- { +- .procname = "soft_crypt", +- .data = &hyperhold.enable_soft_crypt, +- .maxlen = sizeof(hyperhold.enable_soft_crypt), +- .mode = 0644, +- .proc_handler = crypto_sysctl_handler, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, +- {} +-}; +-#else +-static struct ctl_table hp_table[] = { +- { +- .procname = "enable", +- .mode = 0666, +- .proc_handler = enable_sysctl_handler, +- }, +- { +- .procname = "device", +- .data = &hyperhold.device_name, +- .maxlen = sizeof(hyperhold.device_name), +- .mode = 0644, +- .proc_handler = device_sysctl_handler, +- }, +- { +- .procname = "extent_size", +- .data = &hyperhold.extent_size, +- .maxlen = sizeof(hyperhold.extent_size), +- .mode = 0644, +- .proc_handler = extent_sysctl_handler, +- }, +- { +- .procname = "soft_crypt", +- .data = &hyperhold.enable_soft_crypt, +- .maxlen = sizeof(hyperhold.enable_soft_crypt), +- .mode = 0644, +- .proc_handler = crypto_sysctl_handler, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, +- {} +-}; +-static struct ctl_table hp_kernel_table[] = { +- { +- .procname = "hyperhold", +- .mode = 0555, +- .child = hp_table, +- }, +- {} +-}; +-static struct ctl_table hp_sys_table[] = { +- { +- .procname = 
"kernel", +- .mode = 0555, +- .child = hp_kernel_table, +- }, +- {} +-}; +-#endif +- +-bool is_hyperhold_enable(void) +-{ +- return hyperhold.enable; +-} +- +-static int __init hyperhold_init(void) +-{ +- strcpy(hyperhold.device_name, HP_DFLT_DEVICE); +- hyperhold.extent_size = HP_DFLT_EXT_SIZE; +- hyperhold.enable_soft_crypt = 1; +- mutex_init(&hyperhold.init_lock); +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +- hp_sysctl_header = register_sysctl("kernel/hyperhold", hp_sys_table); +-#else +- hp_sysctl_header = register_sysctl_table(hp_sys_table); +-#endif +- if (!hp_sysctl_header) { +- pr_err("register hyperhold sysctl table failed!\n"); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static void __exit hyperhold_exit(void) +-{ +- unregister_sysctl_table(hp_sysctl_header); +- hyperhold_disable(true); +-} +- +-static struct hp_space *space_of(u32 eid) +-{ +- return &hyperhold.spc; +-} +- +-/* replace this func for multi devices */ +-static struct hp_device *device_of(u32 eid) +-{ +- return &hyperhold.dev; +-} +- +-/* replace this func for multi devices */ +-u32 hyperhold_nr_extent(void) +-{ +- if (!CHECK_INITED) +- return 0; +- +- return hyperhold.spc.nr_ext; +-} +-EXPORT_SYMBOL(hyperhold_nr_extent); +- +-u32 hyperhold_extent_size(u32 eid) +-{ +- struct hp_space *spc = NULL; +- +- if (!CHECK_INITED) +- return 0; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u!\n", eid)) +- return 0; +- +- return spc->ext_size; +-} +-EXPORT_SYMBOL(hyperhold_extent_size); +- +-/* replace this func for multi devices */ +-long hyperhold_address(u32 eid, u32 offset) +-{ +- struct hp_space *spc = NULL; +- +- if (!CHECK_INITED) +- return -EINVAL; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u!\n", eid)) +- return -EINVAL; +- if (!CHECK_BOUND(offset, 0, spc->ext_size - 1)) +- return -EINVAL; +- +- return (u64)eid * spc->ext_size + offset; +-} +-EXPORT_SYMBOL(hyperhold_address); +- +-/* replace this func for multi devices */ +-int hyperhold_addr_extent(u64 addr) +-{ +- struct hp_space *spc = NULL; +- u32 eid; +- +- if (!CHECK_INITED) +- return -EINVAL; +- eid = div_u64(addr, hyperhold.spc.ext_size); +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u!\n", eid)) +- return -EINVAL; +- +- return eid; +-} +-EXPORT_SYMBOL(hyperhold_addr_extent); +- +-/* replace this func for multi devices */ +-int hyperhold_addr_offset(u64 addr) +-{ +- if (!CHECK_INITED) +- return -EINVAL; +- +- return do_div(addr, hyperhold.spc.ext_size); +-} +-EXPORT_SYMBOL(hyperhold_addr_offset); +- +-/* replace this func for multi devices */ +-int hyperhold_alloc_extent(void) +-{ +- if (!CHECK_ENABLE) +- return -EINVAL; +- +- return alloc_eid(&hyperhold.spc); +-} +-EXPORT_SYMBOL(hyperhold_alloc_extent); +- +-void hyperhold_free_extent(u32 eid) +-{ +- struct hp_space *spc = NULL; +- +- if (!CHECK_INITED) +- return; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u!\n", eid)) +- return; +- +- free_eid(spc, eid); +-} +-EXPORT_SYMBOL(hyperhold_free_extent); +- +-void hyperhold_should_free_extent(u32 eid) +-{ +- struct hpio *hpio = NULL; +- struct hp_space *spc = NULL; +- +- if (!CHECK_INITED) +- return; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u", eid)) +- return; +- +- hpio = hpio_get(eid); +- if (!hpio) { +- free_eid(spc, eid); +- return; +- } +- hpio->free_extent = hyperhold_free_extent; +- hpio_put(hpio); +-} +-EXPORT_SYMBOL(hyperhold_should_free_extent); +- +-/* +- * alloc hpio struct for r/w extent at @eid, will fill hpio with new alloced +- * pages if @new_page. @return NULL on fail. 
+- */ +-struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page) +-{ +- struct hpio *hpio = NULL; +- struct hp_space *spc; +- u32 nr_page; +- +- if (!CHECK_ENABLE) +- return NULL; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u!\n", eid)) +- return NULL; +- +- nr_page = spc->ext_size / PAGE_SIZE; +- hpio = hpio_alloc(nr_page, gfp, op, new_page); +- if (!hpio) +- goto err; +- hpio->eid = eid; +- +- return hpio; +-err: +- hpio_free(hpio); +- +- return NULL; +-} +-EXPORT_SYMBOL(hyperhold_io_alloc); +- +-void hyperhold_io_free(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return; +- if (!CHECK(hpio, "hpio is null!\n")) +- return; +- +- hpio_free(hpio); +-} +-EXPORT_SYMBOL(hyperhold_io_free); +- +-/* +- * find exist read hpio of the extent @eid in iotab and inc its refcnt, +- * alloc a new hpio and insert it into iotab if there is no hpio for @eid +- */ +-struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op) +-{ +- struct hp_space *spc = NULL; +- u32 nr_page; +- +- if (!CHECK_INITED) +- return NULL; +- spc = space_of(eid); +- if (!CHECK(spc, "invalid eid %u", eid)) +- return NULL; +- +- nr_page = spc->ext_size / PAGE_SIZE; +- return hpio_get_alloc(eid, nr_page, gfp, op); +-} +-EXPORT_SYMBOL(hyperhold_io_get); +- +-bool hyperhold_io_put(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return false; +- if (!CHECK(hpio, "hpio is null!\n")) +- return false; +- +- return hpio_put(hpio); +-} +-EXPORT_SYMBOL(hyperhold_io_put); +- +-/* +- * notify all threads waiting for this hpio +- */ +-void hyperhold_io_complete(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return; +- if (!CHECK(hpio, "hpio is null!\n")) +- return; +- +- hpio_complete(hpio); +-} +-EXPORT_SYMBOL(hyperhold_io_complete); +- +-void hyperhold_io_wait(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return; +- if (!CHECK(hpio, "hpio is null!\n")) +- return; +- +- hpio_wait(hpio); +-} +-EXPORT_SYMBOL(hyperhold_io_wait); +- +-bool hyperhold_io_success(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return false; +- if (!CHECK(hpio, "hpio is null!\n")) +- return false; +- +- return hpio_get_state(hpio) == HPIO_DONE; +-} +-EXPORT_SYMBOL(hyperhold_io_success); +- +-int hyperhold_io_extent(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return -EINVAL; +- if (!CHECK(hpio, "hpio is null!\n")) +- return -EINVAL; +- +- return hpio->eid; +-} +-EXPORT_SYMBOL(hyperhold_io_extent); +- +-int hyperhold_io_operate(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return -EINVAL; +- if (!CHECK(hpio, "hpio is null!\n")) +- return -EINVAL; +- +- return hpio->op; +-} +-EXPORT_SYMBOL(hyperhold_io_operate); +- +-struct page *hyperhold_io_page(struct hpio *hpio, u32 index) +-{ +- if (!CHECK_INITED) +- return NULL; +- if (!CHECK(hpio, "hpio is null!\n")) +- return NULL; +- if (!CHECK_BOUND(index, 0, hpio->nr_page - 1)) +- return NULL; +- +- return hpio->pages[index]; +-} +-EXPORT_SYMBOL(hyperhold_io_page); +- +-bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page) +-{ +- if (!CHECK_INITED) +- return false; +- if (!CHECK(hpio, "hpio is null!\n")) +- return false; +- if (!CHECK(page, "page is null!\n")) +- return false; +- if (!CHECK_BOUND(index, 0, hpio->nr_page - 1)) +- return false; +- +- get_page(page); +- atomic64_add(PAGE_SIZE, &mem_used); +- BUG_ON(hpio->pages[index]); +- hpio->pages[index] = page; +- +- return true; +-} +-EXPORT_SYMBOL(hyperhold_io_add_page); +- +-u32 hyperhold_io_nr_page(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return 0; +- if (!CHECK(hpio, "hpio is null!\n")) 
+- return 0; +- +- return hpio->nr_page; +-} +-EXPORT_SYMBOL(hyperhold_io_nr_page); +- +-void *hyperhold_io_private(struct hpio *hpio) +-{ +- if (!CHECK_INITED) +- return NULL; +- if (!CHECK(hpio, "hpio is null!\n")) +- return NULL; +- +- return hpio->private; +-} +-EXPORT_SYMBOL(hyperhold_io_private); +- +-static struct page *get_encrypted_page(struct hp_device *dev, struct page *page, unsigned int op) +-{ +- struct page *encrypted_page = NULL; +- +- if (!dev->ctfm) { +- encrypted_page = page; +- get_page(encrypted_page); +- goto out; +- } +- +- encrypted_page = alloc_page(GFP_NOIO); +- if (!encrypted_page) { +- pr_err("alloc encrypted page failed!\n"); +- goto out; +- } +- encrypted_page->index = page->index; +- +- /* just alloc a new page for read */ +- if (!op_is_write(op)) +- goto out; +- +- /* encrypt page for write */ +- if (soft_crypt_page(dev->ctfm, encrypted_page, page, HP_DEV_ENCRYPT)) { +- put_page(encrypted_page); +- encrypted_page = NULL; +- } +-out: +- return encrypted_page; +-} +- +-static void put_encrypted_pages(struct bio *bio) +-{ +- struct bio_vec *bv = NULL; +- struct bvec_iter_all iter; +- +- bio_for_each_segment_all(bv, bio, iter) +- put_page(bv->bv_page); +-} +- +-static void hp_endio_work(struct work_struct *work) +-{ +- struct hpio *hpio = container_of(work, struct hpio, endio_work); +- struct hp_device *dev = NULL; +- struct bio_vec *bv = NULL; +- struct bvec_iter_all iter; +- struct page *page = NULL; +- u32 ext_size; +- sector_t sec; +- int i; +- +- if (op_is_write(hpio->op)) +- goto endio; +- ext_size = space_of(hpio->eid)->ext_size; +- dev = device_of(hpio->eid); +- sec = hpio->eid * ext_size / dev->sec_size; +- i = 0; +- bio_for_each_segment_all(bv, hpio->bio, iter) { +- page = bv->bv_page; +- BUG_ON(i >= hpio->nr_page); +- BUG_ON(!hpio->pages[i]); +- if (dev->ctfm) +- BUG_ON(soft_crypt_page(dev->ctfm, hpio->pages[i], page, HP_DEV_DECRYPT)); +- sec += PAGE_SIZE / dev->sec_size; +- i++; +- } +-endio: +- put_encrypted_pages(hpio->bio); +- bio_put(hpio->bio); +- if (hpio->endio) +- hpio->endio(hpio); +-} +- +-static void hpio_endio(struct bio *bio) +-{ +- struct hpio *hpio = bio->bi_private; +- struct workqueue_struct *wq = NULL; +- +- pr_info("hpio %p for eid %u returned %d.\n", +- hpio, hpio->eid, bio->bi_status); +- hpio_set_state(hpio, bio->bi_status ? HPIO_FAIL : HPIO_DONE); +- wq = op_is_write(hpio->op) ? 
hyperhold.write_wq : hyperhold.read_wq; +- queue_work(wq, &hpio->endio_work); +- atomic64_sub(sizeof(struct bio), &mem_used); +-} +- +-static int hpio_submit(struct hpio *hpio) +-{ +- struct hp_device *dev = NULL; +- struct bio *bio = NULL; +- struct page *page = NULL; +- u32 ext_size; +- sector_t sec; +- int i; +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +- dev = device_of(hpio->eid); +- bio = bio_alloc(dev->bdev, BIO_MAX_VECS, +- hpio->op, GFP_NOIO); +-#else +- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); +-#endif +- if (!bio) { +- pr_err("bio alloc failed!\n"); +- return -ENOMEM; +- } +- atomic64_add(sizeof(struct bio), &mem_used); +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +- bio->bi_opf = hpio->op; +-#else +- dev = device_of(hpio->eid); +- bio_set_op_attrs(bio, hpio->op, 0); +-#endif +- bio_set_dev(bio, dev->bdev); +- +- ext_size = space_of(hpio->eid)->ext_size; +- sec = div_u64((u64)hpio->eid * ext_size, dev->sec_size); +- bio->bi_iter.bi_sector = sec; +- for (i = 0; i < hpio->nr_page; i++) { +- if (!hpio->pages[i]) +- break; +- hpio->pages[i]->index = sec; +- page = get_encrypted_page(dev, hpio->pages[i], hpio->op); +- if (!page) +- goto err; +- if (!bio_add_page(bio, page, PAGE_SIZE, 0)) { +- put_page(page); +- goto err; +- } +- sec += PAGE_SIZE / dev->sec_size; +- } +- +- if (dev->blk_key) +- inline_crypt_bio(dev->blk_key, bio); +- bio->bi_private = hpio; +- bio->bi_end_io = hpio_endio; +- hpio->bio = bio; +- submit_bio(bio); +- pr_info("submit hpio %p for eid %u.\n", hpio, hpio->eid); +- +- return 0; +-err: +- put_encrypted_pages(bio); +- bio_put(bio); +- atomic64_sub(sizeof(struct bio), &mem_used); +- return -EIO; +-} +- +-static int rw_extent_async(struct hpio *hpio, hp_endio endio, void *priv, unsigned int op) +-{ +- int ret = 0; +- +- if (!hpio_change_state(hpio, HPIO_INIT, HPIO_SUBMIT)) +- return -EAGAIN; +- +- hpio->private = priv; +- hpio->endio = endio; +- INIT_WORK(&hpio->endio_work, hp_endio_work); +- +- ret = hpio_submit(hpio); +- if (ret) { +- hpio_set_state(hpio, HPIO_FAIL); +- hpio_complete(hpio); +- } +- +- return ret; +-} +- +-int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv) +-{ +- if (!CHECK_ENABLE) { +- hpio_set_state(hpio, HPIO_FAIL); +- hpio_complete(hpio); +- return -EINVAL; +- } +- +- BUG_ON(!op_is_write(hpio->op)); +- +- return rw_extent_async(hpio, endio, priv, REQ_OP_WRITE); +-} +-EXPORT_SYMBOL(hyperhold_write_async); +- +-int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv) +-{ +- if (!CHECK_INITED) { +- hpio_set_state(hpio, HPIO_FAIL); +- hpio_complete(hpio); +- return -EINVAL; +- } +- +- if (op_is_write(hpio->op)) +- return -EAGAIN; +- +- return rw_extent_async(hpio, endio, priv, REQ_OP_READ); +-} +-EXPORT_SYMBOL(hyperhold_read_async); +- +-module_init(hyperhold_init) +-module_exit(hyperhold_exit) +diff --git a/drivers/hyperhold/hp_device.c b/drivers/hyperhold/hp_device.c +deleted file mode 100644 +index 666972465..000000000 +--- a/drivers/hyperhold/hp_device.c ++++ /dev/null +@@ -1,240 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/hyperhold/hp_device.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#define pr_fmt(fmt) "[HYPERHOLD]" fmt +- +-#include +-#include +-#include +-#include +- +-#include "hp_device.h" +- +-#define HP_CIPHER_MODE BLK_ENCRYPTION_MODE_AES_256_XTS +-#define HP_CIPHER_NAME "xts(aes)" +-#define HP_KEY_SIZE (64) +-#define HP_IV_SIZE (16) +- +-union hp_iv { +- __le64 index; +- __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; +-}; +- +-void unbind_bdev(struct hp_device *dev) +-{ +- int ret; +- +- if (!dev->bdev) +- goto close; +- if (!dev->old_block_size) +- goto put; +- ret = set_blocksize(dev->bdev, dev->old_block_size); +- if (ret) +- pr_err("set old block size %d failed, err = %d!\n", +- dev->old_block_size, ret); +- dev->old_block_size = 0; +-put: +- blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE); +- dev->bdev = NULL; +-close: +- if (dev->filp) +- filp_close(dev->filp, NULL); +- dev->filp = NULL; +- +- pr_info("hyperhold bdev unbinded.\n"); +-} +- +-bool bind_bdev(struct hp_device *dev, const char *name) +-{ +- struct inode *inode = NULL; +- int ret; +- +- dev->filp = filp_open(name, O_RDWR | O_LARGEFILE, 0); +- if (IS_ERR(dev->filp)) { +- pr_err("open file %s failed, err = %ld!\n", name, PTR_ERR(dev->filp)); +- dev->filp = NULL; +- goto err; +- } +- inode = dev->filp->f_mapping->host; +- if (!S_ISBLK(inode->i_mode)) { +- pr_err("%s is not a block device!\n", name); +- goto err; +- } +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +- dev->bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, dev, NULL); +-#else +- dev->bdev = blkdev_get_by_dev(inode->i_rdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, dev); +-#endif +- if (IS_ERR(dev->bdev)) { +- ret = PTR_ERR(dev->bdev); +- dev->bdev = NULL; +- pr_err("get blkdev %s failed, err = %d!\n", name, ret); +- goto err; +- } +- dev->old_block_size = block_size(dev->bdev); +- ret = set_blocksize(dev->bdev, PAGE_SIZE); +- if (ret) { +- pr_err("set %s block size failed, err = %d!\n", name, ret); +- goto err; +- } +- dev->dev_size = (u64)i_size_read(inode); +- dev->sec_size = SECTOR_SIZE; +- +- pr_info("hyperhold bind bdev %s of size %llu / %u succ.\n", +- name, dev->dev_size, dev->sec_size); +- +- return true; +-err: +- unbind_bdev(dev); +- +- return false; +-} +- +-int soft_crypt_page(struct crypto_skcipher *ctfm, struct page *dst_page, +- struct page *src_page, unsigned int op) +-{ +- struct skcipher_request *req = NULL; +- DECLARE_CRYPTO_WAIT(wait); +- struct scatterlist dst, src; +- int ret = 0; +- union hp_iv iv; +- +- memset(&iv, 0, sizeof(union hp_iv)); +- iv.index = cpu_to_le64(src_page->index); +- +- req = skcipher_request_alloc(ctfm, GFP_NOIO); +- if (!req) { +- pr_err("alloc skcipher request failed!\n"); +- return -ENOMEM; +- } +- +- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, +- crypto_req_done, &wait); +- sg_init_table(&dst, 1); +- sg_set_page(&dst, dst_page, PAGE_SIZE, 0); +- sg_init_table(&src, 1); +- sg_set_page(&src, src_page, PAGE_SIZE, 0); +- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &iv); +- if (op == HP_DEV_ENCRYPT) +- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); +- else if (op == HP_DEV_DECRYPT) +- ret = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); +- else +- BUG(); +- +- skcipher_request_free(req); +- +- if (ret) +- pr_err("%scrypt failed!\n", op == HP_DEV_ENCRYPT ? 
"en" : "de"); +- +- return ret; +-} +- +-static struct crypto_skcipher *soft_crypto_init(const u8 *key) +-{ +- char *cipher = HP_CIPHER_NAME; +- u32 key_len = HP_KEY_SIZE; +- struct crypto_skcipher *ctfm = NULL; +- int ret; +- +- ctfm = crypto_alloc_skcipher(cipher, 0, 0); +- if (IS_ERR(ctfm)) { +- pr_err("alloc ctfm failed, ret = %ld!\n", PTR_ERR(ctfm)); +- ctfm = NULL; +- goto err; +- } +- crypto_skcipher_clear_flags(ctfm, ~0); +- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); +- ret = crypto_skcipher_setkey(ctfm, key, key_len); +- if (ret) { +- pr_err("ctfm setkey failed, ret = %d!\n", ret); +- goto err; +- } +- +- return ctfm; +-err: +- if (ctfm) +- crypto_free_skcipher(ctfm); +- +- return NULL; +-} +- +-#ifdef CONFIG_BLK_INLINE_ENCRYPTION +-void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio) +-{ +- union hp_iv iv; +- +- memset(&iv, 0, sizeof(union hp_iv)); +- iv.index = cpu_to_le64(bio->bi_iter.bi_sector); +- +- bio_crypt_set_ctx(bio, blk_key, iv.dun, GFP_NOIO); +-} +- +-static struct blk_crypto_key *inline_crypto_init(const u8 *key) +-{ +- struct blk_crypto_key *blk_key = NULL; +- u32 dun_bytes = HP_IV_SIZE - sizeof(__le64); +- int ret; +- +- blk_key = kzalloc(sizeof(struct blk_crypto_key), GFP_KERNEL); +- if (!blk_key) { +- pr_err("blk key alloc failed!\n"); +- goto err; +- } +- ret = blk_crypto_init_key(blk_key, key, HP_CIPHER_MODE, dun_bytes, PAGE_SIZE); +- if (ret) { +- pr_err("blk key init failed, ret = %d!\n", ret); +- goto err; +- } +- +- return blk_key; +-err: +- if (blk_key) +- kfree_sensitive(blk_key); +- +- return NULL; +-} +-#else +-void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio) {} +-static struct blk_crypto_key *inline_crypto_init(const u8 *key) +-{ +- pr_err("CONFIG_BLK_INLINE_ENCRYPTION is not enabled!\n"); +- return NULL; +-} +-#endif +- +-bool crypto_init(struct hp_device *dev, bool soft) +-{ +- u8 key[HP_KEY_SIZE]; +- bool ret = false; +- +- get_random_bytes(key, HP_KEY_SIZE); +- if (soft) { +- dev->ctfm = soft_crypto_init(key); +- ret = dev->ctfm; +- } else { +- dev->blk_key = inline_crypto_init(key); +- ret = dev->blk_key; +- if (ret) +- pr_warn("soft crypt has been turned off, now apply hard crypt!\n"); +- } +- memzero_explicit(key, HP_KEY_SIZE); +- +- return ret; +-} +- +-void crypto_deinit(struct hp_device *dev) +-{ +- if (dev->ctfm) { +- crypto_free_skcipher(dev->ctfm); +- dev->ctfm = NULL; +- } +- if (dev->blk_key) { +- kfree_sensitive(dev->blk_key); +- dev->blk_key = NULL; +- } +-} +diff --git a/drivers/hyperhold/hp_device.h b/drivers/hyperhold/hp_device.h +deleted file mode 100644 +index 06f007891..000000000 +--- a/drivers/hyperhold/hp_device.h ++++ /dev/null +@@ -1,38 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/hyperhold/hp_device.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#ifndef _HP_DEVICE_H_ +-#define _HP_DEVICE_H_ +- +-#include +-#include +-#include +- +-enum { +- HP_DEV_ENCRYPT, +- HP_DEV_DECRYPT, +-}; +- +-struct hp_device { +- struct file *filp; +- struct block_device *bdev; +- u32 old_block_size; +- u64 dev_size; +- u32 sec_size; +- +- struct crypto_skcipher *ctfm; +- struct blk_crypto_key *blk_key; +-}; +- +-void unbind_bdev(struct hp_device *dev); +-bool bind_bdev(struct hp_device *dev, const char *name); +-bool crypto_init(struct hp_device *dev, bool soft); +-void crypto_deinit(struct hp_device *dev); +-int soft_crypt_page(struct crypto_skcipher *ctfm, +- struct page *dst_page, struct page *src_page, unsigned int op); +-void inline_crypt_bio(struct blk_crypto_key *blk_key, struct bio *bio); +-#endif +diff --git a/drivers/hyperhold/hp_iotab.c b/drivers/hyperhold/hp_iotab.c +deleted file mode 100644 +index 258cb83a1..000000000 +--- a/drivers/hyperhold/hp_iotab.c ++++ /dev/null +@@ -1,271 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/hyperhold/hp_iotab.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#define pr_fmt(fmt) "[HYPERHOLD]" fmt +- +-#include +-#include +- +-#include "hp_iotab.h" +- +-atomic64_t hpio_mem = ATOMIC64_INIT(0); +-u64 hpio_memory(void) +-{ +- return atomic64_read(&hpio_mem); +-} +- +-struct hp_iotab { +- struct list_head io_list; +- rwlock_t lock; +- u32 io_cnt; +- wait_queue_head_t empty_wq; +-}; +- +-/* store all inflight hpio in iotab */ +-struct hp_iotab iotab = { +- .io_list = LIST_HEAD_INIT(iotab.io_list), +- .lock = __RW_LOCK_UNLOCKED(iotab.lock), +- .io_cnt = 0, +- .empty_wq = __WAIT_QUEUE_HEAD_INITIALIZER(iotab.empty_wq), +-}; +- +-static struct hpio *__iotab_search_get(struct hp_iotab *iotab, u32 eid) +-{ +- struct hpio *hpio = NULL; +- +- list_for_each_entry(hpio, &iotab->io_list, list) +- if (hpio->eid == eid && kref_get_unless_zero(&hpio->refcnt)) +- return hpio; +- +- return NULL; +-} +- +-static struct hpio *iotab_search_get(struct hp_iotab *iotab, u32 eid) +-{ +- struct hpio *hpio = NULL; +- unsigned long flags; +- +- read_lock_irqsave(&iotab->lock, flags); +- hpio = __iotab_search_get(iotab, eid); +- read_unlock_irqrestore(&iotab->lock, flags); +- +- pr_info("find hpio %p for eid %u.\n", hpio, eid); +- +- return hpio; +-} +- +-/* +- * insert @hpio into @iotab, cancel insertion if there is a hpio of the same +- * @eid, inc the refcnt of duplicated hpio and return it +- */ +-static struct hpio *iotab_insert(struct hp_iotab *iotab, struct hpio *hpio) +-{ +- struct hpio *dup = NULL; +- unsigned long flags; +- +- write_lock_irqsave(&iotab->lock, flags); +- dup = __iotab_search_get(iotab, hpio->eid); +- if (dup) { +- pr_info("find exist hpio %p for eid %u, insert hpio %p failed.\n", +- dup, hpio->eid, hpio); +- goto unlock; +- } +- list_add(&hpio->list, &iotab->io_list); +- iotab->io_cnt++; +- pr_info("insert new hpio %p for eid %u.\n", hpio, hpio->eid); +-unlock: +- write_unlock_irqrestore(&iotab->lock, flags); +- +- return dup; +-} +- +-static void iotab_delete(struct hp_iotab *iotab, struct hpio *hpio) +-{ +- unsigned long flags; +- +- write_lock_irqsave(&iotab->lock, flags); +- list_del(&hpio->list); +- iotab->io_cnt--; +- if (!iotab->io_cnt) +- wake_up(&iotab->empty_wq); +- write_unlock_irqrestore(&iotab->lock, flags); +- +- pr_info("delete hpio %p for eid %u from iotab.\n", hpio, hpio->eid); +-} +- +-static void hpio_clear_pages(struct hpio *hpio) +-{ +- int i; +- +- if (!hpio->pages) +- return; +- +- for (i = 0; i < hpio->nr_page; i++) +- if 
(hpio->pages[i]) { +- put_page(hpio->pages[i]); +- atomic64_sub(PAGE_SIZE, &hpio_mem); +- } +- kfree(hpio->pages); +- atomic64_sub(sizeof(struct page *) * hpio->nr_page, &hpio_mem); +- hpio->nr_page = 0; +- hpio->pages = NULL; +-} +- +-/* +- * alloc pages array for @hpio, fill in new alloced pages if @new_page +- */ +-static bool hpio_fill_pages(struct hpio *hpio, u32 nr_page, gfp_t gfp, bool new_page) +-{ +- int i; +- +- BUG_ON(hpio->pages); +- hpio->nr_page = nr_page; +- hpio->pages = kcalloc(hpio->nr_page, sizeof(struct page *), gfp); +- if (!hpio->pages) +- goto err; +- atomic64_add(sizeof(struct page *) * hpio->nr_page, &hpio_mem); +- +- if (!new_page) +- goto out; +- for (i = 0; i < hpio->nr_page; i++) { +- hpio->pages[i] = alloc_page(gfp); +- if (!hpio->pages[i]) +- goto err; +- atomic64_add(PAGE_SIZE, &hpio_mem); +- } +-out: +- return true; +-err: +- hpio_clear_pages(hpio); +- +- return false; +-} +- +-void hpio_free(struct hpio *hpio) +-{ +- if (!hpio) +- return; +- +- pr_info("free hpio = %p.\n", hpio); +- +- hpio_clear_pages(hpio); +- kfree(hpio); +- atomic64_sub(sizeof(struct hpio), &hpio_mem); +-} +- +-struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page) +-{ +- struct hpio *hpio = NULL; +- +- hpio = kzalloc(sizeof(struct hpio), gfp); +- if (!hpio) +- goto err; +- atomic64_add(sizeof(struct hpio), &hpio_mem); +- if (!hpio_fill_pages(hpio, nr_page, gfp, new_page)) +- goto err; +- hpio->op = op; +- atomic_set(&hpio->state, HPIO_INIT); +- kref_init(&hpio->refcnt); +- init_completion(&hpio->wait); +- +- return hpio; +-err: +- hpio_free(hpio); +- +- return NULL; +-} +- +-struct hpio *hpio_get(u32 eid) +-{ +- return iotab_search_get(&iotab, eid); +-} +- +-struct hpio *hpio_get_alloc(u32 eid, u32 nr_page, gfp_t gfp, unsigned int op) +-{ +- struct hpio *hpio = NULL; +- struct hpio *dup = NULL; +- +- hpio = iotab_search_get(&iotab, eid); +- if (hpio) { +- pr_info("find exist hpio %p for eid %u.\n", hpio, eid); +- goto out; +- } +- hpio = hpio_alloc(nr_page, gfp, op, true); +- if (!hpio) +- goto out; +- hpio->eid = eid; +- +- pr_info("alloc hpio %p for eid %u.\n", hpio, eid); +- +- dup = iotab_insert(&iotab, hpio); +- if (dup) { +- hpio_free(hpio); +- hpio = dup; +- } +-out: +- return hpio; +-} +- +-static void hpio_release(struct kref *kref) +-{ +- struct hpio *hpio = container_of(kref, struct hpio, refcnt); +- +- iotab_delete(&iotab, hpio); +- if (hpio->free_extent) +- hpio->free_extent(hpio->eid); +- hpio_free(hpio); +-} +- +-bool hpio_put(struct hpio *hpio) +-{ +- pr_info("put hpio %p for eid %u, ref = %u.\n", hpio, hpio->eid, kref_read(&hpio->refcnt)); +- return kref_put(&hpio->refcnt, hpio_release); +-} +- +-void hpio_complete(struct hpio *hpio) +-{ +- pr_info("complete hpio %p for eid %u.\n", hpio, hpio->eid); +- complete_all(&hpio->wait); +-} +- +-void hpio_wait(struct hpio *hpio) +-{ +- wait_for_completion(&hpio->wait); +-} +- +-enum hpio_state hpio_get_state(struct hpio *hpio) +-{ +- return atomic_read(&hpio->state); +-} +- +-void hpio_set_state(struct hpio *hpio, enum hpio_state state) +-{ +- atomic_set(&hpio->state, state); +-} +- +-bool hpio_change_state(struct hpio *hpio, enum hpio_state from, enum hpio_state to) +-{ +- return atomic_cmpxchg(&hpio->state, from, to) == from; +-} +- +-static void dump_iotab(struct hp_iotab *iotab) +-{ +- struct hpio *hpio = NULL; +- unsigned long flags; +- +- pr_info("dump inflight hpio in iotab.\n"); +- read_lock_irqsave(&iotab->lock, flags); +- list_for_each_entry(hpio, &iotab->io_list, list) +- pr_info("hpio %p 
for eid %u is inflight.\n", hpio, hpio->eid); +- read_unlock_irqrestore(&iotab->lock, flags); +-} +- +-void wait_for_iotab_empty(void) +-{ +- dump_iotab(&iotab); +- wait_event(iotab.empty_wq, !iotab.io_cnt); +-} +diff --git a/drivers/hyperhold/hp_iotab.h b/drivers/hyperhold/hp_iotab.h +deleted file mode 100644 +index b3785f7aa..000000000 +--- a/drivers/hyperhold/hp_iotab.h ++++ /dev/null +@@ -1,63 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/hyperhold/hp_iotab.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef _HP_IOTAB_H_ +-#define _HP_IOTAB_H_ +- +-#include +-#include +-#include +-#include +- +-enum hpio_state { +- HPIO_INIT, +- HPIO_SUBMIT, +- HPIO_DONE, +- HPIO_FAIL, +-}; +- +-struct hpio; +- +-typedef void (*hp_endio)(struct hpio *); +- +-struct hpio { +- u32 eid; +- struct page **pages; +- u32 nr_page; +- void *private; +- +- unsigned int op; +- void (*free_extent)(u32 eid); +- +- atomic_t state; +- struct kref refcnt; +- struct completion wait; +- hp_endio endio; +- struct work_struct endio_work; +- +- struct bio *bio; +- struct list_head list; +-}; +- +-struct hpio *hpio_alloc(u32 nr_page, gfp_t gfp, unsigned int op, bool new_page); +-void hpio_free(struct hpio *hpio); +- +-struct hpio *hpio_get(u32 eid); +-bool hpio_put(struct hpio *hpio); +-struct hpio *hpio_get_alloc(u32 eid, u32 nr_page, gfp_t gfp, unsigned int op); +- +-void hpio_complete(struct hpio *hpio); +-void hpio_wait(struct hpio *hpio); +- +-enum hpio_state hpio_get_state(struct hpio *hpio); +-void hpio_set_state(struct hpio *hpio, enum hpio_state state); +-bool hpio_change_state(struct hpio *hpio, enum hpio_state from, enum hpio_state to); +- +-void wait_for_iotab_empty(void); +- +-u64 hpio_memory(void); +-#endif +diff --git a/drivers/hyperhold/hp_space.c b/drivers/hyperhold/hp_space.c +deleted file mode 100644 +index cb3d3439c..000000000 +--- a/drivers/hyperhold/hp_space.c ++++ /dev/null +@@ -1,122 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * drivers/hyperhold/hp_space.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#define pr_fmt(fmt) "[HYPERHOLD]" fmt +- +-#include +- +-#include "hp_space.h" +- +-atomic64_t spc_mem = ATOMIC64_INIT(0); +- +-u64 space_memory(void) +-{ +- return atomic64_read(&spc_mem); +-} +- +-void deinit_space(struct hp_space *spc) +-{ +- kvfree(spc->bitmap); +- atomic64_sub(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), &spc_mem); +- spc->ext_size = 0; +- spc->nr_ext = 0; +- atomic_set(&spc->last_alloc_bit, 0); +- atomic_set(&spc->nr_alloced, 0); +- +- pr_info("hyperhold space deinited.\n"); +-} +- +-bool init_space(struct hp_space *spc, u64 dev_size, u32 ext_size) +-{ +- if (ext_size & (PAGE_SIZE - 1)) { +- pr_err("extent size %u do not align to page size %lu!", ext_size, PAGE_SIZE); +- return false; +- } +- if (dev_size & (ext_size - 1)) { +- pr_err("device size %llu do not align to extent size %u!", dev_size, ext_size); +- return false; +- } +- spc->ext_size = ext_size; +- spc->nr_ext = div_u64(dev_size, ext_size); +- atomic_set(&spc->last_alloc_bit, 0); +- atomic_set(&spc->nr_alloced, 0); +- init_waitqueue_head(&spc->empty_wq); +- spc->bitmap = kvzalloc(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), GFP_KERNEL); +- if (!spc->bitmap) { +- pr_err("hyperhold bitmap alloc failed.\n"); +- return false; +- } +- atomic64_add(BITS_TO_LONGS(spc->nr_ext) * sizeof(long), &spc_mem); +- +- pr_info("hyperhold space init succ, capacity = %u x %u.\n", ext_size, spc->nr_ext); +- +- return true; +-} +- +-int alloc_eid(struct hp_space *spc) +-{ +- u32 bit; +- u32 last_bit; +- +-retry: +- last_bit = atomic_read(&spc->last_alloc_bit); +- bit = find_next_zero_bit(spc->bitmap, spc->nr_ext, last_bit); +- if (bit == spc->nr_ext) +- bit = find_next_zero_bit(spc->bitmap, spc->nr_ext, 0); +- if (bit == spc->nr_ext) +- goto full; +- if (test_and_set_bit(bit, spc->bitmap)) +- goto retry; +- +- atomic_set(&spc->last_alloc_bit, bit); +- atomic_inc(&spc->nr_alloced); +- +- pr_info("hyperhold alloc extent %u.\n", bit); +- +- return bit; +-full: +- pr_err("hyperhold space is full.\n"); +- +- return -ENOSPC; +-} +- +-void free_eid(struct hp_space *spc, u32 eid) +-{ +- if (!test_and_clear_bit(eid, spc->bitmap)) { +- pr_err("eid is not alloced!\n"); +- BUG(); +- return; +- } +- if (atomic_dec_and_test(&spc->nr_alloced)) { +- pr_info("notify space empty.\n"); +- wake_up(&spc->empty_wq); +- } +- pr_info("hyperhold free extent %u.\n", eid); +-} +- +-static void dump_space(struct hp_space *spc) +-{ +- u32 i = 0; +- +- pr_info("dump alloced extent in space.\n"); +- for (i = 0; i < spc->nr_ext; i++) +- if (test_bit(i, spc->bitmap)) +- pr_info("alloced eid %u.\n", i); +-} +- +-bool wait_for_space_empty(struct hp_space *spc, bool force) +-{ +- if (!atomic_read(&spc->nr_alloced)) +- return true; +- if (!force) +- return false; +- +- dump_space(spc); +- wait_event(spc->empty_wq, !atomic_read(&spc->nr_alloced)); +- +- return true; +-} +diff --git a/drivers/hyperhold/hp_space.h b/drivers/hyperhold/hp_space.h +deleted file mode 100644 +index caaaf92a0..000000000 +--- a/drivers/hyperhold/hp_space.h ++++ /dev/null +@@ -1,30 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/hyperhold/hp_space.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#ifndef _HP_SPACE_H_ +-#define _HP_SPACE_H_ +- +-#include +- +-struct hp_space { +- u32 ext_size; +- u32 nr_ext; +- unsigned long *bitmap; +- atomic_t last_alloc_bit; +- atomic_t nr_alloced; +- wait_queue_head_t empty_wq; +-}; +- +-void deinit_space(struct hp_space *spc); +-bool init_space(struct hp_space *spc, u64 dev_size, u32 ext_size); +-int alloc_eid(struct hp_space *spc); +-void free_eid(struct hp_space *spc, u32 eid); +- +-bool wait_for_space_empty(struct hp_space *spc, bool force); +- +-u64 space_memory(void); +-#endif +diff --git a/drivers/hyperhold/hyperhold.h b/drivers/hyperhold/hyperhold.h +deleted file mode 100644 +index b65ff5444..000000000 +--- a/drivers/hyperhold/hyperhold.h ++++ /dev/null +@@ -1,52 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * drivers/hyperhold/hyperhold.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef _HYPERHOLD_H_ +-#define _HYPERHOLD_H_ +- +-#include +- +-struct hpio; +- +-typedef void (*hp_endio)(struct hpio *); +- +-void hyperhold_disable(bool force); +-void hyperhold_enable(void); +-bool is_hyperhold_enable(void); +- +-u32 hyperhold_nr_extent(void); +-u32 hyperhold_extent_size(u32 eid); +-long hyperhold_address(u32 eid, u32 offset); +-int hyperhold_addr_extent(u64 addr); +-int hyperhold_addr_offset(u64 addr); +- +-int hyperhold_alloc_extent(void); +-void hyperhold_free_extent(u32 eid); +-void hyperhold_should_free_extent(u32 eid); +- +-struct hpio *hyperhold_io_alloc(u32 eid, gfp_t gfp, unsigned int op, bool new_page); +-void hyperhold_io_free(struct hpio *hpio); +- +-struct hpio *hyperhold_io_get(u32 eid, gfp_t gfp, unsigned int op); +-bool hyperhold_io_put(struct hpio *hpio); +- +-void hyperhold_io_complete(struct hpio *hpio); +-void hyperhold_io_wait(struct hpio *hpio); +- +-bool hyperhold_io_success(struct hpio *hpio); +- +-int hyperhold_io_extent(struct hpio *hpio); +-int hyperhold_io_operate(struct hpio *hpio); +-struct page *hyperhold_io_page(struct hpio *hpio, u32 index); +-bool hyperhold_io_add_page(struct hpio *hpio, u32 index, struct page *page); +-u32 hyperhold_io_nr_page(struct hpio *hpio); +-void *hyperhold_io_private(struct hpio *hpio); +- +-int hyperhold_write_async(struct hpio *hpio, hp_endio endio, void *priv); +-int hyperhold_read_async(struct hpio *hpio, hp_endio endio, void *priv); +- +-#endif +diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile +index 3f71ce471..3b3427c4f 100644 +--- a/drivers/i2c/Makefile ++++ b/drivers/i2c/Makefile +@@ -12,6 +12,7 @@ i2c-core-$(CONFIG_OF) += i2c-core-of.o + + obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o + obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o ++obj-$(CONFIG_ARCH_BSP) += vendor/ + obj-$(CONFIG_I2C_MUX) += i2c-mux.o + obj-$(CONFIG_I2C_ATR) += i2c-atr.o + obj-y += algos/ busses/ muxes/ +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 982007a11..e9b5a21d3 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -665,6 +665,17 @@ config I2C_GPIO_FAULT_INJECTOR + faults to an I2C bus, so another bus master can be stress-tested. + This is for debugging. If unsure, say 'no'. + ++if ARCH_BSP ++config I2C_BSP ++ tristate "Vendor I2C Controller" ++ help ++ Say Y here to include support for Vendor I2C controller in the ++ Vendor SoCs. ++ ++ This driver can also be built as a module. If so, the module ++ will be called i2c-bsp. 
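++# Usage sketch (assumption, derived from the help text above): with ARCH_BSP selected, setting CONFIG_I2C_BSP=m builds the driver as i2c-bsp.ko, which can be loaded with "modprobe i2c-bsp".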
++endif ++ + config I2C_GXP + tristate "GXP I2C Interface" + depends on ARCH_HPE_GXP || COMPILE_TEST +@@ -1481,4 +1492,23 @@ config I2C_VIRTIO + This driver can also be built as a module. If so, the module + will be called i2c-virtio. + ++if ARCH_BSP ++config DMA_MSG_MIN_LEN ++ int "Vendor I2C support DMA minimum LEN" ++ depends on I2C_BSP ++ range 1 4090 ++ default 5 ++ help ++ The minimum i2c_msg LEN (in bytes) for which DMA is used; the valid range is 1 to 4090 ++ ++config DMA_MSG_MAX_LEN ++ int "Vendor I2C support DMA maximum LEN" ++ depends on I2C_BSP ++ range DMA_MSG_MIN_LEN 4090 ++ default 4090 ++ help ++ The maximum i2c_msg LEN (in bytes) for which DMA is used; the valid range is from the minimum LEN to 4090, ++ because the DMA engine moves at most 0xFFC bytes in a single transfer. ++endif ++ + endmenu +diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile +index 9be9fdb07..6043ced4f 100644 +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -156,4 +156,8 @@ obj-$(CONFIG_SCx200_ACB) += scx200_acb.o + obj-$(CONFIG_I2C_FSI) += i2c-fsi.o + obj-$(CONFIG_I2C_VIRTIO) += i2c-virtio.o + ++ifdef CONFIG_ARCH_BSP ++obj-$(CONFIG_I2C_BSP) += i2c-bsp.o ++endif ++ + ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG +diff --git a/drivers/i2c/busses/i2c-bsp.c b/drivers/i2c/busses/i2c-bsp.c +new file mode 100644 +index 000000000..183faad74 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-bsp.c +@@ -0,0 +1,1537 @@ ++/* ++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2023. All rights reserved. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if defined(CONFIG_EDMAC) ++#include ++#include ++ ++/* ++ * When edmacv310_n is enabled, msg->buf must be physically contiguous memory for DMA processing, ++ * but the dma_xfer_* paths are often handed non-contiguous buffers. So i2c_bsp allocates ++ * contiguous memory for msg->buf and uses highmem_buf_list to manage the msg->buf it allocated.
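++ * copy_to_continuous_mem() links a node into highmem_buf_list before the transfer, and ++ * released_contiguous_buf_from_list() copies the data back and frees the node afterwards.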
++ */ ++struct highmem_buf_list_node { ++ __u8 *buf; ++ __u8 *highmem_buf; ++ struct i2c_msg *msg; ++ struct list_head node; ++}; ++ ++static LIST_HEAD(highmem_buf_list); ++ ++static struct highmem_buf_list_node *search_in_highmem_buf_list(struct i2c_msg *msg) ++{ ++ struct highmem_buf_list_node *highmem_buf_node = NULL; ++ struct highmem_buf_list_node *_highmem_buf_node = NULL; ++ ++ list_for_each_entry_safe(highmem_buf_node, _highmem_buf_node, &highmem_buf_list, node) { ++ if (highmem_buf_node->msg == msg) { ++ return highmem_buf_node; ++ } ++ } ++ return NULL; ++} ++#endif ++ ++#ifdef DEBUG_BSP_I2C ++#define debug_dump_i2c_msg(msg) \ ++ do { \ ++ printk("%s::%d\n", __FILE__, __LINE__); \ ++ dump_i2c_msg(msg); \ ++ } while (0) ++ ++static void dump_i2c_msg(struct i2c_msg *msg) ++{ ++ int i = 0; ++ printk("msg->addr: %u\n", (unsigned int)msg->addr); ++ printk("msg->flags:%u\n", (unsigned int)msg->flags); ++ printk("msg->len: %u\n", (unsigned int)msg->len); ++ for (; i < msg->len; i++) { ++ printk("%d %x\n", i, msg->buf[i]); ++ } ++} ++#else ++#define debug_dump_i2c_msg(msg) ++#endif ++ ++/* ++ * I2C Registers offsets ++ */ ++#define BSP_I2C_GLB 0x0 ++#define BSP_I2C_SCL_H 0x4 ++#define BSP_I2C_SCL_L 0x8 ++#define BSP_I2C_DATA1 0x10 ++#define BSP_I2C_TXF 0x20 ++#define BSP_I2C_RXF 0x24 ++#define BSP_I2C_CMD_BASE 0x30 ++#define BSP_I2C_LOOP1 0xb0 ++#define BSP_I2C_DST1 0xb4 ++#define BSP_I2C_LOOP2 0xb8 ++#define BSP_I2C_DST2 0xbc ++#define BSP_I2C_TX_WATER 0xc8 ++#define BSP_I2C_RX_WATER 0xcc ++#define BSP_I2C_CTRL1 0xd0 ++#define BSP_I2C_CTRL2 0xd4 ++#define BSP_I2C_STAT 0xd8 ++#define BSP_I2C_INTR_RAW 0xe0 ++#define BSP_I2C_INTR_EN 0xe4 ++#define BSP_I2C_INTR_STAT 0xe8 ++ ++/* ++ * I2C Global Config Register -- BSP_I2C_GLB ++ */ ++#define GLB_EN_MASK BIT(0) ++#define GLB_SDA_HOLD_MASK GENMASK(23, 8) ++#define GLB_SDA_HOLD_SHIFT (8) ++#define should_copy_to_continuous_mem(addr) true ++ ++/* ++ * I2C Timing CMD Register -- BSP_I2C_CMD_BASE + n * 4 (n = 0, 1, 2, ... 
31) ++ */ ++#define CMD_EXIT 0x0 ++#define CMD_TX_S 0x1 ++#define CMD_TX_D1_2 0x4 ++#define CMD_TX_D1_1 0x5 ++#define CMD_TX_FIFO 0x9 ++#define CMD_RX_FIFO 0x12 ++#define CMD_RX_ACK 0x13 ++#define CMD_IGN_ACK 0x15 ++#define CMD_TX_ACK 0x16 ++#define CMD_TX_NACK 0x17 ++#define CMD_JMP1 0x18 ++#define CMD_JMP2 0x19 ++#define CMD_UP_TXF 0x1d ++#define CMD_TX_RS 0x1e ++#define CMD_TX_P 0x1f ++ ++/* ++ * I2C Control Register 1 -- BSP_I2C_CTRL1 ++ */ ++#define CTRL1_CMD_START_MASK BIT(0) ++#define CTRL1_DMA_OP_MASK (0x3 << 8) ++#define CTRL1_DMA_R (0x3 << 8) ++#define CTRL1_DMA_W (0x2 << 8) ++ ++/* ++ * I2C Status Register -- BSP_I2C_STAT ++ */ ++#define STAT_RXF_NOE_MASK BIT(16) /* RX FIFO not empty flag */ ++#define STAT_TXF_NOF_MASK BIT(19) /* TX FIFO not full flag */ ++ ++/* ++ * I2C Interrupt status and mask Register -- ++ * BSP_I2C_INTR_RAW, BSP_I2C_STAT, BSP_I2C_INTR_STAT ++ */ ++#define INTR_ABORT_MASK (BIT(0) | BIT(11)) ++#define INTR_RX_MASK BIT(2) ++#define INTR_TX_MASK BIT(4) ++#define INTR_CMD_DONE_MASK BIT(12) ++#define INTR_USE_MASK (INTR_ABORT_MASK \ ++ | INTR_RX_MASK \ ++ | INTR_TX_MASK \ ++ | INTR_CMD_DONE_MASK) ++#define INTR_ALL_MASK GENMASK(31, 0) ++ ++#define I2C_DEFAULT_FREQUENCY 100000 ++#define I2C_TXF_DEPTH 64 ++#define I2C_RXF_DEPTH 64 ++#define I2C_TXF_WATER 32 ++#define I2C_RXF_WATER 32 ++#define I2C_WAIT_TIMEOUT 0x400 ++#define I2C_IRQ_TIMEOUT (msecs_to_jiffies(1000)) ++ ++struct bsp_i2c_dev { ++ struct device *dev; ++ struct i2c_adapter adap; ++ resource_size_t phybase; ++ void __iomem *base; ++ struct clk *clk; ++ int irq; ++ ++ unsigned int freq; ++ struct i2c_msg *msg; ++ int msg_num; ++ int msg_idx; ++ unsigned int msg_buf_ptr; ++ struct completion msg_complete; ++ ++ spinlock_t lock; ++ int status; ++}; ++static inline void bsp_i2c_disable(const struct bsp_i2c_dev *i2c); ++static inline void bsp_i2c_cfg_irq(const struct bsp_i2c_dev *i2c, ++ unsigned int flag); ++static inline unsigned int bsp_i2c_clr_irq(const struct bsp_i2c_dev *i2c); ++static inline void bsp_i2c_enable(const struct bsp_i2c_dev *i2c); ++ ++#define CHECK_SDA_IN_SHIFT (16) ++#define GPIO_MODE_SHIFT (8) ++#define FORCE_SCL_OEN_SHIFT (4) ++#define FORCE_SDA_OEN_SHIFT (0) ++ ++static void bsp_i2c_rescue(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ unsigned int time_cnt; ++ int index; ++ ++ bsp_i2c_disable(i2c); ++ bsp_i2c_cfg_irq(i2c, 0); ++ bsp_i2c_clr_irq(i2c); ++ ++ val = (0x1 << GPIO_MODE_SHIFT) | (0x1 << FORCE_SCL_OEN_SHIFT) | ++ (0x1 << FORCE_SDA_OEN_SHIFT); ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++ time_cnt = 0; ++ do { ++ for (index = 0; index < 9; index++) { /* toggle SCL nine times (standard bus recovery) */ ++ val = (0x1 << GPIO_MODE_SHIFT) | 0x1; ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++ udelay(5); /* delay 5 us */ ++ ++ val = (0x1 << GPIO_MODE_SHIFT) | (0x1 << FORCE_SCL_OEN_SHIFT) | ++ (0x1 << FORCE_SDA_OEN_SHIFT); ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++ udelay(5); /* delay 5 us */ ++ } ++ ++ time_cnt++; ++ if (time_cnt > I2C_WAIT_TIMEOUT) { ++ dev_err(i2c->dev, "rescue wait timeout!\n"); ++ goto disable_rescue; ++ } ++ ++ val = readl(i2c->base + BSP_I2C_CTRL2); ++ } while (!(val & (0x1 << CHECK_SDA_IN_SHIFT))); ++ ++ val = (0x1 << GPIO_MODE_SHIFT) | (0x1 << FORCE_SCL_OEN_SHIFT) | ++ (0x1 << FORCE_SDA_OEN_SHIFT); ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++ val = (0x1 << GPIO_MODE_SHIFT) | (0x1 << FORCE_SCL_OEN_SHIFT); ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++ udelay(10); /* delay 10 us */ ++ ++ val = (0x1 << GPIO_MODE_SHIFT) | (0x1 << FORCE_SCL_OEN_SHIFT) | ++ (0x1 <<
FORCE_SDA_OEN_SHIFT); ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++ ++disable_rescue: ++ val = (0x1 << FORCE_SCL_OEN_SHIFT) | 0x1; ++ writel(val, i2c->base + BSP_I2C_CTRL2); ++} ++ ++static inline void bsp_i2c_disable(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ ++ val = readl(i2c->base + BSP_I2C_GLB); ++ val &= ~GLB_EN_MASK; ++ writel(val, i2c->base + BSP_I2C_GLB); ++} ++ ++static inline void bsp_i2c_enable(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ ++ val = readl(i2c->base + BSP_I2C_GLB); ++ val |= GLB_EN_MASK; ++ writel(val, i2c->base + BSP_I2C_GLB); ++} ++ ++static inline void bsp_i2c_cfg_irq(const struct bsp_i2c_dev *i2c, ++ unsigned int flag) ++{ ++ writel(flag, i2c->base + BSP_I2C_INTR_EN); ++} ++ ++static void bsp_i2c_disable_irq(const struct bsp_i2c_dev *i2c, ++ unsigned int flag) ++{ ++ unsigned int val; ++ ++ val = readl(i2c->base + BSP_I2C_INTR_EN); ++ val &= ~flag; ++ writel(val, i2c->base + BSP_I2C_INTR_EN); ++} ++ ++static unsigned int bsp_i2c_clr_irq(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ ++ val = readl(i2c->base + BSP_I2C_INTR_STAT); ++ writel(INTR_ALL_MASK, i2c->base + BSP_I2C_INTR_RAW); ++ ++ return val; ++} ++ ++static inline void bsp_i2c_cmdreg_set(const struct bsp_i2c_dev *i2c, ++ unsigned int cmd, unsigned int *offset) ++{ ++ dev_dbg(i2c->dev, "i2c reg: offset=0x%x, cmd=0x%x...\n", *offset * 4, cmd); ++ /* each timing-command register is 4 bytes wide */ ++ writel(cmd, i2c->base + BSP_I2C_CMD_BASE + *offset * 4); ++ (*offset)++; ++} ++ ++/* ++ * config i2c slave addr ++ */ ++static void bsp_i2c_set_addr(const struct bsp_i2c_dev *i2c) ++{ ++ struct i2c_msg *msg = i2c->msg; ++ u16 addr; ++ ++ if (msg->flags & I2C_M_TEN) { ++ /* First byte is 11110XX0 where XX is upper 2 bits */ ++ addr = ((msg->addr & 0x300) << 1) | 0xf000; ++ if (msg->flags & I2C_M_RD) ++ addr |= 1 << 8; /* Shift the read flag to the left by eight bits */ ++ ++ /* Second byte is the remaining 8 bits */ ++ addr |= msg->addr & 0xff; ++ } else { ++ addr = (msg->addr & 0x7f) << 1; ++ if (msg->flags & I2C_M_RD) ++ addr |= 1; ++ } ++ ++ writel(addr, i2c->base + BSP_I2C_DATA1); ++} ++ ++/* ++ * Start command sequence ++ */ ++static inline void bsp_i2c_start_cmd(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ ++ val = readl(i2c->base + BSP_I2C_CTRL1); ++ val |= CTRL1_CMD_START_MASK; ++ writel(val, i2c->base + BSP_I2C_CTRL1); ++} ++ ++static int bsp_i2c_wait_rx_noempty(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int time_cnt = 0; ++ unsigned int val; ++ ++ do { ++ val = readl(i2c->base + BSP_I2C_STAT); ++ if (val & STAT_RXF_NOE_MASK) ++ return 0; ++ ++ udelay(50); /* delay 50 us */ ++ } while (time_cnt++ < I2C_WAIT_TIMEOUT); ++ ++ bsp_i2c_rescue(i2c); ++ ++ dev_err(i2c->dev, "wait rx no empty timeout, RIS: 0x%x, SR: 0x%x\n", ++ readl(i2c->base + BSP_I2C_INTR_RAW), val); ++ return -EIO; ++} ++ ++static int bsp_i2c_wait_tx_nofull(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int time_cnt = 0; ++ unsigned int val; ++ ++ do { ++ val = readl(i2c->base + BSP_I2C_STAT); ++ if (val & STAT_TXF_NOF_MASK) ++ return 0; ++ ++ udelay(50); /* delay 50 us */ ++ } while (time_cnt++ < I2C_WAIT_TIMEOUT); ++ ++ bsp_i2c_rescue(i2c); ++ ++ dev_err(i2c->dev, "wait tx no full timeout, RIS: 0x%x, SR: 0x%x\n", ++ readl(i2c->base + BSP_I2C_INTR_RAW), val); ++ return -EIO; ++} ++ ++static int bsp_i2c_wait_idle(const struct bsp_i2c_dev *i2c) ++{ ++ unsigned int time_cnt = 0; ++ unsigned int val; ++ ++ do { ++ val = readl(i2c->base + BSP_I2C_INTR_RAW); ++ if (val & (INTR_ABORT_MASK)) { ++
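/* the controller aborted the command sequence (e.g. a NACK on the bus); report it rather than waiting for the timeout */ ++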
dev_err(i2c->dev, "wait idle abort, RIS: 0x%x\n", ++ val); ++ return -EIO; ++ } ++ ++ if (val & INTR_CMD_DONE_MASK) ++ return 0; ++ ++ udelay(50); /* delay 50 us */ ++ } while (time_cnt++ < I2C_WAIT_TIMEOUT); ++ ++ bsp_i2c_rescue(i2c); ++ ++ dev_err(i2c->dev, "wait idle timeout, RIS: 0x%x, SR: 0x%x\n", ++ val, readl(i2c->base + BSP_I2C_STAT)); ++ ++ return -EIO; ++} ++ ++static void bsp_i2c_set_freq(struct bsp_i2c_dev *i2c) ++{ ++ unsigned int max_freq, freq; ++ unsigned int clk_rate; ++ unsigned int val; ++ ++ freq = i2c->freq; ++ clk_rate = clk_get_rate(i2c->clk); ++ max_freq = clk_rate >> 1; ++ ++ if (freq > max_freq) { ++ i2c->freq = max_freq; ++ freq = i2c->freq; ++ } ++ ++ if (!freq) { ++ pr_err("bsp_i2c_set_freq: freq can't be zero!"); ++ return; ++ } ++ /* standard mode is used for bus frequencies at or below 100 kHz */ ++ if (freq <= 100000) { ++ /* in normal mode F_scl: freq ++ i2c_scl_hcnt = (F_i2c / F_scl) * 0.5 ++ i2c_scl_lcnt = (F_i2c / F_scl) * 0.5 ++ */ ++ val = clk_rate / (freq * 2); ++ writel(val, i2c->base + BSP_I2C_SCL_H); ++ writel(val, i2c->base + BSP_I2C_SCL_L); ++ } else { ++ /* in fast mode F_scl: freq ++ i2c_scl_hcnt = (F_i2c / F_scl) * 0.36 ++ i2c_scl_lcnt = (F_i2c / F_scl) * 0.64 ++ */ ++ val = ((clk_rate / 100) * 36) / freq; ++ writel(val, i2c->base + BSP_I2C_SCL_H); ++ val = ((clk_rate / 100) * 64) / freq; ++ writel(val, i2c->base + BSP_I2C_SCL_L); ++ } ++ ++ val = readl(i2c->base + BSP_I2C_GLB); ++ val &= ~GLB_SDA_HOLD_MASK; ++ val |= ((0xa << GLB_SDA_HOLD_SHIFT) & GLB_SDA_HOLD_MASK); ++ writel(val, i2c->base + BSP_I2C_GLB); ++} ++ ++/* ++ * set i2c controller TX and RX FIFO watermarks ++ */ ++static inline void bsp_i2c_set_water(const struct bsp_i2c_dev *i2c) ++{ ++ writel(I2C_TXF_WATER, i2c->base + BSP_I2C_TX_WATER); ++ writel(I2C_RXF_WATER, i2c->base + BSP_I2C_RX_WATER); ++} ++ ++/* ++ * initialise the controller, set i2c bus interface freq ++ */ ++static void bsp_i2c_hw_init(struct bsp_i2c_dev *i2c) ++{ ++ bsp_i2c_disable(i2c); ++ bsp_i2c_disable_irq(i2c, INTR_ALL_MASK); ++ bsp_i2c_set_freq(i2c); ++ bsp_i2c_set_water(i2c); ++} ++ ++/* ++ * bsp_i2c_cfg_cmd - config i2c controller command sequence ++ * ++ * After all the timing commands are configured and the command ++ * sequence is started, I2C communication proceeds simply by ++ * reading from and writing to the i2c fifo.
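++ * ++ * For example, a single-message read of N >= 2 bytes from a 7-bit address compiles ++ * roughly to: TX_S, TX_D1_1 (address | R), RX_ACK, a JMP1 loop of RX_FIFO + TX_ACK ++ * covering the first N - 1 bytes, then RX_FIFO + TX_NACK for the last byte, TX_P, EXIT.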
++ */ ++static void bsp_i2c_cfg_cmd(const struct bsp_i2c_dev *i2c) ++{ ++ struct i2c_msg *msg = i2c->msg; ++ int offset = 0; ++ ++ if (i2c->msg_idx == 0) ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_S, &offset); ++ else ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_RS, &offset); ++ ++ if (msg->flags & I2C_M_TEN) { ++ if (i2c->msg_idx == 0) { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_2, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_1, &offset); ++ } else { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_2, &offset); ++ } ++ } else { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_1, &offset); ++ } ++ ++ if (msg->flags & I2C_M_IGNORE_NAK) ++ bsp_i2c_cmdreg_set(i2c, CMD_IGN_ACK, &offset); ++ else ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_ACK, &offset); ++ ++ if (msg->flags & I2C_M_RD) { ++ /* The extended address occupies two bytes */ ++ if (msg->len >= 2) { ++ writel(offset, i2c->base + BSP_I2C_DST1); ++ /* The extended address occupies two bytes */ ++ writel(msg->len - 2, i2c->base + BSP_I2C_LOOP1); ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_ACK, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_JMP1, &offset); ++ } ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_NACK, &offset); ++ } else { ++ writel(offset, i2c->base + BSP_I2C_DST1); ++ writel(msg->len - 1, i2c->base + BSP_I2C_LOOP1); ++ bsp_i2c_cmdreg_set(i2c, CMD_UP_TXF, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_FIFO, &offset); ++ ++ if (msg->flags & I2C_M_IGNORE_NAK) ++ bsp_i2c_cmdreg_set(i2c, CMD_IGN_ACK, &offset); ++ else ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_ACK, &offset); ++ ++ bsp_i2c_cmdreg_set(i2c, CMD_JMP1, &offset); ++ } ++ ++ if ((i2c->msg_idx == (i2c->msg_num - 1)) || (msg->flags & I2C_M_STOP)) { ++ dev_dbg(i2c->dev, "run to %s %d...TX STOP\n", ++ __func__, __LINE__); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_P, &offset); ++ } ++ ++ bsp_i2c_cmdreg_set(i2c, CMD_EXIT, &offset); ++} ++ ++static void bsp_i2c_cfg_cmd_mul_reg(struct bsp_i2c_dev *i2c, unsigned int reg_data_width) ++{ ++ struct i2c_msg *msg = i2c->msg; ++ int offset = 0; ++ int i; ++ ++ if (i2c->msg_idx == 0) ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_S, &offset); ++ else ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_RS, &offset); ++ ++ if (msg->flags & I2C_M_TEN) { ++ if (i2c->msg_idx == 0) { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_2, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_1, &offset); ++ } else { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_2, &offset); ++ } ++ } else { ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_D1_1, &offset); ++ } ++ ++ if (msg->flags & I2C_M_IGNORE_NAK) ++ bsp_i2c_cmdreg_set(i2c, CMD_IGN_ACK, &offset); ++ else ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_ACK, &offset); ++ ++ if (msg->flags & I2C_M_RD) { ++ /* The extended address occupies two bytes */ ++ if (msg->len >= 2) { ++ writel(offset, i2c->base + BSP_I2C_DST1); ++ /* The extended address occupies two bytes */ ++ writel(msg->len - 2, i2c->base + BSP_I2C_LOOP1); ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_ACK, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_JMP1, &offset); ++ } ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_NACK, &offset); ++ } else { ++ for (i = 0; i < reg_data_width - 1; i++) { ++ bsp_i2c_cmdreg_set(i2c, CMD_UP_TXF, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_RX_ACK, &offset); ++ } ++ bsp_i2c_cmdreg_set(i2c, CMD_UP_TXF, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_FIFO, &offset); ++ bsp_i2c_cmdreg_set(i2c, CMD_IGN_ACK, &offset); ++ } ++ ++ bsp_i2c_cmdreg_set(i2c, CMD_TX_P, &offset); ++ if 
(((msg->len / reg_data_width) - 1) > 0) { ++ writel(0, i2c->base + BSP_I2C_DST2); ++ writel((msg->len / reg_data_width) - 1, i2c->base + BSP_I2C_LOOP2); ++ bsp_i2c_cmdreg_set(i2c, CMD_JMP2, &offset); ++ } ++ bsp_i2c_cmdreg_set(i2c, CMD_EXIT, &offset); ++} ++ ++static inline void check_i2c_send_complete(struct bsp_i2c_dev *i2c) ++{ ++ unsigned int val; ++ val = readl(i2c->base + BSP_I2C_GLB); ++ if (val & GLB_EN_MASK) { ++ bsp_i2c_wait_idle(i2c); ++ bsp_i2c_disable(i2c); ++ } ++} ++ ++#if defined(CONFIG_EDMAC) ++int dma_to_i2c(unsigned long src, unsigned int dst, unsigned int length) ++{ ++ int chan; ++ ++ chan = do_dma_m2p(src, dst, length); ++ if (chan == -1) ++ pr_err("dma_to_i2c error\n"); ++ ++ return chan; ++} ++ ++int i2c_to_dma(unsigned int src, unsigned long dst, ++ unsigned int length) ++{ ++ int chan; ++ ++ chan = do_dma_p2m(dst, src, length); ++ if (chan == -1) ++ pr_err("dma_p2m error...\n"); ++ ++ return chan; ++} ++ ++static int bsp_i2c_do_dma_write(struct bsp_i2c_dev *i2c, ++ unsigned long dma_dst_addr) ++{ ++ int chan, val; ++ int status = 0; ++ struct i2c_msg *msg = i2c->msg; ++ ++ check_i2c_send_complete(i2c); ++ bsp_i2c_set_freq(i2c); ++ writel(0x1, i2c->base + BSP_I2C_TX_WATER); ++ bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd(i2c); ++ ++ /* transmit DATA from DMAC to I2C in DMA mode */ ++ chan = dma_to_i2c(dma_dst_addr, (i2c->phybase + BSP_I2C_TXF), ++ msg->len); ++ if (chan == -1) { ++ status = -1; ++ goto fail_0; ++ } ++ ++ val = readl(i2c->base + BSP_I2C_CTRL1); ++ val &= ~CTRL1_DMA_OP_MASK; ++ val |= CTRL1_DMA_W | CTRL1_CMD_START_MASK; ++ writel(val, i2c->base + BSP_I2C_CTRL1); ++ ++ if (dmac_wait(chan) != DMAC_CHN_SUCCESS) { ++ status = -1; ++ goto fail_1; ++ } ++ ++ status = bsp_i2c_wait_idle(i2c); ++ ++fail_1: ++ dmac_channel_free((unsigned int)chan); ++fail_0: ++ bsp_i2c_disable(i2c); ++ ++ return status; ++} ++ ++static int bsp_i2c_do_dma_write_mul_reg(struct bsp_i2c_dev *i2c, ++ unsigned long dma_dst_addr, unsigned int reg_data_width) ++{ ++ int chan; ++ int val = 0; ++ struct i2c_msg *msg = i2c->msg; ++ ++ check_i2c_send_complete(i2c); ++ bsp_i2c_set_freq(i2c); ++ writel(0x1, i2c->base + BSP_I2C_TX_WATER); ++ bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd_mul_reg(i2c, reg_data_width); ++ ++ /* transmit DATA from DMAC to I2C in DMA mode */ ++ chan = dma_to_i2c(dma_dst_addr, (i2c->phybase + BSP_I2C_TXF), ++ msg->len); ++ if (chan == -1) ++ return -1; ++ ++ val = readl(i2c->base + BSP_I2C_CTRL1); ++ val &= ~CTRL1_DMA_OP_MASK; ++ val |= CTRL1_DMA_W | CTRL1_CMD_START_MASK; ++ writel(val, i2c->base + BSP_I2C_CTRL1); ++ ++ return 0; ++} ++ ++static int bsp_i2c_do_dma_read(struct bsp_i2c_dev *i2c, ++ unsigned long dma_dst_addr) ++{ ++ int val, chan; ++ int status = 0; ++ struct i2c_msg *msg = i2c->msg; ++ ++ check_i2c_send_complete(i2c); ++ bsp_i2c_set_freq(i2c); ++ writel(0x0, i2c->base + BSP_I2C_RX_WATER); ++ bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd(i2c); ++ ++ /* transmit DATA from I2C to DMAC in DMA mode */ ++ chan = i2c_to_dma((i2c->phybase + BSP_I2C_RXF), ++ dma_dst_addr, msg->len); ++ if (chan == -1) { ++ status = -1; ++ goto fail_0; ++ } ++ ++ val = readl(i2c->base + BSP_I2C_CTRL1); ++ val &= ~CTRL1_DMA_OP_MASK; ++ val |= CTRL1_CMD_START_MASK | CTRL1_DMA_R; ++ writel(val, i2c->base + BSP_I2C_CTRL1); ++ ++ if (dmac_wait(chan) != DMAC_CHN_SUCCESS) { ++ status = -1; ++ goto fail_1; ++ } ++ ++ status = bsp_i2c_wait_idle(i2c); 
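++ /* success and error paths both fall through here: free the DMA channel, then disable the controller */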
++ ++fail_1: ++ dmac_channel_free((unsigned int)chan); ++fail_0: ++ bsp_i2c_disable(i2c); ++ ++ return status; ++} ++ ++/* ++ * Before the DMA transfer, the buffer allocated in high memory is copied to contiguous memory allocated ++ * by i2c_bsp and managed by the highmem_buf_list. ++ */ ++static int copy_to_continuous_mem(struct bsp_i2c_dev *i2c) ++{ ++ int ret; ++ ++ struct highmem_buf_list_node *highmem_node = NULL; ++ if (should_copy_to_continuous_mem(i2c->msg->buf) && search_in_highmem_buf_list(i2c->msg) == NULL) { ++ highmem_node = (struct highmem_buf_list_node *)kzalloc(sizeof(*highmem_node), GFP_KERNEL | __GFP_ATOMIC); ++ if (highmem_node == NULL) { ++ dev_err(i2c->dev, "Allocate memory fail.\n"); ++ return -EINVAL; ++ } ++ ++ highmem_node->msg = i2c->msg; ++ highmem_node->highmem_buf = i2c->msg->buf; ++ i2c->msg->buf = kmalloc(i2c->msg->len, GFP_KERNEL | __GFP_ATOMIC); ++ if (i2c->msg->buf == NULL) { ++ i2c->msg->buf = highmem_node->highmem_buf; ++ kfree(highmem_node); ++ highmem_node = NULL; ++ dev_err(i2c->dev, "Allocate continuous memory fail.\n"); ++ return -EINVAL; ++ } ++ highmem_node->buf = i2c->msg->buf; ++ ret = memcpy_s(highmem_node->buf, i2c->msg->len, ++ highmem_node->highmem_buf, i2c->msg->len); ++ if (ret) { ++ dev_err(i2c->dev, "%s, memcpy_s failed!\n", __func__); ++ kfree(i2c->msg->buf); ++ i2c->msg->buf = NULL; ++ i2c->msg->buf = highmem_node->highmem_buf; ++ highmem_node->buf = NULL; ++ kfree(highmem_node); ++ highmem_node = NULL; ++ return ret; ++ } ++ list_add_tail(&highmem_node->node, &highmem_buf_list); ++ } ++ return 0; ++} ++ ++/* ++ * When the DMA transfer ends, the high memory buf is returned to the ++ * i2c->msg so that the user mode can read and release the buffer, ++ * and the contiguous memory allocated by i2c_bsp will be released. 
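++ * (The copy direction is buf -> highmem_buf, so data read via DMA lands back in the caller's original buffer.)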
++ */ ++static void released_contiguous_buf_from_list(struct i2c_msg *msg) ++{ ++ struct highmem_buf_list_node *highmem_node = NULL; ++ int ret; ++ ++ debug_dump_i2c_msg(msg); ++ highmem_node = search_in_highmem_buf_list(msg); ++ if (highmem_node != NULL) { ++ ret = memcpy_s(highmem_node->highmem_buf, msg->len, ++ highmem_node->buf, msg->len); ++ if (ret) { ++ printk("%s, memcpy_s failed\n", __func__); ++ return; ++ } ++ ++ list_del(&highmem_node->node); ++ kfree(highmem_node->buf); ++ msg->buf = highmem_node->highmem_buf; ++ kfree(highmem_node); ++ } ++ ++ debug_dump_i2c_msg(msg); ++} ++ ++static int bsp_i2c_dma_xfer_one_msg(struct bsp_i2c_dev *i2c) ++{ ++ unsigned int status = -EIO; ++ struct i2c_msg *msg = i2c->msg; ++ dma_addr_t dma_dst_addr; ++ ++ dev_dbg(i2c->dev, "[%s,%d]msg->flags=0x%x, len=0x%x\n", ++ __func__, __LINE__, msg->flags, msg->len); ++ ++ debug_dump_i2c_msg(msg); ++ if (copy_to_continuous_mem(i2c)) ++ return -EINVAL; ++ ++ debug_dump_i2c_msg(msg); ++ if (msg->flags & I2C_M_RD) { ++ mb(); ++ dma_dst_addr = dma_map_single(i2c->dev, msg->buf, ++ msg->len, DMA_FROM_DEVICE); ++ status = dma_mapping_error(i2c->dev, dma_dst_addr); ++ if (status) { ++ dev_err(i2c->dev, "DMA mapping failed\n"); ++ goto out; ++ } ++ ++ status = bsp_i2c_do_dma_read(i2c, dma_dst_addr); ++ ++ dma_unmap_single(i2c->dev, dma_dst_addr, msg->len, DMA_FROM_DEVICE); ++ mb(); ++ } else { ++ mb(); ++ dma_dst_addr = dma_map_single(i2c->dev, msg->buf, ++ msg->len, DMA_TO_DEVICE); ++ status = dma_mapping_error(i2c->dev, dma_dst_addr); ++ if (status) { ++ dev_err(i2c->dev, "DMA mapping failed\n"); ++ goto out; ++ } ++ ++ status = bsp_i2c_do_dma_write(i2c, dma_dst_addr); ++ dma_unmap_single(i2c->dev, dma_dst_addr, msg->len, DMA_TO_DEVICE); ++ mb(); ++ } ++ ++out: ++ released_contiguous_buf_from_list(i2c->msg); ++ if (!status) { ++ status = bsp_i2c_wait_idle(i2c); ++ bsp_i2c_disable(i2c); ++ } ++ ++ return status; ++} ++ ++static int bsp_i2c_dma_xfer_one_msg_mul_reg(struct bsp_i2c_dev *i2c, ++ unsigned int reg_data_width) ++{ ++ unsigned int status = -EIO; ++ struct i2c_msg *msg = i2c->msg; ++ dma_addr_t dma_dst_addr; ++ ++ dev_dbg(i2c->dev, "[%s,%d]msg->flags=0x%x, len=0x%x\n", ++ __func__, __LINE__, msg->flags, msg->len); ++ ++ if (copy_to_continuous_mem(i2c)) ++ return -EINVAL; ++ ++ if (msg->flags & I2C_M_RD) { ++ debug_dump_i2c_msg(i2c->msg); ++ mb(); ++ dma_dst_addr = dma_map_single(i2c->dev, msg->buf, ++ msg->len, DMA_FROM_DEVICE); ++ status = dma_mapping_error(i2c->dev, dma_dst_addr); ++ if (status) { ++ dev_err(i2c->dev, "DMA mapping failed\n"); ++ goto out; ++ } ++ ++ status = bsp_i2c_do_dma_read(i2c, dma_dst_addr); ++ ++ dma_unmap_single(i2c->dev, dma_dst_addr, msg->len, DMA_FROM_DEVICE); ++ mb(); ++ } else { ++ mb(); ++ dma_dst_addr = dma_map_single(i2c->dev, msg->buf, ++ msg->len, DMA_TO_DEVICE); ++ status = dma_mapping_error(i2c->dev, dma_dst_addr); ++ if (status) { ++ dev_err(i2c->dev, "DMA mapping failed\n"); ++ goto out; ++ } ++ ++ status = bsp_i2c_do_dma_write_mul_reg(i2c, dma_dst_addr, reg_data_width); ++ dma_unmap_single(i2c->dev, dma_dst_addr, msg->len, DMA_TO_DEVICE); ++ mb(); ++ debug_dump_i2c_msg(i2c->msg); ++ } ++ ++out: ++ released_contiguous_buf_from_list(i2c->msg); ++ return status; ++} ++#endif ++static int bsp_i2c_polling_xfer_one_msg(struct bsp_i2c_dev *i2c) ++{ ++ int status = -EIO; ++ unsigned int val; ++ struct i2c_msg *msg = i2c->msg; ++ ++ dev_dbg(i2c->dev, "[%s,%d]msg->flags=0x%x, len=0x%x\n", ++ __func__, __LINE__, msg->flags, msg->len); ++ ++ check_i2c_send_complete(i2c); ++ 
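/* program the slave address and timing-command sequence, then poll the FIFO status flags to move data */ ++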
bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd(i2c); ++ bsp_i2c_start_cmd(i2c); ++ ++ i2c->msg_buf_ptr = 0; ++ ++ if (msg->flags & I2C_M_RD) { ++ while (i2c->msg_buf_ptr < msg->len) { ++ status = bsp_i2c_wait_rx_noempty(i2c); ++ if (status) ++ goto end; ++ ++ val = readl(i2c->base + BSP_I2C_RXF); ++ msg->buf[i2c->msg_buf_ptr] = val; ++ i2c->msg_buf_ptr++; ++ } ++ } else { ++ while (i2c->msg_buf_ptr < msg->len) { ++ status = bsp_i2c_wait_tx_nofull(i2c); ++ if (status) ++ goto end; ++ ++ val = msg->buf[i2c->msg_buf_ptr]; ++ writel(val, i2c->base + BSP_I2C_TXF); ++ i2c->msg_buf_ptr++; ++ } ++ } ++ ++ status = bsp_i2c_wait_idle(i2c); ++end: ++ bsp_i2c_disable(i2c); ++ ++ return status; ++} ++ ++static int bsp_i2c_polling_xfer_one_msg_mul_reg(struct bsp_i2c_dev *i2c, ++ unsigned int reg_data_width) ++{ ++ int status = -EIO; ++ unsigned int val; ++ struct i2c_msg *msg = i2c->msg; ++ ++ dev_dbg(i2c->dev, "[%s,%d]msg->flags=0x%x, len=0x%x\n", ++ __func__, __LINE__, msg->flags, msg->len); ++ ++ check_i2c_send_complete(i2c); ++ bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd_mul_reg(i2c, reg_data_width); ++ bsp_i2c_start_cmd(i2c); ++ ++ i2c->msg_buf_ptr = 0; ++ ++ if (msg->flags & I2C_M_RD) { ++ while (i2c->msg_buf_ptr < msg->len) { ++ status = bsp_i2c_wait_rx_noempty(i2c); ++ if (status) ++ goto end; ++ ++ val = readl(i2c->base + BSP_I2C_RXF); ++ msg->buf[i2c->msg_buf_ptr] = val; ++ i2c->msg_buf_ptr++; ++ } ++ } else { ++ while (i2c->msg_buf_ptr < msg->len) { ++ status = bsp_i2c_wait_tx_nofull(i2c); ++ if (status) ++ goto end; ++ ++ val = msg->buf[i2c->msg_buf_ptr]; ++ writel(val, i2c->base + BSP_I2C_TXF); ++ i2c->msg_buf_ptr++; ++ } ++ } ++ ++end: ++ return status; ++} ++ ++static irqreturn_t bsp_i2c_isr(int irq, void *dev_id) ++{ ++ struct bsp_i2c_dev *i2c = dev_id; ++ unsigned int irq_status; ++ struct i2c_msg *msg = i2c->msg; ++ ++ spin_lock(&i2c->lock); ++ ++ irq_status = bsp_i2c_clr_irq(i2c); ++ dev_dbg(i2c->dev, "%s RIS: 0x%x\n", __func__, irq_status); ++ ++ if (!irq_status) { ++ dev_dbg(i2c->dev, "no irq\n"); ++ goto end; ++ } ++ ++ if (irq_status & INTR_ABORT_MASK) { ++ dev_err(i2c->dev, "irq handle abort, RIS: 0x%x\n", ++ irq_status); ++ i2c->status = -EIO; ++ bsp_i2c_disable_irq(i2c, INTR_ALL_MASK); ++ ++ complete(&i2c->msg_complete); ++ goto end; ++ } ++ ++ if (msg->flags & I2C_M_RD) { ++ while ((readl(i2c->base + BSP_I2C_STAT) & STAT_RXF_NOE_MASK) ++ && (i2c->msg_buf_ptr < msg->len)) { ++ msg->buf[i2c->msg_buf_ptr] = ++ readl(i2c->base + BSP_I2C_RXF); ++ i2c->msg_buf_ptr++; ++ } ++ } else { ++ while ((readl(i2c->base + BSP_I2C_STAT) & STAT_TXF_NOF_MASK) ++ && (i2c->msg_buf_ptr < msg->len)) { ++ writel(msg->buf[i2c->msg_buf_ptr], ++ i2c->base + BSP_I2C_TXF); ++ i2c->msg_buf_ptr++; ++ } ++ } ++ ++ if (i2c->msg_buf_ptr >= msg->len) ++ bsp_i2c_disable_irq(i2c, INTR_TX_MASK | INTR_RX_MASK); ++ ++ if (irq_status & INTR_CMD_DONE_MASK) { ++ dev_dbg(i2c->dev, "cmd done\n"); ++ i2c->status = 0; ++ bsp_i2c_disable_irq(i2c, INTR_ALL_MASK); ++ ++ complete(&i2c->msg_complete); ++ } ++ ++end: ++ spin_unlock(&i2c->lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static int bsp_i2c_interrupt_xfer_one_msg(struct bsp_i2c_dev *i2c) ++{ ++ int status; ++ struct i2c_msg *msg = i2c->msg; ++ unsigned long timeout; ++ unsigned long flags; ++ ++ dev_dbg(i2c->dev, "[%s,%d]msg->flags=0x%x, len=0x%x\n", ++ __func__, __LINE__, msg->flags, msg->len); ++ ++ reinit_completion(&i2c->msg_complete); ++ i2c->msg_buf_ptr = 0; ++ i2c->status = 
-EIO; ++ ++ spin_lock_irqsave(&i2c->lock, flags); ++ check_i2c_send_complete(i2c); ++ bsp_i2c_enable(i2c); ++ bsp_i2c_clr_irq(i2c); ++ if (msg->flags & I2C_M_RD) ++ bsp_i2c_cfg_irq(i2c, INTR_USE_MASK & ~INTR_TX_MASK); ++ else ++ bsp_i2c_cfg_irq(i2c, INTR_USE_MASK & ~INTR_RX_MASK); ++ ++ bsp_i2c_set_addr(i2c); ++ bsp_i2c_cfg_cmd(i2c); ++ bsp_i2c_start_cmd(i2c); ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ ++ timeout = wait_for_completion_timeout(&i2c->msg_complete, ++ I2C_IRQ_TIMEOUT); ++ ++ spin_lock_irqsave(&i2c->lock, flags); ++ if (timeout == 0) { ++ bsp_i2c_disable_irq(i2c, INTR_ALL_MASK); ++ status = -EIO; ++ dev_err(i2c->dev, "%s timeout\n", ++ msg->flags & I2C_M_RD ? "rx" : "tx"); ++ } else { ++ status = i2c->status; ++ } ++ ++ bsp_i2c_disable(i2c); ++ ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ return status; ++} ++ ++/* ++ * Master transfer function ++ */ ++static int bsp_i2c_xfer(struct i2c_adapter *adap, ++ struct i2c_msg *msgs, int num) ++{ ++ struct bsp_i2c_dev *i2c = i2c_get_adapdata(adap); ++ int status = -EINVAL; ++ unsigned long flags; ++ ++ if (msgs == NULL || (num <= 0)) { ++ dev_err(i2c->dev, "msgs == NULL || num <= 0, Invalid argument!\n"); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&i2c->lock, flags); ++ ++ i2c->msg = msgs; ++ i2c->msg_num = num; ++ i2c->msg_idx = 0; ++ ++ while (i2c->msg_idx < i2c->msg_num) { ++#if defined(CONFIG_EDMAC) ++ if ((i2c->msg->len >= CONFIG_DMA_MSG_MIN_LEN) && ++ (i2c->msg->len <= CONFIG_DMA_MSG_MAX_LEN)) { ++ status = bsp_i2c_dma_xfer_one_msg(i2c); ++ if (status) ++ break; ++ } else if (i2c->irq >= 0) { ++#else ++ if (i2c->irq >= 0) { ++#endif ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ status = bsp_i2c_interrupt_xfer_one_msg(i2c); ++ spin_lock_irqsave(&i2c->lock, flags); ++ if (status) ++ break; ++ } else { ++ status = bsp_i2c_polling_xfer_one_msg(i2c); ++ if (status) ++ break; ++ } ++ i2c->msg++; ++ i2c->msg_idx++; ++ } ++ ++ if (!status || i2c->msg_idx > 0) ++ status = i2c->msg_idx; ++ ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ return status; ++} ++ ++/* bsp_i2c_break_polling_xfer ++ * ++ * Polling-only transfer path that bypasses the interrupt interface ++ */ ++static int bsp_i2c_break_polling_xfer(const struct i2c_adapter *adap, ++ struct i2c_msg *msgs, int num) ++{ ++ struct bsp_i2c_dev *i2c = i2c_get_adapdata(adap); ++ int status = -EINVAL; ++ unsigned long flags; ++ if (msgs == NULL || (num <= 0)) { ++ dev_err(i2c->dev, "msgs == NULL || num <= 0, Invalid argument!\n"); ++ return -EINVAL; ++ } ++ spin_lock_irqsave(&i2c->lock, flags); ++ i2c->msg = msgs; ++ i2c->msg_num = num; ++ i2c->msg_idx = 0; ++ while (i2c->msg_idx < i2c->msg_num) { ++#if defined(CONFIG_EDMAC) ++ debug_dump_i2c_msg(msgs); ++ ++ if ((i2c->msg->len >= CONFIG_DMA_MSG_MIN_LEN) && ++ (i2c->msg->len <= CONFIG_DMA_MSG_MAX_LEN)) { ++ status = bsp_i2c_dma_xfer_one_msg(i2c); ++ if (status) ++ break; ++ } ++#else ++ status = bsp_i2c_polling_xfer_one_msg(i2c); ++ if (status) ++ break; ++#endif ++ i2c->msg++; ++ i2c->msg_idx++; ++ } ++ if (!status || i2c->msg_idx > 0) ++ status = i2c->msg_idx; ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ return status; ++} ++ ++static int bsp_i2c_mul_reg_xfer(struct i2c_adapter* const adap, ++ struct i2c_msg *msgs, int num, unsigned int reg_data_width) ++{ ++ struct bsp_i2c_dev *i2c = i2c_get_adapdata(adap); ++ int status = -EINVAL; ++ unsigned long flags; ++ if (msgs == NULL || (num <= 0)) { ++ dev_err(i2c->dev, "msgs == NULL || num <= 0, Invalid argument!\n"); ++ return -EINVAL; ++ } ++
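/* same message walk as bsp_i2c_xfer, but DMA is attempted only for messages flagged I2C_M_DMA whose length falls in the configured DMA window */ ++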
spin_lock_irqsave(&i2c->lock, flags); ++ i2c->msg = msgs; ++ i2c->msg_num = num; ++ i2c->msg_idx = 0; ++ while (i2c->msg_idx < i2c->msg_num) { ++ if ((i2c->msg->len >= CONFIG_DMA_MSG_MIN_LEN) && ++ (i2c->msg->len <= CONFIG_DMA_MSG_MAX_LEN) && (i2c->msg->flags & I2C_M_DMA)) { ++#if defined(CONFIG_EDMAC) && defined(CONFIG_EDMAC_INTERRUPT) ++ status = bsp_i2c_dma_xfer_one_msg_mul_reg(i2c, reg_data_width); ++#endif ++ if (status) ++ break; ++ } else { ++ status = bsp_i2c_polling_xfer_one_msg_mul_reg(i2c, reg_data_width); ++ if (status) ++ break; ++ } ++ i2c->msg++; ++ i2c->msg_idx++; ++ } ++ if (!status || i2c->msg_idx > 0) ++ status = i2c->msg_idx; ++ ++ spin_unlock_irqrestore(&i2c->lock, flags); ++ return status; ++} ++/* I2C READ * ++ * bsp_i2c_master_recv - issue a single I2C message in master receive mode ++ * @client: Handle to slave device ++ * @buf: Where to store data read from slave ++ * @count: How many bytes to read, must be less than 64k since msg.len is u16 ++ * ++ * Always returns -EIO: reads are not supported through this wrapper; ++ * use bsp_i2c_transfer instead. ++ */ ++int bsp_i2c_master_recv(const struct i2c_client* client, const char* buf, ++ int count) ++{ ++ printk("Wrong interface call: " ++ "bsp_i2c_transfer is the only interface for i2c read!\n"); ++ ++ return -EIO; ++} ++EXPORT_SYMBOL(bsp_i2c_master_recv); ++ ++/* I2C WRITE * ++ * bsp_i2c_master_send - issue a single I2C message in master transmit mode ++ * @client: Handle to slave device ++ * @buf: Data that will be written to the slave ++ * @count: How many bytes to write, must be less than 64k since msg.len is u16 ++ * ++ * Returns negative errno, or else the number of bytes written. ++ */ ++int bsp_i2c_master_send(const struct i2c_client *client, ++ const char *buf, __u16 count) ++{ ++ struct i2c_adapter *adap = NULL; ++ struct i2c_msg msg; ++ int msgs_count; ++ ++ if ((client == NULL) || (buf == NULL) || (client->adapter == NULL) || ++ (count < 0)) { ++ printk(KERN_ERR "invalid args\n"); ++ return -EINVAL; ++ } ++ ++ if ((client->addr > 0x3ff) || ++ (((client->flags & I2C_M_TEN) == 0) && (client->addr > 0x7f))) { ++ printk(KERN_ERR "dev address out of range\n"); ++ return -EINVAL; ++ } ++ ++ adap = client->adapter; ++ msg.addr = client->addr; ++ msg.flags = client->flags; ++ msg.len = count; ++ ++ msg.buf = (__u8 *)buf; ++ ++ debug_dump_i2c_msg(&msg); ++ ++ msgs_count = bsp_i2c_break_polling_xfer(adap, &msg, 1); ++ ++ return (msgs_count == 1) ? count : -EIO; ++} ++EXPORT_SYMBOL(bsp_i2c_master_send); ++ ++int bsp_i2c_master_send_mul_reg(const struct i2c_client *client, ++ const char *buf, __u16 count, unsigned int reg_data_width) ++{ ++ struct i2c_adapter *adap = client->adapter; ++ struct i2c_msg msg; ++ int msgs_count; ++ ++ if ((client->addr > 0x3ff) ++ || (((client->flags & I2C_M_TEN) == 0) && (client->addr > 0x7f))) { ++ printk(KERN_ERR "dev address out of range\n"); ++ return -EINVAL; ++ } ++ ++ msg.addr = client->addr; ++ msg.flags = client->flags; ++ msg.len = count; ++ ++ if (!buf) { ++ printk(KERN_ERR "buf == NULL\n"); ++ return -EINVAL; ++ } ++ msg.buf = (__u8 *)buf; ++ ++ msgs_count = bsp_i2c_mul_reg_xfer(adap, &msg, 1, reg_data_width); ++ ++ return (msgs_count == 1) ? count : -EIO; ++} ++EXPORT_SYMBOL(bsp_i2c_master_send_mul_reg); ++/** ++ * bsp_i2c_transfer - execute a single or combined I2C message ++ * @adap: Handle to I2C bus ++ * @msgs: One or more messages to execute before STOP is issued to ++ * terminate the operation; each message begins with a START. ++ * @num: Number of messages to be executed.
++ *
++ * Returns negative errno, else the number of messages executed.
++ *
++ * Note that there is no requirement that each message be sent to
++ * the same slave address, although that is the most common model.
++ */
++int bsp_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
++	int num)
++{
++	int msgs_count;
++
++	if ((!adap) || (!msgs)) {
++		printk(KERN_ERR "invalid argument: adap == NULL or msgs == NULL\n");
++		return -EINVAL;
++	}
++
++	if ((msgs[0].addr > 0x3ff) ||
++		(((msgs[0].flags & I2C_M_TEN) == 0) && (msgs[0].addr > 0x7f))) {
++		printk(KERN_ERR "msgs[0] dev address out of range\n");
++		return -EINVAL;
++	}
++
++	/* only read msgs[1] when a second message was actually supplied */
++	if (num > 1 && ((msgs[1].addr > 0x3ff) ||
++		(((msgs[1].flags & I2C_M_TEN) == 0) && (msgs[1].addr > 0x7f)))) {
++		printk(KERN_ERR "msgs[1] dev address out of range\n");
++		return -EINVAL;
++	}
++
++	msgs_count = bsp_i2c_xfer(adap, msgs, num);
++
++	return msgs_count;
++}
++EXPORT_SYMBOL(bsp_i2c_transfer);
++
++static u32 bsp_i2c_func(struct i2c_adapter *adap)
++{
++	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
++		I2C_FUNC_PROTOCOL_MANGLING |
++		I2C_FUNC_SMBUS_WORD_DATA |
++		I2C_FUNC_SMBUS_BYTE_DATA |
++		I2C_FUNC_SMBUS_BYTE |
++		I2C_FUNC_SMBUS_I2C_BLOCK;
++}
++
++static const struct i2c_algorithm bsp_i2c_algo = {
++	.master_xfer = bsp_i2c_xfer,
++	.functionality = bsp_i2c_func,
++};
++
++static int bsp_i2c_init_adap(struct i2c_adapter* const adap, struct bsp_i2c_dev* const i2c,
++	struct platform_device* const pdev)
++{
++	int status;
++
++	i2c_set_adapdata(adap, i2c);
++	adap->owner = THIS_MODULE;
++	strlcpy(adap->name, "bsp-i2c", sizeof(adap->name));
++	adap->dev.parent = &pdev->dev;
++	adap->dev.of_node = pdev->dev.of_node;
++	adap->algo = &bsp_i2c_algo;
++
++	/* Add the i2c adapter */
++	status = i2c_add_adapter(adap);
++	if (status)
++		dev_err(i2c->dev, "failed to add bus to i2c core\n");
++
++	return status;
++}
++
++static void try_deassert_i2c_reset(const struct bsp_i2c_dev *i2c)
++{
++	struct reset_control *i2c_rst = NULL;
++
++	i2c_rst = devm_reset_control_get(i2c->dev, "i2c_reset");
++	if (IS_ERR_OR_NULL(i2c_rst))
++		return;
++
++	/* deassert reset if "resets" property is set */
++	dev_info(i2c->dev, "deassert reset\n");
++	reset_control_deassert(i2c_rst);
++}
++
++static int bsp_i2c_probe(struct platform_device *pdev)
++{
++	int status;
++	struct bsp_i2c_dev *i2c;
++	struct i2c_adapter *adap = NULL;
++	struct resource *res = NULL;
++
++	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
++	if (i2c == NULL)
++		return -ENOMEM;
++
++	platform_set_drvdata(pdev, i2c);
++	i2c->dev = &pdev->dev;
++	spin_lock_init(&i2c->lock);
++	init_completion(&i2c->msg_complete);
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (res == NULL) {
++		dev_err(i2c->dev, "invalid mem resource\n");
++		return -ENODEV;
++	}
++
++	i2c->phybase = res->start;
++	i2c->base = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(i2c->base)) {
++		dev_err(i2c->dev, "cannot ioremap resource\n");
++		return PTR_ERR(i2c->base);
++	}
++
++	i2c->clk = devm_clk_get(&pdev->dev, NULL);
++	if (IS_ERR(i2c->clk)) {
++		dev_err(i2c->dev, "cannot get clock\n");
++		return PTR_ERR(i2c->clk);
++	}
++	clk_prepare_enable(i2c->clk);
++
++	try_deassert_i2c_reset(i2c);
++
++	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c->freq)) {
++		dev_warn(i2c->dev, "setting default clock-frequency@%dHz\n", I2C_DEFAULT_FREQUENCY);
++		i2c->freq = I2C_DEFAULT_FREQUENCY;
++	}
++
++	/* i2c controller initialization, disable interrupt */
++	bsp_i2c_hw_init(i2c);
++
++	i2c->irq = platform_get_irq(pdev, 0);
++
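++	/*
++	 * Prefer interrupt-driven transfers; if the IRQ cannot be
++	 * requested, fall back to polled mode (irq is set to -1 below).
++	 */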
status = devm_request_irq(&pdev->dev, i2c->irq, bsp_i2c_isr, ++ IRQF_SHARED, dev_name(&pdev->dev), i2c); ++ if (status) { ++ dev_dbg(i2c->dev, "falling back to polling mode"); ++ i2c->irq = -1; ++ } ++ ++ adap = &i2c->adap; ++ status = bsp_i2c_init_adap(adap, i2c, pdev); ++ if (status) ++ clk_disable_unprepare(i2c->clk); ++ ++ return status; ++} ++ ++static int bsp_i2c_remove(struct platform_device *pdev) ++{ ++ struct bsp_i2c_dev *i2c = platform_get_drvdata(pdev); ++ ++ clk_disable_unprepare(i2c->clk); ++ i2c_del_adapter(&i2c->adap); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int bsp_i2c_suspend(struct device *dev) ++{ ++ struct bsp_i2c_dev *i2c = dev_get_drvdata(dev); ++ ++ i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER); ++ clk_disable_unprepare(i2c->clk); ++ i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER); ++ ++ return 0; ++} ++ ++static int bsp_i2c_resume(struct device *dev) ++{ ++ struct bsp_i2c_dev *i2c = dev_get_drvdata(dev); ++ ++ i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER); ++ clk_prepare_enable(i2c->clk); ++ bsp_i2c_hw_init(i2c); ++ i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER); ++ ++ return 0; ++} ++#endif ++ ++static SIMPLE_DEV_PM_OPS(bsp_i2c_dev_pm, bsp_i2c_suspend, ++ bsp_i2c_resume); ++ ++static const struct of_device_id bsp_i2c_match[] = { ++ { .compatible = "vendor,i2c" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, bsp_i2c_match); ++ ++static struct platform_driver bsp_i2c_driver = { ++ .driver = { ++ .name = "bsp-i2c", ++ .of_match_table = bsp_i2c_match, ++ .pm = &bsp_i2c_dev_pm, ++ }, ++ .probe = bsp_i2c_probe, ++ .remove = bsp_i2c_remove, ++}; ++ ++module_platform_driver(bsp_i2c_driver); ++ ++MODULE_DESCRIPTION("I2C Bus driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 7d337380a..d4b1476a2 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -31,6 +31,10 @@ + #include + #include + ++#ifdef CONFIG_ARCH_BSP ++#include "vendor/vendor_i2c_dev.h" ++#endif ++ + /* + * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a + * slave (i2c_client) with which messages will be exchanged. It's coupled +@@ -484,6 +488,12 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + */ + client->adapter->timeout = msecs_to_jiffies(arg * 10); + break; ++#ifdef CONFIG_ARCH_BSP ++ case I2C_CONFIG_FLAGS: ++ return i2c_config_flags(client, arg); ++ case I2C_CONFIG_MUL_REG: ++ return i2c_config_mul_reg(client, arg); ++#endif + default: + /* NOTE: returning a fault code here could cause trouble + * in buggy userspace code. Some old kernel bugs returned +diff --git a/drivers/i2c/vendor/Makefile b/drivers/i2c/vendor/Makefile +new file mode 100644 +index 000000000..3c19c2a0d +--- /dev/null ++++ b/drivers/i2c/vendor/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_ARCH_BSP) += vendor_i2c_dev.o +diff --git a/drivers/i2c/vendor/vendor_i2c_dev.c b/drivers/i2c/vendor/vendor_i2c_dev.c +new file mode 100644 +index 000000000..f671eb674 +--- /dev/null ++++ b/drivers/i2c/vendor/vendor_i2c_dev.c +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
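++ *
++ * Helpers behind the I2C_CONFIG_FLAGS and I2C_CONFIG_MUL_REG ioctls that
++ * i2c-dev dispatches here when CONFIG_ARCH_BSP is enabled.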
++ */
++
++#include "vendor_i2c_dev.h"
++
++int i2c_config_flags(struct i2c_client *client, unsigned long arg)
++{
++	if (arg & I2C_M_16BIT_REG)
++		client->flags |= I2C_M_16BIT_REG;
++	else
++		client->flags &= ~I2C_M_16BIT_REG;
++
++	if (arg & I2C_M_16BIT_DATA)
++		client->flags |= I2C_M_16BIT_DATA;
++	else
++		client->flags &= ~I2C_M_16BIT_DATA;
++
++	if (arg & I2C_M_DMA)
++		client->flags |= I2C_M_DMA;
++	else
++		client->flags &= ~I2C_M_DMA;
++
++	return 0;
++}
++
++int i2c_config_mul_reg(struct i2c_client *client, unsigned long arg)
++{
++	int ret;
++	struct i2c_msg msg;
++	unsigned int reg_width;
++	unsigned int data_width;
++	unsigned int reg_data_width;
++
++	if (copy_from_user(&msg,
++			(struct i2c_msg __user *)arg,
++			sizeof(msg)))
++		return -EFAULT;
++
++	/* i2c slave dev reg width */
++	if (client->flags & I2C_M_16BIT_REG)
++		reg_width = 2;
++	else
++		reg_width = 1;
++
++	/* i2c send data width */
++	if (client->flags & I2C_M_16BIT_DATA)
++		data_width = 2;
++	else
++		data_width = 1;
++
++	reg_data_width = reg_width + data_width;
++
++	/* validate the length before copying the payload from userspace */
++	if (msg.len == 0 || reg_data_width > msg.len || msg.len % reg_data_width != 0) {
++		printk(KERN_ERR "invalid msg.len\n");
++		return -EINVAL;
++	}
++
++	msg.buf = memdup_user(msg.buf, msg.len);
++	if (IS_ERR(msg.buf)) {
++		printk(KERN_ERR "memdup_user failed\n");
++		return PTR_ERR(msg.buf);
++	}
++
++	ret = bsp_i2c_master_send_mul_reg(client, msg.buf, msg.len, reg_data_width);
++
++	kfree(msg.buf);
++
++	return ret;
++}
+diff --git a/drivers/i2c/vendor/vendor_i2c_dev.h b/drivers/i2c/vendor/vendor_i2c_dev.h
+new file mode 100644
+index 000000000..2997a4a1a
+--- /dev/null
++++ b/drivers/i2c/vendor/vendor_i2c_dev.h
+@@ -0,0 +1,18 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
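++ *
++ * Illustrative userspace sequence (a sketch, not part of the original
++ * patch; the device node and the 0x36 slave address are hypothetical):
++ *
++ *   int fd = open("/dev/i2c-0", O_RDWR);
++ *   ioctl(fd, I2C_SLAVE, 0x36);
++ *   ioctl(fd, I2C_CONFIG_FLAGS, I2C_M_16BIT_REG);
++ *   struct i2c_msg msg = {
++ *       .addr = 0x36,
++ *       .flags = 0,
++ *       .len = 6,                // two packed (2-byte reg + 1-byte data) entries
++ *       .buf = regs_and_data,
++ *   };
++ *   ioctl(fd, I2C_CONFIG_MUL_REG, (unsigned long)&msg);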
++ */ ++#ifndef __VENDOR_I2C_DEV_H ++#define __VENDOR_I2C_DEV_H ++ ++#include ++#include ++#include ++ ++#define I2C_CONFIG_MUL_REG 0x070c ++#define I2C_CONFIG_FLAGS 0x070d ++ ++int i2c_config_flags(struct i2c_client *client, unsigned long arg); ++ ++int i2c_config_mul_reg(struct i2c_client *client, unsigned long arg); ++ ++#endif /* __VENDOR_I2C_DEV_H */ +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +index 8a16cd3ef..f48e03b15 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +@@ -392,6 +392,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) + unsigned long reg, fld; + unsigned long oas; + unsigned long asid_bits; ++#ifndef CONFIG_VENDOR_NPU + u32 feat_mask = ARM_SMMU_FEAT_COHERENCY; + + if (vabits_actual == 52) +@@ -399,7 +400,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) + + if ((smmu->features & feat_mask) != feat_mask) + return false; +- ++#endif + if (!(smmu->pgsize_bitmap & PAGE_SIZE)) + return false; + +@@ -595,3 +596,15 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(void) + + return domain; + } ++ ++#if defined(CONFIG_VENDOR_NPU) ++void arm_smmu_sva_mm_invalidate_range(struct iommu_domain *domain, ++ struct mm_struct *mm, unsigned long start, unsigned long size) ++{ ++ unsigned long asid = arm64_mm_context_get(mm); ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ arm_smmu_tlb_inv_range_asid(start, size, asid, ++ PAGE_SIZE, false, smmu_domain); ++ arm64_mm_context_put(mm); ++} ++#endif +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +index 6cecbac0e..18d98a7fd 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +@@ -81,6 +81,9 @@ DEFINE_MUTEX(arm_smmu_asid_lock); + struct arm_smmu_ctx_desc quiet_cd = { 0 }; + + static struct arm_smmu_option_prop arm_smmu_options[] = { ++#ifdef CONFIG_VENDOR_NPU ++ { ARM_SMMU_OPT_SKIP_PREFETCH, "vendor,broken-prefetch-cmd" }, ++#endif + { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, + { 0, NULL}, +@@ -1716,11 +1719,55 @@ static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev) + return IRQ_HANDLED; + } + ++#ifndef CONFIG_VENDOR_NPU + static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) + { + arm_smmu_gerror_handler(irq, dev); + return IRQ_WAKE_THREAD; + } ++#else ++static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev) ++{ ++ struct arm_smmu_device *smmu = dev; ++ /* We don't actually use CMD_SYNC interrupts for anything */ ++ dev_warn(smmu->dev, "Receive cmdq_sync interrupt=======================>\n"); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) ++{ ++ irqreturn_t ret = IRQ_NONE; ++ u32 irq_status = 0; ++ u32 raw_irq_status = 0; ++ u32 reg = (TCU_EVENT_Q_IRQ_CLR | TCU_CMD_SYNC_IRQ_CLR | ++ TCU_GERROR_IRQ_CLR | TCU_EVENTTO_CLR); ++ struct arm_smmu_device *smmu = (struct arm_smmu_device *)dev; ++ ++ irq_status = readl_relaxed(smmu->base + SMMU_IRPT_STAT_NS); ++ raw_irq_status = readl_relaxed(smmu->base + SMMU_IRPT_RAW_NS); ++ dev_dbg(smmu->dev, "irq info: status:0x%x,raw_status:0x%x\n", irq_status, raw_irq_status); ++ writel_relaxed(reg, smmu->base + SMMU_IRPT_CLR_NS); ++ ++ 
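++	/*
++	 * The vendor TOP_CTL block muxes the event-queue, CMD_SYNC and
++	 * gerror interrupts onto a single line; the raw status was cleared
++	 * above, so dispatch on the latched bits that were read first.
++	 */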
if (irq_status & TCU_EVENT_Q_IRQ) ++ ret = arm_smmu_evtq_handler(irq, smmu); ++ ++ if (irq_status & TCU_CMD_SYNC_IRQ) ++ ret |= arm_smmu_cmdq_sync_handler(irq, dev); ++ ++ if (irq_status & TCU_GERROR_IRQ) ++ ret |= arm_smmu_gerror_handler(irq, dev); ++ ++ if (ret & IRQ_WAKE_THREAD) ++ return IRQ_WAKE_THREAD; ++ else ++ return ret; ++} ++#endif + + static void + arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, +@@ -2859,6 +2906,10 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid) + arm_smmu_sva_remove_dev_pasid(domain, dev, pasid); + } + ++#ifdef CONFIG_VENDOR_NPU ++void arm_smmu_sva_mm_invalidate_range(struct iommu_domain *domain, ++ struct mm_struct *mm, unsigned long start, unsigned long size); ++#endif + static struct iommu_ops arm_smmu_ops = { + .capable = arm_smmu_capable, + .domain_alloc = arm_smmu_domain_alloc, +@@ -2879,6 +2930,9 @@ static struct iommu_ops arm_smmu_ops = { + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, ++#ifdef CONFIG_VENDOR_NPU ++ .inv_iotlb_range = arm_smmu_sva_mm_invalidate_range, ++#endif + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, +@@ -3242,7 +3296,11 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) + } + } + ++#ifdef CONFIG_VENDOR_NPU ++static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool for_suspend) ++#else + static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) ++#endif + { + int ret, irq; + u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; +@@ -3255,6 +3313,11 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) + return ret; + } + ++#ifdef CONFIG_VENDOR_NPU ++ if (for_suspend) ++ goto irq_requested; ++#endif ++ + irq = smmu->combined_irq; + if (irq) { + /* +@@ -3273,13 +3336,18 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) + + if (smmu->features & ARM_SMMU_FEAT_PRI) + irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; +- ++#ifdef CONFIG_VENDOR_NPU ++irq_requested: ++#endif + /* Enable interrupt generation on the SMMU */ + ret = arm_smmu_write_reg_sync(smmu, irqen_flags, + ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK); + if (ret) + dev_warn(smmu->dev, "failed to enable irqs\n"); +- ++#ifdef CONFIG_VENDOR_NPU ++ writel_relaxed(VENDOR_VAL_MASK, smmu->base + SMMU_IRPT_CLR_NS); ++ writel_relaxed(TCU_EVENT_TO_MASK, smmu->base + SMMU_IRPT_MASK_NS); ++#endif + return 0; + } + +@@ -3294,7 +3362,11 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) + return ret; + } + ++#ifdef CONFIG_VENDOR_NPU ++static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool for_suspend) ++#else + static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) ++#endif + { + int ret; + u32 reg, enables; +@@ -3401,8 +3473,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) + return ret; + } + } +- ++#ifdef CONFIG_VENDOR_NPU ++ ret = arm_smmu_setup_irqs(smmu, for_suspend); ++#else + ret = arm_smmu_setup_irqs(smmu); ++#endif + if (ret) { + dev_err(smmu->dev, "failed to setup irqs\n"); + return ret; +@@ -3412,7 +3487,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) + enables &= ~(CR0_EVTQEN | CR0_PRIQEN); + + /* Enable the SMMU interface, or ensure bypass */ ++#ifdef CONFIG_VENDOR_NPU ++ if (!smmu->bypass || disable_bypass) { ++#else + if (!bypass || disable_bypass) { ++#endif + enables |= CR0_SMMUEN; + } else { + ret = 
arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); +@@ -3475,13 +3554,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) + /* IDR0 */ + reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0); + ++#ifndef CONFIG_VENDOR_NPU + /* 2-level structures */ + if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL) + smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB; + + if (reg & IDR0_CD2L) + smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB; +- ++#endif + /* + * Translation table endianness. + * We currently require the same endianness as the CPU, but this +@@ -3601,7 +3681,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) + smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); + smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg); + smmu->iommu.max_pasids = 1UL << smmu->ssid_bits; +- ++#ifdef CONFIG_VENDOR_NPU ++ smmu->sid_bits = 6; /* set sid to 6 bits */ ++ smmu->ssid_bits = 6; /* set ssid to 6 bits */ ++#endif + /* + * If the SMMU supports fewer bits than would fill a single L2 stream + * table, use a linear table instead. +@@ -3681,13 +3764,15 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) + + if (arm_smmu_sva_supported(smmu)) + smmu->features |= ARM_SMMU_FEAT_SVA; +- ++#ifdef CONFIG_VENDOR_NPU ++ if (smmu->features & ARM_SMMU_FEAT_SVA) ++ printk("support SVA===================>\n"); ++#endif + dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", + smmu->ias, smmu->oas, smmu->features); + return 0; + } + +-#ifdef CONFIG_ACPI + static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) + { + switch (model) { +@@ -3721,13 +3806,6 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, + + return 0; + } +-#else +-static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev, +- struct arm_smmu_device *smmu) +-{ +- return -ENODEV; +-} +-#endif + + static int arm_smmu_device_dt_probe(struct platform_device *pdev, + struct arm_smmu_device *smmu) +@@ -3759,6 +3837,70 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) + return SZ_128K; + } + ++#ifdef CONFIG_VENDOR_NPU ++#define ARM_SMMMU_DEVICE_MAX 2 ++#define ARM_SMMMU_DEVICE_NAME_LEN 64 ++struct smmu_dev_wl_mng { ++ char smmu_name[ARM_SMMMU_DEVICE_NAME_LEN]; ++ void *pdev; ++ bool is_probe; ++ bool is_poweron; ++}; ++ ++static struct smmu_dev_wl_mng smmu_dev_white_list[ARM_SMMMU_DEVICE_MAX] = { ++ { "smmu_npu", NULL, false, false}, ++#if defined(CONFIG_ARCH_SD3491V100) ++ { "smmu_svp_npu", NULL, false, false} ++#elif defined(CONFIG_ARCH_SS928V100) || defined(CONFIG_ARCH_SS927V100) ++ { "smmu_pqp", NULL, false, false} ++#endif ++}; ++ ++static int smmu_device_wl_process(struct platform_device *pdev) ++{ ++ int i; ++ ++ for (i = 0; i < ARM_SMMMU_DEVICE_MAX; i++) { ++ if (strnstr(pdev->name, smmu_dev_white_list[i].smmu_name, strlen(pdev->name)) != NULL) { ++ smmu_dev_white_list[i].pdev = (void *)pdev; ++ return 0; ++ } ++ } ++ return -1; ++} ++ ++static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu); ++static int arm_smmu_device_poweron_probe(struct arm_smmu_device *smmu, struct device *dev) ++{ ++ int ret; ++ ++ /* Probe the h/w */ ++ ret = arm_smmu_device_hw_probe(smmu); ++ if (ret) ++ return ret; ++ ++ /* Initialise in-memory data structures */ ++ ret = arm_smmu_init_structures(smmu); ++ if (ret) ++ goto err_free_iopf; ++ ++ /* Check for RMRs and install bypass STEs if any */ ++ arm_smmu_rmr_install_bypass_ste(smmu); ++ ++ /* Reset the device */ ++ ret = arm_smmu_device_reset(smmu, false); ++ if (ret) ++ goto err_disable; 
++ ++ return 0; ++err_disable: ++ arm_smmu_device_disable(smmu); ++err_free_iopf: ++ iopf_queue_free(smmu->evtq.iopf); ++ return ret; ++} ++#endif ++ + static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start, + resource_size_t size) + { +@@ -3804,7 +3946,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + resource_size_t ioaddr; + struct arm_smmu_device *smmu; + struct device *dev = &pdev->dev; ++#ifndef CONFIG_VENDOR_NPU + bool bypass; ++#endif + + smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); + if (!smmu) +@@ -3820,7 +3964,11 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + } + + /* Set bypass mode according to firmware probing result */ ++#ifdef CONFIG_VENDOR_NPU ++ smmu->bypass = !!ret; ++#else + bypass = !!ret; ++#endif + + /* Base address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -3836,10 +3984,12 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + * Don't map the IMPLEMENTATION DEFINED regions, since they may contain + * the PMCG registers which are reserved by the PMU driver. + */ +- smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ); ++ smmu->base = arm_smmu_ioremap(dev, ioaddr, resource_size(res)); + if (IS_ERR(smmu->base)) + return PTR_ERR(smmu->base); +- ++#ifdef CONFIG_VENDOR_NPU ++ smmu->page1 = smmu->base + SZ_64K; ++#else + if (arm_smmu_resource_size(smmu) > SZ_64K) { + smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K, + ARM_SMMU_REG_SZ); +@@ -3848,7 +3998,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + } else { + smmu->page1 = smmu->base; + } +- ++#endif + /* Interrupt lines */ + + irq = platform_get_irq_byname_optional(pdev, "combined"); +@@ -3867,6 +4017,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + if (irq > 0) + smmu->gerr_irq = irq; + } ++#ifndef CONFIG_VENDOR_NPU + /* Probe the h/w */ + ret = arm_smmu_device_hw_probe(smmu); + if (ret) +@@ -3887,7 +4038,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + ret = arm_smmu_device_reset(smmu, bypass); + if (ret) + goto err_disable; ++#else ++ /* Record our private device structure */ ++ platform_set_drvdata(pdev, smmu); + ++ if (0 != smmu_device_wl_process(pdev)) { ++ ret = arm_smmu_device_poweron_probe(smmu, dev); ++ if (ret) ++ return ret; ++ } ++#endif + /* And we're up. Go go go! 
*/ + ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, + "smmu3.%pa", &ioaddr); +@@ -3905,21 +4065,166 @@ static int arm_smmu_device_probe(struct platform_device *pdev) + err_free_sysfs: + iommu_device_sysfs_remove(&smmu->iommu); + err_disable: ++#ifndef CONFIG_VENDOR_NPU + arm_smmu_device_disable(smmu); + err_free_iopf: + iopf_queue_free(smmu->evtq.iopf); ++#endif + return ret; + } + ++#ifdef CONFIG_VENDOR_NPU ++int arm_smmu_device_post_probe(const char *device_name) ++{ ++ int ret, i; ++ struct platform_device *pdev = NULL; ++ struct arm_smmu_device *smmu = NULL; ++ ++ for (i = 0; i < ARM_SMMMU_DEVICE_MAX; i++) { ++ if (strnstr(device_name, smmu_dev_white_list[i].smmu_name, strlen(device_name)) != NULL) { ++ if (smmu_dev_white_list[i].is_probe == true) ++ return 0; ++ ++ pdev = (struct platform_device *)smmu_dev_white_list[i].pdev; ++ break; ++ } ++ } ++ ++ if (pdev == NULL) { ++ dev_err(&pdev->dev, "fail to find smmu device in white list \n"); ++ return -1; ++ } ++ ++ smmu = platform_get_drvdata(pdev); ++ if (smmu == NULL) ++ return -1; ++ ret = arm_smmu_device_poweron_probe(smmu, &pdev->dev); ++ if (ret) { ++ dev_err(&pdev->dev, "Fail to do smmu post probe\n"); ++ return ret; ++ } ++ smmu_dev_white_list[i].is_probe = true; ++ smmu_dev_white_list[i].is_poweron = true; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(arm_smmu_device_post_probe); ++ ++static bool arm_smmu_device_is_in_wl(const char *device_name) ++{ ++ int i; ++ ++ for (i = 0; i < ARM_SMMMU_DEVICE_MAX; i++) { ++ if (strnstr(device_name, smmu_dev_white_list[i].smmu_name, strlen(device_name)) != NULL) ++ return true; ++ } ++ return false; ++} ++ ++int arm_smmu_device_suspend(const char *device_name) ++{ ++ int i; ++ ++ for (i = 0; i < ARM_SMMMU_DEVICE_MAX; i++) { ++ if (strnstr(device_name, smmu_dev_white_list[i].smmu_name, strlen(device_name)) != NULL) { ++ smmu_dev_white_list[i].is_poweron = false; ++ return 0; ++ } ++ } ++ return -1; ++} ++EXPORT_SYMBOL_GPL(arm_smmu_device_suspend); ++ ++static struct arm_smmu_device *get_smmu_device_data(const char *device_name, int *index) ++{ ++ int i; ++ struct platform_device *pdev = NULL; ++ struct arm_smmu_device *smmu = NULL; ++ ++ for (i = 0; i < ARM_SMMMU_DEVICE_MAX; i++) { ++ if (strnstr(device_name, smmu_dev_white_list[i].smmu_name, strlen(device_name)) != NULL) { ++ if (smmu_dev_white_list[i].is_probe == false) ++ return 0; ++ ++ pdev = (struct platform_device *)smmu_dev_white_list[i].pdev; ++ break; ++ } ++ } ++ ++ if (i >= ARM_SMMMU_DEVICE_MAX || pdev == NULL) { ++ dev_err(&pdev->dev, "fail to find smmu device in white list \n"); ++ return NULL; ++ } ++ ++ *index = i; ++ ++ smmu = platform_get_drvdata(pdev); ++ if (smmu == NULL) ++ return NULL; ++ ++ return smmu; ++} ++ ++int arm_smmu_device_resume(const char *device_name) ++{ ++ int ret; ++ int index = 0; ++ struct arm_smmu_device *smmu = NULL; ++ ++ smmu = get_smmu_device_data(device_name, &index); ++ if (smmu == NULL) ++ return -1; ++ ++ if (index < ARM_SMMMU_DEVICE_MAX) ++ smmu_dev_white_list[index].is_poweron = true; ++ ++ ret = arm_smmu_device_reset(smmu, true); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(arm_smmu_device_resume); ++ ++int arm_smmu_device_reset_ex(const char *device_name) ++{ ++ int ret = -1; ++ int index = 0; ++ struct arm_smmu_device *smmu = NULL; ++ ++ smmu = get_smmu_device_data(device_name, &index); ++ if (smmu == NULL) ++ return -1; ++ ++ if (index < ARM_SMMMU_DEVICE_MAX && smmu_dev_white_list[index].is_poweron == true) ++ ret = arm_smmu_device_reset(smmu, true); ++ ++ return ret; ++} 
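++
++/*
++ * Expected call order for the exports in this block, inferred from the
++ * whitelist state machine above: arm_smmu_device_post_probe() once the
++ * NPU power domain first comes up, arm_smmu_device_suspend() before each
++ * power-down, then arm_smmu_device_resume() after power-up (or
++ * arm_smmu_device_reset_ex() to re-run the reset while powered on).
++ */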
++EXPORT_SYMBOL_GPL(arm_smmu_device_reset_ex); ++ ++const char *arm_smmu_get_device_name(struct iommu_domain *domain) ++{ ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ struct arm_smmu_device *smmu = smmu_domain->smmu; ++ return dev_name(smmu->dev); ++} ++EXPORT_SYMBOL_GPL(arm_smmu_get_device_name); ++#endif ++ + static void arm_smmu_device_remove(struct platform_device *pdev) + { + struct arm_smmu_device *smmu = platform_get_drvdata(pdev); ++#ifdef CONFIG_VENDOR_NPU ++ const char *device_name = dev_name(smmu->dev); + ++ iopf_queue_free(smmu->evtq.iopf); ++ if (arm_smmu_device_is_in_wl(device_name)) { ++ return; ++ } ++#else + iommu_device_unregister(&smmu->iommu); + iommu_device_sysfs_remove(&smmu->iommu); + arm_smmu_device_disable(smmu); + iopf_queue_free(smmu->evtq.iopf); + ida_destroy(&smmu->vmid_map); ++#endif + } + + static void arm_smmu_device_shutdown(struct platform_device *pdev) +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +index 9915850dd..0aaeb5845 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +@@ -426,6 +426,27 @@ + #define MSI_IOVA_BASE 0x8000000 + #define MSI_IOVA_LENGTH 0x100000 + ++#if defined(CONFIG_VENDOR_NPU) ++#define VENDOR_TOP_CTL_BASE (0x30000) ++ ++#define SMMU_IRPT_MASK_NS (VENDOR_TOP_CTL_BASE + 0x70) ++#define TCU_EVENT_TO_MASK BIT(5) ++#define VENDOR_VAL_MASK 0xffffffff ++ ++#define SMMU_IRPT_RAW_NS (VENDOR_TOP_CTL_BASE + 0x74) ++ ++#define SMMU_IRPT_STAT_NS (VENDOR_TOP_CTL_BASE + 0x78) ++#define TCU_EVENT_Q_IRQ BIT(0) ++#define TCU_CMD_SYNC_IRQ BIT(1) ++#define TCU_GERROR_IRQ BIT(2) ++ ++#define SMMU_IRPT_CLR_NS (VENDOR_TOP_CTL_BASE + 0x7c) ++#define TCU_EVENT_Q_IRQ_CLR BIT(0) ++#define TCU_CMD_SYNC_IRQ_CLR BIT(1) ++#define TCU_GERROR_IRQ_CLR BIT(2) ++#define TCU_EVENTTO_CLR BIT(5) ++#endif ++ + enum pri_resp { + PRI_RESP_DENY = 0, + PRI_RESP_FAIL = 1, +@@ -682,6 +703,9 @@ struct arm_smmu_device { + + struct rb_root streams; + struct mutex streams_mutex; ++#if defined(CONFIG_VENDOR_NPU) ++ bool bypass; ++#endif + }; + + struct arm_smmu_stream { +diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c +index b78671a8a..9677b96b4 100644 +--- a/drivers/iommu/iommu-sva.c ++++ b/drivers/iommu/iommu-sva.c +@@ -148,6 +148,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); + /* + * I/O page fault handler for SVA + */ ++#if defined(CONFIG_VENDOR_NPU) ++int svm_flush_cache(struct mm_struct *mm, unsigned long addr, size_t size); ++#endif + enum iommu_page_response_code + iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) + { +@@ -195,7 +198,12 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) + ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL); + status = ret & VM_FAULT_ERROR ? 
IOMMU_PAGE_RESP_INVALID : + IOMMU_PAGE_RESP_SUCCESS; +- ++#if defined(CONFIG_VENDOR_NPU) ++ if (status == IOMMU_PAGE_RESP_SUCCESS) { ++ unsigned long aligned_addr = prm->addr & PAGE_MASK; ++ svm_flush_cache(vma->vm_mm, aligned_addr, PAGE_SIZE); ++ } ++#endif + out_put_mm: + mmap_read_unlock(mm); + mmput(mm); +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index 3f1029c08..085f21458 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -3548,3 +3548,177 @@ void iommu_free_global_pasid(ioasid_t pasid) + ida_free(&iommu_global_pasid_ida, pasid); + } + EXPORT_SYMBOL_GPL(iommu_free_global_pasid); ++ ++#if defined(CONFIG_VENDOR_NPU) ++/** ++ * iommu_group_get_for_dev - Find or create the IOMMU group for a device ++ * @dev: target device ++ * ++ * This function is intended to be called by IOMMU drivers and extended to ++ * support common, bus-defined algorithms when determining or creating the ++ * IOMMU group for a device. On success, the caller will hold a reference ++ * to the returned IOMMU group, which will already include the provided ++ * device. The reference should be released with iommu_group_put(). ++ */ ++static struct iommu_group *iommu_group_get_for_dev(struct device *dev) ++{ ++ const struct iommu_ops *ops = dev->bus->iommu_ops; ++ struct iommu_group *group; ++ int ret; ++ ++ group = iommu_group_get(dev); ++ if (group) ++ return group; ++ ++ if (!ops) ++ return ERR_PTR(-EINVAL); ++ ++ group = ops->device_group(dev); ++ if (WARN_ON_ONCE(group == NULL)) ++ return ERR_PTR(-EINVAL); ++ ++ if (IS_ERR(group)) ++ return group; ++ ++ ret = iommu_group_add_device(group, dev); ++ if (ret) ++ goto out_put_group; ++ ++ return group; ++ ++out_put_group: ++ iommu_group_put(group); ++ ++ return ERR_PTR(ret); ++} ++ ++static int iommu_group_device_count(struct iommu_group *group) ++{ ++ struct group_device *entry; ++ int ret = 0; ++ ++ list_for_each_entry(entry, &group->devices, list) ++ ret++; ++ ++ return ret; ++} ++ ++/* Request that a device is direct mapped by the IOMMU */ ++int iommu_request_dm_for_dev(struct device *dev) ++{ ++ struct iommu_domain *dm_domain; ++ struct iommu_group *group; ++ int ret; ++ ++ /* Device must already be in a group before calling this function */ ++ group = iommu_group_get_for_dev(dev); ++ if (IS_ERR(group)) ++ return PTR_ERR(group); ++ ++ mutex_lock(&group->mutex); ++ ++ /* Check if the default domain is already direct mapped */ ++ ret = 0; ++ if (group->default_domain && ++ group->default_domain->type == IOMMU_DOMAIN_IDENTITY) ++ goto out; ++ ++ /* Don't change mappings of existing devices */ ++ ret = -EBUSY; ++ if (iommu_group_device_count(group) != 1) ++ goto out; ++ ++ /* Allocate a direct mapped domain */ ++ ret = -ENOMEM; ++ dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY); ++ if (!dm_domain) ++ goto out; ++ ++ /* Attach the device to the domain */ ++ ret = __iommu_attach_group(dm_domain, group); ++ if (ret) { ++ iommu_domain_free(dm_domain); ++ goto out; ++ } ++ /* Make the direct mapped domain the default for this group */ ++ if (group->default_domain) ++ iommu_domain_free(group->default_domain); ++ group->default_domain = dm_domain; ++ ++ pr_info("Using direct mapping for device %s\n", dev_name(dev)); ++ ++ ret = 0; ++out: ++ mutex_unlock(&group->mutex); ++ iommu_group_put(group); ++ ++ return ret; ++} ++ ++static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) ++{ ++ struct notifier_block *nb; ++ int err; ++ ++ nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); ++ if (!nb) ++ 
return -ENOMEM; ++ ++ nb->notifier_call = iommu_bus_notifier; ++ ++ err = bus_register_notifier(bus, nb); ++ if (err) ++ goto out_free; ++ ++ err = bus_iommu_probe(bus); ++ if (err) ++ goto out_err; ++ ++ return 0; ++ ++out_err: ++ /* Clean up */ ++ bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); ++ bus_unregister_notifier(bus, nb); ++ ++out_free: ++ kfree(nb); ++ ++ return err; ++} ++ ++/** ++ * bus_set_iommu - set iommu-callbacks for the bus ++ * @bus: bus. ++ * @ops: the callbacks provided by the iommu-driver ++ * ++ * This function is called by an iommu driver to set the iommu methods ++ * used for a particular bus. Drivers for devices on that bus can use ++ * the iommu-api after these ops are registered. ++ * This special function is needed because IOMMUs are usually devices on ++ * the bus itself, so the iommu drivers are not initialized when the bus ++ * is set up. With this function the iommu-driver can set the iommu-ops ++ * afterwards. ++ */ ++int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) ++{ ++ int err; ++ ++ if (ops == NULL) { ++ bus->iommu_ops = NULL; ++ return 0; ++ } ++ ++ if (bus->iommu_ops != NULL) ++ return -EBUSY; ++ ++ bus->iommu_ops = ops; ++ ++ /* Do IOMMU specific setup for this bus-type */ ++ err = iommu_bus_init(bus, ops); ++ if (err) ++ bus->iommu_ops = NULL; ++ ++ return err; ++} ++#endif +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index 68d71b4b5..ef7e99363 100644 +--- a/drivers/mfd/Kconfig ++++ b/drivers/mfd/Kconfig +@@ -572,7 +572,17 @@ config MFD_HI655X_PMIC + select REGMAP_IRQ + help + Select this option to enable Hisilicon hi655x series pmic driver. +- ++if ARCH_BSP ++config MFD_BSP_FMC ++ tristate "Vendor Flash Memory Controller" ++ depends on OF ++ depends on ARCH_BSP ++ select MFD_CORE ++ select REGMAP_MMIO ++ help ++ Select this option to enable the Vendor Flash Memory ++ Controller(FMC) driver. ++endif + config MFD_INTEL_QUARK_I2C_GPIO + tristate "Intel Quark MFD I2C GPIO" + depends on PCI +diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile +index db1ba39de..e1dbdc830 100644 +--- a/drivers/mfd/Makefile ++++ b/drivers/mfd/Makefile +@@ -213,6 +213,9 @@ obj-$(CONFIG_MFD_AT91_USART) += at91-usart.o + obj-$(CONFIG_MFD_ATMEL_FLEXCOM) += atmel-flexcom.o + obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o + obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o ++ifdef CONFIG_ARCH_BSP ++obj-$(CONFIG_MFD_BSP_FMC) += bsp_fmc.o ++endif + obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o + obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o + obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o +diff --git a/drivers/mfd/bsp_fmc.c b/drivers/mfd/bsp_fmc.c +new file mode 100644 +index 000000000..bb571ed25 +--- /dev/null ++++ b/drivers/mfd/bsp_fmc.c +@@ -0,0 +1,135 @@ ++/* Vendor Flash Memory Controller Driver ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++unsigned char fmc_cs_user[FMC_MAX_CHIP_NUM]; ++ ++DEFINE_MUTEX(fmc_switch_mutex); ++EXPORT_SYMBOL_GPL(fmc_switch_mutex); ++ ++/* ------------------------------------------------------------------------ */ ++static const struct mfd_cell bsp_fmc_devs[] = { ++ { ++ .name = "bsp_spi_nor", ++ .of_compatible = "vendor,fmc-spi-nor", ++ }, ++ { ++ .name = "bsp_spi_nand", ++ .of_compatible = "vendor,fmc-spi-nand", ++ }, ++ { ++ .name = "bsp_nand", ++ .of_compatible = "vendor,fmc-nand", ++ }, ++}; ++ ++static int bsp_fmc_probe(struct platform_device *pdev) ++{ ++ struct bsp_fmc *fmc = NULL; ++ struct resource *res = NULL; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ fmc = devm_kzalloc(dev, sizeof(*fmc), GFP_KERNEL); ++ if (!fmc) ++ return -ENOMEM; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); ++ fmc->regbase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(fmc->regbase)) ++ return PTR_ERR(fmc->regbase); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); ++ fmc->iobase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(fmc->iobase)) ++ return PTR_ERR(fmc->iobase); ++ ++ fmc->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(fmc->clk)) ++ return PTR_ERR(fmc->clk); ++ ++ if (of_property_read_u32(dev->of_node, "max-dma-size", &fmc->dma_len)) { ++ dev_err(dev, "Please set the suitable max-dma-size value !!!\n"); ++ return -ENOMEM; ++ } ++ ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34)); ++ if (ret) { ++ dev_warn(dev, "Unable to set dma mask\n"); ++ return ret; ++ } ++ ++ fmc->buffer = dmam_alloc_coherent(dev, fmc->dma_len, ++ &fmc->dma_buffer, GFP_KERNEL); ++ if (!fmc->buffer) { ++ WARN_ON(1); ++ return -ENOMEM; ++ } ++ ++ mutex_init(&fmc->lock); ++ ++ platform_set_drvdata(pdev, fmc); ++ ret = mfd_add_devices(dev, 0, bsp_fmc_devs, ++ ARRAY_SIZE(bsp_fmc_devs), NULL, 0, NULL); ++ if (ret) { ++ dev_err(dev, "add mfd devices failed: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int bsp_fmc_remove(struct platform_device *pdev) ++{ ++ struct bsp_fmc *fmc = platform_get_drvdata(pdev); ++ ++ dmam_free_coherent(&pdev->dev, fmc->dma_len, ++ fmc->buffer, fmc->dma_buffer); ++ mfd_remove_devices(&pdev->dev); ++ mutex_destroy(&fmc->lock); ++ ++ return 0; ++} ++ ++static const struct of_device_id bsp_fmc_of_match_tbl[] = { ++ {.compatible = "vendor,fmc"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bsp_fmc_of_match_tbl); ++ ++static struct platform_driver bsp_fmc_driver = { ++ .driver = { ++ .name = "fmc", ++ .of_match_table = bsp_fmc_of_match_tbl, ++ }, ++ .probe = bsp_fmc_probe, ++ .remove = bsp_fmc_remove, ++}; ++module_platform_driver(bsp_fmc_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Flash Memory Controller Driver"); +diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h +index 37091a658..57087ef2b 100644 +--- a/drivers/mmc/core/core.h ++++ b/drivers/mmc/core/core.h +@@ -59,6 +59,10 @@ void mmc_power_off(struct mmc_host *host); + void mmc_power_cycle(struct mmc_host *host, u32 ocr); + void mmc_set_initial_state(struct mmc_host *host); + u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); ++#ifdef CONFIG_MMC_QUICKBOOT ++int mmc_quick_init_card(struct mmc_host *host, u32 ocr, ++ struct mmc_card *oldcard); ++#endif + + static inline void mmc_delay(unsigned int ms) + { +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c +index 7e39017e4..594b06f47 100644 +--- a/drivers/mmc/core/mmc.c ++++ 
b/drivers/mmc/core/mmc.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + #include "core.h" + #include "card.h" +@@ -1433,6 +1434,9 @@ static int mmc_select_hs400es(struct mmc_card *card) + + /* Set host controller to HS400 timing and frequency */ + mmc_set_timing(host, MMC_TIMING_MMC_HS400); ++#if defined(CONFIG_MMC_SDHCI_NEBULA) || (defined(MODULE) && defined(CONFIG_MMC_SDHCI_BSP_MODULE)) ++ mmc_set_bus_speed(card); ++#endif + + /* Controller enable enhanced strobe function */ + host->ios.enhanced_strobe = true; +@@ -1542,7 +1546,7 @@ static int mmc_select_timing(struct mmc_card *card) + if (!mmc_can_ext_csd(card)) + goto bus_speed; + +- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) { ++ if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) && (card->host->caps & MMC_CAP_8_BIT_DATA)) { + err = mmc_select_hs400es(card); + goto out; + } +@@ -1591,6 +1595,292 @@ static int mmc_hs200_tuning(struct mmc_card *card) + return mmc_execute_tuning(card); + } + ++#ifdef CONFIG_MMC_QUICKBOOT ++#include "../../vendor/mmc/adapter/nebula_quick.h" ++static const struct mmc_bus_ops mmc_ops; ++/* ++ * Handle the detection and initialisation of a card. ++ * ++ * In the case of a resume, "oldcard" will contain the card ++ * we're trying to reinitialise. ++ */ ++int mmc_quick_init_card(struct mmc_host *host, u32 ocr, ++ struct mmc_card *oldcard) ++{ ++ struct mmc_card *card; ++ int err = 0; ++ ++ mmc_claim_host(host); ++ ++ WARN_ON(!host->claimed); ++ ++ mmc_attach_bus(host, &mmc_ops); ++ if (host->ocr_avail_mmc) ++ host->ocr_avail = host->ocr_avail_mmc; ++ ++ /* ++ * Allocate card structure. ++ */ ++ card = mmc_alloc_card(host, &mmc_type); ++ if (IS_ERR(card)) { ++ err = PTR_ERR(card); ++ goto err; ++ } ++ ++ card->ocr = mmc_select_voltage(host, ocr); ++ card->type = MMC_TYPE_MMC; ++ card->rca = 1; ++ memset_s(card->raw_cid, sizeof(card->raw_cid), 0x0, sizeof(card->raw_cid)); ++ card->raw_cid[0] = MMC_CID_MAGIC; ++ ++ /* For quick boot, csd register can not obtain from device, ++ * should be set to init value. ++ * mmca_vsn[127:126] = 0x4; // support CSD_SPEC_VER_4 ++ * taac_ns[119:112] = 0x7F; // max ++ * taac_clks[111:104] = 0xFF; ++ * max_dtr[103:96] = 0x32; // set to 26MHz ++ * cmdclass[95:84] = 0x21B; // BASIC, BLOCK_READ, BLOCK_WRITE, ERASE, SWITCH ++ * capacity[49:47] = 0x7; // set to max, not used ++ * read_blkbits[83:80] = 0x9; // Block size is 512B ++ * read_partial[79] = 0x0; // not support ++ * write_misalign[78] = 0x0; // not support ++ * read_misalign[77] = 0x0; // not support ++ * dsr_imp[76] = 0x0; // not support ++ * capacity[73:62] = 0xFFF; // set to max, not used ++ * r2w_factor[28:26] = 0x7; ++ * write_blkbits[25:22] = 0x9; // Block size is 512B ++ * write_partial[21] = 0x0; // not support ++ */ ++ card->raw_csd[0] = 0xD0FFFF32; ++ card->raw_csd[1] = 0x435903FF; ++ card->raw_csd[2] = 0xC0038000; ++ card->raw_csd[3] = 0x1E400000; ++ ++ err = mmc_decode_csd(card); ++ if (err) { ++ pr_err("%s: decode csd failed(%d)\n", mmc_hostname(card->host), err); ++ goto free_card; ++ } ++ ++ /* Read extended CSD. */ ++ err = mmc_read_ext_csd(card); ++ if (err) { ++ pr_err("%s: read ext csd failed(%d)\n", mmc_hostname(card->host), err); ++ goto free_card; ++ } ++ ++ /* ++ * If doing byte addressing, check if required to do sector ++ * addressing. Handle the case of <2GB cards needing sector ++ * addressing. See section 8.1 JEDEC Standard JED84-A441; ++ * ocr register has bit 30 set for sector addressing. 
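++	 * Sector-addressed parts take block numbers in the address argument
++	 * of CMD17/CMD18/CMD24/CMD25, while byte-addressed (<2GB) parts
++	 * take byte offsets.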
++ */ ++ if (ocr & BIT(30)) ++ mmc_card_set_blockaddr(card); ++ ++ /* Erase size depends on CSD and Extended CSD */ ++ mmc_set_erase_size(card); ++ ++ /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */ ++ if (card->ext_csd.rev >= 3) { /* 3: revision */ ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, ++ EXT_CSD_ERASE_GROUP_DEF, 1, ++ card->ext_csd.generic_cmd6_time); ++ ++ if (err && err != -EBADMSG) ++ goto free_card; ++ ++ if (err != -EBADMSG) { ++ err = 0; ++ /* ++ * Just disable enhanced area off & sz ++ * will try to enable ERASE_GROUP_DEF ++ * during next time reinit ++ */ ++ card->ext_csd.enhanced_area_offset = -EINVAL; ++ card->ext_csd.enhanced_area_size = -EINVAL; ++ } else { ++ card->ext_csd.erase_group_def = 1; ++ /* ++ * enable ERASE_GRP_DEF successfully. ++ * This will affect the erase size, so ++ * here need to reset erase size ++ */ ++ mmc_set_erase_size(card); ++ } ++ } ++ ++ /* ++ * Ensure eMMC user default partition is enabled ++ */ ++ if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) { ++ card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, ++ card->ext_csd.part_config, ++ card->ext_csd.part_time); ++ if (err && err != -EBADMSG) ++ goto free_card; ++ } ++ ++ /* ++ * Enable power_off_notification byte in the ext_csd register ++ */ ++ if (card->ext_csd.rev >= 6) { /* 6: revision */ ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, ++ EXT_CSD_POWER_OFF_NOTIFICATION, ++ EXT_CSD_POWER_ON, ++ card->ext_csd.generic_cmd6_time); ++ if (err && err != -EBADMSG) ++ goto free_card; ++ ++ /* ++ * The err can be -EBADMSG or 0, ++ * so check for success and update the flag ++ */ ++ if (!err) ++ card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; ++ } ++ ++ mmc_set_bus_speed(card); ++ ++ /* ++ * Choose the power class with selected bus interface ++ */ ++ mmc_select_powerclass(card); ++ ++ /* ++ * Enable HPI feature (if supported) ++ */ ++ if (card->ext_csd.hpi) { ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, ++ EXT_CSD_HPI_MGMT, 1, ++ card->ext_csd.generic_cmd6_time); ++ if (err && err != -EBADMSG) ++ goto free_card; ++ if (err) { ++ pr_warn("%s: Enabling HPI failed\n", ++ mmc_hostname(card->host)); ++ card->ext_csd.hpi_en = 0; ++ err = 0; ++ } else { ++ card->ext_csd.hpi_en = 1; ++ } ++ } ++ ++ /* ++ * If cache size is higher than 0, this indicates the existence of cache ++ * and it can be turned on. Note that some eMMCs from Micron has been ++ * reported to need ~800 ms timeout, while enabling the cache after ++ * sudden power failure tests. Let's extend the timeout to a minimum of ++ * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards. ++ */ ++ if (card->ext_csd.cache_size > 0) { ++ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS; ++ ++ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms); ++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, ++ EXT_CSD_CACHE_CTRL, 1, timeout_ms); ++ if (err && err != -EBADMSG) ++ goto free_card; ++ ++ /* ++ * Only if no error, cache is turned on successfully. ++ */ ++ if (err) { ++ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n", ++ mmc_hostname(card->host), err); ++ card->ext_csd.cache_ctrl = 0; ++ err = 0; ++ } else { ++ card->ext_csd.cache_ctrl = 1; ++ } ++ } ++ ++ /* ++ * Enable Command Queue if supported. Note that Packed Commands cannot ++ * be used with Command Queue. 
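++	 * (CMDQ is attempted below only when the device reports cmdq_support
++	 * in EXT_CSD and the host advertises MMC_CAP2_CQE.)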
++ */ ++ card->ext_csd.cmdq_en = false; ++ if (card->ext_csd.cmdq_support && (host->caps2 & MMC_CAP2_CQE)) { ++ err = mmc_cmdq_enable(card); ++ if (err && err != -EBADMSG) ++ goto free_card; ++ if (err) { ++ pr_warn("%s: Enabling CMDQ failed\n", ++ mmc_hostname(card->host)); ++ card->ext_csd.cmdq_support = false; ++ card->ext_csd.cmdq_depth = 0; ++ err = 0; ++ } ++ } ++ /* ++ * In some cases (e.g. RPMB or mmc_test), the Command Queue must be ++ * disabled for a time, so a flag is needed to indicate to re-enable the ++ * Command Queue. ++ */ ++ card->reenable_cmdq = card->ext_csd.cmdq_en; ++ ++ if (card->ext_csd.cmdq_en && !host->cqe_enabled) { ++ err = host->cqe_ops->cqe_enable(host, card); ++ if (err) { ++ pr_err("%s: Failed to enable CQE, error %d\n", ++ mmc_hostname(host), err); ++ } else { ++ host->cqe_enabled = true; ++ pr_info("%s: Command Queue Engine enabled\n", ++ mmc_hostname(host)); ++ } ++ } ++ ++ if ((host->caps2 & MMC_CAP2_AVOID_3_3V) && ++ (host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330)) { ++ pr_err("%s: Host failed to negotiate down from 3.3V\n", ++ mmc_hostname(host)); ++ err = -EINVAL; ++ goto free_card; ++ } ++ ++ if (!oldcard) ++ host->card = card; ++ ++ /* detect card alive */ ++ if (host->bus_ops->alive(host)) { ++ pr_info("%s: card seems dead!\n", mmc_hostname(host)); ++ err = -EINVAL; ++ goto free_card; ++ } ++ ++ mmc_release_host(host); ++ ++ err = mmc_add_card(host->card); ++ if (err) { ++ pr_err("%s: mmc add card failed.\n", mmc_hostname(host)); ++ mmc_claim_host(host); ++ goto free_card; ++ } ++ ++ return 0; ++ ++free_card: ++ if (!oldcard) ++ mmc_remove_card(card); ++ ++ host->card = NULL; ++ ++ mmc_detach_bus(host); ++ ++ /* here only set power mode to MMC_POWER_OFF */ ++ host->ios.power_mode = MMC_POWER_OFF; ++ ++ pr_err("%s: error %d whilst quick initialising MMC card\n", ++ mmc_hostname(host), err); ++err: ++ mmc_release_host(host); ++ return err; ++} ++EXPORT_SYMBOL(mmc_quick_init_card); ++#endif ++ + /* + * Handle the detection and initialisation of a card. 
+ * +@@ -1605,6 +1895,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, + u32 cid[4]; + u32 rocr; + ++#ifdef CONFIG_MMC_QUICKBOOT ++ /* Call here should clear sleep mode */ ++ if (mmc_is_fast_boot(mmc_priv(host)) == QUICK_BOOT_WARM) { ++ mmc_set_cur_mode(mmc_priv(host), INIT_MODE); ++ } ++#endif ++ + WARN_ON(!host->claimed); + + /* Set correct bus mode for MMC before attempting init */ +@@ -1642,6 +1939,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, + goto err; + + if (oldcard) { ++#ifdef CONFIG_MMC_QUICKBOOT ++ /* Fastboot emmc cid is null, next just init only */ ++ if ((mmc_is_fast_boot(mmc_priv(host)) != QUICK_BOOT_DIS) && \ ++ (oldcard->raw_cid[0] == MMC_CID_MAGIC)) { ++ memcpy_s(oldcard->raw_cid, sizeof(card->raw_cid), cid, sizeof(card->raw_cid)); ++ } ++#endif + if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); +@@ -1936,6 +2240,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, + if (!oldcard) + host->card = card; + ++#ifdef CONFIG_MMC_QUICKBOOT ++ mmc_save_parameters(host); ++#endif ++ + return 0; + + free_card: +diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c +index fe7a4eac9..833da2d81 100644 +--- a/drivers/mmc/host/cqhci-core.c ++++ b/drivers/mmc/host/cqhci-core.c +@@ -52,6 +52,10 @@ static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag) + + static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag) + { ++ if (cq_host->quirks & CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT) ++ return cq_host->trans_desc_dma_base + ++ (cq_host->mmc->max_segs * tag * 2 * ++ cq_host->trans_desc_len); + size_t offset = get_trans_desc_offset(cq_host, tag); + + return cq_host->trans_desc_dma_base + offset; +@@ -59,6 +63,10 @@ static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag) + + static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag) + { ++ if (cq_host->quirks & CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT) ++ return cq_host->trans_desc_base + ++ (cq_host->trans_desc_len * ++ cq_host->mmc->max_segs * 2 * tag); + size_t offset = get_trans_desc_offset(cq_host, tag); + + return cq_host->trans_desc_base + offset; +@@ -200,7 +208,11 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) + + cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; + +- cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth); ++ if (cq_host->quirks & CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT) ++ cq_host->data_size = cq_host->trans_desc_len * ++ cq_host->mmc->max_segs * 2 * cq_host->mmc->cqe_qdepth; ++ else ++ cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth); + + pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", + mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, +@@ -276,6 +288,8 @@ static void __cqhci_enable(struct cqhci_host *cq_host) + + cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2); + ++ cqhci_writel(cq_host, SEND_QSR_INTERVAL, CQHCI_SSC1); ++ + cqhci_set_irqs(cq_host, 0); + + cqcfg |= CQHCI_ENABLE; +@@ -285,8 +299,6 @@ static void __cqhci_enable(struct cqhci_host *cq_host) + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) + cqhci_writel(cq_host, 0, CQHCI_CTL); + +- mmc->cqe_on = true; +- + if (cq_host->ops->enable) + cq_host->ops->enable(mmc); + +@@ -306,8 +318,6 @@ static void __cqhci_disable(struct cqhci_host *cq_host) + cqcfg &= ~CQHCI_ENABLE; + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); + +- cq_host->mmc->cqe_on = false; +- + 
cq_host->activated = false; + } + +@@ -392,6 +402,8 @@ static void cqhci_off(struct mmc_host *mmc) + cq_host->ops->post_disable(mmc); + + mmc->cqe_on = false; ++ ++ cqhci_deactivate(mmc); + } + + static void cqhci_disable(struct mmc_host *mmc) +@@ -496,6 +508,27 @@ static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end, + } + } + ++static void _cqhci_set_tran_desc(struct cqhci_host *cq_host, u8 **desc, ++ dma_addr_t addr, int len, bool end, bool dma64, unsigned int blksz) ++{ ++ int desc_len; ++ ++ if ((cq_host->quirks & CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT) && ++ ((addr % SYNOPSYS_DMA_LIMIT) + len) > SYNOPSYS_DMA_LIMIT) { ++ if (((addr + (unsigned int)len) % SYNOPSYS_DMA_LIMIT) < blksz) ++ BUG_ON(1); ++ ++ desc_len = (SYNOPSYS_DMA_LIMIT - addr % SYNOPSYS_DMA_LIMIT); ++ cqhci_set_tran_desc(*desc, addr, desc_len, false, dma64); ++ ++ *desc = *desc + cq_host->trans_desc_len; ++ len -= desc_len; ++ addr += desc_len; ++ } ++ ++ cqhci_set_tran_desc(*desc, addr, len, end, dma64); ++} ++ + static int cqhci_prep_tran_desc(struct mmc_request *mrq, + struct cqhci_host *cq_host, int tag) + { +@@ -522,7 +555,7 @@ static int cqhci_prep_tran_desc(struct mmc_request *mrq, + + if ((i+1) == sg_count) + end = true; +- cqhci_set_tran_desc(desc, addr, len, end, dma64); ++ _cqhci_set_tran_desc(cq_host, &desc, addr, len, end, dma64, data->blksz); + desc += cq_host->trans_desc_len; + } + +@@ -1003,6 +1036,9 @@ static void cqhci_recovery_start(struct mmc_host *mmc) + cq_host->ops->disable(mmc, true); + + mmc->cqe_on = false; ++ ++ cqhci_deactivate(mmc); ++ + } + + static int cqhci_error_from_flags(unsigned int flags) +diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h +index 1a12e40a0..0e1b92a0c 100644 +--- a/drivers/mmc/host/cqhci.h ++++ b/drivers/mmc/host/cqhci.h +@@ -93,6 +93,12 @@ + /* send status config 1 */ + #define CQHCI_SSC1 0x40 + #define CQHCI_SSC1_CBC_MASK GENMASK(19, 16) ++/* ++ * Value n means CQE would send CMD13 during the transfer of data block ++ * BLOCK_CNT-n ++ */ ++#define SEND_QSR_INTERVAL 0x70001 ++ + + /* send status config 2 */ + #define CQHCI_SSC2 0x44 +@@ -158,6 +164,8 @@ + #define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32) + #define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0) + ++#define SYNOPSYS_DMA_LIMIT 0x8000000 ++ + /* CCAP - Crypto Capability 100h */ + union cqhci_crypto_capabilities { + __le32 reg_val; +@@ -239,6 +247,7 @@ struct cqhci_host { + + u32 quirks; + #define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1 ++#define CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT 0x2 + + bool enabled; + bool halted; +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 9796a3cb3..3670fe308 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "sdhci.h" + +@@ -381,6 +382,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) + host->reinit_uhs = true; + mmc->ops->set_ios(mmc, &mmc->ios); + } ++ ++ if (host->ops->init) ++ host->ops->init(host); + } + + static void sdhci_reinit(struct sdhci_host *host) +@@ -1791,6 +1795,14 @@ static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) + } + } + ++#define CMD_ERRORS \ ++ (R1_OUT_OF_RANGE | /* Command argument out of range */ \ ++ R1_ADDRESS_ERROR | /* Misaligned address */ \ ++ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ ++ R1_WP_VIOLATION | /* Tried to write to protected block */ \ ++ R1_CC_ERROR | /* Card controller error */ \ ++ R1_ERROR) /* General/unknown error */ ++ + 
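++/*
++ * CMD_ERRORS collects the R1 status bits that mark a failed command;
++ * sdhci_finish_command() below maps any of them in resp[0] to -EACCES
++ * and counts the event in host->error_count (tuning commands excluded).
++ */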
static void sdhci_finish_command(struct sdhci_host *host) + { + struct mmc_command *cmd = host->cmd; +@@ -1803,6 +1815,16 @@ static void sdhci_finish_command(struct sdhci_host *host) + } else { + cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); + } ++ ++ if (((cmd->flags & MMC_RSP_R1) == MMC_RSP_R1) && ++ ((cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)) { ++ if ((cmd->resp[0] & CMD_ERRORS) && !host->is_tuning && ++ (host->error_count < S32_MAX)) { ++ host->error_count++; ++ cmd->mrq->cmd->error = -EACCES; ++ pr_err("The status of the card is abnormal, cmd->resp[0]: %x", cmd->resp[0]); ++ } ++ } + } + + if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) +@@ -2461,7 +2483,9 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) + } + + /* Re-enable SD Clock */ +- host->ops->set_clock(host, host->clock); ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ clk |= SDHCI_CLOCK_CARD_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + } else + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } +@@ -2616,6 +2640,9 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, + u16 ctrl; + int ret; + ++ if (host->ops->signal_voltage_switch) ++ return host->ops->signal_voltage_switch(host, ios); ++ + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. +@@ -3050,6 +3077,41 @@ static void sdhci_card_event(struct mmc_host *mmc) + spin_unlock_irqrestore(&host->lock, flags); + } + ++static int sdhci_card_info_save(struct mmc_host *mmc) ++{ ++ struct mmc_card *card = mmc->card; ++ struct sdhci_host *host= mmc_priv(mmc); ++ struct card_info *c_info = &host->c_info; ++ int ret; ++ ++ if (card == NULL) { ++ ret = memset_s(c_info, sizeof(struct card_info), 0, sizeof(struct card_info)); ++ if (ret != EOK) ++ pr_err("memset_s c_info failed\n"); ++ c_info->card_connect = CARD_DISCONNECT; ++ goto out; ++ } ++ ++ c_info->card_type = card->type; ++ c_info->card_state = card->state; ++ ++ c_info->timing = mmc->ios.timing; ++ c_info->enhanced_strobe = mmc->ios.enhanced_strobe; ++ c_info->card_support_clock = mmc->ios.clock; ++ ++ c_info->sd_bus_speed = card->sd_bus_speed; ++ ++ ret = memcpy_s(c_info->ssr, sizeof(c_info->ssr), card->raw_ssr, 64); /* SSR length: 512bit / 8 = 64 byte */ ++ if (ret != EOK) { ++ pr_err("SD Status Reg memcpy_s failed\n"); ++ return ret; ++ } ++ ++ c_info->card_connect = CARD_CONNECT; ++out: ++ return 0; ++} ++ + static const struct mmc_host_ops sdhci_ops = { + .request = sdhci_request, + .post_req = sdhci_post_req, +@@ -3065,6 +3127,7 @@ static const struct mmc_host_ops sdhci_ops = { + .execute_tuning = sdhci_execute_tuning, + .card_event = sdhci_card_event, + .card_busy = sdhci_card_busy, ++ .card_info_save = sdhci_card_info_save, + }; + + /*****************************************************************************\ +@@ -3181,6 +3244,10 @@ static bool sdhci_request_done(struct sdhci_host *host) + } + } + ++ if ((mrq->data != NULL) && (mrq->data->error != 0) && ++ !host->is_tuning && (host->error_count < S32_MAX)) ++ host->error_count++; ++ + host->mrqs_done[i] = NULL; + + spin_unlock_irqrestore(&host->lock, flags); +@@ -3287,10 +3354,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) + */ + if (host->pending_reset) + return; ++#ifndef CONFIG_MMC_SDHCI_NEBULA + pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); + sdhci_err_stats_inc(host, UNEXPECTED_IRQ); + sdhci_dumpregs(host); ++#endif + return; + } + +@@ 
-3426,10 +3495,12 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) + if (host->pending_reset) + return; + ++#ifndef CONFIG_MMC_SDHCI_NEBULA + pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", + mmc_hostname(host->mmc), (unsigned)intmask); + sdhci_err_stats_inc(host, UNEXPECTED_IRQ); + sdhci_dumpregs(host); ++#endif + + return; + } +@@ -3545,6 +3616,10 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) + do { + DBG("IRQ status 0x%08x\n", intmask); + ++ if (((intmask & SDHCI_INT_ERROR) != 0) && !host->is_tuning && ++ (host->error_count < S32_MAX)) ++ host->error_count++; ++ + if (host->ops->irq) { + intmask = host->ops->irq(host, intmask); + if (!intmask) +@@ -3907,10 +3982,13 @@ void sdhci_cqe_enable(struct mmc_host *mmc) + { + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; ++#ifndef CONFIG_MMC_SDHCI_NEBULA + u8 ctrl; ++#endif + + spin_lock_irqsave(&host->lock, flags); + ++#ifndef CONFIG_MMC_SDHCI_NEBULA + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + /* +@@ -3928,6 +4006,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc) + + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), + SDHCI_BLOCK_SIZE); ++#endif + + /* Set maximum timeout */ + sdhci_set_timeout(host, NULL); +@@ -4059,7 +4138,12 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, + * number of segments times 2, to allow for an alignment + * descriptor for each segment, plus 1 for a nop end descriptor. + */ ++#ifdef CONFIG_MMC_SDHCI_NEBULA ++ /* 3: Add 1 descriptor for each segment, to handle the ADMA 128MB boundary limitation. */ ++ host->adma_table_cnt = SDHCI_MAX_SEGS * 3 + 1; ++#else + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; ++#endif + host->max_adma = 65536; + + host->max_timeout_count = 0xE; +@@ -4122,10 +4206,10 @@ void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, + if (host->v4_mode) + sdhci_do_enable_v4_mode(host); + +- device_property_read_u64(mmc_dev(host->mmc), +- "sdhci-caps-mask", &dt_caps_mask); +- device_property_read_u64(mmc_dev(host->mmc), +- "sdhci-caps", &dt_caps); ++ device_property_read_u64_array(mmc_dev(host->mmc), ++ "sdhci-caps-mask", &dt_caps_mask, 1); ++ device_property_read_u64_array(mmc_dev(host->mmc), ++ "sdhci-caps", &dt_caps, 1); + + v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); + host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; +@@ -4908,6 +4992,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) + + sdhci_disable_card_detection(host); + ++ free_irq(host->irq, host); ++ + mmc_remove_host(mmc); + + sdhci_led_unregister(host); +@@ -4917,7 +5003,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) + + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); +- free_irq(host->irq, host); + + del_timer_sync(&host->timer); + del_timer_sync(&host->data_timer); +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index a315cee69..c81e2cbb1 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -193,7 +193,7 @@ + #define SDHCI_CTRL_UHS_SDR50 0x0002 + #define SDHCI_CTRL_UHS_SDR104 0x0003 + #define SDHCI_CTRL_UHS_DDR50 0x0004 +-#define SDHCI_CTRL_HS400 0x0005 /* Non-standard */ ++#define SDHCI_CTRL_HS400 0x0007 /* Non-standard */ + #define SDHCI_CTRL_VDD_180 0x0008 + #define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 + #define SDHCI_CTRL_DRV_TYPE_B 0x0000 +@@ -294,6 +294,7 @@ + + #define SDHCI_MAX_DIV_SPEC_200 256 + #define SDHCI_MAX_DIV_SPEC_300 2046 ++#define SDHCI_DMA_BOUNDARY_SIZE (0x1 << 27) + + /* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. +@@ -373,6 +374,19 @@ enum sdhci_cookie { + COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */ + }; + ++struct card_info { ++ unsigned int card_type; ++ unsigned char timing; ++ bool enhanced_strobe; ++ unsigned char card_connect; ++#define CARD_CONNECT 1 ++#define CARD_DISCONNECT 0 ++ unsigned int card_support_clock; /* clock rate */ ++ unsigned int card_state; /* (our) card state */ ++ unsigned int sd_bus_speed; ++ unsigned int ssr[16]; ++}; ++ + struct sdhci_host { + /* Data set by hardware interface driver */ + const char *hw_name; /* Hardware bus name */ +@@ -431,6 +445,8 @@ struct sdhci_host { + #define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25) + /* Controller cannot support End Attribute in NOP ADMA descriptor */ + #define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26) ++/* Controller is missing device caps. 
Use caps provided by host */ ++#define SDHCI_QUIRK_MISSING_CAPS (1<<27) + /* Controller uses Auto CMD12 command to stop the transfer */ + #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) + /* Controller doesn't have HISPD bit field in HI-SPEED SD card */ +@@ -622,6 +638,10 @@ struct sdhci_host { + + u64 data_timeout; + ++ bool is_tuning; ++ unsigned int error_count; ++ struct card_info c_info; ++ + unsigned long private[] ____cacheline_aligned; + }; + +@@ -661,6 +681,9 @@ struct sdhci_ops { + void (*adma_workaround)(struct sdhci_host *host, u32 intmask); + void (*card_event)(struct sdhci_host *host); + void (*voltage_switch)(struct sdhci_host *host); ++ void (*init)(struct sdhci_host *host); ++ int (*signal_voltage_switch)(struct sdhci_host *host, ++ struct mmc_ios *ios); + void (*adma_write_desc)(struct sdhci_host *host, void **desc, + dma_addr_t addr, int len, unsigned int cmd); + void (*copy_to_bounce_buffer)(struct sdhci_host *host, +diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile +index 593d0593a..d4f0b233b 100644 +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -26,8 +26,14 @@ obj-$(CONFIG_MTD_SWAP) += mtdswap.o + nftl-objs := nftlcore.o nftlmount.o + inftl-objs := inftlcore.o inftlmount.o + ++ifdef CONFIG_ARCH_BSP ++obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ ++obj-y += chips/ lpddr/ maps/ devices/ nand/ tests/ ++else + obj-y += chips/ lpddr/ maps/ devices/ nand/ tests/ + ++obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ ++endif + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ + obj-$(CONFIG_MTD_UBI) += ubi/ + obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/ +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +index 5b0c2c95f..09382cbd3 100644 +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -6,6 +6,34 @@ config MTD_NAND_CORE + tristate + + source "drivers/mtd/nand/onenand/Kconfig" ++ ++if ARCH_BSP ++config MTD_SPI_NAND_BSP ++ tristate "Support for SPI NAND controller on Vendor SoCs" ++ depends on MTD_RAW_NAND ++ help ++ Enables support for the SPI NAND device drivers. ++ ++config BSP_NAND_ECC_STATUS_REPORT ++ tristate "Report the ECC status to MTD for the NAND driver" ++ depends on MTD_RAW_NAND && ARCH_BSP ++ default n ++ help ++ The Flash Memory Controller reports the ECC status, including ECC ++ errors and ECC corrections, to MTD to monitor the aging of devices. ++ ++config BSP_NAND_FS_MAY_NO_YAFFS2 ++ bool "Remove the restriction of the 16bit ecc type on yaffs2 for Vendor" ++ depends on MFD_BSP_FMC ++ default n ++ help ++ The 16bit ecc type is limited by the Vendor flash memory controller, ++ as the yaffs2 tag of the rootfs requires a minimum CTRL length of 28. 
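For context on the BSP_NAND_ECC_STATUS_REPORT option above: once the fmc100 read path feeds corrected and failed counts into mtd->ecc_stats (see fmc100_read_buf later in this patch), they become observable through the standard MTD interface. The following minimal userspace sketch is illustrative only and not part of this patch; the device path /dev/mtd0 is an assumption, and it relies on the ECCGETSTATS ioctl from mtd-abi.h:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

int main(void)
{
	struct mtd_ecc_stats es;
	int fd = open("/dev/mtd0", O_RDONLY); /* hypothetical MTD partition */

	if (fd < 0)
		return 1;
	/* ECCGETSTATS reads back the counters the driver accumulates in mtd->ecc_stats */
	if (ioctl(fd, ECCGETSTATS, &es) == 0)
		printf("corrected: %u failed: %u badblocks: %u\n",
		       es.corrected, es.failed, es.badblocks);
	close(fd);
	return 0;
}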
++ ++source "drivers/mtd/nand/fmc100/Kconfig" ++source "drivers/mtd/nand/fmc100_nand/Kconfig" ++endif ++ + source "drivers/mtd/nand/raw/Kconfig" + source "drivers/mtd/nand/spi/Kconfig" + +diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile +index 19e1291ac..9ba941d48 100644 +--- a/drivers/mtd/nand/Makefile ++++ b/drivers/mtd/nand/Makefile +@@ -1,6 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0 + + nandcore-objs := core.o bbt.o ++ifdef CONFIG_ARCH_BSP ++obj-$(CONFIG_MTD_NAND_FMC100) += fmc100_nand/ ++obj-$(CONFIG_MTD_SPI_NAND_FMC100) += fmc100/ ++endif + obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o + obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o + +diff --git a/drivers/mtd/nand/fmc100/Kconfig b/drivers/mtd/nand/fmc100/Kconfig +new file mode 100644 +index 000000000..eff6b8c93 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/Kconfig +@@ -0,0 +1,16 @@ ++# ++# vendor flash memory controller SPI nand device driver version 100 ++# drivers/mtd/nand/fmc100/Kconfig ++# ++ ++config MTD_SPI_NAND_FMC100 ++ bool "Vendor Flash Memory Controller v100 SPI Nand devices support" ++ depends on MFD_BSP_FMC && MTD_SPI_NAND_BSP ++ select MISC_FILESYSTEMS ++ select MTD_BLOCK ++ select YAFFS_FS ++ select YAFFS_YAFFS2 ++ help ++ Vendor Flash Memory Controller version 100 is called fmc100 for ++ short. The controller driver support registers and DMA transfers ++ while reading or writing the SPI nand flash. +diff --git a/drivers/mtd/nand/fmc100/Makefile b/drivers/mtd/nand/fmc100/Makefile +new file mode 100644 +index 000000000..27cd893de +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/Makefile +@@ -0,0 +1,26 @@ ++# ++# The Flash Memory Controller v100 Device Driver for vendor ++# ++# Copyright (c) 2016-2017 Shenshu Technologies Co., Ltd. ++# ++# This program is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License as published by the ++# Free Software Foundation; either version 2 of the License, or (at your ++# option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++# ++# ++ ++# ++# drivers/mtd/nand/fmc100/Makefile ++# ++ ++obj-y += fmc_spi_nand_ids.o ++obj-y += fmc100.o fmc100_os.o +diff --git a/drivers/mtd/nand/fmc100/fmc100.c b/drivers/mtd/nand/fmc100/fmc100.c +new file mode 100644 +index 000000000..3a17640d3 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/fmc100.c +@@ -0,0 +1,1250 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include "fmc100.h" ++ ++static void fmc100_switch_to_spi_nand(struct fmc_host *host) ++{ ++ u32 reg; ++ ++ reg = fmc_readl(host, FMC_CFG); ++ reg &= ~FLASH_TYPE_SEL_MASK; ++ reg |= fmc_cfg_flash_sel(FLASH_TYPE_SPI_NAND); ++ fmc_writel(host, FMC_CFG, reg); ++} ++ ++static void fmc100_set_str_mode(const struct fmc_host *host) ++{ ++ u32 reg; ++ ++ reg = fmc_readl(host, FMC_GLOBAL_CFG); ++ reg &= (~FMC_GLOBAL_CFG_DTR_MODE); ++ fmc_writel(host, FMC_GLOBAL_CFG, reg); ++} ++ ++static void fmc100_operation_config(struct fmc_host *host, int op) ++{ ++ int ret; ++ unsigned long clkrate = 0; ++ struct fmc_spi *spi = host->spi; ++ ++ fmc100_switch_to_spi_nand(host); ++ clk_prepare_enable(host->clk); ++ switch (op) { ++ case OP_STYPE_WRITE: ++ clkrate = min((u_long)host->clkrate, ++ (u_long)clk_fmc_to_crg_mhz(spi->write->clock)); ++ break; ++ case OP_STYPE_READ: ++ clkrate = min((u_long)host->clkrate, ++ (u_long)clk_fmc_to_crg_mhz(spi->read->clock)); ++ break; ++ case OP_STYPE_ERASE: ++ clkrate = min((u_long)host->clkrate, ++ (u_long)clk_fmc_to_crg_mhz(spi->erase->clock)); ++ break; ++ default: ++ break; ++ } ++ ++ ret = clk_set_rate(host->clk, clkrate); ++ if (WARN_ON((ret != 0))) ++ pr_err("clk_set_rate failed: %d\n", ret); ++} ++ ++static void fmc100_dma_wr_addr_config(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++#ifndef FMC100_SPI_NAND_SUPPORT_REG_WRITE ++ reg = host->dma_buffer; ++ fmc_writel(host, FMC_DMA_SADDR_D0, reg); ++ fmc_pr(WR_DBG, "|-Set DMA_SADDR_D[0x40]%#x\n", reg); ++ ++#ifdef CONFIG_64BIT ++ reg = (host->dma_buffer & FMC_DMA_SADDRH_MASK) >> ++ FMC_DMA_BIT_SHIFT_LENTH; ++ fmc_writel(host, FMC_DMA_SADDRH_D0, reg); ++ fmc_pr(WR_DBG, "\t|-Set DMA_SADDRH_D0[%#x]%#x\n", FMC_DMA_SADDRH_D0, reg); ++#endif ++ ++ reg = host->dma_oob; ++ fmc_writel(host, FMC_DMA_SADDR_OOB, reg); ++ fmc_pr(WR_DBG, "|-Set DMA_SADDR_OOB[%#x]%#x\n", FMC_DMA_SADDR_OOB, reg); ++#ifdef CONFIG_64BIT ++ reg = (host->dma_oob & FMC_DMA_SADDRH_MASK) >> ++ FMC_DMA_BIT_SHIFT_LENTH; ++ fmc_writel(host, FMC_DMA_SADDRH_OOB, reg); ++ fmc_pr(WR_DBG, "\t|-Set DMA_SADDRH_OOB[%#x]%#x\n", FMC_DMA_SADDRH_OOB, ++ reg); ++#endif ++#endif ++} ++ ++static void fmc100_dma_wr_op_config(struct fmc_host *host, const struct fmc_spi *spi) ++{ ++ unsigned int reg; ++ unsigned int block_num; ++ unsigned int block_num_h; ++ unsigned int page_num; ++ unsigned char pages_per_block_shift; ++ struct nand_chip *chip = host->chip; ++ ++ reg = FMC_INT_CLR_ALL; ++ fmc_writel(host, FMC_INT_CLR, reg); ++ fmc_pr(WR_DBG, "|-Set INT_CLR[%#x]%#x\n", FMC_INT_CLR, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_mem_if_type(spi->write->iftype) | ++ OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(WR_DBG, "|-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ pages_per_block_shift = chip->phys_erase_shift - chip->page_shift; ++ block_num = host->addr_value[1] >> pages_per_block_shift; ++ block_num_h = block_num >> REG_CNT_HIGH_BLOCK_NUM_SHIFT; ++ reg = fmc_addrh_set(block_num_h); ++ fmc_writel(host, FMC_ADDRH, reg); ++ fmc_pr(WR_DBG, "|-Set ADDRH[%#x]%#x\n", FMC_ADDRH, reg); ++ ++ page_num = host->addr_value[1] - (block_num << pages_per_block_shift); ++ reg = ((block_num & REG_CNT_BLOCK_NUM_MASK) << REG_CNT_BLOCK_NUM_SHIFT) | ++ ((page_num & REG_CNT_PAGE_NUM_MASK) << REG_CNT_PAGE_NUM_SHIFT); ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(WR_DBG, "|-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ 
*host->epm = 0x0000; ++ ++ fmc100_dma_wr_addr_config(host); ++ ++ reg = op_ctrl_wr_opcode(spi->write->cmd) | ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_WRITE ++ op_ctrl_dma_op(OP_TYPE_REG) | ++#else ++ op_ctrl_dma_op(OP_TYPE_DMA) | ++#endif ++ op_ctrl_rw_op(RW_OP_WRITE) | ++ OP_CTRL_DMA_OP_READY; ++ fmc_writel(host, FMC_OP_CTRL, reg); ++ fmc_pr(WR_DBG, "|-Set OP_CTRL[%#x]%#x\n", FMC_OP_CTRL, reg); ++ ++ fmc_dma_wait_int_finish(host); ++} ++ ++static void fmc100_send_cmd_write(struct fmc_host *host) ++{ ++ int ret; ++ struct fmc_spi *spi = host->spi; ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_WRITE ++ const char *op = "Reg"; ++#else ++ const char *op = "Dma"; ++#endif ++ ++ if (WR_DBG) ++ pr_info("\n"); ++ fmc_pr(WR_DBG, "*-Start send %s page write command\n", op); ++ ++ mutex_lock(host->lock); ++ fmc100_operation_config(host, OP_STYPE_WRITE); ++ ++ ret = spi->driver->wait_ready(spi); ++ if (ret) { ++ db_msg("Error: %s program wait ready failed! status: %#x\n", ++ op, ret); ++ goto end; ++ } ++ ++ ret = spi->driver->write_enable(spi); ++ if (ret) { ++ db_msg("Error: %s program write enable failed! ret: %#x\n", ++ op, ret); ++ goto end; ++ } ++ ++ fmc100_dma_wr_op_config(host, spi); ++ ++end: ++ mutex_unlock(host->lock); ++ fmc_pr(WR_DBG, "*-End %s page program!\n", op); ++} ++ ++static void fmc100_send_cmd_status(struct fmc_host *host) ++{ ++ u_char status; ++ int ret; ++ unsigned char addr = STATUS_ADDR; ++ struct fmc_spi *spi = NULL; ++ ++ if (host == NULL || host->spi == NULL) { ++ db_msg("Error: host or host->spi is NULL!\n"); ++ return; ++ } ++ spi = host->spi; ++ if (host->cmd_op.l_cmd == NAND_CMD_GET_FEATURES) ++ addr = PROTECT_ADDR; ++ ++ ret = spi_nand_feature_op(spi, GET_OP, addr, &status); ++ if (ret) ++ return; ++ fmc_pr((ER_DBG || WR_DBG), "\t*-Get status[%#x]: %#x\n", addr, status); ++} ++ ++static void fmc100_dma_rd_addr_config(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++#ifndef FMC100_SPI_NAND_SUPPORT_REG_READ ++ reg = host->dma_buffer; ++ fmc_writel(host, FMC_DMA_SADDR_D0, reg); ++ fmc_pr(RD_DBG, "\t|-Set DMA_SADDR_D0[%#x]%#x\n", FMC_DMA_SADDR_D0, reg); ++ ++#ifdef CONFIG_64BIT ++ reg = (host->dma_buffer & FMC_DMA_SADDRH_MASK) >> ++ FMC_DMA_BIT_SHIFT_LENTH; ++ fmc_writel(host, FMC_DMA_SADDRH_D0, reg); ++ fmc_pr(RD_DBG, "\t|-Set DMA_SADDRH_D0[%#x]%#x\n", FMC_DMA_SADDRH_D0, reg); ++#endif ++ ++ reg = host->dma_oob; ++ fmc_writel(host, FMC_DMA_SADDR_OOB, reg); ++ fmc_pr(RD_DBG, "\t|-Set DMA_SADDR_OOB[%#x]%#x\n", FMC_DMA_SADDR_OOB, ++ reg); ++ ++#ifdef CONFIG_64BIT ++ reg = (host->dma_oob & FMC_DMA_SADDRH_MASK) >> ++ FMC_DMA_BIT_SHIFT_LENTH; ++ fmc_writel(host, FMC_DMA_SADDRH_OOB, reg); ++ fmc_pr(RD_DBG, "\t|-Set DMA_SADDRH_OOB[%#x]%#x\n", FMC_DMA_SADDRH_OOB, ++ reg); ++#endif ++#endif ++} ++ ++static void fmc100_dma_rd_op_config(struct fmc_host *host, const struct fmc_spi *spi) ++{ ++ unsigned int reg; ++ unsigned int block_num; ++ unsigned int block_num_h; ++ unsigned int page_num; ++ unsigned char pages_per_block_shift; ++ struct nand_chip *chip = host->chip; ++ ++ reg = FMC_INT_CLR_ALL; ++ fmc_writel(host, FMC_INT_CLR, reg); ++ fmc_pr(RD_DBG, "\t|-Set INT_CLR[%#x]%#x\n", FMC_INT_CLR, reg); ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_READOOB) ++ host->cmd_op.op_cfg = op_ctrl_rd_op_sel(RD_OP_READ_OOB); ++ else ++ host->cmd_op.op_cfg = op_ctrl_rd_op_sel(RD_OP_READ_ALL_PAGE); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_mem_if_type(spi->read->iftype) | ++ op_cfg_dummy_num(spi->read->dummy) | ++ OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(RD_DBG, "\t|-Set 
OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ pages_per_block_shift = chip->phys_erase_shift - chip->page_shift; ++ block_num = host->addr_value[1] >> pages_per_block_shift; ++ block_num_h = block_num >> REG_CNT_HIGH_BLOCK_NUM_SHIFT; ++ ++ reg = fmc_addrh_set(block_num_h); ++ fmc_writel(host, FMC_ADDRH, reg); ++ fmc_pr(RD_DBG, "\t|-Set ADDRH[%#x]%#x\n", FMC_ADDRH, reg); ++ ++ page_num = host->addr_value[1] - (block_num << pages_per_block_shift); ++ ++ reg = ((block_num & REG_CNT_BLOCK_NUM_MASK) << REG_CNT_BLOCK_NUM_SHIFT) | ++ ((page_num & REG_CNT_PAGE_NUM_MASK) << REG_CNT_PAGE_NUM_SHIFT); ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(RD_DBG, "\t|-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ fmc100_dma_rd_addr_config(host); ++ ++ reg = op_ctrl_rd_opcode(spi->read->cmd) | host->cmd_op.op_cfg | ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_READ ++ op_ctrl_dma_op(OP_TYPE_REG) | ++#else ++ op_ctrl_dma_op(OP_TYPE_DMA) | ++#endif ++ op_ctrl_rw_op(RW_OP_READ) | OP_CTRL_DMA_OP_READY; ++ fmc_writel(host, FMC_OP_CTRL, reg); ++ fmc_pr(RD_DBG, "\t|-Set OP_CTRL[%#x]%#x\n", FMC_OP_CTRL, reg); ++ ++ fmc_dma_wait_int_finish(host); ++} ++ ++static void fmc100_send_cmd_read(struct fmc_host *host) ++{ ++ struct fmc_spi *spi = host->spi; ++ int ret; ++ ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_READ ++ char *op = "Reg"; ++#else ++ char *op = "Dma"; ++#endif ++ ++ if (RD_DBG) ++ pr_info("\n"); ++ ++ fmc_pr(RD_DBG, "\t*-Start %s page read\n", op); ++ ++ if ((host->addr_value[0] == host->cache_addr_value[0]) ++ && (host->addr_value[1] == host->cache_addr_value[1])) { ++ fmc_pr(RD_DBG, "\t*-%s read cache hit, addr[%#x %#x]\n", ++ op, host->addr_value[1], host->addr_value[0]); ++ return; ++ } ++ ++ mutex_lock(host->lock); ++ fmc100_operation_config(host, OP_STYPE_READ); ++ ++ fmc_pr(RD_DBG, "\t|-Wait ready before %s page read\n", op); ++ ret = spi->driver->wait_ready(spi); ++ if (ret) { ++ db_msg("Error: %s read wait ready fail! ret: %#x\n", op, ret); ++ goto end; ++ } ++ ++ fmc100_dma_rd_op_config(host, spi); ++ ++ host->cache_addr_value[0] = host->addr_value[0]; ++ host->cache_addr_value[1] = host->addr_value[1]; ++ ++end: ++ mutex_unlock(host->lock); ++ fmc_pr(RD_DBG, "\t*-End %s page read\n", op); ++} ++ ++static void fmc100_send_cmd_erase(struct fmc_host *host) ++{ ++ unsigned int reg; ++ struct fmc_spi *spi = host->spi; ++ int ret; ++ ++ if (ER_DBG) ++ pr_info("\n"); ++ ++ fmc_pr(ER_DBG, "\t*-Start send cmd erase!\n"); ++ ++ mutex_lock(host->lock); ++ fmc100_operation_config(host, OP_STYPE_ERASE); ++ ++ ret = spi->driver->wait_ready(spi); ++ fmc_pr(ER_DBG, "\t|-Erase wait ready, ret: %#x\n", ret); ++ if (ret) { ++ db_msg("Error: Erase wait ready fail! status: %#x\n", ret); ++ goto end; ++ } ++ ++ ret = spi->driver->write_enable(spi); ++ if (ret) { ++ db_msg("Error: Erase write enable failed! 
ret: %#x\n", ret); ++ goto end; ++ } ++ ++ reg = FMC_INT_CLR_ALL; ++ fmc_writel(host, FMC_INT_CLR, reg); ++ fmc_pr(ER_DBG, "\t|-Set INT_CLR[%#x]%#x\n", FMC_INT_CLR, reg); ++ ++ reg = spi->erase->cmd; ++ fmc_writel(host, FMC_CMD, fmc_cmd_cmd1(reg)); ++ fmc_pr(ER_DBG, "\t|-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = fmc_addrl_block_h_mask(host->addr_value[1]) | ++ fmc_addrl_block_l_mask(host->addr_value[0]); ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(ER_DBG, "\t|-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_mem_if_type(spi->erase->iftype) | ++ op_cfg_addr_num(STD_OP_ADDR_NUM) | ++ op_cfg_dummy_num(spi->erase->dummy) | ++ OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(ER_DBG, "\t|-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = FMC_OP_CMD1_EN | ++ FMC_OP_ADDR_EN | ++ FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(ER_DBG, "\t|-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++end: ++ mutex_unlock(host->lock); ++ fmc_pr(ER_DBG, "\t*-End send cmd erase!\n"); ++} ++ ++void fmc100_ecc0_switch(struct fmc_host *host, unsigned char op) ++{ ++ unsigned int config; ++#if EC_DBG ++ unsigned int cmp_cfg; ++ ++ if (host == NULL) { ++ db_msg("Error: host is NULL!\n"); ++ return; ++ } ++ config = fmc_readl(host, FMC_CFG); ++ fmc_pr(EC_DBG, "\t *-Get CFG[%#x]%#x\n", FMC_CFG, config); ++ ++ if (op) ++ cmp_cfg = host->fmc_cfg; ++ else ++ cmp_cfg = host->fmc_cfg_ecc0; ++ ++ if (cmp_cfg != config) ++ db_msg("Warning: FMC config[%#x] is different.\n", ++ cmp_cfg); ++#endif ++ if (host == NULL) { ++ db_msg("Error: host is NULL!\n"); ++ return; ++ } ++ if (op == ENABLE) { ++ config = host->fmc_cfg_ecc0; ++ } else if (op == DISABLE) { ++ config = host->fmc_cfg; ++ } else { ++ db_msg("Error: Invalid opcode: %d\n", op); ++ return; ++ } ++ ++ fmc_writel(host, FMC_CFG, config); ++ fmc_pr(EC_DBG, "\t *-Set CFG[%#x]%#x\n", FMC_CFG, config); ++} ++ ++static void fmc100_send_cmd_readid(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(BT_DBG, "\t|*-Start send cmd read ID\n"); ++ ++ fmc100_ecc0_switch(host, ENABLE); ++ ++ reg = fmc_cmd_cmd1(SPI_CMD_RDID); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(BT_DBG, "\t||-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = READ_ID_ADDR; ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(BT_DBG, "\t||-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_addr_num(READ_ID_ADDR_NUM) | ++ OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(BT_DBG, "\t||-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = fmc_data_num_cnt(MAX_SPI_NAND_ID_LEN); ++ fmc_writel(host, FMC_DATA_NUM, reg); ++ fmc_pr(BT_DBG, "\t||-Set DATA_NUM[%#x]%#x\n", FMC_DATA_NUM, reg); ++ ++ reg = FMC_OP_CMD1_EN | ++ FMC_OP_ADDR_EN | ++ FMC_OP_READ_DATA_EN | ++ FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(BT_DBG, "\t||-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ host->addr_cycle = 0x0; ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ fmc100_ecc0_switch(host, DISABLE); ++ ++ fmc_pr(BT_DBG, "\t|*-End read flash ID\n"); ++} ++ ++static void fmc100_send_cmd_reset(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(BT_DBG, "\t|*-Start send cmd reset\n"); ++ ++ reg = fmc_cmd_cmd1(SPI_CMD_RESET); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(BT_DBG, "\t||-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(BT_DBG, "\t||-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, 
reg); ++ ++ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(BT_DBG, "\t||-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ fmc_pr(BT_DBG, "\t|*-End send cmd reset\n"); ++} ++ ++static void fmc100_host_init(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(BT_DBG, "\t||*-Start SPI Nand host init\n"); ++ ++ reg = fmc_readl(host, FMC_CFG); ++ if ((reg & FMC_CFG_OP_MODE_MASK) == FMC_CFG_OP_MODE_BOOT) { ++ reg |= fmc_cfg_op_mode(FMC_CFG_OP_MODE_NORMAL); ++ fmc_writel(host, FMC_CFG, reg); ++ fmc_pr(BT_DBG, "\t|||-Set CFG[%#x]%#x\n", FMC_CFG, reg); ++ } ++ ++ host->fmc_cfg = reg; ++ host->fmc_cfg_ecc0 = (reg & ~ECC_TYPE_MASK) | ECC_TYPE_0BIT; ++ ++ reg = fmc_readl(host, FMC_GLOBAL_CFG); ++ if (reg & FMC_GLOBAL_CFG_WP_ENABLE) { ++ reg &= ~FMC_GLOBAL_CFG_WP_ENABLE; ++ fmc_writel(host, FMC_GLOBAL_CFG, reg); ++ } ++ ++ host->addr_cycle = 0; ++ host->addr_value[0] = 0; ++ host->addr_value[1] = 0; ++ host->cache_addr_value[0] = ~0; ++ host->cache_addr_value[1] = ~0; ++ ++ host->send_cmd_write = fmc100_send_cmd_write; ++ host->send_cmd_status = fmc100_send_cmd_status; ++ host->send_cmd_read = fmc100_send_cmd_read; ++ host->send_cmd_erase = fmc100_send_cmd_erase; ++ host->send_cmd_readid = fmc100_send_cmd_readid; ++ host->send_cmd_reset = fmc100_send_cmd_reset; ++#ifdef CONFIG_PM ++ host->suspend = fmc100_suspend; ++ host->resume = fmc100_resume; ++#endif ++ ++ reg = timing_cfg_tcsh(CS_HOLD_TIME) | ++ timing_cfg_tcss(CS_SETUP_TIME) | ++ timing_cfg_tshsl(CS_DESELECT_TIME); ++ fmc_writel(host, FMC_SPI_TIMING_CFG, reg); ++ ++ reg = ALL_BURST_ENABLE; ++ fmc_writel(host, FMC_DMA_AHB_CTRL, reg); ++ ++ fmc_pr(BT_DBG, "\t||*-End SPI Nand host init\n"); ++} ++ ++static unsigned char fmc100_read_byte(struct nand_chip *chip) ++{ ++ struct fmc_host *host = chip->priv; ++ unsigned char value; ++ unsigned char ret_val = 0; ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_READID) { ++ value = fmc_readb(host->iobase + host->offset); ++ host->offset++; ++ if (host->cmd_op.data_no == host->offset) ++ host->cmd_op.l_cmd = 0; ++ ++ return value; ++ } ++ ++ if (host->cmd_op.cmd == NAND_CMD_STATUS) { ++ value = fmc_readl(host, FMC_STATUS); ++ if (host->cmd_op.l_cmd == NAND_CMD_GET_FEATURES) { ++ fmc_pr((ER_DBG || WR_DBG), "\t\tRead BP status:%#x\n", ++ value); ++ if (any_bp_enable(value)) ++ ret_val |= NAND_STATUS_WP; ++ ++ host->cmd_op.l_cmd = NAND_CMD_STATUS; ++ } ++ ++ if ((value & STATUS_OIP_MASK) == 0) ++ ret_val |= NAND_STATUS_READY; ++ ++ if (value & STATUS_E_FAIL_MASK) { ++ fmc_pr(ER_DBG, "\t\tGet erase status: %#x\n", value); ++ ret_val |= NAND_STATUS_FAIL; ++ } ++ ++ if (value & STATUS_P_FAIL_MASK) { ++ fmc_pr(WR_DBG, "\t\tGet write status: %#x\n", value); ++ ret_val |= NAND_STATUS_FAIL; ++ } ++ ++ return ret_val; ++ } ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_READOOB) { ++ value = fmc_readb(host->buffer + host->pagesize + host->offset); ++ host->offset++; ++ return value; ++ } ++ ++ host->offset++; ++ ++ return fmc_readb(host->buffer + host->column + host->offset - 1); ++} ++ ++static void fmc100_write_buf(struct nand_chip *chip, ++ const u_char *buf, int len) ++{ ++ struct fmc_host *host = chip->priv; ++ int ret; ++ ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_WRITE ++ if (buf == chip->oob_poi) ++ ret = memcpy_s((char *)host->iobase + host->pagesize, ++ FMC_MEM_LEN, buf, len); ++ else ++ ret = memcpy_s((char *)host->iobase, FMC_MEM_LEN, buf, len); ++ ++#else ++ if (buf == chip->oob_poi) ++ ret = memcpy_s((char *)(host->buffer + host->pagesize), ++ 
FMC_MAX_DMA_LEN, buf, len); ++ else ++ ret = memcpy_s((char *)host->buffer, FMC_MAX_DMA_LEN, buf, len); ++ ++#endif ++ if (ret) ++ printk("%s:memcpy_s failed\n", __func__); ++ ++ return; ++} ++ ++static void fmc100_read_buf(struct nand_chip *chip, u_char *buf, int len) ++{ ++ struct fmc_host *host = chip->priv; ++ int ret; ++ ++#ifdef FMC100_SPI_NAND_SUPPORT_REG_READ ++ if (buf == chip->oob_poi) ++ ret = memcpy_s(buf, MAX_OOB_LEN, (char *)host->iobase + ++ host->pagesize, len); ++ else ++ ret = memcpy_s(buf, MAX_PAGE_SIZE, (char *)host->iobase, len); ++ ++#else ++ if (buf == chip->oob_poi) ++ ret = memcpy_s(buf, MAX_OOB_LEN, (char *)host->buffer + ++ host->pagesize, len); ++ else ++ ret = memcpy_s(buf, MAX_PAGE_SIZE, (char *)host->buffer, len); ++ ++#endif ++ if (ret) { ++ printk("%s %d:memcpy_s failed\n", __func__, __LINE__); ++ return; ++ } ++ ++#ifdef CONFIG_BSP_NAND_ECC_STATUS_REPORT ++ if (buf != chip->oob_poi) { ++ struct mtd_info *mtd = nand_to_mtd(chip); /* needed for the ecc_stats accounting below */ ++ u_int reg; ++ u_int ecc_step = host->pagesize >> ECC_STEP_SHIFT; ++ ++ reg = fmc_readl(host, FMC100_ECC_ERR_NUM0_BUF0); ++ while (ecc_step) { ++ u_char err_num; ++ ++ err_num = get_ecc_err_num(--ecc_step, reg); ++ if (err_num == 0xff) ++ mtd->ecc_stats.failed++; ++ else ++ mtd->ecc_stats.corrected += err_num; ++ } ++ } ++#endif ++ ++ return; ++} ++ ++static void fmc100_select_chip(struct nand_chip *chip, int chipselect) ++{ ++ struct mtd_info *mtd = nand_to_mtd(chip); ++ struct fmc_host *host = chip->priv; ++ ++ if (chipselect < 0) { ++ mutex_unlock(&fmc_switch_mutex); ++ return; ++ } ++ ++ mutex_lock(&fmc_switch_mutex); ++ ++ if (chipselect > CONFIG_SPI_NAND_MAX_CHIP_NUM) ++ db_bug("Error: Invalid chipselect: %d\n", chipselect); ++ ++ if (host->mtd != mtd) { ++ host->mtd = mtd; ++ host->cmd_op.cs = chipselect; ++ } ++} ++ ++static void fmc100_ale_init(struct fmc_host *host, unsigned ctrl, unsigned int udat) ++{ ++ unsigned int addr_value = 0; ++ unsigned int addr_offset; ++ ++ if (ctrl & NAND_CTRL_CHANGE) { ++ host->addr_cycle = 0x0; ++ host->addr_value[0] = 0x0; ++ host->addr_value[1] = 0x0; ++ } ++ addr_offset = host->addr_cycle << FMC100_ADDR_CYCLE_SHIFT; ++ ++ if (host->addr_cycle >= FMC100_ADDR_CYCLE_MASK) { ++ addr_offset = (host->addr_cycle - ++ FMC100_ADDR_CYCLE_MASK) << ++ FMC100_ADDR_CYCLE_SHIFT; ++ addr_value = 1; ++ } ++ host->addr_value[addr_value] |= ++ ((udat & 0xff) << addr_offset); ++ ++ host->addr_cycle++; ++} ++ ++static void fmc100_cle_init(struct fmc_host *host, ++ unsigned ctrl, ++ unsigned int udat, ++ int *is_cache_invalid) ++{ ++ unsigned char cmd; ++ ++ cmd = udat & 0xff; ++ host->cmd_op.cmd = cmd; ++ switch (cmd) { ++ case NAND_CMD_PAGEPROG: ++ host->offset = 0; ++ host->send_cmd_write(host); ++ break; ++ ++ case NAND_CMD_READSTART: ++ *is_cache_invalid = 0; ++ if (host->addr_value[0] == host->pagesize) ++ host->cmd_op.l_cmd = NAND_CMD_READOOB; ++ ++ host->send_cmd_read(host); ++ break; ++ ++ case NAND_CMD_ERASE2: ++ host->send_cmd_erase(host); ++ break; ++ ++ case NAND_CMD_READID: ++ /* dest fmcbuf only needs the ID length initialized */ ++ if (memset_s((u_char *)(host->iobase), FMC_MEM_LEN, ++ 0, MAX_SPI_NAND_ID_LEN)) { ++ printk("%s %d:memset_s failed\n", __func__, __LINE__); ++ break; ++ } ++ host->cmd_op.l_cmd = cmd; ++ host->cmd_op.data_no = MAX_SPI_NAND_ID_LEN; ++ host->send_cmd_readid(host); ++ break; ++ ++ case NAND_CMD_STATUS: ++ host->send_cmd_status(host); ++ break; ++ ++ case NAND_CMD_READ0: ++ host->cmd_op.l_cmd = cmd; ++ break; ++ ++ case NAND_CMD_RESET: ++ host->send_cmd_reset(host); ++ break; ++ ++ case NAND_CMD_SEQIN: ++ case 
NAND_CMD_ERASE1: ++ default: ++ break; ++ } ++} ++ ++static void fmc100_cmd_ctrl(struct nand_chip *chip, int dat, unsigned ctrl) ++{ ++ int is_cache_invalid = 1; ++ struct fmc_host *host = chip->priv; ++ unsigned int udat = (unsigned int)dat; ++ ++ if (ctrl & NAND_ALE) ++ fmc100_ale_init(host, ctrl, udat); ++ ++ if ((ctrl & NAND_CLE) != 0 && (ctrl & NAND_CTRL_CHANGE) != 0) ++ fmc100_cle_init(host, ctrl, udat, &is_cache_invalid); ++ ++ if ((dat == NAND_CMD_NONE) && (host->addr_cycle != 0)) { ++ if (host->cmd_op.cmd == NAND_CMD_SEQIN || ++ host->cmd_op.cmd == NAND_CMD_READ0 || ++ host->cmd_op.cmd == NAND_CMD_READID) { ++ host->offset = 0x0; ++ host->column = (host->addr_value[0] & 0xffff); ++ } ++ } ++ ++ if (is_cache_invalid) { ++ host->cache_addr_value[0] = ~0; ++ host->cache_addr_value[1] = ~0; ++ } ++} ++ ++static int fmc100_dev_ready(struct nand_chip *chip) ++{ ++ unsigned int reg; ++ unsigned long deadline = jiffies + FMC_MAX_READY_WAIT_JIFFIES; ++ struct fmc_host *host = chip->priv; ++ ++ do { ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ ++ reg = FMC_OP_READ_STATUS_EN | FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ reg = fmc_readl(host, FMC_STATUS); ++ if ((reg & STATUS_OIP_MASK) == 0) ++ return NAND_STATUS_READY; ++ ++ cond_resched(); ++ } while (!time_after_eq(jiffies, deadline)); ++ ++ if ((chip->options & NAND_SCAN_SILENT_NODEV) == 0) ++ pr_warn("Wait SPI nand ready timeout, status: %#x\n", reg); ++ ++ return 0; ++} ++ ++/* ++ * 'host->epm' only uses the first oobfree[0] field; it looks very simple, but... ++ */ ++/* Default OOB area layout */ ++static int fmc_ooblayout_ecc_default(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = OOB_LENGTH_DEFAULT; ++ oobregion->offset = OOB_OFFSET_DEFAULT; ++ ++ return 0; ++} ++ ++static int fmc_ooblayout_free_default(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = OOB_LENGTH_DEFAULT_FREE; ++ oobregion->offset = OOB_OFFSET_DEFAULT_FREE; ++ ++ return 0; ++} ++ ++static struct mtd_ooblayout_ops fmc_ooblayout_default_ops = { ++ .ecc = fmc_ooblayout_ecc_default, ++ .free = fmc_ooblayout_free_default, ++}; ++ ++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 ++static int fmc_ooblayout_ecc_4k16bit(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = OOB_LENGTH_4K16BIT; ++ oobregion->offset = OOB_OFFSET_4K16BIT; ++ ++ return 0; ++} ++ ++static int fmc_ooblayout_free_4k16bit(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = OOB_LENGTH_4K16BIT_FREE; ++ oobregion->offset = OOB_OFFSET_4K16BIT_FREE; ++ ++ return 0; ++} ++ ++static struct mtd_ooblayout_ops fmc_ooblayout_4k16bit_ops = { ++ .ecc = fmc_ooblayout_ecc_4k16bit, ++ .free = fmc_ooblayout_free_4k16bit, ++}; ++ ++static int fmc_ooblayout_ecc_2k16bit(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = OOB_LENGTH_2K16BIT; ++ oobregion->offset = OOB_OFFSET_2K16BIT; ++ ++ return 0; ++} ++ ++static int fmc_ooblayout_free_2k16bit(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section) ++ return -ERANGE; ++ ++ oobregion->length = 
OOB_LENGTH_2K16BIT_FREE; ++ oobregion->offset = OOB_OFFSET_2K16BIT_FREE; ++ ++ return 0; ++} ++ ++static struct mtd_ooblayout_ops fmc_ooblayout_2k16bit_ops = { ++ .ecc = fmc_ooblayout_ecc_2k16bit, ++ .free = fmc_ooblayout_free_2k16bit, ++}; ++#endif ++ ++static struct nand_config_info fmc_spi_nand_config_table[] = { ++ {NAND_PAGE_4K, NAND_ECC_24BIT, 24, 200, &fmc_ooblayout_default_ops}, ++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 ++ {NAND_PAGE_4K, NAND_ECC_16BIT, 16, 128, &fmc_ooblayout_4k16bit_ops}, ++#endif ++ {NAND_PAGE_4K, NAND_ECC_8BIT, 8, 128, &fmc_ooblayout_default_ops}, ++ {NAND_PAGE_4K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops}, ++ {NAND_PAGE_2K, NAND_ECC_24BIT, 24, 128, &fmc_ooblayout_default_ops}, ++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 ++ {NAND_PAGE_2K, NAND_ECC_16BIT, 16, 64, &fmc_ooblayout_2k16bit_ops}, ++#endif ++ {NAND_PAGE_2K, NAND_ECC_8BIT, 8, 64, &fmc_ooblayout_default_ops}, ++ {NAND_PAGE_2K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops}, ++ {0, 0, 0, 0, NULL}, ++}; ++ ++/* ++ * Auto-sense the page size and ecc type value. The driver will try each page ++ * size and ecc type one by one until the flash can be read and written ++ * accurately, so the page size and ecc type are matched adaptively without a ++ * switch on the board. ++ */ ++static struct nand_config_info *fmc100_get_config_type_info( ++ struct mtd_info *mtd, struct nand_dev_t *nand_dev) ++{ ++ struct nand_config_info *best = NULL; ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ struct nand_config_info *info = fmc_spi_nand_config_table; ++ ++ nand_dev->start_type = "Auto"; ++ ++ for (; info->ooblayout_ops; info++) { ++ if (match_page_type_to_size(info->pagetype) != mtd->writesize) ++ continue; ++ ++ if (mtd->oobsize < info->oobsize) ++ continue; ++ ++ if (!best || (best->ecctype < info->ecctype)) ++ best = info; ++ } ++ ++ /* All SPI NAND are small-page, SLC */ ++ chip->base.memorg.bits_per_cell = 1; ++ ++ return best; ++} ++ ++static void fmc100_chip_init(struct nand_chip *chip) ++{ ++ chip->legacy.read_byte = fmc100_read_byte; ++ chip->legacy.write_buf = fmc100_write_buf; ++ chip->legacy.read_buf = fmc100_read_buf; ++ ++ chip->legacy.select_chip = fmc100_select_chip; ++ ++ chip->legacy.cmd_ctrl = fmc100_cmd_ctrl; ++ chip->legacy.dev_ready = fmc100_dev_ready; ++ ++ chip->legacy.chip_delay = FMC_CHIP_DELAY; ++ ++ chip->options = NAND_SKIP_BBTSCAN | NAND_BROKEN_XD ++ | NAND_SCAN_SILENT_NODEV; ++ ++ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE; ++} ++ ++static void fmc100_set_oob_info(struct mtd_info *mtd, ++ struct nand_config_info *info, struct nand_dev_t *nand_dev) ++{ ++ struct nand_chip *chip = NULL; ++ struct fmc_host *host = NULL; ++ struct mtd_oob_region fmc_oobregion = {0, 0}; ++ ++ if (info == NULL || mtd == NULL || nand_dev == NULL) { ++ db_msg("set oob info err!!!\n"); ++ return; ++ } ++ ++ chip = mtd_to_nand(mtd); ++ host = chip->priv; ++ ++ if (info->ecctype != NAND_ECC_0BIT) ++ mtd->oobsize = info->oobsize; ++ ++ host->oobsize = mtd->oobsize; ++ nand_dev->oobsize = host->oobsize; ++ ++ host->dma_oob = host->dma_buffer + host->pagesize; ++ host->bbm = (u_char *)(host->buffer + host->pagesize ++ + FMC_BAD_BLOCK_POS); ++ if (info->ooblayout_ops == NULL) { ++ db_msg("Error: info->ooblayout_ops is NULL!\n"); ++ return; ++ } ++ info->ooblayout_ops->free(mtd, 0, &fmc_oobregion); ++ ++ mtd_set_ooblayout(mtd, info->ooblayout_ops); ++ ++ /* EB bits locate in the bottom two of CTRL(30) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + ++ FMC_OOB_LEN_30_EB_OFFSET); ++ 
++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 ++ if (info->ecctype == NAND_ECC_16BIT) { ++ if (host->pagesize == _2K) { ++ /* EB bits locate in the bottom two of CTRL(6) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + FMC_OOB_LEN_6_EB_OFFSET); ++ } else if (host->pagesize == _4K) { ++ /* EB bit locate in the bottom two of CTRL(14) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + FMC_OOB_LEN_14_EB_OFFSET); ++ } ++ } ++#endif ++} ++ ++static unsigned int fmc100_get_ecc_reg(struct fmc_host *host, ++ const struct nand_config_info *info, struct nand_dev_t *nand_dev) ++{ ++ if (info == NULL || host == NULL || nand_dev == NULL) { ++ db_msg("get ecc reg err!!!\n"); ++ return 0; ++ } ++ host->ecctype = info->ecctype; ++ nand_dev->ecctype = host->ecctype; ++ ++ return fmc_cfg_ecc_type(match_ecc_type_to_reg(info->ecctype)); ++} ++ ++static unsigned int fmc100_get_page_reg(struct fmc_host *host, ++ const struct nand_config_info *info) ++{ ++ if (info == NULL || host == NULL) { ++ db_msg("get page reg err!!!\n"); ++ return 0; ++ } ++ host->pagesize = match_page_type_to_size(info->pagetype); ++ ++ return fmc_cfg_page_size(match_page_type_to_reg(info->pagetype)); ++} ++ ++static unsigned int fmc100_get_block_reg(struct fmc_host *host, ++ const struct nand_config_info *info) ++{ ++ unsigned int block_reg = 0; ++ unsigned int page_per_block; ++ struct mtd_info *mtd = NULL; ++ ++ if (info == NULL || host == NULL) { ++ db_msg("get block reg err!!!\n"); ++ return 0; ++ } ++ ++ mtd = host->mtd; ++ if (mtd == NULL) { ++ db_msg("err:mtd is NULL!!!\n"); ++ return 0; ++ } ++ host->block_page_mask = ((mtd->erasesize / mtd->writesize) - 1); ++ page_per_block = mtd->erasesize / match_page_type_to_size(info->pagetype); ++ switch (page_per_block) { ++ case PAGE_PER_BLK_64: ++ block_reg = BLOCK_SIZE_64_PAGE; ++ break; ++ case PAGE_PER_BLK_128: ++ block_reg = BLOCK_SIZE_128_PAGE; ++ break; ++ case PAGE_PER_BLK_256: ++ block_reg = BLOCK_SIZE_256_PAGE; ++ break; ++ case PAGE_PER_BLK_512: ++ block_reg = BLOCK_SIZE_512_PAGE; ++ break; ++ default: ++ db_msg("Can't support block %#x and page %#x size\n", ++ mtd->erasesize, mtd->writesize); ++ } ++ ++ return fmc_cfg_block_size(block_reg); ++} ++ ++static void fmc100_set_fmc_cfg_reg(struct fmc_host *host, ++ const struct nand_config_info *type_info, struct nand_dev_t *nand_dev) ++{ ++ unsigned int page_reg, ecc_reg, block_reg, reg_fmc_cfg; ++ ++ ecc_reg = fmc100_get_ecc_reg(host, type_info, nand_dev); ++ page_reg = fmc100_get_page_reg(host, type_info); ++ block_reg = fmc100_get_block_reg(host, type_info); ++ ++ reg_fmc_cfg = fmc_readl(host, FMC_CFG); ++ reg_fmc_cfg &= ~(PAGE_SIZE_MASK | ECC_TYPE_MASK | BLOCK_SIZE_MASK); ++ reg_fmc_cfg |= ecc_reg | page_reg | block_reg; ++ fmc_writel(host, FMC_CFG, reg_fmc_cfg); ++ ++ /* Save value of FMC_CFG and FMC_CFG_ECC0 to turn on/off ECC */ ++ host->fmc_cfg = reg_fmc_cfg; ++ host->fmc_cfg_ecc0 = (host->fmc_cfg & ~ECC_TYPE_MASK) | ECC_TYPE_0BIT; ++ fmc_pr(BT_DBG, "\t|-Save FMC_CFG[%#x]: %#x and FMC_CFG_ECC0: %#x\n", ++ FMC_CFG, host->fmc_cfg, host->fmc_cfg_ecc0); ++} ++ ++static int fmc100_set_config_info(struct mtd_info *mtd, ++ struct nand_chip *chip, struct nand_dev_t *nand_dev) ++{ ++ struct fmc_host *host = chip->priv; ++ struct nand_config_info *type_info = NULL; ++ ++ fmc_pr(BT_DBG, "\t*-Start config Block Page OOB and Ecc\n"); ++ ++ type_info = fmc100_get_config_type_info(mtd, nand_dev); ++ WARN_ON(type_info == NULL); ++ if (type_info == NULL) { ++ 
db_msg("set config info err!!!\n"); ++ return 0; ++ } ++ ++ fmc_pr(BT_DBG, "\t|-%s Config, PageSize %s EccType %s OOBSize %d\n", ++ nand_dev->start_type, nand_page_name(type_info->pagetype), ++ nand_ecc_name(type_info->ecctype), type_info->oobsize); ++ ++ /* Set the page_size, ecc_type, block_size of FMC_CFG[0x0] register */ ++ fmc100_set_fmc_cfg_reg(host, type_info, nand_dev); ++ ++ fmc100_set_oob_info(mtd, type_info, nand_dev); ++ ++ fmc_pr(BT_DBG, "\t*-End config Block Page Oob and Ecc\n"); ++ ++ return 0; ++} ++ ++void fmc100_spi_nand_init(struct nand_chip *chip) ++{ ++ struct fmc_host *host = NULL; ++ ++ if ((chip == NULL) || (chip->priv == NULL)) { ++ db_msg("Error: chip or chip->priv is NULL!\n"); ++ return; ++ } ++ host = chip->priv; ++ fmc_pr(BT_DBG, "\t|*-Start fmc100 SPI Nand init\n"); ++ ++ /* Switch SPI type to SPI nand */ ++ fmc100_switch_to_spi_nand(host); ++ ++ /* hold on STR mode */ ++ fmc100_set_str_mode(host); ++ ++ /* fmc host init */ ++ fmc100_host_init(host); ++ host->chip = chip; ++ ++ /* fmc nand_chip struct init */ ++ fmc100_chip_init(chip); ++ ++ fmc_spi_nand_ids_register(); ++ nfc_param_adjust = fmc100_set_config_info; ++ ++ fmc_pr(BT_DBG, "\t|*-End fmc100 SPI Nand init\n"); ++} ++#ifdef CONFIG_PM ++int fmc100_suspend(struct platform_device *pltdev, pm_message_t state) ++{ ++ int ret; ++ struct fmc_host *host = platform_get_drvdata(pltdev); ++ struct fmc_spi *spi = host->spi; ++ ++ mutex_lock(host->lock); ++ fmc100_switch_to_spi_nand(host); ++ ++ ret = spi->driver->wait_ready(spi); ++ if (ret) { ++ db_msg("Error: wait ready failed!"); ++ clk_disable_unprepare(host->clk); ++ mutex_unlock(host->lock); ++ return 0; ++ } ++ ++ clk_disable_unprepare(host->clk); ++ mutex_unlock(host->lock); ++ ++ return 0; ++} ++ ++int fmc100_resume(struct platform_device *pltdev) ++{ ++ int cs; ++ struct fmc_host *host = platform_get_drvdata(pltdev); ++ struct nand_chip *chip = host->chip; ++ struct nand_memory_organization *memorg; ++ ++ memorg = nanddev_get_memorg(&chip->base); ++ ++ mutex_lock(host->lock); ++ fmc100_switch_to_spi_nand(host); ++ clk_prepare_enable(host->clk); ++ ++ for (cs = 0; cs < memorg->ntargets; cs++) ++ host->send_cmd_reset(host); ++ ++ fmc100_spi_nand_config(host); ++ ++ mutex_unlock(host->lock); ++ return 0; ++} ++#endif +diff --git a/drivers/mtd/nand/fmc100/fmc100.h b/drivers/mtd/nand/fmc100/fmc100.h +new file mode 100644 +index 000000000..2edaf91f4 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/fmc100.h +@@ -0,0 +1,383 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++#ifndef __FMC100_H__ ++#define __FMC100_H__ ++ ++#include ++#include ++#include ++#include "../raw/nfc_gen.h" ++ ++#define INFINITE (0xFFFFFFFF) ++ ++#define SPI_IF_READ_STD (0x01) ++#define SPI_IF_READ_FAST (0x02) ++#define SPI_IF_READ_DUAL (0x04) ++#define SPI_IF_READ_DUAL_ADDR (0x08) ++#define SPI_IF_READ_QUAD (0x10) ++#define SPI_IF_READ_QUAD_ADDR (0x20) ++ ++#define SPI_IF_WRITE_STD (0x01) ++#define SPI_IF_WRITE_DUAL (0x02) ++#define SPI_IF_WRITE_DUAL_ADDR (0x04) ++#define SPI_IF_WRITE_QUAD (0x08) ++#define SPI_IF_WRITE_QUAD_ADDR (0x10) ++ ++#define SPI_IF_ERASE_SECTOR_4K (0x01) ++#define SPI_IF_ERASE_SECTOR_32K (0x02) ++#define SPI_IF_ERASE_SECTOR_64K (0x04) ++#define SPI_IF_ERASE_SECTOR_128K (0x08) ++#define SPI_IF_ERASE_SECTOR_256K (0x10) ++ ++#define FMC_SPI_NAND_SUPPORT_READ (SPI_IF_READ_STD | \ ++ SPI_IF_READ_FAST | \ ++ SPI_IF_READ_DUAL | \ ++ SPI_IF_READ_DUAL_ADDR | \ ++ SPI_IF_READ_QUAD | \ ++ SPI_IF_READ_QUAD_ADDR) ++ ++#define FMC_SPI_NAND_SUPPORT_WRITE (SPI_IF_WRITE_STD | SPI_IF_WRITE_QUAD) ++ ++#define FMC_SPI_NAND_SUPPORT_MAX_DUMMY 8 ++ ++#define SPI_CMD_READ_STD 0x03 /* Standard read cache */ ++#define SPI_CMD_READ_FAST 0x0B /* Higher speed read cache */ ++#define SPI_CMD_READ_DUAL 0x3B /* 2 IO read cache only data */ ++#define SPI_CMD_READ_DUAL_ADDR 0xBB /* 2 IO read cache data&addr */ ++#define SPI_CMD_READ_QUAD 0x6B /* 4 IO read cache only data */ ++#define SPI_CMD_READ_QUAD_ADDR 0xEB /* 4 IO read cache data&addr */ ++ ++#define SPI_CMD_WRITE_STD 0x02 /* Standard page program */ ++#define SPI_CMD_WRITE_DUAL 0xA2 /* 2 IO program only data */ ++#define SPI_CMD_WRITE_DUAL_ADDR 0xD2 /* 2 IO program data&addr */ ++#define SPI_CMD_WRITE_QUAD 0x32 /* 4 IO program only data */ ++#define SPI_CMD_WRITE_QUAD_ADDR 0x12 /* 4 IO program data&addr */ ++ ++#define SPI_CMD_SE_4K 0x20 /* 4KB sector Erase */ ++#define SPI_CMD_SE_32K 0x52 /* 32KB sector Erase */ ++#define SPI_CMD_SE_64K 0xD8 /* 64KB sector Erase */ ++#define SPI_CMD_SE_128K 0xD8 /* 128KB sector Erase */ ++#define SPI_CMD_SE_256K 0xD8 /* 256KB sector Erase */ ++ ++#define set_read_std(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_std_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_STD, SPI_CMD_READ_STD, _dummy_, _size_, _clk_ } ++ ++#define set_read_fast(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_fast_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_FAST, SPI_CMD_READ_FAST, _dummy_, _size_, _clk_ } ++ ++#define set_read_dual(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_dual_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_DUAL, SPI_CMD_READ_DUAL, _dummy_, _size_, _clk_ } ++ ++#define set_read_dual_addr(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_dual_addr_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_DUAL_ADDR, SPI_CMD_READ_DUAL_ADDR, _dummy_, _size_, _clk_ } ++ ++#define set_read_quad(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_quad_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_QUAD, SPI_CMD_READ_QUAD, _dummy_, _size_, _clk_ } ++ ++#define set_read_quad_addr(_dummy_, _size_, _clk_) \ ++ static struct spi_op read_quad_addr_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_READ_QUAD_ADDR, SPI_CMD_READ_QUAD_ADDR, _dummy_, _size_, _clk_ } ++ ++ ++#define set_write_std(_dummy_, _size_, _clk_) \ ++ static struct spi_op write_std_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_WRITE_STD, SPI_CMD_WRITE_STD, _dummy_, _size_, _clk_ } ++ ++#define set_write_dual(_dummy_, _size_, _clk_) \ ++ static struct spi_op write_dual_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_WRITE_DUAL, 
SPI_CMD_WRITE_DUAL, _dummy_, _size_, _clk_ } ++ ++#define set_write_dual_addr(_dummy_, _size_, _clk_) \ ++ static struct spi_op write_dual_addr_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_WRITE_DUAL_ADDR, SPI_CMD_WRITE_DUAL_ADDR, _dummy_, _size_, _clk_ } ++ ++#define set_write_quad(_dummy_, _size_, _clk_) \ ++ static struct spi_op write_quad_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_WRITE_QUAD, SPI_CMD_WRITE_QUAD, _dummy_, _size_, _clk_ } ++ ++#define set_write_quad_addr(_dummy_, _size_, _clk_) \ ++ static struct spi_op write_quad_addr_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_WRITE_QUAD_ADDR, SPI_CMD_WRITE_QUAD_ADDR, _dummy_, _size_, _clk_ } ++ ++#define set_erase_sector_4k(_dummy_, _size_, _clk_) \ ++ static struct spi_op erase_sector_4k_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_ERASE_SECTOR_4K, SPI_CMD_SE_4K, _dummy_, _size_, _clk_ } ++ ++#define set_erase_sector_32k(_dummy_, _size_, _clk_) \ ++ static struct spi_op erase_sector_32k_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_ERASE_SECTOR_32K, SPI_CMD_SE_32K, _dummy_, _size_, _clk_ } ++ ++#define set_erase_sector_64k(_dummy_, _size_, _clk_) \ ++ static struct spi_op erase_sector_64k_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_ERASE_SECTOR_64K, SPI_CMD_SE_64K, _dummy_, _size_, _clk_ } ++ ++#define set_erase_sector_128k(_dummy_, _size_, _clk_) \ ++ static struct spi_op erase_sector_128k_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_ERASE_SECTOR_128K, SPI_CMD_SE_128K, _dummy_, _size_, _clk_ } ++ ++#define set_erase_sector_256k(_dummy_, _size_, _clk_) \ ++ static struct spi_op erase_sector_256k_##_dummy_##_size_##_clk_ = { \ ++ SPI_IF_ERASE_SECTOR_256K, SPI_CMD_SE_256K, _dummy_, _size_, _clk_ } ++ ++#define read_std(_dummy_, _size_, _clk_) read_std_##_dummy_##_size_##_clk_ ++#define read_fast(_dummy_, _size_, _clk_) read_fast_##_dummy_##_size_##_clk_ ++#define read_dual(_dummy_, _size_, _clk_) read_dual_##_dummy_##_size_##_clk_ ++#define read_dual_addr(_dummy_, _size_, _clk_) \ ++ read_dual_addr_##_dummy_##_size_##_clk_ ++#define read_quad(_dummy_, _size_, _clk_) read_quad_##_dummy_##_size_##_clk_ ++#define read_quad_addr(_dummy_, _size_, _clk_) \ ++ read_quad_addr_##_dummy_##_size_##_clk_ ++ ++#define write_std(_dummy_, _size_, _clk_) write_std_##_dummy_##_size_##_clk_ ++#define write_dual(_dummy_, _size_, _clk_) write_dual_##_dummy_##_size_##_clk_ ++#define write_dual_addr(_dummy_, _size_, _clk_) \ ++ write_dual_addr_##_dummy_##_size_##_clk_ ++#define write_quad(_dummy_, _size_, _clk_) write_quad_##_dummy_##_size_##_clk_ ++#define write_quad_addr(_dummy_, _size_, _clk_) \ ++ write_quad_addr_##_dummy_##_size_##_clk_ ++ ++#define erase_sector_4k(_dummy_, _size_, _clk_) \ ++ erase_sector_4k_##_dummy_##_size_##_clk_ ++#define erase_sector_32k(_dummy_, _size_, _clk_) \ ++ erase_sector_32k_##_dummy_##_size_##_clk_ ++#define erase_sector_64k(_dummy_, _size_, _clk_) \ ++ erase_sector_64k_##_dummy_##_size_##_clk_ ++#define erase_sector_128k(_dummy_, _size_, _clk_) \ ++ erase_sector_128k_##_dummy_##_size_##_clk_ ++#define erase_sector_256k(_dummy_, _size_, _clk_) \ ++ erase_sector_256k_##_dummy_##_size_##_clk_ ++ ++#define SPI_CMD_WREN 0x06 /* Write Enable */ ++#define SPI_CMD_WRDI 0x04 /* Write Disable */ ++ ++#define SPI_CMD_RDID 0x9F /* Read Identification */ ++ ++#define SPI_CMD_GET_FEATURES 0x0F /* Get Features */ ++#define SPI_CMD_SET_FEATURE 0x1F /* Set Feature */ ++ ++#define SPI_CMD_PAGE_READ 0x13 /* Page Read to Cache */ ++ ++#define SPI_CMD_RESET 0xff /* Reset the device */ ++ ++/* These macros are for debug only; the reg option is slower than the dma option */ 
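The set_* macros above stamp out static struct spi_op instances whose identifiers encode the (dummy, size, clock) triple via token pasting, and the matching lower-case name macros (read_std, write_quad, ...) reconstruct the same identifier wherever an ID table references it. As a purely illustrative expansion (the parameter values here are hypothetical, not taken from this patch), the invocation set_read_std(0, INFINITE, 24); would produce:

static struct spi_op read_std_0INFINITE24 = {
	SPI_IF_READ_STD,	/* iftype: standard single-wire read interface */
	SPI_CMD_READ_STD,	/* cmd: 0x03, read from cache */
	0,			/* dummy bytes */
	INFINITE,		/* size */
	24,			/* clock, later scaled by clk_fmc_to_crg_mhz() */
};

and read_std(0, INFINITE, 24) used elsewhere names exactly that object, so the operation tables stay declarative.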
++#undef FMC100_SPI_NAND_SUPPORT_REG_READ ++/* Enable it as needed: #define FMC100_SPI_NAND_SUPPORT_REG_READ */ ++ ++#undef FMC100_SPI_NAND_SUPPORT_REG_WRITE ++/* Enable it as needed: #define FMC100_SPI_NAND_SUPPORT_REG_WRITE */ ++ ++#define WORD_READ_OFFSET_ADD_LENGTH 2 ++#define WORD_READ_START_OFFSET 2 ++ ++#define PAGE_PER_BLK_64 64 ++#define PAGE_PER_BLK_128 128 ++#define PAGE_PER_BLK_256 256 ++#define PAGE_PER_BLK_512 512 ++ ++#define OOB_LENGTH_DEFAULT 32 ++#define OOB_OFFSET_DEFAULT 32 ++#define OOB_LENGTH_DEFAULT_FREE 30 ++#define OOB_OFFSET_DEFAULT_FREE 2 ++ ++#define OOB_LENGTH_4K16BIT 14 ++#define OOB_OFFSET_4K16BIT 14 ++#define OOB_LENGTH_4K16BIT_FREE 14 ++#define OOB_OFFSET_4K16BIT_FREE 2 ++ ++#define OOB_LENGTH_2K16BIT 6 ++#define OOB_OFFSET_2K16BIT 6 ++#define OOB_LENGTH_2K16BIT_FREE 6 ++#define OOB_OFFSET_2K16BIT_FREE 2 ++ ++#ifdef CONFIG_BSP_NAND_ECC_STATUS_REPORT ++ ++#define FMC100_ECC_ERR_NUM0_BUF0 0xc0 ++ ++#define get_ecc_err_num(_i, _reg) (((_reg) >> ((_i) * 8)) & 0xff) ++#define ECC_STEP_SHIFT 10 ++#endif ++ ++#define REG_CNT_HIGH_BLOCK_NUM_SHIFT 10 ++ ++#define REG_CNT_BLOCK_NUM_MASK 0x3ff ++#define REG_CNT_BLOCK_NUM_SHIFT 22 ++ ++#define REG_CNT_PAGE_NUM_MASK 0x3f ++#define REG_CNT_PAGE_NUM_SHIFT 16 ++ ++#define ERR_STR_DRIVER "Driver does not support this configuration " ++#define ERR_STR_CHECK "Please make sure the hardware configuration is correct" ++ ++#define FMC100_ADDR_CYCLE_MASK 0x2 ++#define FMC100_ADDR_CYCLE_SHIFT 0x3 ++ ++#define OP_STYPE_NONE 0x0 ++#define OP_STYPE_READ 0x01 ++#define OP_STYPE_WRITE 0x02 ++#define OP_STYPE_ERASE 0x04 ++#define clk_fmc_to_crg_mhz(_clk) ((_clk) * 2000000) ++ ++#define MAX_SPI_OP 8 ++ ++/* SPI general operation parameter */ ++struct spi_op { ++ unsigned char iftype; ++ unsigned char cmd; ++ unsigned char dummy; ++ unsigned int size; ++ unsigned int clock; ++}; ++ ++struct spi_drv; ++ ++/* SPI interface all operation */ ++struct fmc_spi { ++ char *name; ++ int chipselect; ++ unsigned long long chipsize; ++ unsigned int erasesize; ++#define SPI_NOR_3BYTE_ADDR_LEN 3 /* address len 3Bytes */ ++#define SPI_NOR_4BYTE_ADDR_LEN 4 /* address len 4Bytes for 32MB */ ++ unsigned int addrcycle; ++ ++ struct spi_op read[1]; ++ struct spi_op write[1]; ++ struct spi_op erase[MAX_SPI_OP]; ++ ++ void *host; ++ ++ struct spi_drv *driver; ++}; ++ ++/* SPI interface special operation function hook */ ++struct spi_drv { ++ int (*wait_ready)(struct fmc_spi *spi); ++ int (*write_enable)(struct fmc_spi *spi); ++ int (*qe_enable)(struct fmc_spi *spi); ++ int (*bus_prepare)(struct fmc_spi *spi, int op); ++ int (*entry_4addr)(struct fmc_spi *spi, int en); ++}; ++ ++struct spi_nand_info { ++ char *name; ++ unsigned char id[MAX_SPI_NAND_ID_LEN]; ++ unsigned char id_len; ++ unsigned long long chipsize; ++ unsigned int erasesize; ++ unsigned int pagesize; ++ unsigned int oobsize; ++#define BBP_LAST_PAGE 0x01 ++#define BBP_FIRST_PAGE 0x02 ++ unsigned int badblock_pos; ++ struct spi_op *read[MAX_SPI_OP]; ++ struct spi_op *write[MAX_SPI_OP]; ++ struct spi_op *erase[MAX_SPI_OP]; ++ struct spi_drv *driver; ++}; ++ ++extern char spi_nand_feature_op(struct fmc_spi *spi, u_char op, u_char addr, ++ u_char *val); ++ ++struct fmc_host { ++ struct mtd_info *mtd; ++ struct nand_chip *chip; ++ struct fmc_spi spi[CONFIG_SPI_NAND_MAX_CHIP_NUM]; ++ struct fmc_cmd_op cmd_op; ++ void __iomem *iobase; ++ void __iomem *regbase; ++ struct clk *clk; ++ u32 clkrate; ++ unsigned int fmc_cfg; ++ unsigned int fmc_cfg_ecc0; ++ unsigned int offset; ++ struct device *dev; ++ struct mutex 
*lock; ++ ++ /* This may be an unaligned address; used only for malloc or free */ ++ char *buforg; ++ char *buffer; ++ ++#ifdef CONFIG_64BIT ++ unsigned long long dma_buffer; ++ unsigned long long dma_oob; ++#else ++ unsigned int dma_buffer; ++ unsigned int dma_oob; ++#endif ++ unsigned int addr_cycle; ++ unsigned int addr_value[2]; ++ unsigned int cache_addr_value[2]; ++ ++ unsigned int column; ++ unsigned int block_page_mask; ++ unsigned int ecctype; ++ unsigned int pagesize; ++ unsigned int oobsize; ++ int add_partition; ++ int need_rr_data; ++#define FMC100_READ_RETRY_DATA_LEN 128 ++ char rr_data[FMC100_READ_RETRY_DATA_LEN]; ++ struct read_retry_t *read_retry; ++ ++ int version; ++ ++ /* BOOTROM reads two bytes to detect the bad block flag */ ++#define FMC_BAD_BLOCK_POS 0 ++ unsigned char *bbm; /* nand bad block mark */ ++#define FMC_OOB_LEN_30_EB_OFFSET (30 - 2) ++#define FMC_OOB_LEN_6_EB_OFFSET (6 - 2) ++#define FMC_OOB_LEN_14_EB_OFFSET (14 - 2) ++ unsigned short *epm; /* nand empty page mark */ ++ ++ unsigned int uc_er; ++ ++ void (*send_cmd_write)(struct fmc_host *host); ++ void (*send_cmd_status)(struct fmc_host *host); ++ void (*send_cmd_read)(struct fmc_host *host); ++ void (*send_cmd_erase)(struct fmc_host *host); ++ void (*send_cmd_readid)(struct fmc_host *host); ++ void (*send_cmd_reset)(struct fmc_host *host); ++#ifdef CONFIG_PM ++ int (*suspend)(struct platform_device *pltdev, pm_message_t state); ++ int (*resume)(struct platform_device *pltdev); ++#endif ++}; ++ ++void fmc100_ecc0_switch(struct fmc_host *host, unsigned char op); ++ ++void fmc100_spi_nand_init(struct nand_chip *chip); ++ ++extern void fmc_spi_nand_ids_register(void); ++ ++extern void fmc_set_nand_system_clock(struct spi_op *op, int clk_en); ++ ++#ifdef CONFIG_PM ++int fmc100_suspend(struct platform_device *pltdev, pm_message_t state); ++int fmc100_resume(struct platform_device *pltdev); ++void fmc100_spi_nand_config(struct fmc_host *host); ++#endif ++ ++#endif /* End of __FMC100_H__ */ +diff --git a/drivers/mtd/nand/fmc100/fmc100_os.c b/drivers/mtd/nand/fmc100/fmc100_os.c +new file mode 100644 +index 000000000..2e2fd2789 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/fmc100_os.c +@@ -0,0 +1,264 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include "fmc100.h" ++ ++static int fmc100_spi_nand_pre_probe(struct nand_chip *chip) ++{ ++ uint8_t nand_maf_id; ++ struct fmc_host *host = chip->priv; ++ ++ /* Reset the chip first */ ++ host->send_cmd_reset(host); ++ udelay(1000);/* 1000us */ ++ ++ /* Check the ID */ ++ host->offset = 0; ++ /* dest fmcbuf just need init ID lenth */ ++ if (memset_s((unsigned char *)(chip->legacy.IO_ADDR_R), FMC_MEM_LEN, 0, 0x10)) { ++ return 1; ++ } ++ ++ host->send_cmd_readid(host); ++ nand_maf_id = fmc_readb(chip->legacy.IO_ADDR_R); ++ if (nand_maf_id == 0x00 || nand_maf_id == 0xff) { ++ printk("Cannot found a valid SPI Nand Device\n"); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int fmc_nand_scan(struct mtd_info *mtd) ++{ ++ int result; ++ unsigned char cs; ++ unsigned char chip_num = CONFIG_SPI_NAND_MAX_CHIP_NUM; ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ struct fmc_host *host = chip->priv; ++ ++ for (cs = 0; (chip_num != 0) && (cs < FMC_MAX_CHIP_NUM); cs++) { ++ if (fmc_cs_user[cs]) { ++ fmc_pr(BT_DBG, "\t\t*-Current CS(%d) is occupied.\n", ++ cs); ++ continue; ++ } ++ ++ host->cmd_op.cs = cs; ++ ++ if (fmc100_spi_nand_pre_probe(chip)) ++ return -ENODEV; ++ ++ fmc_pr(BT_DBG, "\t\t*-Scan SPI nand flash on CS: %d\n", cs); ++ if (nand_scan_with_ids(chip, chip_num, NULL)) ++ continue; ++ ++ chip_num--; ++ } ++ ++ if (chip_num == CONFIG_SPI_NAND_MAX_CHIP_NUM) ++ result = -ENXIO; ++ else ++ result = 0; ++ ++ return result; ++} ++ ++static int spi_nand_host_parm_init(struct platform_device *pltdev, ++ struct device *dev, ++ struct fmc_host **host, ++ struct nand_chip **chip, ++ struct mtd_info **mtd) ++{ ++ int len; ++ struct bsp_fmc *fmc = dev_get_drvdata(dev->parent); ++ ++ if (!fmc) { ++ dev_err(dev, "get mfd fmc devices failed\n"); ++ return -ENXIO; ++ } ++ ++ len = sizeof(struct fmc_host) + sizeof(struct nand_chip) + ++ sizeof(struct mtd_info); ++ *host = devm_kzalloc(dev, len, GFP_KERNEL); ++ if (!(*host)) { ++ dev_err(dev, "get host failed\n"); ++ return -ENOMEM; ++ } ++ (void)memset_s((char *)(*host), len, 0, len); ++ ++ platform_set_drvdata(pltdev, *host); ++ (*host)->dev = &pltdev->dev; ++ (*host)->chip = *chip = (struct nand_chip *)&(*host)[1]; ++ (*host)->mtd = *mtd = nand_to_mtd(*chip); ++ (*host)->regbase = fmc->regbase; ++ (*host)->iobase = fmc->iobase; ++ (*host)->clk = fmc->clk; ++ (*host)->lock = &fmc->lock; ++ (*host)->buffer = fmc->buffer; ++ (*host)->dma_buffer = fmc->dma_buffer; ++ /* dest fmcbuf just need init dma_len lenth */ ++ if (memset_s((char *)(*host)->iobase, FMC_MEM_LEN, 0xff, fmc->dma_len)) { ++ dev_err(dev, "memset_s failed\n"); ++ return -1; ++ } ++ (*chip)->legacy.IO_ADDR_R = (*chip)->legacy.IO_ADDR_W = (*host)->iobase; ++ (*chip)->priv = (*host); ++ ++ return 0; ++} ++ ++static int bsp_spi_nand_probe(struct platform_device *pltdev) ++{ ++ int result; ++ struct fmc_host *host = NULL; ++ struct nand_chip *chip = NULL; ++ struct mtd_info *mtd = NULL; ++ struct device *dev = &pltdev->dev; ++ struct device_node *np = NULL; ++ ++ fmc_pr(BT_DBG, "\t*-Start SPI Nand flash driver probe\n"); ++ result = spi_nand_host_parm_init(pltdev, dev, &host, &chip, &mtd); ++ if (result) { ++ fmc_pr(BT_DBG, "\t*-test0\n"); ++ return result; ++ } ++ /* Set system clock */ ++ result = clk_prepare_enable(host->clk); ++ if (result) { ++ printk("\nclk prepare enable failed!"); ++ goto fail; ++ } ++ ++ fmc100_spi_nand_init(chip); ++ ++ np = 
of_get_next_available_child(dev->of_node, NULL); ++ if (np == NULL) { ++ printk("\nof_get_next_available_child failed!"); ++ goto fail; ++ } ++ mtd->name = np->name; ++ mtd->type = MTD_NANDFLASH; ++ mtd->priv = chip; ++ mtd->owner = THIS_MODULE; ++ ++ result = of_property_read_u32(np, "spi-max-frequency", &host->clkrate); ++ if (result) { ++ printk("\nget fmc clkrate failed"); ++ goto fail; ++ } ++ ++ result = fmc_nand_scan(mtd); ++ if (result) { ++ fmc_pr(BT_DBG, "\t|-Scan SPI Nand failed.\n"); ++ goto fail; ++ } ++ ++ result = mtd_device_register(mtd, NULL, 0); ++ if (result == 0) { ++ fmc_pr(BT_DBG, "\t*-End driver probe !!\n"); ++ return 0; ++ } ++ ++ result = -ENODEV; ++ ++ mtd_device_unregister(mtd); ++ nand_cleanup(mtd_to_nand(mtd)); ++ ++fail: ++ clk_disable_unprepare(host->clk); ++ db_msg("Error: driver probe, result: %d\n", result); ++ return result; ++} ++ ++static int bsp_spi_nand_remove(struct platform_device *pltdev) ++{ ++ struct fmc_host *host = platform_get_drvdata(pltdev); ++ ++ if (host) { ++ if (host->clk) ++ clk_disable_unprepare(host->clk); ++ if (host->mtd) { ++ mtd_device_unregister(host->mtd); ++ nand_cleanup(mtd_to_nand(host->mtd)); ++ } ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int fmc100_os_suspend(struct platform_device *pltdev, ++ pm_message_t state) ++{ ++ struct fmc_host *host = platform_get_drvdata(pltdev); ++ ++ if (host && host->suspend) ++ return (host->suspend)(pltdev, state); ++ ++ return 0; ++} ++ ++static int fmc100_os_resume(struct platform_device *pltdev) ++{ ++ struct fmc_host *host = platform_get_drvdata(pltdev); ++ ++ if (host && host->resume) ++ return (host->resume)(pltdev); ++ ++ return 0; ++} ++#endif /* End of CONFIG_PM */ ++ ++static const struct of_device_id bsp_spi_nand_dt_ids[] = { ++ { .compatible = "vendor,spi-nand" }, ++ { } /* sentinel */ ++}; ++MODULE_DEVICE_TABLE(of, bsp_spi_nand_dt_ids); ++ ++static struct platform_driver bsp_spi_nand_driver = { ++ .driver = { ++ .name = "bsp_spi_nand", ++ .of_match_table = bsp_spi_nand_dt_ids, ++ }, ++ .probe = bsp_spi_nand_probe, ++ .remove = bsp_spi_nand_remove, ++#ifdef CONFIG_PM ++ .suspend = fmc100_os_suspend, ++ .resume = fmc100_os_resume, ++#endif ++}; ++module_platform_driver(bsp_spi_nand_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Vendor Flash Memory Controller V100 SPI Nand Driver"); +diff --git a/drivers/mtd/nand/fmc100/fmc100_spi_general.c b/drivers/mtd/nand/fmc100/fmc100_spi_general.c +new file mode 100644 +index 000000000..2cc682882 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/fmc100_spi_general.c +@@ -0,0 +1,335 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++static int spi_nand_feature_op_config(struct fmc_host *host, ++ u_char op, ++ const u_char *val, ++ u_char addr) ++{ ++ unsigned int reg; ++ ++ reg = fmc_cmd_cmd1((op != 0) ? SPI_CMD_SET_FEATURE : SPI_CMD_GET_FEATURES); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(FT_DBG, "\t||||-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ fmc_writel(host, FMC_ADDRL, addr); ++ fmc_pr(FT_DBG, "\t||||-Set ADDRL[%#x]%#x\n", FMC_ADDRL, addr); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_addr_num(FEATURES_OP_ADDR_NUM) | ++ OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(FT_DBG, "\t||||-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = fmc_data_num_cnt(FEATURES_DATA_LEN); ++ fmc_writel(host, FMC_DATA_NUM, reg); ++ fmc_pr(FT_DBG, "\t||||-Set DATA_NUM[%#x]%#x\n", FMC_DATA_NUM, reg); ++ ++ reg = FMC_OP_CMD1_EN | ++ FMC_OP_ADDR_EN | ++ FMC_OP_REG_OP_START; ++ ++ if (op == SET_OP) { ++ if (!val || !host->iobase) { ++ db_msg("Error: host->iobase is NULL !\n"); ++ return -1; ++ } ++ reg |= FMC_OP_WRITE_DATA_EN; ++ fmc_writeb(*val, host->iobase); ++ fmc_pr(FT_DBG, "\t||||-Write IO[%#lx]%#x\n", (long)host->iobase, ++ *(u_char *)host->iobase); ++ } else { ++ reg |= FMC_OP_READ_DATA_EN; ++ } ++ ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(FT_DBG, "\t||||-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ return 0; ++} ++ ++static int spi_nand_get_op(struct fmc_host *host, u_char *val) ++{ ++ unsigned int reg; ++ ++ if (!val) { ++ db_msg("Error: val is NULL !\n"); ++ return -1; ++ } ++ if (SR_DBG) ++ pr_info("\n"); ++ fmc_pr(SR_DBG, "\t\t|*-Start Get Status\n"); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(SR_DBG, "\t\t||-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = FMC_OP_READ_STATUS_EN | FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(SR_DBG, "\t\t||-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ *val = fmc_readl(host, FMC_STATUS); ++ fmc_pr(SR_DBG, "\t\t|*-End Get Status, result: %#x\n", *val); ++ ++ return 0; ++} ++ ++/* ++ Send set/get features command to SPI Nand flash ++*/ ++char spi_nand_feature_op(struct fmc_spi *spi, u_char op, u_char addr, ++ u_char *val) ++{ ++ const char *str[] = {"Get", "Set"}; ++ struct fmc_host *host = NULL; ++ int ret; ++ ++ if (!spi) { ++ db_msg("Error: spi is NULL !\n"); ++ return -1; ++ } ++ host = (struct fmc_host *)spi->host; ++ if (!host) { ++ db_msg("Error: host is NULL !\n"); ++ return -1; ++ } ++ ++ if ((op == GET_OP) && (addr == STATUS_ADDR)) { ++ ret = spi_nand_get_op(host, val); ++ return ret; ++ } ++ ++ fmc_pr(FT_DBG, "\t|||*-Start %s feature, addr[%#x]\n", str[op], addr); ++ ++ fmc100_ecc0_switch(host, ENABLE); ++ ++ ret = spi_nand_feature_op_config(host, op, val, addr); ++ if (ret) ++ return -1; ++ ++ if (op == GET_OP) { ++ if (!val || !host->iobase) { ++ db_msg("Error: val or host->iobase is NULL !\n"); ++ return -1; ++ } ++ *val = fmc_readb(host->iobase); ++ fmc_pr(FT_DBG, "\t||||-Read IO[%#lx]%#x\n", (long)host->iobase, ++ *(u_char *)host->iobase); ++ } ++ ++ fmc100_ecc0_switch(host, DISABLE); ++ ++ fmc_pr(FT_DBG, "\t|||*-End %s Feature[%#x]:%#x\n", str[op], addr, *val); ++ ++ return 0; ++} ++ ++/* ++ Read status[C0H]:[0]bit OIP, judge whether the device is busy or not ++*/ ++static int spi_general_wait_ready(struct fmc_spi *spi) ++{ ++ unsigned char status; ++ int ret; ++ unsigned long deadline = jiffies + FMC_MAX_READY_WAIT_JIFFIES; ++ struct fmc_host *host = NULL; ++ ++ if (spi == NULL || 
spi->host == NULL) { ++ db_msg("Error: host or host->spi is NULL!\n"); ++ return -1; ++ } ++ host = (struct fmc_host *)spi->host; ++ ++ do { ++ ret = spi_nand_feature_op(spi, GET_OP, STATUS_ADDR, &status); ++ if (ret) ++ return -1; ++ if ((status & STATUS_OIP_MASK) == 0) { ++ if ((host->cmd_op.l_cmd == NAND_CMD_ERASE2) && ++ ((((unsigned int)status) & STATUS_E_FAIL_MASK) != 0)) ++ return status; ++ ++ if ((host->cmd_op.l_cmd == NAND_CMD_PAGEPROG) && ++ ((((unsigned int)status) & STATUS_P_FAIL_MASK) != 0)) ++ return status; ++ ++ return 0; ++ } ++ ++ cond_resched(); ++ } while (time_after_eq(jiffies, deadline) == 0); ++ ++ db_msg("Error: SPI Nand wait ready timeout, status: %#x\n", status); ++ ++ return 1; ++} ++ ++static void spi_general_write_enable_op(struct fmc_host *host) ++{ ++ unsigned int regl; ++ ++ regl = fmc_readl(host, FMC_GLOBAL_CFG); ++ fmc_pr(WE_DBG, "\t||-Get GLOBAL_CFG[%#x]%#x\n", FMC_GLOBAL_CFG, regl); ++ if (regl & FMC_GLOBAL_CFG_WP_ENABLE) { ++ regl &= ~FMC_GLOBAL_CFG_WP_ENABLE; ++ fmc_writel(host, FMC_GLOBAL_CFG, regl); ++ fmc_pr(WE_DBG, "\t||-Set GLOBAL_CFG[%#x]%#x\n", ++ FMC_GLOBAL_CFG, regl); ++ } ++ ++ regl = fmc_cmd_cmd1(SPI_CMD_WREN); ++ fmc_writel(host, FMC_CMD, regl); ++ fmc_pr(WE_DBG, "\t||-Set CMD[%#x]%#x\n", FMC_CMD, regl); ++ ++ regl = op_cfg_fm_cs(host->cmd_op.cs) | OP_CFG_OEN_EN; ++ fmc_writel(host, FMC_OP_CFG, regl); ++ fmc_pr(WE_DBG, "\t||-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, regl); ++ ++ regl = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, regl); ++ fmc_pr(WE_DBG, "\t||-Set OP[%#x]%#x\n", FMC_OP, regl); ++ ++ fmc_cmd_wait_cpu_finish(host); ++} ++ ++/* ++ Send write enable cmd to SPI Nand, status[C0H]:[2]bit WEL must be set 1 ++*/ ++static int spi_general_write_enable(struct fmc_spi *spi) ++{ ++ u_char reg; ++ int ret; ++ struct fmc_host *host = NULL; ++ if (spi == NULL || spi->host == NULL) { ++ db_msg("Error: host or host->spi is NULL!\n"); ++ return -1; ++ } ++ host = spi->host; ++ if (WE_DBG) ++ pr_info("\n"); ++ fmc_pr(WE_DBG, "\t|*-Start Write Enable\n"); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, STATUS_ADDR, ®); ++ if (ret) ++ return -1; ++ if (reg & STATUS_WEL_MASK) { ++ fmc_pr(WE_DBG, "\t||-Write Enable was opened! reg: %#x\n", ++ reg); ++ return 0; ++ } ++ ++ spi_general_write_enable_op(host); ++ ++#if WE_DBG ++ if (!spi->driver) { ++ db_msg("Error: spi->driver is NULL!\n"); ++ return -1; ++ } ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, STATUS_ADDR, ®); ++ if (ret) ++ return -1; ++ if (reg & STATUS_WEL_MASK) { ++ fmc_pr(WE_DBG, "\t||-Write Enable success. reg: %#x\n", reg); ++ } else { ++ db_msg("Error: Write Enable failed! 
reg: %#x\n", reg); ++ return reg; ++ } ++#endif ++ ++ fmc_pr(WE_DBG, "\t|*-End Write Enable\n"); ++ return 0; ++} ++ ++/* ++ judge whether SPI Nand support QUAD read/write or not ++*/ ++static int spi_is_quad(const struct fmc_spi *spi) ++{ ++ const char *if_str[] = {"STD", "DUAL", "DIO", "QUAD", "QIO"}; ++ fmc_pr(QE_DBG, "\t\t|||*-SPI read iftype: %s write iftype: %s\n", ++ if_str[spi->read->iftype], if_str[spi->write->iftype]); ++ ++ if ((spi->read->iftype == IF_TYPE_QUAD) || ++ (spi->read->iftype == IF_TYPE_QIO) || ++ (spi->write->iftype == IF_TYPE_QUAD) || ++ (spi->write->iftype == IF_TYPE_QIO)) ++ return 1; ++ ++ return 0; ++} ++ ++/* ++ Send set features cmd to SPI Nand, feature[B0H]:[0]bit QE would be set ++*/ ++static int spi_general_qe_enable(struct fmc_spi *spi) ++{ ++ int op; ++ u_char reg; ++ int ret; ++ const char *str[] = {"Disable", "Enable"}; ++ if (!spi || !spi->host || !spi->driver) { ++ db_msg("Error: host or spi->host or spi->driver is NULL!\n"); ++ return -1; ++ } ++ fmc_pr(QE_DBG, "\t||*-Start SPI Nand flash QE\n"); ++ ++ op = spi_is_quad(spi); ++ ++ fmc_pr(QE_DBG, "\t|||*-End Quad check, SPI Nand %s Quad.\n", str[op]); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return -1; ++ fmc_pr(QE_DBG, "\t|||-Get [%#x]feature: %#x\n", FEATURE_ADDR, reg); ++ if ((reg & FEATURE_QE_ENABLE) == op) { ++ fmc_pr(QE_DBG, "\t||*-SPI Nand quad was %sd!\n", str[op]); ++ return op; ++ } ++ ++ if (op == ENABLE) ++ reg |= FEATURE_QE_ENABLE; ++ else ++ reg &= ~FEATURE_QE_ENABLE; ++ ++ ret = spi_nand_feature_op(spi, SET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return -1; ++ fmc_pr(QE_DBG, "\t|||-SPI Nand %s Quad\n", str[op]); ++ ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return -1; ++ if ((reg & FEATURE_QE_ENABLE) == op) ++ fmc_pr(QE_DBG, "\t|||-SPI Nand %s Quad succeed!\n", str[op]); ++ else ++ db_msg("Error: %s Quad failed! reg: %#x\n", str[op], reg); ++ ++ fmc_pr(QE_DBG, "\t||*-End SPI Nand %s Quad.\n", str[op]); ++ ++ return op; ++} +diff --git a/drivers/mtd/nand/fmc100/fmc_spi_nand_ids.c b/drivers/mtd/nand/fmc100/fmc_spi_nand_ids.c +new file mode 100644 +index 000000000..9188bf153 +--- /dev/null ++++ b/drivers/mtd/nand/fmc100/fmc_spi_nand_ids.c +@@ -0,0 +1,476 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ *
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++#include "fmc100.h"
++
++set_read_quad(1, INFINITE, 80);
++
++set_write_quad(0, 256, 80);
++
++set_erase_sector_128k(0, _128K, 80);
++
++#include "fmc100_spi_general.c"
++static struct spi_drv spi_driver_general = {
++ .wait_ready = spi_general_wait_ready,
++ .write_enable = spi_general_write_enable,
++ .qe_enable = spi_general_qe_enable,
++};
++
++/* some SPI NAND flashes power up with QUAD already enabled; no QE setup is needed */
++__maybe_unused static struct spi_drv spi_driver_no_qe = {
++ .wait_ready = spi_general_wait_ready,
++ .write_enable = spi_general_write_enable,
++};
++
++#define SPI_NAND_ID_TAB_VER "2.7"
++
++/******************************************************************************
++ * We do not guarantee the compatibility of the following device models in the
++ * table. Device compatibility is based solely on the list of compatible
++ * devices in the release package.
++ ******************************************************************************/
++static struct spi_nand_info fmc_spi_nand_flash_table[] = {
++ /* name id id_len chipsize(Bytes) erasesize pagesize oobsize(Bytes) */
++ /* Dosilicon 1.8V DS35Q4GM-IB 4Gb */
++ {
++ .name = "DS35Q4GM-IB",
++ .id = {0xe5, 0xA4},
++ .id_len = 2,
++ .chipsize = _512M,
++ .erasesize = _128K,
++ .pagesize = _2K,
++ .oobsize = 128,
++ .badblock_pos = BBP_FIRST_PAGE,
++ .read = {
++ &read_quad(1, INFINITE, 80), /* 104MHz */
++ 0
++ },
++ .write = {
++ &write_quad(0, 256, 80), /* 104MHz */
++ 0
++ },
++ .erase = {
++ &erase_sector_128k(0, _128K, 80), /* 104MHz */
++ 0
++ },
++ .driver = &spi_driver_general,
++ },
++
++ { .id_len = 0, },
++};
++
++
++static void fmc100_spi_nand_search_rw(struct spi_nand_info *spiinfo,
++ struct spi_op *spiop_rw, u_int iftype, u_int max_dummy, int rw_type)
++{
++ int ix = 0;
++ struct spi_op **spiop, **fitspiop;
++ int ret;
++
++ for (fitspiop = spiop = (rw_type ?
spiinfo->write : spiinfo->read); ++ (*spiop) && ix < MAX_SPI_OP; spiop++, ix++) { ++ if (((((unsigned int)(*spiop)->iftype) & iftype) != 0) ++ && ((*spiop)->dummy <= max_dummy) ++ && (*fitspiop)->iftype < (*spiop)->iftype) ++ fitspiop = spiop; ++ } ++ ret = memcpy_s(spiop_rw, sizeof(struct spi_op), (*fitspiop), ++ sizeof(struct spi_op)); ++ if (ret) ++ printk("%s: memcpy_s failed\n", __func__); ++} ++ ++ ++static void fmc100_spi_nand_get_erase(const struct spi_nand_info *spiinfo, ++ struct spi_op *spiop_erase) ++{ ++ int ix; ++ int ret; ++ ++ spiop_erase->size = 0; ++ for (ix = 0; ix < MAX_SPI_OP; ix++) { ++ if (spiinfo->erase[ix] == NULL) ++ break; ++ ++ if (spiinfo->erasesize == spiinfo->erase[ix]->size) { ++ ret = memcpy_s(&spiop_erase[ix], sizeof(struct spi_op), ++ spiinfo->erase[ix], sizeof(struct spi_op)); ++ if (ret) ++ printk("%s:memcpy_s failed\n", __func__); ++ break; ++ } ++ } ++} ++ ++ ++static void fmc100_map_spi_op(struct fmc_spi *spi) ++{ ++ unsigned char ix; ++ const int iftype_read[] = { ++ SPI_IF_READ_STD, IF_TYPE_STD, ++ SPI_IF_READ_FAST, IF_TYPE_STD, ++ SPI_IF_READ_DUAL, IF_TYPE_DUAL, ++ SPI_IF_READ_DUAL_ADDR, IF_TYPE_DIO, ++ SPI_IF_READ_QUAD, IF_TYPE_QUAD, ++ SPI_IF_READ_QUAD_ADDR, IF_TYPE_QIO, ++ 0, 0, ++ }; ++ const int iftype_write[] = { ++ SPI_IF_WRITE_STD, IF_TYPE_STD, ++ SPI_IF_WRITE_QUAD, IF_TYPE_QUAD, ++ 0, 0, ++ }; ++ const char *if_str[] = {"STD", "DUAL", "DIO", "QUAD", "QIO"}; ++ ++ fmc_pr(BT_DBG, "\t||*-Start Get SPI operation iftype\n"); ++ ++ for (ix = 0; iftype_write[ix]; ix += 2) { /* 2 is row1 of iftype_write[] */ ++ if (spi->write->iftype == iftype_write[ix]) { ++ spi->write->iftype = iftype_write[ix + 1]; ++ break; ++ } ++ } ++ fmc_pr(BT_DBG, "\t|||-Get best write iftype: %s \n", ++ if_str[spi->write->iftype]); ++ ++ for (ix = 0; iftype_read[ix]; ix += 2) { /* 2 is row1 of iftype_read[] */ ++ if (spi->read->iftype == iftype_read[ix]) { ++ spi->read->iftype = iftype_read[ix + 1]; ++ break; ++ } ++ } ++ fmc_pr(BT_DBG, "\t|||-Get best read iftype: %s \n", ++ if_str[spi->read->iftype]); ++ ++ spi->erase->iftype = IF_TYPE_STD; ++ fmc_pr(BT_DBG, "\t|||-Get best erase iftype: %s \n", ++ if_str[spi->erase->iftype]); ++ ++ fmc_pr(BT_DBG, "\t||*-End Get SPI operation iftype \n"); ++} ++ ++static void fmc100_spi_nand_op_cmd_init(struct spi_nand_info *spi_dev, ++ struct fmc_spi *spi) ++{ ++ fmc100_spi_nand_search_rw(spi_dev, spi->read, ++ FMC_SPI_NAND_SUPPORT_READ, ++ FMC_SPI_NAND_SUPPORT_MAX_DUMMY, RW_OP_READ); ++ fmc_pr(BT_DBG, "\t||-Save spi->read op cmd:%#x\n", spi->read->cmd); ++ ++ fmc100_spi_nand_search_rw(spi_dev, spi->write, ++ FMC_SPI_NAND_SUPPORT_WRITE, ++ FMC_SPI_NAND_SUPPORT_MAX_DUMMY, RW_OP_WRITE); ++ fmc_pr(BT_DBG, "\t||-Save spi->write op cmd:%#x\n", spi->write->cmd); ++ ++ fmc100_spi_nand_get_erase(spi_dev, spi->erase); ++ fmc_pr(BT_DBG, "\t||-Save spi->erase op cmd:%#x\n", spi->erase->cmd); ++ ++ fmc100_map_spi_op(spi); ++} ++ ++ ++static int fmc100_spi_nand_dis_wr_protect(struct fmc_spi *spi, u_char *reg) ++{ ++ int ret; ++ ++ ret = spi_nand_feature_op(spi, GET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, "\t||-Get protect status[%#x]: %#x\n", PROTECT_ADDR, ++ *reg); ++ if (any_bp_enable(*reg)) { ++ *reg &= ~ALL_BP_MASK; ++ ret = spi_nand_feature_op(spi, SET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, "\t||-Set [%#x]FT %#x\n", PROTECT_ADDR, *reg); ++ ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, 
"\t||-Check BP disable result: %#x\n", *reg); ++ if (any_bp_enable(*reg)) ++ db_msg("Error: Write protection disable failed!\n"); ++ } ++ return ret; ++} ++ ++static int fmc100_spi_nand_dis_chip_inner_ecc(struct fmc_spi *spi, u_char *reg) ++{ ++ int ret; ++ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, "\t||-Get feature status[%#x]: %#x\n", FEATURE_ADDR, ++ *reg); ++ if (*reg & FEATURE_ECC_ENABLE) { ++ *reg &= ~FEATURE_ECC_ENABLE; ++ ret = spi_nand_feature_op(spi, SET_OP, FEATURE_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, "\t||-Set [%#x]FT: %#x\n", FEATURE_ADDR, *reg); ++ ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(BT_DBG, "\t||-Check internal ECC disable result: %#x\n", ++ *reg); ++ if (*reg & FEATURE_ECC_ENABLE) ++ db_msg("Error: Chip internal ECC disable failed!\n"); ++ } ++ return ret; ++} ++ ++static void fmc100_spi_ids_probe(struct mtd_info *mtd, ++ struct spi_nand_info *spi_dev) ++{ ++ u_char reg; ++ int ret; ++ struct nand_chip *chip = NULL; ++ struct fmc_host *host = NULL; ++ struct fmc_spi *spi = NULL; ++ ++ if (mtd == NULL || spi_dev == NULL) { ++ db_msg("Error: mtd or spi_dev is NULL!\n"); ++ return; ++ } ++ chip = mtd_to_nand(mtd); ++ if (chip == NULL || chip->priv == NULL) { ++ db_msg("Error: chip is NULL!\n"); ++ return; ++ } ++ host = chip->priv; ++ if (host->spi == NULL) { ++ db_msg("Error: host->spi is NULL!\n"); ++ return; ++ } ++ spi = host->spi; ++ fmc_pr(BT_DBG, "\t|*-Start match SPI operation & chip init\n"); ++ ++ spi->host = host; ++ spi->name = spi_dev->name; ++ spi->driver = spi_dev->driver; ++ if (!spi->driver) { ++ db_msg("Error: host->driver is NULL!\n"); ++ return; ++ } ++ ++ fmc100_spi_nand_op_cmd_init(spi_dev, spi); ++ ++ if (spi->driver->qe_enable) ++ spi->driver->qe_enable(spi); ++ ++ /* Disable write protection */ ++ ret = fmc100_spi_nand_dis_wr_protect(spi, ®); ++ if (ret) ++ return; ++ ++ /* Disable chip internal ECC */ ++ ret = fmc100_spi_nand_dis_chip_inner_ecc(spi, ®); ++ if (ret) ++ return; ++ ++ fmc_cs_user[host->cmd_op.cs]++; ++ ++ fmc_pr(BT_DBG, "\t|*-End match SPI operation & chip init\n"); ++} ++ ++static struct nand_flash_dev spi_nand_dev; ++ ++static struct nand_flash_dev *spi_nand_get_flash_info(struct mtd_info *mtd, ++ unsigned char *id) ++{ ++ unsigned char ix; ++ int len; ++ char buffer[BUFF_LEN]; ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ struct fmc_host *host = chip->priv; ++ struct spi_nand_info *spi_dev = fmc_spi_nand_flash_table; ++ struct nand_flash_dev *type = &spi_nand_dev; ++ int ret; ++ ++ fmc_pr(BT_DBG, "\t*-Start find SPI Nand flash\n"); ++ ++ len = sprintf_s(buffer, BUFF_LEN, "SPI Nand(cs %d) ID: %#x %#x", ++ host->cmd_op.cs, id[0], id[1]); ++ if (len < 0) ++ printk("%s, line: %d, sprintf_s failed\n", __func__, __LINE__); ++ ++ for (; spi_dev->id_len; spi_dev++) { ++ unsigned long long tmp; ++ if (memcmp(id, spi_dev->id, spi_dev->id_len)) ++ continue; ++ ++ for (ix = 2; ix < spi_dev->id_len; ix++) { /* star from id[2] */ ++ if ((spi_dev->id_len <= MAX_SPI_NAND_ID_LEN)) { ++ len += sprintf_s(buffer + len, BUFF_LEN - len, " %#x", ++ id[ix]); ++ if (len < 0) ++ printk("%s,line: %d, sprintf_s failed\n", ++ __func__, __LINE__); ++ } ++ } ++ pr_info("%s\n", buffer); ++ ++ fmc_pr(BT_DBG, "\t||-CS(%d) found SPI Nand: %s\n", ++ host->cmd_op.cs, spi_dev->name); ++ ++ type->name = spi_dev->name; ++ ret = memcpy_s(type->id, MAX_SPI_NAND_ID_LEN, spi_dev->id, ++ 
spi_dev->id_len); ++ if (ret) { ++ printk("%s: memcpy_s failed\n", __func__); ++ return NULL; ++ } ++ type->pagesize = spi_dev->pagesize; ++ type->chipsize = (unsigned int)(spi_dev->chipsize >> ++ 20); /* 1M unit shift right 20 bit */ ++ type->erasesize = spi_dev->erasesize; ++ type->id_len = spi_dev->id_len; ++ type->oobsize = spi_dev->oobsize; ++ fmc_pr(BT_DBG, "\t|-Save struct nand_flash_dev info\n"); ++ ++ mtd->oobsize = spi_dev->oobsize; ++ mtd->erasesize = spi_dev->erasesize; ++ mtd->writesize = spi_dev->pagesize; ++ ++ chip->base.memorg.pagesize = spi_dev->pagesize; ++ chip->base.memorg.pages_per_eraseblock = spi_dev->erasesize / spi_dev->pagesize; ++ ++ tmp = spi_dev->chipsize; ++ do_div(tmp, spi_dev->erasesize); ++ chip->base.memorg.eraseblocks_per_lun = tmp; ++ ++ chip->base.memorg.oobsize = spi_dev->oobsize; ++ fmc100_spi_ids_probe(mtd, spi_dev); ++ ++ fmc_pr(BT_DBG, "\t*-Found SPI nand: %s\n", spi_dev->name); ++ ++ return type; ++ } ++ ++ fmc_pr(BT_DBG, "\t*-Not found SPI nand flash, %s\n", buffer); ++ ++ return NULL; ++} ++ ++ ++void fmc_spi_nand_ids_register(void) ++{ ++ pr_info("SPI Nand ID Table Version %s\n", SPI_NAND_ID_TAB_VER); ++ get_spi_nand_flash_type_hook = spi_nand_get_flash_info; ++} ++ ++#ifdef CONFIG_PM ++ ++ ++static int fmc100_spi_nand_dis_wp(struct fmc_spi *spi, u_char *reg) ++{ ++ int ret; ++ ++ ret = spi_nand_feature_op(spi, GET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(PM_DBG, "\t|-Get protect status[%#x]: %#x\n", PROTECT_ADDR, ++ *reg); ++ if (any_bp_enable(*reg)) { ++ *reg &= ~ALL_BP_MASK; ++ ret = spi_nand_feature_op(spi, SET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(PM_DBG, "\t|-Set [%#x]FT %#x\n", PROTECT_ADDR, *reg); ++ ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, PROTECT_ADDR, reg); ++ if (ret) ++ return ret; ++ fmc_pr(PM_DBG, "\t|-Check BP disable result: %#x\n", *reg); ++ if (any_bp_enable(*reg)) ++ db_msg("Error: Write protection disable failed!\n"); ++ } ++ return ret; ++} ++ ++void fmc100_spi_nand_config(struct fmc_host *host) ++{ ++ u_char reg; ++ int ret; ++ struct fmc_spi *spi = NULL; ++ static const char *str[] = {"STD", "DUAL", "DIO", "QUAD", "QIO"}; ++ ++ if ((host == NULL) || (host->spi == NULL)) { ++ db_msg("Error: host or host->spi is NULL!\n"); ++ return; ++ } ++ spi = host->spi; ++ /* judge whether support QUAD read/write or not, set it if yes */ ++ fmc_pr(PM_DBG, "\t|-SPI read iftype: %s write iftype: %s\n", ++ str[spi->read->iftype], str[spi->write->iftype]); ++ ++ if (spi->driver->qe_enable) ++ spi->driver->qe_enable(spi); ++ ++ /* Disable write protection */ ++ ret = fmc100_spi_nand_dis_wp(spi, ®); ++ if (ret) ++ return; ++ /* Disable chip internal ECC */ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return; ++ fmc_pr(PM_DBG, "\t|-Get feature status[%#x]: %#x\n", FEATURE_ADDR, ++ reg); ++ if (reg & FEATURE_ECC_ENABLE) { ++ reg &= ~FEATURE_ECC_ENABLE; ++ ret = spi_nand_feature_op(spi, SET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return; ++ fmc_pr(PM_DBG, "\t|-Set [%#x]FT: %#x\n", FEATURE_ADDR, reg); ++ ++ spi->driver->wait_ready(spi); ++ ++ ret = spi_nand_feature_op(spi, GET_OP, FEATURE_ADDR, ®); ++ if (ret) ++ return; ++ fmc_pr(PM_DBG, "\t|-Check internal ECC disable result: %#x\n", ++ reg); ++ if (reg & FEATURE_ECC_ENABLE) ++ db_msg("Error: Chip internal ECC disable failed!\n"); ++ } ++} ++ ++#endif /* CONFIG_PM */ +diff --git a/drivers/mtd/nand/fmc100_nand/Kconfig b/drivers/mtd/nand/fmc100_nand/Kconfig +new file mode 100644 +index 
000000000..897fdb2af +--- /dev/null ++++ b/drivers/mtd/nand/fmc100_nand/Kconfig +@@ -0,0 +1,46 @@ ++# ++# drivers/mtd/nand/fmc100_nand/Kconfig ++# ++ ++menuconfig MTD_NAND_FMC100 ++ bool "Vendor Flash Memory Controller v100 Nand devices support" ++ depends on MFD_BSP_FMC && !MTD_SPI_NAND_BSP ++ select MISC_FILESYSTEMS ++ select MTD_BLOCK ++ select YAFFS_FS ++ select YAFFS_YAFFS2 ++ help ++ Vendor Flash Memory Controller version 100 is called fmc100 for ++ short. The controller support DMA transfers while reading or writing ++ the Nand flash. ++ ++if MTD_NAND_FMC100 ++ ++config FMC100_NAND_EDO_MODE ++ bool "the Extended Data Out(EDO) mode" ++ help ++ In Extended data out (EDO), a new data cycle is started while the data ++ output of the previous cycle is still active. This process of cycle ++ overlapping, called pipelining, increases processing speed by about ++ 10 nanoseconds per cycle,increasing computer performance by about 5 ++ percent compared to performance using FMP. ++ ++config RW_H_WIDTH ++ int "the width of Read/Write HIGH Hold Time (0 to 15)" ++ range 0 15 ++ help ++ the Read/Write HIGH Hold Time of nand flash ++ ++config R_L_WIDTH ++ int "the Read pulse width (0 to 15)" ++ range 0 15 ++ help ++ the Read/Write LOW Hold Time of nand flash ++ ++config W_L_WIDTH ++ int "the Write pulse width (0 to 15)" ++ range 0 15 ++ help ++ the Read/Write LOW Hold Time of nand flash ++ ++endif # End of MTD_NAND_FMC100 +diff --git a/drivers/mtd/nand/fmc100_nand/Makefile b/drivers/mtd/nand/fmc100_nand/Makefile +new file mode 100644 +index 000000000..997b8f4da +--- /dev/null ++++ b/drivers/mtd/nand/fmc100_nand/Makefile +@@ -0,0 +1,26 @@ ++# ++# The Flash Memory Controller v100 Device Driver for vendor ++# ++# Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++# ++# This program is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License as published by the ++# Free Software Foundation; either version 2 of the License, or (at your ++# option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++# ++# ++ ++# ++# drivers/mtd/nand/fmc100_nand/Makefile ++# ++ ++obj-y += fmc_nand_spl_ids.o ++obj-y += fmc100_nand.o fmc100_nand_os.o +diff --git a/drivers/mtd/nand/fmc100_nand/fmc100_nand.c b/drivers/mtd/nand/fmc100_nand/fmc100_nand.c +new file mode 100644 +index 000000000..2782f82dd +--- /dev/null ++++ b/drivers/mtd/nand/fmc100_nand/fmc100_nand.c +@@ -0,0 +1,1141 @@ ++/* ++ * The Flash Memory Controller v100 Device Driver for vendor ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ *
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "../raw/nfc_gen.h"
++#include "fmc100_nand_os.h"
++#include "fmc100_nand.h"
++
++#include
++#include
++
++static void fmc100_dma_addr_config(struct fmc_host *host, char *op)
++{
++ unsigned int reg = (unsigned int)host->dma_buffer;
++
++ fmc_pr(DMA_DB, "\t\t *-Start %s page dma transfer\n", op);
++
++ fmc_writel(host, FMC_DMA_SADDR_D0, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDR0[%#x]%#x\n", FMC_DMA_SADDR_D0, reg);
++
++#ifdef CONFIG_64BIT
++ reg = (unsigned int)((host->dma_buffer & FMC_DMA_SADDRH_MASK) >>
++ FMC_DMA_BIT_SHIFT_LENTH);
++ fmc_writel(host, FMC_DMA_SADDRH_D0, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDRH0[%#x]%#x\n", FMC_DMA_SADDRH_D0, reg);
++#endif
++
++ reg += FMC_DMA_ADDR_OFFSET;
++ fmc_writel(host, FMC_DMA_SADDR_D1, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDR1[%#x]%#x\n", FMC_DMA_SADDR_D1, reg);
++
++ reg += FMC_DMA_ADDR_OFFSET;
++ fmc_writel(host, FMC_DMA_SADDR_D2, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDR2[%#x]%#x\n", FMC_DMA_SADDR_D2, reg);
++
++ reg += FMC_DMA_ADDR_OFFSET;
++ fmc_writel(host, FMC_DMA_SADDR_D3, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDR3[%#x]%#x\n", FMC_DMA_SADDR_D3, reg);
++
++ reg = host->dma_oob;
++ fmc_writel(host, FMC_DMA_SADDR_OOB, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set OOB[%#x]%#x\n", FMC_DMA_SADDR_OOB, reg);
++
++#ifdef CONFIG_64BIT
++ reg = (unsigned int)((host->dma_oob & FMC_DMA_SADDRH_MASK) >>
++ FMC_DMA_BIT_SHIFT_LENTH);
++ fmc_writel(host, FMC_DMA_SADDRH_OOB, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set ADDRH0[%#x]%#x\n", FMC_DMA_SADDRH_OOB, reg);
++#endif
++}
++
++static void fmc100_dma_transfer(struct fmc_host *host, unsigned int todev)
++{
++ unsigned int reg;
++ char *op = todev ? "write" : "read";
++
++ fmc100_dma_addr_config(host, op);
++
++ if (host->ecctype == NAND_ECC_0BIT) {
++ /* assign reg first so the debug print reports the value written */
++ reg = fmc_dma_len_set(host->oobsize);
++ fmc_writel(host, FMC_DMA_LEN, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set LEN[%#x]%#x\n", FMC_DMA_LEN, reg);
++ }
++ reg = FMC_OP_READ_DATA_EN | FMC_OP_WRITE_DATA_EN;
++ fmc_writel(host, FMC_OP, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set OP[%#x]%#x\n", FMC_OP, reg);
++
++ reg = FMC_DMA_AHB_CTRL_DMA_PP_EN |
++ FMC_DMA_AHB_CTRL_BURST16_EN |
++ FMC_DMA_AHB_CTRL_BURST8_EN |
++ FMC_DMA_AHB_CTRL_BURST4_EN;
++ fmc_writel(host, FMC_DMA_AHB_CTRL, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set AHBCTRL[%#x]%#x\n", FMC_DMA_AHB_CTRL, reg);
++
++ reg = op_cfg_fm_cs(host->cmd_op.cs) |
++ op_cfg_addr_num(host->addr_cycle);
++ fmc_writel(host, FMC_OP_CFG, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg);
++
++ reg = OP_CTRL_DMA_OP_READY;
++ if (todev)
++ reg |= op_ctrl_rw_op(todev);
++
++ fmc_writel(host, FMC_OP_CTRL, reg);
++ fmc_pr(DMA_DB, "\t\t |-Set OP_CTRL[%#x]%#x\n", FMC_OP_CTRL, reg);
++
++ fmc_dma_wait_cpu_finish(host);
++
++ fmc_pr(DMA_DB, "\t\t *-End %s page dma transfer\n", op);
++
++ return;
++}
++
++static void fmc100_send_cmd_write(struct fmc_host *host)
++{
++ unsigned int reg;
++
++ fmc_pr(WR_DBG, "\t|*-Start send page programme cmd\n");
++
++ if (*host->bbm != 0xFF && *host->bbm != 0x00)
++ pr_info("WARNING: attempt to write an invalid bbm.
" ++ "page: 0x%08x, mark: 0x%02x,\n", ++ get_page_index(host), *host->bbm); ++ ++ host->enable_ecc_randomizer(host, ENABLE, ENABLE); ++ ++ reg = host->addr_value[1]; ++ fmc_writel(host, FMC_ADDRH, reg); ++ fmc_pr(WR_DBG, "\t||-Set ADDRH[%#x]%#x\n", FMC_ADDRH, reg); ++ ++ reg = host->addr_value[0] & 0xffff0000; ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(WR_DBG, "\t||-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = fmc_cmd_cmd2(NAND_CMD_PAGEPROG) | fmc_cmd_cmd1(NAND_CMD_SEQIN); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(WR_DBG, "\t||-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ *host->epm = 0x0000; ++ ++ fmc100_dma_transfer(host, RW_OP_WRITE); ++ ++ fmc_pr(WR_DBG, "\t|*-End send page read cmd\n"); ++} ++ ++static void fmc100_send_cmd_read(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(RD_DBG, "\t*-Start send page read cmd\n"); ++ ++ if ((host->addr_value[0] == host->cache_addr_value[0]) && ++ (host->addr_value[1] == host->cache_addr_value[1])) { ++ fmc_pr(RD_DBG, "\t*-Cache hit! addr1[%#x], addr0[%#x]\n", ++ host->addr_value[1], host->addr_value[0]); ++ return; ++ } ++ ++ host->page_status = 0; ++ ++ host->enable_ecc_randomizer(host, ENABLE, ENABLE); ++ ++ reg = FMC_INT_CLR_ALL; ++ fmc_writel(host, FMC_INT_CLR, reg); ++ fmc_pr(RD_DBG, "\t|-Set INT_CLR[%#x]%#x\n", FMC_INT_CLR, reg); ++ ++ reg = host->nand_cfg; ++ fmc_writel(host, FMC_CFG, reg); ++ fmc_pr(RD_DBG, "\t|-Set CFG[%#x]%#x\n", FMC_CFG, reg); ++ ++ reg = host->addr_value[1]; ++ fmc_writel(host, FMC_ADDRH, reg); ++ fmc_pr(RD_DBG, "\t|-Set ADDRH[%#x]%#x\n", FMC_ADDRH, reg); ++ ++ reg = host->addr_value[0] & 0xffff0000; ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(RD_DBG, "\t|-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = fmc_cmd_cmd2(NAND_CMD_READSTART) | fmc_cmd_cmd1(NAND_CMD_READ0); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(RD_DBG, "\t|-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ fmc100_dma_transfer(host, RW_OP_READ); ++ ++ if (fmc_readl(host, FMC_INT) & FMC_INT_ERR_INVALID) ++ host->page_status |= FMC100_PS_UC_ECC; ++ ++ host->cache_addr_value[0] = host->addr_value[0]; ++ host->cache_addr_value[1] = host->addr_value[1]; ++ ++ fmc_pr(RD_DBG, "\t*-End send page read cmd\n"); ++} ++ ++static void fmc100_send_cmd_erase(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(ER_DBG, "\t *-Start send cmd erase\n"); ++ ++ /* Don't case the read retry config */ ++ host->enable_ecc_randomizer(host, DISABLE, DISABLE); ++ ++ reg = host->addr_value[0]; ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(ER_DBG, "\t |-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = fmc_cmd_cmd2(NAND_CMD_ERASE2) | fmc_cmd_cmd1(NAND_CMD_ERASE1); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(ER_DBG, "\t |-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_addr_num(host->addr_cycle); ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(ER_DBG, "\t |-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ /* need to config WAIT_READY_EN */ ++ reg = FMC_OP_WAIT_READY_EN | ++ FMC_OP_CMD1_EN | ++ FMC_OP_CMD2_EN | ++ FMC_OP_ADDR_EN | ++ FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(ER_DBG, "\t |-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ fmc_pr(ER_DBG, "\t |*-End send cmd erase\n"); ++} ++ ++static void fmc100_ecc_randomizer(struct fmc_host *host, int ecc_en, ++ int randomizer_en) ++{ ++ unsigned int old_reg, reg; ++ unsigned int change = 0; ++ char *ecc_op = ecc_en ? "Quit" : "Enter"; ++ char *rand_op = randomizer_en ? 
"Enable" : "Disable"; ++ ++ if (IS_NAND_RANDOM(host)) { ++ reg = old_reg = fmc_readl(host, FMC_GLOBAL_CFG); ++ if (randomizer_en) ++ reg |= FMC_GLOBAL_CFG_RANDOMIZER_EN; ++ else ++ reg &= ~FMC_GLOBAL_CFG_RANDOMIZER_EN; ++ ++ if (old_reg != reg) { ++ fmc_pr(EC_DBG, "\t |*-Start %s randomizer\n", rand_op); ++ fmc_pr(EC_DBG, "\t ||-Get global CFG[%#x]%#x\n", ++ FMC_GLOBAL_CFG, old_reg); ++ fmc_writel(host, FMC_GLOBAL_CFG, reg); ++ fmc_pr(EC_DBG, "\t ||-Set global CFG[%#x]%#x\n", ++ FMC_GLOBAL_CFG, reg); ++ change++; ++ } ++ } ++ ++ old_reg = fmc_readl(host, FMC_CFG); ++ reg = (ecc_en ? host->nand_cfg : host->nand_cfg_ecc0); ++ ++ if (old_reg != reg) { ++ fmc_pr(EC_DBG, "\t |%s-Start %s ECC0 mode\n", change ? "|" : "*", ++ ecc_op); ++ fmc_pr(EC_DBG, "\t ||-Get CFG[%#x]%#x\n", FMC_CFG, old_reg); ++ fmc_writel(host, FMC_CFG, reg); ++ fmc_pr(EC_DBG, "\t ||-Set CFG[%#x]%#x\n", FMC_CFG, reg); ++ change++; ++ } ++ ++ if (EC_DBG && change) ++ fmc_pr(EC_DBG, "\t |*-End randomizer and ECC0 mode config\n"); ++} ++ ++static void fmc100_send_cmd_status(struct fmc_host *host) ++{ ++ unsigned int regval; ++ ++ host->enable_ecc_randomizer(host, DISABLE, DISABLE); ++ ++ regval = op_cfg_fm_cs(host->cmd_op.cs); ++ fmc_writel(host, FMC_OP_CFG, regval); ++ ++ regval = FMC_OP_READ_STATUS_EN | FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, regval); ++ ++ fmc_cmd_wait_cpu_finish(host); ++} ++ ++static void fmc100_send_cmd_readid(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(BT_DBG, "\t *-Start read nand flash ID\n"); ++ ++ host->enable_ecc_randomizer(host, DISABLE, DISABLE); ++ ++ reg = fmc_data_num_cnt(host->cmd_op.data_no); ++ fmc_writel(host, FMC_DATA_NUM, reg); ++ fmc_pr(BT_DBG, "\t |-Set DATA_NUM[%#x]%#x\n", FMC_DATA_NUM, reg); ++ ++ reg = fmc_cmd_cmd1(NAND_CMD_READID); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(BT_DBG, "\t |-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = 0; ++ fmc_writel(host, FMC_ADDRL, reg); ++ fmc_pr(BT_DBG, "\t |-Set ADDRL[%#x]%#x\n", FMC_ADDRL, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs) | ++ op_cfg_addr_num(READ_ID_ADDR_NUM); ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(BT_DBG, "\t |-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = FMC_OP_CMD1_EN | ++ FMC_OP_ADDR_EN | ++ FMC_OP_READ_DATA_EN | ++ FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(BT_DBG, "\t |-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ host->addr_cycle = 0x0; ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ fmc_pr(BT_DBG, "\t *-End read nand flash ID\n"); ++} ++ ++static void fmc100_send_cmd_reset(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ fmc_pr(BT_DBG, "\t *-Start reset nand flash\n"); ++ ++ reg = fmc_cmd_cmd1(NAND_CMD_RESET); ++ fmc_writel(host, FMC_CMD, reg); ++ fmc_pr(BT_DBG, "\t |-Set CMD[%#x]%#x\n", FMC_CMD, reg); ++ ++ reg = op_cfg_fm_cs(host->cmd_op.cs); ++ fmc_writel(host, FMC_OP_CFG, reg); ++ fmc_pr(BT_DBG, "\t |-Set OP_CFG[%#x]%#x\n", FMC_OP_CFG, reg); ++ ++ reg = FMC_OP_CMD1_EN | ++ FMC_OP_WAIT_READY_EN | ++ FMC_OP_REG_OP_START; ++ fmc_writel(host, FMC_OP, reg); ++ fmc_pr(BT_DBG, "\t |-Set OP[%#x]%#x\n", FMC_OP, reg); ++ ++ fmc_cmd_wait_cpu_finish(host); ++ ++ fmc_pr(BT_DBG, "\t *-End reset nand flash\n"); ++} ++ ++static unsigned char fmc100_read_byte(struct nand_chip *chip) ++{ ++ unsigned char value = 0; ++ struct fmc_host *host = chip->priv; ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_READID) { ++ value = fmc_readb((void __iomem *)(chip->legacy.IO_ADDR_R + host->offset)); ++ host->offset++; ++ if (host->cmd_op.data_no == host->offset) ++ host->cmd_op.l_cmd = 0; ++ 
++ return value; ++ } ++ ++ if (host->cmd_op.cmd == NAND_CMD_STATUS) { ++ value = fmc_readl(host, FMC_STATUS); ++ if (host->cmd_op.l_cmd == NAND_CMD_ERASE1) ++ fmc_pr(ER_DBG, "\t*-Erase WP status: %#x\n", value); ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_PAGEPROG) ++ fmc_pr(WR_DBG, "\t*-Write WP status: %#x\n", value); ++ ++ return value; ++ } ++ ++ if (host->cmd_op.l_cmd == NAND_CMD_READOOB) { ++ value = fmc_readb((void __iomem *)(host->buffer + ++ host->pagesize + host->offset)); ++ host->offset++; ++ return value; ++ } ++ ++ host->offset++; ++ ++ return fmc_readb((void __iomem *)(host->buffer + host->column + ++ host->offset - 1)); ++} ++ ++static void fmc100_write_buf(struct nand_chip *chip, ++ const u_char *buf, int len) ++{ ++ struct fmc_host *host = chip->priv; ++ int ret; ++ ++#ifdef FMC100_NAND_SUPPORT_REG_WRITE ++ if (buf == chip->oob_poi) ++ ret = memcpy_s((char *)host->iobase + host->pagesize, ++ FMC_MEM_LEN, buf, len); ++ else ++ ret = memcpy_s((char *)host->iobase, FMC_MEM_LEN, buf, len); ++ ++#else ++ if (buf == chip->oob_poi) ++ ret = memcpy_s((char *)host->buffer + host->pagesize, ++ FMC_MAX_DMA_LEN, buf, len); ++ else ++ ret = memcpy_s((char *)host->buffer, FMC_MAX_DMA_LEN, buf, len); ++ ++#endif ++ if (ret) ++ printk("%s: memcpy_s failed\n", __func__); ++ ++ return; ++} ++ ++#ifdef CONFIG_BSP_NAND_ECC_STATUS_REPORT ++ ++static void fmc100_ecc_err_num_count(struct mtd_info *mtd, ++ u_int ecc_st, u_int reg) ++{ ++ u_char err_num; ++ ++ if (ecc_st > ECC_STEP_MAX_4K_PAGE) ++ ecc_st = ECC_STEP_MAX_4K_PAGE; ++ ++ while (ecc_st) { ++ err_num = get_ecc_err_num(--ecc_st, reg); ++ if (err_num == 0xff) ++ mtd->ecc_stats.failed++; ++ else ++ mtd->ecc_stats.corrected += err_num; ++ } ++} ++#endif ++ ++static void fmc100_read_buf(struct nand_chip *chip, u_char *buf, int len) ++{ ++#ifdef CONFIG_BSP_NAND_ECC_STATUS_REPORT ++ struct mtd_info *mtd = nand_to_mtd(chip); ++#endif ++ struct fmc_host *host = chip->priv; ++ int ret; ++ ++#ifdef FMC100_NAND_SUPPORT_REG_READ ++ if (buf == chip->oob_poi) ++ ret = memcpy_s(buf, MAX_OOB_LEN, (char *)host->iobase + ++ host->pagesize, len); ++ else ++ ret = memcpy_s(buf, MAX_PAGE_SIZE, (char *)host->iobase, len); ++ ++#else ++ if (buf == chip->oob_poi) ++ ret = memcpy_s(buf, MAX_OOB_LEN, (char *)host->buffer + ++ host->pagesize, len); ++ else ++ ret = memcpy_s(buf, MAX_PAGE_SIZE, (char *)host->buffer, len); ++#endif ++ if (ret) ++ printk("%s: memcpy_s failed\n", __func__); ++ ++#ifdef CONFIG_BSP_NAND_ECC_STATUS_REPORT ++ if (buf != chip->oob_poi) { ++ u_int reg; ++ u_int ecc_step = host->pagesize >> ECC_STEP_SHIFT; ++ ++ /* 2K or 4K or 8K(1) or 16K(1-1) pagesize */ ++ reg = fmc_readl(host, FMC100_ECC_ERR_NUM0_BUF0); ++ fmc100_ecc_err_num_count(mtd, ecc_step, reg); ++ ++ if (ecc_step > ECC_STEP_MAX_4K_PAGE) { ++ /* 8K(2) or 16K(1-2) pagesize */ ++ reg = fmc_readl(host, FMC100_ECC_ERR_NUM1_BUF0); ++ fmc100_ecc_err_num_count(mtd, ecc_step, reg); ++ if (ecc_step > ECC_STEP_MAX_8K_PAGE) { ++ /* 16K(2-1) pagesize */ ++ reg = fmc_readl(host, ++ FMC100_ECC_ERR_NUM0_BUF1); ++ fmc100_ecc_err_num_count(mtd, ecc_step, reg); ++ /* 16K(2-2) pagesize */ ++ reg = fmc_readl(host, ++ FMC100_ECC_ERR_NUM1_BUF1); ++ fmc100_ecc_err_num_count(mtd, ecc_step, reg); ++ } ++ } ++ } ++#endif ++ ++ return; ++} ++ ++static void fmc100_select_chip(struct nand_chip *chip, int chipselect) ++{ ++ struct mtd_info *mtd = nand_to_mtd(chip); ++ struct fmc_host *host = chip->priv; ++ ++ if (chipselect < 0) { ++ mutex_unlock(&fmc_switch_mutex); ++ return; ++ } ++ ++ 
mutex_lock(&fmc_switch_mutex); ++ ++ if (chipselect > CONFIG_FMC100_MAX_NAND_CHIP) ++ db_bug("Error: Invalid chip select: %d\n", chipselect); ++ ++ host->cmd_op.cs = chipselect; ++ if (host->mtd != mtd) ++ host->mtd = mtd; ++} ++ ++static void fmc100_nand_ale_init(struct fmc_host *host, unsigned ctrl, int dat) ++{ ++ unsigned int addr_value = 0; ++ unsigned int addr_offset; ++ ++ if (ctrl & NAND_CTRL_CHANGE) { ++ host->addr_cycle = 0x0; ++ host->addr_value[0] = 0x0; ++ host->addr_value[1] = 0x0; ++ } ++ addr_offset = host->addr_cycle << FMC100_ADDR_CYCLE_SHIFT; ++ ++ if (host->addr_cycle >= FMC100_ADDR_CYCLE_MASK) { ++ addr_offset = (host->addr_cycle - ++ FMC100_ADDR_CYCLE_MASK) << ++ FMC100_ADDR_CYCLE_SHIFT; ++ addr_value = 1; ++ } ++ ++ host->addr_value[addr_value] |= ++ (((unsigned int)dat & 0xff) << addr_offset); ++ ++ host->addr_cycle++; ++} ++ ++static void fmc100_nand_cle_init(struct fmc_host *host, ++ struct nand_chip *chip, ++ int dat, ++ int *is_cache_invalid) ++{ ++ unsigned char cmd; ++ ++ cmd = (unsigned int)dat & 0xff; ++ host->cmd_op.cmd = cmd; ++ switch (cmd) { ++ case NAND_CMD_PAGEPROG: ++ host->offset = 0; ++ host->send_cmd_pageprog(host); ++ break; ++ ++ case NAND_CMD_READSTART: ++ *is_cache_invalid = 0; ++ if (host->addr_value[0] == host->pagesize) ++ host->cmd_op.l_cmd = NAND_CMD_READOOB; ++ ++ host->send_cmd_readstart(host); ++ break; ++ ++ case NAND_CMD_ERASE2: ++ host->cmd_op.l_cmd = cmd; ++ host->send_cmd_erase(host); ++ break; ++ ++ case NAND_CMD_READID: ++ /* dest fmcbuf just need init ID lenth */ ++ if (memset_s((u_char *)(chip->legacy.IO_ADDR_R), FMC_MEM_LEN, ++ 0, MAX_NAND_ID_LEN)) { ++ printk("%s %d:memset_s failed\n", __func__, __LINE__); ++ break; ++ } ++ host->cmd_op.l_cmd = cmd; ++ host->cmd_op.data_no = MAX_NAND_ID_LEN; ++ host->send_cmd_readid(host); ++ break; ++ ++ case NAND_CMD_STATUS: ++ host->send_cmd_status(host); ++ break; ++ ++ case NAND_CMD_READ0: ++ host->cmd_op.l_cmd = cmd; ++ break; ++ ++ case NAND_CMD_RESET: ++ host->send_cmd_reset(host); ++ break; ++ ++ case NAND_CMD_SEQIN: ++ case NAND_CMD_ERASE1: ++ default: ++ break; ++ } ++} ++ ++static void fmc100_cmd_ctrl(struct nand_chip *chip, int dat, unsigned ctrl) ++{ ++ int is_cache_invalid = 1; ++ struct fmc_host *host = chip->priv; ++ ++ if (ctrl & NAND_ALE) ++ fmc100_nand_ale_init(host, ctrl, dat); ++ ++ if ((ctrl & NAND_CLE) && (ctrl & NAND_CTRL_CHANGE)) ++ fmc100_nand_cle_init(host, chip, dat, &is_cache_invalid); ++ ++ /* pass pagesize and ecctype to kernel when startup. */ ++ host->enable_ecc_randomizer(host, ENABLE, ENABLE); ++ ++ if ((dat == NAND_CMD_NONE) && host->addr_cycle) { ++ if (host->cmd_op.cmd == NAND_CMD_SEQIN || ++ host->cmd_op.cmd == NAND_CMD_READ0 || ++ host->cmd_op.cmd == NAND_CMD_READID) { ++ host->offset = 0x0; ++ host->column = (host->addr_value[0] & 0xffff); ++ } ++ } ++ ++ if (is_cache_invalid) { ++ host->cache_addr_value[0] = ~0; ++ host->cache_addr_value[1] = ~0; ++ } ++} ++ ++static int fmc100_dev_ready(struct nand_chip *chip) ++{ ++ return 0x1; ++} ++ ++/* ++ * 'host->epm' only use the first oobfree[0] field, it looks very simple, But... 
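++ * note that the last two bytes of oobfree[0] are reserved for the
++ * empty-page mark: fmc100_set_oob_info() points host->epm at
++ * oobfree[0].offset + FMC_OOB_LEN_*_EB_OFFSET, i.e. the tail of the
++ * free region, so every layout below must keep oobfree[0] at least
++ * two bytes long.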
++ */
++static int fmc_ooblayout_ecc_default(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_DEFAULT;
++ oobregion->offset = OOB_OFFSET_DEFAULT;
++
++ return 0;
++}
++
++static int fmc_ooblayout_free_default(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_DEFAULT_FREE;
++ oobregion->offset = OOB_OFFSET_DEFAULT_FREE;
++
++ return 0;
++}
++
++static struct mtd_ooblayout_ops fmc_ooblayout_default_ops = {
++ .ecc = fmc_ooblayout_ecc_default,
++ .free = fmc_ooblayout_free_default,
++};
++
++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2
++static int fmc_ooblayout_ecc_4k16bit(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_4K16BIT;
++ oobregion->offset = OOB_OFFSET_4K16BIT;
++
++ return 0;
++}
++
++static int fmc_ooblayout_free_4k16bit(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_4K16BIT_FREE;
++ oobregion->offset = OOB_OFFSET_4K16BIT_FREE;
++
++ return 0;
++}
++
++static struct mtd_ooblayout_ops fmc_ooblayout_4k16bit_ops = {
++ .ecc = fmc_ooblayout_ecc_4k16bit,
++ .free = fmc_ooblayout_free_4k16bit,
++};
++
++static int fmc_ooblayout_ecc_2k16bit(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_2K16BIT;
++ oobregion->offset = OOB_OFFSET_2K16BIT;
++
++ return 0;
++}
++
++static int fmc_ooblayout_free_2k16bit(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section)
++ return -ERANGE;
++
++ oobregion->length = OOB_LENGTH_2K16BIT_FREE;
++ oobregion->offset = OOB_OFFSET_2K16BIT_FREE;
++
++ return 0;
++}
++
++static struct mtd_ooblayout_ops fmc_ooblayout_2k16bit_ops = {
++ .ecc = fmc_ooblayout_ecc_2k16bit,
++ .free = fmc_ooblayout_free_2k16bit,
++};
++#endif
++
++/* ecc/pagesize are read back from the NAND controller */
++static struct nand_config_info fmc100_nand_hw_auto_config_table[] = {
++ {NAND_PAGE_16K, NAND_ECC_64BIT, 64, 1824, &fmc_ooblayout_default_ops}, /* 1824 */
++ {NAND_PAGE_16K, NAND_ECC_40BIT, 40, 1200, &fmc_ooblayout_default_ops}, /* 1152 */
++ {NAND_PAGE_16K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops},
++ {NAND_PAGE_8K, NAND_ECC_64BIT, 64, 928, &fmc_ooblayout_default_ops}, /* 928 */
++ {NAND_PAGE_8K, NAND_ECC_40BIT, 40, 600, &fmc_ooblayout_default_ops}, /* 592 */
++ {NAND_PAGE_8K, NAND_ECC_24BIT, 24, 368, &fmc_ooblayout_default_ops}, /* 368 */
++ {NAND_PAGE_8K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops},
++ {NAND_PAGE_4K, NAND_ECC_24BIT, 24, 200, &fmc_ooblayout_default_ops}, /* 200 */
++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2
++ {NAND_PAGE_4K, NAND_ECC_16BIT, 16, 128, &fmc_ooblayout_4k16bit_ops}, /* 128 */
++#endif
++ {NAND_PAGE_4K, NAND_ECC_8BIT, 8, 128, &fmc_ooblayout_default_ops}, /* 88 */
++ {NAND_PAGE_4K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops},
++ {NAND_PAGE_2K, NAND_ECC_24BIT, 24, 128, &fmc_ooblayout_default_ops}, /* 116 */
++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2
++ {NAND_PAGE_2K, NAND_ECC_16BIT, 16, 64, &fmc_ooblayout_2k16bit_ops}, /* 64 */
++#endif
++ {NAND_PAGE_2K, NAND_ECC_8BIT, 8, 64, &fmc_ooblayout_default_ops}, /* 60 */
++ {NAND_PAGE_2K, NAND_ECC_0BIT, 0, 32, &fmc_ooblayout_default_ops},
++ {0, 0, 0, 0, NULL},
++};
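++
++/*
++ * Descriptive note: fmc100_get_config_type_info() below walks this table;
++ * entries are matched on page size first, and among those whose oobsize
++ * fits the chip's spare area the strongest ECC wins. The bracketed
++ * trailing comments appear to record the spare bytes each scheme
++ * actually consumes.
++ */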
++/*
++ * 0 - This NAND does NOT support the randomizer.
++ * 1 - This NAND supports the randomizer.
++ */
++static int fmc100_nand_support_randomizer(u_int pagesize, u_int ecctype)
++{
++ switch (pagesize) {
++ case _8K:
++ return (ecctype >= NAND_ECC_24BIT && ecctype <= NAND_ECC_80BIT);
++ case _16K:
++ return (ecctype >= NAND_ECC_40BIT && ecctype <= NAND_ECC_80BIT);
++ case _32K:
++ return (ecctype >= NAND_ECC_40BIT && ecctype <= NAND_ECC_80BIT);
++ default:
++ return 0;
++ }
++}
++
++/* pick the best (strongest) correction configuration. */
++static struct nand_config_info *fmc100_get_config_type_info(
++ struct mtd_info *mtd, struct nand_dev_t *nand_dev)
++{
++ struct nand_config_info *best = NULL;
++ struct nand_chip *chip = mtd_to_nand(mtd);
++ struct fmc_host *host = chip->priv;
++ struct nand_config_info *info = fmc100_nand_hw_auto_config_table;
++
++ nand_dev->start_type = "Auto";
++ nand_dev->flags |= (IS_NANDC_HW_AUTO(host) | IS_NANDC_CONFIG_DONE(host));
++
++ for (; info->ooblayout_ops; info++) {
++ if (match_page_type_to_size(info->pagetype) != mtd->writesize)
++ continue;
++
++ if (mtd->oobsize < info->oobsize)
++ continue;
++
++ if (!best || (best->ecctype < info->ecctype))
++ best = info;
++ }
++
++ return best;
++}
++
++static unsigned int fmc100_get_ecc_reg(struct fmc_host *host,
++ const struct nand_config_info *info, struct nand_dev_t *nand_dev)
++{
++ host->ecctype = info->ecctype;
++ fmc_pr(BT_DBG, "\t |-Save best EccType %d(%s)\n", host->ecctype,
++ match_ecc_type_to_str(info->ecctype));
++
++ nand_dev->ecctype = host->ecctype;
++
++ return fmc_cfg_ecc_type(match_ecc_type_to_reg(info->ecctype));
++}
++
++static unsigned int fmc100_get_page_reg(struct fmc_host *host,
++ const struct nand_config_info *info)
++{
++ host->pagesize = match_page_type_to_size(info->pagetype);
++ fmc_pr(BT_DBG, "\t |-Save best PageSize %d(%s)\n", host->pagesize,
++ match_page_type_to_str(info->pagetype));
++
++ return fmc_cfg_page_size(match_page_type_to_reg(info->pagetype));
++}
++
++static unsigned int fmc100_get_block_reg(struct fmc_host *host,
++ const struct nand_config_info *info)
++{
++ unsigned int block_reg = 0;
++ unsigned int page_per_block;
++ struct mtd_info *mtd = host->mtd;
++
++ host->block_page_mask = ((mtd->erasesize / mtd->writesize) - 1);
++ page_per_block = mtd->erasesize / match_page_type_to_size(info->pagetype);
++ switch (page_per_block) {
++ case PAGE_PER_BLK_64:
++ block_reg = BLOCK_SIZE_64_PAGE;
++ break;
++ case PAGE_PER_BLK_128:
++ block_reg = BLOCK_SIZE_128_PAGE;
++ break;
++ case PAGE_PER_BLK_256:
++ block_reg = BLOCK_SIZE_256_PAGE;
++ break;
++ case PAGE_PER_BLK_512:
++ block_reg = BLOCK_SIZE_512_PAGE;
++ break;
++ default:
++ db_msg("Can't support block %#x and page %#x size\n",
++ mtd->erasesize, mtd->writesize);
++ }
++
++ return fmc_cfg_block_size(block_reg);
++}
++
++static void fmc100_set_fmc_cfg_reg(struct fmc_host *host,
++ const struct nand_config_info *type_info, struct nand_dev_t *nand_dev)
++{
++ unsigned int page_reg, ecc_reg, block_reg, reg_fmc_cfg;
++
++ ecc_reg = fmc100_get_ecc_reg(host, type_info, nand_dev);
++ page_reg = fmc100_get_page_reg(host, type_info);
++ block_reg = fmc100_get_block_reg(host, type_info);
++
++ if (fmc100_nand_support_randomizer(host->pagesize, host->ecctype))
++ host->flags |= IS_NAND_RANDOM(nand_dev);
++
++ /*
++ * Check whether the hardware randomizer PIN is enabled although this
++ * NAND does not need the randomizer; if so, we notify the user.
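++ * The mismatch is treated as fatal below (db_bug) rather than risking
++ * writes of randomizer-scrambled pages that this flash cannot handle.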
++ */ ++ if (IS_NAND_RANDOM(host) && ++ !fmc100_nand_support_randomizer(host->pagesize, ++ host->ecctype)) { ++ db_bug(ERSTR_HARDWARE ++ "This NAND flash does not support `randomizer`, " ++ "Please don't configure hardware randomizer PIN."); ++ } ++ /* Save value of FMC_CFG and FMC_CFG_ECC0 to turn on/off ECC */ ++ reg_fmc_cfg = fmc_readl(host, FMC_CFG); ++ reg_fmc_cfg &= ~(PAGE_SIZE_MASK | ECC_TYPE_MASK | BLOCK_SIZE_MASK); ++ reg_fmc_cfg |= ecc_reg | page_reg | block_reg; ++ host->nand_cfg = reg_fmc_cfg; ++ host->nand_cfg_ecc0 = (host->nand_cfg & ~ECC_TYPE_MASK) | ECC_TYPE_0BIT; ++ fmc_pr(BT_DBG, "\t|-Save FMC_CFG[%#x]: %#x and FMC_CFG_ECC0: %#x\n", ++ FMC_CFG, host->nand_cfg, host->nand_cfg_ecc0); ++ ++ /* pass pagesize and ecctype to kernel when spiflash startup. */ ++ host->enable_ecc_randomizer(host, ENABLE, ENABLE); ++ ++ /* ++ * If it want to support the 'read retry' feature, the 'randomizer' ++ * feature must be support first. ++ */ ++ host->read_retry = NULL; ++ ++ if (host->read_retry && !IS_NAND_RANDOM(host)) { ++ db_bug(ERSTR_HARDWARE ++ "This Nand flash need to enable 'randomizer' feature. " ++ "Please configure hardware randomizer PIN."); ++ } ++} ++ ++static void fmc100_set_oob_info(struct mtd_info *mtd, ++ struct nand_config_info *info, struct nand_dev_t *nand_dev) ++{ ++ int buffer_len; ++ struct nand_chip *chip = mtd_to_nand(mtd); ++ struct fmc_host *host = chip->priv; ++ struct mtd_oob_region fmc_oobregion = {0, 0}; ++ if (info->ecctype != NAND_ECC_0BIT) ++ mtd->oobsize = info->oobsize; ++ ++ mtd->oobavail = FMC100_NAND_OOBSIZE_FOR_YAFFS; ++ ++ host->oobsize = mtd->oobsize; ++ ++ buffer_len = host->pagesize + host->oobsize; ++ /* dest buffer just need init buffer_len */ ++ if (memset_s(host->buffer, FMC_MAX_DMA_LEN, 0xff, buffer_len)) { ++ printk("%s %d:memset_s failed\n", __func__, __LINE__); ++ return; ++ } ++ host->dma_oob = host->dma_buffer + host->pagesize; ++ ++ host->bbm = (unsigned char *)(host->buffer + host->pagesize + ++ FMC100_BAD_BLOCK_POS); ++ ++ info->ooblayout_ops->free(mtd, 0, &fmc_oobregion); ++ ++ mtd_set_ooblayout(mtd, info->ooblayout_ops); ++ ++ /* EB bits locate in the bottom two of CTRL(30) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + ++ FMC_OOB_LEN_30_EB_OFFSET); ++ ++#ifdef CONFIG_BSP_NAND_FS_MAY_NO_YAFFS2 ++ if (info->ecctype == NAND_ECC_16BIT) { ++ if (host->pagesize == _2K) { ++ /* EB bits locate in the bottom two of CTRL(6) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + ++ FMC_OOB_LEN_6_EB_OFFSET); ++ } else if (host->pagesize == _4K) { ++ /* EB bit locate in the bottom two of CTRL(14) */ ++ host->epm = (u_short *)(host->buffer + host->pagesize + ++ fmc_oobregion.offset + ++ FMC_OOB_LEN_14_EB_OFFSET); ++ } ++ } ++#endif ++} ++ ++static int fmc100_set_config_info(struct mtd_info *mtd, ++ struct nand_chip *chip, struct nand_dev_t *dev) ++{ ++ struct fmc_host *host = chip->priv; ++ struct nand_dev_t *nand_dev = dev; ++ struct nand_config_info *type_info = NULL; ++ ++ fmc_pr(BT_DBG, "\t*-Start config Block Page OOB and Ecc\n"); ++ ++ type_info = fmc100_get_config_type_info(mtd, nand_dev); ++ WARN_ON(!type_info); ++ ++ fmc_pr(BT_DBG, "\t|-%s Config, PageSize %s EccType %s OobSize %d\n", ++ nand_dev->start_type, nand_page_name(type_info->pagetype), ++ nand_ecc_name(type_info->ecctype), type_info->oobsize); ++ ++ /* Set the page_size, ecc_type, block_size of FMC_CFG[0x0] register */ ++ fmc100_set_fmc_cfg_reg(host, type_info, nand_dev); ++ ++ fmc100_set_oob_info(mtd, 
type_info, nand_dev);
++
++ if (mtd->writesize > NAND_MAX_PAGESIZE ||
++ mtd->oobsize > NAND_MAX_OOBSIZE) {
++ db_bug(ERSTR_DRIVER
++ "Driver does not support this Nand Flash. Please " \
++ "increase NAND_MAX_PAGESIZE and NAND_MAX_OOBSIZE.\n");
++ }
++
++ /* Some Nand Flash devices have subpage structure */
++ if (mtd->writesize != host->pagesize) {
++ unsigned int shift = 0;
++ unsigned int writesize = mtd->writesize;
++
++ while (writesize > host->pagesize) {
++ writesize >>= 1;
++ shift++;
++ }
++ mtd->erasesize = mtd->erasesize >> shift;
++ mtd->writesize = host->pagesize;
++ pr_info("Nand divide into 1/%u\n", (1 << shift));
++ }
++
++ fmc_pr(BT_DBG, "\t*-End config Block Page Oob and Ecc\n");
++
++ return 0;
++}
++
++static void fmc100_chip_init(struct nand_chip *chip)
++{
++ struct fmc_host *host = chip->priv;
++ /* only dma_len bytes of the fmc buffer need initializing */
++ if (memset_s((char *)chip->legacy.IO_ADDR_R, FMC_MEM_LEN, 0xff, host->dma_len)) {
++ printk("%s %d:memset_s failed\n", __func__, __LINE__);
++ return;
++ }
++
++ chip->legacy.read_byte = fmc100_read_byte;
++
++ chip->legacy.write_buf = fmc100_write_buf;
++ chip->legacy.read_buf = fmc100_read_buf;
++
++ chip->legacy.select_chip = fmc100_select_chip;
++
++ chip->legacy.cmd_ctrl = fmc100_cmd_ctrl;
++ chip->legacy.dev_ready = fmc100_dev_ready;
++
++ chip->legacy.chip_delay = FMC_CHIP_DELAY;
++
++ chip->options = NAND_NEED_READRDY | NAND_BROKEN_XD |
++ NAND_SKIP_BBTSCAN;
++ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
++}
++
++static void fmc100_host_fun_init(struct fmc_host *host)
++{
++ host->send_cmd_pageprog = fmc100_send_cmd_write;
++ host->send_cmd_status = fmc100_send_cmd_status;
++ host->send_cmd_readstart = fmc100_send_cmd_read;
++ host->send_cmd_erase = fmc100_send_cmd_erase;
++ host->send_cmd_readid = fmc100_send_cmd_readid;
++ host->send_cmd_reset = fmc100_send_cmd_reset;
++}
++
++static inline unsigned int get_sys_boot_mode(unsigned int reg_val)
++{
++ return (reg_val >> 4) & 0x3;
++}
++
++static int fmc100_host_init(struct fmc_host *host)
++{
++ unsigned int reg, flash_type;
++
++ fmc_pr(BT_DBG, "\t *-Start nand host init\n");
++
++ reg = fmc_readl(host, FMC_CFG);
++ fmc_pr(BT_DBG, "\t |-Read FMC CFG[%#x]%#x\n", FMC_CFG, reg);
++ flash_type = get_spi_flash_type(reg);
++ if (flash_type != FLASH_TYPE_NAND) {
++ db_msg("Error: Flash type isn't Nand flash. 
reg[%#x]\n", reg);
++ reg |= fmc_cfg_flash_sel(FLASH_TYPE_NAND);
++ fmc_pr(BT_DBG, "\t |-Change flash type to Nand flash\n");
++ }
++
++ if ((reg & FMC_CFG_OP_MODE_MASK) == FMC_CFG_OP_MODE_BOOT) {
++ reg |= fmc_cfg_op_mode(FMC_CFG_OP_MODE_NORMAL);
++ fmc_pr(BT_DBG, "\t |-Controller enter normal mode\n");
++ }
++ fmc_writel(host, FMC_CFG, reg);
++ fmc_pr(BT_DBG, "\t |-Set CFG[%#x]%#x\n", FMC_CFG, reg);
++
++ host->nand_cfg = reg;
++ host->nand_cfg_ecc0 = (reg & ~ECC_TYPE_MASK) | ECC_TYPE_0BIT;
++
++ reg = fmc_readl(host, FMC_GLOBAL_CFG);
++ fmc_pr(BT_DBG, "\t |-Read global CFG[%#x]%#x\n", FMC_GLOBAL_CFG, reg);
++ if (reg & FMC_GLOBAL_CFG_RANDOMIZER_EN) {
++ host->flags &= ~NAND_RANDOMIZER;
++ fmc_pr(BT_DBG, "\t |-Default disable randomizer\n");
++ reg &= ~FMC_GLOBAL_CFG_RANDOMIZER_EN;
++ fmc_writel(host, FMC_GLOBAL_CFG, reg);
++ fmc_pr(BT_DBG, "\t |-Set global CFG[%#x]%#x\n", FMC_GLOBAL_CFG, reg);
++ }
++
++#ifdef CONFIG_FMC100_NAND_EDO_MODE
++ /* enable EDO mode */
++ reg = fmc_readl(host, FMC_GLOBAL_CFG);
++ fmc_writel(host, FMC_GLOBAL_CFG, set_nand_edo_mode_en(reg));
++#endif
++
++ host->addr_cycle = 0;
++ host->addr_value[0] = 0;
++ host->addr_value[1] = 0;
++ host->cache_addr_value[0] = ~0;
++ host->cache_addr_value[1] = ~0;
++
++ fmc100_host_fun_init(host);
++
++ /*
++ * Check whether we booted from NAND.
++ * The REG_SYSSTAT register is set in start.S.
++ * When booting from NAND (Auto), the driver doesn't detect ECC/PAGESIZE.
++ */
++ host->flags |= NANDC_HW_AUTO;
++
++ if (get_sys_boot_mode(reg) == BOOT_FROM_NAND) {
++ host->flags |= NANDC_CONFIG_DONE;
++ fmc_pr(BT_DBG, "\t |-Auto config pagesize and ecctype\n");
++ }
++
++ host->enable_ecc_randomizer = fmc100_ecc_randomizer;
++
++ fmc_pr(BT_DBG, "\t *-End nand host init\n");
++
++ return 0;
++}
++
++int fmc100_nand_init(struct nand_chip *chip)
++{
++ struct fmc_host *host = chip->priv;
++
++ /* enable and set system clock */
++ clk_prepare_enable(host->clk);
++
++ /* fmc ip version check */
++ host->version = fmc_readl(host, FMC_VERSION);
++ if (host->version != FMC_VER_100)
++ return -EFAULT;
++
++ pr_info("Found Flash Memory Controller v100 Nand Driver\n");
++
++ /* fmc host init */
++ if (fmc100_host_init(host)) {
++ db_msg("Error: Nand host init failed!\n");
++ return -EFAULT;
++ }
++ host->chip = chip;
++
++ fmc_writel(host,
++ FMC_PND_PWIDTH_CFG,
++ pwidth_cfg_rw_hcnt(CONFIG_RW_H_WIDTH) |
++ pwidth_cfg_r_lcnt(CONFIG_R_L_WIDTH) |
++ pwidth_cfg_w_lcnt(CONFIG_W_L_WIDTH));
++
++ /* fmc nand_chip struct init */
++ fmc100_chip_init(chip);
++
++ fmc_spl_ids_register();
++ nfc_param_adjust = fmc100_set_config_info;
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++
++void fmc100_nand_config(const struct fmc_host *host)
++{
++ /* enable system clock */
++ clk_prepare_enable(host->clk);
++ fmc_pr(PM_DBG, "\t |-enable system clock\n");
++}
++#endif /* CONFIG_PM */
+diff --git a/drivers/mtd/nand/fmc100_nand/fmc100_nand.h b/drivers/mtd/nand/fmc100_nand/fmc100_nand.h
+new file mode 100644
+index 000000000..a7414f577
+--- /dev/null
++++ b/drivers/mtd/nand/fmc100_nand/fmc100_nand.h
+@@ -0,0 +1,185 @@
++/*
++ * The Flash Memory Controller v100 Device Driver for vendor
++ *
++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version. 
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ */
++
++#ifndef __FMC100_NAND_H__
++#define __FMC100_NAND_H__
++
++#include 
++
++/******************************************************************************/
++/* These macros are for debug only; the reg option is slower than the dma option */
++#undef FMC100_NAND_SUPPORT_REG_READ
++/* Open it as you need #define FMC100_NAND_SUPPORT_REG_READ */
++
++#undef FMC100_NAND_SUPPORT_REG_WRITE
++/* Open it as you need #define FMC100_NAND_SUPPORT_REG_WRITE */
++
++/*****************************************************************************/
++#define PAGE_PER_BLK_64 64
++#define PAGE_PER_BLK_128 128
++#define PAGE_PER_BLK_256 256
++#define PAGE_PER_BLK_512 512
++
++/*****************************************************************************/
++#define OOB_LENGTH_DEFAULT 32
++#define OOB_OFFSET_DEFAULT 32
++#define OOB_LENGTH_DEFAULT_FREE 30
++#define OOB_OFFSET_DEFAULT_FREE 2
++
++#define OOB_LENGTH_4K16BIT 14
++#define OOB_OFFSET_4K16BIT 14
++#define OOB_LENGTH_4K16BIT_FREE 14
++#define OOB_OFFSET_4K16BIT_FREE 2
++
++#define OOB_LENGTH_2K16BIT 6
++#define OOB_OFFSET_2K16BIT 6
++#define OOB_LENGTH_2K16BIT_FREE 6
++#define OOB_OFFSET_2K16BIT_FREE 2
++
++/*****************************************************************************/
++#define FMC100_ECC_ERR_NUM0_BUF0 0xc0
++#define FMC100_ECC_ERR_NUM1_BUF0 0xc4
++#define FMC100_ECC_ERR_NUM0_BUF1 0xc8
++#define FMC100_ECC_ERR_NUM1_BUF1 0xcc
++
++#define get_ecc_err_num(_i, _reg) (((_reg) >> ((_i) * 8)) & 0xff)
++
++#define ECC_STEP_SHIFT 10
++#define ECC_STEP_MAX_4K_PAGE 4
++#define ECC_STEP_MAX_8K_PAGE 8
++#define ECC_STEP_MAX_16K_PAGE 8
++
++/*****************************************************************************/
++#define NAND_MAX_PAGESIZE 32768
++#define NAND_MAX_OOBSIZE 4800
++
++#define CONFIG_SUPPORT_YAFFS
++#define FMC100_NAND_OOBSIZE_FOR_YAFFS 32
++
++/*****************************************************************************/
++#define REG_CNT_HIGH_BLOCK_NUM_SHIFT 10
++
++#define REG_CNT_BLOCK_NUM_MASK 0x3ff
++#define REG_CNT_BLOCK_NUM_SHIFT 22
++
++#define REG_CNT_PAGE_NUM_MASK 0x3f
++#define REG_CNT_PAGE_NUM_SHIFT 16
++
++/*****************************************************************************/
++#define WORD_READ_OFFSET_ADD_LENGTH 2
++#define WORD_READ_START_OFFSET 2
++/*****************************************************************************/
++#define FMC100_ADDR_CYCLE_MASK 0x4
++#define FMC100_ADDR_CYCLE_SHIFT 0x3
++#define NAND_EDO_MODE_SHIFT 9
++#define NAND_EDO_MODE_MASK (1<. 
++ *
++ */
++
++#include 
++
++#include "fmc100_nand_os.h"
++#include "fmc100_nand.h"
++#include 
++#include 
++
++static int nand_host_parm_init(struct platform_device *pltdev,
++ struct device *dev,
++ struct fmc_host **host,
++ struct nand_chip **chip,
++ struct mtd_info **mtd)
++{
++ int len;
++ struct bsp_fmc *fmc = dev_get_drvdata(dev->parent);
++
++ if (!fmc) {
++ dev_err(dev, "get mfd fmc devices failed\n");
++ return -ENXIO;
++ }
++
++ len = sizeof(struct fmc_host) + sizeof(struct nand_chip)
++ + sizeof(struct mtd_info);
++ *host = devm_kzalloc(dev, len, GFP_KERNEL);
++ if (!(*host))
++ return -ENOMEM;
++
++ (void)memset_s((char *)(*host), len, 0, len);
++
++ /* store the host itself, not the address of the caller's local pointer */
++ platform_set_drvdata(pltdev, *host);
++
++ (*host)->dev = &pltdev->dev;
++ (*host)->chip = *chip = (struct nand_chip *)&(*host)[1];
++ (*host)->mtd = *mtd = nand_to_mtd(*chip);
++ (*host)->regbase = fmc->regbase;
++ (*host)->iobase = fmc->iobase;
++ (*host)->clk = fmc->clk;
++ (*chip)->legacy.IO_ADDR_R = (*chip)->legacy.IO_ADDR_W = (*host)->iobase;
++ (*host)->buffer = fmc->buffer;
++ (*host)->dma_buffer = fmc->dma_buffer;
++ (*host)->dma_len = fmc->dma_len;
++
++ return 0;
++}
++
++static int bsp_nand_os_probe(struct platform_device *pltdev)
++{
++ int result;
++ struct fmc_host *host = NULL;
++ struct nand_chip *chip = NULL;
++ struct mtd_info *mtd = NULL;
++ int nr_parts = 0;
++ struct mtd_partition *parts = NULL;
++ struct device *dev = &pltdev->dev;
++ struct device_node *np = NULL;
++
++ result = nand_host_parm_init(pltdev, dev, &host, &chip, &mtd);
++ if (result)
++ return result;
++
++ /* fmc Nand host init */
++ chip->priv = host;
++ result = fmc100_nand_init(chip);
++ if (result) {
++ db_msg("Error: host init failed! result: %d\n", result);
++ goto fail;
++ }
++
++ np = of_get_next_available_child(dev->of_node, NULL);
++ mtd->name = np->name;
++ mtd->type = MTD_NANDFLASH;
++ mtd->priv = chip;
++ mtd->flags = MTD_CAP_NANDFLASH;
++ mtd->owner = THIS_MODULE;
++
++ if (nand_scan(chip, CONFIG_FMC100_MAX_NAND_CHIP)) {
++ result = -ENXIO;
++ goto fail;
++ }
++
++ result = mtd_device_register(host->mtd, parts, nr_parts);
++ if (result) {
++ kfree(parts);
++ parts = NULL;
++ }
++
++ return (result == 1) ? 
-ENODEV : 0;
++
++fail:
++ clk_disable_unprepare(host->clk);
++
++ mtd_device_unregister(mtd);
++ nand_cleanup(mtd_to_nand(mtd));
++
++ return result;
++}
++
++static int bsp_nand_os_remove(struct platform_device *pltdev)
++{
++ struct fmc_host *host = platform_get_drvdata(pltdev);
++
++ clk_disable_unprepare(host->clk);
++
++ mtd_device_unregister(host->mtd);
++ nand_cleanup(mtd_to_nand(host->mtd));
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int fmc100_nand_os_suspend(struct platform_device *pltdev,
++ pm_message_t state)
++{
++ struct fmc_host *host = platform_get_drvdata(pltdev);
++ struct device *dev = &pltdev->dev;
++ if (!host || !host->clk) {
++ dev_err(dev, "host or host->clk is NULL\n");
++ return 0;
++ }
++
++ while ((fmc_readl(host, FMC_OP) & FMC_OP_REG_OP_START))
++ _cond_resched();
++
++ while ((fmc_readl(host, FMC_OP_CTRL) & OP_CTRL_DMA_OP_READY))
++ _cond_resched();
++
++ clk_disable_unprepare(host->clk);
++ fmc_pr(PM_DBG, "\t|-disable system clock\n");
++ return 0;
++}
++
++static int fmc100_nand_os_resume(struct platform_device *pltdev)
++{
++ int cs;
++ struct fmc_host *host = platform_get_drvdata(pltdev);
++ struct nand_chip *chip = NULL;
++ struct nand_memory_organization *memorg;
++
++ if (!host)
++ return 0;
++
++ chip = host->chip;
++ /* fetch memorg only after chip is valid; it was previously read
++ * through the still-NULL chip pointer */
++ memorg = nanddev_get_memorg(&chip->base);
++
++ for (cs = 0; cs < memorg->ntargets; cs++)
++ host->send_cmd_reset(host);
++
++ fmc100_nand_config(host);
++ return 0;
++}
++#endif /* CONFIG_PM */
++
++static const struct of_device_id bsp_nand_dt_ids[] = {
++ { .compatible = "vendor,bsp_nand" },
++ { } /* sentinel */
++};
++MODULE_DEVICE_TABLE(of, bsp_nand_dt_ids);
++
++static struct platform_driver bsp_nand_driver = {
++ .driver = {
++ .name = "bsp_nand",
++ .of_match_table = bsp_nand_dt_ids,
++ },
++ .probe = bsp_nand_os_probe,
++ .remove = bsp_nand_os_remove,
++#ifdef CONFIG_PM
++ .suspend = fmc100_nand_os_suspend,
++ .resume = fmc100_nand_os_resume,
++#endif
++};
++module_platform_driver(bsp_nand_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Flash Memory Controller V100 Nand Driver");
+diff --git a/drivers/mtd/nand/fmc100_nand/fmc100_nand_os.h b/drivers/mtd/nand/fmc100_nand/fmc100_nand_os.h
+new file mode 100644
+index 000000000..ba073bd88
+--- /dev/null
++++ b/drivers/mtd/nand/fmc100_nand/fmc100_nand_os.h
+@@ -0,0 +1,70 @@
++/*
++ * The Flash Memory Controller v100 Device Driver for vendor
++ *
++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
++ *
++ */
++
++#ifndef __FMC100_NAND_OS_H__
++#define __FMC100_NAND_OS_H__
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#include 
++#include 
++#include 
++
++#if (KERNEL_VERSION(3, 4, 5) <= LINUX_VERSION_CODE)
++#include "../../mtdcore.h"
++#endif
++
++#define DEFAULT_NAND_PAGESIZE 2048
++#define DEFAULT_NAND_OOBSIZE 64
++
++#define NAND_BUFFER_LEN (DEFAULT_NAND_PAGESIZE + DEFAULT_NAND_OOBSIZE)
++
++#ifndef CONFIG_RW_H_WIDTH
++#define CONFIG_RW_H_WIDTH (10)
++#warning CONFIG_RW_H_WIDTH is not configured; using a default value that may be invalid.
++#endif
++
++#ifndef CONFIG_R_L_WIDTH
++#define CONFIG_R_L_WIDTH (10)
++#warning CONFIG_R_L_WIDTH is not configured; using a default value that may be invalid.
++#endif
++
++#ifndef CONFIG_W_L_WIDTH
++#define CONFIG_W_L_WIDTH (10)
++#warning CONFIG_W_L_WIDTH is not configured; using a default value that may be invalid.
++#endif
++
++extern void fmc100_nand_controller_enable(int enable);
++
++#endif /* End of __FMC100_NAND_OS_H__ */
+diff --git a/drivers/mtd/nand/fmc100_nand/fmc_nand_spl_ids.c b/drivers/mtd/nand/fmc100_nand/fmc_nand_spl_ids.c
+new file mode 100644
+index 000000000..322a08215
+--- /dev/null
++++ b/drivers/mtd/nand/fmc100_nand/fmc_nand_spl_ids.c
+@@ -0,0 +1,191 @@
++/*
++ * The Flash Memory Controller v100 Device Driver for vendor
++ *
++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ */
++
++#include 
++#include 
++#include 
++#include 
++
++#include "../raw/nfc_gen.h"
++#include "fmc100_nand.h"
++#include 
++
++#define _768K (_256K + _512K)
++
++
++struct nand_flash_special_dev {
++ unsigned char id[8];
++ int length; /* length of id. */
++ unsigned long long chipsize;
++ struct nand_flash_dev *(*probe)(unsigned char *id);
++ char *name;
++
++ unsigned long pagesize;
++ unsigned long erasesize;
++ unsigned long oobsize;
++ unsigned long options;
++ unsigned int read_retry_type;
++
++#define BBP_LAST_PAGE 0x01
++#define BBP_FIRST_PAGE 0x02
++ unsigned int badblock_pos;
++ int flags;
++};
++
++
++/* NAND device probe functions follow. 
*/
++
++
++static struct nand_flash_dev *hynix_probe_v02(unsigned char *id)
++{
++ struct nand_flash_dev *type = &g_nand_dev.flash_dev;
++
++ int pagesizes[] = {_2K, _4K, _8K, 0};
++ int oobsizes[] = {128, 224, 448, 0, 0, 0, 0, 0};
++ int blocksizes[] = {_128K, _256K, _512K, _768K, _1M, _2M, 0, 0};
++
++ int blocktype = (((id[3] >> 5) & 0x04) | ((id[3] >> 4) & 0x03));
++ int oobtype = (((id[3] >> 2) & 0x03) | ((id[3] >> 4) & 0x04));
++
++ type->options = 0;
++ type->pagesize = pagesizes[(id[3] & 0x03)]; /* 0x3: check bit[0] and bit[1] */
++ type->erasesize = blocksizes[blocktype];
++ type->oobsize = oobsizes[oobtype];
++
++ return type;
++}
++
++
++static struct nand_flash_dev *samsung_probe_v02(unsigned char *id)
++{
++ struct nand_flash_dev *type = &g_nand_dev.flash_dev;
++
++ int pagesizes[] = {_2K, _4K, _8K, 0};
++ int oobsizes[] = {0, 128, 218, 400, 436, 0, 0, 0};
++ int blocksizes[] = {_128K, _256K, _512K, _1M, 0, 0, 0, 0};
++
++ int blocktype = (((id[3] >> 5) & 0x04) | ((id[3] >> 4) & 0x03));
++ int oobtype = (((id[3] >> 4) & 0x04) | ((id[3] >> 2) & 0x03));
++
++ type->options = 0;
++ type->pagesize = pagesizes[(id[3] & 0x03)]; /* 0x3: check bit[0] and bit[1] */
++ type->erasesize = blocksizes[blocktype];
++ type->oobsize = oobsizes[oobtype];
++
++ return type;
++}
++
++#define DRV_VERSION "1.40"
++
++/******************************************************************************
++ * We do not guarantee the compatibility of the following device models in the
++ * table. Device compatibility is based solely on the list of compatible devices
++ * in the release package.
++ ******************************************************************************/
++
++static struct nand_flash_special_dev nand_flash_special_table[] = {
++
++ {{0}, 0, 0, 0, 0, 0, 0, 0, 0},
++};
++
++struct nand_dev_t g_nand_dev;
++
++struct nand_flash_dev *fmc_get_spl_flash_type(struct mtd_info *mtd, unsigned char *id)
++{
++ struct nand_chip *chip = mtd_to_nand(mtd);
++ struct nand_flash_special_dev *spl_dev = nand_flash_special_table;
++ struct nand_flash_dev *type = &g_nand_dev.flash_dev;
++ struct nand_dev_t *nand_dev = &g_nand_dev;
++ int ret;
++
++ fmc_pr(BT_DBG, "\t *-Start find special nand flash\n");
++
++ pr_info("Nand ID: %#X %#X %#X %#X %#X %#X %#X %#X\n", id[0], id[1],
++ id[2], id[3], id[4], id[5], id[6], id[7]);
++
++ for (; spl_dev->length; spl_dev++) {
++ if (!access_ok(id, spl_dev->length)) {
++ pr_info("err: access_ok verify fail\n");
++ return NULL;
++ }
++ if (memcmp(id, spl_dev->id, spl_dev->length))
++ continue;
++
++ fmc_pr(BT_DBG, "\t |-Found special Nand flash: %s\n",
++ spl_dev->name);
++
++ if (spl_dev->probe) {
++ type = spl_dev->probe(id);
++ } else {
++ type->options = spl_dev->options;
++ type->pagesize = spl_dev->pagesize;
++ type->erasesize = spl_dev->erasesize;
++ type->oobsize = spl_dev->oobsize;
++ }
++
++ type->name = spl_dev->name;
++ type->id_len = spl_dev->length;
++ ret = memcpy_s(type->id, NAND_MAX_ID_LEN, id, type->id_len);
++ if (ret) {
++ printk("%s: memcpy_s failed\n", __func__);
++ return NULL;
++ }
++ type->chipsize = (unsigned int)(spl_dev->chipsize >> 20); /* chipsize is stored in MiB, hence the 20-bit right shift */
++ fmc_pr(BT_DBG, "\t |-Save struct nand_flash_dev info\n");
++
++ ret = memcpy_s(nand_dev->ids, NAND_MAX_ID_LEN, id, MAX_NAND_ID_LEN);
++ if (ret) {
++ printk("%s: memcpy_s failed\n", __func__);
++ return NULL;
++ }
++
++ nand_dev->oobsize = type->oobsize;
++ nand_dev->flags = spl_dev->flags;
++ nand_dev->read_retry_type = spl_dev->read_retry_type;
++ fmc_pr(BT_DBG, "\t |-Save 
struct nand_dev_t information\n"); ++ ++ mtd->oobsize = spl_dev->oobsize; ++ mtd->erasesize = spl_dev->erasesize; ++ mtd->writesize = spl_dev->pagesize; ++ mtd->size = spl_dev->chipsize; ++ ++ chip->base.memorg.pagesize = spl_dev->pagesize; ++ chip->base.memorg.pages_per_eraseblock = spl_dev->erasesize / spl_dev->pagesize; ++ chip->base.memorg.eraseblocks_per_lun = spl_dev->chipsize / spl_dev->erasesize; ++ chip->base.memorg.oobsize = spl_dev->oobsize; ++ ++ return type; ++ } ++ nand_dev->read_retry_type = NAND_RR_NONE; ++ ++ chip->legacy.cmdfunc(chip, NAND_CMD_READID, 0x00, -1); ++ chip->legacy.read_byte(chip); ++ ++ fmc_pr(BT_DBG, "\t *-Not found special nand flash\n"); ++ ++ return NULL; ++} ++ ++ ++void fmc_spl_ids_register(void) ++{ ++ pr_info("Special NAND id table Version %s\n", DRV_VERSION); ++ get_spi_nand_flash_type_hook = fmc_get_spl_flash_type; ++} +diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile +index 25120a4af..cee3c85ba 100644 +--- a/drivers/mtd/nand/raw/Makefile ++++ b/drivers/mtd/nand/raw/Makefile +@@ -59,6 +59,11 @@ obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o + obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o + + nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o ++ ++ifdef CONFIG_ARCH_BSP ++nand-objs += nfc_gen.o nfc_spl_ids.o match_table.o ++endif ++ + nand-objs += nand_onfi.o + nand-objs += nand_jedec.o + nand-objs += nand_amd.o +diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h +index e9932da18..d4ba08bd4 100644 +--- a/drivers/mtd/nand/raw/internals.h ++++ b/drivers/mtd/nand/raw/internals.h +@@ -34,6 +34,10 @@ + #define NAND_MFR_TOSHIBA 0x98 + #define NAND_MFR_WINBOND 0xef + ++#ifdef CONFIG_ARCH_BSP ++#define NAND_MFR_ALL_FLASH 0xc1 ++#endif ++ + /** + * struct nand_manufacturer_ops - NAND Manufacturer operations + * @detect: detect the NAND memory organization and capabilities +diff --git a/drivers/mtd/nand/raw/match_table.c b/drivers/mtd/nand/raw/match_table.c +new file mode 100644 +index 000000000..1b8af986a +--- /dev/null ++++ b/drivers/mtd/nand/raw/match_table.c +@@ -0,0 +1,113 @@ ++/* ++ * ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++#include ++#include "match_table.h" ++ ++int reg2type(struct match_reg_type *table, int length, int reg, int def) ++{ ++ while (length-- > 0) { ++ if (table->reg == reg) { ++ return table->type; ++ } ++ table++; ++ } ++ return def; ++} ++ ++int type2reg(struct match_reg_type *table, int length, int type, int def) ++{ ++ while (length-- > 0) { ++ if (table->type == type) { ++ return table->reg; ++ } ++ table++; ++ } ++ return def; ++} ++ ++int str2type(struct match_type_str *table, int length, const char *str, ++ int size, int def) ++{ ++ while (length-- > 0) { ++ if (!strncmp(table->str, str, size)) { ++ return table->type; ++ } ++ table++; ++ } ++ return def; ++} ++ ++const char *type2str(struct match_type_str *table, int length, int type, ++ const char *def) ++{ ++ while (length-- > 0) { ++ if (table->type == type) { ++ return table->str; ++ } ++ table++; ++ } ++ return def; ++} ++ ++int match_reg_to_type(struct match_t *table, int nr_table, int reg, int def) ++{ ++ while (nr_table-- > 0) { ++ if (table->reg == reg) { ++ return table->type; ++ } ++ table++; ++ } ++ return def; ++} ++ ++int match_type_to_reg(struct match_t *table, int nr_table, int type, int def) ++{ ++ while (nr_table-- > 0) { ++ if (table->type == type) { ++ return table->reg; ++ } ++ table++; ++ } ++ return def; ++} ++ ++int match_data_to_type(struct match_t *table, int nr_table,const char *data, ++ int size, int def) ++{ ++ while (nr_table-- > 0) { ++ if (!memcmp(table->data, data, size)) { ++ return table->type; ++ } ++ table++; ++ } ++ return def; ++} ++ ++void *match_type_to_data(struct match_t *table, int nr_table, int type, ++ void *def) ++{ ++ while (nr_table-- > 0) { ++ if (table->type == type) { ++ return table->data; ++ } ++ table++; ++ } ++ return def; ++} +diff --git a/drivers/mtd/nand/raw/match_table.h b/drivers/mtd/nand/raw/match_table.h +new file mode 100644 +index 000000000..1ee1d13fe +--- /dev/null ++++ b/drivers/mtd/nand/raw/match_table.h +@@ -0,0 +1,62 @@ ++/* ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++#ifndef __MATCH_TABLE_H__ ++#define __MATCH_TABLE_H__ ++ ++struct match_reg_type { ++ int reg; ++ int type; ++}; ++ ++struct match_type_str { ++ int type; ++ const char *str; ++}; ++ ++struct match_t { ++ int type; ++ int reg; ++ void *data; ++}; ++ ++#define MATCH_SET_TYPE_REG(_type, _reg) {(_type), (_reg), (void *)0} ++#define MATCH_SET_TYPE_DATA(_type, _data) {(_type), 0, (void *)(_data)} ++#define MATCH_SET(_type, _reg, _data) {(_type), (_reg), (void *)(_data)} ++ ++int reg2type(struct match_reg_type *table, int length, int reg, int def); ++ ++int type2reg(struct match_reg_type *table, int length, int type, int def); ++ ++int str2type(struct match_type_str *table, int length, const char *str, ++ int size, int def); ++ ++const char *type2str(struct match_type_str *table, int length, int type, ++ const char *def); ++ ++int match_reg_to_type(struct match_t *table, int nr_table, int reg, int def); ++ ++int match_type_to_reg(struct match_t *table, int nr_table, int type, int def); ++ ++int match_data_to_type(struct match_t *table, int nr_table,const char *data, ++ int size, int def); ++ ++void *match_type_to_data(struct match_t *table, int nr_table, int type, ++ void *def); ++ ++#endif /* End of __MATCH_TABLE_H__ */ +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c +index 7c3e3d70b..aedbe4ab6 100644 +--- a/drivers/mtd/nand/raw/nand_base.c ++++ b/drivers/mtd/nand/raw/nand_base.c +@@ -47,6 +47,10 @@ + + #include "internals.h" + ++#ifdef CONFIG_ARCH_BSP ++#include "nfc_gen.h" ++#endif ++ + static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, + struct mtd_pairing_info *info) + { +@@ -4431,6 +4435,10 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to, + int ret; + int oob_required = oob ? 1 : 0; + ++#ifdef CONFIG_ARCH_BSP ++ oob_required = 1; ++#endif ++ + ops->retlen = 0; + if (!writelen) + return 0; +@@ -5229,6 +5237,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) + u8 maf_id, dev_id; + u64 targetsize; + ++#ifdef CONFIG_ARCH_BSP ++ busw = 0; ++#endif ++ + /* + * Let's start by initializing memorg fields that might be left + * unassigned by the ID-based detection logic. 
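Reviewer note on the nand_base.c hunks above and below: rather than extending the core nand_flash_ids table, the CONFIG_ARCH_BSP path routes detection through a function pointer — the following hunk makes nand_detect() consult get_spi_nand_flash_type_hook() before falling back to the stock ID tables, and fmc_spl_ids_register() (added earlier in fmc_nand_spl_ids.c) fills the hook in when the controller driver initializes. A minimal C sketch of that wiring, assuming only the declarations this patch adds in nfc_gen.h; my_special_probe and my_ids_register are hypothetical names, not part of the patch:

/* Sketch only: how the BSP detection hook added by this patch is consumed. */
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

/* Declared in nfc_gen.h and defined (initially NULL) in nfc_gen.c. */
extern struct nand_flash_dev *(*get_spi_nand_flash_type_hook)(struct mtd_info *mtd,
		unsigned char *id);

/* Hypothetical controller-side probe playing the role of
 * fmc_get_spl_flash_type(): return a filled-in nand_flash_dev on a
 * match, or NULL so nand_detect() falls back to nand_flash_ids. */
static struct nand_flash_dev *my_special_probe(struct mtd_info *mtd,
		unsigned char *id)
{
	return NULL;
}

/* Mirrors fmc_spl_ids_register(): once the controller driver has
 * loaded, every later nand_detect() call tries the hook first. */
static void my_ids_register(void)
{
	get_spi_nand_flash_type_hook = my_special_probe;
}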
+@@ -5282,6 +5294,27 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
+ /* Try to identify manufacturer */
+ manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+ chip->manufacturer.desc = manufacturer_desc;
++#ifdef CONFIG_ARCH_BSP
++#ifndef CONFIG_MTD_SPI_NAND_BSP
++ /* Parallel Nand Flash */
++
++ /* The 3rd id byte holds MLC / multichip data */
++ chip->base.memorg.bits_per_cell = nand_get_bits_per_cell(id_data[2]);
++#endif
++
++ if (get_spi_nand_flash_type_hook)
++ type = get_spi_nand_flash_type_hook(mtd, id_data);
++
++ if (type)
++ goto ident_done;
++#ifdef CONFIG_MTD_SPI_NAND_BSP
++ else {
++ pr_info("This device[%02x,%02x] cannot be found in the spi nand id table!\n",
++ maf_id, dev_id);
++ return -ENODEV;
++ }
++#endif
++#endif /* CONFIG_ARCH_BSP */
+
+ if (!type)
+ type = nand_flash_ids;
+@@ -5347,12 +5380,19 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
+ memorg->pages_per_eraseblock);
+
+ ident_done:
++#ifdef CONFIG_ARCH_BSP
++ nfc_nand_param_adjust(mtd, chip);
++ chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
++ if (!chip->parameters.model)
++ return -ENOMEM;
++#endif
+ if (!mtd->name)
+ mtd->name = chip->parameters.model;
+
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(busw & NAND_BUSWIDTH_16);
+ nand_set_defaults(chip);
++ printk("NAND_BUSWIDTH_AUTO, line: %d\n", __LINE__);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
+index 650351c62..0edd83a97 100644
+--- a/drivers/mtd/nand/raw/nand_ids.c
++++ b/drivers/mtd/nand/raw/nand_ids.c
+@@ -195,6 +195,9 @@ static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
+ {NAND_MFR_WINBOND, "Winbond"},
++#ifdef CONFIG_ARCH_BSP
++ {NAND_MFR_ALL_FLASH, "All-flash"},
++#endif
+ };
+
+ /**
+diff --git a/drivers/mtd/nand/raw/nfc_gen.c b/drivers/mtd/nand/raw/nfc_gen.c
+new file mode 100644
+index 000000000..82ec22fbc
+--- /dev/null
++++ b/drivers/mtd/nand/raw/nfc_gen.c
+@@ -0,0 +1,237 @@
++/*
++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
++ * ++ */ ++ ++#include ++#include "match_table.h" ++#include "nfc_gen.h" ++ ++struct nand_flash_dev *(*get_spi_nand_flash_type_hook)(struct mtd_info *mtd, ++ unsigned char *id) = NULL; ++ ++static struct match_t match_ecc[] = { ++ MATCH_SET_TYPE_DATA(NAND_ECC_NONE_0, "none"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_0BIT, "none"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_1BIT_512, "1bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_4BIT, "4bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_4BIT_512, "4bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_4BYTE, "4byte/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_8BIT, "4bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_8BIT_512, "8bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_8BYTE, "8byte/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_13BIT, "13bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_16BIT, "8bit/512"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_18BIT, "18bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_24BIT, "24bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_27BIT, "27bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_32BIT, "32bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_40BIT, "40bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_41BIT, "41bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_48BIT, "48bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_60BIT, "60bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_72BIT, "72bit/1k"), ++ MATCH_SET_TYPE_DATA(NAND_ECC_80BIT, "80bit/1k"), ++}; ++ ++const char *nand_ecc_name(int type) ++{ ++ return (char *)match_type_to_data(match_ecc, ARRAY_SIZE(match_ecc), ++ type, "unknown"); ++} ++ ++char *get_ecctype_str(enum ecc_type ecctype) ++{ ++ static char *ecctype_string[] = { ++ "None", "1bit/512Byte", "4bits/512Byte", "8bits/512Byte", ++ "24bits/1K", "40bits/1K", "unknown", "unknown" ++ }; ++ return ecctype_string[(ecctype & 0x07)]; ++} ++ ++static struct match_type_str page2name[] = { ++ { NAND_PAGE_512B, "512" }, ++ { NAND_PAGE_2K, "2K" }, ++ { NAND_PAGE_4K, "4K" }, ++ { NAND_PAGE_8K, "8K" }, ++ { NAND_PAGE_16K, "16K" }, ++ { NAND_PAGE_32K, "32K" }, ++}; ++ ++const char *nand_page_name(int type) ++{ ++ return type2str(page2name, ARRAY_SIZE(page2name), type, "unknown"); ++} ++ ++char *get_pagesize_str(enum page_type pagetype) ++{ ++ static char *pagesize_str[] = { ++ "512", "2K", "4K", "8K", "16K", "unknown", ++ "unknown", "unknown" ++ }; ++ return pagesize_str[(pagetype & 0x07)]; ++} ++ ++static struct match_reg_type page2size[] = { ++ { _512B, NAND_PAGE_512B }, ++ { _2K, NAND_PAGE_2K }, ++ { _4K, NAND_PAGE_4K }, ++ { _8K, NAND_PAGE_8K }, ++ { _16K, NAND_PAGE_16K }, ++ { _32K, NAND_PAGE_32K }, ++}; ++ ++unsigned int get_pagesize(enum page_type pagetype) ++{ ++ unsigned int pagesize[] = { ++ _512B, _2K, _4K, _8K, _16K, 0, 0, 0 ++ }; ++ return pagesize[(pagetype & 0x07)]; ++} ++ ++int nandpage_size2type(int size) ++{ ++ return reg2type(page2size, ARRAY_SIZE(page2size), size, NAND_PAGE_2K); ++} ++ ++int nandpage_type2size(int size) ++{ ++ return type2reg(page2size, ARRAY_SIZE(page2size), size, NAND_PAGE_2K); ++} ++ ++char *nand_dbgfs_options; ++ ++static int __init dbgfs_options_setup(char *s) ++{ ++ nand_dbgfs_options = s; ++ return 1; ++} ++__setup("nanddbgfs=", dbgfs_options_setup); ++ ++int get_bits(unsigned int n) ++{ ++ int loop; ++ int ret = 0; ++ ++ if (!n) ++ return 0; ++ ++ if (n > 0xFFFF) ++ loop = n > 0xFFFFFF ? 32 : 24; ++ else ++ loop = n > 0xFF ? 
16 : 8; ++ ++ while (loop-- > 0 && n) { ++ if (n & 1) ++ ret++; ++ n >>= 1; ++ } ++ return ret; ++} ++ ++#define et_ecc_none 0x00 ++#define et_ecc_4bit 0x02 ++#define et_ecc_8bit 0x03 ++#define et_ecc_24bit1k 0x04 ++#define et_ecc_40bit1k 0x05 ++#define et_ecc_64bit1k 0x06 ++ ++static struct match_reg_type ecc_yaffs_type_t[] = { ++ {et_ecc_none, NAND_ECC_0BIT}, ++ {et_ecc_4bit, NAND_ECC_8BIT}, ++ {et_ecc_8bit, NAND_ECC_16BIT}, ++ {et_ecc_24bit1k, NAND_ECC_24BIT}, ++ {et_ecc_40bit1k, NAND_ECC_40BIT}, ++ {et_ecc_64bit1k, NAND_ECC_64BIT} ++}; ++ ++unsigned char match_ecc_type_to_yaffs(unsigned char type) ++{ ++ return type2reg(ecc_yaffs_type_t, ARRAY_SIZE(ecc_yaffs_type_t), type, ++ et_ecc_4bit); ++} ++ ++static struct match_t page_table[] = { ++ {NAND_PAGE_2K, PAGE_SIZE_2KB, "2K"}, ++ {NAND_PAGE_4K, PAGE_SIZE_4KB, "4K"}, ++ {NAND_PAGE_8K, PAGE_SIZE_8KB, "8K"}, ++ {NAND_PAGE_16K, PAGE_SIZE_16KB, "16K"}, ++}; ++ ++unsigned char match_page_reg_to_type(unsigned char reg) ++{ ++ return match_reg_to_type(page_table, ARRAY_SIZE(page_table), reg, ++ NAND_PAGE_2K); ++} ++ ++unsigned char match_page_type_to_reg(unsigned char type) ++{ ++ return match_type_to_reg(page_table, ARRAY_SIZE(page_table), type, ++ PAGE_SIZE_2KB); ++} ++ ++const char *match_page_type_to_str(unsigned char type) ++{ ++ return match_type_to_data(page_table, ARRAY_SIZE(page_table), type, ++ "unknown"); ++} ++ ++static struct match_t ecc_table[] = { ++ {NAND_ECC_0BIT, ECC_TYPE_0BIT, "none"}, ++ {NAND_ECC_8BIT, ECC_TYPE_8BIT, "4bit/512"}, ++ {NAND_ECC_16BIT, ECC_TYPE_16BIT, "8bit/512"}, ++ {NAND_ECC_24BIT, ECC_TYPE_24BIT, "24bit/1K"}, ++ {NAND_ECC_28BIT, ECC_TYPE_28BIT, "28bit/1K"}, ++ {NAND_ECC_40BIT, ECC_TYPE_40BIT, "40bit/1K"}, ++ {NAND_ECC_64BIT, ECC_TYPE_64BIT, "64bit/1K"}, ++}; ++ ++unsigned char match_ecc_reg_to_type(unsigned char reg) ++{ ++ return match_reg_to_type(ecc_table, ARRAY_SIZE(ecc_table), reg, ++ NAND_ECC_8BIT); ++} ++ ++unsigned char match_ecc_type_to_reg(unsigned char type) ++{ ++ return match_type_to_reg(ecc_table, ARRAY_SIZE(ecc_table), type, ++ ECC_TYPE_8BIT); ++} ++ ++const char *match_ecc_type_to_str(unsigned char type) ++{ ++ return match_type_to_data(ecc_table, ARRAY_SIZE(ecc_table), type, ++ "unknown"); ++} ++ ++static struct match_t page_type_size_table[] = { ++ {NAND_PAGE_2K, _2K, NULL}, ++ {NAND_PAGE_4K, _4K, NULL}, ++ {NAND_PAGE_8K, _8K, NULL}, ++ {NAND_PAGE_16K, _16K, NULL}, ++}; ++ ++unsigned char match_page_size_to_type(unsigned int size) ++{ ++ return match_reg_to_type(page_type_size_table, ++ ARRAY_SIZE(page_type_size_table), size, NAND_PAGE_2K); ++} ++ ++unsigned int match_page_type_to_size(unsigned char type) ++{ ++ return match_type_to_reg(page_type_size_table, ++ ARRAY_SIZE(page_type_size_table), type, _2K); ++} +\ No newline at end of file +diff --git a/drivers/mtd/nand/raw/nfc_gen.h b/drivers/mtd/nand/raw/nfc_gen.h +new file mode 100644 +index 000000000..650d2e515 +--- /dev/null ++++ b/drivers/mtd/nand/raw/nfc_gen.h +@@ -0,0 +1,256 @@ ++/* ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ * ++ */ ++ ++#ifndef __NFC_GEN_H__ ++#define __NFC_GEN_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define NFC_VER_300 (0x300) ++#define NFC_VER_301 (0x301) ++#define NFC_VER_310 (0x310) ++#define NFC_VER_504 (0x504) ++#define NFC_VER_505 (0x505) ++#define NFC_VER_600 (0x600) ++#define NFC_VER_610 (0x610) ++#define NFC_VER_620 (0x620) ++ ++#define SNFC_VER_100 (0x400) ++ ++#define NAND_PAGE_512B 0 ++#define NAND_PAGE_1K 1 ++#define NAND_PAGE_2K 2 ++#define NAND_PAGE_4K 3 ++#define NAND_PAGE_8K 4 ++#define NAND_PAGE_16K 5 ++#define NAND_PAGE_32K 6 ++ ++#define NAND_ECC_NONE_0 0 ++#define NAND_ECC_0BIT 0 ++#define NAND_ECC_1BIT 1 ++#define NAND_ECC_1BIT_512 1 ++#define NAND_ECC_4BIT 2 ++#define NAND_ECC_4BIT_512 2 ++#define NAND_ECC_4BYTE 2 ++#define NAND_ECC_8BIT 2 ++#define NAND_ECC_8BIT_512 3 ++#define NAND_ECC_8BYTE 3 ++#define NAND_ECC_13BIT 4 ++#define NAND_ECC_16BIT 5 ++#define NAND_ECC_18BIT 6 ++#define NAND_ECC_24BIT 7 ++#define NAND_ECC_27BIT 8 ++#define NAND_ECC_28BIT 9 ++#define NAND_ECC_32BIT 10 ++#define NAND_ECC_40BIT 11 ++#define NAND_ECC_41BIT 12 ++#define NAND_ECC_42BIT 13 ++#define NAND_ECC_48BIT 14 ++#define NAND_ECC_60BIT 15 ++#define NAND_ECC_64BIT 16 ++#define NAND_ECC_72BIT 17 ++#define NAND_ECC_80BIT 18 ++ ++enum ecc_type { ++ et_ecc_none = 0x00, ++ et_ecc_1bit = 0x01, ++ et_ecc_4bit = 0x02, ++ et_ecc_8bit = 0x03, ++ et_ecc_24bit1k = 0x04, ++ et_ecc_40bit1k = 0x05, ++ et_ecc_64bit1k = 0x06, ++}; ++ ++enum page_type { ++ pt_pagesize_512 = 0x00, ++ pt_pagesize_2K = 0x01, ++ pt_pagesize_4K = 0x02, ++ pt_pagesize_8K = 0x03, ++ pt_pagesize_16K = 0x04, ++}; ++ ++struct nand_config_info { ++ unsigned int pagetype; ++ unsigned int ecctype; ++ unsigned int ecc_strength; ++ unsigned int oobsize; ++ struct mtd_ooblayout_ops *ooblayout_ops; ++}; ++ ++struct nfc_host; ++ ++struct nand_sync { ++#define SET_NAND_SYNC_TYPE(_mfr, _onfi, _version) \ ++ ((((_mfr) & 0xFF) << 16) | (((_version) & 0xFF) << 8) \ ++ | ((_onfi) & 0xFF)) ++ ++#define GET_NAND_SYNC_TYPE_MFR(_type) (((_type) >> 16) & 0xFF) ++#define GET_NAND_SYNC_TYPE_VER(_type) (((_type) >> 8) & 0xFF) ++#define GET_NAND_SYNC_TYPE_INF(_type) ((_type) & 0xFF) ++ ++#define NAND_TYPE_TOGGLE_10 SET_NAND_SYNC_TYPE(0, 0, 0x10) ++#define NAND_TYPE_ONFI_30 SET_NAND_SYNC_TYPE(0, NAND_IS_ONFI, 0x30) ++#define NAND_TYPE_ONFI_23 SET_NAND_SYNC_TYPE(0, NAND_IS_ONFI, 0x23) ++ ++ int type; ++ int (*enable)(struct nand_chip *chip); ++ int (*disable)(struct nand_chip *chip); ++}; ++ ++struct read_retry_t { ++ int type; ++ int count; ++ int (*set_rr_param)(struct nfc_host *host, int param); ++ int (*get_rr_param)(struct nfc_host *host); ++ int (*reset_rr_param)(struct nfc_host *host); ++}; ++ ++struct ecc_info_t { ++ int pagesize; ++ int ecctype; ++ int threshold; ++ int section; ++ void (*dump)(struct nfc_host *host, unsigned char ecc[], ++ int *max_bitsflag); ++}; ++ ++struct nand_dev_t { ++ struct nand_flash_dev flash_dev; ++ ++ char *start_type; ++ unsigned char ids[NAND_MAX_ID_LEN]; ++ int oobsize; ++ int ecctype; ++ ++ /* (Controller) support ecc/page detect, driver don't need detect */ ++#define NANDC_HW_AUTO 0x01 ++ /* (Controller) support ecc/page detect, ++ * and current ecc/page config finish */ ++#define NANDC_CONFIG_DONE 0x02 ++ /* (Controller) is sync, default is async */ ++#define NANDC_IS_SYNC_BOOT 0x04 ++ ++/* (NAND) need 
randomizer */ ++#define NAND_RANDOMIZER 0x10 ++/* (NAND) is ONFI interface, combine with sync/async symble */ ++#define NAND_IS_ONFI 0x20 ++ ++#define NAND_MODE_SYNC_ASYNC 0x40 ++ ++#define NAND_MODE_ONLY_SYNC 0x80 ++ ++ unsigned int flags; ++ ++#define NAND_RR_NONE 0x00 ++ int read_retry_type; ++}; ++ ++ ++#define IS_NANDC_HW_AUTO(_host) ((_host)->flags & NANDC_HW_AUTO) ++#define IS_NANDC_CONFIG_DONE(_host) ((_host)->flags & NANDC_CONFIG_DONE) ++#define IS_NANDC_SYNC_BOOT(_host) ((_host)->flags & NANDC_IS_SYNC_BOOT) ++ ++#define IS_NAND_RANDOM(_dev) ((_dev)->flags & NAND_RANDOMIZER) ++#define IS_NAND_ONLY_SYNC(_dev) ((_dev)->flags & NAND_MODE_ONLY_SYNC) ++#define IS_NAND_SYNC_ASYNC(_dev) ((_dev)->flags & NAND_MODE_SYNC_ASYNC) ++#define IS_NAND_ONFI(_dev) ((_dev)->flags & NAND_IS_ONFI) ++ ++#define ERSTR_HARDWARE "Hardware configuration error. " ++#define ERSTR_DRIVER "Driver does not support. " ++ ++#define ENABLE 1 ++#define DISABLE 0 ++ ++char *get_ecctype_str(enum ecc_type ecctype); ++ ++char *get_pagesize_str(enum page_type pagetype); ++ ++unsigned int get_pagesize(enum page_type pagetype); ++ ++const char *nand_ecc_name(int type); ++ ++const char *nand_page_name(int type); ++ ++int nandpage_size2type(int size); ++ ++int nandpage_type2size(int size); ++ ++extern int (*nfc_param_adjust)(struct mtd_info *mtd, struct nand_chip *chip, ++ struct nand_dev_t *nand_dev); ++ ++extern struct nand_flash_dev *(*nand_get_flash_type_func)(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ struct nand_dev_t *spinand_dev_t); ++ ++extern struct nand_flash_dev *(*get_spi_nand_flash_type_hook) ++(struct mtd_info *mtd, unsigned char *id); ++ ++extern int (*nfc_param_adjust)(struct mtd_info *, ++ struct nand_chip *, struct nand_dev_t *); ++ ++struct nand_flash_dev *nfc_get_flash_type(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ u8 *id_data, int *busw); ++ ++extern struct nand_flash_dev *(*get_spi_nand_flash_type_hook) ++(struct mtd_info *mtd, unsigned char *id); ++ ++void nfc_nand_param_adjust(struct mtd_info *mtd, struct nand_chip *chip); ++ ++void nfc_show_info(struct mtd_info *mtd, const char *vendor, char *chipname); ++ ++void nfc_show_chipsize(struct nand_chip *chip); ++ ++int get_bits(unsigned int n); ++ ++#define nfc_pr_msg(_fmt, arg...) printk(_fmt, ##arg) ++ ++#define nfc_pr_bug(fmt, args...) do { \ ++ printk("%s(%d): bug " fmt, __FILE__, __LINE__, ##args); \ ++ while (1) \ ++ ; \ ++} while (0) ++ ++#define PR_MSG(_fmt, arg...) \ ++ printk(_fmt, ##arg) ++ ++extern char *nand_dbgfs_options; ++ ++extern unsigned char match_page_reg_to_type(unsigned char reg); ++ ++extern unsigned char match_page_type_to_reg(unsigned char type); ++ ++extern const char *match_page_type_to_str(unsigned char type); ++ ++extern unsigned char match_ecc_reg_to_type(unsigned char reg); ++ ++extern unsigned char match_ecc_type_to_reg(unsigned char type); ++ ++extern const char *match_ecc_type_to_str(unsigned char type); ++ ++extern unsigned char match_page_size_to_type(unsigned int size); ++ ++extern unsigned int match_page_type_to_size(unsigned char type); ++ ++const char *nand_ecc_name(int type); ++ ++#endif /* End of __NFC_GEN_H__ */ +\ No newline at end of file +diff --git a/drivers/mtd/nand/raw/nfc_spl_ids.c b/drivers/mtd/nand/raw/nfc_spl_ids.c +new file mode 100644 +index 000000000..7cccc1451 +--- /dev/null ++++ b/drivers/mtd/nand/raw/nfc_spl_ids.c +@@ -0,0 +1,169 @@ ++/* ++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd. 
++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ * ++ */ ++ ++#include ++#include ++#include ++#include "nfc_gen.h" ++ ++struct nand_flash_special_dev { ++ unsigned char id[8]; ++ int length; /* length of id. */ ++ unsigned long long chipsize; ++ struct nand_flash_dev *(*probe)(struct nand_dev_t *nand_dev); ++ char *name; ++ ++ unsigned long pagesize; ++ unsigned long erasesize; ++ unsigned long oobsize; ++ unsigned long options; ++ unsigned int read_retry_type; ++ ++#define BBP_LAST_PAGE 0x01 ++#define BBP_FIRST_PAGE 0x02 ++ unsigned int badblock_pos; ++ unsigned int flags; ++}; ++ ++/* this is nand probe function. */ ++ ++#define DRV_VERSION "1.38" ++ ++static struct nand_flash_special_dev nand_flash_special_dev[] = { ++ {{0}, 0, 0, 0, 0, 0, 0, 0, 0}, ++}; ++ ++#define NUM_OF_SPECIAL_DEVICE \ ++ (sizeof(nand_flash_special_dev) / sizeof(struct nand_flash_special_dev)) ++ ++int (*nfc_param_adjust)(struct mtd_info *, struct nand_chip *, ++ struct nand_dev_t *) = NULL; ++ ++static struct nand_dev_t __nand_dev; ++ ++ ++static struct nand_flash_dev *nfc_nand_probe(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ struct nand_dev_t *nand_dev) ++{ ++ struct nand_flash_special_dev *spl_dev = NULL; ++ unsigned char *byte = nand_dev->ids; ++ struct nand_flash_dev *type = &nand_dev->flash_dev; ++ ++ nfc_pr_msg("Nand ID: 0x%02X 0x%02X 0x%02X 0x%02X", ++ byte[0], byte[1], byte[2], byte[3]); ++ nfc_pr_msg(" 0x%02X 0x%02X 0x%02X 0x%02X\n", ++ byte[4], byte[5], byte[6], byte[7]); ++ ++ for (spl_dev = nand_flash_special_dev; spl_dev->length; spl_dev++) { ++ if (memcmp(byte, spl_dev->id, spl_dev->length)) ++ continue; ++ ++ nfc_pr_msg("The Special NAND id table Version: %s\n", DRV_VERSION); ++ ++ if (spl_dev->probe) { ++ type = spl_dev->probe(nand_dev); ++ } else { ++ type->options = spl_dev->options; ++ type->pagesize = spl_dev->pagesize; ++ type->erasesize = spl_dev->erasesize; ++ nand_dev->oobsize = spl_dev->oobsize; ++ } ++ ++ nand_dev->read_retry_type = spl_dev->read_retry_type; ++ nand_dev->flags = spl_dev->flags; ++ ++ type->id[1] = byte[1]; ++ type->chipsize = (unsigned long)(spl_dev->chipsize >> 20); ++ type->name = spl_dev->name; ++ return type; ++ } ++ nand_dev->read_retry_type = NAND_RR_NONE; ++ ++ return NULL; ++} ++ ++ ++struct nand_flash_dev *nfc_get_flash_type(struct mtd_info *mtd, ++ struct nand_chip *chip, ++ u8 *id_data, int *busw) ++{ ++ struct nand_flash_dev *type = NULL; ++ struct nand_dev_t *nand_dev = &__nand_dev; ++ int ret; ++ ++ (void)memset_s(nand_dev, sizeof(struct nand_dev_t), 0, ++ sizeof(struct nand_dev_t)); ++ ret = memcpy_s(nand_dev->ids, 8, id_data, 8); ++ if (ret) { ++ printk("%s:memcpy_s failed!\n", __func__); ++ return NULL; ++ } ++ ++ if (!nfc_nand_probe(mtd, chip, nand_dev)) ++ return NULL; ++ ++ type = &nand_dev->flash_dev; ++ ++ if (!mtd->name) ++ mtd->name = type->name; ++ ++ mtd->erasesize = type->erasesize; ++ mtd->writesize = type->pagesize; ++ mtd->oobsize = 
nand_dev->oobsize;
++ *busw = (type->options & NAND_BUSWIDTH_16);
++
++ return type;
++}
++
++
++void nfc_nand_param_adjust(struct mtd_info *mtd, struct nand_chip *chip)
++{
++ struct nand_dev_t *nand_dev = &__nand_dev;
++
++ if (!nand_dev->oobsize)
++ nand_dev->oobsize = mtd->oobsize;
++
++ if (nfc_param_adjust)
++ nfc_param_adjust(mtd, chip, nand_dev);
++}
++
++
++void nfc_show_info(struct mtd_info *mtd, const char *vendor, char *chipname)
++{
++ struct nand_dev_t *nand_dev = &__nand_dev;
++
++ if (IS_NAND_RANDOM(nand_dev))
++ nfc_pr_msg("Randomizer \n");
++
++ if (nand_dev->read_retry_type != NAND_RR_NONE)
++ nfc_pr_msg("Read-Retry \n");
++
++ if (nand_dev->start_type)
++ nfc_pr_msg("Nand(%s): ", nand_dev->start_type);
++ else
++ nfc_pr_msg("Nand: ");
++
++ nfc_pr_msg("OOB:%dB ", nand_dev->oobsize);
++ nfc_pr_msg("ECC:%s ", nand_ecc_name(nand_dev->ecctype));
++}
++
++
++void nfc_show_chipsize(struct nand_chip *chip)
++{
++}
+diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
+index e347b435a..655b09247 100644
+--- a/drivers/mtd/spi-nor/Makefile
++++ b/drivers/mtd/spi-nor/Makefile
+@@ -18,6 +18,9 @@ spi-nor-objs += winbond.o
+ spi-nor-objs += xilinx.o
+ spi-nor-objs += xmc.o
+ spi-nor-$(CONFIG_DEBUG_FS) += debugfs.o
++ifdef CONFIG_ARCH_BSP
++obj-$(CONFIG_MTD_SPI_NOR) += bsp-generic.o
++endif
+ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+
+ obj-$(CONFIG_MTD_SPI_NOR) += controllers/
+diff --git a/drivers/mtd/spi-nor/bsp-generic.c b/drivers/mtd/spi-nor/bsp-generic.c
+new file mode 100644
+index 000000000..52f4406f5
+--- /dev/null
++++ b/drivers/mtd/spi-nor/bsp-generic.c
+@@ -0,0 +1,25 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */
++
++#include 
++
++#include "core.h"
++
++static const struct flash_info general_parts[] = {
++};
++
++static void spinor_default_init(struct spi_nor *nor)
++{
++}
++
++static const struct spi_nor_fixups general_fixups = {
++ .default_init = spinor_default_init,
++};
++
++const struct spi_nor_manufacturer spi_nor_general = {
++ .name = "general",
++ .parts = general_parts,
++ .nparts = ARRAY_SIZE(general_parts),
++ .fixups = &general_fixups,
++};
+diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
+index ca45dcd3f..210069f29 100644
+--- a/drivers/mtd/spi-nor/controllers/Kconfig
++++ b/drivers/mtd/spi-nor/controllers/Kconfig
+@@ -6,6 +6,16 @@ config SPI_HISI_SFC
+ help
+ This enables support for HiSilicon FMC SPI NOR flash controller.
+
++if ARCH_BSP
++config SPI_BSP_SFC
++ tristate "Vendor FMCV100 SPI-NOR Flash Controller(SFC)"
++ depends on ARCH_BSP || COMPILE_TEST
++ depends on HAS_IOMEM && HAS_DMA
++ help
++ This enables support for the vendor flash memory controller ver100
++ (FMCV100) SPI-NOR flash controller.
++endif
++
+ config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+@@ -16,3 +26,34 @@ config SPI_NXP_SPIFI
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as a mtd device.
++
++if ARCH_BSP
++config MTD_SPI_IDS
++ bool "SPI Flash Timing Cycles Probe Function"
++ default n
++ help
++ This option enables the spi flash timing cycles probe
++ function used by sfc300/sfc350.
++ If you use sfc300 or sfc350, this function should be selected. 
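One more reviewer note: both special-ID tables introduced above (nand_flash_special_table in fmc_nand_spl_ids.c and nand_flash_special_dev[] in nfc_spl_ids.c) ship containing only the all-zero sentinel, so the probes match nothing until per-board entries are added. For illustration only — every value below is fabricated, not a real device — an entry for the fmc_nand_spl_ids.c table would follow the struct nand_flash_special_dev layout like this:

/* Illustration only: fabricated ID bytes and geometry. */
static struct nand_flash_special_dev nand_flash_special_table[] = {
	{
		.id              = {0xaa, 0x55, 0x90, 0x93, 0x44, 0x00},
		.length          = 5,               /* ID bytes compared by memcmp() */
		.chipsize        = 0x100000000ULL,  /* 4 GiB; stored as MiB via >> 20 */
		.probe           = NULL,            /* or a decoder such as hynix_probe_v02 */
		.name            = "example-nand",
		.pagesize        = _8K,
		.erasesize       = _2M,
		.oobsize         = 448,
		.options         = 0,
		.read_retry_type = NAND_RR_NONE,
		.badblock_pos    = BBP_FIRST_PAGE,
		.flags           = NAND_RANDOMIZER,
	},
	{{0}, 0, 0, 0, 0, 0, 0, 0, 0},  /* sentinel: length == 0 stops the scan */
};

With such an entry in place, fmc_get_spl_flash_type() copies the geometry into both the mtd_info and the chip's memorg — exactly the path the nand_detect() hunk earlier short-circuits into via ident_done.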
++
++config CLOSE_SPI_8PIN_4IO
++ bool "Close SPI device Quad SPI mode for some 8PIN chip"
++ default y if ARCH_BSP
++ help
++ fmcv100 and sfcv350 support Quad SPI mode and Quad&addr SPI mode.
++ But some 8PIN chips do not support this mode when the HOLD/IO3 PIN
++ is used for the reset operation.
++ Usually, you should not enable this option.
++
++config BSP_SPI_BLOCK_PROTECT
++ bool "Vendor Spi Nor Device BP(Block Protect) Support"
++ depends on SPI_BSP_SFC
++ default y if SPI_BSP_SFC
++ help
++ The SFC supports the BP (Block Protect) feature, which locks a
++ preset area against writing and erasing while still allowing reads.
++ With this option we can read back the BP info that was set earlier.
++ If the BOTTOM/TOP bit is set to BOTTOM, the locked area starts
++ from address 0.
++endif
++
+diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
+index 0b8e1d530..45d1f3f1b 100644
+--- a/drivers/mtd/spi-nor/controllers/Makefile
++++ b/drivers/mtd/spi-nor/controllers/Makefile
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+ obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
++obj-$(CONFIG_SPI_BSP_SFC) += bsp-sfc.o
+diff --git a/drivers/mtd/spi-nor/controllers/bsp-sfc.c b/drivers/mtd/spi-nor/controllers/bsp-sfc.c
+new file mode 100644
+index 000000000..dfd7b3511
+--- /dev/null
++++ b/drivers/mtd/spi-nor/controllers/bsp-sfc.c
+@@ -0,0 +1,793 @@
++/*
++ * SPI Nor Flash Controller Driver
++ *
++ * Copyright (c) 2015-2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
++*/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_ARCH_BSP ++#include ++#include ++#include "../../mtdcore.h" ++#endif /* CONFIG_ARCH_BSP */ ++ ++#ifndef CONFIG_ARCH_BSP ++/* Hardware register offsets and field definitions */ ++#define FMC_CFG 0x00 ++#define FMC_CFG_OP_MODE_MASK BIT_MASK(0) ++#define FMC_CFG_OP_MODE_BOOT 0 ++#define FMC_CFG_OP_MODE_NORMAL 1 ++#define fmc_cfg_ecc_type(type) (((type) & 0x3) << 1) ++#define FMC_CFG_FLASH_SEL_MASK 0x6 ++#define mfc_ecc_type(type) (((type) & 0x7) << 5) ++#define FMC_ECC_TYPE_MASK GENMASK(7, 5) ++#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) ++#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) ++#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) ++#define FMC_GLOBAL_CFG 0x04 ++#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) ++#define FMC_SPI_TIMING_CFG 0x08 ++#define timing_cfg_tcsh(nr) (((nr) & 0xf) << 8) ++#define timing_cfg_tcss(nr) (((nr) & 0xf) << 4) ++#define timing_cfg_tshsl(nr) ((nr) & 0xf) ++#define CS_HOLD_TIME 0x6 ++#define CS_SETUP_TIME 0x6 ++#define CS_DESELECT_TIME 0xf ++#define FMC_INT 0x18 ++#define FMC_INT_OP_DONE BIT(0) ++#define FMC_INT_CLR 0x20 ++#define FMC_CMD 0x24 ++#define fmc_cmd_cmd1(cmd) ((cmd) & 0xff) ++#define FMC_ADDRL 0x2c ++#define FMC_OP_CFG 0x30 ++#define op_cfg_fm_cs(cs) ((cs) << 11) ++#define op_cfg_mem_if_type(type) (((type) & 0x7) << 7) ++#define op_cfg_addr_num(addr) (((addr) & 0x7) << 4) ++#define op_cfg_dummy_num(dummy) ((dummy) & 0xf) ++#define FMC_DATA_NUM 0x38 ++#define fmc_data_num_cnt(cnt) ((cnt) & GENMASK(13, 0)) ++#define FMC_OP 0x3c ++#define FMC_OP_DUMMY_EN BIT(8) ++#define FMC_OP_CMD1_EN BIT(7) ++#define FMC_OP_ADDR_EN BIT(6) ++#define FMC_OP_WRITE_DATA_EN BIT(5) ++#define FMC_OP_READ_DATA_EN BIT(2) ++#define FMC_OP_READ_STATUS_EN BIT(1) ++#define FMC_OP_REG_OP_START BIT(0) ++#define FMC_DMA_LEN 0x40 ++#define fmc_dma_len_set(len) ((len) & GENMASK(27, 0)) ++#define FMC_DMA_SADDR_D0 0x4c ++#define FMC_DMA_MAX_LEN (4096) ++#define FMC_DMA_MASK (FMC_DMA_MAX_LEN - 1) ++#define FMC_OP_DMA 0x68 ++#define op_ctrl_rd_opcode(code) (((code) & 0xff) << 16) ++#define op_ctrl_wr_opcode(code) (((code) & 0xff) << 8) ++#define op_ctrl_rw_op(op) ((op) << 1) ++#define OP_CTRL_DMA_OP_READY BIT(0) ++#define FMC_OP_READ 0x0 ++#define FMC_OP_WRITE 0x1 ++#define FMC_WAIT_TIMEOUT 1000000 ++ ++enum fmc_iftype { ++ IF_TYPE_STD, ++ IF_TYPE_DUAL, ++ IF_TYPE_DIO, ++ IF_TYPE_QUAD, ++ IF_TYPE_QIO, ++}; ++#endif /* CONFIG_ARCH_BSP */ ++ ++struct fmc_priv { ++ u32 chipselect; ++ u32 clkrate; ++ struct fmc_host *host; ++}; ++ ++#ifndef CONFIG_ARCH_BSP ++#define FMC_MAX_CHIP_NUM 2 ++#endif /* CONFIG_ARCH_BSP */ ++struct fmc_host { ++ struct device *dev; ++#ifdef CONFIG_ARCH_BSP ++ struct mutex *lock; ++#else ++ struct mutex lock; ++#endif /* CONFIG_ARCH_BSP */ ++ ++ void __iomem *regbase; ++ void __iomem *iobase; ++ struct clk *clk; ++ void *buffer; ++ dma_addr_t dma_buffer; ++ ++ struct spi_nor *nor[FMC_MAX_CHIP_NUM]; ++ u32 num_chip; ++#ifdef CONFIG_ARCH_BSP ++ struct fmc_priv priv[FMC_MAX_CHIP_NUM]; ++ unsigned int dma_len; ++#endif /* CONFIG_ARCH_BSP */ ++}; ++ ++static inline int bsp_spi_nor_wait_op_finish(const struct fmc_host *host) ++{ ++ u32 reg; ++ ++ if (!host) ++ return -1; ++ ++ return readl_poll_timeout(host->regbase + FMC_INT, reg, ++ (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); ++} ++ ++static u8 bsp_spi_nor_get_if_type(enum spi_nor_protocol proto) ++{ ++ enum fmc_iftype if_type; ++ ++ switch (proto) { ++ case SNOR_PROTO_1_1_2: ++ if_type = 
IF_TYPE_DUAL; ++ break; ++ case SNOR_PROTO_1_2_2: ++ if_type = IF_TYPE_DIO; ++ break; ++ case SNOR_PROTO_1_1_4: ++ if_type = IF_TYPE_QUAD; ++ break; ++ case SNOR_PROTO_1_4_4: ++ if_type = IF_TYPE_QIO; ++ break; ++ case SNOR_PROTO_1_1_1: ++ default: ++ if_type = IF_TYPE_STD; ++ break; ++ } ++ ++ return (u8)if_type; ++} ++ ++#ifdef CONFIG_ARCH_BSP ++static void spi_nor_switch_spi_type(struct fmc_host *host) ++{ ++ unsigned int reg; ++ ++ reg = readl(host->regbase + FMC_CFG); ++ reg &= ~FLASH_TYPE_SEL_MASK; ++ reg |= fmc_cfg_ecc_type(0); ++ writel(reg, host->regbase + FMC_CFG); ++} ++#endif /* CONFIG_ARCH_BSP */ ++ ++static void bsp_spi_nor_init(struct fmc_host *host) ++{ ++ u32 reg; ++ ++#ifdef CONFIG_ARCH_BSP ++ /* switch the flash type to spi nor */ ++ spi_nor_switch_spi_type(host); ++ ++ /* set the boot mode to normal */ ++ reg = readl(host->regbase + FMC_CFG); ++ if ((reg & FMC_CFG_OP_MODE_MASK) == FMC_CFG_OP_MODE_BOOT) { ++ reg |= fmc_cfg_op_mode(FMC_CFG_OP_MODE_NORMAL); ++ writel(reg, host->regbase + FMC_CFG); ++ } ++ ++ /* hold on STR mode */ ++ reg = readl(host->regbase + FMC_GLOBAL_CFG); ++ reg &= (~FMC_GLOBAL_CFG_DTR_MODE); ++ writel(reg, host->regbase + FMC_GLOBAL_CFG); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ /* set timming */ ++ reg = timing_cfg_tcsh(CS_HOLD_TIME) | ++ timing_cfg_tcss(CS_SETUP_TIME) | ++ timing_cfg_tshsl(CS_DESELECT_TIME); ++ writel(reg, host->regbase + FMC_SPI_TIMING_CFG); ++} ++ ++static int bsp_spi_nor_prep(struct spi_nor *nor) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ int ret; ++#ifdef CONFIG_ARCH_BSP ++ u32 clkrate; ++ ++ mutex_lock(&fmc_switch_mutex); ++ mutex_lock(host->lock); ++ ++ clkrate = min_t(u32, priv->clkrate, nor->clkrate); ++ ret = clk_set_rate(host->clk, clkrate); ++#else ++ mutex_lock(&host->lock); ++ ++ ret = clk_set_rate(host->clk, priv->clkrate); ++#endif /* CONFIG_ARCH_BSP */ ++ if (ret) ++ goto out; ++ ++ ret = clk_prepare_enable(host->clk); ++ if (ret) ++ goto out; ++ ++#ifdef CONFIG_ARCH_BSP ++ spi_nor_switch_spi_type(host); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ return 0; ++ ++out: ++#ifdef CONFIG_ARCH_BSP ++ mutex_unlock(host->lock); ++#else ++ mutex_unlock(&host->lock); ++#endif /* CONFIG_ARCH_BSP */ ++ return ret; ++} ++ ++static void bsp_spi_nor_unprep(struct spi_nor *nor) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ ++ clk_disable_unprepare(host->clk); ++#ifdef CONFIG_ARCH_BSP ++ mutex_unlock(host->lock); ++ mutex_unlock(&fmc_switch_mutex); ++#else ++ mutex_unlock(&host->lock); ++#endif /* CONFIG_ARCH_BSP */ ++} ++ ++static int bsp_spi_nor_op_reg(struct spi_nor *nor, ++ u8 opcode, size_t len, u8 optype) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ u32 reg; ++ ++ reg = fmc_cmd_cmd1(opcode); ++ writel(reg, host->regbase + FMC_CMD); ++ ++ reg = fmc_data_num_cnt((unsigned int)len); ++ writel(reg, host->regbase + FMC_DATA_NUM); ++ ++#ifdef CONFIG_ARCH_BSP ++ reg = op_cfg_fm_cs(priv->chipselect) | OP_CFG_OEN_EN; ++#else ++ reg = op_cfg_fm_cs(priv->chipselect); ++#endif /* CONFIG_ARCH_BSP */ ++ writel(reg, host->regbase + FMC_OP_CFG); ++ ++ writel(0xff, host->regbase + FMC_INT_CLR); ++ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; ++ writel(reg, host->regbase + FMC_OP); ++ ++ return bsp_spi_nor_wait_op_finish(host); ++} ++ ++static int bsp_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, ++ size_t len) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ int ret; ++ ++ ret = 
bsp_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); ++ if (ret) ++ return ret; ++ ++ memcpy_fromio(buf, host->iobase, len); ++ return 0; ++} ++ ++static int bsp_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, ++ const u8 *buf, size_t len) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ ++ if (len) ++ memcpy_toio(host->iobase, buf, len); ++ ++ return bsp_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); ++} ++ ++static int bsp_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, ++ dma_addr_t dma_buf, size_t len, u8 op_type) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ u8 if_type = 0; ++ u32 reg; ++ ++ reg = readl(host->regbase + FMC_CFG); ++ reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); ++ reg |= FMC_CFG_OP_MODE_NORMAL; ++ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES ++ : SPI_NOR_ADDR_MODE_3BYTES; ++ writel(reg, host->regbase + FMC_CFG); ++ ++ writel(start_off, host->regbase + FMC_ADDRL); ++ ++#ifdef CONFIG_ARCH_BSP ++ reg = (unsigned int)dma_buf; ++ writel(reg, host->regbase + FMC_DMA_SADDR_D0); ++ ++#ifdef CONFIG_64BIT ++ reg = (dma_buf & FMC_DMA_SADDRH_MASK) >> 32; ++ writel(reg, host->regbase + FMC_DMA_SADDRH_D0); ++#endif ++#else ++ writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ writel(fmc_dma_len_set(len), host->regbase + FMC_DMA_LEN); ++ ++ reg = op_cfg_fm_cs(priv->chipselect); ++ if (op_type == FMC_OP_READ) ++ if_type = bsp_spi_nor_get_if_type(nor->read_proto); ++ else ++ if_type = bsp_spi_nor_get_if_type(nor->write_proto); ++ reg |= op_cfg_mem_if_type(if_type); ++ if (op_type == FMC_OP_READ) ++ reg |= op_cfg_dummy_num(nor->read_dummy >> 3); ++ ++#ifdef CONFIG_ARCH_BSP ++ reg |= OP_CFG_OEN_EN; ++#endif /* CONFIG_ARCH_BSP */ ++ ++ writel(reg, host->regbase + FMC_OP_CFG); ++ ++ writel(0xff, host->regbase + FMC_INT_CLR); ++ reg = op_ctrl_rw_op(op_type) | OP_CTRL_DMA_OP_READY; ++ reg |= (op_type == FMC_OP_READ) ? 
++ op_ctrl_rd_opcode(nor->read_opcode) : ++ op_ctrl_wr_opcode(nor->program_opcode); ++ writel(reg, host->regbase + FMC_OP_DMA); ++ ++ return bsp_spi_nor_wait_op_finish(host); ++} ++ ++static ssize_t bsp_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, ++ u_char *read_buf) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ size_t offset; ++ int ret; ++ ++#ifdef CONFIG_ARCH_BSP ++ for (offset = 0; offset < len; offset += host->dma_len) { ++ size_t trans = min_t(size_t, host->dma_len, len - offset); ++#else ++ for (offset = 0; offset < len; offset += FMC_DMA_MAX_LEN) { ++ size_t trans = min_t(size_t, FMC_DMA_MAX_LEN, len - offset); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ ret = bsp_spi_nor_dma_transfer(nor, ++ from + offset, host->dma_buffer, trans, FMC_OP_READ); ++ if (ret) { ++ dev_warn(nor->dev, "DMA read timeout\n"); ++ return ret; ++ } ++ ++ ret = memcpy_s(read_buf + offset, trans, host->buffer, trans); ++ if (ret) { ++ printk("%s:memcpy_s failed\n", __func__); ++ return ret; ++ } ++ } ++ ++ return len; ++} ++ ++static ssize_t bsp_spi_nor_write(struct spi_nor *nor, loff_t to, ++ size_t len, const u_char *write_buf) ++{ ++ struct fmc_priv *priv = nor->priv; ++ struct fmc_host *host = priv->host; ++ size_t offset; ++ int ret; ++ ++#ifdef CONFIG_ARCH_BSP ++ for (offset = 0; offset < len; offset += host->dma_len) { ++ size_t trans = min_t(size_t, host->dma_len, len - offset); ++#else ++ for (offset = 0; offset < len; offset += FMC_DMA_MAX_LEN) { ++ size_t trans = min_t(size_t, FMC_DMA_MAX_LEN, len - offset); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ ret = memcpy_s(host->buffer, trans, write_buf + offset, trans); ++ if (ret) { ++ printk("%s,memcpy_s failed\n", __func__); ++ return ret; ++ } ++ ++ ret = bsp_spi_nor_dma_transfer(nor, ++ to + offset, host->dma_buffer, trans, FMC_OP_WRITE); ++ if (ret) { ++ dev_warn(nor->dev, "DMA write timeout\n"); ++ return ret; ++ } ++ } ++ ++ return len; ++} ++ ++/** ++ * parse partitions info and register spi flash device as mtd device. ++ */ ++#ifdef CONFIG_ARCH_BSP ++static int bsp_snor_device_register(struct mtd_info *mtd) ++{ ++ int ret; ++ ++ /* ++ * We do not add the whole spi flash as a mtdblock device, ++ * To avoid the number of nand partition +1. ++ */ ++ INIT_LIST_HEAD(&mtd->partitions); ++ ret = parse_mtd_partitions(mtd, NULL, NULL); ++ ++ return ret; ++} ++#endif /* CONFIG_ARCH_BSP */ ++ ++static const struct spi_nor_controller_ops bsp_controller_ops = { ++ .prepare = bsp_spi_nor_prep, ++ .unprepare = bsp_spi_nor_unprep, ++ .read_reg = bsp_spi_nor_read_reg, ++ .write_reg = bsp_spi_nor_write_reg, ++ .read = bsp_spi_nor_read, ++ .write = bsp_spi_nor_write, ++}; ++ ++/** ++ * Get spi flash device information and register it as a mtd device. 
++ */ ++static int bsp_spi_nor_register(struct device_node *np, ++ struct fmc_host *host) ++{ ++ struct spi_nor_hwcaps hwcaps = { ++ .mask = SNOR_HWCAPS_READ | ++ SNOR_HWCAPS_READ_FAST | ++ SNOR_HWCAPS_READ_1_1_2 | ++#ifdef CONFIG_ARCH_BSP ++ SNOR_HWCAPS_READ_1_2_2 | ++#else ++ ++ SNOR_HWCAPS_READ_1_1_4 | ++#endif ++ SNOR_HWCAPS_PP, ++ }; ++ struct device *dev = NULL; ++ struct spi_nor *nor = NULL; ++ struct fmc_priv *priv = NULL; ++ struct mtd_info *mtd = NULL; ++ int ret; ++ ++ if (!host || !host->dev) ++ return -ENXIO; ++ ++ dev = host->dev; ++ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); ++ if (!nor) ++ return -ENOMEM; ++ ++ nor->dev = dev; ++ spi_nor_set_flash_node(nor, np); ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ ret = of_property_read_u32(np, "reg", &priv->chipselect); ++ if (ret) { ++ dev_err(dev, "There's no reg property for %pOF\n", ++ np); ++ return ret; ++ } ++ ++#ifdef CONFIG_ARCH_BSP ++ if (priv->chipselect != host->num_chip) { ++ dev_warn(dev, " The CS: %d states in device trees isn't real " \ ++ "chipselect on board\n, using CS: %d instead. ", ++ priv->chipselect, host->num_chip); ++ priv->chipselect = host->num_chip; ++ } ++#endif /* CONFIG_ARCH_BSP */ ++ ++ ret = of_property_read_u32(np, "spi-max-frequency", ++ &priv->clkrate); ++ if (ret) { ++ dev_err(dev, "There's no spi-max-frequency property for %pOF\n", ++ np); ++ return ret; ++ } ++ priv->host = host; ++ nor->priv = priv; ++ nor->controller_ops = &bsp_controller_ops; ++ ++#ifndef CONFIG_CLOSE_SPI_8PIN_4IO ++ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4 | ++ SNOR_HWCAPS_READ_1_4_4 | ++ SNOR_HWCAPS_PP_1_1_4 | ++ SNOR_HWCAPS_PP_1_4_4; ++#endif ++ ret = spi_nor_scan(nor, NULL, &hwcaps); ++ if (ret) ++ return ret; ++ ++ mtd = &nor->mtd; ++ mtd->name = np->name; ++#ifdef CONFIG_ARCH_BSP ++ ret = bsp_snor_device_register(mtd); ++ if (ret < 0) ++ return ret; ++ /* current chipselect has scanned, to detect next chipselect */ ++ fmc_cs_user[host->num_chip]++; ++#else ++ ret = mtd_device_register(mtd, NULL, 0); ++ if (ret) ++ return ret; ++ host->num_chip++; ++#endif /* CONFIG_ARCH_BSP */ ++ ++ host->nor[host->num_chip] = nor; ++ return 0; ++} ++ ++static void bsp_spi_nor_unregister_all(struct fmc_host *host) ++{ ++ int i; ++ ++ for (i = 0; i < host->num_chip; i++) ++ mtd_device_unregister(&host->nor[i]->mtd); ++} ++ ++static int bsp_spi_nor_register_all(struct fmc_host *host) ++{ ++ struct device *dev = host->dev; ++ struct device_node *np; ++ int ret; ++ for_each_available_child_of_node(dev->of_node, np) { ++#ifdef CONFIG_ARCH_BSP ++ if (host->num_chip == FMC_MAX_CHIP_NUM) { ++ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); ++ break; ++ } ++ if (fmc_cs_user[host->num_chip]) { ++ dev_warn(dev, "Current CS(%d) is occupied.\n", ++ host->num_chip); ++ continue; ++ } ++#endif /* CONFIG_ARCH_BSP */ ++ ret = bsp_spi_nor_register(np, host); ++ if (ret) ++ goto fail; ++ ++#ifdef CONFIG_ARCH_BSP ++ host->num_chip++; ++#endif /* CONFIG_ARCH_BSP */ ++ ++ } ++ ++ return 0; ++ ++fail: ++ bsp_spi_nor_unregister_all(host); ++ return ret; ++} ++ ++static int bsp_spi_nor_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++#ifndef CONFIG_ARCH_BSP ++ struct resource *res; ++#endif /* CONFIG_ARCH_BSP */ ++ struct fmc_host *host; ++ int ret; ++#ifdef CONFIG_ARCH_BSP ++ struct bsp_fmc *fmc = dev_get_drvdata(dev->parent); ++ if (!fmc) { ++ dev_err(&pdev->dev, "get mfd fmc devices failed\n"); ++ return -ENXIO; ++ } ++#endif /* 
CONFIG_ARCH_BSP */ ++ ++ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); ++ if (!host) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, host); ++ host->dev = dev; ++ ++#ifdef CONFIG_ARCH_BSP ++ host->regbase = fmc->regbase; ++ host->iobase = fmc->iobase; ++ host->clk = fmc->clk; ++ host->lock = &fmc->lock; ++ host->buffer = fmc->buffer; ++ host->dma_buffer = fmc->dma_buffer; ++ host->dma_len = fmc->dma_len; ++#else ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); ++ host->regbase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(host->regbase)) ++ return PTR_ERR(host->regbase); ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); ++ host->iobase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(host->iobase)) ++ return PTR_ERR(host->iobase); ++ ++ host->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(host->clk)) ++ return PTR_ERR(host->clk); ++ ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); ++ if (ret) { ++ dev_warn(dev, "Unable to set dma mask\n"); ++ return ret; ++ } ++ ++ host->buffer = dmam_alloc_coherent(dev, FMC_DMA_MAX_LEN, ++ &host->dma_buffer, GFP_KERNEL); ++ if (!host->buffer) ++ return -ENOMEM; ++#endif /* CONFIG_ARCH_BSP */ ++ ++ ret = clk_prepare_enable(host->clk); ++ if (ret) ++ return ret; ++ ++#ifndef CONFIG_ARCH_BSP ++ mutex_init(&host->lock); ++#endif /* CONFIG_ARCH_BSP */ ++ bsp_spi_nor_init(host); ++ ret = bsp_spi_nor_register_all(host); ++ if (ret) ++#ifdef CONFIG_ARCH_BSP ++ dev_warn(dev, "spi nor register fail!\n"); ++#else ++ mutex_destroy(&host->lock); ++#endif /* CONFIG_ARCH_BSP */ ++ ++ clk_disable_unprepare(host->clk); ++ ++ return ret; ++} ++ ++static int bsp_spi_nor_remove(struct platform_device *pdev) ++{ ++ struct fmc_host *host = platform_get_drvdata(pdev); ++ ++ if (host && host->clk) { ++ bsp_spi_nor_unregister_all(host); ++#ifndef CONFIG_ARCH_BSP ++ mutex_destroy(&host->lock); ++#endif /* CONFIG_ARCH_BSP */ ++ clk_disable_unprepare(host->clk); ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_ARCH_BSP ++ ++static void bsp_spi_nor_driver_shutdown(struct platform_device *pdev) ++{ ++ int i; ++ struct fmc_host *host = platform_get_drvdata(pdev); ++ ++ if (!host || !host->clk) ++ return; ++ ++ mutex_lock(host->lock); ++ clk_prepare_enable(host->clk); ++ ++ spi_nor_switch_spi_type(host); ++ for (i = 0; i < host->num_chip; i++) ++ spi_nor_driver_shutdown(host->nor[i]); ++ ++ clk_disable_unprepare(host->clk); ++ mutex_unlock(host->lock); ++ dev_dbg(host->dev, "End of driver shutdown\n"); ++} ++ ++#ifdef CONFIG_PM ++static int bsp_spi_nor_driver_suspend(struct platform_device *pdev, ++ pm_message_t state) ++{ ++ int i; ++ struct fmc_host *host = platform_get_drvdata(pdev); ++ ++ if (!host || !host->clk) ++ return 0; ++ ++ mutex_lock(host->lock); ++ clk_prepare_enable(host->clk); ++ ++ spi_nor_switch_spi_type(host); ++ for (i = 0; i < host->num_chip; i++) ++ bsp_spi_nor_suspend(host->nor[i], state); ++ ++ clk_disable_unprepare(host->clk); ++ mutex_unlock(host->lock); ++ dev_dbg(host->dev, "End of suspend\n"); ++ ++ return 0; ++} ++ ++static int bsp_spi_nor_driver_resume(struct platform_device *pdev) ++{ ++ int i; ++ struct fmc_host *host = platform_get_drvdata(pdev); ++ ++ if (!host || !host->clk) ++ return 0; ++ ++ mutex_lock(host->lock); ++ clk_prepare_enable(host->clk); ++ ++ spi_nor_switch_spi_type(host); ++ for (i = 0; i < host->num_chip; i++) ++ bsp_spi_nor_resume(host->nor[i]); ++ ++ mutex_unlock(host->lock); ++ dev_dbg(host->dev, "End of resume\n"); ++ ++ return 0; ++} ++#endif /* End of CONFIG_PM */ 
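/*
 * A minimal sketch, assuming the fmc_host fields above: the shutdown,
 * suspend and resume callbacks all repeat one bracket -- take host->lock,
 * enable the controller clock, force the flash type back to SPI NOR, then
 * visit every scanned chip. The helper name below is hypothetical; the
 * shutdown visitor fits directly, suspend would need the pm_message_t
 * threaded through, and resume would pass disable_clk = false because it
 * intentionally leaves the clock running.
 */
static void bsp_spi_nor_pm_walk(struct fmc_host *host,
				void (*visit)(struct spi_nor *nor),
				bool disable_clk)
{
	int i;

	mutex_lock(host->lock);
	clk_prepare_enable(host->clk);
	spi_nor_switch_spi_type(host);

	for (i = 0; i < host->num_chip; i++)
		visit(host->nor[i]);

	if (disable_clk)
		clk_disable_unprepare(host->clk);
	mutex_unlock(host->lock);
}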
++#endif /* CONFIG_ARCH_BSP */ ++ ++static const struct of_device_id bsp_spi_nor_dt_ids[] = { ++ { .compatible = "vendor,fmc-spi-nor"}, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, bsp_spi_nor_dt_ids); ++ ++static struct platform_driver bsp_spi_nor_driver = { ++ .driver = { ++ .name = "bsp-sfc", ++ .of_match_table = bsp_spi_nor_dt_ids, ++ }, ++ .probe = bsp_spi_nor_probe, ++ .remove = bsp_spi_nor_remove, ++#ifdef CONFIG_ARCH_BSP ++ .shutdown = bsp_spi_nor_driver_shutdown, ++#ifdef CONFIG_PM ++ .suspend = bsp_spi_nor_driver_suspend, ++ .resume = bsp_spi_nor_driver_resume, ++#endif ++#endif /* CONFIG_ARCH_BSP */ ++}; ++module_platform_driver(bsp_spi_nor_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("SPI Nor Flash Controller Driver"); +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c +index 1b0c6770c..88a4a8aa4 100644 +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -435,6 +435,10 @@ int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id, + ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, + SPI_NOR_MAX_ID_LEN); + } ++#ifdef CONFIG_ARCH_BSP ++ if (ret) ++ dev_dbg(nor->dev, "error %d reading SR\n", ret); ++#endif + return ret; + } + +@@ -1818,7 +1822,15 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) + ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len); + if (ret) + return ret; +- ++#ifdef CONFIG_ARCH_BSP ++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT ++ if ((nor->level) && (addr < nor->end_addr)) { ++ dev_err(nor->dev, "Error: The erase area was locked\n"); ++ spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len); ++ return -EINVAL; ++ } ++#endif ++#endif + /* whole-chip erase? */ + if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { + unsigned long timeout; +@@ -2060,6 +2072,14 @@ static const struct flash_info *spi_nor_detect(struct spi_nor *nor) + return ERR_PTR(ret); + } + ++#ifdef CONFIG_ARCH_BSP ++ if ((id[0] == 0xff) || (id[0] == 0x00)) { ++ dev_err(nor->dev, "unrecognized Manufacturer ID\n"); ++ return ERR_PTR(-ENODEV); ++ } ++ printk("cmd read ID 0x%x 0x%x 0x%x\n", id[0], id[1], id[2]); ++#endif /* CONFIG_ARCH_BSP */ ++ + /* Cache the complete flash ID. 
*/ + nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL); + if (!nor->id) +@@ -2142,6 +2162,15 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + ret = spi_nor_prep_and_lock_pe(nor, to, len); + if (ret) + return ret; ++#ifdef CONFIG_ARCH_BSP ++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT ++ if (nor->level && (to < nor->end_addr)) { ++ dev_err(nor->dev, "Error: The DMA write area was locked\n"); ++ spi_nor_unlock_and_unprep(nor); ++ return -EINVAL; ++ } ++#endif ++#endif + + for (i = 0; i < len; ) { + ssize_t written; +@@ -2189,10 +2218,151 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, + + write_err: + spi_nor_unlock_and_unprep_pe(nor, to, len); ++ return ret; ++} ++ ++#ifdef CONFIG_ARCH_BSP ++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT ++static void spi_lock_update_address(struct spi_nor *nor, const struct flash_info *info) ++{ ++ unsigned int lock_level_max, sectorsize, chipsize; ++ unsigned char mfr_id; ++ ++ if (!nor->level) { ++ nor->end_addr = 0; ++ dev_warn(nor->dev, "all blocks is unlocked.\n"); ++ return; ++ } ++ ++ sectorsize = info->sector_size; ++ chipsize = sectorsize * info->n_sectors; ++ lock_level_max = nor->lock_level_max; ++ ++ mfr_id = JEDEC_MFR(info); ++ ++ /* general case */ ++ nor->end_addr = chipsize >> (lock_level_max - nor->level); ++} ++ ++__maybe_unused static unsigned char bsp_bp_to_level(struct spi_nor *nor, ++ const struct flash_info *info, unsigned int bp_num) ++{ ++ int ret; ++ unsigned char val; ++ unsigned char level; ++ unsigned int chipsize; ++ ++ ret = spi_nor_wait_till_ready(nor); ++ BUG_ON(ret); ++ ++ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error %d reading SR\n", ret); ++ return ret; ++ } ++ ++ if (bp_num == BP_NUM_3) ++ level = (val & SPI_NOR_SR_BP_MASK_3) >> SPI_NOR_SR_BP0_SHIFT; ++ else ++ level = (val & SPI_NOR_SR_BP_MASK_4) >> SPI_NOR_SR_BP0_SHIFT; ++ ++ dev_dbg(nor->dev, "the current level[%d]\n", level); ++ ++ if (bp_num == BP_NUM_4) { ++ /* 9-15:(256 blocks, protected all) */ ++ nor->lock_level_max = LOCK_LEVEL_MAX(bp_num) - 5; /* level 10 = 16 - 5*/ ++ chipsize = info->sector_size * info->n_sectors; ++ } else { ++ nor->lock_level_max = LOCK_LEVEL_MAX(bp_num); ++ } ++ dev_dbg(nor->dev, "Get the max bp level: [%d]\n", ++ nor->lock_level_max); ++ ++ return level; ++} ++ ++static void bsp_get_spi_lock_info(struct spi_nor *nor, const struct flash_info *info) ++{ ++ unsigned int chipsize; ++ struct device *dev = nor->dev; ++ ++ chipsize = info->sector_size * info->n_sectors; ++ ++ spi_lock_update_address(nor, info); ++ if (nor->end_addr) ++ dev_info(dev, "Address range [0 => %#x] is locked.\n", ++ nor->end_addr); ++ return; ++} ++#endif/* CONFIG_BSP_SPI_BLOCK_PROTECT */ ++ ++__maybe_unused static int spi_nor_sr3_to_reset(struct spi_nor *nor) ++{ ++ int ret; ++ unsigned char val; ++ ++ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR3, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error %d reading Status Reg 3.\n", ret); ++ return ret; ++ } ++ ++ if (SPI_NOR_GET_RST(val)) { ++ dev_dbg(nor->dev, "Device has worked on RESET#.\n"); ++ return 0; ++ } ++ ++ dev_dbg(nor->dev, "Start to enable RESET# function.\n"); ++ val = SPI_NOR_SET_RST(val); ++ ++ nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR3, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error while writing Status Reg 3.\n"); ++ return ret; ++ } ++ ++ dev_dbg(nor->dev, "Enable RESET# function success.\n"); ++ ++ return 0; ++} ++ ++static int 
spi_nor_reset_pin_enable(struct spi_nor *nor, ++ const struct flash_info *info) ++{ ++ switch (JEDEC_MFR(info)) { ++ default: ++ return 0; ++ } ++} ++ ++static int spi_nor_clear_dtr_mode(struct spi_nor *nor,const struct flash_info *info) ++{ ++ int ret = 0; + + return ret; + } + ++int spi_nor_dtrclear_pinreset(struct spi_nor *nor, const struct flash_info *info, const struct spi_nor_hwcaps *hwcaps) ++{ ++ int ret; ++ ++ ret = spi_nor_clear_dtr_mode(nor, info); ++ if (ret) { ++ dev_err(nor->dev, "Clear Dtr Mode Fail.\n"); ++ return ret; ++ } ++ ++ if (!(hwcaps->mask & (SNOR_HWCAPS_READ_1_1_4 | SNOR_HWCAPS_READ_1_4_4))) { ++ ret = spi_nor_reset_pin_enable(nor, info); ++ if (ret) { ++ dev_err(nor->dev, "Enable RESET# Fail.\n"); ++ return ret; ++ } ++ } ++ return 0; ++} ++#endif /*CONFIG_ARCH_BSP*/ ++ + static int spi_nor_check(struct spi_nor *nor) + { + if (!nor->dev || +@@ -3370,6 +3540,98 @@ static const struct flash_info *spi_nor_match_name(struct spi_nor *nor, + return NULL; + } + ++#ifdef CONFIG_ARCH_BSP ++ ++static int bsp_spi_nor_init_params(struct spi_nor *nor) ++{ ++ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL); ++ if (!nor->params) ++ return -ENOMEM; ++ memcpy(nor->params, nor->info->params, sizeof(*nor->params)); ++ ++ spi_nor_init_default_params(nor); ++ ++ spi_nor_manufacturer_init_params(nor); ++ ++ spi_nor_late_init_params(nor); ++ ++ return 0; ++} ++ ++void spi_nor_driver_shutdown(struct spi_nor *nor) ++{ ++ /* disable 4-byte addressing if the device exceeds 16MiB */ ++ if (nor->addr_nbytes == 4) ++ nor->params->set_4byte_addr_mode(nor, false); ++ ++ return; ++} ++ ++#ifdef CONFIG_PM ++int bsp_spi_nor_suspend(struct spi_nor *nor, pm_message_t state) ++{ ++ return spi_nor_wait_till_ready(nor); ++} ++int bsp_spi_nor_resume(struct spi_nor *nor) ++{ ++ int ret; ++ const struct flash_info *info = NULL; ++ ++ struct spi_nor_hwcaps hwcaps = { ++ .mask = SNOR_HWCAPS_READ | ++ SNOR_HWCAPS_READ_FAST | ++ SNOR_HWCAPS_READ_1_1_2 | ++#ifdef CONFIG_ARCH_BSP ++ SNOR_HWCAPS_READ_1_2_2 | ++#else ++ ++ SNOR_HWCAPS_READ_1_1_4 | ++#endif ++ SNOR_HWCAPS_PP, ++ }; ++#ifndef CONFIG_CLOSE_SPI_8PIN_4IO ++ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4 | ++ SNOR_HWCAPS_READ_1_4_4 | ++ SNOR_HWCAPS_PP_1_1_4 | ++ SNOR_HWCAPS_PP_1_4_4; ++#endif ++ ++ if (!info) ++ info = spi_nor_detect(nor); ++ ++ /* Quad mode takes precedence over fast/normal */ ++#ifdef CONFIG_ARCH_BSP ++ if (info->params) ++ { ++ bsp_spi_nor_init_params(nor); ++ ++ }else ++#endif /* CONFIG_ARCH_BSP */ ++ { ++ ret = spi_nor_init_params(nor); ++ if (ret) ++ return ret; ++ } ++ ret = spi_nor_setup(nor, &hwcaps); ++ if (ret) ++ return ret; ++#ifdef CONFIG_ARCH_BSP ++ ret = spi_nor_dtrclear_pinreset(nor, info, &hwcaps); ++ if (ret) { ++ dev_err(nor->dev, "Clear Dtr Mode or Enable RESET Fail.\n"); ++ return ret; ++ } ++#endif ++ /* enable 4-byte addressing if the device exceeds 16MiB */ ++ if (nor->addr_nbytes == 4) { ++ nor->params->set_4byte_addr_mode(nor, true); ++ } ++ ++ return 0; ++} ++#endif /* End of CONFIG_PM */ ++#endif /* CONFIG_ARCH_BSP */ ++ + static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, + const char *name) + { +@@ -3378,9 +3640,14 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, + if (name) + info = spi_nor_match_name(nor, name); + /* Try to auto-detect if chip name wasn't specified or not found */ +- if (!info) ++#ifdef CONFIG_ARCH_BSP ++ if (!info) { ++ dev_info(nor->dev, "SPI Nor ID Table Version %s\n", SPI_NOR_IDS_VER); + return spi_nor_detect(nor); 
+-
++	}
++	if (IS_ERR_OR_NULL(info))
++		return ERR_PTR(-ENOENT);
++#endif
+ 	/*
+ 	 * If caller has specified name of flash model that can normally be
+ 	 * detected using JEDEC, let's verify it.
+@@ -3506,10 +3773,23 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ 
+ 	mutex_init(&nor->lock);
+ 
++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT
++	/* NOR block protection support */
++	bsp_get_spi_lock_info(nor, info);
++#endif /* CONFIG_BSP_SPI_BLOCK_PROTECT */
+ 	/* Init flash parameters based on flash_info struct and SFDP */
+-	ret = spi_nor_init_params(nor);
+-	if (ret)
+-		return ret;
++#ifdef CONFIG_ARCH_BSP
++	if (info->params)
++	{
++		bsp_spi_nor_init_params(nor);
++	} else
++#endif /* CONFIG_ARCH_BSP */
++	{
++		ret = spi_nor_init_params(nor);
++		if (ret) {
++			return ret;
++		}
++	}
+ 
+ 	if (spi_nor_use_parallel_locking(nor))
+ 		init_waitqueue_head(&nor->rww.wait);
+@@ -3525,6 +3805,22 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ 	if (ret)
+ 		return ret;
+ 
++#ifdef CONFIG_ARCH_BSP
++	ret = spi_nor_dtrclear_pinreset(nor, info, hwcaps);
++	if (ret) {
++		dev_err(nor->dev, "Clear Dtr Mode or Enable RESET Fail.\n");
++		return ret;
++	}
++
++	/* choose a suitable clock rate */
++	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) /* device supports dual or quad */
++	    && (hwcaps->mask & (~SNOR_HWCAPS_READ)) /* controller supports fast mode */
++	    && info->clkrate)
++		nor->clkrate = info->clkrate;
++	else
++		nor->clkrate = 24000000;
++#endif /* CONFIG_ARCH_BSP */
++
+ 	/* Send all the required SPI flash commands to initialize device */
+ 	ret = spi_nor_init(nor);
+ 	if (ret)
+@@ -3532,7 +3828,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ 
+ 	/* No mtd_info fields should be used up to this point. */
+ 	spi_nor_set_mtd_info(nor);
+-
++#ifdef CONFIG_ARCH_BSP
++	mtd->erasesize = info->sector_size;
++#endif
+ 	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ 		(long long)mtd->size >> 10);
+ 
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index 9217379b9..7b084ce2f 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -540,10 +540,38 @@ struct flash_info {
+ 
+ 	u8 mfr_flags;
+ 
++#ifdef CONFIG_ARCH_BSP
++	const struct spi_nor_flash_parameter *params;
++	u32 clkrate;
++#endif /*CONFIG_ARCH_BSP*/
+ 	const struct spi_nor_otp_organization otp_org;
+ 	const struct spi_nor_fixups *fixups;
+ };
+ 
++#ifdef CONFIG_ARCH_BSP
++
++#define JEDEC_MFR(info) ((info)->id[0])
++
++#define SNOR_RD_MODES \
++	(SNOR_HWCAPS_READ | \
++	SNOR_HWCAPS_READ_FAST | \
++	SNOR_HWCAPS_READ_1_1_2 | \
++	SNOR_HWCAPS_READ_1_2_2 | \
++	SNOR_HWCAPS_READ_1_1_4 | \
++	SNOR_HWCAPS_READ_1_4_4)
++
++#define SNOR_WR_MODES \
++	(SNOR_HWCAPS_PP | \
++	SNOR_HWCAPS_PP_1_1_4)
++
++#define PARAMS(_name) .params = &_name##_params
++
++/* Different from spi-max-frequency in DTS, the clk here stands for the clock
++ * rate on the SPI interface; it is half of the FMC CRG configuration */
++#define CLK_MHZ_2X(clk) .clkrate = (clk * 2000000),
++#define SPI_NOR_IDS_VER "1.2"
++#endif /*CONFIG_ARCH_BSP*/
++
+ #define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
+ #define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)
+ 
+diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
+index 5a274b99f..17dfef562 100644
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -85,6 +85,7 @@ source "drivers/net/ethernet/i825xx/Kconfig"
+ source "drivers/net/ethernet/ibm/Kconfig"
+ source "drivers/net/ethernet/intel/Kconfig"
+ source "drivers/net/ethernet/xscale/Kconfig"
++source "drivers/net/ethernet/vendor/Kconfig"
+ 
+ config JME
+ 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
+index 0d872d4ef..649dd5b61 100644
+--- a/drivers/net/ethernet/Makefile
++++ b/drivers/net/ethernet/Makefile
+@@ -45,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+ obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
+ obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
+ obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
++obj-$(CONFIG_NET_VENDOR_BSP) += vendor/
+ obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
+ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
+ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
+diff --git a/drivers/net/ethernet/vendor/Kconfig b/drivers/net/ethernet/vendor/Kconfig
+new file mode 100644
+index 000000000..3b4168ea5
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/Kconfig
+@@ -0,0 +1,22 @@
++#
++# Vendor device configuration
++#
++
++config NET_VENDOR_BSP
++	bool "Vendor net devices"
++	default y
++	depends on OF || ACPI
++	depends on ARM || ARM64 || COMPILE_TEST
++	help
++	  If you have a network (Ethernet) card belonging to this class, say Y.
++
++	  Note that the answer to this question doesn't directly affect the
++	  kernel: saying N will just cause the configurator to skip all
++	  the questions about Vendor devices. If you say Y, you will be asked
++	  for your specific card in the following questions.
++
++if NET_VENDOR_BSP
++
++source "drivers/net/ethernet/vendor/gmac/Kconfig"
++
++endif # NET_VENDOR_BSP
+diff --git a/drivers/net/ethernet/vendor/Makefile b/drivers/net/ethernet/vendor/Makefile
+new file mode 100644
+index 000000000..041f50ee2
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for the network device drivers.
++#
++
++obj-$(CONFIG_ETH_GMAC) += gmac/
+diff --git a/drivers/net/ethernet/vendor/gmac/Kconfig b/drivers/net/ethernet/vendor/gmac/Kconfig
+new file mode 100644
+index 000000000..caee0e7fe
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/Kconfig
+@@ -0,0 +1,104 @@
++#
++# gmac family network device configuration
++#
++
++menuconfig ETH_GMAC
++	tristate "eth gmac family network device support"
++	select PHYLIB
++	select RESET_CONTROLLER
++	help
++	  This selects the eth gmac family network device.
++	  The gigabit switch fabric (GSF) receives and transmits data over Ethernet
++	  ports at 10/100/1000 Mbit/s in full-duplex or half-duplex mode.
++	  The Ethernet port exchanges data with the CPU port, and supports
++	  the energy efficient Ethernet (EEE) and wake on LAN (WoL) functions.
++
++config GMAC
++	tristate "eth gmac family network device support"
++	select PHYLIB
++	select RESET_CONTROLLER
++	help
++	  This selects the eth gmac family network device.
++	  The gigabit switch fabric (GSF) receives and transmits data over Ethernet
++	  ports at 10/100/1000 Mbit/s in full-duplex or half-duplex mode.
++	  The Ethernet port exchanges data with the CPU port, and supports
++	  the energy efficient Ethernet (EEE) and wake on LAN (WoL) functions.
++
++config GMAC_EXTERNAL_PHY
++	bool "gmac external phy driver support"
++	depends on GMAC
++	default n
++	help
++	  This defines the use of the gmac's external phy driver.
++	  The default value is false.
++
++config GMAC_HAS_INTERNAL_PHY
++	bool "gmac internal fephy driver support"
++	default n
++	help
++	  This indicates that the MAC supports an internal fephy.
++	  The default value is disabled.
++
++if ETH_GMAC
++
++config RX_FLOW_CTRL_SUPPORT
++	bool "rx flow ctrl supported"
++	default y
++	help
++	  Rx flow ctrl supported, default is enabled.
++	  When we receive a pause frame,
++	  we will stop transmitting data frames for some time.
++	  The stopping time is the time filled in the pause frame.
++
++config TX_FLOW_CTRL_SUPPORT
++	bool "tx flow ctrl supported"
++	default y
++	help
++	  Tx flow ctrl supported, default is enabled.
++	  When we have no buffer to receive packets,
++	  we will send a pause frame.
++	  When buffer becomes available, we will send a zero-quanta pause frame.
++
++config TX_FLOW_CTRL_PAUSE_TIME
++	hex "tx flow ctrl pause time"
++	default "0xFFFF"
++	help
++	  The pause time filled in the transmitted pause frame.
++	  The unit is the time for transmitting 512 bits of data.
++	  This value is 16 bits wide, so it ranges from 0x0000 to 0xFFFF.
++	  The default value is 0xFFFF.
++
++config TX_FLOW_CTRL_PAUSE_INTERVAL
++	hex "tx flow ctrl pause interval"
++	default "0xFFFF"
++	help
++	  The interval time between transmitted pause frames.
++	  When the remaining space in the receive queue is below the tx flow ctrl active threshold,
++	  we will wait this interval before transmitting the next pause frame.
++	  The unit is the time for transmitting 512 bits of data.
++	  This value is 16 bits wide, so it ranges from 0x0000 to 0xFFFF.
++	  The default value is 0xFFFF.
++
++config TX_FLOW_CTRL_ACTIVE_THRESHOLD
++	int "tx flow ctrl active threshold"
++	default "16"
++	range 1 127
++	help
++	  The threshold for activating tx flow ctrl.
++	  When the number of free receive queue descriptors falls below this threshold,
++	  hardware will send a pause frame immediately.
++	  We advise setting this value smaller than 64; larger values are not a good choice.
++	  This value must be smaller than the tx flow ctrl deactive threshold.
++
++config TX_FLOW_CTRL_DEACTIVE_THRESHOLD
++	int "tx flow ctrl deactive threshold"
++	default "32"
++	range 1 127
++	help
++	  The threshold for deactivating tx flow ctrl.
++	  When the number of free receive queue descriptors is greater than or equal to this threshold,
++	  hardware will exit the flow control state.
++	  We advise setting this value smaller than 64; larger values are not a good choice.
++	  This value must be larger than the tx flow ctrl active threshold.
++
++endif # ETH_GMAC
+diff --git a/drivers/net/ethernet/vendor/gmac/Makefile b/drivers/net/ethernet/vendor/gmac/Makefile
+new file mode 100644
+index 000000000..0aa859d47
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/Makefile
+@@ -0,0 +1,11 @@
++ccflags-y += -I$(srctree)/include/linux
++include $(src)/version.mak
++ccflags-y += -DGMAC_KERNEL_VERSION=\"$(GMAC_KERNEL_VERSION)\"
++
++obj-$(CONFIG_GMAC_EXTERNAL_PHY) += gmac_external_phy.o
++obj-y += eth_gmac.o
++eth_gmac-objs := gmac.o gmac_ethtool_ops.o gmac_phy_fixup.o gmac_pm.o gmac_proc.o gmac_netdev_ops.o autoeee/autoeee.o autoeee/phy_id_table.o
++
++ifneq ($(CONFIG_MDIO_BSP_GEMAC), y)
++eth_gmac-objs += gmac_mdio.o
++endif
+diff --git a/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.c b/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.c
+new file mode 100644
+index 000000000..ed7d3865c
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.c
+@@ -0,0 +1,136 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */ ++ ++#include ++#include ++#include "../gmac.h" ++#include "autoeee.h" ++ ++static u32 set_link_stat(struct gmac_netdev_local const *ld) ++{ ++ u32 link_stat = 0; ++ ++ switch (ld->phy->speed) { ++ case SPEED_10: ++ link_stat |= GMAC_SPD_10M; ++ break; ++ case SPEED_100: ++ link_stat |= GMAC_SPD_100M; ++ break; ++ case SPEED_1000: ++ link_stat |= GMAC_SPD_1000M; ++ break; ++ default: ++ break; ++ } ++ return link_stat; ++} ++ ++static void set_eee_clk(struct gmac_netdev_local const *ld, u32 phy_id) ++{ ++ u32 v; ++ ++ if ((phy_id & REALTEK_PHY_MASK) == REALTEK_PHY_ID_8211E) { ++ v = readl(ld->gmac_iobase + EEE_CLK); ++ v &= ~MASK_EEE_CLK; ++ v |= BIT_DISABLE_TX_CLK; ++ writel(v, ld->gmac_iobase + EEE_CLK); ++ } else if ((phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9031) { ++ v = readl(ld->gmac_iobase + EEE_CLK); ++ v &= ~MASK_EEE_CLK; ++ v |= (BIT_DISABLE_TX_CLK | BIT_PHY_KSZ9031); ++ writel(v, ld->gmac_iobase + EEE_CLK); ++ } ++} ++ ++static void enable_eee(struct gmac_netdev_local const *ld) ++{ ++ u32 v; ++ ++ /* EEE_1us: 0x7c for 125M */ ++ writel(0x7c, ld->gmac_iobase + ++ EEE_TIME_CLK_CNT); ++ writel(0x1e0400, ld->gmac_iobase + EEE_TIMER); ++ ++ v = readl(ld->gmac_iobase + EEE_LINK_STATUS); ++ v |= 0x3 << 1; /* auto EEE and ... */ ++ v |= BIT_PHY_LINK_STATUS; /* phy linkup */ ++ writel(v, ld->gmac_iobase + EEE_LINK_STATUS); ++ ++ v = readl(ld->gmac_iobase + EEE_ENABLE); ++ v |= BIT_EEE_ENABLE; /* enable EEE */ ++ writel(v, ld->gmac_iobase + EEE_ENABLE); ++} ++ ++static void set_phy_eee_mode(struct gmac_netdev_local const *ld) ++{ ++ u32 v; ++ if (netif_msg_wol(ld)) ++ pr_info("enter phy-EEE mode\n"); ++ ++ v = readl(ld->gmac_iobase + EEE_ENABLE); ++ v &= ~BIT_EEE_ENABLE; /* disable auto-EEE */ ++ writel(v, ld->gmac_iobase + EEE_ENABLE); ++} ++ ++void init_autoeee(struct gmac_netdev_local *ld) ++{ ++ int phy_id; ++ int eee_available, lp_eee_capable; ++ u32 v, link_stat; ++ struct phy_info *phy_info = NULL; ++ if (ld == NULL || ld->eee_init == NULL || ld->phy == NULL) ++ return; ++ phy_id = ld->phy->phy_id; ++ if (ld->eee_init != NULL) ++ goto eee_init; ++ ++ phy_info = phy_search_ids(phy_id); ++ if (phy_info == NULL) ++ goto not_support; ++ ++ eee_available = phy_info->eee_available; ++ if (netif_msg_wol(ld) && phy_info->name != NULL) ++ pr_info("fit phy_id:0x%x, phy_name:%s, eee:%d\n", phy_info->phy_id, phy_info->name, eee_available); ++ ++ if (!eee_available) ++ goto not_support; ++ ++ if (eee_available == PHY_EEE) { ++ set_phy_eee_mode(ld); ++ return; ++ } ++ ++ ld->eee_init = phy_info->eee_init; ++eee_init: ++ link_stat = set_link_stat(ld); ++ ++ lp_eee_capable = ld->eee_init(ld->phy); ++ if (lp_eee_capable < 0) ++ return; ++ ++ if (ld->phy->link) { ++ if (((u32)lp_eee_capable) & link_stat) { ++ set_eee_clk(ld, phy_id); ++ enable_eee(ld); ++ ++ if (netif_msg_wol(ld)) ++ pr_info("enter auto-EEE mode\n"); ++ } else { ++ if (netif_msg_wol(ld)) ++ pr_info("link partner not support EEE\n"); ++ } ++ } else { ++ v = readl(ld->gmac_iobase + EEE_LINK_STATUS); ++ v &= ~(BIT_PHY_LINK_STATUS); /* phy linkdown */ ++ writel(v, ld->gmac_iobase + EEE_LINK_STATUS); ++ } ++ ++ return; ++ ++not_support: ++ ld->eee_init = NULL; ++ if (netif_msg_wol(ld)) ++ pr_info("non-EEE mode\n"); ++} +diff --git a/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.h b/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.h +new file mode 100644 +index 000000000..924cc604b +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/autoeee/autoeee.h +@@ -0,0 +1,49 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) 
Technologies Co., Ltd. 2020-2023. All rights reserved. ++ */ ++ ++#ifndef _AUTO_EEE_H ++#define _AUTO_EEE_H ++ ++#include "../gmac.h" ++ ++#define NO_EEE 0 ++#define MAC_EEE 1 ++#define PHY_EEE 2 ++#define PARTNER_EEE 2 ++ ++struct phy_info { ++ char *name; ++ int phy_id; ++ char eee_available; /* eee support by this phy */ ++ int (*eee_init)(struct phy_device *phy_dev); ++}; ++ ++/* GMAC register definition */ ++#define EEE_CLK 0x800 ++#define MASK_EEE_CLK (0x3 << 20) ++#define BIT_DISABLE_TX_CLK BIT(21) ++#define BIT_PHY_KSZ9031 BIT(20) ++#define EEE_ENABLE 0x808 ++#define BIT_EEE_ENABLE BIT(0) ++#define EEE_TIMER 0x80C ++#define EEE_LINK_STATUS 0x810 ++#define BIT_PHY_LINK_STATUS BIT(0) ++#define EEE_TIME_CLK_CNT 0x814 ++ ++/* ----------------------------phy register-------------------------------*/ ++/* MMD: MDIO Manageable Device */ ++#define MACR 0x0D ++#define MAADR 0x0E ++#define EEE_DEV 0x3 ++#define EEE_CAPABILITY 0x14 ++#define EEELPAR_DEV 0x7 ++#define EEELPAR 0x3D /* EEE link partner ability register */ ++#define EEE_ADVERTISE 0x3c ++#define LP_1000BASE_EEE BIT(2) ++#define LP_100BASE_EEE BIT(1) ++ ++struct phy_info *phy_search_ids(int phy_id); ++void init_autoeee(struct gmac_netdev_local *ld); ++ ++#endif +diff --git a/drivers/net/ethernet/vendor/gmac/autoeee/phy_id_table.c b/drivers/net/ethernet/vendor/gmac/autoeee/phy_id_table.c +new file mode 100644 +index 000000000..9e8356abc +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/autoeee/phy_id_table.c +@@ -0,0 +1,177 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include "../gmac.h" ++#include "autoeee.h" ++ ++struct phy_info phy_info_table[]; ++ ++struct phy_info *phy_search_ids(int phy_id) ++{ ++ int i; ++ struct phy_info *fit_info = NULL; ++ ++ for (i = 0; phy_info_table[i].name; i++) { ++ if (phy_id == phy_info_table[i].phy_id) { ++ fit_info = &phy_info_table[i]; ++ break; ++ } ++ } ++ ++ return fit_info; ++} ++ ++static inline int phy_mmd_read(struct phy_device *phy_dev, u32 mmd_device, u32 regnum) ++{ ++ phy_write(phy_dev, MACR, mmd_device); /* function = 00 address */ ++ phy_write(phy_dev, MAADR, regnum); ++ phy_write(phy_dev, MACR, 0x4000 | mmd_device); /* function = 01 data */ ++ ++ return phy_read(phy_dev, MAADR); ++} ++ ++static inline int phy_mmd_write(struct phy_device *phy_dev, u32 mmd_device, u32 regnum, u16 val) ++{ ++ phy_write(phy_dev, MACR, mmd_device); /* function = 00 address */ ++ phy_write(phy_dev, MAADR, regnum); ++ phy_write(phy_dev, MACR, 0x4000 | mmd_device); /* function = 01 data */ ++ ++ return phy_write(phy_dev, MAADR, val); ++} ++ ++static int smsc_lan8740_init(struct phy_device *phy_dev) ++{ ++ static int first_time = 0; ++ int v; ++ u32 eee_type = 0; ++ ++ if (!first_time) { ++ /* Realtek LAN 8740 start to enable eee */ ++ int eee_lan; ++ ++ eee_lan = phy_read(phy_dev, 0x10); ++ if (eee_lan < 0) ++ return eee_lan; ++ eee_lan = (u32)eee_lan | 0x4; ++ phy_write(phy_dev, 0x10, eee_lan); ++ eee_lan = phy_read(phy_dev, 0x10); ++ if (eee_lan < 0) ++ return eee_lan; ++ /* auto negotiate after enable eee */ ++ eee_lan = phy_read(phy_dev, 0x0); ++ if (eee_lan < 0) ++ return eee_lan; ++ eee_lan = (u32)eee_lan | 0x200; ++ phy_write(phy_dev, 0x0, eee_lan); ++ first_time = 1; ++ } ++ ++ v = phy_mmd_read(phy_dev, EEELPAR_DEV, EEELPAR); ++ if ((u32)v & LP_1000BASE_EEE) ++ eee_type |= GMAC_SPD_1000M; ++ if ((u32)v & LP_100BASE_EEE) ++ eee_type |= GMAC_SPD_100M; ++ ++ return (int)eee_type; ++} ++ ++#define 
RTL8211EG_MAC 0 ++#if RTL8211EG_MAC ++static int rtl8211eg_mac_init(struct phy_device *phy_dev) ++{ ++ static int first_time = 0; ++ /* Realtek 8211EG start reset to change eee to mac */ ++ int v; ++ u32 eee_type = 0; ++ ++ if (!first_time) { ++ int tmp; ++ ++ phy_write(phy_dev, 0x1f, 0x0); ++ phy_write(phy_dev, MII_BMCR, BMCR_RESET); /* reset phy */ ++ do { /* wait phy restart over */ ++ udelay(1); ++ tmp = phy_read(phy_dev, MII_BMSR); ++ /* no need to wait AN finished */ ++ tmp &= (BMSR_ANEGCOMPLETE | BMSR_ANEGCAPABLE); ++ } while (!tmp); ++ ++ phy_write(phy_dev, 0x1f, 0x7); ++ phy_write(phy_dev, 0x1e, 0x20); ++ phy_write(phy_dev, 0x1b, 0xa03a); ++ phy_write(phy_dev, 0x1f, 0x0); ++ ++ first_time = 1; ++ } ++ ++ v = phy_mmd_read(phy_dev, EEELPAR_DEV, EEELPAR); ++ if ((u32)v & LP_1000BASE_EEE) ++ eee_type |= GMAC_SPD_1000M; ++ if ((u32)v & LP_100BASE_EEE) ++ eee_type |= GMAC_SPD_100M; ++ ++ return (int)eee_type; ++} ++#else ++static int rtl8211eg_init(struct phy_device *phy_dev) ++{ ++ u32 eee_type = 0; ++ u32 v; ++ ++ v = (u32)phy_mmd_read(phy_dev, EEELPAR_DEV, EEELPAR); ++ if (v & LP_1000BASE_EEE) ++ eee_type |= GMAC_SPD_1000M; ++ if (v & LP_100BASE_EEE) ++ eee_type |= GMAC_SPD_100M; ++ ++ return (int)eee_type; ++} ++#endif ++ ++static int festa_v200_init(struct phy_device *phy_dev) ++{ ++ static int first_time_init = 0; ++ int v; ++ u32 eee_type = 0; ++ ++ if (!first_time_init) { ++ /* EEE_CAPABILITY register: support 100M-BaseT */ ++ v = phy_mmd_read(phy_dev, EEE_DEV, EEE_CAPABILITY); ++ phy_mmd_write(phy_dev, EEE_DEV, EEE_CAPABILITY, ((u32)v) | BIT(1)); ++ ++ /* EEE_ADVERTISEMENT register: advertising 100M-BaseT */ ++ v = phy_mmd_read(phy_dev, EEELPAR_DEV, EEE_ADVERTISE); ++ phy_mmd_write(phy_dev, EEELPAR_DEV, EEE_ADVERTISE, ((u32)v) | BIT(1)); ++ ++ v = phy_read(phy_dev, MII_BMCR); ++ if (v < 0) ++ return v; ++ v = (u32)v | (BMCR_ANENABLE | BMCR_ANRESTART); ++ phy_write(phy_dev, MII_BMCR, v); /* auto-neg restart */ ++ ++ first_time_init = 1; ++ } ++ ++ v = phy_mmd_read(phy_dev, EEELPAR_DEV, EEELPAR); ++ if ((u32)v & LP_1000BASE_EEE) ++ eee_type |= GMAC_SPD_1000M; ++ if ((u32)v & LP_100BASE_EEE) ++ eee_type |= GMAC_SPD_100M; ++ ++ return (int)eee_type; ++} ++ ++struct phy_info phy_info_table[] = { ++ /* phy_name phy_id eee_available phy_driver */ ++ {"SMSC LAN8740", 0x0007c110, MAC_EEE, &smsc_lan8740_init}, ++#if RTL8211EG_MAC ++ {"Realtek 8211EG", 0x001cc915, MAC_EEE, &rtl8211eg_mac_init}, ++#else ++ {"Realtek 8211EG", 0x001cc915, PHY_EEE, &rtl8211eg_init}, ++#endif ++ {"Festa V200", VENDOR_PHY_ID_FESTAV200, MAC_EEE, &festa_v200_init}, ++}; +diff --git a/drivers/net/ethernet/vendor/gmac/gmac.c b/drivers/net/ethernet/vendor/gmac/gmac.c +new file mode 100644 +index 000000000..1ae7ebb46 +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac.c +@@ -0,0 +1,2630 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "autoeee/autoeee.h" ++#include "gmac_ethtool_ops.h" ++#include "gmac_netdev_ops.h" ++#include "gmac_phy_fixup.h" ++#include "gmac_pm.h" ++#include "gmac_proc.h" ++#include "gmac_mdio.h" ++#include "gmac.h" ++ ++#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) ++static int debug = -1; ++module_param(debug, int, 0000); ++MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); ++ ++static void gmac_set_desc_depth(struct gmac_netdev_local const *priv, ++ u32 rx, u32 tx) ++{ ++ u32 reg, val; ++ int i; ++ ++ writel(BITS_RX_FQ_DEPTH_EN, priv->gmac_iobase + RX_FQ_REG_EN); ++ val = readl(priv->gmac_iobase + RX_FQ_DEPTH); ++ val &= ~Q_ADDR_HI8_MASK; ++ val |= rx << DESC_WORD_SHIFT; ++ writel(val, priv->gmac_iobase + RX_FQ_DEPTH); ++ writel(0, priv->gmac_iobase + RX_FQ_REG_EN); ++ ++ writel(BITS_RX_BQ_DEPTH_EN, priv->gmac_iobase + RX_BQ_REG_EN); ++ val = readl(priv->gmac_iobase + RX_BQ_DEPTH); ++ val &= ~Q_ADDR_HI8_MASK; ++ val |= rx << DESC_WORD_SHIFT; ++ writel(val, priv->gmac_iobase + RX_BQ_DEPTH); ++ for (i = 1; i < priv->num_rxqs; i++) { ++ reg = rx_bq_depth_queue(i); ++ val = readl(priv->gmac_iobase + reg); ++ val &= ~Q_ADDR_HI8_MASK; ++ val |= rx << DESC_WORD_SHIFT; ++ writel(val, priv->gmac_iobase + reg); ++ } ++ writel(0, priv->gmac_iobase + RX_BQ_REG_EN); ++ ++ writel(BITS_TX_BQ_DEPTH_EN, priv->gmac_iobase + TX_BQ_REG_EN); ++ val = readl(priv->gmac_iobase + TX_BQ_DEPTH); ++ val &= ~Q_ADDR_HI8_MASK; ++ val |= tx << DESC_WORD_SHIFT; ++ writel(val, priv->gmac_iobase + TX_BQ_DEPTH); ++ writel(0, priv->gmac_iobase + TX_BQ_REG_EN); ++ ++ writel(BITS_TX_RQ_DEPTH_EN, priv->gmac_iobase + TX_RQ_REG_EN); ++ val = readl(priv->gmac_iobase + TX_RQ_DEPTH); ++ val &= ~Q_ADDR_HI8_MASK; ++ val |= tx << DESC_WORD_SHIFT; ++ writel(val, priv->gmac_iobase + TX_RQ_DEPTH); ++ writel(0, priv->gmac_iobase + TX_RQ_REG_EN); ++} ++ ++static void gmac_set_rx_fq(struct gmac_netdev_local const *priv, ++ dma_addr_t phy_addr) ++{ ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ u32 val; ++#endif ++ writel(BITS_RX_FQ_START_ADDR_EN, priv->gmac_iobase + RX_FQ_REG_EN); ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ val = readl(priv->gmac_iobase + RX_FQ_DEPTH); ++ val &= Q_ADDR_HI8_MASK; ++ val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET; ++ writel(val, priv->gmac_iobase + RX_FQ_DEPTH); ++#endif ++ writel((u32)phy_addr, priv->gmac_iobase + RX_FQ_START_ADDR); ++ writel(0, priv->gmac_iobase + RX_FQ_REG_EN); ++} ++ ++static void gmac_set_rx_bq(struct gmac_netdev_local const *priv, ++ dma_addr_t phy_addr) ++{ ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ u32 val; ++#endif ++ ++ writel(BITS_RX_BQ_START_ADDR_EN, priv->gmac_iobase + RX_BQ_REG_EN); ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ val = readl(priv->gmac_iobase + RX_BQ_DEPTH); ++ val &= Q_ADDR_HI8_MASK; ++ val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET; ++ writel(val, priv->gmac_iobase + RX_BQ_DEPTH); ++#endif ++ writel((u32)phy_addr, priv->gmac_iobase + RX_BQ_START_ADDR); ++ writel(0, priv->gmac_iobase + RX_BQ_REG_EN); ++} ++ ++static void gmac_set_tx_bq(struct gmac_netdev_local const *priv, ++ dma_addr_t phy_addr) ++{ ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ u32 val; ++#endif ++ writel(BITS_TX_BQ_START_ADDR_EN, 
priv->gmac_iobase + TX_BQ_REG_EN); ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ val = readl(priv->gmac_iobase + TX_BQ_DEPTH); ++ val &= Q_ADDR_HI8_MASK; ++ val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET; ++ writel(val, priv->gmac_iobase + TX_BQ_DEPTH); ++#endif ++ writel((u32)phy_addr, priv->gmac_iobase + TX_BQ_START_ADDR); ++ writel(0, priv->gmac_iobase + TX_BQ_REG_EN); ++} ++ ++static void gmac_set_tx_rq(struct gmac_netdev_local const *priv, ++ dma_addr_t phy_addr) ++{ ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ u32 val; ++#endif ++ writel(BITS_TX_RQ_START_ADDR_EN, priv->gmac_iobase + TX_RQ_REG_EN); ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ val = readl(priv->gmac_iobase + TX_RQ_DEPTH); ++ val &= Q_ADDR_HI8_MASK; ++ val |= (phy_addr >> REG_BIT_WIDTH) << Q_ADDR_HI8_OFFSET; ++ writel(val, priv->gmac_iobase + TX_RQ_DEPTH); ++#endif ++ writel((u32)phy_addr, priv->gmac_iobase + TX_RQ_START_ADDR); ++ writel(0, priv->gmac_iobase + TX_RQ_REG_EN); ++} ++ ++static void gmac_hw_set_desc_addr(struct gmac_netdev_local const *priv) ++{ ++ u32 reg; ++ int i; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ u32 val; ++#endif ++ ++ gmac_set_rx_fq(priv, priv->RX_FQ.phys_addr); ++ gmac_set_rx_bq(priv, priv->RX_BQ.phys_addr); ++ gmac_set_tx_rq(priv, priv->TX_RQ.phys_addr); ++ gmac_set_tx_bq(priv, priv->TX_BQ.phys_addr); ++ ++ for (i = 1; i < priv->num_rxqs; i++) { ++ reg = rx_bq_depth_queue(i); ++ /* ++ * Logical limitation: We must enable BITS_RX_BQ_DEPTH_EN ++ * to write rx_bq_start_addr_39to32 successfully. ++ */ ++ writel(BITS_RX_BQ_START_ADDR_EN | BITS_RX_BQ_DEPTH_EN, ++ priv->gmac_iobase + RX_BQ_REG_EN); ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ val = readl(priv->gmac_iobase + reg); ++ val &= Q_ADDR_HI8_MASK; ++ val |= ((priv->pool[BASE_QUEUE_NUMS + i].phys_addr) >> REG_BIT_WIDTH) << ++ Q_ADDR_HI8_OFFSET; ++ writel(val, priv->gmac_iobase + reg); ++#endif ++ reg = (u32)rx_bq_start_addr_queue(i); ++ /* pool 3 add i */ ++ writel((u32)(priv->pool[BASE_QUEUE_NUMS + i].phys_addr), ++ priv->gmac_iobase + reg); ++ writel(0, priv->gmac_iobase + RX_BQ_REG_EN); ++ } ++} ++ ++static void gmac_hw_init(struct gmac_netdev_local *priv) ++{ ++ u32 val; ++ u32 reg; ++ int i; ++ ++ /* disable and clear all interrupts */ ++ writel(0, priv->gmac_iobase + ENA_PMU_INT); ++ writel(~0, priv->gmac_iobase + RAW_PMU_INT); ++ ++ for (i = 1; i < priv->num_rxqs; i++) { ++ reg = (u32)rss_ena_int_queue(i); ++ writel(0, priv->gmac_iobase + reg); ++ } ++ writel(~0, priv->gmac_iobase + RSS_RAW_PMU_INT); ++ ++ /* enable CRC erro packets filter */ ++ val = readl(priv->gmac_iobase + REC_FILT_CONTROL); ++ val |= BIT_CRC_ERR_PASS; ++ writel(val, priv->gmac_iobase + REC_FILT_CONTROL); ++ ++ /* set tx min packet length */ ++ val = readl(priv->gmac_iobase + CRF_MIN_PACKET); ++ val &= ~BIT_MASK_TX_MIN_LEN; ++ val |= ETH_HLEN << BIT_OFFSET_TX_MIN_LEN; ++ writel(val, priv->gmac_iobase + CRF_MIN_PACKET); ++ ++ /* fix bug for udp and ip error check */ ++ writel(CONTROL_WORD_CONFIG, priv->gmac_iobase + CONTROL_WORD); ++ ++ writel(0, priv->gmac_iobase + COL_SLOT_TIME); ++ ++ writel(DUPLEX_HALF, priv->gmac_iobase + MAC_DUPLEX_HALF_CTRL); ++ ++ /* interrupt when rcv packets >= RX_BQ_INT_THRESHOLD */ ++ gmac_set_rxbq_enqueue_thres(priv, priv->coalesce.rx_frames); ++ gmac_set_txrq_enqueue_thres(priv, priv->coalesce.tx_frames); ++ ++ /* RX_BQ/TX_RQ in timeout threshold */ ++ gmac_set_rxbq_enqueue_timeout_thres(priv, priv->coalesce.rx_timeout); ++ gmac_set_txrq_enqueue_timeout_thres(priv, priv->coalesce.tx_timeout); ++ ++ gmac_set_desc_depth(priv, RX_DESC_NUM, 
TX_DESC_NUM);
++}
++
++/*
++ * This func stops the hw desc and reclaims the software skb resources;
++ * before reusing the gmac, you'd better reset the gmac.
++ */
++static void gmac_reclaim_rx_tx_resource(struct gmac_netdev_local *ld)
++{
++	unsigned long rxflags, txflags;
++	int rd_offset, wr_offset;
++	int i;
++
++	gmac_irq_disable_all_queue(ld);
++	gmac_hw_desc_disable(ld);
++	writel(STOP_RX_TX, ld->gmac_iobase + STOP_CMD);
++
++	spin_lock_irqsave(&ld->rxlock, rxflags);
++	/* RX_BQ: logic write pointer */
++	wr_offset = readl(ld->gmac_iobase + RX_BQ_WR_ADDR);
++	/* prevent the rx bottom half from reclaiming skb */
++	writel(wr_offset, ld->gmac_iobase + RX_BQ_RD_ADDR);
++
++	for (i = 1; i < ld->num_rxqs; i++) {
++		u32 rx_bq_wr_reg, rx_bq_rd_reg;
++
++		rx_bq_wr_reg = rx_bq_wr_addr_queue(i);
++		rx_bq_rd_reg = rx_bq_rd_addr_queue(i);
++
++		wr_offset = readl(ld->gmac_iobase + rx_bq_wr_reg);
++		writel(wr_offset, ld->gmac_iobase + rx_bq_rd_reg);
++	}
++
++	/* RX_FQ: logic read pointer */
++	rd_offset = readl(ld->gmac_iobase + RX_FQ_RD_ADDR);
++	if (rd_offset == 0)
++		rd_offset = (RX_DESC_NUM - 1) << DESC_BYTE_SHIFT;
++	else
++		rd_offset -= DESC_SIZE;
++	/* stop feeding hw desc */
++	writel(rd_offset, ld->gmac_iobase + RX_FQ_WR_ADDR);
++
++	for (i = 0; i < ld->RX_FQ.count; i++) {
++		if (!ld->RX_FQ.skb[i])
++			ld->RX_FQ.skb[i] = SKB_MAGIC;
++	}
++	spin_unlock_irqrestore(&ld->rxlock, rxflags);
++
++	/*
++	 * no need to wait for pkts in TX_RQ to finish before freeing all skb,
++	 * because gmac_xmit_reclaim runs inside the tx_lock.
++	 */
++	spin_lock_irqsave(&ld->txlock, txflags);
++	/* TX_RQ: logic write */
++	wr_offset = readl(ld->gmac_iobase + TX_RQ_WR_ADDR);
++	/* stop reclaiming tx skb */
++	writel(wr_offset, ld->gmac_iobase + TX_RQ_RD_ADDR);
++
++	/* TX_BQ: logic read */
++	rd_offset = readl(ld->gmac_iobase + TX_BQ_RD_ADDR);
++	if (rd_offset == 0)
++		rd_offset = (TX_DESC_NUM - 1) << DESC_BYTE_SHIFT;
++	else
++		rd_offset -= DESC_SIZE;
++	/* stop software tx skb */
++	writel(rd_offset, ld->gmac_iobase + TX_BQ_WR_ADDR);
++
++	for (i = 0; i < ld->TX_BQ.count; i++) {
++		if (!ld->TX_BQ.skb[i])
++			ld->TX_BQ.skb[i] = SKB_MAGIC;
++	}
++	spin_unlock_irqrestore(&ld->txlock, txflags);
++}
++
++static void gmac_free_rx_skb(struct gmac_netdev_local const *ld)
++{
++	struct sk_buff *skb = NULL;
++	int i;
++
++	for (i = 0; i < ld->RX_FQ.count; i++) {
++		skb = ld->RX_FQ.skb[i];
++		if (skb != NULL) {
++			ld->rx_skb[i] = NULL;
++			ld->RX_FQ.skb[i] = NULL;
++			if (skb == SKB_MAGIC)
++				continue;
++			dev_kfree_skb_any(skb);
++			/*
++			 * we would need to unmap the skb here,
++			 * but there is no way to get the dma_addr here,
++			 * and unmap(TO_DEVICE) ops do nothing in fact,
++			 * so we skip the call to
++			 * dma_unmap_single(dev, dma_addr, skb->len,
++			 * DMA_TO_DEVICE)
++			 */
++		}
++	}
++}
++
++static void gmac_free_tx_skb(struct gmac_netdev_local const *ld)
++{
++	struct sk_buff *skb = NULL;
++	int i;
++
++	for (i = 0; i < ld->TX_BQ.count; i++) {
++		skb = ld->TX_BQ.skb[i];
++		if (skb != NULL) {
++			ld->tx_skb[i] = NULL;
++			ld->TX_BQ.skb[i] = NULL;
++			if (skb == SKB_MAGIC)
++				continue;
++			netdev_completed_queue(ld->netdev, 1, skb->len);
++			dev_kfree_skb_any(skb);
++		}
++	}
++}
++
++#define GMAC_RESET_TIMEOUT	1000
++/* board related func */
++static void gmac_mac_core_reset(struct gmac_netdev_local const *priv)
++{
++	int timeout = GMAC_RESET_TIMEOUT;
++
++	if (priv->port_rst == NULL)
++		return;
++
++	/* undo reset */
++	reset_control_deassert(priv->port_rst);
++
++	/*
++	 * When GMAC is not in an idle state, we cannot reset GMAC
++	 * successfully. And it may cause GMAC to stall in an abnormal state if
++	 * we turn off its clock while GMAC is initiating a bus access. Thus
++	 * we disable tx/rx and desc_read_write here to ensure that GMAC no
++	 * longer initiates new bus accesses.
++	 */
++	gmac_hw_desc_disable(priv);
++	gmac_port_disable(priv);
++	msleep(1);
++
++	/*
++	 * When GMAC is not in an idle state, we cannot successfully
++	 * reset it, thus here we reset the GMAC repeatedly and read the
++	 * RX_FQ_START_ADDR register to check whether the GMAC was successfully
++	 * reset.
++	 */
++	while (timeout--) {
++		/* soft reset mac port */
++		reset_control_assert(priv->port_rst);
++		msleep(1);
++		/* undo reset */
++		reset_control_deassert(priv->port_rst);
++		if (readl(priv->gmac_iobase + RX_FQ_START_ADDR) == 0) {
++			break;
++		}
++	}
++
++	if (timeout < 0) {
++		pr_err("gmac reset failed!\n");
++	}
++	return;
++}
++
++/* reset and re-config gmac */
++static void gmac_restart(struct gmac_netdev_local *ld)
++{
++	unsigned long rxflags, txflags;
++	struct net_device *ndev = ld->netdev;
++
++	spin_lock_irqsave(&ld->rxlock, rxflags);
++	spin_lock_irqsave(&ld->txlock, txflags);
++
++	gmac_free_rx_skb(ld);
++	gmac_free_tx_skb(ld);
++
++	ld->sg_head = 0;
++	ld->sg_tail = 0;
++
++	pmt_reg_restore(ld);
++	gmac_hw_init(ld);
++	gmac_hw_set_mac_addr(ld);
++	gmac_hw_set_desc_addr(ld);
++
++	/* we don't set macif here, it will be set in adjust_link */
++	if (netif_running(ld->netdev)) {
++		/*
++		 * when resuming, only do the following operations
++		 * if the dev was up before suspend.
++		 */
++		gmac_rx_refill(ld);
++		if (ndev->netdev_ops->ndo_set_rx_mode != NULL)
++			ndev->netdev_ops->ndo_set_rx_mode(ndev);
++
++		gmac_hw_desc_enable(ld);
++		gmac_port_enable(ld);
++		gmac_irq_enable_all_queue(ld);
++	}
++	spin_unlock_irqrestore(&ld->txlock, txflags);
++	spin_unlock_irqrestore(&ld->rxlock, rxflags);
++}
++
++#ifdef GMAC_LINK_CHANGE_PROTECT
++#define GMAC_MS_TO_NS	(1000000ULL)
++#define GMAC_FLUSH_WAIT_TIME	(100 * GMAC_MS_TO_NS)
++/* protect code */
++static void gmac_linkup_flush(struct gmac_netdev_local const *ld)
++{
++	int tx_bq_wr_offset, tx_bq_rd_offset;
++	unsigned long long time_limit, time_now;
++
++	time_now = sched_clock();
++	time_limit = time_now + GMAC_FLUSH_WAIT_TIME;
++
++	do {
++		tx_bq_wr_offset = readl(ld->gmac_iobase + TX_BQ_WR_ADDR);
++		tx_bq_rd_offset = readl(ld->gmac_iobase + TX_BQ_RD_ADDR);
++
++		time_now = sched_clock();
++		if (unlikely(((long long)time_now - (long long)time_limit) >= 0))
++			break;
++	} while (tx_bq_rd_offset != tx_bq_wr_offset);
++
++	mdelay(1);
++}
++#endif
++
++#ifdef GMAC_MAC_TX_RESET_IN_LINKUP
++static void gmac_mac_tx_state_engine_reset(struct gmac_netdev_local const *priv)
++{
++	u32 val;
++	val = readl(priv->gmac_iobase + MAC_CLEAR);
++	val |= BIT_TX_SOFT_RESET;
++	writel(val, priv->gmac_iobase + MAC_CLEAR);
++
++	mdelay(5); /* wait 5ms */
++
++	val = readl(priv->gmac_iobase + MAC_CLEAR);
++	val &= ~BIT_TX_SOFT_RESET;
++	writel(val, priv->gmac_iobase + MAC_CLEAR);
++}
++#endif
++
++static void gmac_config_port(struct net_device const *dev, u32 speed, u32 duplex)
++{
++	struct gmac_netdev_local *priv = netdev_priv(dev);
++	u32 val;
++
++	switch (priv->phy_mode) {
++	case PHY_INTERFACE_MODE_RGMII:
++	case PHY_INTERFACE_MODE_RGMII_ID:
++	case PHY_INTERFACE_MODE_RGMII_RXID:
++	case PHY_INTERFACE_MODE_RGMII_TXID:
++		if (speed == SPEED_1000)
++			val = RGMII_SPEED_1000;
++		else if (speed == SPEED_100)
++			val = RGMII_SPEED_100;
++		else
++			val = RGMII_SPEED_10;
++		break;
++	case PHY_INTERFACE_MODE_MII:
++		if (speed == SPEED_100)
++
val = MII_SPEED_100; ++ else ++ val = MII_SPEED_10; ++ break; ++ case PHY_INTERFACE_MODE_RMII: ++ if (speed == SPEED_100) ++ val = RMII_SPEED_100; ++ else ++ val = RMII_SPEED_10; ++ break; ++ default: ++ netdev_warn(dev, "not supported mode\n"); ++ val = MII_SPEED_10; ++ break; ++ } ++ ++ if (duplex) ++ val |= GMAC_FULL_DUPLEX; ++ ++ reset_control_assert(priv->macif_rst); ++ writel_relaxed(val, priv->macif_base); ++ reset_control_deassert(priv->macif_rst); ++ ++ writel_relaxed(BIT_MODE_CHANGE_EN, priv->gmac_iobase + MODE_CHANGE_EN); ++ if (speed == SPEED_1000) ++ val = GMAC_SPEED_1000; ++ else if (speed == SPEED_100) ++ val = GMAC_SPEED_100; ++ else ++ val = GMAC_SPEED_10; ++ writel_relaxed(val, priv->gmac_iobase + PORT_MODE); ++ writel_relaxed(0, priv->gmac_iobase + MODE_CHANGE_EN); ++ writel_relaxed(duplex, priv->gmac_iobase + MAC_DUPLEX_HALF_CTRL); ++} ++ ++static unsigned int flow_ctrl_en = FLOW_OFF; ++static int tx_flow_ctrl_pause_time = CONFIG_TX_FLOW_CTRL_PAUSE_TIME; ++static int tx_flow_ctrl_pause_interval = CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL; ++static int tx_flow_ctrl_active_threshold = CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD; ++static int tx_flow_ctrl_deactive_threshold = ++ CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD; ++ ++static void gmac_set_flow_ctrl_args(struct gmac_netdev_local *ld) ++{ ++ ld->flow_ctrl = flow_ctrl_en; ++ ld->pause = tx_flow_ctrl_pause_time; ++ ld->pause_interval = tx_flow_ctrl_pause_interval; ++ ld->flow_ctrl_active_threshold = tx_flow_ctrl_active_threshold; ++ ld->flow_ctrl_deactive_threshold = tx_flow_ctrl_deactive_threshold; ++} ++ ++static void gmac_set_flow_ctrl_params(struct gmac_netdev_local const *ld) ++{ ++ unsigned int rx_fq_empty_th; ++ unsigned int rx_fq_full_th; ++ unsigned int rx_bq_empty_th; ++ unsigned int rx_bq_full_th; ++ unsigned int rec_filter; ++ ++ writel(ld->pause, ld->gmac_iobase + FC_TX_TIMER); ++ writel(ld->pause_interval, ld->gmac_iobase + PAUSE_THR); ++ ++ rx_fq_empty_th = readl(ld->gmac_iobase + RX_FQ_ALEMPTY_TH); ++ rx_fq_empty_th &= ~(BITS_Q_PAUSE_TH_MASK << BITS_Q_PAUSE_TH_OFFSET); ++ rx_fq_empty_th |= (ld->flow_ctrl_active_threshold << ++ BITS_Q_PAUSE_TH_OFFSET); ++ writel(rx_fq_empty_th, ld->gmac_iobase + RX_FQ_ALEMPTY_TH); ++ ++ rx_fq_full_th = readl(ld->gmac_iobase + RX_FQ_ALFULL_TH); ++ rx_fq_full_th &= ~(BITS_Q_PAUSE_TH_MASK << BITS_Q_PAUSE_TH_OFFSET); ++ rx_fq_full_th |= (ld->flow_ctrl_deactive_threshold << ++ BITS_Q_PAUSE_TH_OFFSET); ++ writel(rx_fq_full_th, ld->gmac_iobase + RX_FQ_ALFULL_TH); ++ ++ rx_bq_empty_th = readl(ld->gmac_iobase + RX_BQ_ALEMPTY_TH); ++ rx_bq_empty_th &= ~(BITS_Q_PAUSE_TH_MASK << BITS_Q_PAUSE_TH_OFFSET); ++ rx_bq_empty_th |= (ld->flow_ctrl_active_threshold << ++ BITS_Q_PAUSE_TH_OFFSET); ++ writel(rx_bq_empty_th, ld->gmac_iobase + RX_BQ_ALEMPTY_TH); ++ ++ rx_bq_full_th = readl(ld->gmac_iobase + RX_BQ_ALFULL_TH); ++ rx_bq_full_th &= ~(BITS_Q_PAUSE_TH_MASK << BITS_Q_PAUSE_TH_OFFSET); ++ rx_bq_full_th |= (ld->flow_ctrl_deactive_threshold << ++ BITS_Q_PAUSE_TH_OFFSET); ++ writel(rx_bq_full_th, ld->gmac_iobase + RX_BQ_ALFULL_TH); ++ ++ writel(0, ld->gmac_iobase + CRF_TX_PAUSE); ++ ++ rec_filter = readl(ld->gmac_iobase + REC_FILT_CONTROL); ++ rec_filter |= BIT_PAUSE_FRM_PASS; ++ writel(rec_filter, ld->gmac_iobase + REC_FILT_CONTROL); ++} ++ ++void gmac_set_flow_ctrl_state(struct gmac_netdev_local const *ld, int pause) ++{ ++ unsigned int flow_rx_q_en; ++ unsigned int flow; ++ if (ld == NULL) ++ return; ++ flow_rx_q_en = readl(ld->gmac_iobase + RX_PAUSE_EN); ++ flow_rx_q_en &= ~(BIT_RX_FQ_PAUSE_EN | 
BIT_RX_BQ_PAUSE_EN); ++ if (pause && (ld->flow_ctrl & FLOW_TX)) ++ flow_rx_q_en |= (BIT_RX_FQ_PAUSE_EN | BIT_RX_BQ_PAUSE_EN); ++ writel(flow_rx_q_en, ld->gmac_iobase + RX_PAUSE_EN); ++ ++ flow = readl(ld->gmac_iobase + PAUSE_EN); ++ flow &= ~(BIT_RX_FDFC | BIT_TX_FDFC); ++ if (pause) { ++ if (ld->flow_ctrl & FLOW_RX) ++ flow |= BIT_RX_FDFC; ++ if (ld->flow_ctrl & FLOW_TX) ++ flow |= BIT_TX_FDFC; ++ } ++ writel(flow, ld->gmac_iobase + PAUSE_EN); ++} ++ ++static void gmac_adjust_link(struct net_device *dev) ++{ ++ struct gmac_netdev_local *priv = NULL; ++ struct phy_device *phy = NULL; ++ bool link_status_changed = false; ++ if (dev == NULL) ++ return; ++ priv = netdev_priv(dev); ++ if (priv == NULL || priv->phy == NULL) ++ return; ++ phy = priv->phy; ++ if (phy->link) { ++ if ((priv->old_speed != phy->speed) || ++ (priv->old_duplex != phy->duplex)) { ++#ifdef GMAC_LINK_CHANGE_PROTECT ++ unsigned long txflags; ++ ++ spin_lock_irqsave(&priv->txlock, txflags); ++ ++ gmac_linkup_flush(priv); ++#endif ++ gmac_config_port(dev, phy->speed, phy->duplex); ++#ifdef GMAC_MAC_TX_RESET_IN_LINKUP ++ gmac_mac_tx_state_engine_reset(priv); ++#endif ++#ifdef GMAC_LINK_CHANGE_PROTECT ++ spin_unlock_irqrestore(&priv->txlock, txflags); ++#endif ++ gmac_set_flow_ctrl_state(priv, phy->pause); ++ ++ if (priv->autoeee) ++ init_autoeee(priv); ++ ++ link_status_changed = true; ++ priv->old_link = 1; ++ priv->old_speed = phy->speed; ++ priv->old_duplex = phy->duplex; ++ } ++ } else if (priv->old_link) { ++ link_status_changed = true; ++ priv->old_link = 0; ++ netif_carrier_off(dev); ++ priv->old_speed = SPEED_UNKNOWN; ++ priv->old_duplex = DUPLEX_UNKNOWN; ++ } ++ ++ if (link_status_changed && netif_msg_link(priv)) ++ phy_print_status(phy); ++} ++ ++static int gmac_init_sg_desc_queue(struct gmac_netdev_local *ld) ++{ ++ ld->sg_count = ld->TX_BQ.count + GMAC_SG_DESC_ADD; ++ ld->dma_sg_desc = (struct sg_desc *)dma_alloc_coherent(ld->dev, ++ ld->sg_count * sizeof(struct sg_desc), ++ &ld->dma_sg_phy, GFP_KERNEL); ++ ++ if (ld->dma_sg_desc == NULL) { ++ pr_err("alloc sg desc dma error!\n"); ++ return -ENOMEM; ++ } ++ ++ ld->sg_head = 0; ++ ld->sg_tail = 0; ++ ++ return 0; ++} ++ ++static void gmac_destroy_sg_desc_queue(struct gmac_netdev_local *ld) ++{ ++ if (ld->dma_sg_desc) { ++ dma_free_coherent(ld->dev, ++ ld->sg_count * sizeof(struct sg_desc), ++ ld->dma_sg_desc, ld->dma_sg_phy); ++ ld->dma_sg_desc = NULL; ++ } ++} ++ ++static bool gmac_rx_fq_empty(struct gmac_netdev_local const *priv) ++{ ++ u32 start, end; ++ ++ start = readl(priv->gmac_iobase + RX_FQ_WR_ADDR); ++ end = readl(priv->gmac_iobase + RX_FQ_RD_ADDR); ++ if (start == end) ++ return true; ++ else ++ return false; ++} ++ ++static bool gmac_rxq_has_packets(struct gmac_netdev_local const *priv, int rxq_id) ++{ ++ u32 rx_bq_rd_reg, rx_bq_wr_reg; ++ u32 start, end; ++ ++ rx_bq_rd_reg = rx_bq_rd_addr_queue(rxq_id); ++ rx_bq_wr_reg = rx_bq_wr_addr_queue(rxq_id); ++ ++ start = readl(priv->gmac_iobase + rx_bq_rd_reg); ++ end = readl(priv->gmac_iobase + rx_bq_wr_reg); ++ if (start == end) ++ return false; ++ else ++ return true; ++} ++ ++static void gmac_monitor_func(struct timer_list *t) ++{ ++ struct gmac_netdev_local *ld = from_timer(ld, t, monitor); ++ struct net_device *dev = NULL; ++ u32 refill_cnt; ++ ++ if (ld == NULL) { ++ gmac_trace(GMAC_NORMAL_LEVEL, "ld is null"); ++ return; ++ } ++ ++ if (ld->netdev == NULL) { ++ gmac_trace(GMAC_NORMAL_LEVEL, "ld->netdev is null"); ++ return; ++ } ++ dev_hold(ld->netdev); ++ dev = ld->netdev; ++ if (!netif_running(dev)) { 
++ dev_put(dev); ++ gmac_trace(GMAC_NORMAL_LEVEL, "network driver is stopped"); ++ return; ++ } ++ dev_put(dev); ++ ++ spin_lock(&ld->rxlock); ++ refill_cnt = gmac_rx_refill(ld); ++ if (!refill_cnt && gmac_rx_fq_empty(ld)) { ++ int rxq_id; ++ ++ for (rxq_id = 0; rxq_id < ld->num_rxqs; rxq_id++) { ++ if (gmac_rxq_has_packets(ld, rxq_id)) ++ napi_schedule(&ld->q_napi[rxq_id].napi); ++ } ++ } ++ spin_unlock(&ld->rxlock); ++ ++ ld->monitor.expires = jiffies + GMAC_MONITOR_TIMER; ++ mod_timer(&ld->monitor, ld->monitor.expires); ++} ++ ++u32 gmac_rx_refill(struct gmac_netdev_local *priv) ++{ ++ struct gmac_desc *desc = NULL; ++ struct sk_buff *skb = NULL; ++ struct cyclic_queue_info dma_info; ++ u32 len = ETH_MAX_FRAME_SIZE; ++ dma_addr_t addr; ++ u32 refill_cnt = 0; ++ u32 i; ++ /* software write pointer */ ++ dma_info.start = dma_cnt(readl(priv->gmac_iobase + RX_FQ_WR_ADDR)); ++ /* logic read pointer */ ++ dma_info.end = dma_cnt(readl(priv->gmac_iobase + RX_FQ_RD_ADDR)); ++ dma_info.num = CIRC_SPACE(dma_info.start, dma_info.end, RX_DESC_NUM); ++ ++ for (i = 0, dma_info.pos = dma_info.start; i < dma_info.num; i++) { ++ if (priv->RX_FQ.skb[dma_info.pos] || priv->rx_skb[dma_info.pos]) ++ break; ++ ++ skb = netdev_alloc_skb_ip_align(priv->netdev, len); ++ if (unlikely(skb == NULL)) ++ break; ++ ++ addr = dma_map_single(priv->dev, skb->data, len, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(priv->dev, addr)) { ++ dev_kfree_skb_any(skb); ++ break; ++ } ++ ++ desc = priv->RX_FQ.desc + dma_info.pos; ++ desc->data_buff_addr = (u32)addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ desc->reserve31 = addr >> REG_BIT_WIDTH; ++#endif ++ priv->RX_FQ.skb[dma_info.pos] = skb; ++ priv->rx_skb[dma_info.pos] = skb; ++ ++ desc->buffer_len = len - 1; ++ desc->data_len = 0; ++ desc->fl = 0; ++ desc->descvid = DESC_VLD_FREE; ++ desc->skb_id = dma_info.pos; ++ ++ refill_cnt++; ++ dma_info.pos = dma_ring_incr(dma_info.pos, RX_DESC_NUM); ++ } ++ ++ /* ++ * This barrier is important here. 
It is required to ensure ++ * the ARM CPU flushes its DMA write buffers before proceeding ++ * to the next instruction, to ensure that GMAC will see ++ * our descriptor changes in memory. ++ */ ++ gmac_sync_barrier(); ++ ++ if (dma_info.pos != dma_info.start) ++ writel(dma_byte(dma_info.pos), priv->gmac_iobase + RX_FQ_WR_ADDR); ++ ++ return refill_cnt; ++} ++ ++static int gmac_rx_checksum(struct net_device *dev, struct sk_buff *skb, ++ struct gmac_desc const *desc) ++{ ++ int hdr_csum_done, payload_csum_done; ++ int hdr_csum_err, payload_csum_err; ++ ++ if (dev->features & NETIF_F_RXCSUM) { ++ hdr_csum_done = desc->header_csum_done; ++ payload_csum_done = desc->payload_csum_done; ++ hdr_csum_err = desc->header_csum_err; ++ payload_csum_err = desc->payload_csum_err; ++ ++ if (hdr_csum_done && payload_csum_done) { ++ if (unlikely(hdr_csum_err || payload_csum_err)) { ++ dev->stats.rx_errors++; ++ dev->stats.rx_crc_errors++; ++ dev_kfree_skb_any(skb); ++ return -1; ++ } else { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ } ++ } ++ } ++ return 0; ++} ++ ++static void gmac_rx_skbput(struct net_device *dev, struct sk_buff *skb, ++ struct gmac_desc const *desc, int rxq_id) ++{ ++ struct gmac_netdev_local *ld = netdev_priv(dev); ++ dma_addr_t addr; ++ u32 len; ++ int ret; ++ ++ len = desc->data_len; ++ ++ addr = desc->data_buff_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ addr |= (dma_addr_t)(desc->reserve31) << REG_BIT_WIDTH; ++#endif ++ dma_unmap_single(ld->dev, addr, ETH_MAX_FRAME_SIZE, DMA_FROM_DEVICE); ++ ++ if ((addr & NET_IP_ALIGN) == 0) ++ skb_reserve(skb, 2); /* 2:NET_IP_ALIGN */ ++ ++ skb_put(skb, len); ++ if (skb->len > ETH_MAX_FRAME_SIZE) { ++ netdev_err(dev, "rcv len err, len = %d\n", skb->len); ++ dev->stats.rx_errors++; ++ dev->stats.rx_length_errors++; ++ dev_kfree_skb_any(skb); ++ return; ++ } ++ ++ skb->protocol = eth_type_trans(skb, dev); ++ skb->ip_summed = CHECKSUM_NONE; ++ ++#if defined(CONFIG_GMAC_RXCSUM) ++ ret = gmac_rx_checksum(dev, skb, desc); ++ if (unlikely(ret)) ++ return; ++#endif ++ if ((dev->features & NETIF_F_RXHASH) && desc->has_hash) ++ skb_set_hash(skb, desc->rxhash, desc->l3_hash ?
++ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4); ++ ++ skb_record_rx_queue(skb, rxq_id); ++ ++ napi_gro_receive(&ld->q_napi[rxq_id].napi, skb); ++ dev->stats.rx_packets++; ++ dev->stats.rx_bytes += len; ++} ++ ++static int gmac_rx_skb(struct net_device *dev, struct gmac_desc *desc, int rxq_id) ++{ ++ struct gmac_netdev_local *ld = netdev_priv(dev); ++ struct sk_buff *skb = NULL; ++ u16 skb_id = desc->skb_id; ++ ++ spin_lock(&ld->rxlock); ++ skb = ld->rx_skb[skb_id]; ++ if (unlikely(skb == NULL)) { ++ spin_unlock(&ld->rxlock); ++ netdev_err(dev, "inconsistent rx_skb\n"); ++ return -1; ++ } ++ ++ /* data consistent check */ ++ if (unlikely(skb != ld->RX_FQ.skb[skb_id])) { ++ netdev_err(dev, "desc->skb(0x%p),RX_FQ.skb[%d](0x%p)\n", ++ skb, skb_id, ld->RX_FQ.skb[skb_id]); ++ if (ld->RX_FQ.skb[skb_id] == SKB_MAGIC) { ++ spin_unlock(&ld->rxlock); ++ goto next_skb; ++ } ++ WARN_ON(1); ++ } else { ++ ld->RX_FQ.skb[skb_id] = NULL; ++ } ++ spin_unlock(&ld->rxlock); ++ ++ gmac_rx_skbput(dev, skb, desc, rxq_id); ++ ++next_skb: ++ spin_lock(&ld->rxlock); ++ ld->rx_skb[skb_id] = NULL; ++ spin_unlock(&ld->rxlock); ++ return 0; ++} ++ ++static int gmac_rx(struct net_device *dev, int limit, int rxq_id) ++{ ++ struct gmac_netdev_local *ld = netdev_priv(dev); ++ struct gmac_desc *desc = NULL; ++ struct cyclic_queue_info dma_info; ++ u32 rx_bq_rd_reg, rx_bq_wr_reg; ++ u32 i; ++ ++ rx_bq_rd_reg = rx_bq_rd_addr_queue(rxq_id); ++ rx_bq_wr_reg = rx_bq_wr_addr_queue(rxq_id); ++ ++ /* software read pointer */ ++ dma_info.start = dma_cnt(readl(ld->gmac_iobase + rx_bq_rd_reg)); ++ /* logic write pointer */ ++ dma_info.end = dma_cnt(readl(ld->gmac_iobase + rx_bq_wr_reg)); ++ dma_info.num = CIRC_CNT(dma_info.end, dma_info.start, RX_DESC_NUM); ++ if (dma_info.num > limit) ++ dma_info.num = limit; ++ ++ /* ensure get updated desc */ ++ rmb(); ++ for (i = 0, dma_info.pos = dma_info.start; i < dma_info.num; i++) { ++ if (rxq_id) ++ desc = ld->pool[BASE_QUEUE_NUMS + rxq_id].desc + dma_info.pos; ++ else ++ desc = ld->RX_BQ.desc + dma_info.pos; ++ ++ if (unlikely(gmac_rx_skb(dev, desc, rxq_id))) ++ break; ++ ++ dma_info.pos = dma_ring_incr(dma_info.pos, RX_DESC_NUM); ++ } ++ ++ if (dma_info.pos != dma_info.start) ++ writel(dma_byte(dma_info.pos), ld->gmac_iobase + rx_bq_rd_reg); ++ ++ spin_lock(&ld->rxlock); ++ gmac_rx_refill(ld); ++ spin_unlock(&ld->rxlock); ++ ++ return dma_info.num; ++} ++ ++static int gmac_check_tx_err(struct gmac_netdev_local const *ld, ++ struct gmac_tso_desc const *tx_bq_desc, unsigned int desc_pos) ++{ ++ unsigned int tx_err = tx_bq_desc->tx_err; ++ ++ if (unlikely(tx_err & ERR_ALL)) { ++ struct sg_desc *desc_cur = NULL; ++ int *sg_word = NULL; ++ int i; ++ ++ WARN((tx_err & ERR_ALL), ++ "TX ERR: desc1=0x%x, desc2=0x%x, desc5=0x%x\n", ++ tx_bq_desc->data_buff_addr, ++ tx_bq_desc->desc1.val, tx_bq_desc->tx_err); ++ ++ desc_cur = ld->dma_sg_desc + ld->TX_BQ.sg_desc_offset[desc_pos]; ++ sg_word = (int *)desc_cur; ++ for (i = 0; i < sizeof(struct sg_desc) / sizeof(int); i++) ++ pr_err("%s,%d: sg_desc word[%d]=0x%x\n", ++ __func__, __LINE__, i, sg_word[i]); ++ ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void gmac_xmit_release_gso_sg(struct gmac_netdev_local *ld, ++ struct gmac_tso_desc const *tx_rq_desc, unsigned int desc_pos) ++{ ++ struct sg_desc *desc_cur = NULL; ++ int nfrags = tx_rq_desc->desc1.tx.nfrags_num; ++ unsigned int desc_offset; ++ dma_addr_t addr; ++ size_t len; ++ int i; ++ ++ desc_offset = ld->TX_BQ.sg_desc_offset[desc_pos]; ++ WARN_ON(desc_offset != ld->sg_tail); ++ desc_cur = 
ld->dma_sg_desc + desc_offset; ++ ++ addr = desc_cur->linear_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ addr |= (dma_addr_t)(desc_cur->reserv3 >> SG_DESC_HI8_OFFSET) << REG_BIT_WIDTH; ++#endif ++ len = desc_cur->linear_len; ++ dma_unmap_single(ld->dev, addr, len, DMA_TO_DEVICE); ++ for (i = 0; i < nfrags; i++) { ++ addr = desc_cur->frags[i].addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ addr |= (dma_addr_t) (desc_cur->frags[i].reserved >> SG_DESC_HI8_OFFSET) << REG_BIT_WIDTH; ++#endif ++ len = desc_cur->frags[i].size; ++ dma_unmap_page(ld->dev, addr, len, DMA_TO_DEVICE); ++ } ++} ++ ++static int gmac_xmit_release_gso(struct gmac_netdev_local *ld, ++ struct gmac_tso_desc *tx_rq_desc, unsigned int desc_pos) ++{ ++ int pkt_type; ++ unsigned int nfrags = tx_rq_desc->desc1.tx.nfrags_num; ++ dma_addr_t addr; ++ size_t len; ++ ++ if (unlikely(gmac_check_tx_err(ld, tx_rq_desc, desc_pos) < 0)) { ++#ifdef GMAC_STALL_WHEN_TX_ERR ++ /* dev_close */ ++ gmac_irq_disable_all_queue(ld); ++ gmac_hw_desc_disable(ld); ++ ++ netif_carrier_off(ld->netdev); ++ netif_stop_queue(ld->netdev); ++ ++ phy_stop(ld->phy); ++ del_timer_sync(&ld->monitor); ++ return -1; ++#endif ++ } ++ ++ if (tx_rq_desc->desc1.tx.tso_flag || (nfrags != 0) || tx_rq_desc->desc1.tx.sg_flag) ++ pkt_type = PKT_SG; ++ else ++ pkt_type = PKT_NORMAL; ++ ++ if (pkt_type == PKT_NORMAL) { ++ addr = tx_rq_desc->data_buff_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ addr |= (dma_addr_t)(tx_rq_desc->reserve_desc2 & TX_DESC_HI8_MASK) << REG_BIT_WIDTH; ++#endif ++ len = tx_rq_desc->desc1.tx.data_len; ++ dma_unmap_single(ld->dev, addr, len, DMA_TO_DEVICE); ++ } else { ++ gmac_xmit_release_gso_sg(ld, tx_rq_desc, desc_pos); ++ ld->sg_tail = (ld->sg_tail + 1) % ld->sg_count; ++ } ++ ++ return 0; ++} ++ ++static int gmac_xmit_reclaim_release(struct gmac_netdev_local *priv, ++ struct sk_buff *skb, u32 pos) ++{ ++ struct gmac_tso_desc *tso_desc = NULL; ++ dma_addr_t addr; ++ struct gmac_desc *desc = priv->TX_RQ.desc + pos; ++ ++ if (priv->tso_supported) { ++ tso_desc = (struct gmac_tso_desc *)desc; ++ return gmac_xmit_release_gso(priv, tso_desc, pos); ++ } else { ++ addr = desc->data_buff_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ addr |= (dma_addr_t)(desc->rxhash & TX_DESC_HI8_MASK) << REG_BIT_WIDTH; ++#endif ++ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE); ++ } ++ return 0; ++} ++ ++static void gmac_xmit_reclaim(struct net_device *dev) ++{ ++ struct sk_buff *skb = NULL; ++ struct gmac_netdev_local *priv = netdev_priv(dev); ++ unsigned int bytes_compl = 0; ++ unsigned int pkts_compl = 0; ++ struct cyclic_queue_info dma_info; ++ u32 i; ++ ++ spin_lock(&priv->txlock); ++ ++ /* software read */ ++ dma_info.start = dma_cnt(readl(priv->gmac_iobase + TX_RQ_RD_ADDR)); ++ /* logic write */ ++ dma_info.end = dma_cnt(readl(priv->gmac_iobase + TX_RQ_WR_ADDR)); ++ dma_info.num = CIRC_CNT(dma_info.end, dma_info.start, TX_DESC_NUM); ++ ++ for (i = 0, dma_info.pos = dma_info.start; i < dma_info.num; i++) { ++ skb = priv->tx_skb[dma_info.pos]; ++ if (unlikely(skb == NULL)) { ++ netdev_err(dev, "inconsistent tx_skb\n"); ++ break; ++ } ++ ++ if (skb != priv->TX_BQ.skb[dma_info.pos]) { ++ netdev_err(dev, "weird, tx skb[%d](%p) != skb(%p)\n", ++ dma_info.pos, priv->TX_BQ.skb[dma_info.pos], skb); ++ if (priv->TX_BQ.skb[dma_info.pos] == SKB_MAGIC) ++ goto next; ++ } ++ ++ pkts_compl++; ++ bytes_compl += skb->len; ++ if (gmac_xmit_reclaim_release(priv, skb, dma_info.pos) < 0) ++ break; ++ ++ priv->TX_BQ.skb[dma_info.pos] = NULL; ++next: ++
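/* SKB_MAGIC entry: the DMA unmap and completion accounting were skipped above; just clear the shadow pointer and free the skb */ ++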
priv->tx_skb[dma_info.pos] = NULL; ++ dev_consume_skb_any(skb); ++ dma_info.pos = dma_ring_incr(dma_info.pos, TX_DESC_NUM); ++ } ++ ++ if (dma_info.pos != dma_info.start) ++ writel(dma_byte(dma_info.pos), priv->gmac_iobase + TX_RQ_RD_ADDR); ++ ++ if ((pkts_compl != 0) || (bytes_compl != 0)) ++ netdev_completed_queue(dev, pkts_compl, bytes_compl); ++ ++ if (unlikely(netif_queue_stopped(priv->netdev)) && (pkts_compl != 0)) ++ netif_wake_queue(priv->netdev); ++ ++ spin_unlock(&priv->txlock); ++} ++ ++noinline unsigned int gmac_get_rxfifo_overcnt(struct gmac_netdev_local *priv) ++{ ++ return readl(priv->gmac_iobase + RX_OVER_FLOW_CNT); ++} ++ ++static int gmac_poll(struct napi_struct *napi, int budget) ++{ ++ struct gmac_napi *q_napi = container_of(napi, ++ struct gmac_napi, napi); ++ struct gmac_netdev_local *priv = q_napi->ndev_priv; ++ struct net_device *dev = priv->netdev; ++ int work_done = 0; ++ int task = budget; ++ u32 ints, num; ++ u32 raw_int_reg, raw_int_mask; ++ u32 val, overcnt_increment; ++ ++ dev_hold(dev); ++ if (q_napi->rxq_id) { ++ raw_int_reg = RSS_RAW_PMU_INT; ++ raw_int_mask = def_int_mask_queue((u32)q_napi->rxq_id); ++ } else { ++ raw_int_reg = RAW_PMU_INT; ++ raw_int_mask = DEF_INT_MASK; ++ } ++ ++ do { ++ if (!q_napi->rxq_id) ++ gmac_xmit_reclaim(dev); ++ num = gmac_rx(dev, task, q_napi->rxq_id); ++ work_done += num; ++ task -= num; ++ if (work_done >= budget) ++ break; ++ ++ ints = readl(priv->gmac_iobase + raw_int_reg); ++ ints &= raw_int_mask; ++ writel(ints, priv->gmac_iobase + raw_int_reg); ++ } while (ints); ++ ++ val = gmac_get_rxfifo_overcnt(priv); ++ if (priv->rx_fifo_overcnt < val) { ++ overcnt_increment = val - priv->rx_fifo_overcnt; ++ priv->rx_fifo_overcnt = val; ++ } ++ ++ if (work_done < budget) { ++ napi_complete(napi); ++ gmac_irq_enable_queue(priv, q_napi->rxq_id); ++ } ++ ++ dev_put(dev); ++ return work_done; ++} ++ ++static irqreturn_t gmac_interrupt(int irq, void *dev_id) ++{ ++ struct gmac_napi *q_napi = (struct gmac_napi *)dev_id; ++ struct gmac_netdev_local *ld = q_napi->ndev_priv; ++ u32 ints; ++ u32 raw_int_reg, raw_int_mask; ++ ++ if (gmac_queue_irq_disabled(ld, q_napi->rxq_id)) ++ return IRQ_NONE; ++ ++ if (q_napi->rxq_id) { ++ raw_int_reg = RSS_RAW_PMU_INT; ++ raw_int_mask = def_int_mask_queue((u32)q_napi->rxq_id); ++ } else { ++ raw_int_reg = RAW_PMU_INT; ++ raw_int_mask = DEF_INT_MASK; ++ } ++ ++ ints = readl(ld->gmac_iobase + raw_int_reg); ++ ints &= raw_int_mask; ++ writel(ints, ld->gmac_iobase + raw_int_reg); ++ ++ if (likely(ints || gmac_rxq_has_packets(ld, q_napi->rxq_id))) { ++ gmac_irq_disable_queue(ld, q_napi->rxq_id); ++ napi_schedule(&q_napi->napi); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++void gmac_enable_napi(struct gmac_netdev_local *priv) ++{ ++ struct gmac_napi *q_napi = NULL; ++ int i; ++ ++ if (priv == NULL) ++ return; ++ ++ for (i = 0; i < priv->num_rxqs; i++) { ++ q_napi = &priv->q_napi[i]; ++ napi_enable(&q_napi->napi); ++ } ++} ++ ++void gmac_disable_napi(struct gmac_netdev_local *priv) ++{ ++ struct gmac_napi *q_napi = NULL; ++ int i; ++ ++ if (priv == NULL) ++ return; ++ ++ for (i = 0; i < priv->num_rxqs; i++) { ++ q_napi = &priv->q_napi[i]; ++ napi_disable(&q_napi->napi); ++ } ++} ++ ++#define DEFAULT_LD_AM 0xe ++#define DEFAULT_LDO_AM 0x3 ++#define DEFAULT_R_TUNING 0x16 ++static void gmac_of_get_phy_trim_params(struct gmac_netdev_local *priv) ++{ ++ struct device_node *chiptrim_node = NULL; ++ u32 phy_trim_val; ++ u8 ld_am, ldo_am, r_tuning; ++ int ret; ++ ++ priv->trim_params = 0; ++ ++ if (!priv->internal_phy) ++ 
return; ++ ++ chiptrim_node = of_find_node_by_path("chiptrim"); ++ if (chiptrim_node == NULL) ++ return; ++ ++ ld_am = DEFAULT_LD_AM; ++ ldo_am = DEFAULT_LDO_AM; ++ r_tuning = DEFAULT_R_TUNING; ++ ++ ret = of_property_read_u32(chiptrim_node, "chiptrim0", &phy_trim_val); ++ if (ret) { ++ pr_err("%s,%d: chiptrim0 property not found\n", ++ __func__, __LINE__); ++ return; ++ } ++ ++ if (phy_trim_val) { ++ ld_am = (phy_trim_val >> 11) & 0x1f; /* 11 */ ++ ldo_am = (phy_trim_val >> 8) & 0x7; /* 8 */ ++ r_tuning = phy_trim_val & 0x3f; /* 0x3f */ ++ } ++ ++ priv->trim_params = (r_tuning << 16) | (ldo_am << 8) | ld_am; /* fixed shift 16/8 */ ++} ++ ++static int gmac_of_get_param(struct platform_device *pdev, struct gmac_netdev_local *ld) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; ++ ++ /* get auto eee */ ++ ld->autoeee = of_property_read_bool(node, "autoeee"); ++ /* get internal flag */ ++ ld->internal_phy = ++ of_property_read_bool(node, "internal-phy"); ++ if (ld->internal_phy) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) ++ ld->crg_iobase = ioremap(REG_BASE_CRG, 0x1000); ++ ld->sysctrl_iobase = ioremap(REG_BASE_SYSTEM_CTRL, 0x1000); ++#else ++ ld->crg_iobase = ioremap_nocache(REG_BASE_CRG, 0x1000); ++ ld->sysctrl_iobase = ioremap_nocache(REG_BASE_SYSTEM_CTRL, 0x1000); ++#endif ++ gmac_of_get_phy_trim_params(ld); ++ } ++ ++ /* get gpio_base and bit */ ++ of_property_read_u32(node, "phy-gpio-base", (u32 *)(&ld->gpio_base)); ++ of_property_read_u32(node, "phy-gpio-bit", &ld->gpio_bit); ++ ++ return 0; ++} ++ ++static void gmac_destroy_hw_desc_queue(struct gmac_netdev_local *priv) ++{ ++ int i; ++ ++ for (i = 0; i < QUEUE_NUMS + RSS_NUM_RXQS - 1; i++) { ++ if (priv->pool[i].desc) { ++ dma_free_coherent(priv->dev, priv->pool[i].size, ++ priv->pool[i].desc, ++ priv->pool[i].phys_addr); ++ priv->pool[i].desc = NULL; ++ } ++ } ++ ++ kfree(priv->RX_FQ.skb); ++ kfree(priv->TX_BQ.skb); ++ priv->RX_FQ.skb = NULL; ++ priv->TX_BQ.skb = NULL; ++ ++ if (priv->tso_supported) { ++ kfree(priv->TX_BQ.sg_desc_offset); ++ priv->TX_BQ.sg_desc_offset = NULL; ++ } ++ ++ kfree(priv->tx_skb); ++ priv->tx_skb = NULL; ++ ++ kfree(priv->rx_skb); ++ priv->rx_skb = NULL; ++} ++ ++static int gmac_init_desc_queue_mem(struct gmac_netdev_local *priv) ++{ ++ priv->RX_FQ.skb = kzalloc(priv->RX_FQ.count * sizeof(struct sk_buff *), GFP_KERNEL); ++ if (!priv->RX_FQ.skb) ++ return -ENOMEM; ++ ++ priv->rx_skb = kzalloc(priv->RX_FQ.count * sizeof(struct sk_buff *), GFP_KERNEL); ++ if (priv->rx_skb == NULL) ++ return -ENOMEM; ++ ++ priv->TX_BQ.skb = kzalloc(priv->TX_BQ.count * sizeof(struct sk_buff *), GFP_KERNEL); ++ if (!priv->TX_BQ.skb) ++ return -ENOMEM; ++ ++ priv->tx_skb = kzalloc(priv->TX_BQ.count * sizeof(struct sk_buff *), GFP_KERNEL); ++ if (priv->tx_skb == NULL) ++ return -ENOMEM; ++ ++ if (priv->tso_supported) { ++ priv->TX_BQ.sg_desc_offset = kzalloc(priv->TX_BQ.count * sizeof(int), GFP_KERNEL); ++ if (!priv->TX_BQ.sg_desc_offset) ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int gmac_init_hw_desc_queue(struct gmac_netdev_local *priv) ++{ ++ struct device *dev = priv->dev; ++ struct gmac_desc *virt_addr = NULL; ++ dma_addr_t phys_addr = 0; ++ int size, i; ++ ++ priv->RX_FQ.count = RX_DESC_NUM; ++ priv->RX_BQ.count = RX_DESC_NUM; ++ priv->TX_BQ.count = TX_DESC_NUM; ++ priv->TX_RQ.count = TX_DESC_NUM; ++ ++ for (i = 1; i < RSS_NUM_RXQS; i++) ++ priv->pool[BASE_QUEUE_NUMS + i].count = RX_DESC_NUM; ++ ++ for (i = 0; i < (QUEUE_NUMS + RSS_NUM_RXQS - 1); i++) { ++ size = 
priv->pool[i].count * sizeof(struct gmac_desc); ++ virt_addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); ++ if (virt_addr == NULL) ++ goto error_free_pool; ++ ++ if (memset_s(virt_addr, size, 0, size) != EOK) { ++ pr_info("gmac init hw desc queue: memset_s failed\n"); ++ goto error_free_pool; ++ } ++ priv->pool[i].size = (unsigned int)size; ++ priv->pool[i].desc = virt_addr; ++ priv->pool[i].phys_addr = phys_addr; ++ } ++ ++ if (gmac_init_desc_queue_mem(priv) == -ENOMEM) ++ goto error_free_pool; ++ ++ gmac_hw_set_desc_addr(priv); ++ ++ return 0; ++ ++error_free_pool: ++ gmac_destroy_hw_desc_queue(priv); ++ ++ return -ENOMEM; ++} ++ ++static void gmac_init_napi(struct gmac_netdev_local *priv) ++{ ++ struct gmac_napi *q_napi = NULL; ++ int i; ++ ++ for (i = 0; i < priv->num_rxqs; i++) { ++ q_napi = &priv->q_napi[i]; ++ q_napi->rxq_id = (unsigned int)i; ++ q_napi->ndev_priv = priv; ++ netif_napi_add_weight(priv->netdev, &q_napi->napi, gmac_poll, NAPI_POLL_WEIGHT); ++ } ++} ++ ++static void gmac_destroy_napi(struct gmac_netdev_local *priv) ++{ ++ struct gmac_napi *q_napi = NULL; ++ int i; ++ ++ for (i = 0; i < priv->num_rxqs; i++) { ++ q_napi = &priv->q_napi[i]; ++ netif_napi_del(&q_napi->napi); ++ } ++} ++ ++static int gmac_request_irqs(struct platform_device *pdev, ++ struct gmac_netdev_local *priv) ++{ ++ struct device *dev = priv->dev; ++ int ret; ++ int i; ++ ++ for (i = 0; i < priv->num_rxqs; i++) { ++ ret = platform_get_irq(pdev, i); ++ if (ret < 0) { ++ dev_err(dev, "No irq[%d] resource, ret=%d\n", i, ret); ++ return ret; ++ } ++ priv->irq[i] = (unsigned int)ret; ++ ++ ret = devm_request_irq(dev, priv->irq[i], gmac_interrupt, ++ IRQF_SHARED, pdev->name, &priv->q_napi[i]); ++ if (ret) { ++ dev_err(dev, "devm_request_irq failed, ret=%d\n", ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int gmac_get_clk(struct gmac_netdev_local *priv, struct platform_device *pdev) ++{ ++ int ret; ++ struct device *dev = &pdev->dev; ++ priv->pub_clk = devm_clk_get(&pdev->dev, "pub_clk"); ++ if (IS_ERR(priv->pub_clk)) ++ priv->pub_clk = NULL; ++ ++ priv->clk = devm_clk_get(&pdev->dev, GMAC_MAC_CLK_NAME); ++ if (IS_ERR(priv->clk)) { ++ dev_err(dev, "failed to get clk\n"); ++ ret = -ENODEV; ++ return ret; ++ } ++ ++ priv->macif_clk = devm_clk_get(&pdev->dev, GMAC_MACIF_CLK_NAME); ++ if (IS_ERR(priv->macif_clk)) ++ priv->macif_clk = NULL; ++ ++ return 0; ++} ++ ++static int gmac_enable_clk(struct gmac_netdev_local *priv) ++{ ++ int ret; ++ struct net_device *ndev = priv->netdev; ++ ++ if (priv->macif_clk != NULL) { ++ ret = clk_prepare_enable(priv->macif_clk); ++ if (ret < 0) { ++ netdev_err(ndev, "failed enable macif_clk %d\n", ret); ++ return ret; ++ } ++ } ++ ++ if (priv->pub_clk != NULL) { ++ ret = clk_prepare_enable(priv->pub_clk); ++ if (ret < 0) { ++ netdev_err(ndev, "failed to enable pub clk %d\n", ret); ++ return ret; ++ } ++ } ++ ++ if (priv->clk != NULL) { ++ ret = clk_prepare_enable(priv->clk); ++ if (ret < 0) { ++ netdev_err(ndev, "failed to enable clk %d\n", ret); ++ return ret; ++ } ++ } ++ return 0; ++} ++ ++static void gmac_disable_clk(struct gmac_netdev_local *priv) ++{ ++ if (priv->macif_clk != NULL) ++ clk_disable_unprepare(priv->macif_clk); ++ if (priv->pub_clk != NULL) ++ clk_disable_unprepare(priv->pub_clk); ++ if (priv->clk != NULL) ++ clk_disable_unprepare(priv->clk); ++} ++ ++static int gmac_of_get_resource(struct platform_device *pdev, ++ struct gmac_netdev_local *priv) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res = NULL; ++ int ret; ++ 
++ res = platform_get_resource(pdev, IORESOURCE_MEM, MEM_GMAC_IOBASE); ++ priv->gmac_iobase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(priv->gmac_iobase)) { ++ ret = PTR_ERR(priv->gmac_iobase); ++ return ret; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, MEM_MACIF_IOBASE); ++ priv->macif_base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(priv->macif_base)) { ++ ret = PTR_ERR(priv->macif_base); ++ return ret; ++ } ++ ++ /* only for some chips, to fix the AXI bus burst and outstanding config */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, MEM_AXI_BUS_CFG_IOBASE); ++ priv->axi_bus_cfg_base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(priv->axi_bus_cfg_base)) ++ priv->axi_bus_cfg_base = NULL; ++ ++ priv->port_rst = devm_reset_control_get(dev, GMAC_PORT_RST_NAME); ++ if (IS_ERR(priv->port_rst)) { ++ ret = PTR_ERR(priv->port_rst); ++ return ret; ++ } ++ ++ priv->macif_rst = devm_reset_control_get(dev, GMAC_MACIF_RST_NAME); ++ if (IS_ERR(priv->macif_rst)) { ++ ret = PTR_ERR(priv->macif_rst); ++ return ret; ++ } ++ ++ priv->phy_rst = devm_reset_control_get(dev, GMAC_PHY_RST_NAME); ++ if (IS_ERR(priv->phy_rst)) ++ priv->phy_rst = NULL; ++ ++ ret = gmac_get_clk(priv, pdev); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int gmac_netdev_preinit(struct gmac_netdev_local *priv) ++{ ++ int ret; ++ struct net_device *ndev = priv->netdev; ++ ++ gmac_init_napi(priv); ++ spin_lock_init(&priv->rxlock); ++ spin_lock_init(&priv->txlock); ++ spin_lock_init(&priv->pmtlock); ++ ++ /* init netdevice */ ++ ndev->irq = priv->irq[0]; ++ ndev->watchdog_timeo = 3 * HZ; /* 3HZ */ ++ gmac_set_netdev_ops(ndev); ++ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ++ gmac_set_ethtool_ops(ndev); ++ ++ if (priv->has_rxhash_cap) ++ ndev->hw_features |= NETIF_F_RXHASH; ++ if (priv->has_rss_cap) ++ ndev->hw_features |= NETIF_F_NTUPLE; ++ if (priv->tso_supported) ++ ndev->hw_features |= NETIF_F_SG | ++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_TSO | NETIF_F_TSO6; ++ ++#if defined(CONFIG_GMAC_RXCSUM) ++ ndev->hw_features |= NETIF_F_RXCSUM; ++ gmac_enable_rxcsum_drop(priv, true); ++#endif ++ ++ ndev->features |= ndev->hw_features; ++ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_GSO; ++ ndev->vlan_features |= ndev->features; ++ ++ timer_setup(&priv->monitor, gmac_monitor_func, 0); ++ ++ device_set_wakeup_capable(priv->dev, 1); ++ /* ++ * when can we let the phy power down? ++ * In some modes we don't want the phy powered down, ++ * so set wakeup enable all the time ++ */ ++ device_set_wakeup_enable(priv->dev, 1); ++ ++ priv->mac_wol_enable = false; ++ priv->phy_wol_enable = false; ++ ++ priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); ++ ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ ret = dma_set_mask_and_coherent(priv->dev, DMA_BIT_MASK(64)); /* 64: mask length */ ++ if (ret) { ++ pr_err("dma set mask bit 64 failed! ret=%d", ret); ++ gmac_destroy_napi(priv); ++ return ret; ++ } ++#else ++ ret = dma_set_mask_and_coherent(priv->dev, DMA_BIT_MASK(32)); /* 32: mask length */ ++ if (ret) { ++ pr_err("dma set mask bit 32 failed! ret=%d", ret); ++ gmac_destroy_napi(priv); ++ return ret; ++ } ++#endif ++ ++ return 0; ++} ++ ++static int gmac_dev_connect_phy(struct gmac_netdev_local *priv) ++{ ++ struct net_device *ndev = priv->netdev; ++ ++ /* phy fixup here?? is there another way???
*/ ++ gmac_phy_register_fixups(priv); ++ /* Unable to handle kernel paging request at virtual address 08ffffff80052fc0 */ ++ priv->phy = of_phy_connect(ndev, priv->phy_node, ++ &gmac_adjust_link, 0, priv->phy_mode); ++ if (priv->phy == NULL || priv->phy->drv == NULL) { ++ pr_info("No eth phy found\n"); ++ return -ENODEV; ++ } ++ ++ /* If the phy_id is all zero and not fixed link, there is no device there */ ++ if ((priv->phy->phy_id == 0) && !priv->fixed_link) { ++ pr_info("phy %d not found\n", priv->phy->mdio.addr); ++ return -ENODEV; ++ } ++ ++ pr_info("attached PHY %d to driver %s, PHY_ID=0x%x\n", ++ priv->phy->mdio.addr, priv->phy->drv->name, priv->phy->phy_id); ++ ++ /* Stop advertising 1000BASE capability if the interface is not RGMII */ ++ if ((priv->phy_mode == PHY_INTERFACE_MODE_MII) || ++ (priv->phy_mode == PHY_INTERFACE_MODE_RMII)) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) ++ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, priv->phy->advertising); ++ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, priv->phy->advertising); ++ /* ++ * The internal FE phy's reg BMSR bit8 is wrong and makes the kernel ++ * believe it has 1000base capability, so fix it here ++ */ ++ if (priv->phy->phy_id == VENDOR_PHY_ID_FESTAV200) { ++ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, priv->phy->supported); ++ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, priv->phy->supported); ++ } ++#else ++ priv->phy->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); ++ /* The internal FE phy's reg BMSR bit8 is wrong and makes the kernel ++ * believe it has 1000base capability, so fix it here ++ */ ++ if (priv->phy->phy_id == VENDOR_PHY_ID_FESTAV200) ++ priv->phy->supported &= ~(ADVERTISED_1000baseT_Full | ++ ADVERTISED_1000baseT_Half); ++#endif ++ } ++ ++ gmac_set_flow_ctrl_args(priv); ++ gmac_set_flow_ctrl_params(priv); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) ++ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, priv->phy->supported); ++ if (priv->flow_ctrl) ++ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, priv->phy->advertising); ++#else ++ priv->phy->supported |= ADVERTISED_Pause; ++ if (priv->flow_ctrl) ++ priv->phy->advertising |= ADVERTISED_Pause; ++#endif ++ if (priv->autoeee) ++ init_autoeee(priv); ++ ++ return 0; ++} ++ ++static void gmac_set_hw_cap(struct gmac_netdev_local *priv) ++{ ++ priv->tso_supported = has_tso_cap(priv); ++ priv->has_rxhash_cap = has_rxhash_cap(priv); ++ priv->has_rss_cap = has_rss_cap(priv); ++ ++ gmac_set_rss_cap(priv); ++ gmac_get_rss_key(priv); ++ if (priv->has_rss_cap) { ++ priv->rss_info.ind_tbl_size = RSS_INDIRECTION_TABLE_SIZE; ++ } ++ ++ if (priv->has_rxhash_cap) { ++ priv->rss_info.hash_cfg = DEF_HASH_CFG; ++ gmac_config_hash_policy(priv); ++ } ++} ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) ++static void gmac_of_get_macaddr(struct gmac_netdev_local *priv, struct platform_device *pdev) ++{ ++ char mac_addr[64] = {0}; ++ struct net_device *ndev = priv->netdev; ++ struct device_node *node = pdev->dev.of_node; ++ int ret; ++ ++ ret = of_get_mac_address(node, mac_addr); ++ if (ret == 0) ++ ether_addr_copy((u8*)ndev->dev_addr, mac_addr); ++ else ++ eth_hw_addr_random(ndev); ++ ++ gmac_hw_set_mac_addr(priv); ++} ++#else ++static void gmac_of_get_macaddr(struct gmac_netdev_local *priv, struct platform_device *pdev) ++{ ++ const char *mac_addr = NULL; ++ struct net_device *ndev = priv->netdev; ++ struct device_node *node = pdev->dev.of_node; ++ ++ mac_addr = of_get_mac_address(node); ++ if (!IS_ERR_OR_NULL(mac_addr)) ++ ether_addr_copy(ndev->dev_addr, mac_addr); ++ else ++
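/* no usable MAC address in DT: fall back to a random, locally administered address */ ++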
eth_hw_addr_random(ndev); ++ ++ gmac_hw_set_mac_addr(priv); ++} ++#endif ++ ++/* board independent func */ ++static void gmac_hw_internal_phy_reset(struct gmac_netdev_local const *priv) ++{ ++#ifdef CONFIG_GMAC_HAS_INTERNAL_PHY ++ unsigned int v; ++ ++ /* disable MDCK clock to make sure the FEPHY reset succeeds */ ++ clk_disable_unprepare(priv->clk); ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v &= ~BIT_FEPHY_CLK; ++ writel(v, priv->crg_iobase + REG_CRG_FEPHY); /* disable clk */ ++ ++ v = readl(priv->sysctrl_iobase + REG_FEPHY_REG0); ++ v |= (BIT_LDO_EN | BIT_LDO_RSTN); ++ v &= ~BIT_LDO_ENZ; ++ writel(v, priv->sysctrl_iobase + REG_FEPHY_REG0); ++ ++ v = readl(priv->sysctrl_iobase + REG_FEPHY_REG1); ++ v &= ~BIT_IDDQ_MODE; ++ writel(v, priv->sysctrl_iobase + REG_FEPHY_REG1); ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v |= BIT_FEPHY_CLK; /* use 25MHz clock, enable clk */ ++ writel((u32)v, priv->crg_iobase + REG_CRG_FEPHY); ++ ++ udelay(10); /* 10:Function arguments */ ++ ++ /* suppose the internal phy can only be used as mac0's phy */ ++ v = readl(priv->sysctrl_iobase + REG_FEPHY_REG1); ++ v &= ~BIT_MASK_FEPHY_ADDR; ++ v |= (priv->phy_addr & BIT_MASK_FEPHY_ADDR); ++ writel(v, priv->sysctrl_iobase + REG_FEPHY_REG1); ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v |= BIT_FEPHY_RST; /* set reset bit */ ++ writel((u32)v, priv->crg_iobase + REG_CRG_FEPHY); ++ ++ udelay(10); /* 10:Function arguments */ ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v &= ~BIT_FEPHY_RST; /* clear reset bit */ ++ writel((u32)v, priv->crg_iobase + REG_CRG_FEPHY); ++ ++ msleep(20); /* 20:delay at least 15ms for MDIO operation */ ++ ++ clk_prepare_enable(priv->clk); ++#endif ++} ++ ++#define RESET_DATA 1 ++#define GPIO_DIR 0x400 ++ ++/* board independent func */ ++static void gmac_hw_external_phy_reset(struct gmac_netdev_local const *priv) ++{ ++ if (priv->phy_rst != NULL) { ++ /* write 0 to cancel reset */ ++ reset_control_deassert(priv->phy_rst); ++ msleep(50); /* wait 50ms */ ++ ++ /* XX use CRG register to reset phy */ ++ /* RST_BIT, write 0 to reset phy, write 1 to cancel reset */ ++ reset_control_assert(priv->phy_rst); ++ ++ /* ++ * delay some time to ensure reset ok, ++ * this depends on PHY hardware feature ++ */ ++ msleep(50); /* wait 50ms */ ++ ++ /* write 0 to cancel reset */ ++ reset_control_deassert(priv->phy_rst); ++ /* delay some time to ensure later MDIO access */ ++ msleep(170); /* wait 170ms */ ++ } else if (priv->gpio_base) { ++#ifdef GMAC_RESET_PHY_BY_GPIO ++ /* use gpio to control mac's phy reset */ ++ void __iomem *gpio_base; ++ u32 gpio_bit; ++ u32 v; ++ ++ gpio_base = (void __iomem *)IO_ADDRESS( ++ (unsigned int)priv->gpio_base); ++ gpio_bit = priv->gpio_bit; ++ ++ /* config gpio[x] dir to output */ ++ v = readb(gpio_base + GPIO_DIR); ++ v |= (1 << gpio_bit); ++ writeb(v, gpio_base + GPIO_DIR); ++ ++ /* set gpio[x] to reset, then delay 200ms */ ++ writeb(RESET_DATA << gpio_bit, gpio_base + (4 << gpio_bit)); /* 4 */ ++ msleep(20); /* 20 ms */ ++ /* then cancel reset, and delay 200ms again */ ++ writeb((!RESET_DATA) << gpio_bit, gpio_base + (4 << gpio_bit)); /* 4 */ ++ msleep(20); /* 20 ms */ ++ writeb(RESET_DATA << gpio_bit, gpio_base + (4 << gpio_bit)); /* 4 */ ++ ++ /* add some delay in case mdio can't be accessed yet!
*/ ++ msleep(30); /* 30 ms */ ++#endif ++ } ++} ++ ++/* board independent func */ ++static void gmac_hw_phy_reset(struct gmac_netdev_local *priv) ++{ ++ if (priv->internal_phy) ++ gmac_hw_internal_phy_reset(priv); ++ else ++ gmac_hw_external_phy_reset(priv); ++} ++ ++static int gmac_of_get_phy(struct gmac_netdev_local *priv, struct platform_device *pdev) ++{ ++ int ret; ++ struct net_device *ndev = priv->netdev; ++ struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) ++ ret = of_get_phy_mode(node, &priv->phy_mode); ++ if (ret < 0) { ++ netdev_err(ndev, "phy-mode not found\n"); ++ return ret; ++ } ++#else ++ priv->phy_mode = (phy_interface_t)of_get_phy_mode(node); ++ if (priv->phy_mode < 0) { ++ netdev_err(ndev, "phy-mode not found\n"); ++ return -EINVAL; ++ } ++#endif ++ ++ priv->phy_node = of_parse_phandle(node, "phy-handle", 0); ++ if (priv->phy_node == NULL) { ++ /* check if a fixed-link is defined in device-tree */ ++ if (of_phy_is_fixed_link(node)) { ++ ret = of_phy_register_fixed_link(node); ++ if (ret < 0) { ++ netdev_err(ndev, "cannot register fixed PHY %d\n", ret); ++ return ret; ++ } ++ ++ /* ++ * In the case of a fixed PHY, the DT node associated ++ * to the PHY is the Ethernet MAC DT node. ++ */ ++ priv->phy_node = of_node_get(node); ++ priv->fixed_link = true; ++ } else { ++ netdev_err(ndev, "phy-handle not found\n"); ++ ret = -EINVAL; ++ return ret; ++ } ++ } ++ return 0; ++} ++ ++static void gmac_verify_flow_ctrl_args(void) ++{ ++#if defined(CONFIG_TX_FLOW_CTRL_SUPPORT) ++ flow_ctrl_en |= FLOW_TX; ++#endif ++#if defined(CONFIG_RX_FLOW_CTRL_SUPPORT) ++ flow_ctrl_en |= FLOW_RX; ++#endif ++ if (tx_flow_ctrl_active_threshold < FC_ACTIVE_MIN || ++ tx_flow_ctrl_active_threshold > FC_ACTIVE_MAX) ++ tx_flow_ctrl_active_threshold = FC_ACTIVE_DEFAULT; ++ ++ if (tx_flow_ctrl_deactive_threshold < FC_DEACTIVE_MIN || ++ tx_flow_ctrl_deactive_threshold > FC_DEACTIVE_MAX) ++ tx_flow_ctrl_deactive_threshold = FC_DEACTIVE_DEFAULT; ++ ++ if (tx_flow_ctrl_active_threshold >= tx_flow_ctrl_deactive_threshold) { ++ tx_flow_ctrl_active_threshold = FC_ACTIVE_DEFAULT; ++ tx_flow_ctrl_deactive_threshold = FC_DEACTIVE_DEFAULT; ++ } ++ ++ if (tx_flow_ctrl_pause_time < 0 || ++ tx_flow_ctrl_pause_time > FC_PAUSE_TIME_MAX) ++ tx_flow_ctrl_pause_time = FC_PAUSE_TIME_DEFAULT; ++ ++ if (tx_flow_ctrl_pause_interval < 0 || ++ tx_flow_ctrl_pause_interval > FC_PAUSE_TIME_MAX) ++ tx_flow_ctrl_pause_interval = FC_PAUSE_INTERVAL_DEFAULT; ++ ++ /* ++ * The pause interval should not be bigger than the pause time, ++ * but should not be too much smaller either, to avoid sending too many pause frames.
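++ * For example, with the default pause time of 0xFFFF (CONFIG_TX_FLOW_CTRL_PAUSE_TIME), ++ * the check below pulls any interval outside [0x7FFF, 0xFFFF], i.e. ++ * [pause_time / 2, pause_time], back up to the pause time itself.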
++ */ ++ if ((tx_flow_ctrl_pause_interval > tx_flow_ctrl_pause_time) || ++ (tx_flow_ctrl_pause_interval < ((unsigned int)tx_flow_ctrl_pause_time >> 1))) ++ tx_flow_ctrl_pause_interval = tx_flow_ctrl_pause_time; ++} ++ ++static struct gmac_netdev_local *gmac_alloc_netdev(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; ++ struct net_device *ndev = NULL; ++ struct gmac_netdev_local *priv = NULL; ++ int num_rxqs; ++ ++ gmac_verify_flow_ctrl_args(); ++ ++ if ((of_device_is_compatible(node, "huanglong,gmac-v5") == 0) || ++ (of_device_is_compatible(node, "vendor,gmac-v5") == 0)) ++ num_rxqs = RSS_NUM_RXQS; ++ else ++ num_rxqs = 1; ++ ++ ndev = alloc_etherdev_mqs(sizeof(struct gmac_netdev_local), 1, ++ num_rxqs); ++ if (ndev == NULL) ++ return NULL; ++ ++ platform_set_drvdata(pdev, ndev); ++ SET_NETDEV_DEV(ndev, dev); ++ ++ priv = netdev_priv(ndev); ++ priv->dev = dev; ++ priv->netdev = ndev; ++ priv->num_rxqs = num_rxqs; ++ priv->coalesce.rx_timeout = RX_BQ_INT_TIMEOUT_THRESHOLD; ++ priv->coalesce.tx_timeout = TX_RQ_INT_TIMEOUT_THRESHOLD; ++ priv->coalesce.rx_frames = RX_BQ_INT_THRESHOLD; ++ priv->coalesce.tx_frames = TX_RQ_INT_THRESHOLD; ++ ++ return priv; ++} ++ ++static int gmac_of_parse(struct platform_device *pdev, struct gmac_netdev_local *priv) ++{ ++ int ret; ++ ++ ret = gmac_of_get_param(pdev, priv); ++ if (ret < 0) ++ return ret; ++ ++ ret = gmac_of_get_resource(pdev, priv); ++ if (ret) ++ return ret; ++ ++ ret = gmac_of_get_phy(priv, pdev); ++ if (ret) ++ return ret; ++ ++ gmac_of_get_macaddr(priv, pdev); ++ ++ return 0; ++} ++ ++static int gmac_register_netdev(struct gmac_netdev_local *priv) ++{ ++ int ret; ++ struct net_device *ndev = priv->netdev; ++ ++ ret = gmac_netdev_preinit(priv); ++ if (ret) ++ return ret; ++ ++ /* init hw desc queue */ ++ ret = gmac_init_hw_desc_queue(priv); ++ if (ret) ++ goto _out_netdev_preinit; ++ ++ if (priv->tso_supported) { ++ ret = gmac_init_sg_desc_queue(priv); ++ if (ret) ++ goto _out_hw_desc_queue; ++ } ++ ++ /* register netdevice */ ++ ret = register_netdev(ndev); ++ if (ret) { ++ pr_err("register_netdev failed!"); ++ goto _out_sg_desc_queue; ++ } ++ ++ /* ++ * Reset the queue here so that BQL is only reset once. ++ * If we put netdev_reset_queue() in gmac_net_open(), ++ * BQL would be reset on every ifconfig eth0 down/up, ++ * but the tx ring is not cleared beforehand. ++ * As a result, the NAPI poll would call netdev_completed_queue() ++ * and BQL would throw a bug.
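++ * (A reset wipes BQL's record of packets still pending in the tx ring, ++ * so completing them later would report more bytes than were ever ++ * "sent" and trip BQL's internal consistency check.)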
++ */ ++ netdev_reset_queue(ndev); ++ ++ /* config PHY power down to save power */ ++ phy_suspend(priv->phy); ++ ++ clk_disable_unprepare(priv->clk); ++ if (priv->macif_clk != NULL) ++ clk_disable_unprepare(priv->macif_clk); ++ ++ pr_info("ETH: %s, phy_addr=%d\n", ++ phy_modes(priv->phy_mode), priv->phy->mdio.addr); ++ ++ return ret; ++ ++_out_sg_desc_queue: ++ if (priv->tso_supported) ++ gmac_destroy_sg_desc_queue(priv); ++_out_hw_desc_queue: ++ gmac_destroy_hw_desc_queue(priv); ++_out_netdev_preinit: ++ gmac_destroy_napi(priv); ++ ++ return ret; ++} ++ ++#ifndef CONFIG_MDIO_BSP_GEMAC ++static int gmac_mdio_register(struct platform_device *pdev, struct gmac_netdev_local *priv) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; ++ struct mii_bus *bus = NULL; ++ int ret; ++ ++ if (!of_phy_is_fixed_link(node)) { ++ bus = mdiobus_alloc(); ++ if (bus == NULL) { ++ ret = -ENOMEM; ++ return ret; ++ } ++ ++ bus->priv = priv; ++ bus->name = "gmac_mii_bus"; ++ bus->read = gmac_mdio_read; ++ bus->write = gmac_mdio_write; ++ bus->parent = dev; ++ ret = snprintf_s(bus->id, MII_BUS_ID_SIZE, MII_BUS_ID_SIZE - 1, "%s-mii", dev_name(dev)); ++ if (ret == -1) { ++ pr_err("gmac mdio register: snprintf_s failed\n"); ++ return ret; ++ } ++ priv->bus = bus; ++ if (priv->internal_phy) { ++ usleep_range(5000, 8000); /* 5000,8000:function arguments */ ++ gmac_internal_fephy_trim(bus, priv->phy_addr, priv->trim_params); ++ } ++ ++ ret = of_mdiobus_register(bus, node); ++ if (ret) ++ goto err_free_mdio; ++ } ++ return 0; ++ ++err_free_mdio: ++ if (priv->bus != NULL) ++ mdiobus_free(priv->bus); ++ ++ return ret; ++} ++#endif ++ ++static void gmac_internal_phy_clk_disable(struct gmac_netdev_local const *priv); ++ ++static int gmac_dev_probe(struct platform_device *pdev) ++{ ++ struct gmac_netdev_local *priv = NULL; ++ int ret; ++ ++ priv = gmac_alloc_netdev(pdev); ++ if (priv == NULL) ++ return -ENOMEM; ++ ++ ret = gmac_of_parse(pdev, priv); ++ if (ret) ++ goto out_free_netdev; ++ ++ /* enable the macif clk earlier than the PHY hardware reset to make ++ * sure MDIO access to the PHY works. ++ */ ++ ret = gmac_enable_clk(priv); ++ if (ret) ++ goto out_phy_node; ++ ++ gmac_mac_core_reset(priv); ++ /* ++ * The phy reset should happen earlier than "of_mdiobus_register", ++ * because "of_mdiobus_register" will read PHY registers over MDIO.
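++ * The resulting probe order is: enable clocks -> reset MAC core -> ++ * reset PHY -> register MDIO bus -> read hw capabilities -> init the ++ * hw controller -> connect the PHY -> request irqs -> register the netdev.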
++ */ ++ gmac_hw_phy_reset(priv); ++#ifndef CONFIG_MDIO_BSP_GEMAC ++ ret = gmac_mdio_register(pdev, priv); ++ if (ret) ++ goto out_clk_disable; ++#endif ++ gmac_set_hw_cap(priv); ++ ++ /* init hw controller */ ++ gmac_hw_init(priv); ++ ++ ret = gmac_dev_connect_phy(priv); ++ if (ret) ++ goto out_phy_disconnect; ++ ++ ret = gmac_request_irqs(pdev, priv); ++ if (ret) ++ goto out_phy_disconnect; ++ ++ ret = gmac_register_netdev(priv); ++ if (ret) ++ goto out_phy_disconnect; ++ ++ return ret; ++ ++out_phy_disconnect: ++ if (priv->phy != NULL) ++ phy_disconnect(priv->phy); ++ if (priv->bus != NULL) { ++ mdiobus_unregister(priv->bus); ++ mdiobus_free(priv->bus); ++ } ++out_clk_disable: ++ gmac_disable_clk(priv); ++out_phy_node: ++ if (priv->phy_node != NULL) ++ of_node_put(priv->phy_node); ++out_free_netdev: ++ if (priv->internal_phy) { ++ gmac_internal_phy_clk_disable(priv); ++ iounmap(priv->crg_iobase); ++ iounmap(priv->sysctrl_iobase); ++ } ++ if (priv->netdev != NULL) ++ free_netdev(priv->netdev); ++ ++ return ret; ++} ++ ++static int gmac_dev_remove(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ /* stop the gmac and free all resources */ ++ del_timer_sync(&priv->monitor); ++ gmac_destroy_napi(priv); ++ ++ unregister_netdev(ndev); ++ ++ gmac_reclaim_rx_tx_resource(priv); ++ gmac_free_rx_skb(priv); ++ gmac_free_tx_skb(priv); ++ ++ if (priv->tso_supported) ++ gmac_destroy_sg_desc_queue(priv); ++ gmac_destroy_hw_desc_queue(priv); ++ ++ phy_disconnect(priv->phy); ++ of_node_put(priv->phy_node); ++ if (priv->bus != NULL) { ++ mdiobus_unregister(priv->bus); ++ mdiobus_free(priv->bus); ++ } ++ clk_disable_unprepare(priv->pub_clk); ++ /* unregister the fixups before free_netdev(): priv lives inside ndev */ ++ gmac_phy_unregister_fixups(priv); ++ free_netdev(ndev); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static void gmac_disable_irq(struct gmac_netdev_local *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_rxqs; i++) ++ disable_irq(priv->irq[i]); ++} ++ ++static void gmac_enable_irq(struct gmac_netdev_local *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_rxqs; i++) ++ enable_irq(priv->irq[i]); ++} ++ ++static void gmac_internal_phy_clk_disable(struct gmac_netdev_local const *priv) ++{ ++#ifdef CONFIG_GMAC_HAS_INTERNAL_PHY ++ u32 v; ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v &= ~BIT_FEPHY_CLK; ++ v |= BIT_FEPHY_RST; ++ writel(v, priv->crg_iobase + REG_CRG_FEPHY); /* inside fephy clk disable */ ++#endif ++} ++ ++static void gmac_hw_all_clk_disable(struct gmac_netdev_local *priv) ++{ ++ if (netif_running(priv->netdev)) { ++ clk_disable_unprepare(priv->macif_clk); ++ clk_disable_unprepare(priv->clk); ++ } ++ ++ if (priv->internal_phy && !priv->phy_wol_enable) ++ gmac_internal_phy_clk_disable(priv); ++} ++ ++#ifdef GMAC_SET_PHY_SPEED_10M_WHEN_SUSPEND ++int gmac_config_phy_aneg_10m(struct phy_device *phydev) ++{ ++ int adv, bmsr; ++ int err; ++ ++ /* Setup standard advertisement */ ++ adv = phy_read(phydev, MII_ADVERTISE); ++ if (adv < 0) ++ return adv; ++ ++ adv = (unsigned int)adv & ~(ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_100BASE4); ++ ++ err = phy_write(phydev, MII_ADVERTISE, adv); ++ if (err < 0) ++ return err; ++ ++ bmsr = phy_read(phydev, MII_BMSR); ++ if (bmsr < 0) ++ return bmsr; ++ ++ /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all ++ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a ++ * logical 1.
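++ * Hence, when BMSR_ESTATEN reads as 0 the PHY has no extended status, ++ * so there is no gigabit advertisement (MII_CTRL1000) to clear and ++ * that step is skipped below.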
++ */ ++ if (!((unsigned int)bmsr & BMSR_ESTATEN)) ++ return 0; ++ ++ /* Configure gigabit if it's supported */ ++ adv = phy_read(phydev, MII_CTRL1000); ++ if (adv < 0) ++ return adv; ++ ++ adv = (unsigned int)adv & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); ++ err = phy_write(phydev, MII_CTRL1000, adv); ++ if (err < 0) ++ return err; ++ ++ err = genphy_restart_aneg(phydev); ++ if (err < 0) ++ return err; ++ ++ return 0; ++} ++#endif /* GMAC_SET_PHY_SPEED_10M_WHEN_SUSPEND */ ++ ++int gmac_dev_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ struct phy_driver *phydrv = to_phy_driver(priv->phy->mdio.dev.driver); ++ ++ gmac_disable_irq(priv); ++ /* ++ * If Wake on LAN is supported, we should not disconnect the phy, ++ * because that would call phy_suspend and power down the phy. ++ */ ++ if (!priv->do_pm_s4 && !priv->mac_wol_enable) ++ phy_disconnect(priv->phy); ++ del_timer_sync(&priv->monitor); ++ /* ++ * If we suspend when the netif is not up, napi_disable would run into ++ * a dead loop and dpm_drv_timeout would give a warning. ++ */ ++ if (netif_running(ndev)) ++ gmac_disable_napi(priv); ++ netif_device_detach(ndev); ++ ++ netif_carrier_off(ndev); ++ ++ /* ++ * If the netdev is down, the MAC clock is disabled. ++ * So if we want to reclaim MAC rx and tx resources, ++ * we must first enable the MAC clock and then disable it. ++ */ ++ if (!netif_running(ndev)) ++ clk_prepare_enable(priv->clk); ++ ++ gmac_reclaim_rx_tx_resource(priv); ++ ++ if (priv->phy_wol_enable) { ++ if (priv->phy->drv && phydrv->suspend) ++ phydrv->suspend(priv->phy); ++ } ++ ++#ifdef GMAC_SET_PHY_SPEED_10M_WHEN_SUSPEND ++ if (priv->phy_wol_enable || priv->mac_wol_enable) { ++ gmac_config_phy_aneg_10m(priv->phy); ++ gmac_config_port(ndev, SPEED_10, DUPLEX_FULL); ++ priv->phy->speed = SPEED_10; ++ } ++#endif ++ ++ if (!netif_running(ndev)) ++ clk_disable_unprepare(priv->clk); ++ ++ pmt_enter(priv); ++ ++ if (!priv->mac_wol_enable) ++ gmac_hw_all_clk_disable(priv); ++ ++ return 0; ++} ++EXPORT_SYMBOL(gmac_dev_suspend); ++ ++static void gmac_internal_phy_clk_enable(struct gmac_netdev_local const *priv) ++{ ++#ifdef CONFIG_GMAC_HAS_INTERNAL_PHY ++ u32 v; ++ ++ v = readl(priv->crg_iobase + REG_CRG_FEPHY); ++ v |= BIT_FEPHY_CLK; ++ writel(v, priv->crg_iobase + REG_CRG_FEPHY); /* inside fephy clk enable */ ++#endif ++} ++ ++static void gmac_hw_all_clk_enable(struct gmac_netdev_local *priv) ++{ ++ if (priv->internal_phy) ++ gmac_internal_phy_clk_enable(priv); ++ ++ if (netif_running(priv->netdev)) { ++ clk_prepare_enable(priv->macif_clk); ++ clk_prepare_enable(priv->clk); ++ } ++} ++ ++int gmac_dev_resume(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ int ret; ++ ++ /* make sure MAC_PUB_CLK is always on, ++ * even if suspend/resume powers down the CRG and ++ * clears all the GSF clock bits. */ ++ clk_disable_unprepare(priv->pub_clk); ++ clk_prepare_enable(priv->pub_clk); ++ ++ /* If we support Wake on LAN, we don't call clk_disable. ++ * But on resume, U-Boot may have turned off the mac clock and reset the phy ++ * by rewriting the mac CRG register. ++ * So we first call clk_disable, and then clk_enable. ++ */ ++ if (priv->mac_wol_enable) ++ gmac_hw_all_clk_disable(priv); ++ ++ gmac_hw_all_clk_enable(priv); ++ ++ /* ++ * If the netdev is down, the MAC clock is disabled.
++ * So if we want to restart the MAC and re-initialize it, ++ * we must first enable the MAC clock and then disable it. ++ */ ++ if (!netif_running(ndev)) { ++ clk_prepare_enable(priv->clk); ++ clk_prepare_enable(priv->macif_clk); ++ } ++ ++ /* restart hw engine now */ ++ gmac_mac_core_reset(priv); ++ ++ /* internal FE_PHY: enable clk and reset */ ++ if (!priv->do_pm_s4_thaw) ++ gmac_hw_phy_reset(priv); ++ if (priv->internal_phy) { ++ usleep_range(5000, 8000); /* 5000,8000:function arguments */ ++ gmac_internal_fephy_trim(priv->bus, priv->phy_addr, priv->trim_params); ++ } ++ ++ /* power on gmac */ ++ gmac_restart(priv); ++ ++ /* ++ * If WoL is supported, we didn't disconnect the phy. ++ * But on resume we reset the PHY, so we want to ++ * call phy_connect so that the phy_fixup is executed. ++ * This is important for the internal PHY fixup. ++ */ ++ if (!priv->do_pm_s4_thaw) { ++ if (priv->do_pm_s4 || priv->mac_wol_enable) ++ phy_disconnect(priv->phy); ++ ret = phy_connect_direct(ndev, priv->phy, gmac_adjust_link, priv->phy_mode); ++ if (ret) ++ return ret; ++ } ++ ++ /* ++ * If we suspend and resume when the net device is down, ++ * some operations are unnecessary. ++ */ ++ if (netif_running(ndev)) { ++ priv->monitor.expires = jiffies + GMAC_MONITOR_TIMER; ++ mod_timer(&priv->monitor, priv->monitor.expires); ++ priv->old_link = 0; ++ priv->old_speed = SPEED_UNKNOWN; ++ priv->old_duplex = DUPLEX_UNKNOWN; ++ } ++ if (netif_running(ndev)) ++ gmac_enable_napi(priv); ++ netif_device_attach(ndev); ++ if (!priv->do_pm_s4_thaw && netif_running(ndev)) ++ phy_start(priv->phy); ++ gmac_enable_irq(priv); ++ ++ pmt_exit(priv); ++ device_set_wakeup_enable(priv->dev, false); ++ ++ if (!netif_running(ndev)) { ++ clk_disable_unprepare(priv->macif_clk); ++ clk_disable_unprepare(priv->clk); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(gmac_dev_resume); ++ ++ ++static void gmac_dev_shutdown(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ struct phy_driver *phydrv = to_phy_driver(priv->phy->mdio.dev.driver); ++ ++#ifdef GMAC_SET_PHY_SPEED_10M_WHEN_SUSPEND ++ if (system_state == SYSTEM_POWER_OFF && priv->phy->speed != SPEED_10) { ++ gmac_config_phy_aneg_10m(priv->phy); ++ gmac_config_port(ndev, SPEED_10, DUPLEX_FULL); ++ } ++#endif /* GMAC_SET_PHY_SPEED_10M_WHEN_SUSPEND */ ++ /* Enable WOL when WOL is supported, otherwise configure the phy to the power-down state */ ++ if (priv->phy->drv && phydrv->suspend) ++ phydrv->suspend(priv->phy); ++} ++#endif ++ ++static const struct of_device_id gmac_of_match[] = { ++ { .compatible = "vendor,gmac-v4", }, ++ { .compatible = "vendor,gmac-v5", }, ++ { .compatible = "huanglong,gmac-v5", }, ++ { }, ++}; ++ ++MODULE_DEVICE_TABLE(of, gmac_of_match); ++ ++#ifdef CONFIG_PM_SLEEP ++static int gmac_dev_pm_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ return gmac_dev_suspend(pdev, dev->power.power_state); ++} ++ ++static int gmac_dev_pm_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ return gmac_dev_resume(pdev); ++} ++ ++static int gmac_dev_pm_freeze(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ priv->do_pm_s4 = true; ++ return gmac_dev_suspend(pdev, dev->power.power_state); ++} ++ ++static int gmac_dev_pm_thaw(struct device *dev) ++{ ++ int ret; ++ struct platform_device *pdev =
to_platform_device(dev); ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ priv->do_pm_s4_thaw = true; ++ ret = gmac_dev_resume(pdev); ++ priv->do_pm_s4_thaw = false; ++ return ret; ++} ++ ++static int gmac_dev_pm_restore(struct device *dev) ++{ ++ int ret; ++ struct platform_device *pdev = to_platform_device(dev); ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ ret = gmac_dev_resume(pdev); ++ priv->do_pm_s4 = false; ++ return ret; ++} ++ ++static const struct dev_pm_ops gmac_pm_ops = { ++ .freeze = gmac_dev_pm_freeze, ++ .restore = gmac_dev_pm_restore, ++ .thaw = gmac_dev_pm_thaw, ++ .suspend = gmac_dev_pm_suspend, ++ .resume = gmac_dev_pm_resume, ++}; ++#endif ++static struct platform_driver gmac_dev_driver = { ++ .probe = gmac_dev_probe, ++ .remove = gmac_dev_remove, ++#ifdef CONFIG_PM ++ .suspend = gmac_dev_suspend, ++ .resume = gmac_dev_resume, ++ .shutdown = gmac_dev_shutdown, ++#endif ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = GMAC_DRIVER_NAME, ++ .of_match_table = gmac_of_match, ++#ifdef CONFIG_PM_SLEEP ++ .pm = &gmac_pm_ops, ++#endif ++ }, ++}; ++ ++static int __init gmac_init(void) ++{ ++ int ret; ++ ++ ret = platform_driver_register(&gmac_dev_driver); ++ if (ret) ++ return ret; ++ ++ gmac_proc_create(); ++ ++ return 0; ++} ++ ++static void __exit gmac_exit(void) ++{ ++ platform_driver_unregister(&gmac_dev_driver); ++ ++ gmac_proc_destroy(); ++} ++ ++module_init(gmac_init); ++module_exit(gmac_exit); ++ ++MODULE_AUTHOR("Vendor"); ++MODULE_DESCRIPTION("Vendor double GMAC driver, base on driver gmacv200"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/ethernet/vendor/gmac/gmac.h b/drivers/net/ethernet/vendor/gmac/gmac.h +new file mode 100644 +index 000000000..07e8505cb +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac.h +@@ -0,0 +1,982 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
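++ * Description: register, descriptor, and driver private-data definitions for the GMAC driver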
++ */ ++ ++#ifndef GMAC_GMAC_H ++#define GMAC_GMAC_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if !defined(CONFIG_GMAC_DDR_64BIT) && defined(CONFIG_ARM64) ++#define CONFIG_GMAC_DDR_64BIT ++#endif ++#ifndef CONFIG_GMAC_DESC_4WORD ++#define CONFIG_GMAC_DESC_4WORD ++#endif ++#ifndef CONFIG_GMAC_RXCSUM ++#define CONFIG_GMAC_RXCSUM ++#endif ++#ifndef CONFIG_TX_FLOW_CTRL_PAUSE_TIME ++#define CONFIG_TX_FLOW_CTRL_PAUSE_TIME 0xFFFF ++#endif ++#ifndef CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL ++#define CONFIG_TX_FLOW_CTRL_PAUSE_INTERVAL 0xFFFF ++#endif ++#ifndef CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD ++#define CONFIG_TX_FLOW_CTRL_ACTIVE_THRESHOLD 16 ++#endif ++#ifndef CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD ++#define CONFIG_TX_FLOW_CTRL_DEACTIVE_THRESHOLD 32 ++#endif ++ ++ ++#define STATION_ADDR_LOW 0x0000 ++#define STATION_ADDR_HIGH 0x0004 ++#define MAC_DUPLEX_HALF_CTRL 0x0008 ++ ++#define PORT_MODE 0x0040 ++ ++#define PORT_EN 0x0044 ++#define BITS_TX_EN BIT(2) ++#define BITS_RX_EN BIT(1) ++ ++#define FC_TX_TIMER 0x001C ++ ++#define PAUSE_THR 0x0038 ++ ++#define PAUSE_EN 0x0048 ++#define BIT_RX_FDFC BIT(0) ++#define BIT_TX_FDFC BIT(1) ++ ++#define RX_PAUSE_EN 0x02A4 ++#define BIT_RX_FQ_PAUSE_EN BIT(0) ++#define BIT_RX_BQ_PAUSE_EN BIT(1) ++ ++#define CRF_TX_PAUSE 0x0340 ++ ++#define BITS_Q_PAUSE_TH_OFFSET 16 ++#define BITS_Q_PAUSE_TH_MASK 0xFFFF ++ ++#define REC_FILT_CONTROL 0x0064 ++#define BIT_CRC_ERR_PASS BIT(5) ++#define BIT_PAUSE_FRM_PASS BIT(4) ++#define BIT_VLAN_DROP_EN BIT(3) ++#define BIT_BC_DROP_EN BIT(2) ++#define BIT_MC_MATCH_EN BIT(1) ++#define BIT_UC_MATCH_EN BIT(0) ++ ++#define PORT_MC_ADDR_LOW 0x0068 ++#define PORT_MC_ADDR_HIGH 0x006C ++#define MAC_CLEAR 0x0070 ++#define BIT_TX_SOFT_RESET BIT(0) ++ ++#define RX_OCTETS_OK_CNT 0x80 ++#define RX_SHORT_ERR_CNT 0xd8 ++#define OCTETS_TRANSMITTED_OK 0x100 ++#define TX_PKTS_1519TOMAXOCTETS 0x12c ++#define TX_UNDERRUN 0x150 ++#define TX_PAUSE_FRAMES 0x15c ++ ++#define MODE_CHANGE_EN 0x01b4 ++#define BIT_MODE_CHANGE_EN BIT(0) ++ ++#define COL_SLOT_TIME 0x01c0 ++ ++#define CRF_MIN_PACKET 0x0210 ++#define BIT_RSS_EN_VERISION BIT(31) ++#define BIT_HASH_EN_VERISION BIT(30) ++#define BIT_OFFSET_TSO_VERSION 28 ++#define BIT_OFFSET_TX_MIN_LEN 8 ++#define BIT_MASK_TX_MIN_LEN GENMASK(13, 8) ++ ++#define CONTROL_WORD 0x0214 ++#define CONTROL_WORD_CONFIG 0x640 ++#define RX_OVER_FLOW_CNT 0x0294 ++ ++#define TSO_COE_CTRL 0x02e8 ++#if defined(CONFIG_ARM64_64K_PAGES) ++#define FRAG_MAX_LEN 0xffff ++#define FRAG_MAX_LEN_OFFSET 16 ++#endif ++#define BIT_COE_IPHDR_DROP BIT(4) ++#define BIT_COE_PAYLOAD_DROP BIT(5) ++#define BIT_COE_IPV6_UDP_ZERO_DROP BIT(6) ++#define COE_ERR_DROP (BIT_COE_IPHDR_DROP | \ ++ BIT_COE_PAYLOAD_DROP | \ ++ BIT_COE_IPV6_UDP_ZERO_DROP) ++ ++#define RX_FQ_START_ADDR 0x0500 ++#define RX_FQ_DEPTH 0x0504 ++#define REG_BIT_WIDTH 32 ++#define Q_ADDR_HI8_OFFSET 24 ++#define Q_ADDR_HI8_MASK (BIT(Q_ADDR_HI8_OFFSET) - 1) ++#define TX_DESC_HI8_MASK 0xff ++#define SG_DESC_HI8_OFFSET 8 ++#define RX_FQ_WR_ADDR 0x0508 ++#define BITS_RX_FQ_WR_ADDR mk_bits(0, 21) ++#define RX_FQ_RD_ADDR 0x050c ++#define BITS_RX_FQ_RD_ADDR mk_bits(0, 21) ++#define RX_FQ_VLDDESC_CNT 0x0510 ++#define BITS_RX_FQ_VLDDESC_CNT mk_bits(0, 16) ++#define RX_FQ_ALEMPTY_TH 0x0514 ++#define BITS_RX_FQ_ALEMPTY_TH mk_bits(0, 16) ++#define RX_FQ_REG_EN 0x0518 ++#define BITS_RX_FQ_START_ADDR_EN BIT(2) ++#define BITS_RX_FQ_DEPTH_EN BIT(1) ++#define BITS_RX_FQ_RD_ADDR_EN mk_bits(0, 1) ++#define RX_FQ_ALFULL_TH 0x051c ++#define BITS_RX_FQ_ALFULL_TH mk_bits(0, 
16) ++ ++#define RX_BQ_START_ADDR 0x0520 ++#define RX_BQ_DEPTH 0x0524 ++#define RX_BQ_WR_ADDR 0x0528 ++#define RX_BQ_RD_ADDR 0x052c ++#define RX_BQ_FREE_DESC_CNT 0x0530 ++#define BITS_RX_BQ_FREE_DESC_CNT mk_bits(0, 16) ++#define RX_BQ_ALEMPTY_TH 0x0534 ++#define BITS_RX_BQ_ALEMPTY_TH mk_bits(0, 16) ++#define RX_BQ_REG_EN 0x0538 ++#define BITS_RX_BQ_START_ADDR_EN BIT(2) ++#define BITS_RX_BQ_DEPTH_EN BIT(1) ++#define BITS_RX_BQ_WR_ADDR_EN mk_bits(0, 1) ++#define RX_BQ_ALFULL_TH 0x053c ++#define BITS_RX_BQ_ALFULL_TH mk_bits(0, 16) ++ ++#define TX_BQ_START_ADDR 0x0580 ++#define TX_BQ_DEPTH 0x0584 ++#define TX_BQ_WR_ADDR 0x0588 ++#define BITS_TX_BQ_WR_ADDR mk_bits(0, 21) ++#define TX_BQ_RD_ADDR 0x058c ++#define BITS_TX_BQ_RD_ADDR mk_bits(0, 21) ++#define TX_BQ_VLDDESC_CNT 0x0590 ++#define BITS_TX_BQ_VLDDESC_CNT mk_bits(0, 16) ++#define TX_BQ_ALEMPTY_TH 0x0594 ++#define BITS_TX_BQ_ALEMPTY_TH mk_bits(0, 16) ++#define TX_BQ_REG_EN 0x0598 ++#define BITS_TX_BQ_START_ADDR_EN BIT(2) ++#define BITS_TX_BQ_DEPTH_EN BIT(1) ++#define BITS_TX_BQ_RD_ADDR_EN mk_bits(0, 1) ++#define TX_BQ_ALFULL_TH 0x059c ++#define BITS_TX_BQ_ALFULL_TH mk_bits(0, 16) ++ ++#define TX_RQ_START_ADDR 0x05a0 ++#define TX_RQ_DEPTH 0x05a4 ++#define TX_RQ_WR_ADDR 0x05a8 ++#define BITS_TX_RQ_WR_ADDR mk_bits(0, 21) ++#define TX_RQ_RD_ADDR 0x05ac ++#define BITS_TX_RQ_RD_ADDR mk_bits(0, 21) ++#define TX_RQ_FREE_DESC_CNT 0x05b0 ++#define BITS_TX_RQ_FREE_DESC_CNT mk_bits(0, 16) ++#define TX_RQ_ALEMPTY_TH 0x05b4 ++#define BITS_TX_RQ_ALEMPTY_TH mk_bits(0, 16) ++#define TX_RQ_REG_EN 0x05b8 ++#define BITS_TX_RQ_START_ADDR_EN BIT(2) ++#define BITS_TX_RQ_DEPTH_EN BIT(1) ++#define BITS_TX_RQ_WR_ADDR_EN mk_bits(0, 1) ++#define TX_RQ_ALFULL_TH 0x05bc ++#define BITS_TX_RQ_ALFULL_TH mk_bits(0, 16) ++ ++#define RAW_PMU_INT 0x05c0 ++#define ENA_PMU_INT 0x05c4 ++ ++#define DESC_WR_RD_ENA 0x05CC ++ ++#define IN_QUEUE_TH 0x05d8 ++#define BITS_OFFSET_TX_RQ_IN_TH 16 ++#define IN_QUEUE_TH_MASK 0xFF ++#define MAX_IN_QUEUE_TH 0xFF ++ ++#define RX_BQ_IN_TIMEOUT_TH 0x05E0 ++#define TX_RQ_IN_TIMEOUT_TH 0x05e4 ++#define MAX_IN_QUQUE_TIMEOUT_TH 0xFFFFFF ++ ++#define STOP_CMD 0x05e8 ++#define BITS_TX_STOP_EN BIT(1) ++#define BITS_RX_STOP_EN BIT(0) ++#define STOP_RX_TX (BITS_TX_STOP_EN | BITS_RX_STOP_EN) ++ ++#define RSS_IND_TBL 0x0c0c ++#define BIT_IND_TBL_READY BIT(13) ++#define BIT_IND_TLB_WR BIT(12) ++#define RSS_RAW_PMU_INT 0x0c10 ++#define RSS_QUEUE1_START_ADDR 0x0c20 ++#define rx_bq_start_addr_queue(i) (RSS_QUEUE1_START_ADDR + \ ++ ((i) - 1) * 0x10) ++#define RSS_QUEUE1_DEPTH 0x0c24 ++#define RX_BQ_WR_ADDR_QUEUE1 0x0c28 ++#define RX_BQ_RD_ADDR_QUEUE1 0x0c2c ++#define RSS_QUEUE1_ENA_INT 0x0c90 ++#define rss_ena_int_queue(i) (RSS_QUEUE1_ENA_INT + ((i) - 1) * 0x4) ++#define rx_bq_depth_queue(i) (RSS_QUEUE1_DEPTH + ((i) - 1) * 0x10) ++#define rx_bq_wr_addr_queue(i) ((i) ? (RX_BQ_WR_ADDR_QUEUE1 + \ ++ ((i) - 1) * 0x10) : RX_BQ_WR_ADDR) ++#define rx_bq_rd_addr_queue(i) ((i) ? 
(RX_BQ_RD_ADDR_QUEUE1 + \ ++ ((i) - 1) * 0x10) : RX_BQ_RD_ADDR) ++ ++#define def_int_mask_queue(i) (0x3 << (2 * ((i) - 1))) ++ ++/* AXI burst and outstanding config */ ++#define BURST_OUTSTANDING_REG 0x3014 ++#define BURST4_OUTSTANDING1 0x81ff ++#define BURST_OUTSTANDING_OFFSET 16 ++ ++#define GMAC_SPEED_1000 0x05 ++#define GMAC_SPEED_100 0x01 ++#define GMAC_SPEED_10 0x00 ++ ++#define IPV4_HEAD_LENGTH 0x5 ++ ++enum gmac_tx_err { ++ ERR_NONE = 0, ++ ERR_DESC_CFG = (1 << 0), ++ ERR_DATA_LEN = (1 << 1), ++ ERR_DESC_NFRAG_NUM = (1 << 2), /* bit2 */ ++ ERR_DESC_IP_HDR_LEN = (1 << 3), /* bit3 */ ++ ERR_DESC_PROT_HDR_LEN = (1 << 4), /* bit4 */ ++ ERR_DESC_MTU = (1 << 5), /* bit5 */ ++ ERR_LINK_SGPKT_LEN = (1 << 8), /* bit8 */ ++ ERR_LINK_TSOPKT_LINEAR = (1 << 9), /* bit9 */ ++ ERR_LINK_NFRAG_LEN = (1 << 10), /* bit10 */ ++ ERR_LINK_TOTAL_LEN = (1 << 11), /* bit11 */ ++ ERR_HDR_TCP_BCMC = (1 << 12), /* bit12 */ ++ ERR_HDR_UDP_BC = (1 << 13), /* bit13 */ ++ ERR_HDR_VLAN_IP_TYPE = (1 << 14), /* bit14 */ ++ ERR_HDR_IP_TYPE = (1 << 15), /* bit15 */ ++ ERR_HDR_IP_VERSION = (1 << 16), /* bit16 */ ++ ERR_HDR_IP_HDR_LEN = (1 << 17), /* bit17 */ ++ ERR_HDR_IP_TOTAL_LEN = (1 << 18), /* bit18 */ ++ ERR_HDR_IPV6_TTL_PROT = (1 << 19), /* bit19 */ ++ ERR_HDR_IPV4_OFFSET = (1 << 20), /* bit20 */ ++ ERR_HDR_IPV4_TTL_PROT = (1 << 21), /* bit21 */ ++ ERR_HDR_UDP_LEN = (1 << 22), /* bit22 */ ++ ERR_HDR_TCP_LEN = (1 << 23), /* bit23 */ ++ ERR_DESC = (ERR_DESC_CFG | ERR_DATA_LEN | ++ ERR_DESC_NFRAG_NUM | ERR_DESC_IP_HDR_LEN | ++ ERR_DESC_PROT_HDR_LEN | ERR_DESC_MTU), ++ ERR_LINK = (ERR_LINK_SGPKT_LEN | ERR_LINK_TSOPKT_LINEAR | ++ ERR_LINK_NFRAG_LEN | ERR_LINK_TOTAL_LEN), ++ ERR_HDR = (ERR_HDR_TCP_BCMC | ERR_HDR_UDP_BC | ++ ERR_HDR_VLAN_IP_TYPE | ERR_HDR_IP_TYPE | ++ ERR_HDR_IP_VERSION | ERR_HDR_IP_HDR_LEN | ++ ERR_HDR_IP_TOTAL_LEN | ERR_HDR_IPV6_TTL_PROT | ++ ERR_HDR_IPV4_OFFSET | ERR_HDR_IPV4_TTL_PROT | ++ ERR_HDR_UDP_LEN | ERR_HDR_TCP_LEN), ++ ERR_ALL = (ERR_DESC | ERR_LINK | ERR_HDR), ++}; ++ ++#define GMAC_DRIVER_NAME "gmac_v200" ++ ++#define GMAC_MAC_CLK_NAME "gmac_clk" ++#define GMAC_MACIF_CLK_NAME "macif_clk" ++ ++#define GMAC_PORT_RST_NAME "port_reset" ++#define GMAC_MACIF_RST_NAME "macif_reset" ++#define GMAC_PHY_RST_NAME "phy_reset" ++ ++#define GMAC_IOSIZE 0x1000 ++#define GMAC_OFFSET (GMAC_IOSIZE) ++ ++#define RX_BQ_IN_INT BIT(17) ++#define TX_RQ_IN_INT BIT(19) ++#define RX_BQ_IN_TIMEOUT_INT BIT(28) ++#define TX_RQ_IN_TIMEOUT_INT BIT(29) ++ ++#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \ ++ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT) ++ ++/* write or read descriptor need memory barrier */ ++#define gmac_sync_barrier() do { isb(); smp_mb(); } while (0) ++ ++#define VENDOR_PHY_ID_FESTAV200 0x20669823 ++#define PHY_ID_KSZ8051MNL 0x00221550 ++#define PHY_ID_KSZ8081RNB 0x00221560 ++#define PHY_ID_UNKNOWN 0x00221513 ++#define DEFAULT_PHY_MASK 0xfffffff0 ++#define REALTEK_PHY_ID_8211E 0x001cc915 ++#define REALTEK_PHY_MASK 0x001fffff ++#define PHY_ID_RTL8211F 0x001cc916 ++#define PHY_ID_MASK_RTL8211F 0x001fffff ++ ++#define REG_BASE_CRG 0x00A00000 ++#ifdef CONFIG_ARCH_SHAOLINSPEAR ++#define REG_BASE_SYSTEM_CTRL 0x00840000 ++#else ++#define REG_BASE_SYSTEM_CTRL 0x00A10000 ++#endif ++ ++#ifdef CONFIG_GMAC_HAS_INTERNAL_PHY ++/* register REG_CRG_FEPHY */ ++#define REG_CRG_FEPHY 0x0180 ++#define BIT_FEPHY_CLK BIT(0) ++#define BIT_FEPHY_RST BIT(1) ++ ++/* register REG_PERI_FEPHY_LDO */ ++#define REG_SC_FEPHY_REG0 0x0910 /* ldo, wol */ ++#define REG_SC_FEPHY_REG1 0x0914 /* iddq, fephy addr */ ++#define 
REG_FEPHY_REG0 REG_SC_FEPHY_REG0 ++#define REG_FEPHY_REG1 REG_SC_FEPHY_REG1 ++ ++#define BIT_LDO_ENZ BIT(0) ++#define BIT_LDO_EN BIT(1) ++#define BIT_LDO_RSTN BIT(2) ++ ++#define BIT_IDDQ_MODE BIT(24) ++#define BIT_MASK_FEPHY_ADDR 0x1F ++#endif /* CONFIG_GMAC_HAS_INTERNAL_PHY */ ++ ++enum { ++ GMAC_PORT0, ++ GMAC_PORT1, ++ GMAC_MAX_PORT, ++}; ++ ++enum { ++ MEM_GMAC_IOBASE, ++ MEM_MACIF_IOBASE, ++ MEM_AXI_BUS_CFG_IOBASE, ++ MEM_FWD_IOBASE, ++ MEM_CTRL_IOBASE, ++}; ++ ++#define GMAC_LINKED BIT(0) ++#define GMAC_DUP_FULL BIT(1) ++#define GMAC_SPD_10M BIT(2) ++#define GMAC_SPD_100M BIT(3) ++#define GMAC_SPD_1000M BIT(4) ++/* Flow Control defines */ ++#define FLOW_OFF 0 ++#define FLOW_RX 1 ++#define FLOW_TX 2 ++#define FLOW_AUTO (FLOW_TX | FLOW_RX) ++ ++#define RX_BQ_INT_THRESHOLD 0x40 ++#define TX_RQ_INT_THRESHOLD 0x20 ++#define RX_BQ_INT_TIMEOUT_THRESHOLD 0x10000 ++#define TX_RQ_INT_TIMEOUT_THRESHOLD 0x18000 ++ ++#define GMAC_MONITOR_TIMER (msecs_to_jiffies(200)) ++ ++#define ETH_MAX_FRAME_SIZE (1600 + 128) ++#define SKB_SIZE (ETH_MAX_FRAME_SIZE) ++ ++#define DESC_VLD_FREE 0 ++#define DESC_VLD_BUSY 1 ++ ++#define DESC_FL_FIRST 2 ++#define DESC_FL_MID 0 ++#define DESC_FL_LAST 1 ++#define DESC_FL_FULL 3 ++ ++#if defined(CONFIG_GMAC_DESC_4WORD) ++#define DESC_WORD_SHIFT 2 ++#else ++#define DESC_WORD_SHIFT 3 ++#endif ++#define DESC_BYTE_SHIFT (DESC_WORD_SHIFT + 2) ++#define DESC_WORD_CNT (1 << DESC_WORD_SHIFT) ++#define DESC_SIZE (1 << DESC_BYTE_SHIFT) ++ ++#define RX_DESC_NUM 1024 ++#define TX_DESC_NUM 1024 ++ ++/* DMA descriptor ring helpers */ ++#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) ++#define dma_cnt(n) ((n) >> DESC_BYTE_SHIFT) ++#define dma_byte(n) ((n) << DESC_BYTE_SHIFT) ++ ++#define RSS_HASH_KEY_SIZE 4 ++#define RSS_INDIRECTION_TABLE_SIZE 128 ++#define RSS_NUM_RXQS 4 ++ ++#define GMAC_TRACE_LEVEL 10 ++#define GMAC_NORMAL_LEVEL 7 ++ ++#define mk_bits(shift, nbits) ((((shift) & 0x1F) << 16) | ((nbits) & 0x3F)) ++ ++#define FC_ACTIVE_MIN 1 ++#define FC_ACTIVE_DEFAULT 16 ++#define FC_ACTIVE_MAX 127 ++#define FC_DEACTIVE_MIN 1 ++#define FC_DEACTIVE_DEFAULT 32 ++#define FC_DEACTIVE_MAX 127 ++ ++#define FC_PAUSE_TIME_DEFAULT 0xFFFF ++#define FC_PAUSE_INTERVAL_DEFAULT 0xFFFF ++#define FC_PAUSE_TIME_MAX 0xFFFF ++ ++#define HW_CAP_EN 0x0c00 ++#define BIT_RSS_CAP BIT(0) ++#define BIT_RXHASH_CAP BIT(1) ++#define RSS_HASH_KEY 0x0c04 ++#define RSS_HASH_CONFIG 0x0c08 ++#define TCPV4_L3_HASH_EN BIT(0) ++#define TCPV4_L4_HASH_EN BIT(1) ++#define TCPV4_VLAN_HASH_EN BIT(2) ++#define UDPV4_L3_HASH_EN BIT(4) ++#define UDPV4_L4_HASH_EN BIT(5) ++#define UDPV4_VLAN_HASH_EN BIT(6) ++#define IPV4_L3_HASH_EN BIT(8) ++#define IPV4_VLAN_HASH_EN BIT(9) ++#define TCPV6_L3_HASH_EN BIT(12) ++#define TCPV6_L4_HASH_EN BIT(13) ++#define TCPV6_VLAN_HASH_EN BIT(14) ++#define UDPV6_L3_HASH_EN BIT(16) ++#define UDPV6_L4_HASH_EN BIT(17) ++#define UDPV6_VLAN_HASH_EN BIT(18) ++#define IPV6_L3_HASH_EN BIT(20) ++#define IPV6_VLAN_HASH_EN BIT(21) ++#define DEF_HASH_CFG 0x377377 ++ ++#define RGMII_SPEED_1000 0x2c ++#define RGMII_SPEED_100 0x2f ++#define RGMII_SPEED_10 0x2d ++#define MII_SPEED_100 0x0f ++#define MII_SPEED_10 0x0d ++#define RMII_SPEED_100 0x8f ++#define RMII_SPEED_10 0x8d ++#define GMAC_FULL_DUPLEX BIT(4) ++ ++/* tso stuff */ ++#define SG_FLAG BIT(30) ++#define COE_FLAG BIT(29) ++#define TSO_FLAG BIT(28) ++#define VLAN_FLAG BIT(10) ++#define IPV6_FLAG BIT(9) ++#define UDP_FLAG BIT(8) ++ ++#define PKT_IPV6_HDR_LEN 10 ++#define PKT_UDP_HDR_LEN 2 ++#define WORD_TO_BYTE 4 ++enum { ++ PKT_NORMAL, ++ PKT_SG ++}; 
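++
++/* IP version of the frame, matching the one-bit ip_ver field in the TSO descriptor */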
++ ++enum { ++ PKT_IPV4, ++ PKT_IPV6 ++}; ++ ++enum { ++ PKT_TCP, ++ PKT_UDP ++}; ++ ++struct frags_info { ++ /* Word(2*i+2) */ ++ u32 addr; ++ /* Word(2*i+3) */ ++ u32 size : 16; ++ u32 reserved : 16; ++}; ++ ++struct sg_desc { ++ /* Word0 */ ++ u32 total_len : 17; ++ u32 reserv : 15; ++ /* Word1 */ ++ u32 ipv6_id; ++ /* Word2 */ ++ u32 linear_addr; ++ /* Word3 */ ++ u32 linear_len : 16; ++ u32 reserv3 : 16; ++ /* MAX_SKB_FRAGS is 18 */ ++ struct frags_info frags[18]; ++}; ++/* tso stuff end */ ++ ++#if defined(CONFIG_GMAC_DESC_4WORD) ++struct gmac_desc { ++ unsigned int data_buff_addr; ++ ++ unsigned int buffer_len : 11; ++#if defined(CONFIG_GMAC_RXCSUM) ++ unsigned int reserve2 : 1; ++ unsigned int payload_csum_err : 1; ++ unsigned int header_csum_err : 1; ++ unsigned int payload_csum_done : 1; ++ unsigned int header_csum_done : 1; ++#else ++ unsigned int reserve2 : 5; ++#endif ++ unsigned int data_len : 11; ++ unsigned int reserve1 : 2; ++ unsigned int fl : 2; ++ unsigned int descvid : 1; ++ ++ unsigned int rxhash; ++ unsigned int reserve3 : 8; ++ unsigned int l3_hash : 1; ++ unsigned int has_hash : 1; ++ unsigned int skb_id : 14; ++ unsigned int reserve31 : 8; ++}; ++ ++struct gmac_tso_desc { ++ unsigned int data_buff_addr; ++ union { ++ struct { ++ unsigned int prot_hdr_len : 4; ++ unsigned int ip_hdr_len : 4; ++ unsigned int prot_type : 1; ++ unsigned int ip_ver : 1; ++ unsigned int vlan_flag : 1; ++ unsigned int nfrags_num : 5; ++ unsigned int data_len : 11; ++ unsigned int reservel : 1; ++ unsigned int tso_flag : 1; ++ unsigned int coe_flag : 1; ++ unsigned int sg_flag : 1; ++ unsigned int hw_own : 1; ++ } tx; ++ unsigned int val; ++ } desc1; ++ unsigned int reserve_desc2; ++ unsigned int tx_err; ++}; ++#else ++struct gmac_desc { ++ unsigned int data_buff_addr; ++ ++ unsigned int buffer_len : 11; ++#if defined(CONFIG_GMAC_RXCSUM) ++ unsigned int reserve2 : 1; ++ unsigned int payload_csum_err : 1; ++ unsigned int header_csum_err : 1; ++ unsigned int payload_csum_done : 1; ++#else ++ unsigned int reserve2 : 5; ++#endif ++ unsigned int data_len : 11; ++ unsigned int reserve1 : 2; ++ unsigned int fl : 2; ++ unsigned int descvid : 1; ++ ++ unsigned int rxhash; ++ unsigned int reserve3 : 8; ++ unsigned int l3_hash : 1; ++ unsigned int has_hash : 1; ++ unsigned int skb_id : 14; ++ unsigned int reserve31 : 8; ++ ++ unsigned int reserve4; ++ unsigned int reserve5; ++ unsigned int reserve6; ++ unsigned int reserve7; ++}; ++ ++struct gmac_tso_desc { ++ unsigned int data_buff_addr; ++ union { ++ struct { ++ unsigned int prot_hdr_len : 4; ++ unsigned int ip_hdr_len : 4; ++ unsigned int prot_type : 1; ++ unsigned int ip_ver : 1; ++ unsigned int vlan_flag : 1; ++ unsigned int nfrags_num : 5; ++ unsigned int data_len : 11; ++ unsigned int reservel : 1; ++ unsigned int tso_flag : 1; ++ unsigned int coe_flag : 1; ++ unsigned int sg_flag : 1; ++ unsigned int hw_own : 1; ++ } tx; ++ unsigned int val; ++ } desc1; ++ unsigned int reserve_desc2; ++ unsigned int reserve3; ++ ++ unsigned int tx_err; ++ unsigned int reserve5; ++ unsigned int reserve6; ++ unsigned int reserve7; ++}; ++#endif ++ ++#define SKB_MAGIC ((struct sk_buff *)0x5a) ++ ++struct gmac_napi { ++ struct napi_struct napi; ++ struct gmac_netdev_local *ndev_priv; ++ int rxq_id; ++}; ++ ++struct gmac_rss_info { ++ u32 hash_cfg; ++ u32 ind_tbl_size; ++ u8 ind_tbl[RSS_INDIRECTION_TABLE_SIZE]; ++ u8 key[RSS_HASH_KEY_SIZE]; ++}; ++ ++struct gmac_coalesce { ++ u32 rx_timeout; ++ u32 tx_timeout; ++ u32 rx_frames; ++ u32 tx_frames; ++}; ++ ++#define 
QUEUE_NUMS 4 ++#define BASE_QUEUE_NUMS 3 ++#define STATISTICS_MAX_NUM 50 ++ ++struct gmac_phy_fixup_entry { ++ u16 reg; ++ u16 val; ++ u16 delay; ++ u16 resv; ++}; ++ ++struct gmac_netdev_local { ++#define GMAC_SG_DESC_ADD 64U ++ struct sg_desc *dma_sg_desc ____cacheline_aligned; ++ dma_addr_t dma_sg_phy; ++ unsigned int sg_head; ++ unsigned int sg_tail; ++ unsigned int sg_count; ++ u64 statistics[STATISTICS_MAX_NUM]; ++ ++ void __iomem *gmac_iobase; ++ void __iomem *macif_base; ++ void __iomem *crg_iobase; ++ void __iomem *sysctrl_iobase; ++ void __iomem *axi_bus_cfg_base; ++ int index; /* 0 -- mac0, 1 -- mac1 */ ++ ++ u32 hw_cap; ++ bool tso_supported; ++ bool has_rxhash_cap; ++ bool has_rss_cap; ++ int num_rxqs; ++ struct gmac_napi q_napi[RSS_NUM_RXQS]; ++ int irq[RSS_NUM_RXQS]; ++ struct gmac_rss_info rss_info; ++ struct gmac_coalesce coalesce; ++ ++ struct reset_control *port_rst; ++ struct reset_control *macif_rst; ++ struct reset_control *phy_rst; ++ ++#define MAX_FIXUP_PHY_CNT 5 ++#define MAX_FIXUP_ENTRY_CNT 256 ++#define PHY_FIXUP_ID_STR "phy_fixup_id" ++#define PHY_FIXUP_NAME_SIZE 64 ++#define MAX_FIXUP_ENTRY_ARR_SIZE (sizeof(struct gmac_phy_fixup_entry) * MAX_FIXUP_ENTRY_CNT / sizeof(u16)) ++ u32 phy_fixup_phycnt; ++ u32 phy_fixup_id[MAX_FIXUP_PHY_CNT]; ++ u32 phy_fixup_entry_cnt[MAX_FIXUP_PHY_CNT]; ++ struct gmac_phy_fixup_entry phy_fixup_entry[MAX_FIXUP_PHY_CNT][MAX_FIXUP_ENTRY_CNT]; ++ ++ struct { ++ struct gmac_desc *desc; ++ dma_addr_t phys_addr; ++ int *sg_desc_offset; ++ /* how many desc in the desc pool */ ++ unsigned int count; ++ struct sk_buff **skb; ++ unsigned int size; ++ } pool[QUEUE_NUMS + RSS_NUM_RXQS - 1]; ++#define RX_FQ pool[0] ++#define RX_BQ pool[1] ++#define TX_BQ pool[2] ++#define TX_RQ pool[3] ++ ++ struct sk_buff **tx_skb; ++ struct sk_buff **rx_skb; ++ ++ struct device *dev; ++ struct net_device *netdev; ++ struct clk *pub_clk; ++ struct clk *clk; ++ struct clk *macif_clk; ++ ++ struct mii_bus *bus; ++ struct gmac_adapter *adapter; ++ ++ struct timer_list monitor; ++ ++ char phy_name[MII_BUS_ID_SIZE]; ++ struct phy_device *phy; ++ struct device_node *phy_node; ++ phy_interface_t phy_mode; ++ bool autoeee; ++ bool internal_phy; ++ u32 trim_params; ++ bool fixed_link; ++ unsigned int phy_addr; ++ int (*eee_init)(struct phy_device *phy_dev); ++ /* gpio reset pin if has */ ++ void __iomem *gpio_base; ++ u32 gpio_bit; ++ ++ unsigned int flow_ctrl; ++ unsigned int pause; ++ unsigned int pause_interval; ++ unsigned int flow_ctrl_active_threshold; ++ unsigned int flow_ctrl_deactive_threshold; ++ ++ int old_link; ++ int old_speed; ++ int old_duplex; ++ ++ /* receive packet lock */ ++ spinlock_t rxlock; ++ /* transmit packet lock */ ++ spinlock_t txlock; ++ /* power management lock */ ++ spinlock_t pmtlock; ++ ++ int dev_state; /* INIT/OPEN/CLOSE */ ++ char pm_state; ++ bool mac_wol_enable; ++ bool phy_wol_enable; ++ bool do_pm_s4; ++ bool do_pm_s4_thaw; ++ u32 msg_enable; ++ u32 rx_fifo_overcnt; ++#define INIT 0 /* init gmac */ ++#define OPEN 1 /* power on gmac */ ++#define CLOSE 2 /* power off gmac */ ++}; ++ ++enum tso_version { ++ VER_NO_TSO = 0x0, ++ VER_BYTE_SPLICE = 0x1, ++ VER_SG_COE = 0x2, ++ VER_TSO = 0x3, ++}; ++ ++struct cyclic_queue_info { ++ u32 start; ++ u32 end; ++ u32 num; ++ u32 pos; ++}; ++ ++/* ethtool ops related func */ ++void gmac_set_flow_ctrl_state(struct gmac_netdev_local const *ld, int pause); ++ ++/* netdev ops related func */ ++void gmac_enable_napi(struct gmac_netdev_local *priv); ++void gmac_disable_napi(struct gmac_netdev_local 
*priv); ++u32 gmac_rx_refill(struct gmac_netdev_local *priv); ++ ++#define gmac_trace(level, msg...) do { \ ++ if ((level) >= GMAC_TRACE_LEVEL) { \ ++ pr_info("gmac_trace:%s:%d: ", __FILE__, __LINE__); \ ++ printk(msg); \ ++ printk("\n"); \ ++ } \ ++} while (0) ++ ++static inline void gmac_irq_enable(struct gmac_netdev_local const *ld) ++{ ++ writel(RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | ++ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT, ++ ld->gmac_iobase + ENA_PMU_INT); ++} ++ ++static inline void gmac_irq_enable_queue(struct gmac_netdev_local *ld, unsigned int rxq_id) ++{ ++ if (rxq_id) { ++ const u32 reg = rss_ena_int_queue(rxq_id); ++ writel(~0, ld->gmac_iobase + reg); ++ } else { ++ gmac_irq_enable(ld); ++ } ++} ++ ++static inline void gmac_irq_enable_all_queue(struct gmac_netdev_local *ld) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < (unsigned int)ld->num_rxqs; i++) ++ gmac_irq_enable_queue(ld, i); ++} ++ ++static inline void gmac_irq_disable(struct gmac_netdev_local const *ld) ++{ ++ writel(0, ld->gmac_iobase + ENA_PMU_INT); ++} ++ ++static inline void gmac_irq_disable_queue(struct gmac_netdev_local const *ld, ++ int rxq_id) ++{ ++ if (rxq_id) { ++ u32 reg = (u32)rss_ena_int_queue(rxq_id); ++ writel(0, ld->gmac_iobase + reg); ++ } else { ++ gmac_irq_disable(ld); ++ } ++} ++ ++static inline void gmac_irq_disable_all_queue(struct gmac_netdev_local const *ld) ++{ ++ int i; ++ ++ for (i = 0; i < ld->num_rxqs; i++) ++ gmac_irq_disable_queue(ld, i); ++} ++ ++static inline bool gmac_queue_irq_disabled(struct gmac_netdev_local *ld, int rxq_id) ++{ ++ u32 reg, val; ++ ++ if (rxq_id) ++ reg = (u32)rss_ena_int_queue(rxq_id); ++ else ++ reg = ENA_PMU_INT; ++ val = readl(ld->gmac_iobase + reg); ++ ++ return !val; ++} ++ ++static inline void gmac_hw_desc_enable(struct gmac_netdev_local const *ld) ++{ ++ writel(0xF, ld->gmac_iobase + DESC_WR_RD_ENA); ++} ++ ++static inline void gmac_hw_desc_disable(struct gmac_netdev_local const *ld) ++{ ++ writel(0, ld->gmac_iobase + DESC_WR_RD_ENA); ++} ++ ++static inline void gmac_port_enable(struct gmac_netdev_local const *ld) ++{ ++ writel(BITS_TX_EN | BITS_RX_EN, ld->gmac_iobase + PORT_EN); ++} ++ ++static inline void gmac_port_disable(struct gmac_netdev_local const *ld) ++{ ++ writel(0, ld->gmac_iobase + PORT_EN); ++} ++ ++static inline void gmac_hw_set_mac_addr(struct gmac_netdev_local *priv) ++{ ++ const unsigned char *mac = priv->netdev->dev_addr; ++ u32 val; ++ ++ val = mac[1] | (mac[0] << 8); /* mac[1]->(7, 0) mac[0]->(15, 8) */ ++ writel(val, priv->gmac_iobase + STATION_ADDR_HIGH); ++ val = mac[5] | (mac[4] << 8) | /* mac[5]->(7, 0) mac[4]->(8, 15) */ ++ (mac[3] << 16) | (mac[2] << 24); /* mac[3]->(23, 16) mac[2]->(31, 24) */ ++ writel(val, priv->gmac_iobase + STATION_ADDR_LOW); ++} ++ ++static inline void gmac_enable_rxcsum_drop(struct gmac_netdev_local const *ld, bool drop) ++{ ++ unsigned int v; ++ ++ v = readl(ld->gmac_iobase + TSO_COE_CTRL); ++ if (drop) ++ v |= COE_ERR_DROP; ++ else ++ v &= ~COE_ERR_DROP; ++#if defined(CONFIG_ARM64_64K_PAGES) ++ v |= (FRAG_MAX_LEN << FRAG_MAX_LEN_OFFSET); ++#endif ++ writel(v, ld->gmac_iobase + TSO_COE_CTRL); ++} ++ ++static inline void gmac_set_rss_cap(struct gmac_netdev_local const *priv) ++{ ++ u32 val = 0; ++ ++ if (priv->has_rxhash_cap) ++ val |= BIT_RXHASH_CAP; ++ if (priv->has_rss_cap) ++ val |= BIT_RSS_CAP; ++ writel(val, priv->gmac_iobase + HW_CAP_EN); ++} ++ ++static inline void gmac_get_rss_key(struct gmac_netdev_local *priv) ++{ ++ struct gmac_rss_info *rss = NULL; ++ u32 hkey; ++ ++ rss = &priv->rss_info; ++ 
hkey = readl(priv->gmac_iobase + RSS_HASH_KEY); ++ *((u32 *)rss->key) = hkey; ++} ++ ++static inline void gmac_set_rss_key(struct gmac_netdev_local *priv) ++{ ++ struct gmac_rss_info *rss = &priv->rss_info; ++ ++ writel(*((u32 *)rss->key), priv->gmac_iobase + RSS_HASH_KEY); ++} ++ ++static inline void gmac_config_hash_policy(struct gmac_netdev_local const *priv) ++{ ++ writel(priv->rss_info.hash_cfg, priv->gmac_iobase + RSS_HASH_CONFIG); ++} ++ ++static inline bool has_tso_cap(struct gmac_netdev_local *priv) ++{ ++ unsigned int hw_cap = readl(priv->gmac_iobase + CRF_MIN_PACKET); ++ ++ return ((hw_cap >> BIT_OFFSET_TSO_VERSION) & 0x3) == VER_TSO; /* CRF_MIN_PACKET bit 29:28 tso_version */ ++} ++ ++static inline bool has_rxhash_cap(struct gmac_netdev_local *priv) ++{ ++ unsigned int hw_cap = readl(priv->gmac_iobase + CRF_MIN_PACKET); ++ ++ return hw_cap & BIT_HASH_EN_VERISION; ++} ++ ++static inline bool has_rss_cap(struct gmac_netdev_local *priv) ++{ ++ unsigned int hw_cap = readl(priv->gmac_iobase + CRF_MIN_PACKET); ++ ++ return hw_cap & BIT_RSS_EN_VERISION; ++} ++ ++static inline u32 gmac_timeout_usec_to_reg(u32 usec) ++{ ++ return usec * NSEC_PER_USEC / 8; /* The unit of the register value is 8 ns. */ ++} ++ ++static inline u32 gmac_timeout_reg_to_usec(u32 regval) ++{ ++ return regval * 8 / NSEC_PER_USEC; /* The unit of the register value is 8 ns. */ ++} ++ ++static inline void gmac_set_rxbq_enqueue_timeout_thres(struct gmac_netdev_local *priv, u32 val) ++{ ++ writel(val, priv->gmac_iobase + RX_BQ_IN_TIMEOUT_TH); ++} ++ ++static inline void gmac_set_txrq_enqueue_timeout_thres(struct gmac_netdev_local *priv, u32 val) ++{ ++ writel(val, priv->gmac_iobase + TX_RQ_IN_TIMEOUT_TH); ++} ++ ++static inline void gmac_set_rxbq_enqueue_thres(struct gmac_netdev_local *priv, u32 val) ++{ ++ u32 tmp_val; ++ ++ tmp_val = readl(priv->gmac_iobase + IN_QUEUE_TH); ++ tmp_val &= ~(IN_QUEUE_TH_MASK); ++ val &= IN_QUEUE_TH_MASK; ++ val |= tmp_val; ++ writel(val, priv->gmac_iobase + IN_QUEUE_TH); ++} ++ ++static inline void gmac_set_txrq_enqueue_thres(struct gmac_netdev_local *priv, u32 val) ++{ ++ u32 tmp_val; ++ ++ tmp_val = readl(priv->gmac_iobase + IN_QUEUE_TH); ++ tmp_val &= ~(IN_QUEUE_TH_MASK << BITS_OFFSET_TX_RQ_IN_TH); ++ val = ((val & IN_QUEUE_TH_MASK) << BITS_OFFSET_TX_RQ_IN_TH); ++ val |= tmp_val; ++ writel(val, priv->gmac_iobase + IN_QUEUE_TH); ++} ++#endif +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.c b/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.c +new file mode 100644 +index 000000000..222f77c0f +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.c +@@ -0,0 +1,621 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
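++ * Description: ethtool operations (statistics, WoL, pause, RSS, coalescing) for the GMAC driver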
++ */
++
++#include
++#include
++#include
++
++#include "gmac.h"
++#include "gmac_pm.h"
++#include "gmac_ethtool_ops.h"
++
++static const char gmac_gstrings[][ETH_GSTRING_LEN] = {
++ "RX OK Bytes Count",
++ "RX Bad Bytes Count",
++ "RX Unicast Frames",
++ "RX Multicast Frames",
++ "RX Broadcast Frames",
++ "RX 64 Byte Frames",
++ "RX 65 - 127 Byte Frames",
++ "RX 128 - 255 Byte Frames",
++ "RX 256 - 511 Byte Frames",
++ "RX 512 - 1023 Byte Frames",
++ "RX 1024 - 1518 Byte Frames",
++ "RX Greater 1518 Byte Frames",
++ "RX FCS Errors",
++ "RX Tagged Frames",
++ "RX Data Errors",
++ "RX Alignment Errors",
++ "RX Too Long Errors",
++ "RX Jabber Errors",
++ "RX Pause Frames",
++ "RX Unknown Pause Frames",
++ "RX Very Long Errors",
++ "RX Runt Errors",
++ "RX Short Errors",
++ "TX OK Bytes Count",
++ "TX Bad Bytes Count",
++ "TX Unicast Frames",
++ "TX Multicast Frames",
++ "TX Broadcast Frames",
++ "TX 64 Byte Frames",
++ "TX 65 - 127 Byte Frames",
++ "TX 128 - 255 Byte Frames",
++ "TX 256 - 511 Byte Frames",
++ "TX 512 - 1023 Byte Frames",
++ "TX 1024 - 1518 Byte Frames",
++ "TX Greater 1518 Byte Frames",
++ "TX Underrun Errors",
++ "TX Tagged Frames",
++ "TX FCS Errors",
++ "TX Pause Frames",
++ "Interrupt0 Count",
++ "Interrupt1 Count",
++ "Interrupt2 Count",
++ "Interrupt3 Count",
++};
++
++static void gmac_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info)
++{
++ if (info == NULL)
++ return;
++ if (strncpy_s(info->driver, sizeof(info->driver), "gmac driver", sizeof(info->driver)))
++ printk("strncpy_s err : %s %d.\n", __func__, __LINE__);
++ if (strncpy_s(info->version, sizeof(info->version), "gmac v200", sizeof(info->version)))
++ printk("strncpy_s err : %s %d.\n", __func__, __LINE__);
++ if (strncpy_s(info->bus_info, sizeof(info->bus_info), "platform", sizeof(info->bus_info)))
++ printk("strncpy_s err : %s %d.\n", __func__, __LINE__);
++}
++
++static unsigned int gmac_get_link(struct net_device *net_dev)
++{
++ struct gmac_netdev_local *ld = netdev_priv(net_dev);
++
++ return ld->phy->link ? GMAC_LINKED : 0;
++}
++
++static void gmac_get_mac_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ wol->supported = WAKE_UCAST | WAKE_MAGIC;
++ wol->wolopts = 0;
++}
++
++static void gmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ wol->supported = 0;
++ wol->wolopts = 0;
++ if (dev->phydev)
++ phy_ethtool_get_wol(dev->phydev, wol);
++
++ if (!wol->supported)
++ gmac_get_mac_wol(dev, wol);
++}
++
++static int gmac_set_mac_wol(struct net_device *dev, const struct ethtool_wolinfo *wol)
++{
++ struct gmac_netdev_local *priv = netdev_priv(dev);
++ struct pm_config mac_pm_config = { 0 };
++
++ mac_pm_config.index = (unsigned int)priv->index;
++ if (wol->wolopts & WAKE_UCAST)
++ mac_pm_config.uc_pkts_enable = 1;
++
++ if (wol->wolopts & WAKE_MAGIC)
++ mac_pm_config.magic_pkts_enable = 1;
++
++ pmt_config(dev, &mac_pm_config);
++ priv->mac_wol_enable = true;
++
++ return 0;
++}
++
++static int gmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
++{
++ struct gmac_netdev_local *priv = netdev_priv(dev);
++ int err = 0;
++
++ if (dev->phydev) {
++ err = phy_ethtool_set_wol(dev->phydev, wol);
++ if (!err) {
++ if (wol->wolopts) {
++ priv->phy_wol_enable = true;
++ } else {
++ priv->phy_wol_enable = false;
++ }
++ }
++ }
++
++ err = gmac_set_mac_wol(dev, wol);
++ if (!err)
++ device_set_wakeup_enable(priv->dev, true);
++
++ return err;
++}
++
++static void gmac_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause)
++{
++ struct gmac_netdev_local *ld = NULL;
++ if (net_dev == NULL || pause == NULL)
++ return;
++ ld = netdev_priv(net_dev);
++
++ pause->rx_pause = 0;
++ pause->tx_pause = 0;
++ pause->autoneg = ld->phy->autoneg;
++
++ if (ld->phy->pause && (ld->flow_ctrl & FLOW_RX))
++ pause->rx_pause = 1;
++ if (ld->phy->pause && (ld->flow_ctrl & FLOW_TX))
++ pause->tx_pause = 1;
++}
++
++static int gmac_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause)
++{
++ struct gmac_netdev_local *ld = netdev_priv(net_dev);
++ struct phy_device *phy = ld->phy;
++ unsigned int new_pause = FLOW_OFF;
++
++ if (pause == NULL)
++ return -EINVAL;
++
++ if (pause->rx_pause)
++ new_pause |= FLOW_RX;
++ if (pause->tx_pause)
++ new_pause |= FLOW_TX;
++
++ if (new_pause != ld->flow_ctrl)
++ ld->flow_ctrl = new_pause;
++
++ gmac_set_flow_ctrl_state(ld, phy->pause);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
++ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phy->advertising);
++ if (ld->flow_ctrl)
++ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phy->advertising);
++#else
++ phy->supported |= ADVERTISED_Pause;
++ if (ld->flow_ctrl)
++ phy->advertising |= ADVERTISED_Pause;
++#endif
++
++ if (phy->autoneg) {
++ if (netif_running(net_dev))
++ return phy_start_aneg(phy);
++ }
++
++ return 0;
++}
++
++static u32 gmac_ethtool_getmsglevel(struct net_device *ndev)
++{
++ struct gmac_netdev_local *priv = netdev_priv(ndev);
++
++ return priv->msg_enable;
++}
++
++static void gmac_ethtool_setmsglevel(struct net_device *ndev, u32 level)
++{
++ struct gmac_netdev_local *priv = netdev_priv(ndev);
++
++ priv->msg_enable = level;
++}
++
++static u32 gmac_get_rxfh_key_size(struct net_device *ndev)
++{
++ return RSS_HASH_KEY_SIZE;
++}
++
++static u32 gmac_get_rxfh_indir_size(struct net_device *ndev)
++{
++ struct gmac_netdev_local *priv = netdev_priv(ndev);
++
++ return priv->rss_info.ind_tbl_size;
++}
++
++static int gmac_wait_rss_ready(struct gmac_netdev_local const *priv)
++{
++ void __iomem *base = priv->gmac_iobase;
++ int i;
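++ /* each poll below sleeps 10-20 us, so 10000 iterations bound the wait at roughly 100-200 ms */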
++ const int timeout = 10000; ++ ++ for (i = 0; !(readl(base + RSS_IND_TBL) & BIT_IND_TBL_READY); i++) { ++ if (i == timeout) { ++ netdev_err(priv->netdev, "wait rss ready timeout!\n"); ++ return -ETIMEDOUT; ++ } ++ usleep_range(10, 20); /* wait 10~20us */ ++ } ++ ++ return 0; ++} ++ ++static void gmac_config_rss(struct gmac_netdev_local *priv) ++{ ++ struct gmac_rss_info *rss = NULL; ++ u32 rss_val; ++ unsigned int i; ++ if (priv == NULL) ++ return; ++ rss = &priv->rss_info; ++ for (i = 0; i < rss->ind_tbl_size; i++) { ++ if (gmac_wait_rss_ready(priv) != 0) ++ break; ++ rss_val = BIT_IND_TLB_WR | (rss->ind_tbl[i] << 8) | i; /* shift 8 */ ++ writel(rss_val, priv->gmac_iobase + RSS_IND_TBL); ++ } ++} ++ ++static void gmac_get_rss(struct gmac_netdev_local *priv) ++{ ++ struct gmac_rss_info *rss = NULL; ++ u32 rss_val; ++ int i; ++ if (priv == NULL) ++ return; ++ rss = &priv->rss_info; ++ for (i = 0; i < rss->ind_tbl_size; i++) { ++ if (gmac_wait_rss_ready(priv) != 0) ++ break; ++ writel(i, priv->gmac_iobase + RSS_IND_TBL); ++ if (gmac_wait_rss_ready(priv) != 0) ++ break; ++ rss_val = readl(priv->gmac_iobase + RSS_IND_TBL); ++ rss->ind_tbl[i] = (rss_val >> 10) & 0x3; /* right shift 10 */ ++ } ++} ++ ++static int gmac_get_rxfh(struct net_device *ndev, u32 *indir, u8 *hkey, u8 *hfunc) ++{ ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ struct gmac_rss_info *rss = &priv->rss_info; ++ ++ if (hfunc != NULL) ++ *hfunc = ETH_RSS_HASH_TOP; ++ ++ if (hkey != NULL) { ++ if (memcpy_s(hkey, RSS_HASH_KEY_SIZE, rss->key, RSS_HASH_KEY_SIZE) < 0) { ++ printk("memcpy_s err : %s %d.\n", __func__, __LINE__); ++ } ++ } ++ ++ gmac_get_rss(priv); ++ if (indir != NULL) { ++ int i; ++ ++ for (i = 0; i < rss->ind_tbl_size; i++) ++ indir[i] = rss->ind_tbl[i]; ++ } ++ ++ return 0; ++} ++ ++static int gmac_set_rxfh(struct net_device *ndev, const u32 *indir, const u8 *hkey, const u8 hfunc) ++{ ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ struct gmac_rss_info *rss = &priv->rss_info; ++ ++ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ++ return -EOPNOTSUPP; ++ ++ if (indir != NULL) { ++ int i; ++ ++ for (i = 0; i < rss->ind_tbl_size; i++) ++ rss->ind_tbl[i] = indir[i]; ++ } ++ ++ if (hkey != NULL) { ++ if (memcpy_s(rss->key, RSS_HASH_KEY_SIZE, hkey, RSS_HASH_KEY_SIZE) < 0) ++ printk("memcpy_s err : %s %d.\n", __func__, __LINE__); ++ gmac_set_rss_key(priv); ++ } ++ ++ gmac_config_rss(priv); ++ ++ return 0; ++} ++ ++static void gmac_get_rss_hash(struct ethtool_rxnfc *info, u32 hash_cfg, ++ u32 l3_hash_en, u32 l4_hash_en, u32 vlan_hash_en) ++{ ++ if (hash_cfg & l3_hash_en) ++ info->data |= RXH_IP_SRC | RXH_IP_DST; ++ if (hash_cfg & l4_hash_en) ++ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; ++ if (hash_cfg & vlan_hash_en) ++ info->data |= RXH_VLAN; ++} ++ ++static int gmac_get_rss_hash_opts(struct gmac_netdev_local const *priv, struct ethtool_rxnfc *info) ++{ ++ u32 hash_cfg = priv->rss_info.hash_cfg; ++ ++ info->data = 0; ++ ++ switch (info->flow_type) { ++ case TCP_V4_FLOW: ++ gmac_get_rss_hash(info, hash_cfg, TCPV4_L3_HASH_EN, TCPV4_L4_HASH_EN, TCPV4_VLAN_HASH_EN); ++ break; ++ case TCP_V6_FLOW: ++ gmac_get_rss_hash(info, hash_cfg, TCPV6_L3_HASH_EN, TCPV6_L4_HASH_EN, TCPV6_VLAN_HASH_EN); ++ break; ++ case UDP_V4_FLOW: ++ gmac_get_rss_hash(info, hash_cfg, UDPV4_L3_HASH_EN, UDPV4_L4_HASH_EN, UDPV4_VLAN_HASH_EN); ++ break; ++ case UDP_V6_FLOW: ++ gmac_get_rss_hash(info, hash_cfg, UDPV6_L3_HASH_EN, UDPV6_L4_HASH_EN, UDPV6_VLAN_HASH_EN); ++ break; ++ case IPV4_FLOW: ++ 
gmac_get_rss_hash(info, hash_cfg, IPV4_L3_HASH_EN, 0, IPV4_VLAN_HASH_EN);
++ break;
++ case IPV6_FLOW:
++ gmac_get_rss_hash(info, hash_cfg, IPV6_L3_HASH_EN, 0, IPV6_VLAN_HASH_EN);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int gmac_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info, u32 *rules)
++{
++ struct gmac_netdev_local *priv = netdev_priv(ndev);
++ int ret = -EOPNOTSUPP;
++ if (info == NULL)
++ return -EINVAL;
++ switch (info->cmd) {
++ case ETHTOOL_GRXRINGS:
++ info->data = priv->num_rxqs;
++ ret = 0;
++ break;
++ case ETHTOOL_GRXFH:
++ return gmac_get_rss_hash_opts(priv, info);
++ default:
++ break;
++ }
++ return ret;
++}
++
++static int gmac_set_tcp_udp_hash_cfg(struct ethtool_rxnfc const *info, u32 *hash_cfg, u32 l4_mask, u32 vlan_mask)
++{
++ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
++ case 0: // all L4 hash bits are 0
++ *hash_cfg &= ~l4_mask;
++ break;
++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ *hash_cfg |= l4_mask;
++ break;
++ default:
++ return -EINVAL;
++ }
++ if (info->data & RXH_VLAN)
++ *hash_cfg |= vlan_mask;
++ else
++ *hash_cfg &= ~vlan_mask;
++ return 0;
++}
++
++static int gmac_ip_hash_cfg(struct ethtool_rxnfc const *info, u32 *hash_cfg, u32 vlan_mask)
++{
++ if (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
++ return -EINVAL;
++ if (info->data & RXH_VLAN)
++ *hash_cfg |= vlan_mask;
++ else
++ *hash_cfg &= ~vlan_mask;
++ return 0;
++}
++
++static int gmac_set_rss_hash_opts(struct gmac_netdev_local *priv, struct ethtool_rxnfc const *info)
++{
++ u32 hash_cfg;
++ if (priv == NULL || priv->netdev == NULL)
++ return -EINVAL;
++ hash_cfg = priv->rss_info.hash_cfg;
++ netdev_info(priv->netdev, "Set RSS flow type = %d, data = %llu\n",
++ info->flow_type, info->data);
++
++ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
++ return -EINVAL;
++
++ switch (info->flow_type) {
++ case TCP_V4_FLOW:
++ if (gmac_set_tcp_udp_hash_cfg(info, &hash_cfg, TCPV4_L4_HASH_EN, TCPV4_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ case TCP_V6_FLOW:
++ if (gmac_set_tcp_udp_hash_cfg(info, &hash_cfg, TCPV6_L4_HASH_EN, TCPV6_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ case UDP_V4_FLOW:
++ if (gmac_set_tcp_udp_hash_cfg(info, &hash_cfg, UDPV4_L4_HASH_EN, UDPV4_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ case UDP_V6_FLOW:
++ if (gmac_set_tcp_udp_hash_cfg(info, &hash_cfg, UDPV6_L4_HASH_EN, UDPV6_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ case IPV4_FLOW:
++ if (gmac_ip_hash_cfg(info, &hash_cfg, IPV4_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ case IPV6_FLOW:
++ if (gmac_ip_hash_cfg(info, &hash_cfg, IPV6_VLAN_HASH_EN) == -EINVAL)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ priv->rss_info.hash_cfg = hash_cfg;
++ gmac_config_hash_policy(priv);
++
++ return 0;
++}
++
++static int gmac_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
++{
++ struct gmac_netdev_local *priv = netdev_priv(ndev);
++ if (info == NULL)
++ return -EINVAL;
++ switch (info->cmd) {
++ case ETHTOOL_SRXFH:
++ return gmac_set_rss_hash_opts(priv, info);
++ default:
++ break;
++ }
++ return -EOPNOTSUPP;
++}
++
++static int gmac_get_sset_count(struct net_device *ndev, int sset)
++{
++ if (sset == ETH_SS_STATS)
++ return ARRAY_SIZE(gmac_gstrings);
++
++ return -EOPNOTSUPP;
++}
++
++static void gmac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
++{
++ u32 strlen = sizeof(gmac_gstrings);
++
++ if (stringset == ETH_SS_STATS)
++ memcpy_s(data, strlen,
gmac_gstrings, strlen); ++} ++ ++static void gmac_update_statistics(struct gmac_netdev_local *priv) ++{ ++ u32 reg, i, queue, cpu; ++ u64 count; ++ ++ for (i = 0, reg = RX_OCTETS_OK_CNT; reg <= RX_SHORT_ERR_CNT; reg += WORD_TO_BYTE, i++) { ++ priv->statistics[i] += readl(priv->gmac_iobase + reg); ++ } ++ ++ for (reg = OCTETS_TRANSMITTED_OK; reg <= TX_PKTS_1519TOMAXOCTETS; reg += WORD_TO_BYTE, i++) { ++ priv->statistics[i] += readl(priv->gmac_iobase + reg); ++ } ++ ++ for (reg = TX_UNDERRUN; reg <= TX_PAUSE_FRAMES; reg += WORD_TO_BYTE, i++) { ++ priv->statistics[i] += readl(priv->gmac_iobase + reg); ++ } ++ ++ for (queue = 0; queue < RSS_NUM_RXQS; queue++, i++) { ++ count = 0; ++ for_each_online_cpu(cpu) ++ count += kstat_irqs_cpu(priv->irq[queue], cpu); ++ priv->statistics[i] = count; ++ } ++} ++ ++static void gmac_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) ++{ ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ if (stats->n_stats > ARRAY_SIZE(priv->statistics)) { ++ netdev_err(ndev, "Get ethtool stats failed, stats->n_stats invalid.\n"); ++ return; ++ } ++ ++ gmac_update_statistics(priv); ++ ++ memcpy_s(data, sizeof(u64) * stats->n_stats, priv->statistics, sizeof(u64) * stats->n_stats); ++} ++ ++static int gmac_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *cmd, ++ struct kernel_ethtool_coalesce *cqe_cmd, struct netlink_ext_ack *ext_ack) ++{ ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ cmd->rx_coalesce_usecs = gmac_timeout_reg_to_usec(priv->coalesce.rx_timeout); ++ cmd->tx_coalesce_usecs = gmac_timeout_reg_to_usec(priv->coalesce.tx_timeout); ++ cmd->rx_max_coalesced_frames = priv->coalesce.rx_frames; ++ cmd->tx_max_coalesced_frames = priv->coalesce.tx_frames; ++ ++ return 0; ++} ++ ++static bool gmac_check_coalesce_param(struct gmac_netdev_local *priv, struct ethtool_coalesce *cmd) ++{ ++ struct net_device *ndev = priv->netdev; ++ ++ if (gmac_timeout_usec_to_reg(cmd->rx_coalesce_usecs) > MAX_IN_QUQUE_TIMEOUT_TH) { ++ netdev_err(ndev, "rx-usecs range is 1 ~ %d\n", gmac_timeout_reg_to_usec(MAX_IN_QUQUE_TIMEOUT_TH)); ++ return false; ++ } ++ ++ if (gmac_timeout_usec_to_reg(cmd->tx_coalesce_usecs) > MAX_IN_QUQUE_TIMEOUT_TH) { ++ netdev_err(ndev, "tx-usecs range is 1 ~ %d\n", gmac_timeout_reg_to_usec(MAX_IN_QUQUE_TIMEOUT_TH)); ++ return false; ++ } ++ ++ if ((cmd->rx_max_coalesced_frames == 0) || (cmd->rx_max_coalesced_frames > MAX_IN_QUEUE_TH)) { ++ netdev_err(ndev, "rx-frames range is 1 ~ %d\n", MAX_IN_QUEUE_TH); ++ return false; ++ } ++ ++ if ((cmd->tx_max_coalesced_frames == 0) || (cmd->tx_max_coalesced_frames > MAX_IN_QUEUE_TH)) { ++ netdev_err(ndev, "tx-frames range is 1 ~ %d\n", MAX_IN_QUEUE_TH); ++ return false; ++ } ++ ++ return true; ++} ++ ++static int gmac_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *cmd, ++ struct kernel_ethtool_coalesce *cqe_cmd, struct netlink_ext_ack *ext_ack) ++{ ++ struct gmac_netdev_local *priv = netdev_priv(ndev); ++ ++ if (!gmac_check_coalesce_param(priv, cmd)) ++ return -EINVAL; ++ ++ priv->coalesce.rx_timeout = gmac_timeout_usec_to_reg(cmd->rx_coalesce_usecs); ++ priv->coalesce.tx_timeout = gmac_timeout_usec_to_reg(cmd->tx_coalesce_usecs); ++ priv->coalesce.rx_frames = cmd->rx_max_coalesced_frames; ++ priv->coalesce.tx_frames = cmd->tx_max_coalesced_frames; ++ ++ gmac_set_rxbq_enqueue_timeout_thres(priv, priv->coalesce.rx_timeout); ++ gmac_set_txrq_enqueue_timeout_thres(priv, priv->coalesce.tx_timeout); ++ gmac_set_rxbq_enqueue_thres(priv, 
priv->coalesce.rx_frames);
++ gmac_set_txrq_enqueue_thres(priv, priv->coalesce.tx_frames);
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
++#define GMAC_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES)
++static const struct ethtool_ops eth_ethtools_ops = {
++ .supported_coalesce_params = GMAC_ETHTOOL_COALESCE,
++#else
++static const struct ethtool_ops eth_ethtools_ops = {
++#endif
++ .get_drvinfo = gmac_get_drvinfo,
++ .get_link = gmac_get_link,
++ .get_wol = gmac_get_wol,
++ .set_wol = gmac_set_wol,
++ .get_pauseparam = gmac_get_pauseparam,
++ .set_pauseparam = gmac_set_pauseparam,
++ .get_msglevel = gmac_ethtool_getmsglevel,
++ .set_msglevel = gmac_ethtool_setmsglevel,
++ .get_rxfh_key_size = gmac_get_rxfh_key_size,
++ .get_rxfh_indir_size = gmac_get_rxfh_indir_size,
++ .get_rxfh = gmac_get_rxfh,
++ .set_rxfh = gmac_set_rxfh,
++ .get_rxnfc = gmac_get_rxnfc,
++ .set_rxnfc = gmac_set_rxnfc,
++ .get_link_ksettings = phy_ethtool_get_link_ksettings,
++ .set_link_ksettings = phy_ethtool_set_link_ksettings,
++ .get_sset_count = gmac_get_sset_count,
++ .get_strings = gmac_get_strings,
++ .get_ethtool_stats = gmac_get_ethtool_stats,
++ .get_coalesce = gmac_get_coalesce,
++ .set_coalesce = gmac_set_coalesce,
++};
++
++void gmac_set_ethtool_ops(struct net_device *ndev)
++{
++ ndev->ethtool_ops = &eth_ethtools_ops;
++}
+diff --git a/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.h b/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.h
+new file mode 100644
+index 000000000..ba1b4a9db
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/gmac_ethtool_ops.h
+@@ -0,0 +1,12 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */
++
++#ifndef GMAC_ETHTOOL_OPS_H
++#define GMAC_ETHTOOL_OPS_H
++
++#include "gmac.h"
++
++void gmac_set_ethtool_ops(struct net_device *ndev);
++
++#endif
+diff --git a/drivers/net/ethernet/vendor/gmac/gmac_external_phy.c b/drivers/net/ethernet/vendor/gmac/gmac_external_phy.c
+new file mode 100644
+index 000000000..b679ccb7f
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/gmac_external_phy.c
+@@ -0,0 +1,563 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved.
++ * Description: Gmac external phy settings
++ * We do not guarantee the compatibility of the following device models in
++ * the table. Device compatibility is based solely on the list of compatible
++ * devices in the release package.
++ * Author: AuthorNameMagicTag ++ * Create: 2022-2-8 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "gmac.h" ++ ++#define RTL821X_WOL_CTRL 0x10 ++#define RTL821X_LED_CTRL 0x10 ++#define RTL821X_INTBCR 0x16 ++#define RTL821X_PHYCR1 0x18 ++#define RTL821X_PHYCR2 0x19 ++#define RTL821X_PAGE_SELECT 0x1f ++ ++#define RTL821X_PIN_PMEB_SELECT BIT(5) ++#define RTL821X_WOL_MAGIC_ENABLE BIT(12) ++#define RTL821X_WOL_UNICAST_ENABLE BIT(10) ++ ++#define RTL821X_REG24_ALDPS_PLL_OFF_ENABLE BIT(1) ++#define RTL821X_REG24_ALDPS_ENABLE BIT(2) ++#define RTL821X_REG24_ALDPS_XTAL_OFF_ENABLE BIT(12) ++ ++#define RTL821X_REG25_CLKOUT_ENABLE BIT(0) ++#define RTL821X_REG25_RSVD8 BIT(8) ++ ++#define RTL821X_WOL_PAGE 0xd8a ++#define RTL821X_LED_PAGE 0xd04 ++ ++#define RTL821X_LED_CTRL_DEFAULT 0x8170 ++#define NUM_8 8 ++ ++#define MII_EXP_ADDR 0x1E ++#define MII_EXP_DATA 0x1F ++#define YT8521X_MACADDR_CFG1 0xa007 ++#define YT8521X_MACADDR_CFG2 0xa008 ++#define YT8521X_MACADDR_CFG3 0xa009 ++#define YT8521X_WOL_CFG 0xa00a ++#define YT8521X_LED0_CFG 0xa00c ++#define YT8521X_LED1_CFG 0xa00d ++#define YT8521X_LED2_CFG 0xa00e ++ ++#define YT8521X_LED0_CFG_DEFAULT 0x1e00 ++#define YT8521X_LED1_CFG_DEFAULT 0x1800 ++ ++#define YT8521X_PMEB_SEL BIT(6) ++#define YT8521X_WOL_ENA BIT(3) ++#define YT8521X_PME_N_LEVEL_TRIGGERD BIT(0) ++ ++static int rtl821x_read_page(struct phy_device *phydev) ++{ ++ return phy_read(phydev, RTL821X_PAGE_SELECT); ++} ++ ++static int rtl821x_write_page(struct phy_device *phydev, int page) ++{ ++ return phy_write(phydev, RTL821X_PAGE_SELECT, page); ++} ++ ++static int rtl821x_set_wol_mac(struct phy_device *phydev, const u8 *mac) ++{ ++ int oldpage = rtl821x_read_page(phydev); ++ ++ rtl821x_write_page(phydev, 0xd8c); ++ phy_write(phydev, 0x10, mac[0] | (mac[1] << NUM_8)); ++ phy_write(phydev, 0x11, mac[2] | (mac[3] << NUM_8)); /* 2,3:mac addr array offset */ ++ phy_write(phydev, 0x12, mac[4] | (mac[5] << NUM_8)); /* 4,5:mac addr array offset */ ++ rtl821x_write_page(phydev, oldpage); ++ return 0; ++} ++ ++static int rtl821x_enter_pmeb_mode(struct phy_device *phydev) ++{ ++ int oldpage = rtl821x_read_page(phydev); ++ u32 val; ++ ++ rtl821x_write_page(phydev, 0xd40); ++ val = (unsigned int)phy_read(phydev, RTL821X_INTBCR); ++ val |= RTL821X_PIN_PMEB_SELECT; ++ phy_write(phydev, RTL821X_INTBCR, val); ++ rtl821x_write_page(phydev, oldpage); ++ return 0; ++} ++ ++static int rtl821x_wol_reset(struct phy_device *phydev) ++{ ++ int oldpage = rtl821x_read_page(phydev); ++ ++ rtl821x_write_page(phydev, RTL821X_WOL_PAGE); ++ phy_write(phydev, 0x11, 0x1fff); ++ rtl821x_write_page(phydev, oldpage); ++ return 0; ++} ++ ++ ++static int rtl821x_wol_waveform_config(struct phy_device *phydev) ++{ ++#define PULSE_LOW_REGISTER 0x13 /* Interrupt Status Register */ ++ int oldpage = rtl821x_read_page(phydev); ++ u32 val; ++ ++ rtl821x_write_page(phydev, RTL821X_WOL_PAGE); ++ val = (u32)phy_read(phydev, PULSE_LOW_REGISTER); ++ val |= 0x1; ++ phy_write(phydev, PULSE_LOW_REGISTER, val); ++ rtl821x_write_page(phydev, oldpage); ++ return 0; ++} ++ ++static int rtl821x_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) ++{ ++ int oldpage; ++ u16 wol_ctrl; ++ struct net_device *netdev = phydev->attached_dev; ++ struct gmac_netdev_local *priv = netdev_priv(netdev); ++ ++ oldpage = rtl821x_read_page(phydev); ++ rtl821x_enter_pmeb_mode(phydev); ++ rtl821x_wol_waveform_config(phydev); ++ ++ /* Set mac address */ ++ rtl821x_set_wol_mac(phydev, (const u8 *)netdev->dev_addr); ++ ++ /* 
Set max packet length */
++ rtl821x_write_page(phydev, RTL821X_WOL_PAGE);
++ phy_write(phydev, 0x11, 0x9fff);
++
++ /* Enable wol events */
++ wol_ctrl = (u16)phy_read(phydev, RTL821X_WOL_CTRL);
++ if (wol->wolopts & WAKE_MAGIC) {
++ wol_ctrl |= RTL821X_WOL_MAGIC_ENABLE;
++ } else {
++ wol_ctrl &= ~RTL821X_WOL_MAGIC_ENABLE;
++ }
++ if (wol->wolopts & WAKE_UCAST) {
++ wol_ctrl |= RTL821X_WOL_UNICAST_ENABLE;
++ } else {
++ wol_ctrl &= ~RTL821X_WOL_UNICAST_ENABLE;
++ }
++ phy_write(phydev, RTL821X_WOL_CTRL, wol_ctrl);
++
++ rtl821x_wol_reset(phydev);
++ rtl821x_write_page(phydev, oldpage);
++
++ netdev->wol_enabled = !!wol->wolopts;
++ priv->phy_wol_enable = !!wol->wolopts;
++
++ return 0;
++}
++
++static void rtl821x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
++{
++ u32 val;
++ int oldpage;
++
++ wol->supported = WAKE_UCAST | WAKE_MAGIC;
++ wol->wolopts = 0;
++
++ oldpage = rtl821x_read_page(phydev);
++ rtl821x_write_page(phydev, RTL821X_WOL_PAGE);
++ val = (u32)phy_read(phydev, RTL821X_WOL_CTRL);
++ if (val & RTL821X_WOL_UNICAST_ENABLE) {
++ wol->wolopts |= WAKE_UCAST;
++ }
++
++ if (val & RTL821X_WOL_MAGIC_ENABLE) {
++ wol->wolopts |= WAKE_MAGIC;
++ }
++ rtl821x_write_page(phydev, oldpage);
++}
++
++static int rtl821x_suspend(struct phy_device *phydev)
++{
++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++ u32 oldpage;
++
++ phy_ethtool_get_wol(phydev, &wol);
++ if (wol.wolopts) {
++ oldpage = (u32)rtl821x_read_page(phydev);
++ /* Set max packet length */
++ rtl821x_write_page(phydev, RTL821X_WOL_PAGE);
++ phy_write(phydev, 0x11, 0x9fff);
++ rtl821x_write_page(phydev, oldpage);
++
++ return 0;
++ }
++
++ return genphy_suspend(phydev);
++}
++
++static int rtl821x_resume(struct phy_device *phydev)
++{
++ rtl821x_wol_reset(phydev);
++ return genphy_resume(phydev);
++}
++
++static void rtl821x_disable_clkout(struct phy_device *phydev)
++{
++ int oldpage = rtl821x_read_page(phydev);
++ u32 val;
++
++ /* disable CLKOUT */
++ rtl821x_write_page(phydev, 0xa43);
++ val = (unsigned int)phy_read(phydev, RTL821X_PHYCR2);
++ val &= ~RTL821X_REG25_CLKOUT_ENABLE;
++ phy_write(phydev, RTL821X_PHYCR2, val);
++
++ /* reset phy */
++ rtl821x_write_page(phydev, 0x0);
++ if (genphy_soft_reset(phydev) != 0)
++ pr_err("rtl821x: phy_reset failed while disabling CLKOUT\n");
++
++ rtl821x_write_page(phydev, oldpage);
++
++ return;
++}
++
++static void rtl821x_set_aldps_mode(struct phy_device *phydev)
++{
++ int oldpage = rtl821x_read_page(phydev);
++ u32 val;
++
++ /* enable ALDPS: Advanced Link Down Power Saving */
++ rtl821x_write_page(phydev, 0xa43);
++ val = (unsigned int)phy_read(phydev, RTL821X_PHYCR1);
++ val |= RTL821X_REG24_ALDPS_ENABLE;
++ phy_write(phydev, RTL821X_PHYCR1, val);
++
++ /* also power down the XTAL and PLL while in ALDPS */
++ val |= RTL821X_REG24_ALDPS_XTAL_OFF_ENABLE;
++ val |= RTL821X_REG24_ALDPS_PLL_OFF_ENABLE;
++ phy_write(phydev, RTL821X_PHYCR1, val);
++
++ val = (unsigned int)phy_read(phydev, RTL821X_PHYCR2);
++ val |= RTL821X_REG25_RSVD8;
++ phy_write(phydev, RTL821X_PHYCR2, val);
++
++ rtl821x_write_page(phydev, oldpage);
++
++ return;
++}
++
++static bool confirm_status_in_phy_dts(struct phy_device *phydev, char *name, char *status)
++{
++ struct device_node *node = phydev->mdio.dev.of_node;
++ struct property *pp;
++ char *value;
++
++ if (name == NULL || status == NULL) {
++ pr_err("Invalid NULL property name or status input!\n");
++ return false;
++ }
++
++ pp = of_find_property(node, name, NULL);
++ if (pp == NULL)
++ return false;
++
++ value = pp->value;
++
++ return strncmp(value, status, strlen(status)) == 0;
++}
++
++static void rtl821x_phyled_set(struct phy_device *phydev)
++{
++ struct device_node *node = phydev->mdio.dev.of_node;
++ unsigned int phyled_val = 0;
++
++ if (of_property_read_u32(node, "led-cfg", &phyled_val) == 0) {
++ rtl821x_write_page(phydev, RTL821X_LED_PAGE);
++ phy_write(phydev, RTL821X_LED_CTRL, phyled_val);
++ }
++ return;
++}
++
++static int rtl821x_config_init(struct phy_device *phydev)
++{
++ int oldpage;
++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++ wol.wolopts |= WAKE_MAGIC;
++
++ phy_ethtool_set_wol(phydev, &wol);
++
++ rtl821x_disable_clkout(phydev); /* HW does not support CLKOUT */
++ if (confirm_status_in_phy_dts(phydev, "ALDPS", "enabled"))
++ rtl821x_set_aldps_mode(phydev);
++
++ oldpage = rtl821x_read_page(phydev);
++ if (confirm_status_in_phy_dts(phydev, "phy-led", "enabled")) {
++ rtl821x_phyled_set(phydev);
++ } else {
++ rtl821x_write_page(phydev, RTL821X_LED_PAGE);
++ phy_write(phydev, RTL821X_LED_CTRL, RTL821X_LED_CTRL_DEFAULT);
++ }
++ rtl821x_write_page(phydev, oldpage);
++ return 0;
++}
++
++static int yt8521x_phy_exp_write(struct phy_device *phydev, u32 regnum, u16 val)
++{
++ int ret;
++
++ ret = phy_write(phydev, MII_EXP_ADDR, regnum);
++ if (ret != 0)
++ return ret;
++
++ ret = phy_write(phydev, MII_EXP_DATA, val);
++ if (ret != 0)
++ return ret;
++ return 0;
++}
++
++static int yt8521x_phy_exp_read(struct phy_device *phydev, u32 regnum)
++{
++ int ret;
++
++ ret = phy_write(phydev, MII_EXP_ADDR, regnum);
++ if (ret != 0)
++ return ret;
++
++ /* returns the register value, or a negative errno from the MDIO read */
++ return phy_read(phydev, MII_EXP_DATA);
++}
++
++static void yt8521x_phyled_set(struct phy_device *phydev)
++{
++ struct device_node *node = phydev->mdio.dev.of_node;
++ unsigned int phyled_val0 = 0;
++ unsigned int phyled_val1 = 0;
++ unsigned int phyled_val2 = 0;
++
++ if (of_property_read_u32(node, "led-cfg", &phyled_val0) == 0)
++ yt8521x_phy_exp_write(phydev, YT8521X_LED0_CFG, phyled_val0);
++
++ if (of_property_read_u32(node, "led-cfg1", &phyled_val1) == 0)
++ yt8521x_phy_exp_write(phydev, YT8521X_LED1_CFG, phyled_val1);
++
++ if (of_property_read_u32(node, "led-cfg2", &phyled_val2) == 0)
++ yt8521x_phy_exp_write(phydev, YT8521X_LED2_CFG, phyled_val2);
++
++ return;
++}
++
++static int yt8521x_config_init(struct phy_device *phydev)
++{
++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++ wol.wolopts |= WAKE_MAGIC;
++
++ phy_ethtool_set_wol(phydev, &wol);
++ if (confirm_status_in_phy_dts(phydev, "phy-led", "enabled")) {
++ yt8521x_phyled_set(phydev);
++ } else {
++ yt8521x_phy_exp_write(phydev, YT8521X_LED0_CFG, YT8521X_LED0_CFG_DEFAULT);
++ yt8521x_phy_exp_write(phydev, YT8521X_LED1_CFG, YT8521X_LED1_CFG_DEFAULT);
++ }
++ return 0;
++}
++
++static int yt8521x_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
++{
++ struct net_device *netdev = phydev->attached_dev;
++ u8 *mac = (u8 *)netdev->dev_addr;
++ struct gmac_netdev_local *priv = netdev_priv(netdev);
++ u16 wol_ctrl;
++
++ /* Set mac address */
++ yt8521x_phy_exp_write(phydev, YT8521X_MACADDR_CFG1, (mac[0] << NUM_8) | mac[1]);
++ yt8521x_phy_exp_write(phydev, YT8521X_MACADDR_CFG2, (mac[2] << NUM_8) | mac[3]);
++ yt8521x_phy_exp_write(phydev, YT8521X_MACADDR_CFG3, (mac[4] << NUM_8) | mac[5]);
++
++ /* Enable wol events */
++ wol_ctrl = yt8521x_phy_exp_read(phydev, YT8521X_WOL_CFG);
++ wol_ctrl |= YT8521X_PMEB_SEL;
++ wol_ctrl |= YT8521X_PME_N_LEVEL_TRIGGERD;
++ if (wol->wolopts & WAKE_MAGIC) {
++ wol_ctrl |=
YT8521X_WOL_ENA; ++ } else { ++ wol_ctrl &= ~YT8521X_WOL_ENA; ++ } ++ yt8521x_phy_exp_write(phydev, YT8521X_WOL_CFG, wol_ctrl); ++ ++ netdev->wol_enabled = !!wol->wolopts; ++ priv->phy_wol_enable = !!wol->wolopts; ++ ++ return 0; ++} ++ ++static void yt8521x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) ++{ ++ u16 wol_ctrl; ++ ++ wol->supported = WAKE_MAGIC; ++ wol->wolopts = 0; ++ ++ wol_ctrl = yt8521x_phy_exp_read(phydev, YT8521X_WOL_CFG); ++ if (wol_ctrl & YT8521X_WOL_ENA) { ++ wol->wolopts |= WAKE_MAGIC; ++ } ++} ++ ++static int yt8521x_suspend(struct phy_device *phydev) ++{ ++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; ++ ++ phy_ethtool_get_wol(phydev, &wol); ++ if (wol.wolopts) { ++ return 0; ++ } ++ return genphy_suspend(phydev); ++} ++ ++static int yt8521x_resume(struct phy_device *phydev) ++{ ++ return genphy_resume(phydev); ++} ++ ++struct hlphy_priv { ++ __u32 wolopts; ++}; ++ ++static int hlphy_probe(struct phy_device *phydev) ++{ ++ struct device *dev = &phydev->mdio.dev; ++ struct hlphy_priv *priv; ++ ++ if (confirm_status_in_phy_dts(phydev, "hlphy-driver", "disabled")) ++ return -1; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ priv->wolopts = 0; ++ phydev->priv = priv; ++ ++ return 0; ++} ++ ++static int hlphy_config_init(struct phy_device *phydev) ++{ ++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; ++ wol.wolopts |= WAKE_MAGIC; ++ ++ phy_ethtool_set_wol(phydev, &wol); ++ return 0; ++} ++ ++static int hlphy_suspend(struct phy_device *phydev) ++{ ++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; ++ ++ phy_ethtool_get_wol(phydev, &wol); ++ ++ if (wol.wolopts != 0) ++ return 0; ++ else ++ return genphy_suspend(phydev); ++} ++ ++static int hlphy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) ++{ ++ struct net_device *netdev = phydev->attached_dev; ++ struct gmac_netdev_local *mac_priv = netdev_priv(netdev); ++ struct hlphy_priv *priv = phydev->priv; ++ ++ netdev->wol_enabled = !!wol->wolopts; ++ mac_priv->phy_wol_enable = !!wol->wolopts; ++ priv->wolopts = wol->wolopts; ++ ++ return 0; ++} ++ ++static void hlphy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) ++{ ++ struct hlphy_priv *priv = phydev->priv; ++ ++ wol->supported = WAKE_MAGIC; ++ ++ if (priv->wolopts != 0) ++ wol->wolopts = WAKE_MAGIC; ++ else ++ wol->wolopts = 0; ++} ++ ++static int hlphy_match_phy_device(struct phy_device *phydev) ++{ ++ return 1; ++} ++ ++static struct phy_driver ext_phy_drvs[] = { ++ { ++ .phy_id = 0x001cc916, ++ .name = "RTL8211F Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .probe = hlphy_probe, ++ .config_init = rtl821x_config_init, ++// .soft_reset = genphy_no_soft_reset, ++ .features = PHY_GBIT_FEATURES, ++ .aneg_done = genphy_aneg_done, ++ .suspend = rtl821x_suspend, ++ .resume = rtl821x_resume, ++ .set_loopback = genphy_loopback, ++ .set_wol = rtl821x_set_wol, ++ .get_wol = rtl821x_get_wol, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) ++ .read_page = rtl821x_read_page, ++ .write_page = rtl821x_write_page, ++#endif ++ }, ++ { ++ .phy_id = 0x0000011a, ++ .name = "YT8521 Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .probe = hlphy_probe, ++ .config_init = yt8521x_config_init, ++// .soft_reset = genphy_no_soft_reset, ++ .features = PHY_GBIT_FEATURES, ++ .aneg_done = genphy_aneg_done, ++ .suspend = yt8521x_suspend, ++ .resume = yt8521x_resume, ++ .set_loopback = genphy_loopback, ++ .set_wol = yt8521x_set_wol, ++ .get_wol = yt8521x_get_wol, ++ }, 
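++	/*
++	 * Fallback entry: hlphy_match_phy_device() always returns 1, so this
++	 * driver can bind a PHY that is not matched by phy_id above.
++	 */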
++ { ++ .phy_id = 0xffffffff, ++ .phy_id_mask = 0xffffffff, ++ .name = "Generic PHY for HL", ++ .probe = hlphy_probe, ++ .config_init = hlphy_config_init, ++ .features = PHY_GBIT_FEATURES, ++ .aneg_done = genphy_aneg_done, ++ .suspend = hlphy_suspend, ++ .resume = genphy_resume, ++ .set_loopback = genphy_loopback, ++ .set_wol = hlphy_set_wol, ++ .get_wol = hlphy_get_wol, ++ .match_phy_device = hlphy_match_phy_device, ++ }, ++}; ++module_phy_driver(ext_phy_drvs); ++ ++static struct mdio_device_id __maybe_unused ext_phy_tbl[] = { ++ { 0x001cc916, 0x001fffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, ext_phy_tbl); ++ ++MODULE_AUTHOR("LBIN"); ++MODULE_DESCRIPTION("external phy driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_mdio.c b/drivers/net/ethernet/vendor/gmac/gmac_mdio.c +new file mode 100644 +index 000000000..e201261eb +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_mdio.c +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (c) Company 2018-2019. All rights reserved. ++ * Description: GMAC mdio driver ++ */ ++#include "gmac_mdio.h" ++#include "gmac.h" ++ ++static int wait_mdio_ready(const struct gmac_netdev_local *ld) ++{ ++ int timeout_us = 1000; ++ ++ while ((--timeout_us > 0) && (!test_mdio_ready(ld))) ++ udelay(1); ++ ++ return timeout_us; ++} ++ ++int gmac_mdio_read(struct mii_bus *bus, int phy, int reg) ++{ ++ struct gmac_netdev_local *ld = NULL; ++ int timeout = 1000; ++ int val; ++ ++ if (bus == NULL) ++ return -ETIMEDOUT; ++ ++ ld = bus->priv; ++ if (ld == NULL) ++ return -ETIMEDOUT; ++ ++ if (!wait_mdio_ready(ld)) ++ return -ETIMEDOUT; ++ ++ mdio_start_phyread(ld, (unsigned int)phy, (unsigned int)reg); ++ ++ while ((wait_mdio_ready(ld) == 0) && (timeout-- > 0)) ++ udelay(1); ++ ++ if (timeout <= 0 || !test_mdio_read_data_done(ld)) ++ return -ETIMEDOUT; ++ ++ val = mdio_get_phyread_val(ld); ++ ++ gmac_trace(2, "mdio read phy:%x, reg:%x = %x\n", phy, reg, val); /* trace level 2 */ ++ ++ return val; ++} ++ ++int gmac_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) ++{ ++ struct gmac_netdev_local *ld = NULL; ++ ++ if (bus == NULL) ++ return -ETIMEDOUT; ++ ++ ld = bus->priv; ++ if (ld == NULL) ++ return -ETIMEDOUT; ++ ++ if (!wait_mdio_ready(ld)) ++ return -ETIMEDOUT; ++ ++ gmac_trace(2, "mdio write phy:%x, reg:%x = %x\n", phy, reg, val); /* trace level 2 */ ++ ++ mdio_set_phywrite_val(ld, val); ++ mdio_phywrite(ld, (u32)phy, (u32)reg); ++ ++ if (!wait_mdio_ready(ld)) ++ return -ETIMEDOUT; ++ else ++ return 0; ++} +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_mdio.h b/drivers/net/ethernet/vendor/gmac/gmac_mdio.h +new file mode 100644 +index 000000000..9e2504e8f +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_mdio.h +@@ -0,0 +1,114 @@ ++/* ++ * Copyright (c) Company 2018-2019. All rights reserved. 
++ * Description: The header file of GMAC mdio driver ++ */ ++#ifndef __SOCT_GMAC_MDIO_H__ ++#define __SOCT_GMAC_MDIO_H__ ++ ++#include "gmac.h" ++ ++#define GMAC_MDIO_IO_BASE 0x10090000 ++#define GMAC_MDIO_IO_SIZE 0x1000 ++#define GMAC_MDIO_FRQDIV 0 ++ ++#define REG_MDIO_SINGLE_CMD 0x000003C0 ++#define REG_MDIO_SINGLE_DATA 0x000003C4 ++#define REG_MDIO_RDATA_STATUS 0x000003D0 ++ ++/* 0:mdio operation done, 1: start mdio operation */ ++#define MDIO_CMD mk_bits(20, 1) ++#define MDIO_WR_DATA mk_bits(0, 16) ++#define MDIO_RDATA_STATUS mk_bits(0, 1) ++ ++#define MDIO_CMD_READ 2 ++#define MDIO_CMD_WRITE 1 ++ ++#define GMAC_MDIO_TRACE_LEVEL 2 ++ ++static inline void gmac_mdio_writel_bits(const struct gmac_netdev_local *ld, ++ unsigned int val, unsigned int ofs, unsigned int bits_desc) ++{ ++ unsigned int shift = bits_desc >> 16; ++ unsigned int reg = readl(ld->gmac_iobase + ofs); ++ unsigned int mask = ((bits_desc & 0x3F) < 32) ? ++ (((1 << (bits_desc & 0x3F)) - 1) << shift) : 0xffffffff; ++ unsigned int write_val = (reg & (~mask)) | (((unsigned int)val << shift) & mask); ++ ++ writel(write_val, ld->gmac_iobase + ofs); ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "writel(0x%04X) = 0x%08X", ofs, write_val); ++} ++ ++static inline unsigned int gmac_mdio_readl_bits(const struct gmac_netdev_local *ld, ++ unsigned int ofs, unsigned int bits_desc) ++{ ++ unsigned int shift = bits_desc >> 16; ++ unsigned int mask = ((bits_desc & 0x3F) < 32) ? ++ (((1 << (bits_desc & 0x3F)) - 1) << shift) : 0xffffffff; ++ unsigned int reg = readl(ld->gmac_iobase + ofs); ++ ++ reg = (reg & mask) >> shift; ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "readl(0x%04X) = 0x%08X", ofs, reg); ++ ++ return reg; ++} ++ ++static inline unsigned int mdio_mk_rwctl(unsigned int rw_cmd, unsigned int phy_exaddr, ++ unsigned int phy_regnum) ++{ ++ /* read/write command-bits-setting, according to register list */ ++ return (0x1 << 20) | ((rw_cmd & 0x3) << 16) | ((phy_exaddr & 0x1f) << 8) | (phy_regnum & 0x1f); ++} ++ ++static inline void mdio_start_phyread(const struct gmac_netdev_local *ld, unsigned int phy, unsigned int reg) ++{ ++ unsigned int val = mdio_mk_rwctl(MDIO_CMD_READ, phy, reg); ++ unsigned int ofs = REG_MDIO_SINGLE_CMD; ++ ++ writel(val, ld->gmac_iobase + ofs); ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "writel(0x%04X) = 0x%08X", ofs, val); ++} ++ ++static inline int mdio_get_phyread_val(const struct gmac_netdev_local *ld) ++{ ++ unsigned int ofs = REG_MDIO_SINGLE_DATA; ++ unsigned int reg = readl(ld->gmac_iobase + ofs) >> 16; ++ ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "readl(0x%04X) = 0x%08X", ofs, reg); ++ ++ return (int)reg; ++} ++ ++static inline void mdio_set_phywrite_val(const struct gmac_netdev_local *ld, u16 val) ++{ ++ unsigned int reg; ++ unsigned int bits_desc = MDIO_WR_DATA; ++ unsigned int ofs = REG_MDIO_SINGLE_DATA; ++ ++ gmac_mdio_writel_bits(ld, val, ofs, bits_desc); ++ reg = readl(ld->gmac_iobase + ofs); ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "write reg 0x%x, bits:0x%x= 0x%x, then read = 0x%x", ofs, bits_desc, val, reg); ++} ++ ++static inline void mdio_phywrite(const struct gmac_netdev_local *ld, unsigned int phy, unsigned int reg) ++{ ++ unsigned int val = mdio_mk_rwctl(MDIO_CMD_WRITE, phy, reg); ++ unsigned int ofs = REG_MDIO_SINGLE_CMD; ++ ++ writel(val, ld->gmac_iobase + ofs); ++ gmac_trace(GMAC_MDIO_TRACE_LEVEL, "writel(0x%04X) = 0x%08X", ofs, val); ++} ++ ++static inline bool test_mdio_ready(const struct gmac_netdev_local *ld) ++{ ++ return gmac_mdio_readl_bits(ld, REG_MDIO_SINGLE_CMD, MDIO_CMD) == 0; ++} ++ ++static inline 
bool test_mdio_read_data_done(const struct gmac_netdev_local *ld)
++{
++	return gmac_mdio_readl_bits(ld, REG_MDIO_RDATA_STATUS, MDIO_RDATA_STATUS) == 0;
++}
++
++int gmac_mdio_read(struct mii_bus *bus, int phy, int reg);
++int gmac_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
++
++#endif
+diff --git a/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.c b/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.c
+new file mode 100644
+index 000000000..d42318354
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.c
+@@ -0,0 +1,785 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved.
++ */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#include "gmac_pm.h"
++#include "gmac_proc.h"
++#include "gmac_netdev_ops.h"
++
++static netdev_tx_t gmac_net_xmit(struct sk_buff *skb, struct net_device *dev);
++
++static int gmac_net_open(struct net_device *dev)
++{
++	struct gmac_netdev_local *ld = netdev_priv(dev);
++	unsigned long flags;
++
++	clk_prepare_enable(ld->macif_clk);
++	clk_prepare_enable(ld->clk);
++
++	phy_resume(ld->phy);
++	/*
++	 * If the mac address is configured by
++	 * "ifconfig ethX hw ether XX:XX:XX:XX:XX:XX",
++	 * ethX must be in the down state and the mac core clock is
++	 * disabled, so the address has not yet been written to the
++	 * mac core register.
++	 * Set the mac address again here, because the mac core clock
++	 * is enabled at this point and the register can be written.
++	 */
++	gmac_hw_set_mac_addr(ld);
++
++	/*
++	 * Use netif_carrier_off() here, because the default carrier
++	 * state should be off, and this call must come before
++	 * phy_start().
++	 */
++	netif_carrier_off(dev);
++	gmac_enable_napi(ld);
++	phy_start(ld->phy);
++
++	gmac_hw_desc_enable(ld);
++	gmac_port_enable(ld);
++	gmac_irq_enable_all_queue(ld);
++
++	spin_lock_irqsave(&ld->rxlock, flags);
++	gmac_rx_refill(ld);
++	spin_unlock_irqrestore(&ld->rxlock, flags);
++
++	ld->monitor.expires = jiffies + GMAC_MONITOR_TIMER;
++	mod_timer(&ld->monitor, ld->monitor.expires);
++
++	netif_start_queue(dev);
++
++	return 0;
++}
++
++static int gmac_net_close(struct net_device *dev)
++{
++	struct gmac_netdev_local *ld = netdev_priv(dev);
++
++	gmac_irq_disable_all_queue(ld);
++
++	/* The logic needs 2 ms to complete all commands
++	 * before the gsf clk is disabled
++	 */
++	gmac_hw_desc_disable(ld);
++	gmac_port_disable(ld);
++	msleep(2); /* wait 2 ms */
++
++	gmac_disable_napi(ld);
++
++	netif_carrier_off(dev);
++	netif_stop_queue(dev);
++
++	phy_stop(ld->phy);
++	phy_suspend(ld->phy);
++	del_timer_sync(&ld->monitor);
++
++	clk_disable_unprepare(ld->clk);
++	clk_disable_unprepare(ld->macif_clk);
++
++	return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
++static void gmac_net_timeout(struct net_device *dev, unsigned int txqueue)
++#else
++static void gmac_net_timeout(struct net_device *dev)
++#endif
++{
++	dev->stats.tx_errors++;
++
++	pr_err("tx timeout!\n");
++}
++
++static int gmac_check_skb_len(struct sk_buff *skb, struct net_device *dev)
++{
++	if (skb->len < ETH_HLEN) {
++		dev_kfree_skb_any(skb);
++		dev->stats.tx_errors++;
++		dev->stats.tx_dropped++;
++		return -1;
++	}
++	return 0;
++}
++
++static int gmac_net_xmit_normal(struct sk_buff *skb, struct net_device *dev, struct gmac_desc *desc, u32 pos)
++{
++	struct gmac_netdev_local *ld = netdev_priv(dev);
++	dma_addr_t addr;
++
++	addr = dma_map_single(ld->dev, skb->data, skb->len, DMA_TO_DEVICE);
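++	/*
++	 * A failed DMA mapping must not leave a stale pointer in the tx
++	 * ring: drop the packet and clear both skb slots before returning.
++	 */
++	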
if (unlikely(dma_mapping_error(ld->dev, addr))) { ++ dev_kfree_skb_any(skb); ++ dev->stats.tx_dropped++; ++ ld->tx_skb[pos] = NULL; ++ ld->TX_BQ.skb[pos] = NULL; ++ return -1; ++ } ++ desc->data_buff_addr = (u32)addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ desc->rxhash = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK; ++#endif ++ desc->buffer_len = ETH_MAX_FRAME_SIZE - 1; ++ desc->data_len = skb->len; ++ desc->fl = DESC_FL_FULL; ++ desc->descvid = DESC_VLD_BUSY; ++ ++ return 0; ++} ++ ++static int gmac_tx_avail(struct gmac_netdev_local const *ld) ++{ ++ unsigned int tx_bq_wr_offset, tx_bq_rd_offset; ++ ++ if (ld == NULL) ++ return -1; ++ ++ tx_bq_wr_offset = readl(ld->gmac_iobase + TX_BQ_WR_ADDR); ++ tx_bq_rd_offset = readl(ld->gmac_iobase + TX_BQ_RD_ADDR); ++ ++ return (int)((tx_bq_rd_offset >> DESC_BYTE_SHIFT) + TX_DESC_NUM - ++ (tx_bq_wr_offset >> DESC_BYTE_SHIFT) - 1); ++} ++ ++static netdev_tx_t gmac_sw_gso(struct gmac_netdev_local *ld, struct sk_buff *skb) ++{ ++ struct sk_buff *segs = NULL; ++ struct sk_buff *curr_skb = NULL; ++ int ret; ++ int gso_segs = skb_shinfo(skb)->gso_segs; ++ if (gso_segs == 0 && skb_shinfo(skb)->gso_size != 0) ++ gso_segs = DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); ++ ++ /* Estimate the number of fragments in the worst case */ ++ if (unlikely(gmac_tx_avail(ld) < gso_segs)) { ++ netif_stop_queue(ld->netdev); ++ if (gmac_tx_avail(ld) < gso_segs) { ++ ld->netdev->stats.tx_dropped++; ++ ld->netdev->stats.tx_fifo_errors++; ++ return NETDEV_TX_BUSY; ++ } ++ netif_wake_queue(ld->netdev); ++ } ++ ++ segs = skb_gso_segment(skb, ld->netdev->features & ~(NETIF_F_CSUM_MASK | ++ NETIF_F_SG | NETIF_F_GSO_SOFTWARE)); ++ if (IS_ERR_OR_NULL(segs)) ++ goto drop; ++ ++ do { ++ curr_skb = segs; ++ segs = segs->next; ++ curr_skb->next = NULL; ++ ret = gmac_net_xmit(curr_skb, ld->netdev); ++ if (unlikely(ret != NETDEV_TX_OK)) ++ pr_err_once("gmac_net_xmit error ret=%d\n", ret); ++ } while (segs != NULL); ++ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++ ++drop: ++ dev_kfree_skb_any(skb); ++ ld->netdev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++} ++ ++static int gmac_xmit_gso_sg_frag(struct gmac_netdev_local *ld, struct sk_buff *skb, struct sg_desc *desc_cur, ++ struct gmac_tso_desc *tx_bq_desc, unsigned int desc_pos) ++{ ++ int nfrags = skb_shinfo(skb)->nr_frags; ++ dma_addr_t addr; ++ dma_addr_t dma_addr; ++ int i, ret, len; ++ ++ for (i = 0; i < nfrags; i++) { ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ len = skb_frag_size(frag); ++ ++ dma_addr = skb_frag_dma_map(ld->dev, frag, 0, len, DMA_TO_DEVICE); ++ ret = dma_mapping_error(ld->dev, dma_addr); ++ if (unlikely(ret)) { ++ pr_err("skb frag DMA Mapping fail"); ++ return -EFAULT; ++ } ++ desc_cur->frags[i].addr = (u32)dma_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ desc_cur->frags[i].reserved = (dma_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET; ++#endif ++ desc_cur->frags[i].size = len; ++ } ++ ++ addr = ld->dma_sg_phy + ld->sg_head * sizeof(struct sg_desc); ++ tx_bq_desc->data_buff_addr = (u32)addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ tx_bq_desc->reserve_desc2 = (addr >> REG_BIT_WIDTH) & ++ TX_DESC_HI8_MASK; ++#endif ++ ld->TX_BQ.sg_desc_offset[desc_pos] = ld->sg_head; ++ ++ ld->sg_head = (ld->sg_head + 1) % ld->sg_count; ++ ++ return 0; ++} ++ ++static int gmac_xmit_gso_sg(struct gmac_netdev_local *ld, struct sk_buff *skb, struct gmac_tso_desc *tx_bq_desc, ++ unsigned int desc_pos) ++{ ++ struct sg_desc *desc_cur = NULL; ++ dma_addr_t dma_addr; ++ int ret; ++ ++ if (unlikely(((ld->sg_head + 1) 
% ld->sg_count) == ld->sg_tail)) { ++ /* SG pkt, but sg desc all used */ ++ pr_err("WARNING: sg desc all used.\n"); ++ return -EBUSY; ++ } ++ ++ desc_cur = ld->dma_sg_desc + ld->sg_head; ++ ++ desc_cur->total_len = skb->len; ++ desc_cur->linear_len = skb_headlen(skb); ++ dma_addr = dma_map_single(ld->dev, skb->data, desc_cur->linear_len, DMA_TO_DEVICE); ++ ret = dma_mapping_error(ld->dev, dma_addr); ++ if (unlikely(ret)) { ++ pr_err("DMA Mapping fail"); ++ return -EFAULT; ++ } ++ desc_cur->linear_addr = (u32)dma_addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ desc_cur->reserv3 = (dma_addr >> REG_BIT_WIDTH) << SG_DESC_HI8_OFFSET; ++#endif ++ ret = gmac_xmit_gso_sg_frag(ld, skb, desc_cur, tx_bq_desc, desc_pos); ++ if (unlikely(ret)) ++ return ret; ++ ++ return 0; ++} ++ ++static int gmac_get_pkt_info(struct gmac_netdev_local *ld, struct sk_buff *skb, struct gmac_tso_desc *tx_bq_desc); ++ ++static int gmac_check_hw_capability(struct sk_buff *skb); ++ ++static int gmac_xmit_gso(struct gmac_netdev_local *ld, struct sk_buff *skb, struct gmac_tso_desc *tx_bq_desc, ++ unsigned int desc_pos) ++{ ++ int pkt_type = PKT_NORMAL; ++ int nfrags = skb_shinfo(skb)->nr_frags; ++ dma_addr_t addr; ++ int ret; ++ ++ if (skb_is_gso(skb) || nfrags) ++ pkt_type = PKT_SG; /* TSO pkt or SG pkt */ ++ else ++ pkt_type = PKT_NORMAL; ++ ++ ret = gmac_check_hw_capability(skb); ++ if (unlikely(ret)) ++ return ret; ++ ++ ret = gmac_get_pkt_info(ld, skb, tx_bq_desc); ++ if (unlikely(ret)) ++ return ret; ++ ++ if (pkt_type == PKT_NORMAL) { ++ addr = dma_map_single(ld->dev, skb->data, skb->len, DMA_TO_DEVICE); ++ ret = dma_mapping_error(ld->dev, addr); ++ if (unlikely(ret)) { ++ pr_err("Normal Packet DMA Mapping fail.\n"); ++ return -EFAULT; ++ } ++ tx_bq_desc->data_buff_addr = (u32)addr; ++#if defined(CONFIG_GMAC_DDR_64BIT) ++ tx_bq_desc->reserve_desc2 = (addr >> REG_BIT_WIDTH) & TX_DESC_HI8_MASK; ++#endif ++ } else { ++ ret = gmac_xmit_gso_sg(ld, skb, tx_bq_desc, desc_pos); ++ if (unlikely(ret)) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static netdev_tx_t gmac_net_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct gmac_netdev_local *ld = netdev_priv(dev); ++ struct gmac_desc *desc = NULL; ++ unsigned long txflags; ++ int ret; ++ u32 pos; ++ ++ if (unlikely(gmac_check_skb_len(skb, dev) < 0)) ++ return NETDEV_TX_OK; ++ ++ /* ++ * if adding gmac_xmit_reclaim here, iperf tcp client ++ * performance will be affected, from 550M(avg) to 513M~300M ++ */ ++ ++ /* software write pointer */ ++ pos = dma_cnt(readl(ld->gmac_iobase + TX_BQ_WR_ADDR)); ++ ++ spin_lock_irqsave(&ld->txlock, txflags); ++ ++ if (unlikely(ld->tx_skb[pos] || ld->TX_BQ.skb[pos])) { ++ dev->stats.tx_dropped++; ++ dev->stats.tx_fifo_errors++; ++ netif_stop_queue(dev); ++ spin_unlock_irqrestore(&ld->txlock, txflags); ++ ++ return NETDEV_TX_BUSY; ++ } ++ ++ ld->TX_BQ.skb[pos] = skb; ++ ld->tx_skb[pos] = skb; ++ ++ desc = ld->TX_BQ.desc + pos; ++ ++ if (ld->tso_supported) { ++ ret = gmac_xmit_gso(ld, skb, (struct gmac_tso_desc *)desc, pos); ++ if (unlikely(ret < 0)) { ++ ld->tx_skb[pos] = NULL; ++ ld->TX_BQ.skb[pos] = NULL; ++ spin_unlock_irqrestore(&ld->txlock, txflags); ++ ++ if (ret == -ENOTSUPP) ++ return gmac_sw_gso(ld, skb); ++ ++ dev_kfree_skb_any(skb); ++ dev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ } else { ++ ret = gmac_net_xmit_normal(skb, dev, desc, pos); ++ if (unlikely(ret < 0)) { ++ spin_unlock_irqrestore(&ld->txlock, txflags); ++ return NETDEV_TX_OK; ++ } ++ } ++ ++ /* ++ * This barrier is important here. 
It is required to ensure
++	 * the ARM CPU flushes its DMA write buffers before proceeding
++	 * to the next instruction, to ensure that GMAC will see
++	 * our descriptor changes in memory
++	 */
++	gmac_sync_barrier();
++	pos = dma_ring_incr(pos, TX_DESC_NUM);
++	writel(dma_byte(pos), ld->gmac_iobase + TX_BQ_WR_ADDR);
++
++	netif_trans_update(dev);
++	dev->stats.tx_packets++;
++	dev->stats.tx_bytes += skb->len;
++	netdev_sent_queue(dev, skb->len);
++
++	spin_unlock_irqrestore(&ld->txlock, txflags);
++
++	return NETDEV_TX_OK;
++}
++
++/* set gmac's multicast list; here we set up gmac's mc filter */
++static void gmac_gmac_multicast_list(struct net_device const *dev)
++{
++	struct gmac_netdev_local *ld = netdev_priv(dev);
++	struct netdev_hw_addr *ha = NULL;
++	unsigned int d;
++	unsigned int rec_filter;
++
++	rec_filter = readl(ld->gmac_iobase + REC_FILT_CONTROL);
++	/*
++	 * put the gmac in promisc mode when
++	 * a. dev is in IFF_PROMISC mode
++	 */
++	if ((dev->flags & IFF_PROMISC)) {
++		/* promisc mode: receive all pkts */
++		rec_filter &= ~(BIT_BC_DROP_EN | BIT_MC_MATCH_EN |
++				BIT_UC_MATCH_EN);
++	} else {
++		/* drop uc pkts whose 'DA' field does not match ours */
++		rec_filter |= BIT_UC_MATCH_EN;
++
++		if (dev->flags & IFF_BROADCAST) /* allow broadcast */
++			rec_filter &= ~BIT_BC_DROP_EN;
++		else
++			rec_filter |= BIT_BC_DROP_EN;
++
++		if (netdev_mc_empty(dev) || !(dev->flags & IFF_MULTICAST)) {
++			/* hasn't joined any mc group */
++			writel(0, ld->gmac_iobase + PORT_MC_ADDR_LOW);
++			writel(0, ld->gmac_iobase + PORT_MC_ADDR_HIGH);
++			rec_filter |= BIT_MC_MATCH_EN;
++		} else if ((netdev_mc_count(dev) == 1) &&
++			   (dev->flags & IFF_MULTICAST)) {
++			netdev_for_each_mc_addr(ha, dev) {
++				d = (ha->addr[0] << 8) | (ha->addr[1]); /* mac[0]->(15, 8) mac[1]->(7, 0) */
++				writel(d, ld->gmac_iobase + PORT_MC_ADDR_HIGH);
++
++				d = (ha->addr[2] << 24) | (ha->addr[3] << 16) | /* mac[2]->(31, 24) mac[3]->(23, 16) */
++				    (ha->addr[4] << 8) | (ha->addr[5]); /* mac[4]->(15, 8) mac[5]->(7, 0) */
++				writel(d, ld->gmac_iobase + PORT_MC_ADDR_LOW);
++			}
++			rec_filter |= BIT_MC_MATCH_EN;
++		} else {
++			rec_filter &= ~BIT_MC_MATCH_EN;
++		}
++	}
++	writel(rec_filter, ld->gmac_iobase + REC_FILT_CONTROL);
++}
++
++static void gmac_set_multicast_list(struct net_device *dev)
++{
++	gmac_gmac_multicast_list(dev);
++}
++
++static int gmac_set_features(struct net_device *dev, netdev_features_t features)
++{
++	struct gmac_netdev_local *ld = netdev_priv(dev);
++	netdev_features_t changed = dev->features ^ features;
++
++	if (changed & NETIF_F_RXCSUM) {
++		if (features & NETIF_F_RXCSUM)
++			gmac_enable_rxcsum_drop(ld, true);
++		else
++			gmac_enable_rxcsum_drop(ld, false);
++	}
++
++	return 0;
++}
++
++static int gmac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
++{
++	struct gmac_netdev_local *priv = NULL;
++	struct pm_config config;
++	int val = 0;
++	int ret;
++
++	if (ndev == NULL || rq == NULL)
++		return -EINVAL;
++	priv = netdev_priv(ndev);
++	switch (cmd) {
++	case SIOCSETPM:
++		if (rq->ifr_data == NULL ||
++		    copy_from_user(&config, rq->ifr_data, sizeof(config)))
++			return -EFAULT;
++		return pmt_config(ndev, &config);
++
++	case SIOCSETSUSPEND:
++		if (rq->ifr_data == NULL || copy_from_user(&val, rq->ifr_data, sizeof(val)))
++			return -EFAULT;
++		return set_suspend(val);
++
++	case SIOCSETRESUME:
++		if (rq->ifr_data == NULL || copy_from_user(&val, rq->ifr_data, sizeof(val)))
++			return -EFAULT;
++		return set_resume(val);
++
++	case SIOCGETPM:
++		if (memset_s(&config, sizeof(config), 0, sizeof(config)) != EOK) {
++			pr_err("gmac 
ioctl do cmd:%d failed\n", cmd); ++ return -EFAULT; ++ } ++ ++ if (rq->ifr_data == NULL) ++ return -EFAULT; ++ ret = pmt_get_config(ndev, &config); ++ if (ret == 0) { ++ if (copy_to_user(rq->ifr_data, &config, sizeof(config))) ++ return -EFAULT; ++ } ++ return ret; ++ ++ default: ++ if (!netif_running(ndev)) ++ return -EINVAL; ++ ++ if (priv->phy == NULL) ++ return -EINVAL; ++ ++ return phy_mii_ioctl(priv->phy, rq, cmd); ++ } ++ return 0; ++} ++ ++static int gmac_net_set_mac_address(struct net_device *dev, void *p) ++{ ++ int ret; ++ struct gmac_netdev_local *ld = netdev_priv(dev); ++ ++ ret = eth_mac_addr(dev, p); ++ if (ret == 0) { ++ gmac_hw_set_mac_addr(ld); ++ dev->addr_assign_type &= ~NET_ADDR_RANDOM; ++ } ++ ++ return ret; ++} ++ ++static int gmac_change_mtu(struct net_device *dev, int new_mtu) ++{ ++ netdev_warn(dev, "%s is deprecated\n", __func__); ++ dev->mtu = new_mtu; ++ return 0; ++} ++ ++static struct net_device_stats *gmac_net_get_stats(struct net_device *dev) ++{ ++ return &dev->stats; ++} ++ ++static void gmac_do_udp_checksum(struct sk_buff *skb) ++{ ++ int offset; ++ __wsum csum; ++ __sum16 udp_csum; ++ ++ offset = skb_checksum_start_offset(skb); ++ WARN_ON(offset >= skb_headlen(skb)); ++ csum = skb_checksum(skb, offset, skb->len - offset, 0); ++ ++ offset += skb->csum_offset; ++ WARN_ON(offset + sizeof(__sum16) > skb_headlen(skb)); ++ udp_csum = csum_fold(csum); ++ if (udp_csum == 0) ++ udp_csum = CSUM_MANGLED_0; ++ ++ *(__sum16 *)(skb->data + offset) = udp_csum; ++ ++ skb->ip_summed = CHECKSUM_NONE; ++} ++ ++static int gmac_get_pkt_info_l3l4(struct gmac_tso_desc *tx_bq_desc, struct sk_buff *skb, unsigned int *l4_proto, ++ unsigned int *max_mss, unsigned char *coe_enable) ++{ ++ __be16 l3_proto; /* level 3 protocol */ ++ int max_data_len = skb->len - ETH_HLEN; ++ ++ l3_proto = skb->protocol; ++ if (skb->protocol == htons(ETH_P_8021Q)) { ++ l3_proto = vlan_get_protocol(skb); ++ tx_bq_desc->desc1.tx.vlan_flag = 1; ++ max_data_len -= VLAN_HLEN; ++ } ++ ++ if (l3_proto == htons(ETH_P_IP)) { ++ struct iphdr *iph; ++ ++ iph = ip_hdr(skb); ++ tx_bq_desc->desc1.tx.ip_ver = PKT_IPV4; ++ tx_bq_desc->desc1.tx.ip_hdr_len = iph->ihl; ++ ++ if ((max_data_len >= GSO_MAX_SIZE) && ++ (ntohs(iph->tot_len) <= (iph->ihl << 2))) /* shift left 2 */ ++ iph->tot_len = htons(GSO_MAX_SIZE - 1); ++ ++ *max_mss -= iph->ihl * WORD_TO_BYTE; ++ *l4_proto = iph->protocol; ++ } else if (l3_proto == htons(ETH_P_IPV6)) { ++ tx_bq_desc->desc1.tx.ip_ver = PKT_IPV6; ++ tx_bq_desc->desc1.tx.ip_hdr_len = PKT_IPV6_HDR_LEN; ++ *max_mss -= PKT_IPV6_HDR_LEN * WORD_TO_BYTE; ++ *l4_proto = ipv6_hdr(skb)->nexthdr; ++ } else { ++ *coe_enable = 0; ++ } ++ ++ if (*l4_proto == IPPROTO_TCP) { ++ tx_bq_desc->desc1.tx.prot_type = PKT_TCP; ++ if (tcp_hdr(skb)->doff < sizeof(struct tcphdr) / WORD_TO_BYTE) ++ return -EFAULT; ++ tx_bq_desc->desc1.tx.prot_hdr_len = tcp_hdr(skb)->doff; ++ *max_mss -= tcp_hdr(skb)->doff * WORD_TO_BYTE; ++ } else if (*l4_proto == IPPROTO_UDP) { ++ tx_bq_desc->desc1.tx.prot_type = PKT_UDP; ++ tx_bq_desc->desc1.tx.prot_hdr_len = PKT_UDP_HDR_LEN; ++ if (l3_proto == htons(ETH_P_IPV6)) ++ *max_mss -= sizeof(struct frag_hdr); ++ } else { ++ *coe_enable = 0; ++ } ++ ++ return 0; ++} ++ ++static int gmac_get_pkt_info(struct gmac_netdev_local *ld, struct sk_buff *skb, struct gmac_tso_desc *tx_bq_desc) ++{ ++ int nfrags; ++ unsigned int l4_proto = IPPROTO_MAX; ++ unsigned int max_mss = ETH_DATA_LEN; ++ unsigned char coe_enable = 0; ++ int ret; ++ if (skb == NULL || tx_bq_desc == NULL) ++ return -EINVAL; ++ 
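++	/*
++	 * Fill the per-packet descriptor word: L3/L4 packet type, header
++	 * lengths, MSS and checksum-offload flags, so the MAC can segment
++	 * and checksum the frame in hardware.
++	 */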
++	nfrags = skb_shinfo(skb)->nr_frags;
++	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
++		coe_enable = 1;
++
++	tx_bq_desc->desc1.val = 0;
++
++	if (skb_is_gso(skb)) {
++		tx_bq_desc->desc1.tx.tso_flag = 1;
++		tx_bq_desc->desc1.tx.sg_flag = 1;
++	} else if (nfrags) {
++		tx_bq_desc->desc1.tx.sg_flag = 1;
++	}
++
++	ret = gmac_get_pkt_info_l3l4(tx_bq_desc, skb, &l4_proto, &max_mss,
++				     &coe_enable);
++	if (ret < 0)
++		return ret;
++
++	if (skb_is_gso(skb))
++		tx_bq_desc->desc1.tx.data_len =
++			(skb_shinfo(skb)->gso_size > max_mss) ? max_mss :
++			skb_shinfo(skb)->gso_size;
++	else
++		tx_bq_desc->desc1.tx.data_len = skb->len;
++
++	if (coe_enable && skb_is_gso(skb) && (l4_proto == IPPROTO_UDP))
++		gmac_do_udp_checksum(skb);
++
++	if (coe_enable)
++		tx_bq_desc->desc1.tx.coe_flag = 1;
++
++	tx_bq_desc->desc1.tx.nfrags_num = nfrags;
++
++	tx_bq_desc->desc1.tx.hw_own = DESC_VLD_BUSY;
++	return 0;
++}
++
++static int gmac_check_hw_capability_for_udp(struct sk_buff const *skb)
++{
++	struct ethhdr *eth;
++
++	/* hardware can't deal with UFO broadcast packets */
++	eth = (struct ethhdr *)(skb->data);
++	if (skb_is_gso(skb) && is_broadcast_ether_addr(eth->h_dest))
++		return -ENOTSUPP;
++
++	return 0;
++}
++
++static int gmac_check_hw_capability_for_ipv6(struct sk_buff *skb)
++{
++	unsigned int l4_proto;
++
++	l4_proto = ipv6_hdr(skb)->nexthdr;
++	if ((l4_proto != IPPROTO_TCP) && (l4_proto != IPPROTO_UDP)) {
++		/*
++		 * when the IPv6 next header is not tcp or udp,
++		 * it is an extension header.
++		 * Hardware can't deal with this case,
++		 * so do the checksumming or the GSO by software.
++		 */
++		if (skb_is_gso(skb))
++			return -ENOTSUPP;
++
++		if (skb->ip_summed == CHECKSUM_PARTIAL &&
++		    skb_checksum_help(skb))
++			return -EFAULT;
++	}
++
++	return 0;
++}
++
++static __be16 gmac_get_l3_proto(struct sk_buff *skb)
++{
++	__be16 l3_proto;
++
++	l3_proto = skb->protocol;
++	if (skb->protocol == htons(ETH_P_8021Q))
++		l3_proto = vlan_get_protocol(skb);
++
++	return l3_proto;
++}
++
++static unsigned int gmac_get_l4_proto(struct sk_buff *skb)
++{
++	__be16 l3_proto;
++	unsigned int l4_proto = IPPROTO_MAX;
++
++	l3_proto = gmac_get_l3_proto(skb);
++	if (l3_proto == htons(ETH_P_IP))
++		l4_proto = ip_hdr(skb)->protocol;
++	else if (l3_proto == htons(ETH_P_IPV6))
++		l4_proto = ipv6_hdr(skb)->nexthdr;
++
++	return l4_proto;
++}
++
++static inline bool gmac_skb_is_ipv6(struct sk_buff *skb)
++{
++	return (gmac_get_l3_proto(skb) == htons(ETH_P_IPV6));
++}
++
++static inline bool gmac_skb_is_udp(struct sk_buff *skb)
++{
++	return (gmac_get_l4_proto(skb) == IPPROTO_UDP);
++}
++
++static inline bool gmac_skb_is_ipv4_with_options(struct sk_buff *skb)
++{
++	return ((gmac_get_l3_proto(skb) == htons(ETH_P_IP)) &&
++		(ip_hdr(skb)->ihl > IPV4_HEAD_LENGTH));
++}
++
++static int gmac_check_hw_capability(struct sk_buff *skb)
++{
++	int ret;
++
++	/*
++	 * if tcp_mtu_probe() uses (2 * tp->mss_cache) as probe_size,
++	 * the linear data length will be larger than 2048;
++	 * the MAC can't handle it, so let the software do it.
++ */ ++ if (skb_is_gso(skb) && (skb_headlen(skb) > 2048)) /* 2048(2k) */ ++ return -ENOTSUPP; ++ ++ if (gmac_skb_is_ipv6(skb)) { ++ ret = gmac_check_hw_capability_for_ipv6(skb); ++ if (ret) ++ return ret; ++ } ++ ++ if (gmac_skb_is_udp(skb)) { ++ ret = gmac_check_hw_capability_for_udp(skb); ++ if (ret) ++ return ret; ++ } ++ ++ if (((skb->ip_summed == CHECKSUM_PARTIAL) || skb_is_gso(skb)) && ++ gmac_skb_is_ipv4_with_options(skb)) ++ return -ENOTSUPP; ++ ++ return 0; ++} ++ ++static const struct net_device_ops eth_netdev_ops = { ++ .ndo_open = gmac_net_open, ++ .ndo_stop = gmac_net_close, ++ .ndo_start_xmit = gmac_net_xmit, ++ .ndo_tx_timeout = gmac_net_timeout, ++ .ndo_set_rx_mode = gmac_set_multicast_list, ++ .ndo_set_features = gmac_set_features, ++ .ndo_do_ioctl = gmac_ioctl, ++ .ndo_set_mac_address = gmac_net_set_mac_address, ++ .ndo_change_mtu = gmac_change_mtu, ++ .ndo_get_stats = gmac_net_get_stats, ++}; ++ ++void gmac_set_netdev_ops(struct net_device *ndev) ++{ ++ ndev->netdev_ops = ð_netdev_ops; ++} +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.h b/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.h +new file mode 100644 +index 000000000..e56adcab4 +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_netdev_ops.h +@@ -0,0 +1,14 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. ++ */ ++ ++#ifndef GMAC_NETDEV_OPS_H ++#define GMAC_NETDEV_OPS_H ++ ++#include ++ ++#include "gmac.h" ++ ++void gmac_set_netdev_ops(struct net_device *ndev); ++ ++#endif +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.c b/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.c +new file mode 100644 +index 000000000..37bbf499d +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.c +@@ -0,0 +1,396 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
++ */
++
++#include 
++#include 
++#include "gmac_mdio.h"
++#include "gmac_phy_fixup.h"
++
++#ifdef CONFIG_GMAC_HAS_INTERNAL_PHY
++#define HIGMAC_INTERNAL_PHY_TRIM
++#endif /* CONFIG_GMAC_HAS_INTERNAL_PHY */
++
++#ifdef HIGMAC_INTERNAL_PHY_TRIM
++#define REG_LD_AM 0x3050
++#define LD_AM_MASK GENMASK(4, 0)
++#define REG_LDO_AM 0x3051
++#define LDO_AM_MASK GENMASK(2, 0)
++#define REG_R_TUNING 0x3052
++#define R_TUNING_MASK GENMASK(5, 0)
++#define REG_WR_DONE 0x3053
++#define REG_DEF_ATE 0x3057
++#define DEF_LD_AM 0x0f
++#define DEF_LDO_AM 0x7
++#define DEF_R_TUNING 0x15
++
++static inline int gmac_phy_expanded_read(struct mii_bus *bus, int phyaddr,
++					 u32 reg_addr)
++{
++	int ret;
++
++	gmac_mdio_write(bus, phyaddr, MII_EXPMA, reg_addr);
++	ret = gmac_mdio_read(bus, phyaddr, MII_EXPMD);
++
++	return ret;
++}
++
++static inline int gmac_phy_expanded_write(struct mii_bus *bus, int phyaddr,
++					  u32 reg_addr, u16 val)
++{
++	int ret;
++
++	gmac_mdio_write(bus, phyaddr, MII_EXPMA, reg_addr);
++	ret = gmac_mdio_write(bus, phyaddr, MII_EXPMD, val);
++
++	return ret;
++}
++
++void gmac_use_default_trim(struct mii_bus *bus, int phyaddr)
++{
++	unsigned short v;
++	int timeout = 3; /* retries */
++
++	pr_info("No OTP data, festa PHY use default ATE parameters!\n");
++
++	do {
++		msleep(250); /* poll every 250 ms */
++		v = gmac_phy_expanded_read(bus, phyaddr, REG_DEF_ATE);
++		v &= BIT(0);
++	} while (!v && --timeout);
++	WARN(!timeout, "festa PHY 0x3057 wait bit0 timeout!\n");
++
++	mdelay(5); /* settle delay */
++}
++
++void gmac_internal_fephy_trim(struct mii_bus *bus, int phyaddr,
++			      u32 trim_params)
++{
++	unsigned short ld_amplitude = DEF_LD_AM;
++	unsigned short ldo_amplitude = DEF_LDO_AM;
++	unsigned short r_tuning_val = DEF_R_TUNING;
++	unsigned short v;
++	int timeout = 3000; /* poll up to 3000 times */
++
++	if (!trim_params) {
++		gmac_use_default_trim(bus, phyaddr);
++		return;
++	}
++
++	ld_amplitude = trim_params & LD_AM_MASK;
++	ldo_amplitude = (trim_params >> 8) & LDO_AM_MASK; /* ldo_am in bits [10:8] */
++	r_tuning_val = (trim_params >> 16) & R_TUNING_MASK; /* r_tuning in bits [21:16] */
++
++	v = gmac_phy_expanded_read(bus, phyaddr, REG_LD_AM);
++	v = (v & ~LD_AM_MASK) | (ld_amplitude & LD_AM_MASK);
++	gmac_phy_expanded_write(bus, phyaddr, REG_LD_AM, v);
++
++	v = gmac_phy_expanded_read(bus, phyaddr, REG_LDO_AM);
++	v = (v & ~LDO_AM_MASK) | (ldo_amplitude & LDO_AM_MASK);
++	gmac_phy_expanded_write(bus, phyaddr, REG_LDO_AM, v);
++
++	v = gmac_phy_expanded_read(bus, phyaddr, REG_R_TUNING);
++	v = (v & ~R_TUNING_MASK) | (r_tuning_val & R_TUNING_MASK);
++	gmac_phy_expanded_write(bus, phyaddr, REG_R_TUNING, v);
++
++	v = gmac_phy_expanded_read(bus, phyaddr, REG_WR_DONE);
++	WARN(v & BIT(1), "festa PHY 0x3053 bit1 CFG_ACK value: 1\n");
++	v = v | BIT(0);
++	gmac_phy_expanded_write(bus, phyaddr, REG_WR_DONE, v);
++
++	do {
++		usleep_range(100, 150); /* poll every 100-150 us */
++		v = gmac_phy_expanded_read(bus, phyaddr, REG_WR_DONE);
++		v &= BIT(1);
++	} while (!v && --timeout);
++	WARN(!timeout, "festa PHY 0x3053 wait bit1 CFG_ACK timeout!\n");
++
++	mdelay(5); /* settle delay */
++
++	pr_info("FEPHY:addr=%d, ld_am=0x%x, ldo_am=0x%x, r_tuning=0x%x\n",
++		phyaddr,
++		gmac_phy_expanded_read(bus, phyaddr, REG_LD_AM),
++		gmac_phy_expanded_read(bus, phyaddr, REG_LDO_AM),
++		gmac_phy_expanded_read(bus, phyaddr, REG_R_TUNING));
++}
++#else
++void gmac_internal_fephy_trim(struct mii_bus *bus, int phyaddr,
++			      u32 
trim_params) ++{ ++} ++#endif /* HIGMAC_INTERNAL_PHY_TRIM */ ++ ++ ++static int gmac_phy_mmd_read(struct phy_device *phy_dev, u32 mmd_device, u32 regnum) ++{ ++ phy_write(phy_dev, MACR, mmd_device); /* function = 00 address */ ++ phy_write(phy_dev, MAADR, regnum); ++ phy_write(phy_dev, MACR, 0x4000 | mmd_device); /* function = 01 data */ ++ ++ return phy_read(phy_dev, MAADR); ++} ++ ++static int gmac_phy_mmd_write(struct phy_device *phy_dev, u32 mmd_device, u32 regnum, u16 val) ++{ ++ phy_write(phy_dev, MACR, mmd_device); /* function = 00 address */ ++ phy_write(phy_dev, MAADR, regnum); ++ phy_write(phy_dev, MACR, 0x4000 | mmd_device); /* function = 01 data */ ++ ++ return phy_write(phy_dev, MAADR, val); ++} ++ ++static int ksz8051mnl_phy_fix(struct phy_device *phy_dev) ++{ ++ u32 v; ++ int ret; ++ ++ if (phy_dev->interface != PHY_INTERFACE_MODE_RMII) ++ return 0; ++ ++ ret = phy_read(phy_dev, 0x1F); ++ if (ret < 0) ++ return ret; ++ v = ret; ++ v |= (1 << 7); /* set bit 7, phy RMII 50MHz clk; */ ++ phy_write(phy_dev, 0x1F, v); ++ ++ ret = phy_read(phy_dev, 0x16); ++ if (ret < 0) ++ return ret; ++ v = ret; ++ v |= (1 << 1); /* set phy RMII override; */ ++ phy_write(phy_dev, 0x16, v); ++ ++ return 0; ++} ++ ++static int ksz8081rnb_phy_fix(struct phy_device *phy_dev) ++{ ++ u32 v; ++ int ret; ++ ++ if (phy_dev->interface != PHY_INTERFACE_MODE_RMII) ++ return 0; ++ ++ ret = phy_read(phy_dev, 0x1F); ++ if (ret < 0) ++ return ret; ++ v = ret; ++ v |= (1 << 7); /* set bit 7, phy RMII 50MHz clk; */ ++ phy_write(phy_dev, 0x1F, v); ++ ++ return 0; ++} ++ ++static int unknown_phy_fix(struct phy_device *phy_dev) ++{ ++ u32 v; ++ int ret; ++ ++ if (phy_dev->interface != PHY_INTERFACE_MODE_RMII) ++ return 0; ++ ++ ret = phy_read(phy_dev, 0x1F); ++ if (ret < 0) ++ return ret; ++ v = ret; ++ v |= (1 << 7); /* set bit 7, phy RMII 50MHz clk; */ ++ phy_write(phy_dev, 0x1F, v); ++ ++ return 0; ++} ++ ++static int ksz9031rnx_phy_fix(struct phy_device *phy_dev) ++{ ++ u32 v; ++ ++ /* RX_CLK Pad Skew: 1_1101(+0.84) */ ++ v = (u32)gmac_phy_mmd_read(phy_dev, 0x2, 0x8); ++ v = (v & ~0x1F) | 0x1D; ++ gmac_phy_mmd_write(phy_dev, 0x2, 0x8, v); ++ ++ return 0; ++} ++ ++static int at803x_phy_debug_read(struct phy_device *phy_dev, u32 reg_addr) ++{ ++ int ret; ++ ++ phy_write(phy_dev, MII_ATH_DEBUG_ADDR, reg_addr); ++ ret = phy_read(phy_dev, MII_ATH_DEBUG_DATA); ++ ++ return ret; ++} ++ ++static int at803x_phy_debug_write(struct phy_device *phy_dev, u32 reg_addr, u16 val) ++{ ++ int ret; ++ ++ phy_write(phy_dev, MII_ATH_DEBUG_ADDR, reg_addr); ++ ret = phy_write(phy_dev, MII_ATH_DEBUG_DATA, val); ++ ++ return ret; ++} ++ ++static int at803x_phy_fix(struct phy_device *phy_dev) ++{ ++ /* PHY-AR8035 */ ++ u16 tx_delay; ++ int ret; ++ ++ /* enable rgmii tx clock delay */ ++ tx_delay = (u16)at803x_phy_debug_read(phy_dev, 0x05); ++ tx_delay |= BIT(8); /* bit8 */ ++ ret = at803x_phy_debug_write(phy_dev, 0x05, tx_delay); ++ ++ return ret; ++} ++ ++static int rtl8211e_phy_fix(struct phy_device *phy_dev) ++{ ++ u32 v; ++ int ret; ++ ++ /* select Extension page */ ++ phy_write(phy_dev, 0x1f, 0x7); ++ /* switch ExtPage 164 */ ++ phy_write(phy_dev, 0x1e, 0xa4); ++ ++ /* config RGMII rx pin io driver max */ ++ ret = phy_read(phy_dev, 0x1c); ++ if (ret < 0) ++ return ret; ++ v = ret; ++ v = (v & 0xff03) | 0xfc; ++ phy_write(phy_dev, 0x1c, v); ++ ++ /* select to page 0 */ ++ phy_write(phy_dev, 0x1f, 0); ++ ++ return 0; ++} ++ ++#define RTL8211F_PAGE_SELECT 0x1f ++#define RTL8211F_TX_DELAY BIT(8) ++static int rtl8211f_phy_fix(struct 
phy_device *phy_dev) ++{ ++ u16 reg; ++ ++ if (phy_dev->interface != PHY_INTERFACE_MODE_RGMII) ++ return 0; ++ ++ /* enable TXDLY */ ++ phy_write(phy_dev, RTL8211F_PAGE_SELECT, 0xd08); ++ reg = (u16)phy_read(phy_dev, 0x11); ++ reg |= RTL8211F_TX_DELAY; ++ phy_write(phy_dev, 0x11, reg); ++ /* restore to default page 0 */ ++ phy_write(phy_dev, RTL8211F_PAGE_SELECT, 0x0); ++ ++ return 0; ++} ++ ++static int general_phy_fixup(struct phy_device *phy_dev) ++{ ++ struct net_device *netdev = phy_dev->attached_dev; ++ struct gmac_netdev_local *priv; ++ int i, j; ++ struct gmac_phy_fixup_entry *entry; ++ ++ if (netdev == NULL) ++ return 0; ++ priv = netdev_priv(netdev); ++ ++ printk("phy_fixup_id:0x%x\n", phy_dev->phy_id); ++ for (i = 0; i < priv->phy_fixup_phycnt; i++) { ++ if (phy_dev->phy_id != priv->phy_fixup_id[i]) ++ continue; ++ ++ entry = priv->phy_fixup_entry[i]; ++ printk("phy_fixup_entry_cnt:0x%x\n", priv->phy_fixup_entry_cnt[i]); ++ for (j = 0; j < priv->phy_fixup_entry_cnt[i]; j++) { ++ phy_write(phy_dev, entry[j].reg, entry[j].val); ++ if (entry[j].delay != 0) { ++ msleep(entry[j].delay); ++ } ++ } ++ } ++ return 0; ++} ++ ++void gmac_phy_register_fixups(struct gmac_netdev_local *priv) ++{ ++ int ret, i; ++ char phy_fixup_name[PHY_FIXUP_NAME_SIZE]; ++ struct device_node *np = priv->phy_node; ++ ++ phy_register_fixup_for_uid(PHY_ID_UNKNOWN, DEFAULT_PHY_MASK, unknown_phy_fix); ++ phy_register_fixup_for_uid(PHY_ID_KSZ8051MNL, DEFAULT_PHY_MASK, ksz8051mnl_phy_fix); ++ phy_register_fixup_for_uid(PHY_ID_KSZ8081RNB, DEFAULT_PHY_MASK, ksz8081rnb_phy_fix); ++ phy_register_fixup_for_uid(PHY_ID_KSZ9031RNX, DEFAULT_PHY_MASK, ksz9031rnx_phy_fix); ++ phy_register_fixup_for_uid(ATH8035_PHY_ID, ATH_PHY_ID_MASK, at803x_phy_fix); ++ phy_register_fixup_for_uid(REALTEK_PHY_ID_8211E, REALTEK_PHY_MASK, rtl8211e_phy_fix); ++ phy_register_fixup_for_uid(PHY_ID_RTL8211F, PHY_ID_MASK_RTL8211F, rtl8211f_phy_fix); ++ ++ ret = of_property_count_u32_elems(np, PHY_FIXUP_ID_STR); ++ if (ret < 0) { ++ return; ++ } ++ priv->phy_fixup_phycnt = (unsigned int)(ret > MAX_FIXUP_PHY_CNT ? MAX_FIXUP_PHY_CNT : ret); ++ ++ ret = of_property_read_u32_array(np, PHY_FIXUP_ID_STR, (u32 *)priv->phy_fixup_id, priv->phy_fixup_phycnt); ++ if (ret < 0) { ++ printk("gmac: of_property_read_u32_array %s fail!\n", PHY_FIXUP_ID_STR); ++ priv->phy_fixup_phycnt = 0; ++ return; ++ } ++ ++ for (i = 0; i < priv->phy_fixup_phycnt; i++) { ++ ret = snprintf_s(phy_fixup_name, PHY_FIXUP_NAME_SIZE, PHY_FIXUP_NAME_SIZE - 1, "phy_fixup%d", i); ++ if (ret == -1) { ++ printk("gmac: snprintf_s fail!\n"); ++ break; ++ } ++ ++ ret = of_property_count_u16_elems(np, phy_fixup_name); ++ if (ret < 0) { ++ printk("gmac: of_property_count_u16_elems %s fail!\n", phy_fixup_name); ++ break; ++ } ++ ++ priv->phy_fixup_entry_cnt[i] = (unsigned int)(ret > MAX_FIXUP_ENTRY_ARR_SIZE ? 
MAX_FIXUP_ENTRY_ARR_SIZE : ret); ++ ++ ret = of_property_read_u16_array(np, phy_fixup_name, (u16 *)priv->phy_fixup_entry[i], priv->phy_fixup_entry_cnt[i]); ++ if (ret < 0) { ++ printk("gmac: of_property_read_u16_array %s fail!\n", phy_fixup_name); ++ break; ++ } ++ priv->phy_fixup_entry_cnt[i] /= sizeof(struct gmac_phy_fixup_entry) / sizeof(u16); ++ phy_register_fixup_for_uid(priv->phy_fixup_id[i], ANY_PHY_ID_MASK, general_phy_fixup); ++ } ++ ++ priv->phy_fixup_phycnt = i; ++} ++ ++void gmac_phy_unregister_fixups(struct gmac_netdev_local *priv) ++{ ++ int i; ++ ++ phy_unregister_fixup_for_uid(PHY_ID_UNKNOWN, DEFAULT_PHY_MASK); ++ phy_unregister_fixup_for_uid(PHY_ID_KSZ8051MNL, DEFAULT_PHY_MASK); ++ phy_unregister_fixup_for_uid(PHY_ID_KSZ8081RNB, DEFAULT_PHY_MASK); ++ phy_unregister_fixup_for_uid(PHY_ID_KSZ9031RNX, DEFAULT_PHY_MASK); ++ phy_unregister_fixup_for_uid(ATH8035_PHY_ID, ATH_PHY_ID_MASK); ++ phy_unregister_fixup_for_uid(REALTEK_PHY_ID_8211E, REALTEK_PHY_MASK); ++ phy_unregister_fixup_for_uid(PHY_ID_RTL8211F, PHY_ID_MASK_RTL8211F); ++ ++ for (i = 0; i < priv->phy_fixup_phycnt; i++) { ++ if (priv->phy_fixup_id[i] != 0) { ++ phy_unregister_fixup_for_uid(priv->phy_fixup_id[i], ANY_PHY_ID_MASK); ++ } ++ } ++} +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.h b/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.h +new file mode 100644 +index 000000000..aba45319d +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_phy_fixup.h +@@ -0,0 +1,31 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. ++ */ ++ ++#ifndef GMAC_PHY_FIXUP_H ++#define GMAC_PHY_FIXUP_H ++ ++#include "gmac.h" ++ ++#define MACR 0x0D ++#define MAADR 0x0E ++ ++#define MII_EXPMD 0x1D ++#define MII_EXPMA 0x1E ++ ++#define MII_ATH_DEBUG_ADDR 0x1D ++#define MII_ATH_DEBUG_DATA 0x1E ++ ++#define PHY_ID_KSZ8051MNL 0x00221550 ++#define PHY_ID_KSZ8081RNB 0x00221560 ++#define PHY_ID_KSZ9031RNX 0x00221620 ++#define DEFAULT_PHY_MASK 0xfffffff0 ++#define ATH8035_PHY_ID 0x004dd072 ++#define ATH_PHY_ID_MASK 0xffffffef ++ ++#define ANY_PHY_ID_MASK 0xffffffff ++ ++void gmac_phy_register_fixups(struct gmac_netdev_local *priv); ++void gmac_phy_unregister_fixups(struct gmac_netdev_local *priv); ++void gmac_internal_fephy_trim(struct mii_bus *bus, int phyaddr, u32 trim_params); ++#endif +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_pm.c b/drivers/net/ethernet/vendor/gmac/gmac_pm.c +new file mode 100644 +index 000000000..fce254af8 +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_pm.c +@@ -0,0 +1,410 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
++ */
++
++#include 
++#include 
++#include "gmac_pm.h"
++
++static unsigned char g_filter_value[FILTERS][N];
++struct pm_reg_config pm_reg_config_backup;
++
++static void init_crc_table(void);
++static unsigned short compute_crc(const char *message, int nbytes);
++static unsigned short calculate_crc16(const char *buf, unsigned int mask)
++{
++	char data[N];
++	int i;
++	int len = 0;
++
++	if (memset_s(data, sizeof(data), 0, sizeof(data)) != EOK)
++		printk("memset_s err : %s %d.\n", __func__, __LINE__);
++
++	for (i = 0; i < N; i++) {
++		if (mask & 0x1)
++			data[len++] = buf[i];
++
++		mask >>= 1;
++	}
++
++	return compute_crc(data, len);
++}
++
++/* called from the PM config path, with pmtlock held */
++static void _pmt_reg_backup(struct gmac_netdev_local const *ld)
++{
++	if (ld == NULL)
++		return;
++	pm_reg_config_backup.pmt_ctrl = readl(ld->gmac_iobase + PMT_CTRL);
++	pm_reg_config_backup.pmt_mask0 = readl(ld->gmac_iobase + PMT_MASK0);
++	pm_reg_config_backup.pmt_mask1 = readl(ld->gmac_iobase + PMT_MASK1);
++	pm_reg_config_backup.pmt_mask2 = readl(ld->gmac_iobase + PMT_MASK2);
++	pm_reg_config_backup.pmt_mask3 = readl(ld->gmac_iobase + PMT_MASK3);
++	pm_reg_config_backup.pmt_cmd = readl(ld->gmac_iobase + PMT_CMD);
++	pm_reg_config_backup.pmt_offset = readl(ld->gmac_iobase + PMT_OFFSET);
++	pm_reg_config_backup.pmt_crc1_0 = readl(ld->gmac_iobase + PMT_CRC1_0);
++	pm_reg_config_backup.pmt_crc3_2 = readl(ld->gmac_iobase + PMT_CRC3_2);
++}
++
++#define PM_SET 1
++#define PM_CLEAR 0
++
++static void pmt_config_filter(struct pm_config const *config,
++			      struct gmac_netdev_local const *ld)
++{
++	unsigned int v;
++	unsigned int cmd = 0;
++	unsigned int offset = 0;
++	unsigned short crc[FILTERS] = { 0 };
++	int reg_mask;
++	unsigned int i;
++
++	/*
++	 * filter.valid  mask.valid  mask_bytes  effect
++	 *      0            *           *       filter not used
++	 *      1            0           *       all pkts can wake-up (non-existent)
++	 *      1            1           0       all pkts can wake-up
++	 *      1            1          !0       normal filter
++	 */
++	/* setup filter */
++	for (i = 0; i < FILTERS; i++) {
++		if (config->filter[i].valid) {
++			if (config->filter[i].offset < PM_FILTER_OFFSET_MIN)
++				continue;
++			/* byte i of the offset register holds filter i's offset */
++			offset |= config->filter[i].offset << (i * 8);
++			cmd |= BIT(i * 8); /* valid bit of filter i */
++			/* mask register of filter i, stride 4 bytes */
++			reg_mask = PMT_MASK0 + (i * 4);
++
++			/*
++			 * for the logic, the mask valid bit (bit31) must be
++			 * set to 0; 0 means enabled
++			 */
++			v = config->filter[i].mask_bytes;
++			v &= ~BIT(31); /* bit31 */
++			writel(v, ld->gmac_iobase + reg_mask);
++
++			/* crc */
++			crc[i] = calculate_crc16(config->filter[i].value, v);
++			if (i <= 1) { /* for filter0 and filter 1 */
++				v = readl(ld->gmac_iobase + PMT_CRC1_0);
++				v &= ~(0xFFFF << (16 * i)); /* 16 bits mask */
++				v |= crc[i] << (16 * i); /* 16 bits mask */
++				writel(v, ld->gmac_iobase + PMT_CRC1_0);
++			} else { /* filter2 and filter3 */
++				v = readl(ld->gmac_iobase + PMT_CRC3_2);
++				v &= ~(0xFFFF << (16 * (i - 2))); /* filter 2/3, 16-bit mask */
++				v |= crc[i] << (16 * (i - 2)); /* filter 2/3, 16-bit mask */
++				writel(v, ld->gmac_iobase + PMT_CRC3_2);
++			}
++			if (memcpy_s(g_filter_value[i], N, config->filter[i].value, N) != EOK)
++				pr_err("%s %d: memcpy_s failed\n", __func__, __LINE__);
++		}
++	}
++
++	if (cmd) {
++		writel(offset, ld->gmac_iobase + PMT_OFFSET);
++		writel(cmd, ld->gmac_iobase + PMT_CMD);
++	}
++}
++
++static int pmt_config_gmac(struct pm_config const *config, struct gmac_netdev_local *ld)
++{
++	unsigned int v;
++	unsigned long flags;
++
++	if (ld == NULL || config == NULL)
++		return -EINVAL;
++
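++	/*
++	 * Program the wake-up filters first, then the PMT control bits,
++	 * all under pmtlock; the register set is backed up at the end so
++	 * it can be restored after a resume.
++	 */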
++	spin_lock_irqsave(&ld->pmtlock, flags);
++	if (config->wakeup_pkts_enable) {
++		/* disable wakeup packet matching before reconfiguring the filters */
++		v = readl(ld->gmac_iobase + PMT_CTRL);
++		v &= ~BIT(2); /* bit2 */
++		writel(v, ld->gmac_iobase + PMT_CTRL);
++	} else {
++		goto config_ctrl;
++	}
++
++	pmt_config_filter(config, ld);
++
++config_ctrl:
++	v = 0;
++	if (config->uc_pkts_enable)
++		v |= BIT(9); /* bit9 uc pkts wakeup */
++	if (config->wakeup_pkts_enable)
++		v |= BIT(2); /* bit2 use filter framework */
++	if (config->magic_pkts_enable)
++		v |= BIT(1); /* magic pkts wakeup */
++
++	v |= 0x3 << 5; /* set bit5 bit6, clear irq status */
++	writel(v, ld->gmac_iobase + PMT_CTRL);
++
++	_pmt_reg_backup(ld);
++
++	spin_unlock_irqrestore(&ld->pmtlock, flags);
++
++	return 0;
++}
++
++/* pmt_config will overwrite any previous configuration */
++int pmt_config(struct net_device const *ndev, struct pm_config const *config)
++{
++	static int init;
++	int ret = -EINVAL;
++	struct gmac_netdev_local *priv = netdev_priv(ndev);
++	struct ethtool_wolinfo wol = {0};
++
++	if (ndev == NULL || ndev->phydev == NULL || config == NULL)
++		return ret;
++
++	wol.cmd = ETHTOOL_SWOL;
++	/* If magic packet WOL is requested, try PHY WOL first */
++	if (config->magic_pkts_enable) {
++		wol.supported |= WAKE_MAGIC;
++		wol.wolopts |= WAKE_MAGIC;
++	}
++	if (config->uc_pkts_enable) {
++		wol.supported |= WAKE_UCAST;
++		wol.wolopts |= WAKE_UCAST;
++	}
++
++	ret = phy_ethtool_set_wol(ndev->phydev, &wol);
++	if (!ret) {
++		pr_info("gmac: set phy wol success\n");
++		priv->phy_wol_enable = true;
++		device_set_wakeup_enable(priv->dev, true);
++	} else if (ret != -EOPNOTSUPP) {
++		pr_err("gmac: set phy wol failed, err=%d\n", ret);
++	}
++
++	if (!init) {
++		init_crc_table();
++		init = 1;
++	}
++
++	ret = pmt_config_gmac(config, priv);
++	if (ret)
++		return ret;
++
++	priv->pm_state = PM_SET;
++	priv->mac_wol_enable = true;
++	device_set_wakeup_enable(priv->dev, true);
++
++	return 0;
++}
++
++int pmt_get_config(struct net_device const *ndev, struct pm_config *config)
++{
++	unsigned int val, cmd, offset;
++	struct ethtool_wolinfo wol = {0};
++	struct gmac_netdev_local *ld = netdev_priv(ndev);
++	unsigned int i, reg_mask;
++
++	if (ndev == NULL || ndev->phydev == NULL || ld == NULL || config == NULL)
++		return -EINVAL;
++
++	config->index = (unsigned int)ld->index;
++	phy_ethtool_get_wol(ndev->phydev, &wol);
++	config->magic_pkts_enable = (wol.wolopts & WAKE_MAGIC) ? 1 : 0;
++
++	val = readl(ld->gmac_iobase + PMT_CTRL);
++	if (val & (1 << 9)) /* bit9 */
++		config->uc_pkts_enable = 1;
++	if (val & (1 << 2)) /* bit2 */
++		config->wakeup_pkts_enable = 1;
++	if (val & (1 << 1))
++		config->magic_pkts_enable = 1;
++
++	if (config->wakeup_pkts_enable) {
++		cmd = readl(ld->gmac_iobase + PMT_CMD);
++		offset = readl(ld->gmac_iobase + PMT_OFFSET);
++
++		for (i = 0; i < FILTERS; i++) {
++			if (!(cmd & (1 << (unsigned int)(i * 8)))) /* valid bit of filter i */
++				continue;
++			config->filter[i].valid = 1;
++			config->filter[i].offset = offset >> (i * 8); /* byte i */
++			reg_mask = PMT_MASK0 + (i * 4); /* mask register of filter i */
++			config->filter[i].mask_bytes = readl(ld->gmac_iobase + reg_mask);
++			if (memcpy_s(config->filter[i].value, N, g_filter_value[i], N) != EOK) {
++				pr_err("%s %d: memcpy_s failed\n", __func__, __LINE__);
++				return -EFAULT;
++			}
++		}
++	}
++
++	return 0;
++}
++
++bool pmt_enter(struct gmac_netdev_local *ld)
++{
++	bool pm = false;
++	unsigned long flags;
++	if (ld == NULL)
++		return false;
++	spin_lock_irqsave(&ld->pmtlock, flags);
++	if (ld->pm_state == PM_SET) {
++		unsigned int v;
++
++		v = readl(ld->gmac_iobase + PMT_CTRL);
++		v |= BIT(0); /* enter power down */
++		v |= BIT(3); /* bit3, enable wakeup irq */
++		v |= 0x3 << 5; /* set bit5 bit6, clear irq status */
++		writel(v, ld->gmac_iobase + PMT_CTRL);
++
++		ld->pm_state = PM_CLEAR;
++		pm = true;
++	}
++	spin_unlock_irqrestore(&ld->pmtlock, flags);
++	return pm;
++}
++
++void pmt_exit(struct gmac_netdev_local *ld)
++{
++	unsigned int v;
++	unsigned long flags;
++	if (ld == NULL)
++		return;
++	/* logic auto exit power down mode */
++	spin_lock_irqsave(&ld->pmtlock, flags);
++
++	v = readl(ld->gmac_iobase + PMT_CTRL);
++	v &= ~BIT(0); /* exit power down */
++	v &= ~BIT(3); /* bit3, disable wakeup irq */
++
++	v |= 0x3 << 5; /* set bit5 bit6, clear irq status */
++	writel(v, ld->gmac_iobase + PMT_CTRL);
++
++	spin_unlock_irqrestore(&ld->pmtlock, flags);
++
++	ld->mac_wol_enable = false;
++}
++
++void pmt_reg_restore(struct gmac_netdev_local *ld)
++{
++	unsigned int v;
++	unsigned long flags;
++	if (ld == NULL)
++		return;
++	spin_lock_irqsave(&ld->pmtlock, flags);
++	v = pm_reg_config_backup.pmt_mask0;
++	writel(v, ld->gmac_iobase + PMT_MASK0);
++
++	v = pm_reg_config_backup.pmt_mask1;
++	writel(v, ld->gmac_iobase + PMT_MASK1);
++
++	v = pm_reg_config_backup.pmt_mask2;
++	writel(v, ld->gmac_iobase + PMT_MASK2);
++
++	v = pm_reg_config_backup.pmt_mask3;
++	writel(v, ld->gmac_iobase + PMT_MASK3);
++
++	v = pm_reg_config_backup.pmt_cmd;
++	writel(v, ld->gmac_iobase + PMT_CMD);
++
++	v = pm_reg_config_backup.pmt_offset;
++	writel(v, ld->gmac_iobase + PMT_OFFSET);
++
++	v = pm_reg_config_backup.pmt_crc1_0;
++	writel(v, ld->gmac_iobase + PMT_CRC1_0);
++
++	v = pm_reg_config_backup.pmt_crc3_2;
++	writel(v, ld->gmac_iobase + PMT_CRC3_2);
++
++	v = pm_reg_config_backup.pmt_ctrl;
++	writel(v, ld->gmac_iobase + PMT_CTRL);
++	spin_unlock_irqrestore(&ld->pmtlock, flags);
++}
++
++/* ====== the following code is copied from Synopsys DWC_gmac_crc_example.c ====== */
++#define CRC16 /* selects the CRC-16 parameters below */
++
++#if defined(CRC16)
++#define CRC_NAME "CRC-16"
++#define POLYNOMIAL 0x8005
++#define INITIAL_REMAINDER 0xFFFF
++#define FINAL_XOR_VALUE 0x0000
++#define REVERSE_DATA
++#undef REVERSE_REMAINDER
++#endif
++
++#define WIDTH (8 * sizeof(unsigned short))
++#define TOPBIT BIT(WIDTH - 1)
++
++#ifdef REVERSE_DATA
++#undef REVERSE_DATA
++#define reverse_data(X) ((unsigned char)reverse((X), 8))
++#else
++#undef REVERSE_DATA
++#define 
reverse_data(X) (X) ++#endif ++ ++#ifdef REVERSE_REMAINDER ++#undef REVERSE_REMAINDER ++#define reverse_remainder(X) ((unsigned short)reverse((X), WIDTH)) ++#else ++#undef REVERSE_REMAINDER ++#define reverse_remainder(X) (X) ++#endif ++ ++#define CRC_TABLE_LEN 256 ++static unsigned short crc_table[CRC_TABLE_LEN]; ++ ++static unsigned int reverse(unsigned int data, unsigned char nbits) ++{ ++ unsigned int reversed = 0x00000000; ++ unsigned char bit; ++ ++ /* Reverse the data about the center bit. */ ++ for (bit = 0; bit < nbits; ++bit) { ++ /* If the LSB bit is set, set the reflection of it. */ ++ if (data & 0x01) ++ reversed |= BIT((nbits - 1) - bit); ++ ++ data = (data >> 1); ++ } ++ return reversed; ++} ++ ++/* This Initializes the partial CRC look up table */ ++static void init_crc_table(void) ++{ ++ unsigned short remainder; ++ unsigned int dividend; ++ unsigned char bit; ++ ++ /* Compute the remainder of each possible dividend. */ ++ for (dividend = 0; dividend < CRC_TABLE_LEN; ++dividend) { ++ /* Start with the dividend followed by zeros, WIDTH - 8. */ ++ remainder = (unsigned short)(dividend << (WIDTH - 8)); ++ ++ /* Perform modulo-2 division, a bit at a time for 8 times. */ ++ for (bit = 8; bit > 0; --bit) { ++ /* Try to divide the current data bit. */ ++ if (remainder & TOPBIT) ++ remainder = (remainder << 1) ^ POLYNOMIAL; ++ else ++ remainder = (remainder << 1); ++ } ++ ++ /* Store the result into the table. */ ++ crc_table[dividend] = remainder; ++ } ++} ++ ++static unsigned short compute_crc(const char *message, int nbytes) ++{ ++ unsigned short remainder = INITIAL_REMAINDER; ++ int byte; ++ unsigned char data; ++ ++ /* Divide the message by the polynomial, a byte at a time. */ ++ for (byte = 0; byte < nbytes; ++byte) { ++ /* high 8 bits */ ++ data = reverse_data(message[byte]) ^ (remainder >> (WIDTH - 8)); ++ remainder = crc_table[data] ^ (remainder << 8); /* shift left 8 bits */ ++ } ++ ++ /* The final remainder is the CRC. */ ++ return (reverse_remainder(remainder) ^ FINAL_XOR_VALUE); ++} +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_pm.h b/drivers/net/ethernet/vendor/gmac/gmac_pm.h +new file mode 100644 +index 000000000..0581a1e71 +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_pm.h +@@ -0,0 +1,56 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
++ */ ++ ++#ifndef __GMAC_PM_H__ ++#define __GMAC_PM_H__ ++ ++#include "gmac.h" ++ ++#define N 31 ++#define FILTERS 4 ++#define PM_FILTER_OFFSET_MIN 12 ++struct pm_config { ++ unsigned char index; /* bit0--eth0 bit1--eth1 */ ++ unsigned char uc_pkts_enable; ++ unsigned char magic_pkts_enable; ++ unsigned char wakeup_pkts_enable; ++ struct { ++ unsigned int mask_bytes : N; ++ unsigned int reserved : 1; /* userspace ignore this bit */ ++ unsigned char offset; /* >= 12 */ ++ unsigned char value[N]; /* byte string */ ++ unsigned char valid; /* valid filter */ ++ } filter[FILTERS]; ++}; ++ ++struct pm_reg_config { ++ unsigned int pmt_ctrl; ++ unsigned int pmt_mask0; ++ unsigned int pmt_mask1; ++ unsigned int pmt_mask2; ++ unsigned int pmt_mask3; ++ unsigned int pmt_cmd; ++ unsigned int pmt_offset; ++ unsigned int pmt_crc1_0; ++ unsigned int pmt_crc3_2; ++}; ++ ++#define PMT_CTRL 0xa00 ++#define PMT_MASK0 0xa04 ++#define PMT_MASK1 0xa08 ++#define PMT_MASK2 0xa0c ++#define PMT_MASK3 0xa10 ++#define PMT_CMD 0xa14 ++#define PMT_OFFSET 0xa18 ++#define PMT_CRC1_0 0xa1c ++#define PMT_CRC3_2 0xa20 ++#define MASK_INVALID_BIT BIT(31) ++ ++int pmt_config(struct net_device const *ndev, struct pm_config const *config); ++int pmt_get_config(struct net_device const *ndev, struct pm_config *config); ++bool pmt_enter(struct gmac_netdev_local *ld); ++void pmt_exit(struct gmac_netdev_local *ld); ++void pmt_reg_restore(struct gmac_netdev_local *ld); ++ ++#endif +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_proc.c b/drivers/net/ethernet/vendor/gmac/gmac_proc.c +new file mode 100644 +index 000000000..6d28f15d8 +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_proc.c +@@ -0,0 +1,125 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
++ */ ++ ++#include ++ ++#include ++#include "gmac_pm.h" ++#include "gmac_proc.h" ++ ++/* debug code */ ++int set_suspend(int eth_n) ++{ ++ return 0; ++} ++ ++/* debug code */ ++int set_resume(int eth_n) ++{ ++ return 0; ++} ++ ++static int hw_states_read(struct seq_file *m, void *v) ++{ ++ return 0; ++} ++ ++static struct proc_dir_entry *gmac_proc_root; ++ ++static int proc_open_hw_states_read(struct inode *inode, struct file *file) ++{ ++ return single_open(file, hw_states_read, pde_data(inode)); ++} ++ ++static int gmac_proc_version_show(struct seq_file *m, void *v) ++{ ++ seq_printf(m, "version: %s\n", GMAC_KERNEL_VERSION); ++ return 0; ++} ++ ++static int gmac_proc_version_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, gmac_proc_version_show, pde_data(inode)); ++} ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) ++static struct proc_file { ++ char *name; ++ const struct proc_ops ops; ++ ++} proc_file[] = { ++ { ++ .name = "hw_stats", ++ .ops = { ++ .proc_open = proc_open_hw_states_read, ++ .proc_read = seq_read, ++ .proc_lseek = seq_lseek, ++ .proc_release = single_release, ++ }, ++ }, ++ { ++ .name = "version", ++ .ops = { ++ .proc_open = gmac_proc_version_open, ++ .proc_read = seq_read, ++ .proc_lseek = seq_lseek, ++ .proc_release = single_release, ++ }, ++ } ++}; ++#else ++static struct proc_file { ++ char *name; ++ const struct file_operations ops; ++ ++} proc_file[] = { ++ { ++ .name = "hw_stats", ++ .ops = { ++ .open = proc_open_hw_states_read, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ }, ++ }, ++ { ++ .name = "version", ++ .ops = { ++ .open = gmac_proc_version_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++ }, ++ } ++}; ++#endif ++/* ++ * /proc/gmac/ ++ * |---hw_stats ++ * |---skb_pools ++ */ ++void gmac_proc_create(void) ++{ ++ struct proc_dir_entry *entry = NULL; ++ int i; ++ ++ gmac_proc_root = proc_mkdir("gmac", NULL); ++ if (gmac_proc_root == NULL) ++ return; ++ ++ for (i = 0; i < ARRAY_SIZE(proc_file); i++) { ++ entry = proc_create(proc_file[i].name, 0, gmac_proc_root, &proc_file[i].ops); ++ if (entry == NULL) ++ pr_err("failed to create %s\n", proc_file[i].name); ++ } ++} ++ ++void gmac_proc_destroy(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(proc_file); i++) ++ remove_proc_entry(proc_file[i].name, gmac_proc_root); ++ ++ remove_proc_entry("gmac", NULL); ++} +diff --git a/drivers/net/ethernet/vendor/gmac/gmac_proc.h b/drivers/net/ethernet/vendor/gmac/gmac_proc.h +new file mode 100644 +index 000000000..43988e05a +--- /dev/null ++++ b/drivers/net/ethernet/vendor/gmac/gmac_proc.h +@@ -0,0 +1,22 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. 
++ */
++
++#ifndef GMAC_PROC_H
++#define GMAC_PROC_H
++
++#include <linux/sockios.h>
++
++#define SIOCSETPM (SIOCDEVPRIVATE + 4) /* set pmt wake up config */
++#define SIOCSETSUSPEND (SIOCDEVPRIVATE + 5) /* call dev->suspend, debug */
++#define SIOCSETRESUME (SIOCDEVPRIVATE + 6) /* call dev->resume, debug */
++#define SIOCGETPM (SIOCDEVPRIVATE + 7) /* get pmt wake up config */
++
++void gmac_proc_create(void);
++void gmac_proc_destroy(void);
++
++/* netdev ops related func */
++int set_suspend(int eth_n);
++int set_resume(int eth_n);
++
++#endif
+diff --git a/drivers/net/ethernet/vendor/gmac/version.mak b/drivers/net/ethernet/vendor/gmac/version.mak
+new file mode 100644
+index 000000000..4800f6187
+--- /dev/null
++++ b/drivers/net/ethernet/vendor/gmac/version.mak
+@@ -0,0 +1 @@
++GMAC_KERNEL_VERSION="HNET 1.0.0"
+diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
+index 107880d13..297d305c0 100644
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -411,6 +411,13 @@ config XILINX_GMII2RGMII
+ 
+ endif # PHYLIB
+ 
++config MDIO_BSP_GEMAC
++	tristate "Vendor GEMAC MDIO bus controller"
++	depends on HAS_IOMEM && OF_MDIO
++	help
++	  This module provides a driver for the MDIO buses found in
++	  Vendor SoCs that have a Gigabit Ethernet MAC.
++
+ config MICREL_KS8995MA
+ 	tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ 	depends on SPI
+diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
+index c945ed9bd..59c631b9c 100644
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -95,3 +95,4 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
+ obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
+ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
++obj-$(CONFIG_MDIO_BSP_GEMAC) += mdio_bsp_gemac.o
+diff --git a/drivers/net/phy/mdio_bsp_gemac.c b/drivers/net/phy/mdio_bsp_gemac.c
+new file mode 100644
+index 000000000..16371b43e
+--- /dev/null
++++ b/drivers/net/phy/mdio_bsp_gemac.c
+@@ -0,0 +1,222 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2012-2021. All rights reserved.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mdio_bsp_gemac.h" ++ ++#define MDIO_SINGLE_CMD 0x00 ++#define MDIO_SINGLE_DATA 0x04 ++#define MDIO_RDATA_STATUS 0x10 ++#define BIT_PHY_ADDR_OFFSET 8 ++#define MDIO_WRITE BIT(16) ++#define MDIO_READ BIT(17) ++#define MDIO_START BIT(20) ++#define MDIO_START_READ (MDIO_START | MDIO_READ) ++#define MDIO_START_WRITE (MDIO_START | MDIO_WRITE) ++#define DELAY_US 20 ++#define TIMEOUT_US 10000 ++ ++struct bsp_gemac_mdio_data { ++ struct clk *clk; ++ struct reset_control *phy_rst; ++ void __iomem *membase; ++}; ++ ++static int bsp_gemac_mdio_wait_ready(struct bsp_gemac_mdio_data *data) ++{ ++ u32 val; ++ return readl_poll_timeout(data->membase + MDIO_SINGLE_CMD, ++ val, !(val & MDIO_START), DELAY_US, TIMEOUT_US); ++} ++ ++static int bsp_gemac_mdio_read(struct mii_bus *bus, int mii_id, int regnum) ++{ ++ struct bsp_gemac_mdio_data *data = bus->priv; ++ int ret; ++ ++ ret = bsp_gemac_mdio_wait_ready(data); ++ if (ret) ++ return ret; ++ ++ writel(MDIO_START_READ | ((u32)mii_id << BIT_PHY_ADDR_OFFSET) | ++ ((u32)regnum), ++ data->membase + MDIO_SINGLE_CMD); ++ ++ ret = bsp_gemac_mdio_wait_ready(data); ++ if (ret) ++ return ret; ++ ++ /* if read data is invalid, we just return 0 instead of -EAGAIN. ++ * This can make MDIO more robust when reading PHY status. ++ */ ++ if (readl(data->membase + MDIO_RDATA_STATUS)) ++ return 0; ++ ++ return readl(data->membase + MDIO_SINGLE_DATA) >> 16; /* 16:right shift */ ++} ++ ++static int bsp_gemac_mdio_write(struct mii_bus *bus, int mii_id, int regnum, ++ u16 value) ++{ ++ struct bsp_gemac_mdio_data *data = bus->priv; ++ int ret; ++ ++ ret = bsp_gemac_mdio_wait_ready(data); ++ if (ret) ++ return ret; ++ ++ writel(value, data->membase + MDIO_SINGLE_DATA); ++ writel(MDIO_START_WRITE | ((u32)mii_id << BIT_PHY_ADDR_OFFSET) | ++ ((u32)regnum), ++ data->membase + MDIO_SINGLE_CMD); ++ ++ return bsp_gemac_mdio_wait_ready(data); ++} ++ ++static void bsp_gemac_external_phy_reset(struct bsp_gemac_mdio_data const *data) ++{ ++ if (data->phy_rst) { ++ /* write 0 to cancel reset */ ++ reset_control_deassert(data->phy_rst); ++ msleep(50); /* 50:delay */ ++ ++ /* use CRG register to reset phy */ ++ /* RST_BIT, write 0 to reset phy, write 1 to cancel reset */ ++ reset_control_assert(data->phy_rst); ++ ++ /* delay some time to ensure reset ok, ++ * this depends on PHY hardware feature ++ */ ++ msleep(50); /* 50:delay */ ++ ++ /* write 0 to cancel reset */ ++ reset_control_deassert(data->phy_rst); ++ /* delay some time to ensure later MDIO access */ ++ msleep(50); /* 50:delay */ ++ } ++} ++ ++static void bsp_gemac_mdiobus_init(struct mii_bus *const bus, struct platform_device *const pdev) ++{ ++ u32 str_len; ++ str_len = strlen(pdev->name); ++ bus->name = "bsp_gemac_mii_bus"; ++ bus->read = &bsp_gemac_mdio_read; ++ bus->write = &bsp_gemac_mdio_write; ++ if (snprintf_s(bus->id, MII_BUS_ID_SIZE, str_len, "%s", pdev->name) < 0) ++ printk("snprintf_s failed! 
func:%s, line: %d\n", __func__, __LINE__); ++ bus->parent = &pdev->dev; ++ return; ++} ++ ++static int bsp_gemac_mdio_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct mii_bus *bus = NULL; ++ struct bsp_gemac_mdio_data *data = NULL; ++ struct resource *res = NULL; ++ ++ int ret = bsp_gemac_pinctrl_config(pdev); ++ if (ret) { ++ pr_err("gmac pinctrl config error=%d.\n", ret); ++ return ret; ++ } ++ ++ bus = mdiobus_alloc_size(sizeof(*data)); ++ if (!bus) ++ return -ENOMEM; ++ ++ bsp_gemac_mdiobus_init(bus, pdev); ++ ++ data = bus->priv; ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res == NULL || data == NULL) { ++ ret = -ENXIO; ++ goto err_out_free_mdiobus; ++ } ++ data->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); ++ if (!data->membase) { ++ ret = -ENOMEM; ++ goto err_out_free_mdiobus; ++ } ++ ++ data->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(data->clk)) { ++ ret = PTR_ERR(data->clk); ++ goto err_out_free_mdiobus; ++ } ++ ++ ret = clk_prepare_enable(data->clk); ++ if (ret) ++ goto err_out_free_mdiobus; ++ ++ data->phy_rst = devm_reset_control_get(&pdev->dev, "phy_reset"); ++ if (IS_ERR(data->phy_rst)) ++ data->phy_rst = NULL; ++ bsp_gemac_external_phy_reset(data); ++ ++ ret = of_mdiobus_register(bus, np); ++ if (ret) ++ goto err_out_disable_clk; ++ ++ platform_set_drvdata(pdev, bus); ++ ++ return 0; ++ ++err_out_disable_clk: ++ clk_disable_unprepare(data->clk); ++err_out_free_mdiobus: ++ mdiobus_free(bus); ++ return ret; ++} ++ ++static int bsp_gemac_mdio_remove(struct platform_device *pdev) ++{ ++ struct mii_bus *bus = platform_get_drvdata(pdev); ++ struct bsp_gemac_mdio_data *data = bus->priv; ++ ++ mdiobus_unregister(bus); ++ clk_disable_unprepare(data->clk); ++ mdiobus_free(bus); ++ ++ return 0; ++} ++ ++static const struct of_device_id bsp_gemac_mdio_dt_ids[] = { ++ { .compatible = "vendor,gemac-mdio" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bsp_gemac_mdio_dt_ids); ++ ++static struct platform_driver bsp_gemac_mdio_driver = { ++ .probe = bsp_gemac_mdio_probe, ++ .remove = bsp_gemac_mdio_remove, ++ .driver = { ++ .name = "bsp-gemac-mdio", ++ .of_match_table = bsp_gemac_mdio_dt_ids, ++ }, ++}; ++ ++module_platform_driver(bsp_gemac_mdio_driver); ++ ++MODULE_DESCRIPTION("Gigabit Ethernet MAC MDIO interface driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/phy/mdio_bsp_gemac.h b/drivers/net/phy/mdio_bsp_gemac.h +new file mode 100644 +index 000000000..77c7d194e +--- /dev/null ++++ b/drivers/net/phy/mdio_bsp_gemac.h +@@ -0,0 +1,27 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2012-2021. All rights reserved. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ *
++ */
++
++#ifndef __MDIO_BSP_GEMAC_H__
++#define __MDIO_BSP_GEMAC_H__
++
++#if defined(CONFIG_ARCH_SS528V100) || defined(CONFIG_ARCH_SS625V100)
++int bsp_gemac_pinctrl_config(struct platform_device *pdev);
++#else
++static inline int bsp_gemac_pinctrl_config(struct platform_device *pdev)
++{
++	return 0;
++}
++#endif
++
++#endif /* __MDIO_BSP_GEMAC_H__ */
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index e9ae66cc4..22123e9a9 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -291,5 +291,6 @@ source "drivers/pci/hotplug/Kconfig"
+ source "drivers/pci/controller/Kconfig"
+ source "drivers/pci/endpoint/Kconfig"
+ source "drivers/pci/switch/Kconfig"
++source "drivers/pci/bsp_pcie/Kconfig"
+ 
+ endif
+diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
+index cc8b4e01e..62089e1d6 100644
+--- a/drivers/pci/Makefile
++++ b/drivers/pci/Makefile
+@@ -39,5 +39,6 @@ obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
+ 
+ obj-y += controller/
+ obj-y += switch/
++obj-$(CONFIG_BSP_PCIE) += bsp_pcie/
+ 
+ subdir-ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+diff --git a/drivers/pci/bsp_pcie/Kconfig b/drivers/pci/bsp_pcie/Kconfig
+new file mode 100644
+index 000000000..3fad0eb1e
+--- /dev/null
++++ b/drivers/pci/bsp_pcie/Kconfig
+@@ -0,0 +1,27 @@
++menuconfig BSP_PCIE
++	bool "Vendor PCI Express support"
++	depends on PCI && (ARCH_SS928V100 || ARCH_SS927V100)
++	default y if PCI
++	default n if !PCI
++	select PCI_MSI_ARCH_FALLBACKS
++	help
++	  Vendor PCI Express root-complex support.
++	  Choose this option to use the PCI Express controllers on these SoCs.
++
++if BSP_PCIE
++
++menu "PCI Express configs"
++
++
++config LIMIT_MAX_RD_REQ_SIZE
++	bool "limit pcie max read request size"
++	help
++	  The default max read request size of a PCIe device is 512 bytes.
++	  When PCIe connects to a SATA disk through a PCIe-to-SATA card,
++	  the 512-byte default can starve the VDP of bandwidth. Enabling
++	  LIMIT_MAX_RD_REQ_SIZE sets the max read request size of PCIe
++	  devices to 128 bytes instead, which avoids the VDP
++	  low-bandwidth problem.
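++#
++# Note (informational): the max read request size in effect on a device
++# can be inspected from userspace with "lspci -vv" (DevCtl: MaxReadReq).
++#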
++
++endmenu
++
++endif
+diff --git a/drivers/pci/bsp_pcie/Makefile b/drivers/pci/bsp_pcie/Makefile
+new file mode 100644
+index 000000000..b513f5616
+--- /dev/null
++++ b/drivers/pci/bsp_pcie/Makefile
+@@ -0,0 +1,8 @@
++
++obj-$(CONFIG_BSP_PCIE) += bsp_pcie.o
++
++bsp_pcie-objs := pcie.o
++
++ifeq ($(CONFIG_PCI_DEBUG),y)
++	EXTRA_CFLAGS += -DPCIE_DEBUG
++endif
+diff --git a/drivers/pci/bsp_pcie/pci.h b/drivers/pci/bsp_pcie/pci.h
+new file mode 100644
+index 000000000..25efee032
+--- /dev/null
++++ b/drivers/pci/bsp_pcie/pci.h
+@@ -0,0 +1,85 @@
++/*
++ * arch/arm/include/asm/mach/pci.h
++ *
++ * Copyright (C) 2000 Russell King
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_MACH_PCI_H
++#define __ASM_MACH_PCI_H
++
++#include <linux/pci.h>
++
++struct pci_sys_data;
++struct pci_ops;
++struct pci_bus;
++struct device;
++
++struct hw_pci {
++#ifdef CONFIG_PCI_DOMAINS
++	int domain;
++#endif
++	struct device *dev;
++	struct pci_ops *ops;
++	int nr_controllers;
++	void **private_data;
++	int (*setup)(int nr, struct pci_sys_data *);
++	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
++	void (*preinit)(void);
++	void (*postinit)(void);
++	u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
++	int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
++	resource_size_t (*align_resource)(struct pci_dev *dev,
++					  const struct resource *res,
++					  resource_size_t start,
++					  resource_size_t size,
++					  resource_size_t align);
++	void (*add_bus)(struct pci_bus *bus);
++	void (*remove_bus)(struct pci_bus *bus);
++};
++
++/*
++ * Per-controller structure
++ */
++struct pci_sys_data {
++#ifdef CONFIG_PCI_DOMAINS
++	int domain;
++#endif
++	struct list_head node;
++	int busnr;		/* primary bus number */
++	u64 mem_offset;		/* bus->cpu memory mapping offset */
++	unsigned long io_offset;	/* bus->cpu IO mapping offset */
++	struct pci_bus *bus;	/* PCI bus */
++	struct list_head resources;	/* root bus resources (apertures) */
++	struct resource io_res;
++	char io_res_name[12];
++	/* Bridge swizzling */
++	u8 (*swizzle)(struct pci_dev *, u8 *);
++	/* IRQ mapping */
++	int (*map_irq)(const struct pci_dev *, u8, u8);
++	/* Resource alignment requirements */
++	resource_size_t (*align_resource)(struct pci_dev *dev,
++					  const struct resource *res,
++					  resource_size_t start,
++					  resource_size_t size,
++					  resource_size_t align);
++	void (*add_bus)(struct pci_bus *bus);
++	void (*remove_bus)(struct pci_bus *bus);
++	void *private_data;	/* platform controller private data */
++};
++
++void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
++{
++	dev_dbg(&dev->dev, "assigning IRQ %02d\n", irq);
++	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
++}
++
++int bsp_of_pci_get_host_bridge_resources(struct device *dev,
++			unsigned char busno, unsigned char bus_max,
++			struct list_head *resources,
++			resource_size_t *io_base);
++
++#endif /* __ASM_MACH_PCI_H */
+diff --git a/drivers/pci/bsp_pcie/pcie.c b/drivers/pci/bsp_pcie/pcie.c
+new file mode 100644
+index 000000000..34fa5228b
+--- /dev/null
++++ b/drivers/pci/bsp_pcie/pcie.c
+@@ -0,0 +1,606 @@
++/*
++ * Copyright (c) 2016-2017 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../pci.h" ++#ifdef CONFIG_ARM64 ++#include "pci.h" ++#endif ++ ++#define PCIE_DBG_REG 1 ++#define PCIE_DBG_FUNC 2 ++#define PCIE_DBG_MODULE 3 ++ ++#define READ_OPS_MOVE_LEFT 3 ++ ++#define PCIE_DEBUG_LEVEL PCIE_DBG_MODULE ++ ++#ifdef PCIE_DEBUG ++#define pcie_debug(level, str, arg...) \ ++ do { \ ++ if ((level) <= PCIE_DEBUG_LEVEL) { \ ++ pr_debug("%s->%d," str "\n", \ ++ __func__, __LINE__, ##arg); \ ++ } \ ++ } while (0) ++#else ++#define pcie_debug(level, str, arg...) ++#endif ++ ++#define pcie_assert(con) \ ++ do { \ ++ if (!(con)) { \ ++ pr_err("%s->%d,assert fail!\n", \ ++ __func__, __LINE__); \ ++ } \ ++ } while (0) ++ ++#define pcie_error(str, arg...) \ ++ pr_err("%s->%d" str "\n", __func__, __LINE__, ##arg) ++ ++#define __256MB__ 0x10000000 ++#define __128MB__ 0x8000000 ++#define __4KB__ 0x1000 ++#define __8KB__ 0x2000 ++#define __16KB__ 0x4000 ++ ++enum pcie_sel { ++ /* ++ * No controller selected. ++ */ ++ PCIE_SEL_NONE, ++ /* ++ * PCIE0 selected. ++ */ ++ PCIE0_X1_SEL, ++ /* ++ * PCIE1 selected. ++ */ ++ PCIE1_X1_SEL ++}; ++ ++enum pcie_rc_sel { ++ PCIE_CONTROLLER_UNSELECTED, ++ PCIE_CONTROLLER_SELECTED ++}; ++ ++enum pcie_controller { ++ PCIE_CONTROLLER_NONE = -1, ++ PCIE_CONTROLLER_0 = 0, ++ PCIE_CONTROLLER_1 = 1 ++}; ++ ++struct pcie_iatu { ++ unsigned int viewport; /* iATU Viewport Register */ ++ unsigned int region_ctrl_1; /* Region Control 1 Register */ ++ unsigned int region_ctrl_2; /* Region Control 2 Register */ ++ unsigned int lbar; /* Lower Base Address Register */ ++ unsigned int ubar; /* Upper Base Address Register */ ++ unsigned int lar; /* Limit Address Register */ ++ unsigned int ltar; /* Lower Target Address Register */ ++ unsigned int utar; /* Upper Target Address Register */ ++}; ++ ++struct pcie_property { ++ unsigned int pcie_mem_size; ++ unsigned int pcie_cfg_size; ++ unsigned int pcie_dbi_base; ++ unsigned int pcie_ep_conf_base; ++ unsigned int pcie_contrl; ++}; ++ ++#define MAX_IATU_PER_CTRLLER (6) ++ ++struct pcie_info { ++ /* ++ * Root bus number ++ */ ++ int root_bus_nr; ++ enum pcie_controller controller; ++ ++ /* ++ * Devices configuration space base ++ */ ++ unsigned long base_addr; ++ ++ /* ++ * RC configuration space base ++ */ ++ unsigned long conf_base_addr; ++}; ++ ++#define MAX_PCIE_CONTROLLER_NUM 2 ++static struct pcie_info pcie_info[MAX_PCIE_CONTROLLER_NUM] = { ++ { .root_bus_nr = -1, }, ++ { .root_bus_nr = -1, } ++}; ++ ++static int pcie_controllers_nr; ++ ++static unsigned int pcie_errorvalue; ++ ++struct device_node *g_of_node = NULL; ++ ++static DEFINE_SPINLOCK(cw_lock); ++ ++#define PCIE0_MODE_SEL (1 << 0) ++#define PCIE1_MODE_SEL (1 << 1) ++ ++#if defined(CONFIG_ARCH_SS928V100) || defined(CONFIG_ARCH_SS927V100) ++#include "pcie_ss928v100.c" ++#else ++#error You must have defined CONFIG_ARCH_... 
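++/*
++ * Note: the __arch_* low-level helpers are provided by the SoC-specific
++ * translation unit included above. Support for a further SoC would add
++ * another branch, sketched here with a hypothetical symbol:
++ *
++ *   #elif defined(CONFIG_ARCH_NEWSOC)
++ *   #include "pcie_newsoc.c"
++ */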
++#endif
++
++static struct pcie_info *bus_to_info(int busnr)
++{
++	int i = pcie_controllers_nr;
++
++	for (; i >= 0; i--) {
++		if (pcie_info[i].controller != PCIE_CONTROLLER_NONE
++		    && pcie_info[i].root_bus_nr <= busnr
++		    && pcie_info[i].root_bus_nr != -1)
++			return &pcie_info[i];
++	}
++
++	return NULL;
++}
++
++#define pcie_cfg_bus(busnr) ((busnr & 0xff) << 20)
++#define pcie_cfg_dev(devfn) ((devfn & 0xff) << 12)
++#define pcie_cfg_reg(reg) (reg & 0xffc) /* set dword align */
++
++static unsigned long to_pcie_address(struct pci_bus *bus,
++		unsigned int devfn, int where)
++{
++	struct pcie_info *info = bus_to_info(bus->number);
++	unsigned long address;
++
++	if (unlikely(!info)) {
++		pcie_error(
++			"%s:Cannot find corresponding controller for appointed device!", __func__);
++		BUG();
++	}
++
++	address = info->base_addr + (pcie_cfg_bus(bus->number) |
++		pcie_cfg_dev(devfn) | pcie_cfg_reg((unsigned int)where));
++
++	return address;
++}
++
++static int is_pcie_link_up(struct pcie_info *info)
++{
++	int i;
++
++	/* check up to 10000 times before giving up */
++	for (i = 0; i < 10000; i++) {
++		if (__arch_check_pcie_link(info))
++			break;
++		udelay(100); /* 100:delay duration in us */
++	}
++
++	return (i < 10000); /* 10000:number of link checks */
++}
++
++static int pcie_read_from_device(struct pci_bus *bus, unsigned int devfn,
++		int where, int size, u32 *value)
++{
++	struct pcie_info *info = bus_to_info(bus->number);
++	unsigned int val;
++	void __iomem *addr;
++	int i;
++
++	if (unlikely(!info)) {
++		pcie_error(
++			"%s:Cannot find corresponding controller for appointed device!", __func__);
++		BUG();
++	}
++
++	if (!is_pcie_link_up(info)) {
++		pcie_debug(PCIE_DBG_MODULE, "pcie %d not link up!",
++			info->controller);
++		return -1;
++	}
++
++	/* where[11:2] represents the reg_num for register addressing */
++	addr = (void __iomem *)(uintptr_t)to_pcie_address(bus, devfn, where);
++
++	val = readl(addr);
++
++	/* nop 2000 times */
++	i = 0;
++	while (i < 2000) {
++		__asm__ __volatile__("nop\n");
++		i++;
++	}
++
++	if (pcie_errorvalue == 1) {
++		pcie_errorvalue = 0;
++		val = 0xffffffff;
++	}
++
++	if (size == 1) {
++		*value = ((val >> (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)) & 0xff);
++	} else if (size == 2) { /* 2:read ops size */
++		*value = ((val >> (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)) & 0xffff);
++	} else if (size == 4) { /* 4:read ops size */
++		*value = val;
++	} else {
++		pcie_error("Unknown size %d for read ops", size);
++		BUG();
++	}
++
++	return PCIBIOS_SUCCESSFUL;
++}
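++/*
++ * Worked example of the byte-lane arithmetic above (informational):
++ * for a size-2 read at where = 0x06, where & 0x3 = 2, so the 32-bit
++ * dword fetched from the aligned offset 0x04 is shifted right by
++ * 2 << READ_OPS_MOVE_LEFT = 16 bits and masked with 0xffff to select
++ * the upper half-word.
++ */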
++
++static int pcie_read_from_dbi(struct pcie_info *info, unsigned int devfn,
++		int where, int size, u32 *value)
++{
++	unsigned int val;
++
++	/*
++	 * For host-side config space reads, ignore the device/function number.
++	 */
++	if (devfn > 0)
++		return -EIO;
++
++	val = (u32)readl((void *)(uintptr_t)(info->conf_base_addr +
++		((unsigned int)where & (~0x3))));
++	/* where[1:0] represents which byte of the register should be read */
++	if (size == 1) {
++		*value = (val >> (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)) & 0xff;
++	} else if (size == 2) { /* 2:size for config read operation */
++		*value = (val >> (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)) & 0xffff;
++	} else if (size == 4) { /* 4:size for config read operation */
++		*value = val;
++	} else {
++		pcie_error("Unknown size for config read operation!");
++		BUG();
++	}
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++static int pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
++		int where, int size, u32 *value)
++{
++	struct pcie_info *info = bus_to_info(bus->number);
++	int ret;
++
++	if (unlikely(!info)) {
++		pcie_error(
++			"%s:Cannot find corresponding controller for appointed device!", __func__);
++		BUG();
++	}
++
++	if (bus->number == info->root_bus_nr)
++		ret = pcie_read_from_dbi(info, devfn, where, size, value);
++	else
++		ret = pcie_read_from_device(bus, devfn, where, size, value);
++
++	pcie_debug(PCIE_DBG_REG,
++		"bus %d, devfn %d, where 0x%x, size 0x%x, value 0x%x",
++		bus->number & 0xff, devfn, where, size, *value);
++
++	return ret;
++}
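++/*
++ * The write paths below mirror the read path with a read-modify-write:
++ * e.g. (informational) a size-1 write of 0xAB at where = 0x05 first
++ * reads the aligned dword, clears 0xff << 8, ORs in 0xAB << 8, and
++ * writes the merged dword back.
++ */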
++
++static int pcie_write_to_device(struct pci_bus *bus, unsigned int devfn,
++		int where, int size, u32 value)
++{
++	struct pcie_info *info = bus_to_info(bus->number);
++	void __iomem *addr;
++	unsigned int org;
++	unsigned long flag;
++	int ret;
++
++	if (unlikely(!info)) {
++		pcie_error(
++			"%s:Cannot find corresponding controller for appointed device!", __func__);
++		BUG();
++	}
++
++	if (!is_pcie_link_up(info)) {
++		pcie_debug(PCIE_DBG_MODULE, "pcie %d not link up!",
++			info->controller);
++		return -1;
++	}
++
++	spin_lock_irqsave(&cw_lock, flag);
++
++	/* where[11:2] represents the reg_num for register addressing */
++	ret = pcie_read_from_device(bus, devfn, where, 4, &org); /* 4:read ops size */
++
++	addr = (void __iomem *)(uintptr_t)to_pcie_address(bus, devfn, where);
++
++	/* where[1:0] represents which byte of the register should be written */
++	if (size == 1) {
++		org &= (~(0xff << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)));
++		org |= (value << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT));
++	} else if (size == 2) { /* 2:write ops size */
++		org &= (~(0xffff << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)));
++		org |= (value << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT));
++	} else if (size == 4) { /* 4:write ops size */
++		org = value;
++	} else {
++		pcie_error("Unknown size %d for write ops", size);
++		BUG();
++	}
++
++	writel(org, addr);
++
++	spin_unlock_irqrestore(&cw_lock, flag);
++
++	return ret;
++}
++
++static int pcie_write_to_dbi(struct pcie_info *info, unsigned int devfn,
++		int where, int size, u32 value)
++{
++	unsigned long flag;
++	unsigned int org;
++
++	spin_lock_irqsave(&cw_lock, flag);
++	/* where[11:2] represents the reg_num for register addressing */
++	if (pcie_read_from_dbi(info, devfn, (unsigned int)where, 4, &org)) { /* 4:size for config read operation */
++		pcie_error("Cannot read from dbi! 0x%x:0x%x:0x%x!",
++			0, devfn, (unsigned int)where);
++		spin_unlock_irqrestore(&cw_lock, flag);
++		return -EIO;
++	}
++	/* where[1:0] represents which byte of the register should be written */
++	if (size == 1) {
++		org &= (~(0xff << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)));
++		org |= (value << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT));
++	} else if (size == 2) { /* 2:write ops size */
++		org &= (~(0xffff << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT)));
++		org |= (value << (((unsigned int)where & 0x3) << READ_OPS_MOVE_LEFT));
++	} else if (size == 4) { /* 4:write ops size */
++		org = value;
++	} else {
++		pcie_error("Unknown size %d for write ops", size);
++		BUG();
++	}
++	writel(org, ((void __iomem *)(uintptr_t)info->conf_base_addr +
++		((unsigned int)where & (~0x3))));
++
++	spin_unlock_irqrestore(&cw_lock, flag);
++
++	return PCIBIOS_SUCCESSFUL;
++}
++
++static int pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
++		int where, int size, u32 value)
++{
++	struct pcie_info *info = bus_to_info(bus->number);
++
++	pcie_debug(PCIE_DBG_REG,
++		"bus %d, devfn %d, where 0x%x, size 0x%x, value 0x%x",
++		bus->number & 0xff, devfn, where, size, value);
++
++	if (unlikely(!info)) {
++		pcie_error(
++			"%s:Cannot find corresponding controller for appointed device!", __func__);
++		BUG();
++	}
++
++	if (bus->number == info->root_bus_nr)
++		return pcie_write_to_dbi(info, devfn, where, size, value);
++	else
++		return pcie_write_to_device(bus, devfn, where, size, value);
++}
++
++static struct pci_ops pcie_ops = {
++	.read = pcie_read_conf,
++	.write = pcie_write_conf,
++};
++
++static int pci_common_init(struct platform_device *pdev, struct hw_pci *bsp_pcie)
++{
++	struct device_node *dn = pdev->dev.of_node;
++	struct pcie_info *info = NULL;
++	struct pci_bus *bus = NULL;
++	resource_size_t io_addr;
++	int ret;
++	int pcie_contrl = -1;
++	struct resource bus_range;
++
++	LIST_HEAD(res);
++
++	ret = of_property_read_u32(dn, "pcie_controller", &pcie_contrl);
++	if (ret) {
++		pr_err("%s:No pcie_controller found!\n", __func__);
++		return -EINVAL;
++	}
++
++	ret = of_pci_parse_bus_range(dn, &bus_range);
++	if (ret != 0) {
++		pr_err("%s:No \"bus-range\" found!\n", __func__);
++		return ret;
++	}
++
++	pr_info("PCIe Controller %d: Bus range [%#llx, %#llx]\n", pcie_contrl,
++		bus_range.start, bus_range.end);
++
++	ret = bsp_of_pci_get_host_bridge_resources(&pdev->dev, bus_range.start,
++		bus_range.end, &res, &io_addr);
++	if (ret)
++		return ret;
++
++	bus = pci_create_root_bus(&pdev->dev, bus_range.start, &pcie_ops, bsp_pcie, &res);
++	if (!bus)
++		return -ENOMEM;
++
++	pcie_info[pcie_contrl].root_bus_nr = bus->number;
++	info = bus_to_info(bus->number);
++	if (info != NULL)
++		__arch_config_iatu_tbl(info, NULL);
++
++	pci_scan_child_bus(bus);
++	pci_assign_unassigned_bus_resources(bus);
++	pci_bus_add_devices(bus);
++
++	platform_set_drvdata(pdev, bsp_pcie);
++
++	return 0;
++}
++
++static int get_pcie_controller(struct device_node *node, int *controllers_nr)
++{
++	int err;
++
++	if (!node) {
++		pr_err("get node from dts failed! 
controller:%d\n", *controllers_nr); ++ return -EIO; ++ } ++ ++ err = of_property_read_u32(node, "pcie_controller", controllers_nr); ++ if (err) { ++ pr_err("%s:No pcie_controller found!\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (__arch_pcie_info_setup(pcie_info, controllers_nr)) ++ return -EIO; ++ ++ if (*controllers_nr >= MAX_PCIE_CONTROLLER_NUM) { ++ pr_err("pcie_controllers_nr is Invalid, pcie_controllers_nr: %d\n", *controllers_nr); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int pcie_init(struct platform_device *pdev) ++{ ++ int err; ++ struct hw_pci *bsp_pcie = NULL; ++ ++ if (!pdev) { ++ pr_err("pdev is null!\n"); ++ return -ENOMEM; ++ } ++ ++ bsp_pcie = kzalloc(sizeof(struct hw_pci), GFP_KERNEL); ++ if (!bsp_pcie) { ++ pr_err("kzalloc hw_pci space failed!\n"); ++ return -ENOMEM; ++ } ++ ++ bsp_pcie->dev = &pdev->dev; ++ ++ g_of_node = pdev->dev.of_node; ++ ++ err = get_pcie_controller(g_of_node, &pcie_controllers_nr); ++ if (err) { ++ kfree(bsp_pcie); ++ return err; ++ } ++ ++ if (__arch_pcie_sys_init(&pcie_info[pcie_controllers_nr])) ++ goto pcie_init_err; ++ ++ bsp_pcie->nr_controllers = pcie_controllers_nr; ++ pr_err("Number of PCIe controllers: %d\n", bsp_pcie->nr_controllers); ++ ++ err = pci_common_init(pdev, bsp_pcie); ++ ++ if (err) ++ goto pcie_init_err; ++ ++ return 0; ++ ++pcie_init_err: ++ __arch_pcie_info_release(&pcie_info[pcie_controllers_nr]); ++ ++ kfree(bsp_pcie); ++ ++ return -EIO; ++} ++ ++static int __exit pcie_uinit(struct platform_device *pdev) ++{ ++ __arch_pcie_info_release(pcie_info); ++ return 0; ++} ++ ++#include ++#include ++ ++int bsp_pcie_plat_driver_probe(struct platform_device *pdev) ++{ ++ return 0; ++} ++int bsp_pcie_plat_driver_remove(struct platform_device *pdev) ++{ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++int bsp_pcie_plat_driver_suspend(struct device *dev) ++{ ++ __arch_pcie_sys_exit(); ++ return 0; ++} ++ ++int bsp_pcie_plat_driver_resume(struct device *dev) ++{ ++ return __arch_pcie_sys_init(pcie_info); ++} ++ ++const struct dev_pm_ops bsp_pcie_pm_ops = { ++ .suspend = NULL, ++ .suspend_noirq = bsp_pcie_plat_driver_suspend, ++ .resume = NULL, ++ .resume_noirq = bsp_pcie_plat_driver_resume ++}; ++ ++#define BSP_PCIE_PM_OPS (&bsp_pcie_pm_ops) ++#else ++#define BSP_PCIE_PM_OPS NULL ++#endif ++ ++#define PCIE_RC_DRV_NAME "pcie root complex" ++ ++static const struct of_device_id bsp_pcie_match_table[] = { ++ { .compatible = "vendor,pcie", }, ++ {}, ++}; ++ ++static struct platform_driver bsp_pcie_driver = { ++ .driver = { ++ .name = "bsp-pcie", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(bsp_pcie_match_table), ++ }, ++ .probe = pcie_init, ++}; ++module_platform_driver(bsp_pcie_driver); ++ ++MODULE_DESCRIPTION("Vendor PCI-Express Root Complex driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/pci/bsp_pcie/pcie_ss928v100.c b/drivers/pci/bsp_pcie/pcie_ss928v100.c +new file mode 100644 +index 000000000..9d572a200 +--- /dev/null ++++ b/drivers/pci/bsp_pcie/pcie_ss928v100.c +@@ -0,0 +1,383 @@ ++/* ++ * Copyright (c) 2017-2018 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include "pcie_ss928v100.h"
++
++static void *dbi_base;
++static int __arch_pcie_info_setup(struct pcie_info *info, int *controllers_nr);
++static int __arch_pcie_sys_init(struct pcie_info *info);
++static void __arch_pcie_info_release(struct pcie_info *info);
++
++struct pcie_iatu iatu_table[] = {
++	{
++		.viewport = 0, /* iATU Viewport Register */
++		.region_ctrl_1 = 0x00000004, /* Region Control 1 Register */
++		.region_ctrl_2 = 0x90000000, /* Region Control 2 Register */
++		.lbar = PCIE_EP_CONF_BASE + (1<<20), /* Lower Base Address Register */
++		.ubar = 0x0, /* Upper Base Address Register */
++		.lar = PCIE_EP_CONF_BASE + (2<<20) - 1, /* Limit Address Register */
++		.ltar = 0x01000000, /* Lower Target Address Register */
++		.utar = 0x00000000, /* Upper Target Address Register */
++	},
++	{
++		.viewport = 1, /* iATU Viewport Register */
++		.region_ctrl_1 = 0x00000005, /* Region Control 1 Register */
++		.region_ctrl_2 = 0x90000000, /* Region Control 2 Register */
++		.lbar = PCIE_EP_CONF_BASE + (2<<20), /* Lower Base Address Register */
++		.ubar = 0x0, /* Upper Base Address Register */
++		.lar = PCIE_EP_CONF_BASE + (__256MB__ - 1), /* Limit Address Register */
++		.ltar = 0x02000000, /* Lower Target Address Register */
++		.utar = 0x00000000, /* Upper Target Address Register */
++	},
++};
++
++static void __arch_config_iatu_tbl(struct pcie_info *info,
++		struct pci_sys_data *sys)
++{
++	int i;
++	void __iomem *config_base = (void __iomem *)(uintptr_t)info->conf_base_addr;
++	struct pcie_iatu *ptable = iatu_table;
++	int table_size = ARRAY_SIZE(iatu_table);
++
++	/* configure atu */
++	for (i = 0; i < table_size; i++) {
++		writel((ptable + i)->viewport, config_base + ATU_VIEWPORT_REG);
++		writel((ptable + i)->lbar, config_base + ATU_BASE_LOW_REG);
++		writel((ptable + i)->ubar, config_base + ATU_BASE_HIGH_REG);
++		writel((ptable + i)->lar, config_base + ATU_LIMIT_REG);
++		writel((ptable + i)->ltar, config_base + ATU_TARGET_LOW_REG);
++		writel((ptable + i)->utar, config_base + ATU_TARGET_HIGH_REG);
++		writel((ptable + i)->region_ctrl_1, config_base + ATU_REGION_CTRL1_REG);
++		writel((ptable + i)->region_ctrl_2, config_base + ATU_REGION_CTRL2_REG);
++	}
++}
++
++static unsigned int __arch_check_pcie_link(struct pcie_info *info)
++{
++	unsigned int val;
++
++	val = readl((void *)(uintptr_t)(info->conf_base_addr + PCIE_SYS_STATE0));
++	return ((val & (1 << PCIE_XMLH_LINK_UP))
++		&& (val & (1 << PCIE_RDLH_LINK_UP))) ? 1 : 0;
++}
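++/*
++ * Reference for the table above (sketch of DWC iATU semantics as this
++ * driver appears to use them): entry 0 (CTRL1 type 0x4 = CFG0) maps the
++ * CPU window at PCIE_EP_CONF_BASE + 1 MiB, where to_pcie_address()
++ * places bus 1, onto type-0 config cycles; entry 1 (type 0x5 = CFG1)
++ * covers the remaining 256 MiB window for buses behind bridges.
++ */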
++
++static int __arch_get_ups_mode(void)
++{
++	unsigned int val;
++	unsigned int mode;
++	unsigned int sys_ctrl_base = 0;
++	void *pcie_sys_stat = NULL;
++
++	/* Get sys ctrl base address */
++	of_property_read_u32(g_of_node, "sys_ctrl_base", &sys_ctrl_base);
++
++	pcie_sys_stat = ioremap(sys_ctrl_base + REG_SC_STAT, sizeof(int));
++	if (!pcie_sys_stat) {
++		pr_err("ioremap pcie sys status register failed!\n");
++		return 0;
++	}
++
++	val = readl(pcie_sys_stat);
++	mode = (val >> PCIE_MODE_SHIFT) & PCIE_MODE_MASK;
++
++	iounmap(pcie_sys_stat);
++
++	return mode;
++}
++
++static int __arch_get_port_nr(void)
++{
++	unsigned int mode;
++	int port_nr;
++
++	mode = __arch_get_ups_mode();
++	switch (mode) {
++	case 0x0:
++	case 0x2:
++		port_nr = 1;
++		break;
++
++	default:
++		port_nr = 0;
++		break;
++	}
++
++	return port_nr;
++}
++
++static int read_properties(struct pcie_property *property)
++{
++	int err;
++
++	/* Get pcie device memory size */
++	err = of_property_read_u32(g_of_node, "dev_mem_size", &property->pcie_mem_size);
++	if (err) {
++		pcie_error("No dev_mem_size found!");
++		return err;
++	}
++
++	/* Get pcie config space size */
++	err = of_property_read_u32(g_of_node, "dev_conf_size", &property->pcie_cfg_size);
++	if (err) {
++		pcie_error("No dev_conf_size found!");
++		return err;
++	}
++
++	/* Get pcie dbi base address */
++	err = of_property_read_u32(g_of_node, "pcie_dbi_base", &property->pcie_dbi_base);
++	if (err) {
++		pcie_error("No pcie_dbi_base found!");
++		return err;
++	}
++
++	/* Get pcie device config base address */
++	err = of_property_read_u32(g_of_node, "ep_conf_base", &property->pcie_ep_conf_base);
++	if (err) {
++		pcie_error("No ep_conf_base found!");
++		return err;
++	}
++
++	if ((property->pcie_mem_size > __256MB__) || (property->pcie_cfg_size > __256MB__)) {
++		pcie_error(
++			"Invalid parameter: pcie mem size[0x%x], pcie cfg size[0x%x]!",
++			property->pcie_mem_size, property->pcie_cfg_size);
++		return -EINVAL;
++	}
++
++	err = of_property_read_u32(g_of_node, "pcie_controller", &property->pcie_contrl);
++	if (err) {
++		pcie_error("No pcie_controller found!");
++		return err;
++	}
++
++	return 0;
++}
++
++static int __arch_pcie_info_setup(struct pcie_info *info, int *controllers_nr)
++{
++	struct pcie_property property = {
++		.pcie_mem_size = 0,
++		.pcie_cfg_size = 0,
++		.pcie_ep_conf_base = 0,
++		.pcie_contrl = 0
++	};
++	int nr;
++	int err;
++
++	err = read_properties(&property);
++	if (err)
++		return -EINVAL;
++
++	nr = __arch_get_port_nr();
++	if (!nr) {
++		pr_err("Pcie port number: 0\n");
++		*controllers_nr = 0;
++		return -EINVAL;
++	}
++
++	info->controller = property.pcie_contrl;
++
++	/* RC configuration space */
++	info->conf_base_addr = (uintptr_t)ioremap(property.pcie_dbi_base, __8KB__);
++	if (!info->conf_base_addr) {
++		pcie_error("Address mapping for RC dbi failed!");
++		return -EIO;
++	}
++
++	/* Configuration space for all EPs */
++	info->base_addr = (unsigned long)(uintptr_t)ioremap(
++		property.pcie_ep_conf_base, property.pcie_cfg_size);
++	if (!info->base_addr) {
++		iounmap((void *)(uintptr_t)info->conf_base_addr);
++		pcie_error("Address mapping for EPs cfg failed!");
++		return -EIO;
++	}
++
++	return 0;
++}
++
++static void __arch_pcie_info_release(struct pcie_info *info)
++{
++	if (info->base_addr)
++		iounmap((void *)(uintptr_t)info->base_addr);
++
++	if (info->conf_base_addr)
++		iounmap((void *)(uintptr_t)info->conf_base_addr);
++}
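++/*
++ * For reference, a device-tree node this driver could bind against
++ * might look like the sketch below; the property names match the code
++ * above, all values are placeholders:
++ *
++ *   pcie0 {
++ *           compatible = "vendor,pcie";
++ *           pcie_controller = <0>;
++ *           pcie_dbi_base = <0x...>;          // RC DBI base
++ *           ep_conf_base = <0x20000000>;      // PCIE_EP_CONF_BASE
++ *           dev_conf_size = <0x10000000>;     // at most 256 MiB
++ *           dev_mem_size = <0x10000000>;      // at most 256 MiB
++ *           sys_ctrl_base = <0x...>;
++ *           pcie_clk_rest_reg = <0x3a40>;     // PERI_CRG_PCIE_CLK_REG
++ *           bus-range = <0x0 0xff>;
++ *   };
++ */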
++
++static int __arch_pcie_sys_init(struct pcie_info *info)
++{
++	unsigned int val;
++	unsigned int pcie_clk_rst_reg = 0;
++	void *crg_base = NULL;
++
++	crg_base = (void *)ioremap(PERI_CRG_BASE, __16KB__);
++	if (!crg_base) {
++		pr_err("ioremap crg base address failed! func:%s, line:%d\n", __func__, __LINE__);
++		return -EINVAL;
++	}
++
++	val = of_property_read_u32(g_of_node, "pcie_clk_rest_reg", &pcie_clk_rst_reg);
++	if (val) {
++		pcie_error("No pcie_clk_rest_reg found!");
++		iounmap(crg_base);
++		return -EINVAL;
++	}
++
++	dbi_base = (void *)(uintptr_t)info->conf_base_addr;
++
++	/*
++	 * Disable PCIE
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL7);
++	val &= (~(1 << PCIE_APP_LTSSM_ENBALE));
++	writel(val, dbi_base + PCIE_SYS_CTRL7);
++
++	/*
++	 * Reset
++	 */
++	val = readl(crg_base + pcie_clk_rst_reg);
++	val |= PCIE_X2_SRST_REQ;
++	writel(val, crg_base + pcie_clk_rst_reg);
++
++	/*
++	 * Take the controller back out of reset
++	 */
++	udelay(500);
++	val = readl(crg_base + pcie_clk_rst_reg);
++	val &= ~PCIE_X2_SRST_REQ;
++	writel(val, crg_base + pcie_clk_rst_reg);
++	mdelay(10);
++
++	writel(PCIE_WK_RC, dbi_base);
++	/*
++	 * PCIE RC work mode
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL0);
++	val &= (~(0xf << PCIE_DEVICE_TYPE));
++	val |= (PCIE_WM_RC << PCIE_DEVICE_TYPE);
++	writel(val, dbi_base + PCIE_SYS_CTRL0);
++
++	/*
++	 * Enable clk
++	 */
++	val = readl(crg_base + pcie_clk_rst_reg);
++	val |= ((1 << PCIE_X2_BUS_CKEN) |
++		(1 << PCIE_X2_SYS_CKEN) |
++		(1 << PCIE_X2_PIPE_CKEN) |
++		(1 << PCIE_X2_AUX_CKEN));
++	writel(val, crg_base + pcie_clk_rst_reg);
++
++	mdelay(10);
++
++	/*
++	 * Set PCIe to support identification of the board card
++	 */
++	val = readl(dbi_base + PCI_CARD);
++	val |= (1<<3);
++	writel(val, dbi_base + PCI_CARD);
++	mdelay(10);
++
++	/*
++	 * Set PCIE controller class code to be PCI-PCI bridge device
++	 */
++	val = readl(dbi_base + PCI_CLASS_REVISION);
++	val &= ~(0xffffff00);
++	val |= (0x60400 << 8);
++	writel(val, dbi_base + PCI_CLASS_REVISION);
++	udelay(1000);
++
++	/*
++	 * reset EP
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL32);
++	val &= ~(1 << PCIE_RESET_TO_PAD);
++	writel(val, dbi_base + PCIE_SYS_CTRL32);
++
++	/*
++	 * Enable controller
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL7);
++	val |= (1 << PCIE_APP_LTSSM_ENBALE);
++	writel(val, dbi_base + PCIE_SYS_CTRL7);
++	udelay(1000);
++
++	val = readl(dbi_base + PCI_COMMAND);
++	val |= 7; /* I/O Space, Memory Space and Bus Master Enable */
++	writel(val, dbi_base + PCI_COMMAND);
++
++	udelay(1000);
++
++	/*
++	 * release EP
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL32);
++	val |= (1 << PCIE_RESET_TO_PAD);
++	writel(val, dbi_base + PCIE_SYS_CTRL32);
++
++	iounmap(crg_base);
++	return 0;
++}
++
++static void __arch_pcie_sys_exit(void)
++{
++	unsigned int val;
++	unsigned int pcie_clk_rst_reg = 0;
++	void *crg_base = NULL;
++
++	crg_base = (void *)ioremap(PERI_CRG_BASE, __8KB__);
++	if (!crg_base) {
++		pr_err("ioremap crg base address failed! func:%s, line:%d\n",
++			__func__, __LINE__);
++		return;
++	}
++
++	val = of_property_read_u32(g_of_node, "pcie_clk_rest_reg", &pcie_clk_rst_reg);
++	if (val) {
++		pcie_error("No pcie_clk_rest_reg found!");
++		iounmap(crg_base);
++		return;
++	}
++
++	/*
++	 * Disable PCIE
++	 */
++	val = readl(dbi_base + PCIE_SYS_CTRL7);
++	val &= (~(1 << PCIE_APP_LTSSM_ENBALE));
++	writel(val, dbi_base + PCIE_SYS_CTRL7);
++
++	/*
++	 * Reset
++	 */
++	val = readl(crg_base + pcie_clk_rst_reg);
++	val |= PCIE_X2_SRST_REQ;
++	writel(val, crg_base + pcie_clk_rst_reg);
++
++	udelay(1000);
++
++	/*
++	 * Disable clk
++	 */
++	val = readl(crg_base + pcie_clk_rst_reg);
++	val &= (~(1 << PCIE_X2_AUX_CKEN));
++	val &= (~(1 << PCIE_X2_PIPE_CKEN));
++	val &= (~(1 << PCIE_X2_SYS_CKEN));
++	val &= (~(1 << PCIE_X2_BUS_CKEN));
++	writel(val, crg_base + pcie_clk_rst_reg);
++
++	iounmap(crg_base);
++
++	udelay(1000);
++}
+diff --git a/drivers/pci/bsp_pcie/pcie_ss928v100.h b/drivers/pci/bsp_pcie/pcie_ss928v100.h
+new file mode 100644
+index 000000000..eb6022a72
+--- /dev/null
++++ b/drivers/pci/bsp_pcie/pcie_ss928v100.h
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c) 2020-2021 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef __VENDOR_PCIE_H__
++#define __VENDOR_PCIE_H__
++
++#define PCIE_EP_CONF_BASE 0x20000000
++#define PCIE_EP1_CONF_BASE 0x30000000
++
++#define PERI_CRG_BASE 0x11010000
++
++#define PERI_CRG_PCIE_CLK_REG 0x3A40
++#define PCIE_BUS_SRST_REQ (0x1 << 0)
++#define PCIE_SYS_SRST_REQ (0x1 << 1)
++#define PCIE_SRST_REQ (0x1 << 2)
++#define PCIE_X2_SRST_REQ (PCIE_BUS_SRST_REQ | PCIE_SYS_SRST_REQ | PCIE_SRST_REQ)
++
++#define PCIE_X2_AUX_CKEN 7
++#define PCIE_X2_PIPE_CKEN 6
++#define PCIE_X2_SYS_CKEN 5
++#define PCIE_X2_BUS_CKEN 4
++#define PCIE_PAD_OE_MASK (0x7 << 8)
++
++#define PCIE_SYS_CTRL0 0xc00
++#define PCIE_DEVICE_TYPE 28
++#define PCIE_WM_EP 0x0
++#define PCIE_WM_LEGACY 0x1
++#define PCIE_WM_RC 0x4
++#define PCIE_WK_RC 0x092821b4
++
++#define PCIE_SYS_CTRL7 0xc1C
++#define PCIE_APP_LTSSM_ENBALE 11
++
++#define PCIE_SYS_CTRL32 0xc80
++#define PCIE_RESET_TO_PAD 30
++
++#define PCIE_SYS_STATE0 0xf00
++#define PCIE_XMLH_LINK_UP 15
++#define PCIE_RDLH_LINK_UP 5
++
++#define PCIE_INTA_PIN 1
++#define PCIE_INTB_PIN 2
++#define PCIE_INTC_PIN 3
++#define PCIE_INTD_PIN 4
++
++#define REG_SC_STAT 0x0018
++#define PCI_CARD 0x44
++
++#define PCIE_MODE_SHIFT 16
++#define PCIE_MODE_MASK 0x3
++
++#define ATU_VIEWPORT_REG 0x900
++#define ATU_REGION_CTRL1_REG 0x904
++#define ATU_REGION_CTRL2_REG 0x908
++#define ATU_BASE_LOW_REG 0x90c
++#define ATU_BASE_HIGH_REG 0x910
++#define ATU_LIMIT_REG 0x914
++#define ATU_TARGET_LOW_REG 0x918
++#define ATU_TARGET_HIGH_REG 0x91c
++
++#endif
+diff --git a/drivers/pci/of.c b/drivers/pci/of.c
+index 51e3dd0ea..70a82d28d 100644
+--- a/drivers/pci/of.c
++++ b/drivers/pci/of.c
+@@ -410,6 +410,16 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ 	return err;
+ }
+ 
++#if defined(CONFIG_ARCH_BSP) && defined(CONFIG_BSP_PCIE)
++int bsp_of_pci_get_host_bridge_resources(struct device *dev,
++		unsigned char busno, unsigned char bus_max,
++		struct list_head *resources, resource_size_t *io_base)
++{
++	return devm_of_pci_get_host_bridge_resources(dev, busno, bus_max,
++			resources, NULL, io_base);
++}
++#endif
++
+ #if IS_ENABLED(CONFIG_OF_IRQ)
+ /**
+  * of_irq_parse_pci - Resolve the interrupt for a PCI device
+diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
+index 8ebcddf91..025eaa616 100644
+--- a/drivers/pwm/Kconfig
++++ b/drivers/pwm/Kconfig
+@@ -708,4 +708,13 @@ config PWM_XILINX
+ 	  To compile this driver as a module, choose M here: the module
+ 	  will be called pwm-xilinx.
+ ++config PWM_BSP ++ tristate "Vendor PWM support" ++ depends on ARCH_BSP || COMPILE_TEST ++ help ++ Generic PWM framework driver for Vendor SoCs. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called pwm-bsp. ++ + endif +diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile +index c822389c2..bdb710716 100644 +--- a/drivers/pwm/Makefile ++++ b/drivers/pwm/Makefile +@@ -11,6 +11,7 @@ obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o + obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o + obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o + obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o ++obj-$(CONFIG_PWM_BSP) += pwm-bsp.o + obj-$(CONFIG_PWM_CLK) += pwm-clk.o + obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o + obj-$(CONFIG_PWM_CRC) += pwm-crc.o +diff --git a/drivers/pwm/pwm-bsp.c b/drivers/pwm/pwm-bsp.c +new file mode 100644 +index 000000000..1957c0f63 +--- /dev/null ++++ b/drivers/pwm/pwm-bsp.c +@@ -0,0 +1,426 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2024. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_ARCH_SS928V100 ++ ++/* reg addr of the xth chn. */ ++#define pwm_period_cfg_addr(x) (0x0000 + (0x100 * (x))) ++#define pwm_duty0_cfg_addr(x) (0x0004 + (0x100 * (x))) ++#define pwm_duty1_cfg_addr(x) (0x0008 + (0x100 * (x))) ++#define pwm_duty2_cfg_addr(x) (0x000C + (0x100 * (x))) ++#define pwm_num_cfg_addr(x) (0x0010 + (0x100 * (x))) ++#define pwm_ctrl_addr(x) (0x0014 + (0x100 * (x))) ++#define pwm_dt_value_cfg_addr(x) (0x0020 + (0x100 * (x))) ++#define pwm_dt_ctrl_cfg_addr(x) (0x0024 + (0x100 * (x))) ++#define pwm_sync_cfg_addr(x) (0x0030 + (0x100 * (x))) ++#define pwm_sync_delay_cfg_addr(x) (0x0034 + (0x100 * (x))) ++#define pwm_period_addr(x) (0x0040 + (0x100 * (x))) ++#define pwm_duty0_addr(x) (0x0044 + (0x100 * (x))) ++#define pwm_duty1_addr(x) (0x0048 + (0x100 * (x))) ++#define pwm_duty2_addr(x) (0x004C + (0x100 * (x))) ++#define pwm_num_addr(x) (0x0050 + (0x100 * (x))) ++#define pwm_ctrl_st_addr(x) (0x0054 + (0x100 * (x))) ++#define pwm_dt_value_addr(x) (0x0060 + (0x100 * (x))) ++#define pwm_dt_ctrl_addr(x) (0x0064 + (0x100 * (x))) ++#define pwm_sync_delay_addr(x) (0x0074 + (0x100 * (x))) ++ ++#define PWM_SYNC_START_ADDR 0x0ff0 ++ ++#define PWM_ALIGN_MODE_SHIFT 4 ++#define PWM_ALIGN_MODE_MASK GENMASK(5, 4) ++ ++#define PWM_PRE_DIV_SEL_SHIFT 8 ++#define PWM_PRE_DIV_SEL_MASK GENMASK(11, 8) ++ ++/* pwm dt value */ ++#define PWM_DT_A_SHIFT 0 ++#define PWM_DT_A_MASK GENMASK(15, 0) ++ ++#define PWM_DT_B_SHIFT 16 ++#define PWM_DT_B_MASK GENMASK(31, 16) ++ ++/* pwm dt ctrl */ ++#define PWM_DTS_OUT_0P_SHIFT 0 ++#define PWM_DTS_OUT_0P_MASK BIT(0) ++ ++#define PWM_DTS_OUT_0N_SHIFT 1 ++#define PWM_DTS_OUT_0N_MASK BIT(1) ++ ++#define PWM_DTS_OUT_1P_SHIFT 2 ++#define PWM_DTS_OUT_1P_MASK BIT(2) ++ ++#define PWM_DTS_OUT_1N_SHIFT 3 ++#define PWM_DTS_OUT_1N_MASK BIT(3) ++ ++#define PWM_DTS_OUT_2P_SHIFT 4 ++#define PWM_DTS_OUT_2P_MASK BIT(4) ++ ++#define PWM_DTS_OUT_2N_SHIFT 5 ++#define PWM_DTS_OUT_2N_MASK BIT(5) ++ ++#else ++ ++#define pwm_period_cfg_addr(x) (((x) * 0x20) + 0x0) ++#define pwm_duty0_cfg_addr(x) (((x) * 0x20) + 0x4) ++#define pwm_cfg2_addr(x) (((x) * 0x20) + 0x8) ++#define pwm_ctrl_addr(x) (((x) * 0x20) + 0xC) ++ ++#endif ++ ++/* pwm ctrl */ ++#define PWM_ENABLE_SHIFT 0 ++#define PWM_ENABLE_MASK BIT(0) ++ ++#define PWM_POLARITY_SHIFT 1 ++#define PWM_POLARITY_MASK BIT(1) ++ ++#define PWM_KEEP_SHIFT 2 ++#define PWM_KEEP_MASK BIT(2) ++ ++/* pwm period */ 
++#define PWM_PERIOD_MASK GENMASK(31, 0) ++ ++/* pwm duty */ ++#define PWM_DUTY_MASK GENMASK(31, 0) ++ ++#define PWM_RATE_MHZ (1000 * 1000) ++#define PWM_PERIOD_HZ 1000 ++#define PWM_SPECIFIER_CELL_COUNT 3 ++#define PWM_SLEEP_TIME 30 ++ ++enum pwm_pre_div { ++ PWM_PRE_DIV_1 = 0, ++ PWM_PRE_DIV_2, ++ PWM_PRE_DIV_4, ++ PWM_PRE_DIV_8, ++ PWM_PRE_DIV_16, ++ PWM_PRE_DIV_32, ++ PWM_PRE_DIV_64, ++ PWM_PRE_DIV_128, ++ PWM_PRE_DIV_256, ++}; ++ ++enum pwm_align { ++ PWM_ALIGN_RIGHT = 0, ++ PWM_ALIGN_LEFT, ++ PWM_ALIGN_MIDDLE, ++}; ++ ++typedef enum { ++ PWM_CONTROLLER_0 = 0, ++ PWM_CONTROLLER_1, ++} pwm_controller_index; ++ ++typedef enum { ++ PWM_CHN_0 = 0, ++ PWM_CHN_1, ++ PWM_CHN_2, ++ PWM_CHN_3, ++ PWM_CHN_4, ++ PWM_CHN_5, ++ PWM_CHN_6, ++ PWM_CHN_7, ++ PWM_CHN_8, ++ PWM_CHN_9, ++ PWM_CHN_10, ++ PWM_CHN_11, ++ PWM_CHN_12, ++ PWM_CHN_13, ++ PWM_CHN_14, ++ PWM_CHN_15, ++} pwm_chn_index; ++ ++struct bsp_pwm_chip { ++ pwm_controller_index controller_index; ++ struct pwm_chip chip; ++ struct clk *clk; ++ void __iomem *base; ++ struct reset_control *rstc; ++}; ++ ++struct bsp_pwm_soc { ++ u32 num_pwms; ++ const char *pwm_name; ++}; ++ ++#ifdef CONFIG_ARCH_SS928V100 ++#define CHIP_PWM_NUM 2 ++#define CHIP_PWM_CONTROLLER_0_NAME "pwm0" ++#define CHIP_PWM_CONTROLLER_1_NAME "pwm1" ++ ++static const struct bsp_pwm_soc pwm_soc[CHIP_PWM_NUM] = { ++ { .num_pwms = 16, .pwm_name = CHIP_PWM_CONTROLLER_0_NAME }, ++ { .num_pwms = 16, .pwm_name = CHIP_PWM_CONTROLLER_1_NAME }, ++}; ++#endif ++ ++static inline struct bsp_pwm_chip *to_bsp_pwm_chip(struct pwm_chip *chip) ++{ ++ return container_of(chip, struct bsp_pwm_chip, chip); ++} ++ ++static void bsp_pwm_set_bits(void __iomem *base, u32 offset, ++ u32 mask, u32 data) ++{ ++ void __iomem *address = base + offset; ++ u32 value; ++ ++ value = readl(address); ++ value &= ~mask; ++ value |= (data & mask); ++ writel(value, address); ++} ++ ++static void bsp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct bsp_pwm_chip *bsp_pwm_chip = to_bsp_pwm_chip(chip); ++ ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_ctrl_addr(pwm->hwpwm), ++ PWM_ENABLE_MASK, 0x1); ++} ++ ++static void bsp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct bsp_pwm_chip *bsp_pwm_chip = to_bsp_pwm_chip(chip); ++ ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_ctrl_addr(pwm->hwpwm), ++ PWM_ENABLE_MASK, 0x0); ++} ++ ++static bool bsp_pwm_is_complementary_chn(pwm_controller_index controller_index, pwm_chn_index chn_index) ++{ ++#ifdef CONFIG_ARCH_SS928V100 ++ if (((controller_index == PWM_CONTROLLER_0) && (chn_index == PWM_CHN_15)) || ++ ((controller_index == PWM_CONTROLLER_1) && ++ ((chn_index == PWM_CHN_0) || (chn_index == PWM_CHN_1)))) { ++ return 1; ++ } ++#endif ++ return 0; ++} ++ ++static void bsp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, ++ const struct pwm_state *state, u64 period_ns) ++{ ++ struct bsp_pwm_chip *bsp_pwm_chip = to_bsp_pwm_chip(chip); ++ u64 freq, period, duty, duty1, duty2; ++ ++ freq = div_u64(clk_get_rate(bsp_pwm_chip->clk), PWM_RATE_MHZ); ++ ++ period = div_u64(freq * period_ns, PWM_PERIOD_HZ); ++ duty = div_u64(period * state->duty_cycle, period_ns); ++ duty1 = div_u64(period * state->duty_cycle1, period_ns); ++ duty2 = div_u64(period * state->duty_cycle2, period_ns); ++ ++#ifdef CONFIG_ARCH_SS928V100 ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_ctrl_addr(pwm->hwpwm), ++ PWM_PRE_DIV_SEL_MASK, (PWM_PRE_DIV_1 << PWM_PRE_DIV_SEL_SHIFT)); ++#endif ++ ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_period_cfg_addr(pwm->hwpwm), ++ 
PWM_PERIOD_MASK, period); ++ ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_duty0_cfg_addr(pwm->hwpwm), ++ PWM_DUTY_MASK, duty); ++ ++ if (bsp_pwm_is_complementary_chn(bsp_pwm_chip->controller_index, pwm->hwpwm) == 1) { ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_duty1_cfg_addr(pwm->hwpwm), ++ PWM_DUTY_MASK, duty1); ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_duty2_cfg_addr(pwm->hwpwm), ++ PWM_DUTY_MASK, duty2); ++ } ++} ++ ++static void bsp_pwm_set_polarity(struct pwm_chip *chip, ++ struct pwm_device *pwm, ++ enum pwm_polarity polarity) ++{ ++ struct bsp_pwm_chip *bsp_pwm_chip = to_bsp_pwm_chip(chip); ++ ++ if (polarity == PWM_POLARITY_INVERSED) ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_ctrl_addr(pwm->hwpwm), ++ PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT)); ++ else ++ bsp_pwm_set_bits(bsp_pwm_chip->base, pwm_ctrl_addr(pwm->hwpwm), ++ PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT)); ++} ++ ++static int bsp_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, ++ struct pwm_state *state) ++{ ++ struct bsp_pwm_chip *bsp_pwm_chip = to_bsp_pwm_chip(chip); ++ void __iomem *base; ++ u32 freq, value; ++ ++ freq = div_u64(clk_get_rate(bsp_pwm_chip->clk), PWM_RATE_MHZ); ++ base = bsp_pwm_chip->base; ++ value = readl(base + pwm_period_cfg_addr(pwm->hwpwm)); ++ state->period = div_u64(value * PWM_PERIOD_HZ, freq); ++ ++ value = readl(base + pwm_duty0_cfg_addr(pwm->hwpwm)); ++ state->duty_cycle = div_u64(value * PWM_PERIOD_HZ, freq); ++ ++ if (bsp_pwm_is_complementary_chn(bsp_pwm_chip->controller_index, pwm->hwpwm) == 1) { ++ value = readl(base + pwm_duty1_cfg_addr(pwm->hwpwm)); ++ state->duty_cycle1 = div_u64(value * PWM_PERIOD_HZ, freq); ++ ++ value = readl(base + pwm_duty2_cfg_addr(pwm->hwpwm)); ++ state->duty_cycle2 = div_u64(value * PWM_PERIOD_HZ, freq); ++ } ++ value = readl(base + pwm_ctrl_addr(pwm->hwpwm)); ++ state->enabled = (PWM_ENABLE_MASK & value); ++ return 0; ++} ++ ++static int bsp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ++ const struct pwm_state *state) ++{ ++ if (state->polarity != pwm->state.polarity) ++ bsp_pwm_set_polarity(chip, pwm, state->polarity); ++ ++ if (state->period != pwm->state.period || ++ state->duty_cycle != pwm->state.duty_cycle || ++ state->duty_cycle1 != pwm->state.duty_cycle1 || ++ state->duty_cycle2 != pwm->state.duty_cycle2) ++ bsp_pwm_config(chip, pwm, state, state->period); ++ ++ if (state->enabled != pwm->state.enabled) { ++ if (state->enabled) ++ bsp_pwm_enable(chip, pwm); ++ else ++ bsp_pwm_disable(chip, pwm); ++ } ++ ++ return 0; ++} ++ ++static const struct pwm_ops bsp_pwm_ops = { ++ .get_state = bsp_pwm_get_state, ++ .apply = bsp_pwm_apply, ++ ++ .owner = THIS_MODULE, ++}; ++ ++static void bsp_pwm_probe_set_chip_ops(struct platform_device *pdev, struct bsp_pwm_chip *pwm_chip, int chip_loop) ++{ ++ pwm_chip->chip.ops = &bsp_pwm_ops; ++ pwm_chip->chip.dev = &pdev->dev; ++ pwm_chip->chip.base = -1; ++ pwm_chip->chip.npwm = pwm_soc[chip_loop].num_pwms; ++ pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; ++ pwm_chip->chip.of_pwm_n_cells = PWM_SPECIFIER_CELL_COUNT; ++} ++ ++static int bsp_pwm_probe(struct platform_device *pdev) ++{ ++ struct bsp_pwm_chip *pwm_chip, *pwm_chip_tmp; ++ struct resource *res; ++ int ret; ++ int i; ++ int chip_loop; ++ const char *pwm_name = NULL; ++ ++ pwm_chip_tmp = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip_tmp) * CHIP_PWM_NUM, GFP_KERNEL); ++ if (pwm_chip_tmp == NULL) ++ return -ENOMEM; ++ ++ for (chip_loop = 0; chip_loop < CHIP_PWM_NUM; chip_loop++) { ++ pwm_chip = pwm_chip_tmp + chip_loop; ++ 
++		pwm_name = pwm_soc[chip_loop].pwm_name;
++		pwm_chip->controller_index = chip_loop;
++		pwm_chip->clk = devm_clk_get(&pdev->dev, pwm_name);
++		if (IS_ERR(pwm_chip->clk)) {
++			dev_err(&pdev->dev, "getting clock failed with %ld\n",
++				PTR_ERR(pwm_chip->clk));
++			return PTR_ERR(pwm_chip->clk);
++		}
++
++		bsp_pwm_probe_set_chip_ops(pdev, pwm_chip, chip_loop);
++
++		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pwm_name);
++		pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
++		if (IS_ERR(pwm_chip->base))
++			return PTR_ERR(pwm_chip->base);
++
++		ret = clk_prepare_enable(pwm_chip->clk);
++		if (ret < 0)
++			return ret;
++
++		pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, pwm_name);
++		if (IS_ERR(pwm_chip->rstc)) {
++			clk_disable_unprepare(pwm_chip->clk);
++			return PTR_ERR(pwm_chip->rstc);
++		}
++
++		reset_control_assert(pwm_chip->rstc);
++		msleep(PWM_SLEEP_TIME);
++		reset_control_deassert(pwm_chip->rstc);
++
++		ret = pwmchip_add(&pwm_chip->chip);
++		if (ret < 0) {
++			clk_disable_unprepare(pwm_chip->clk);
++			return ret;
++		}
++
++		for (i = 0; i < pwm_chip->chip.npwm; i++) {
++			bsp_pwm_set_bits(pwm_chip->base, pwm_ctrl_addr(i),
++					 PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
++		}
++	}
++
++	platform_set_drvdata(pdev, pwm_chip_tmp);
++
++	return 0;
++}
++
++static int bsp_pwm_remove(struct platform_device *pdev)
++{
++	int ret = 0;
++	int chip_loop;
++	struct bsp_pwm_chip *pwm_chip;
++	struct bsp_pwm_chip *pwm_chip_tmp;
++
++	pwm_chip_tmp = platform_get_drvdata(pdev);
++
++	for (chip_loop = 0; chip_loop < CHIP_PWM_NUM; chip_loop++) {
++		pwm_chip = pwm_chip_tmp + chip_loop;
++		reset_control_assert(pwm_chip->rstc);
++		msleep(PWM_SLEEP_TIME);
++		reset_control_deassert(pwm_chip->rstc);
++
++		clk_disable_unprepare(pwm_chip->clk);
++
++		pwmchip_remove(&pwm_chip->chip);
++	}
++	return ret;
++}
++
++static const struct of_device_id bsp_pwm_of_match[] = {
++	{ .compatible = "vendor,pwm", .data = &pwm_soc[0] },
++	{ }
++};
++
++MODULE_DEVICE_TABLE(of, bsp_pwm_of_match);
++
++static struct platform_driver bsp_pwm_driver = {
++	.driver = {
++		.name = "bsp-pwm",
++		.of_match_table = bsp_pwm_of_match,
++	},
++	.probe = bsp_pwm_probe,
++	.remove = bsp_pwm_remove,
++};
++module_platform_driver(bsp_pwm_driver);
++
++MODULE_LICENSE("GPL");
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 052ccadbd..77e36341f 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -13,6 +13,9 @@
+ #include
+ #include
+ #include
++#ifdef CONFIG_ARCH_BSP
++#include
++#endif
+ 
+ struct pwm_export {
+ 	struct device child;
+@@ -103,6 +106,80 @@ static ssize_t duty_cycle_store(struct device *child,
+ 	return ret ? : size;
+ }
+ 
++#ifdef CONFIG_ARCH_BSP
++
++static ssize_t duty_cycle1_show(struct device *child,
++				struct device_attribute *attr,
++				char *buf)
++{
++	const struct pwm_device *pwm = child_to_pwm_device(child);
++	struct pwm_state state;
++
++	pwm_get_state(pwm, &state);
++
++	/* sysfs show buffers are one page; sizeof(buf) would only measure the pointer */
++	return sprintf_s(buf, PAGE_SIZE, "%llu\n", state.duty_cycle1);
++}
++
++static ssize_t duty_cycle1_store(struct device *child,
++				 struct device_attribute *attr,
++				 const char *buf, size_t size)
++{
++	struct pwm_export *export = child_to_pwm_export(child);
++	struct pwm_device *pwm = export->pwm;
++	struct pwm_state state;
++	u64 val;
++	int ret;
++
++	ret = kstrtou64(buf, 0, &val);
++	if (ret)
++		return ret;
++
++	mutex_lock(&export->lock);
++	pwm_get_state(pwm, &state);
++	state.duty_cycle1 = val;
++	ret = pwm_apply_state(pwm, &state);
++	mutex_unlock(&export->lock);
++
++	return ret ? : size;
++}
++
++static ssize_t duty_cycle2_show(struct device *child,
++				struct device_attribute *attr,
++				char *buf)
++{
++	const struct pwm_device *pwm = child_to_pwm_device(child);
++	struct pwm_state state;
++
++	pwm_get_state(pwm, &state);
++
++	/* sysfs show buffers are one page; sizeof(buf) would only measure the pointer */
++	return sprintf_s(buf, PAGE_SIZE, "%llu\n", state.duty_cycle2);
++}
++
++static ssize_t duty_cycle2_store(struct device *child,
++				 struct device_attribute *attr,
++				 const char *buf, size_t size)
++{
++	struct pwm_export *export = child_to_pwm_export(child);
++	struct pwm_device *pwm = export->pwm;
++	struct pwm_state state;
++	u64 val;
++	int ret;
++
++	ret = kstrtou64(buf, 0, &val);
++	if (ret)
++		return ret;
++
++	mutex_lock(&export->lock);
++	pwm_get_state(pwm, &state);
++	state.duty_cycle2 = val;
++	ret = pwm_apply_state(pwm, &state);
++	mutex_unlock(&export->lock);
++
++	return ret ? : size;
++}
++
++#endif
++
+ static ssize_t enable_show(struct device *child,
+ 			   struct device_attribute *attr,
+ 			   char *buf)
+@@ -231,6 +308,31 @@ static struct attribute *pwm_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(pwm);
+ 
++#ifdef CONFIG_ARCH_BSP
++
++#define PWM_BASE_0 0
++#define PWM_BASE_16 16
++
++#define PWM_COMPLEMENTARY_CHN_0 0
++#define PWM_COMPLEMENTARY_CHN_1 1
++#define PWM_COMPLEMENTARY_CHN_15 15
++
++static DEVICE_ATTR_RW(duty_cycle1);
++static DEVICE_ATTR_RW(duty_cycle2);
++static struct attribute *pwm_pn_attrs[] = {
++	&dev_attr_period.attr,
++	&dev_attr_duty_cycle.attr,
++	&dev_attr_duty_cycle1.attr,
++	&dev_attr_duty_cycle2.attr,
++	&dev_attr_enable.attr,
++	&dev_attr_polarity.attr,
++	&dev_attr_capture.attr,
++	NULL
++};
++ATTRIBUTE_GROUPS(pwm_pn);
++
++#endif
++
+ static void pwm_export_release(struct device *child)
+ {
+ 	struct pwm_export *export = child_to_pwm_export(child);
+@@ -240,6 +342,9 @@ static void pwm_export_release(struct device *child)
+ 
+ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
+ {
++#ifdef CONFIG_ARCH_BSP
++	struct pwm_chip *chip = dev_get_drvdata(parent);
++#endif
+ 	struct pwm_export *export;
+ 	char *pwm_prop[2];
+ 	int ret;
+@@ -259,7 +364,17 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
+ 	export->child.release = pwm_export_release;
+ 	export->child.parent = parent;
+ 	export->child.devt = MKDEV(0, 0);
+-	export->child.groups = pwm_groups;
++#ifdef CONFIG_ARCH_BSP
++	if (((chip->base == PWM_BASE_0) && (pwm->hwpwm == PWM_COMPLEMENTARY_CHN_15)) ||
++	    ((chip->base == PWM_BASE_16) && ((pwm->hwpwm == PWM_COMPLEMENTARY_CHN_0) ||
++	    (pwm->hwpwm == PWM_COMPLEMENTARY_CHN_1)))) {
++		export->child.groups = pwm_pn_groups;
++	} else {
++#endif
++		export->child.groups = pwm_groups;
++#ifdef CONFIG_ARCH_BSP
++	}
++#endif
+ 	dev_set_name(&export->child, "pwm%u", pwm->hwpwm);
+ 
+ 	ret = device_register(&export->child);
+diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
+index 6af54842b..946c0a896 100644
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -103,6 +103,7 @@ obj-$(CONFIG_SPI_ORION)		+= spi-orion.o
+ obj-$(CONFIG_SPI_PCI1XXXX)		+= spi-pci1xxxx.o
+ obj-$(CONFIG_SPI_PIC32)			+= spi-pic32.o
+ obj-$(CONFIG_SPI_PIC32_SQI)		+= spi-pic32-sqi.o
++obj-$(CONFIG_ARCH_BSP)			+= vendor/
+ obj-$(CONFIG_SPI_PL022)			+= spi-pl022.o
+ obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
+ spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index bb347b6bb..6a8e01d42 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -33,6 +33,9 @@
+ #include
+ #include
+ #include
++#ifdef CONFIG_ARCH_BSP
++#include
"vendor/vendor_spi.h" ++#endif + + /* + * This macro is used to define some register default values. +@@ -398,6 +401,9 @@ struct pl022 { + #endif + int cur_cs; + struct gpio_desc *cur_gpiod; ++#ifdef CONFIG_ARCH_BSP ++ struct ssp_vendor_data vendor_data; ++#endif + }; + + /** +@@ -441,6 +447,11 @@ static void internal_cs_control(struct pl022 *pl022, u32 command) + { + u32 tmp; + ++#ifdef CONFIG_ARCH_BSP ++ vendor_internal_cs_control(&pl022->vendor_data, pl022->cur_cs, command); ++ return; ++#endif ++ + tmp = readw(SSP_CSR(pl022->virtbase)); + if (command == SSP_CHIP_SELECT) + tmp &= ~BIT(pl022->cur_cs); +@@ -2024,6 +2035,10 @@ static int pl022_setup(struct spi_device *spi) + SSP_CR0_MASK_FRF, 4); + } + ++#ifdef CONFIG_ARCH_BSP ++ vendor_ssp_setup(&pl022->vendor_data, &chip_info_dt, spi, &chip->cr1); ++#endif ++ + /* Stuff that is common for all versions */ + if (spi->mode & SPI_CPOL) + tmp = SSP_CLK_POL_IDLE_HIGH; +@@ -2185,6 +2200,12 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) + tasklet_init(&pl022->pump_transfers, pump_transfers, + (unsigned long)pl022); + ++#ifdef CONFIG_ARCH_BSP ++ status = vendor_ssp_init(&pl022->vendor_data, pl022->virtbase, host, adev); ++ if (status) ++ goto err_no_irq; ++#endif ++ + /* Disable SSP */ + writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), + SSP_CR1(pl022->virtbase)); +@@ -2380,6 +2401,19 @@ static struct vendor_data vendor_lsi = { + .internal_cs_ctrl = true, + }; + ++#ifdef CONFIG_ARCH_BSP ++static struct vendor_data vendor_bsp = { ++ .fifodepth = 256, ++ .max_bpw = 16, ++ .unidir = false, ++ .extended_cr = false, ++ .pl023 = false, ++ .loopback = true, ++ .internal_cs_ctrl = true, ++}; ++#endif ++ ++ + static const struct amba_id pl022_ids[] = { + { + /* +@@ -2420,6 +2454,17 @@ static const struct amba_id pl022_ids[] = { + .mask = 0x000fffff, + .data = &vendor_lsi, + }, ++#ifdef CONFIG_ARCH_BSP ++ { ++ /* ++ * Vendor derivative, this has a 16bit wide ++ * and 256 locations deep TX/RX FIFO ++ */ ++ .id = 0x00800022, ++ .mask = 0xffffffff, ++ .data = &vendor_bsp, ++ }, ++#endif + { 0, 0 }, + }; + +diff --git a/drivers/spi/vendor/Makefile b/drivers/spi/vendor/Makefile +new file mode 100644 +index 000000000..cce6cd60b +--- /dev/null ++++ b/drivers/spi/vendor/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_ARCH_BSP) += vendor_spi.o +diff --git a/drivers/spi/vendor/vendor_spi.c b/drivers/spi/vendor/vendor_spi.c +new file mode 100644 +index 000000000..64271e030 +--- /dev/null ++++ b/drivers/spi/vendor/vendor_spi.c +@@ -0,0 +1,216 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
++ */ ++#include "vendor_spi.h" ++ ++void vendor_internal_cs_control(struct ssp_vendor_data *vendor_data, ++ int cur_cs, u32 command) ++{ ++ u32 tmp; ++ ++ if (vendor_data->num_cs > 1) { ++ tmp = readl(vendor_data->cs_data.virt_addr); ++ tmp &= ~(vendor_data->cs_data.cs_mask_bit); ++ tmp |= ((u32)cur_cs) << vendor_data->cs_data.cs_sb; ++ writel(tmp, vendor_data->cs_data.virt_addr); ++ } ++ if (command == SSP_CHIP_SELECT) ++ /* Enable SSP */ ++ writew((readw(VENDOR_SSP_CR1(vendor_data->virtbase)) | ++ VENDOR_SSP_CR1_MASK_SSE), ++ VENDOR_SSP_CR1(vendor_data->virtbase)); ++ else ++ /* disable SSP */ ++ writew((readw(VENDOR_SSP_CR1(vendor_data->virtbase)) & ++ (~VENDOR_SSP_CR1_MASK_SSE)), ++ VENDOR_SSP_CR1(vendor_data->virtbase)); ++} ++ ++void vendor_ssp_setup(struct ssp_vendor_data *vendor_data, ++ struct pl022_config_chip *chip_info, struct spi_device *spi, u16 *cr1) ++{ ++ u32 tmp; ++ ++ if (spi->master->slave) ++ chip_info->hierarchy = SSP_SLAVE; ++ else ++ chip_info->hierarchy = SSP_MASTER; ++ ++ if (vendor_data->slave_tx_disable) ++ chip_info->slave_tx_disable = VENDOR_SSP_DO_NOT_DRIVE_TX; ++ else ++ chip_info->slave_tx_disable = VENDOR_SSP_DRIVE_TX; ++ ++ if (spi->mode & SPI_LSB_FIRST) ++ tmp = !!SPI_LSB_FIRST; ++ else ++ tmp = !SPI_LSB_FIRST; ++ VENDOR_SSP_WRITE_BITS(*cr1, tmp, VENDOR_SSP_CR1_MASK_BITEND, ++ VENDOR_SSP_BITEND_SHIFT_BIT); ++ ++ if (spi->mode & SPI_CPHA) ++ VENDOR_SSP_WRITE_BITS(*cr1, 0x1, VENDOR_SSP_CR1_MASK_ALTASENS, ++ VENDOR_SSP_ALTASENS_SHIFT_BIT); ++ else ++ VENDOR_SSP_WRITE_BITS(*cr1, 0x0, VENDOR_SSP_CR1_MASK_ALTASENS, ++ VENDOR_SSP_ALTASENS_SHIFT_BIT); ++} ++ ++static void try_deassert_spi_reset(struct amba_device *adev) ++{ ++ struct reset_control *spi_rst = NULL; ++ spi_rst = devm_reset_control_get(&adev->dev, "bsp_spi_rst"); ++ if (IS_ERR_OR_NULL(spi_rst)) ++ return; ++ /* deassert reset if "resets" property is set */ ++ dev_info(&adev->dev, "deassert reset\n"); ++ reset_control_deassert(spi_rst); ++} ++ ++/* Before using the SPI, you need to read and write a piece ++ of data to clear the abnormal status of the RAT memory */ ++#ifdef CONFIG_ARCH_HI3516CV610_FAMILY ++static void vendor_ssp_clr_ratmem_abnormal(struct ssp_vendor_data *vendor_data) ++{ ++ int polling_count = 0; ++ ++ /* Disable SSP */ ++ writew((readw(VENDOR_SSP_CR1(vendor_data->virtbase)) & (~VENDOR_SSP_CR1_MASK_SSE)), ++ VENDOR_SSP_CR1(vendor_data->virtbase)); ++ ++ writew(VENDOR_SSP_DISABLE_IRQ, VENDOR_SSP_IMSC(vendor_data->virtbase)); ++ ++ writew(VENDOR_SSP_TRAINING_DATA, VENDOR_SSP_DR(vendor_data->virtbase)); ++ ++ writew(VENDOR_SSP_TRAINING_START, VENDOR_SSP_ITCR(vendor_data->virtbase)); ++ ++ while (VENDOR_SSP_POLLING_TIMEOUT > polling_count) { ++ if (readw(VENDOR_SSP_SR(vendor_data->virtbase)) == VENDOR_SSP_TX_STATUS) ++ break; ++ udelay(VENDOR_SSP_WAIT_TIME); ++ polling_count++; ++ } ++ ++ readw(VENDOR_SSP_TDR(vendor_data->virtbase)); ++ ++ polling_count = 0; ++ while (VENDOR_SSP_POLLING_TIMEOUT > polling_count) { ++ if (readw(VENDOR_SSP_SR(vendor_data->virtbase)) == VENDOR_SSP_DEFAULT_STATUS) ++ break; ++ udelay(VENDOR_SSP_WAIT_TIME); ++ polling_count++; ++ } ++ ++ writew(VENDOR_SSP_TRAINING_DATA, VENDOR_SSP_TDR(vendor_data->virtbase)); ++ ++ polling_count = 0; ++ while (VENDOR_SSP_POLLING_TIMEOUT > polling_count) { ++ if (readw(VENDOR_SSP_SR(vendor_data->virtbase)) == VENDOR_SSP_RX_STATUS) ++ break; ++ udelay(VENDOR_SSP_WAIT_TIME); ++ polling_count++; ++ } ++ ++ readw(VENDOR_SSP_DR(vendor_data->virtbase)); ++ ++ polling_count = 0; ++ while (VENDOR_SSP_POLLING_TIMEOUT > polling_count) 
{ ++ if (readw(VENDOR_SSP_SR(vendor_data->virtbase)) == VENDOR_SSP_DEFAULT_STATUS) ++ break; ++ udelay(VENDOR_SSP_WAIT_TIME); ++ polling_count++; ++ } ++ ++ writew(VENDOR_SSP_TRAINING_END, VENDOR_SSP_ITCR(vendor_data->virtbase)); ++} ++#endif ++ ++static int vendor_ssp_get_slave_mode_data(struct ssp_vendor_data *vendor_data, ++ struct spi_master *master, struct amba_device *adev) ++{ ++ unsigned int slave_mode; ++ struct device_node *np = adev->dev.of_node; ++ ++ if (of_property_read_u32(np, "vendor,slave_mode", ++ &slave_mode) == 0) { ++ if (slave_mode == 1) { ++ master->slave = true; ++ } else if (slave_mode == 0) { ++ master->slave = false; ++ } else { ++ dev_err(&adev->dev, "cannot get slave mode!!!\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (of_property_read_u32(np, "vendor,slave_tx_disable", ++ &vendor_data->slave_tx_disable)) { ++ dev_err(&adev->dev, "cannot get slave_tx_disable!!!\n"); ++ return -EPROBE_DEFER; ++ } ++ ++ return 0; ++} ++ ++static int vendor_ssp_get_cs_data(struct ssp_vendor_data *vendor_data, ++ struct amba_device *adev) ++{ ++ struct device_node *np = adev->dev.of_node; ++ ++ if (of_property_read_u32(np, "num-cs", &vendor_data->num_cs)) { ++ return -EPROBE_DEFER; ++ } ++ ++ if (vendor_data->num_cs > 1) { ++ if (of_address_to_resource(np, 1, ++ &vendor_data->cs_data.res)) { ++ return -EPROBE_DEFER; ++ } ++ if (of_property_read_u32(np, "spi_cs_sb", ++ &vendor_data->cs_data.cs_sb)) { ++ return -EPROBE_DEFER; ++ } ++ if (of_property_read_u32(np, "spi_cs_mask_bit", ++ &vendor_data->cs_data.cs_mask_bit)) { ++ return -EPROBE_DEFER; ++ } ++ vendor_data->cs_data.virt_addr = devm_ioremap(&adev->dev, ++ vendor_data->cs_data.res.start, ++ resource_size(&vendor_data->cs_data.res)); ++ if (vendor_data->cs_data.virt_addr == NULL) { ++ dev_err(&adev->dev, "cs_data.virt_addr nomem!!!\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++ ++int vendor_ssp_init(struct ssp_vendor_data *vendor_data, void __iomem *virtbase, ++ struct spi_master *master, struct amba_device *adev) ++{ ++ int ret; ++ ++ master->mode_bits |= SPI_LSB_FIRST; ++ vendor_data->virtbase = virtbase; ++ ++ ret = vendor_ssp_get_slave_mode_data(vendor_data, master, adev); ++ if (ret != 0) ++ return ret; ++ ++ ret = vendor_ssp_get_cs_data(vendor_data, adev); ++ if (ret != 0) ++ return ret; ++ ++ ++ master->num_chipselect = vendor_data->num_cs; ++ ++ try_deassert_spi_reset(adev); ++#ifdef CONFIG_ARCH_HI3516CV610_FAMILY ++ vendor_ssp_clr_ratmem_abnormal(vendor_data); ++#endif ++ writew(0x0, VENDOR_SSP_TX_FIFO_CR(vendor_data->virtbase)); ++ writew(0x0, VENDOR_SSP_RX_FIFO_CR(vendor_data->virtbase)); ++ ++ return 0; ++} +diff --git a/drivers/spi/vendor/vendor_spi.h b/drivers/spi/vendor/vendor_spi.h +new file mode 100644 +index 000000000..02a982965 +--- /dev/null ++++ b/drivers/spi/vendor/vendor_spi.h +@@ -0,0 +1,85 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
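++ *
++ * Register offsets, bit masks and helper prototypes shared between
++ * spi-pl022.c and vendor_spi.c for the vendor SSP derivative.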
++ */ ++#ifndef __VENDOR_LINUX_SPI_H ++#define __VENDOR_LINUX_SPI_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define VENDOR_SSP_WRITE_BITS(reg, val, mask, sb) \ ++ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) ++/* ++ * The Vendor version of this block adds some bits ++ * in SSP_CR1 ++ */ ++#define VENDOR_SSP_CR1_MASK_SSE (0x1UL << 1) ++#define VENDOR_SSP_CR1_MASK_BITEND (0x1UL << 4) ++#define VENDOR_SSP_CR1_MASK_ALTASENS (0x1UL << 6) ++ ++#define VENDOR_SSP_CR0(r) (r + 0x000) ++#define VENDOR_SSP_CR1(r) (r + 0x004) ++#define VENDOR_SSP_DR(r) (r + 0x008) ++#define VENDOR_SSP_SR(r) (r + 0x00C) ++#define VENDOR_SSP_CPSR(r) (r + 0x010) ++#define VENDOR_SSP_IMSC(r) (r + 0x014) ++#define VENDOR_SSP_RIS(r) (r + 0x018) ++#define VENDOR_SSP_MIS(r) (r + 0x01C) ++#define VENDOR_SSP_ICR(r) (r + 0x020) ++#define VENDOR_SSP_DMACR(r) (r + 0x024) ++#define VENDOR_SSP_CSR(r) (r + 0x030) ++#define VENDOR_SSP_ITCR(r) (r + 0x080) ++#define VENDOR_SSP_ITIP(r) (r + 0x084) ++#define VENDOR_SSP_ITOP(r) (r + 0x088) ++#define VENDOR_SSP_TDR(r) (r + 0x08C) ++#define VENDOR_SSP_TX_FIFO_CR(r) (r + 0x028) ++#define VENDOR_SSP_RX_FIFO_CR(r) (r + 0x02C) ++ ++#define VENDOR_SSP_POLLING_TIMEOUT 1000 ++#define VENDOR_SSP_DRIVE_TX 0 ++#define VENDOR_SSP_DO_NOT_DRIVE_TX 1 ++ ++#define VENDOR_SSP_DISABLE_IRQ 0x0 ++#define VENDOR_SSP_TRAINING_DATA 0x5aa5 ++#define VENDOR_SSP_TRAINING_START 0x2 ++#define VENDOR_SSP_TRAINING_END 0x0 ++#define VENDOR_SSP_WAIT_TIME 100 ++#define VENDOR_SSP_DEFAULT_STATUS 0x3 ++#define VENDOR_SSP_RX_STATUS 0x7 ++#define VENDOR_SSP_TX_STATUS 0x2 ++ ++#define VENDOR_SSP_BITEND_SHIFT_BIT 4 ++#define VENDOR_SSP_ALTASENS_SHIFT_BIT 6 ++ ++struct cs_data { ++ struct resource res; ++ void __iomem *virt_addr; ++ unsigned int cs_sb; ++ unsigned int cs_mask_bit; ++}; ++ ++struct ssp_vendor_data { ++ unsigned int slave_tx_disable; ++ unsigned int num_cs; ++ struct cs_data cs_data; ++ void __iomem *virtbase; ++}; ++ ++int vendor_ssp_init(struct ssp_vendor_data *vendor_data, void __iomem *virtbase, ++ struct spi_master *master, struct amba_device *adev); ++ ++void vendor_ssp_setup(struct ssp_vendor_data *vendor_data, ++ struct pl022_config_chip *chip_info, struct spi_device *spi, u16 *cr1); ++ ++void vendor_internal_cs_control(struct ssp_vendor_data *vendor_data, ++ int cur_cs, u32 command); ++ ++#endif /* __VENDOR_LINUX_SPI_H */ +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 5dc634897..f9aef39ca 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -78,13 +78,4 @@ source "drivers/staging/qlge/Kconfig" + + source "drivers/staging/vme_user/Kconfig" + +-source "drivers/staging/hilog/Kconfig" +- +-source "drivers/staging/hievent/Kconfig" +- +-source "drivers/staging/hisysevent/Kconfig" +- +-source "drivers/staging/zerohung/Kconfig" +- +-source "drivers/staging/hungtask/Kconfig" + endif # STAGING +diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile +index aec6e94a3..ffa70dda4 100644 +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -28,8 +28,3 @@ obj-$(CONFIG_PI433) += pi433/ + obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ + obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ + obj-$(CONFIG_QLGE) += qlge/ +-obj-$(CONFIG_HILOG) += hilog/ +-obj-$(CONFIG_HIEVENT) += hievent/ +-obj-$(CONFIG_HISYSEVENT) += hisysevent/ +-obj-$(CONFIG_DFX_ZEROHUNG) += zerohung/ +-obj-$(CONFIG_DFX_HUNGTASK) += hungtask/ +diff --git a/drivers/staging/blackbox/Kconfig 
b/drivers/staging/blackbox/Kconfig +deleted file mode 100644 +index 0e985823c..000000000 +--- a/drivers/staging/blackbox/Kconfig ++++ /dev/null +@@ -1,108 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-menu "Blackbox Options" +- +-config BLACKBOX +- bool "Support for blackbox" +- select STORAGE if BLACKBOX_STORAGE_MATERIAL +- default y +- help +- The blackbox is a fault log collecting framework for registered modules +- of chips. When a fault occurs, blackbox will invoke the registered +- function to save the log and reset the module. +- +-config BLACKBOX_LOG_ROOT_PATH +- string "root path of the blackbox log" +- depends on BLACKBOX +- help +- define the root path of the blackbox log +- +-config BLACKBOX_LOG_PART_REPRESENTATIVE +- string "representative of the blackbox log part" +- depends on BLACKBOX +- help +- define the representative of the blackbox log part +- +-config BLACKBOX_STORAGE_BY_MEMORY +- tristate "blackbox fault log storage by memory directly" +- depends on BLACKBOX +- select STORAGE_BY_MEMORY +- help +- This option enables saving fault logs with memory by blackbox when a +- panic occurs. It depends on supporting warm reset and disabling erase +- ddr when warm reset. +- +-config BLACKBOX_USE_PSTORE_BLK_DEBUG +- bool "blackbox use pstore blk for debug" +- depends on BLACKBOX +- default n +- help +- If Y, this enables pstore blk for blackbox. +- +-config BLACKBOX_STORAGE_BY_PSTORE_BLK +- tristate "blackbox fault log storage by pstore blk" +- depends on BLACKBOX +- depends on PSTORE_BLK +- depends on PSTORE_BLACKBOX +- select STORAGE_BY_PSTORE_BLK +- help +- This option enables saving fault logs with pstore blk by blackbox when a +- panic occurs. It depends on supporting pstore blk. Especially, flash +- driver's panic_write implementation is needed. Othersize, if a panic +- happen, then fault log can not be saved. +- +-config BLACKBOX_STORAGE_BY_PSTORE_RAM +- tristate "blackbox fault log storage by pstore ram" +- depends on BLACKBOX +- depends on PSTORE_RAM +- depends on PSTORE_BLACKBOX +- select STORAGE_BY_PSTORE_RAM +- help +- This option enables saving fault logs with pstore ram by blackbox when a +- panic occurs. It depends on supporting pstore ram. +- +-config BLACKBOX_STORAGE_BY_RAW_PARTITION +- tristate "blackbox fault log storage by RAW partition" +- depends on BLACKBOX +- select STORAGE_BY_RAW_PARTITION +- help +- This option enables saving fault logs with RAW partition by blackbox when a +- panic occurs. It depends on reserving partition for blackbox. +- +-config BLACKBOX_STORAGE_MATERIAL +- def_bool y +- depends on BLACKBOX +- depends on BLACKBOX_STORAGE_BY_MEMORY || BLACKBOX_STORAGE_BY_PSTORE_BLK || \ +- BLACKBOX_STORAGE_BY_PSTORE_RAM || BLACKBOX_STORAGE_BY_RAW_PARTITION +- +-choice +- prompt "Default storage material for fault log when a panic occurs." +- depends on BLACKBOX_STORAGE_MATERIAL +- help +- This option choose the default fault log material for blackbox when a +- panic occurs. +- +- The default materail is ram directly. It's easy, but not work offen. 
+- +- config DEF_BLACKBOX_STORAGE_BY_MEMORY +- bool "memory" if BLACKBOX_STORAGE_BY_MEMORY +- +- config DEF_BLACKBOX_STORAGE_BY_PSTORE_BLK +- bool "pstore_blk" if BLACKBOX_STORAGE_BY_PSTORE_BLK +- +- config DEF_BLACKBOX_STORAGE_BY_PSTORE_RAM +- bool "pstore_ram" if BLACKBOX_STORAGE_BY_PSTORE_RAM +- +- config DEF_BLACKBOX_STORAGE_BY_RAW_PARTITION +- bool "raw_partition" if BLACKBOX_STORAGE_BY_RAW_PARTITION +- +-endchoice +- +-config DEF_BLACKBOX_STORAGE +- string +- depends on BLACKBOX_STORAGE_MATERIAL +- default "memory" if DEF_BLACKBOX_STORAGE_BY_MEMORY +- default "pstore_blk" if DEF_BLACKBOX_STORAGE_BY_PSTORE_BLK +- default "pstore_ram" if DEF_BLACKBOX_STORAGE_BY_PSTORE_RAM +- default "raw_partition" if DEF_BLACKBOX_STORAGE_BY_RAW_PARTITION +- +-endmenu +diff --git a/drivers/staging/blackbox/Makefile b/drivers/staging/blackbox/Makefile +deleted file mode 100644 +index 9befa81a1..000000000 +--- a/drivers/staging/blackbox/Makefile ++++ /dev/null +@@ -1,5 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +- +-obj-$(CONFIG_BLACKBOX) += blackbox_core.o \ +- blackbox_storage.o \ +- blackbox_common.o +diff --git a/drivers/staging/blackbox/blackbox_common.c b/drivers/staging/blackbox/blackbox_common.c +deleted file mode 100644 +index cfd5212b6..000000000 +--- a/drivers/staging/blackbox/blackbox_common.c ++++ /dev/null +@@ -1,255 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-void sys_reset(void) +-{ +- bbox_print_info("reset the system now!\n"); +- emergency_restart(); +- bbox_print_info("reset the system failed!\n"); +-} +- +-void change_own(char *path, int uid, int gid) +-{ +- int ret = -1; +- +- if (unlikely(!path || uid == -1 || gid == -1)) { +- bbox_print_err("path or uid or gid error.\n"); +- return; +- } +- +- ret = ksys_chown(path, uid, gid); +- if (ret != 0) +- bbox_print_err("ksys_chown [%s] failed, ret: %d\n", path, ret); +-} +- +-int full_write_file(const char *pfile_path, char *buf, +- size_t buf_size, bool is_append) +-{ +- struct file *filp = NULL; +- char *pathname = NULL; +- loff_t pos = 0; +- int ret = -1; +- +- if (unlikely(!pfile_path || !buf)) { +- bbox_print_err("pfile_path or buf is NULL!\n"); +- return -EINVAL; +- } +- +- filp = file_open(pfile_path, O_CREAT | O_RDWR | +- (is_append ? O_APPEND : O_TRUNC), BBOX_FILE_LIMIT); +- if (IS_ERR(filp)) { +- bbox_print_err("open %s failed! [%ld]\n", pfile_path, PTR_ERR(filp)); +- return -EBADF; +- } +- +- ret = __kernel_write(filp, (const void *)buf, buf_size, &pos); +- +- file_close(filp); +- +- if (ret < 0) { +- pathname = getfullpath(filp); +- bbox_print_err("write [%s] failed! [%d]\n", pathname ? 
pathname : "", ret); +- return ret; +- } +- +- return 0; +-} +- +-int file_exists(const char *name) +-{ +- struct path path; +- int ret; +- +- ret = kern_path(name, LOOKUP_FOLLOW, &path); +- if (ret) +- return ret; +- +- ret = inode_permission(&nop_mnt_idmap, d_inode(path.dentry), MAY_ACCESS); +- path_put(&path); +- return ret; +-} +- +-static int create_new_dir(char *name) +-{ +- struct dentry *dentry; +- struct path path; +- int ret; +- +- if (unlikely(!name)) { +- bbox_print_err("name is NULL!\n"); +- return -EINVAL; +- } +- +- ret = file_exists(name); +- if (ret) { +- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); +- if (IS_ERR(dentry)) +- return PTR_ERR(dentry); +- +- ret = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, BBOX_DIR_LIMIT); +- if (ret && ret != -EEXIST) +- bbox_print_err("Create dir [%s] failed! ret: %d\n", name, ret); +- +- done_path_create(&path, dentry); +- } +- +- return 0; +-} +- +-int create_log_dir(const char *path) +-{ +- char *cur_path = NULL; +- int index = 0; +- +- if (unlikely(!path)) { +- bbox_print_err("path is NULL!\n"); +- return -EINVAL; +- } +- +- if (*path != '/') +- return -EINVAL; +- cur_path = kmalloc(PATH_MAX_LEN + 1, GFP_KERNEL); +- if (unlikely(!cur_path)) { +- bbox_print_err("kmalloc failed!\n"); +- return -ENOMEM; +- } +- memset(cur_path, 0, PATH_MAX_LEN + 1); +- cur_path[index++] = *path++; +- while (*path != '\0') { +- if (*path == '/') +- create_new_dir(cur_path); +- cur_path[index] = *path; +- path++; +- index++; +- } +- create_new_dir(cur_path); +- kfree(cur_path); +- +- return 0; +-} +- +-void get_timestamp(char *buf, size_t buf_size) +-{ +- struct rtc_time tm; +- struct timespec64 tv; +- +- if (unlikely(!buf || buf_size == 0)) { +- bbox_print_err("buf: %p, buf_size: %u\n", buf, (unsigned int)buf_size); +- return; +- } +- +- memset(buf, 0, buf_size); +- memset(&tm, 0, sizeof(tm)); +- +- memset(&tv, 0, sizeof(tv)); +- ktime_get_real_ts64(&tv); +- tv.tv_sec -= (long)sys_tz.tz_minuteswest * SECONDS_PER_MINUTE; +- rtc_time64_to_tm(tv.tv_sec, &tm); +- +- (void)scnprintf(buf, buf_size, TIMESTAMP_FORMAT, +- tm.tm_year + YEAR_BASE, tm.tm_mon + 1, tm.tm_mday, +- tm.tm_hour, tm.tm_min, tm.tm_sec, get_ticks()); +- buf[buf_size - 1] = '\0'; +-} +-EXPORT_SYMBOL_GPL(get_timestamp); +- +-unsigned long long get_ticks(void) +-{ +- /* use only one int value to save time: */ +- +- struct timespec64 uptime; +- +- ktime_get_ts64(&uptime); +- +- ktime_get_boottime_ts64(&uptime); +- +- return (u64)uptime.tv_sec; +-} +- +-static inline struct dentry *lock_parent(struct dentry *dentry) +-{ +- struct dentry *dir = dget_parent(dentry); +- +- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); +- return dir; +-} +- +-static inline void unlock_dir(struct dentry *dentry) +-{ +- inode_unlock(d_inode(dentry)); +- dput(dentry); +-} +- +-struct file *file_open(const char *filename, int open_mode, int mode) +-{ +- struct file *filp = NULL; +- +- filp = filp_open(filename, open_mode, mode); +- +- return filp; +-} +- +-void file_close(struct file *filp) +-{ +- if (likely(filp)) +- filp_close(filp, NULL); +-} +- +-int file_delete(struct file *filp) +-{ +- struct dentry *dentry = NULL; +- struct dentry *parent = NULL; +- int ret = 0; +- +- if (unlikely(!filp)) { +- bbox_print_err("file is NULL!\n"); +- return -EINVAL; +- } +- +- dentry = file_dentry(filp); +- parent = lock_parent(dentry); +- +- if (dentry->d_parent == parent) { +- dget(dentry); +- ret = vfs_unlink(&nop_mnt_idmap, d_inode(parent), dentry, NULL); +- dput(dentry); +- } +- +- 
unlock_dir(parent); +- +- return ret; +-} +- +-char *getfullpath(struct file *filp) +-{ +- char *buf = NULL, *path = NULL; +- +- if (unlikely(!filp)) +- return NULL; +- +- buf = kmalloc(PATH_MAX, GFP_KERNEL); +- if (unlikely(!buf)) +- return NULL; +- memset(buf, 0, PATH_MAX); +- +- // get the path +- path = d_path(&filp->f_path, buf, PATH_MAX); +- +- kfree(buf); +- +- return path; +-} +diff --git a/drivers/staging/blackbox/blackbox_core.c b/drivers/staging/blackbox/blackbox_core.c +deleted file mode 100644 +index f7d349488..000000000 +--- a/drivers/staging/blackbox/blackbox_core.c ++++ /dev/null +@@ -1,592 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef CONFIG_DFX_ZEROHUNG +-#include +-#endif +-#include +-#include +- +-/* ---- local macroes ---- */ +-/* bbox/BBOX - blackbox */ +-#define HISTORY_LOG_NAME "history.log" +-#define LOG_PART_WAIT_TIME 1000 /* unit: ms */ +-#define HISTORY_LOG_MAX_LEN 1024 +-#define TOP_CATEGORY_SYSTEM_RESET "System Reset" +-#define TOP_CATEGORY_FREEZE "System Freeze" +-#define TOP_CATEGORY_SYSTEM_POWEROFF "POWEROFF" +-#define TOP_CATEGORY_SUBSYSTEM_CRASH "Subsystem Crash" +- +-#ifndef CONFIG_BLACKBOX_LOG_ROOT_PATH +-#error no blackbox log root path +-#endif +-#ifndef CONFIG_BLACKBOX_LOG_PART_REPRESENTATIVE +-#error no representative of the blackbox log part +-#endif +- +-/* ---- local prototypes ---- */ +-struct bbox_ops { +- struct list_head list; +- struct module_ops ops; +-}; +- +-struct error_info_to_category { +- const char *module; +- struct { +- const char *event; +- const char *category; +- const char *top_category; +- } map; +-}; +- +-/* ---- local variables ---- */ +-static LIST_HEAD(ops_list); +-static DEFINE_SPINLOCK(ops_list_lock); +-static DEFINE_SEMAPHORE(temp_error_info_sem, 1); +-static struct error_info_to_category error_info_categories[] = { +- { +- MODULE_SYSTEM, +- {EVENT_SYSREBOOT, CATEGORY_SYSTEM_REBOOT, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_LONGPRESS, CATEGORY_SYSTEM_REBOOT, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_COMBINATIONKEY, CATEGORY_SYSTEM_REBOOT, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_SUBSYSREBOOT, CATEGORY_SYSTEM_REBOOT, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_POWEROFF, CATEGORY_SYSTEM_POWEROFF, TOP_CATEGORY_SYSTEM_POWEROFF} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_PANIC, CATEGORY_SYSTEM_PANIC, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_OOPS, CATEGORY_SYSTEM_OOPS, TOP_CATEGORY_SYSTEM_RESET} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_SYS_WATCHDOG, CATEGORY_SYSTEM_WATCHDOG, TOP_CATEGORY_FREEZE} +- }, +- { +- MODULE_SYSTEM, +- {EVENT_HUNGTASK, CATEGORY_SYSTEM_HUNGTASK, TOP_CATEGORY_FREEZE} +- }, +-#ifdef CONFIG_BLACKBOX_EXPAND_EVENT +- #include +-#endif +-}; +- +-struct error_info *temp_error_info; +- +-/* ---- local function prototypes ---- */ +-static const char *get_top_category(const char *module, const char *event); +-static const char *get_category(const char *module, const char *event); +-static void format_log_dir(char *buf, size_t buf_size, const char *log_root_dir, +- const char *timestamp); +-static void save_history_log(const char *log_root_dir, struct error_info *info, +- const char *timestamp, int need_sys_reset); +-#ifdef CONFIG_BLACKBOX_DEBUG +-static void 
save_invalid_log(const struct bbox_ops *ops, const struct error_info *info); +-#endif +-static void wait_for_log_part(void); +-static void format_error_info(struct error_info *info, const char event[EVENT_MAX_LEN], +- const char module[MODULE_MAX_LEN], +- const char error_desc[ERROR_DESC_MAX_LEN]); +-static void save_last_log(void); +-static int save_error_log(void *pparam); +- +-/* ---- global function prototypes ---- */ +- +-/* ---- function definitions ---- */ +-static const char *get_top_category(const char *module, const char *event) +-{ +- int i; +- int count = (int)ARRAY_SIZE(error_info_categories); +- +- if (unlikely(!module || !event)) { +- bbox_print_err("module: %p, event: %p\n", module, event); +- return TOP_CATEGORY_SUBSYSTEM_CRASH; +- } +- +- for (i = 0; i < count; i++) { +- if (!strcmp(error_info_categories[i].module, module) && +- !strcmp(error_info_categories[i].map.event, event)) +- return error_info_categories[i].map.top_category; +- } +- if (!strcmp(module, MODULE_SYSTEM)) +- return TOP_CATEGORY_SYSTEM_RESET; +- +- return TOP_CATEGORY_SUBSYSTEM_CRASH; +-} +- +-static const char *get_category(const char *module, const char *event) +-{ +- int i; +- int count = (int)ARRAY_SIZE(error_info_categories); +- +- if (unlikely(!module || !event)) { +- bbox_print_err("module: %p, event: %p\n", module, event); +- return CATEGORY_SUBSYSTEM_CUSTOM; +- } +- +- for (i = 0; i < count; i++) { +- if (!strcmp(error_info_categories[i].module, module) && +- !strcmp(error_info_categories[i].map.event, event)) +- return error_info_categories[i].map.category; +- } +- if (!strcmp(module, MODULE_SYSTEM)) +- return CATEGORY_SYSTEM_CUSTOM; +- +- return CATEGORY_SUBSYSTEM_CUSTOM; +-} +- +-static void format_log_dir(char *buf, size_t buf_size, const char *log_root_dir, +- const char *timestamp) +-{ +- if (unlikely(!buf || buf_size == 0 || !log_root_dir || +- !timestamp)) { +- bbox_print_err("buf: %p, buf_size: %u, log_root_dir: %p, timestamp: %p\n", +- buf, (unsigned int)buf_size, log_root_dir, timestamp); +- return; +- } +- +- memset(buf, 0, buf_size); +- scnprintf(buf, buf_size - 1, "%s/%s", log_root_dir, timestamp); +-} +- +-static void format_error_info(struct error_info *info, const char event[EVENT_MAX_LEN], +- const char module[MODULE_MAX_LEN], +- const char error_desc[ERROR_DESC_MAX_LEN]) +-{ +- if (unlikely(!info || !event || !module || !error_desc)) { +- bbox_print_err("info: %p, event: %p, module: %p, error_desc: %p\n", +- info, event, module, error_desc); +- return; +- } +- +- memset(info, 0, sizeof(*info)); +- strncpy(info->event, event, min(strlen(event), +- sizeof(info->event) - 1)); +- strncpy(info->module, module, min(strlen(module), +- sizeof(info->module) - 1)); +- strncpy(info->category, get_category(module, event), +- min(strlen(get_category(module, event)), sizeof(info->category) - 1)); +- get_timestamp(info->error_time, TIMESTAMP_MAX_LEN); +- strncpy(info->error_desc, error_desc, min(strlen(error_desc), +- sizeof(info->error_desc) - 1)); +-} +- +-static void save_history_log(const char *log_root_dir, struct error_info *info, +- const char *timestamp, int need_sys_reset) +-{ +- char history_log_path[PATH_MAX_LEN]; +- char *buf; +- const char *bbox_sysreset; +- +- if (unlikely(!log_root_dir || !info || !timestamp)) { +- bbox_print_err("log_root_dir: %p, info: %p, timestamp: %p\n", +- log_root_dir, info, timestamp); +- return; +- } +- +- buf = kmalloc(HISTORY_LOG_MAX_LEN + 1, GFP_KERNEL); +- if (!buf) +- return; +- memset(buf, 0, HISTORY_LOG_MAX_LEN + 1); +-#ifdef 
CONFIG_DFX_ZEROHUNG +- bbox_sysreset = need_sys_reset ? "true" : "false"; +- zrhung_send_event_bbox("KERNEL_VENDOR", info->category, timestamp, bbox_sysreset); +-#endif +- memset(history_log_path, 0, sizeof(history_log_path)); +- scnprintf(history_log_path, sizeof(history_log_path) - 1, +- "%s/%s", log_root_dir, HISTORY_LOG_NAME); +- ksys_sync(); +- kfree(buf); +-} +- +-#ifdef CONFIG_BLACKBOX_DEBUG +-static void save_invalid_log(const struct bbox_ops *ops, const struct error_info *info) +-{ +- char invalid_log_path[PATH_MAX_LEN]; +- char timestamp[TIMESTAMP_MAX_LEN]; +- +- if (unlikely(!ops || !info)) { +- bbox_print_err("ops: %p, info: %p\n", ops, info); +- return; +- } +- +- get_timestamp(timestamp, sizeof(timestamp)); +- format_log_dir(invalid_log_path, PATH_MAX_LEN, CONFIG_BLACKBOX_LOG_PART_REPRESENTATIVE, +- timestamp); +- create_log_dir(invalid_log_path); +- if (ops->ops.save_last_log(invalid_log_path, (struct error_info *)info) != 0) +- bbox_print_err("[%s] failed to save invalid log!\n", ops->ops.module); +-} +-#endif +- +-static bool is_log_part_mounted(void) +-{ +- return file_exists(CONFIG_BLACKBOX_LOG_PART_REPRESENTATIVE) == 0; +-} +- +-static void wait_for_log_part(void) +-{ +- bbox_print_info("wait for log part [%s] begin!\n", +- CONFIG_BLACKBOX_LOG_PART_REPRESENTATIVE); +- while (!is_log_part_mounted()) +- msleep(LOG_PART_WAIT_TIME); +- +- bbox_print_info("wait for log part [%s] end!\n", +- CONFIG_BLACKBOX_LOG_PART_REPRESENTATIVE); +-} +- +-static bool find_module_ops(struct error_info *info, struct bbox_ops **ops) +-{ +- struct bbox_ops *cur = NULL; +- bool find_module = false; +- +- if (unlikely(!info || !ops)) { +- bbox_print_err("info: %p, ops: %p!\n", info, ops); +- return find_module; +- } +- +- list_for_each_entry(cur, &ops_list, list) { +- if (!strcmp(cur->ops.module, info->module)) { +- *ops = cur; +- find_module = true; +- break; +- } +- } +- if (!find_module) +- bbox_print_err("[%s] hasn't been registered!\n", info->module); +- +- return find_module; +-} +- +-static void invoke_module_ops(const char *log_dir, struct error_info *info, +- struct bbox_ops *ops) +-{ +- if (unlikely(!info || !ops)) { +- bbox_print_err("info: %p, ops: %p!\n", info, ops); +- return; +- } +- +- if (ops->ops.dump && log_dir) { +- bbox_print_info("[%s] starts dumping data!\n", ops->ops.module); +- ops->ops.dump(log_dir, info); +- bbox_print_info("[%s] ends dumping data!\n", ops->ops.module); +- } +- if (ops->ops.reset) { +- bbox_print_info("[%s] starts resetting!\n", ops->ops.module); +- ops->ops.reset(info); +- bbox_print_info("[%s] ends resetting!\n", ops->ops.module); +- } +-} +- +-static void save_log_without_reset(struct error_info *info) +-{ +- unsigned long flags; +- struct bbox_ops *ops = NULL; +- char *log_dir = NULL; +- char timestamp[TIMESTAMP_MAX_LEN]; +- +- if (unlikely(!info)) { +- bbox_print_err("info: %p!\n", info); +- return; +- } +- +- /* get timestamp */ +- get_timestamp(timestamp, sizeof(timestamp)); +- +- /* get bbox ops */ +- spin_lock_irqsave(&ops_list_lock, flags); +- if (!find_module_ops(info, &ops)) { +- spin_unlock_irqrestore(&ops_list_lock, flags); +- return; +- } +- spin_unlock_irqrestore(&ops_list_lock, flags); +- create_log_dir(CONFIG_BLACKBOX_LOG_ROOT_PATH); +- if (ops->ops.dump) { +- /* create log root path */ +- log_dir = kmalloc(PATH_MAX_LEN, GFP_KERNEL); +- if (log_dir) { +- format_log_dir(log_dir, PATH_MAX_LEN, +- CONFIG_BLACKBOX_LOG_ROOT_PATH, timestamp); +- create_log_dir(log_dir); +- } else +- bbox_print_err("kmalloc failed!\n"); +- } +- 
invoke_module_ops(log_dir, info, ops); +- save_history_log(CONFIG_BLACKBOX_LOG_ROOT_PATH, info, timestamp, 0); +- kfree(log_dir); +-} +- +-static void save_log_with_reset(struct error_info *info) +-{ +- struct bbox_ops *ops = NULL; +- +- if (unlikely(!info)) { +- bbox_print_err("info: %p!\n", info); +- return; +- } +- +- if (!find_module_ops(info, &ops)) +- return; +- +- invoke_module_ops("", info, ops); +- if (strcmp(info->category, CATEGORY_SYSTEM_REBOOT) && +- strcmp(info->category, CATEGORY_SYSTEM_PANIC)) +- sys_reset(); +-} +- +-static void save_temp_error_info(const char event[EVENT_MAX_LEN], +- const char module[MODULE_MAX_LEN], +- const char error_desc[ERROR_DESC_MAX_LEN]) +-{ +- if (unlikely(!event || !module || !error_desc)) { +- bbox_print_err("event: %p, module: %p, error_desc: %p\n", +- event, module, error_desc); +- return; +- } +- +- down(&temp_error_info_sem); +- format_error_info(temp_error_info, event, module, error_desc); +- up(&temp_error_info_sem); +-} +- +-static void do_save_last_log(const struct bbox_ops *ops, struct error_info *info) +-{ +- char *log_dir = NULL; +- int ret; +- +- if (unlikely(!ops || !info)) { +- bbox_print_err("ops: %p, info: %p\n", +- ops, info); +- return; +- } +- +- memset((void *)info, 0, sizeof(*info)); +- ret = ops->ops.get_last_log_info((struct error_info *)info); +- if (ret) { +- bbox_print_err("[%s] failed to get log info!\n", ops->ops.module); +-#ifdef CONFIG_BLACKBOX_DEBUG +- if (ret == -ENOMSG) +- save_invalid_log(ops, info); +-#endif +- return; +- } +- +- strncpy(info->category, get_category(info->module, info->event), +- min(strlen(get_category(info->module, info->event)), sizeof(info->category) - 1)); +- +- bbox_print_info("[%s] starts saving log!\n", ops->ops.module); +- bbox_print_info("event: [%s] module: [%s], time is [%s]!\n", +- info->event, info->module, info->error_time); +- +- log_dir = kmalloc(PATH_MAX_LEN, GFP_KERNEL); +- if (!log_dir) +- return; +- +- if (!strlen(info->error_time)) +- get_timestamp((char *)info->error_time, TIMESTAMP_MAX_LEN); +- +- format_log_dir(log_dir, PATH_MAX_LEN, CONFIG_BLACKBOX_LOG_ROOT_PATH, +- info->error_time); +- create_log_dir(log_dir); +- if (ops->ops.save_last_log(log_dir, (struct error_info *)info) == 0) +- save_history_log(CONFIG_BLACKBOX_LOG_ROOT_PATH, +- (struct error_info *)info, info->error_time, 1); +- else +- bbox_print_err("[%s] failed to save log!\n", ops->ops.module); +- kfree(log_dir); +-} +- +-static void save_last_log(void) +-{ +- unsigned long flags; +- struct error_info *info = NULL; +- struct bbox_ops *ops = NULL; +- +- info = kmalloc(sizeof(*info), GFP_KERNEL); +- if (!info) +- return; +- +- spin_lock_irqsave(&ops_list_lock, flags); +- list_for_each_entry(ops, &ops_list, list) { +- if (ops->ops.get_last_log_info && +- ops->ops.save_last_log) { +- spin_unlock_irqrestore(&ops_list_lock, flags); +- do_save_last_log(ops, info); +- spin_lock_irqsave(&ops_list_lock, flags); +- } else { +- bbox_print_err("[%s] get_last_log_info: %p, %s: %p\n", +- ops->ops.module, ops->ops.get_last_log_info, +- __func__, ops->ops.save_last_log); +- } +- } +- spin_unlock_irqrestore(&ops_list_lock, flags); +- kfree(info); +-} +- +-static void save_temp_error_log(void) +-{ +- down(&temp_error_info_sem); +- if (!temp_error_info) { +- bbox_print_err("temp_error_info: %p\n", temp_error_info); +- up(&temp_error_info_sem); +- return; +- } +- +- if (strlen(temp_error_info->event) != 0) +- save_log_without_reset(temp_error_info); +- +- kfree(temp_error_info); +- temp_error_info = NULL; +- 
up(&temp_error_info_sem); +-} +- +-static int save_error_log(void *pparam) +-{ +- wait_for_log_part(); +- save_last_log(); +- save_temp_error_log(); +- +- return 0; +-} +- +-int bbox_register_module_ops(struct module_ops *ops) +-{ +- struct bbox_ops *new_ops = NULL; +- struct bbox_ops *temp = NULL; +- unsigned long flags; +- +- if (unlikely(!ops)) { +- bbox_print_err("ops: %p\n", ops); +- return -EINVAL; +- } +- +- new_ops = kmalloc(sizeof(*new_ops), GFP_KERNEL); +- if (!new_ops) +- return -ENOMEM; +- memset(new_ops, 0, sizeof(*new_ops)); +- memcpy(&new_ops->ops, ops, sizeof(*ops)); +- spin_lock_irqsave(&ops_list_lock, flags); +- if (list_empty(&ops_list)) +- goto __out; +- +- list_for_each_entry(temp, &ops_list, list) { +- if (!strcmp(temp->ops.module, ops->module)) { +- spin_unlock_irqrestore(&ops_list_lock, flags); +- kfree(new_ops); +- bbox_print_info("[%s] has been registered!\n", temp->ops.module); +- return -ENODATA; +- } +- } +- +-__out: +- bbox_print_info("[%s] is registered successfully!\n", ops->module); +- list_add_tail(&new_ops->list, &ops_list); +- spin_unlock_irqrestore(&ops_list_lock, flags); +- +- return 0; +-} +- +-int bbox_notify_error(const char event[EVENT_MAX_LEN], const char module[MODULE_MAX_LEN], +- const char error_desc[ERROR_DESC_MAX_LEN], int need_sys_reset) +-{ +- struct error_info *info = NULL; +- +- if (unlikely(!event || !module || !error_desc)) { +- bbox_print_err("event: %p, module: %p, error_desc: %p\n", event, +- module, error_desc); +- return -EINVAL; +- } +- +- info = kmalloc(sizeof(*info), GFP_ATOMIC); +- if (!info) +- return -ENOMEM; +- +- format_error_info(info, event, module, error_desc); +- show_stack(current, NULL, KERN_DEFAULT); +- if (!need_sys_reset) { +- /* handle the error which do not need reset */ +- if (!is_log_part_mounted()) +- save_temp_error_info(event, module, error_desc); +- else +- save_log_without_reset(info); +- } else { +- /* handle the error which need reset */ +- save_log_with_reset(info); +- } +- +- kfree(info); +- +- return 0; +-} +- +-static void __init select_storage_material(void) +-{ +- const struct reboot_crashlog_storage *tmp = NULL; +- +- if (!storage_material) +- return; +- +- for (tmp = storage_lastwords; tmp->material; tmp++) { +- if (!strcmp(storage_material, tmp->material)) { +- storage_lastword = tmp; +- return; +- } +- } +-} +- +-static int __init blackbox_core_init(void) +-{ +- struct task_struct *tsk = NULL; +- +- select_storage_material(); +- +- temp_error_info = kmalloc(sizeof(*temp_error_info), GFP_KERNEL); +- if (!temp_error_info) +- return -ENOMEM; +- +- memset(temp_error_info, 0, sizeof(*temp_error_info)); +- +- /* Create a kernel thread to save log */ +- tsk = kthread_run(save_error_log, NULL, "save_error_log"); +- if (IS_ERR(tsk)) { +- kfree(temp_error_info); +- temp_error_info = NULL; +- bbox_print_err("kthread_run failed!\n"); +- return -ESRCH; +- } +- +- return 0; +-} +- +-core_initcall(blackbox_core_init); +-MODULE_LICENSE("GPL v2"); +-MODULE_DESCRIPTION("Blackbox core framework"); +-MODULE_AUTHOR("OHOS"); +diff --git a/drivers/staging/blackbox/blackbox_storage.c b/drivers/staging/blackbox/blackbox_storage.c +deleted file mode 100644 +index 117d05ea4..000000000 +--- a/drivers/staging/blackbox/blackbox_storage.c ++++ /dev/null +@@ -1,194 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-char *storage_material = +-#ifdef CONFIG_DEF_BLACKBOX_STORAGE +- CONFIG_DEF_BLACKBOX_STORAGE; +-#else +- NULL; +-#endif +-const struct reboot_crashlog_storage *storage_lastword __ro_after_init; +- +-bool g_blackbox_flag = false; +-EXPORT_SYMBOL(g_blackbox_flag); +- +-#if IS_ENABLED(CONFIG_DEF_BLACKBOX_STORAGE_BY_MEMORY) +-static DEFINE_SEMAPHORE(kmsg_sem, 1); +-static char *lastlog; +-unsigned int lastlog_len; +-static int get_log_by_memory(void *in, unsigned int inlen) +-{ +- return 0; +-} +- +-static int storage_log_by_memory(void *out, unsigned int outlen) +-{ +- if (unlikely(!out)) +- return -EINVAL; +- +- /* Initialized from caller. */ +- lastlog = out; +- lastlog_len = outlen; +- return 0; +-} +- +-/* Called after storage_log_by_memory successfully. */ +-static void do_kmsg_dump(struct kmsg_dumper *dumper, +- enum kmsg_dump_reason reason) +-{ +- struct fault_log_info *pinfo; +- struct kmsg_dump_iter iter; +- if (unlikely(!lastlog)) +- return; +- +- /* get kernel log from kmsg dump module */ +- if (down_trylock(&kmsg_sem) != 0) { +- bbox_print_err("down_trylock failed!\n"); +- return; +- } +- kmsg_dump_rewind(&iter); +- pinfo = (struct fault_log_info *)lastlog; +- (void)kmsg_dump_get_buffer(dumper, true, lastlog + sizeof(*pinfo), +- lastlog_len - sizeof(*pinfo), (size_t *)&pinfo->len); +- up(&kmsg_sem); +-} +-#endif +- +-#if defined(CONFIG_DEF_BLACKBOX_STORAGE_BY_PSTORE_BLK) || \ +- defined(CONFIG_DEF_BLACKBOX_STORAGE_BY_PSTORE_RAM) +-#define LOG_FILE_WAIT_TIME 1000 /* unit: ms */ +-#define RETRY_MAX_COUNT 10 +-#define PSTORE_MOUNT_POINT "/sys/fs/pstore/" +-#define FILE_LIMIT (0660) +- +-static bool is_pstore_part_ready(char *pstore_file) +-{ +- const char *cur_name = NULL; +- struct dentry *root_dentry; +- struct dentry *cur_dentry; +- struct file *filp = NULL; +- char *full_path = NULL; +- bool is_ready = false; +- +- if (unlikely(!pstore_file)) +- return -EINVAL; +- memset(pstore_file, 0, sizeof(*pstore_file)); +- +- filp = file_open(PSTORE_MOUNT_POINT, O_RDONLY, 0); +- if (IS_ERR(filp)) { +- bbox_print_err("open %s failed! 
err is [%ld]\n", PSTORE_MOUNT_POINT, PTR_ERR(filp)); +- return -EBADF; +- } +- +- full_path = vmalloc(PATH_MAX_LEN); +- if (!full_path) +- goto __out; +- +- root_dentry = filp->f_path.dentry; +- list_for_each_entry(cur_dentry, &root_dentry->d_subdirs, d_child) { +- cur_name = cur_dentry->d_name.name; +- +- memset(full_path, 0, PATH_MAX_LEN); +- snprintf(full_path, PATH_MAX_LEN - 1, "%s%s", PSTORE_MOUNT_POINT, cur_name); +- +- if (S_ISREG(d_inode(cur_dentry)->i_mode) && !strncmp(cur_name, "blackbox", +- strlen("blackbox"))) { +- is_ready = true; +- if (strcmp(full_path, pstore_file) > 0) +- strncpy(pstore_file, full_path, strlen(full_path)); +- } +- } +- +- if (is_ready && strlen(pstore_file)) +- bbox_print_info("get pstore file name %s successfully!\n", pstore_file); +- +-__out: +- file_close(filp); +- vfree(full_path); +- +- return is_ready; +-} +- +-static int get_log_by_pstore(void *in, unsigned int inlen) +-{ +- char pstore_file[PATH_MAX_LEN]; +- struct file *filp = NULL; +- char *pathname = NULL; +- void *pbuf = NULL; +- loff_t pos = 0; +- static int retry; +- int ret = -1; +- +- memset(pstore_file, 0, PATH_MAX_LEN); +- while (!is_pstore_part_ready((char *)&pstore_file)) { +- msleep(LOG_FILE_WAIT_TIME); +- retry++; +- if (retry >= RETRY_MAX_COUNT) +- return -ENOENT; +- } +- +- if (likely(in)) { +- filp = file_open(pstore_file, O_RDONLY, FILE_LIMIT); +- if (IS_ERR(filp)) { +- bbox_print_err("open %s failed! err is [%ld]\n", pstore_file, +- PTR_ERR(filp)); +- return -EBADF; +- } +- +- printk("bbox %s %d, read %s is succ\n", __func__, __LINE__, getfullpath(filp) ? getfullpath(filp) : ""); +- g_blackbox_flag = true; +- +- file_close(filp); +- file_delete(filp); +- return 0; +- } +- +- return -EBADF; +-__error: +- file_close(filp); +- return -EIO; +-} +-#endif +- +-const struct reboot_crashlog_storage storage_lastwords[] = { +-#if IS_ENABLED(CONFIG_DEF_BLACKBOX_STORAGE_BY_MEMORY) +- { +- .get_log = get_log_by_memory, +- .storage_log = storage_log_by_memory, +- .blackbox_dump = do_kmsg_dump, +- .material = "memory", +- }, +-#endif +-#if IS_ENABLED(CONFIG_DEF_BLACKBOX_STORAGE_BY_PSTORE_BLK) +- { +- .get_log = get_log_by_pstore, +- .blackbox_dump = pstore_blackbox_dump, +- .material = "pstore_blk", +- }, +-#endif +-#if IS_ENABLED(CONFIG_DEF_BLACKBOX_STORAGE_BY_PSTORE_RAM) +- { +- .get_log = get_log_by_pstore, +- .blackbox_dump = pstore_blackbox_dump, +- .material = "pstore_ram", +- }, +-#endif +-#if IS_ENABLED(CONFIG_DEF_BLACKBOX_STORAGE_BY_RAW_PARTITION) +- { +- .material = "raw_partition", +- }, +-#endif +- { } +-}; +- +diff --git a/drivers/staging/hievent/Kconfig b/drivers/staging/hievent/Kconfig +deleted file mode 100644 +index b445a2b90..000000000 +--- a/drivers/staging/hievent/Kconfig ++++ /dev/null +@@ -1,12 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config HIEVENT +- tristate "Enable hievent" +- help +- hievent buffer manager +- +-config BBOX_BUFFER_SIZE +- int "bbox buffer size" +- depends on HIEVENT +- default 2048 +- help +- Define the default ring buffer size of BBOX +\ No newline at end of file +diff --git a/drivers/staging/hievent/Makefile b/drivers/staging/hievent/Makefile +deleted file mode 100644 +index 5b2adc23a..000000000 +--- a/drivers/staging/hievent/Makefile ++++ /dev/null +@@ -1,2 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-obj-$(CONFIG_HIEVENT) += hievent_driver.o +\ No newline at end of file +diff --git a/drivers/staging/hievent/hievent_driver.c b/drivers/staging/hievent/hievent_driver.c +deleted file mode 100644 +index 9697432f2..000000000 +--- 
a/drivers/staging/hievent/hievent_driver.c ++++ /dev/null +@@ -1,423 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#define pr_fmt(fmt) "hievent_driver " fmt +- +-#include "hievent_driver.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-static struct class *hievent_class; +-static dev_t hievent_devno; +- +-#define HIEVENT_BUFFER ((size_t)CONFIG_BBOX_BUFFER_SIZE) +-#define HIEVENT_DRIVER "/dev/bbox" +-#define HIEVENT_DEV_NAME "bbox" +-#define HIEVENT_DEV_NR 1 +- +-struct hievent_entry { +- unsigned short len; +- unsigned short header_size; +- char msg[0]; +-}; +- +-struct hievent_char_device { +- struct cdev devm; +- int flag; +- struct mutex mtx; /* lock to protect read/write buffer */ +- unsigned char *buffer; +- wait_queue_head_t wq; +- size_t write_offset; +- size_t head_offset; +- size_t size; +- size_t count; +-} hievent_dev; +- +-static inline unsigned char *hievent_buffer_head(void) +-{ +- if (hievent_dev.head_offset > HIEVENT_BUFFER) +- hievent_dev.head_offset = +- hievent_dev.head_offset % HIEVENT_BUFFER; +- +- return hievent_dev.buffer + hievent_dev.head_offset; +-} +- +-static void hievent_buffer_inc(size_t sz) +-{ +- if (hievent_dev.size + sz <= HIEVENT_BUFFER) { +- hievent_dev.size += sz; +- hievent_dev.write_offset += sz; +- hievent_dev.write_offset %= HIEVENT_BUFFER; +- hievent_dev.count++; +- } +-} +- +-static void hievent_buffer_dec(size_t sz) +-{ +- if (hievent_dev.size >= sz) { +- hievent_dev.size -= sz; +- hievent_dev.head_offset += sz; +- hievent_dev.head_offset %= HIEVENT_BUFFER; +- hievent_dev.count--; +- } +-} +- +-static int hievent_read_ring_buffer(unsigned char __user *buffer, +- size_t buf_len) +-{ +- size_t retval; +- size_t buf_left = HIEVENT_BUFFER - hievent_dev.head_offset; +- +- if (buf_left > buf_len) { +- retval = copy_to_user(buffer, hievent_buffer_head(), buf_len); +- } else { +- size_t mem_len = (buf_len > buf_left) ? buf_left : buf_len; +- +- retval = copy_to_user(buffer, hievent_buffer_head(), mem_len); +- if (retval < 0) +- return retval; +- +- retval = copy_to_user(buffer + buf_left, hievent_dev.buffer, +- buf_len - buf_left); +- } +- return retval; +-} +- +-static int hievent_read_ring_head_buffer(unsigned char * const buffer, +- size_t buf_len) +-{ +- size_t buf_left = HIEVENT_BUFFER - hievent_dev.head_offset; +- +- if (buf_left > buf_len) { +- memcpy(buffer, hievent_buffer_head(), buf_len); +- } else { +- size_t mem_len = (buf_len > buf_left) ? 
buf_left : buf_len; +- +- memcpy(buffer, hievent_buffer_head(), mem_len); +- memcpy(buffer + buf_left, hievent_dev.buffer, +- buf_len - buf_left); +- } +- return 0; +-} +- +-static ssize_t hievent_read(struct file *file, char __user *user_buf, +- size_t count, loff_t *ppos) +-{ +- size_t retval; +- struct hievent_entry header; +- +- (void)file; +- +- if (wait_event_interruptible(hievent_dev.wq, (hievent_dev.size > 0))) +- return -EINVAL; +- +- (void)mutex_lock(&hievent_dev.mtx); +- +- if (hievent_dev.size == 0) { +- retval = 0; +- goto out; +- } +- +- retval = hievent_read_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval < 0) { +- retval = -EINVAL; +- goto out; +- } +- +- if (count < header.len + sizeof(header)) { +- retval = -ENOMEM; +- goto out; +- } +- +- hievent_buffer_dec(sizeof(header)); +- +- retval = hievent_read_ring_buffer((unsigned char __user *)(user_buf), header.len); +- if (retval < 0) { +- retval = -EINVAL; +- goto out; +- } +- hievent_buffer_dec(header.len); +- +- retval = header.len + sizeof(header); +-out: +- if (retval == -ENOMEM) { +- // clean ring buffer +- hievent_dev.write_offset = 0; +- hievent_dev.head_offset = 0; +- hievent_dev.size = 0; +- hievent_dev.count = 0; +- } +- (void)mutex_unlock(&hievent_dev.mtx); +- +- return retval; +-} +- +-static int hievent_write_ring_head_buffer(const unsigned char *buffer, +- size_t buf_len) +-{ +- size_t buf_left = HIEVENT_BUFFER - hievent_dev.write_offset; +- +- if (buf_len > buf_left) { +- memcpy(hievent_dev.buffer + hievent_dev.write_offset, +- buffer, buf_left); +- memcpy(hievent_dev.buffer, buffer + buf_left, +- min(HIEVENT_BUFFER, buf_len - buf_left)); +- } else { +- memcpy(hievent_dev.buffer + hievent_dev.write_offset, +- buffer, min(buf_left, buf_len)); +- } +- +- return 0; +-} +- +-static void hievent_head_init(struct hievent_entry * const header, size_t len) +-{ +- header->len = (unsigned short)len; +- header->header_size = sizeof(struct hievent_entry); +-} +- +-static void hievent_cover_old_log(size_t buf_len) +-{ +- int retval; +- struct hievent_entry header; +- size_t total_size = buf_len + sizeof(struct hievent_entry); +- +- while (total_size + hievent_dev.size > HIEVENT_BUFFER) { +- retval = hievent_read_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval < 0) +- break; +- +- /* let count decrease twice */ +- hievent_buffer_dec(sizeof(header)); +- hievent_buffer_dec(header.len); +- } +-} +- +-int hievent_write_internal(const char *buffer, size_t buf_len) +-{ +- struct hievent_entry header; +- int retval; +- +- if (buf_len < sizeof(int) || +- buf_len > HIEVENT_BUFFER - sizeof(struct hievent_entry)) +- return -EINVAL; +- +- (void)mutex_lock(&hievent_dev.mtx); +- +- hievent_cover_old_log(buf_len); +- +- hievent_head_init(&header, buf_len); +- retval = hievent_write_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval) { +- retval = -EINVAL; +- goto out; +- } +- hievent_buffer_inc(sizeof(header)); +- +- retval = hievent_write_ring_head_buffer((unsigned char *)(buffer), +- header.len); +- if (retval) { +- retval = -EINVAL; +- goto out; +- } +- +- hievent_buffer_inc(header.len); +- +- retval = header.len; +- +-out: +- (void)mutex_unlock(&hievent_dev.mtx); +- if (retval > 0) +- wake_up_interruptible(&hievent_dev.wq); +- +- return retval; +-} +-EXPORT_SYMBOL(hievent_write_internal); +- +-static unsigned int hievent_poll(struct file *filep, poll_table *wait) +-{ +- unsigned int mask = 0; +- +- poll_wait(filep, &hievent_dev.wq, wait); +- if 
(hievent_dev.size > 0) { +- mask |= POLLIN | POLLRDNORM; +- return mask; +- } +- +- return 0; +-} +- +-static ssize_t hievent_write_iter(struct kiocb *iocb, struct iov_iter *from) +-{ +- int check_code = 0; +- unsigned char *temp_buffer = NULL; +- const struct iovec *iov = iter_iov(from); +- int retval; +- size_t buf_len; +- (void)iocb; +- +- if (from->nr_segs != 2) { /* must contain 2 segments */ +- pr_err("invalid nr_segs: %ld", from->nr_segs); +- retval = -EINVAL; +- goto out; +- } +- +- /* seg 0 info is checkcode*/ +- retval = copy_from_user(&check_code, iov[0].iov_base, +- sizeof(check_code)); +- if (retval || check_code != CHECK_CODE) { +- retval = -EINVAL; +- goto out; +- } +- +- /* seg 1 info */ +- buf_len = iov[1].iov_len; +- if (buf_len > HIEVENT_BUFFER - sizeof(struct hievent_entry)) { +- retval = -ENOMEM; +- goto out; +- } +- +- temp_buffer = kmalloc(buf_len, GFP_KERNEL); +- if (!temp_buffer) { +- retval = -ENOMEM; +- goto out; +- } +- +- retval = copy_from_user(temp_buffer, iov[1].iov_base, iov[1].iov_len); +- if (retval) { +- retval = -EIO; +- goto free_mem; +- } +- +- retval = hievent_write_internal(temp_buffer, buf_len); +- if (retval < 0) { +- retval = -EIO; +- goto free_mem; +- } +- retval = buf_len + iov[0].iov_len; +- +-free_mem: +- kfree(temp_buffer); +- +-out: +- return retval; +-} +- +-static const struct file_operations hievent_fops = { +- .read = hievent_read, /* read */ +- .poll = hievent_poll, /* poll */ +- .write_iter = hievent_write_iter, /* write_iter */ +-}; +- +-static int hievent_device_init(void) +-{ +- hievent_dev.buffer = kmalloc(HIEVENT_BUFFER, GFP_KERNEL); +- if (!hievent_dev.buffer) +- return -ENOMEM; +- +- init_waitqueue_head(&hievent_dev.wq); +- mutex_init(&hievent_dev.mtx); +- hievent_dev.write_offset = 0; +- hievent_dev.head_offset = 0; +- hievent_dev.size = 0; +- hievent_dev.count = 0; +- +- return 0; +-} +- +-static int __init hieventdev_init(void) +-{ +- int result; +- struct device *dev_ret = NULL; +- +- result = alloc_chrdev_region(&hievent_devno, 0, HIEVENT_DEV_NR, HIEVENT_DEV_NAME); +- if (result < 0) { +- pr_err("register %s failed", HIEVENT_DRIVER); +- return -ENODEV; +- } +- +- cdev_init(&hievent_dev.devm, &hievent_fops); +- hievent_dev.devm.owner = THIS_MODULE; +- +- result = cdev_add(&hievent_dev.devm, hievent_devno, HIEVENT_DEV_NR); +- if (result < 0) { +- pr_err("cdev_add failed"); +- goto unreg_dev; +- } +- +- result = hievent_device_init(); +- if (result < 0) { +- pr_err("hievent_device_init failed"); +- goto del_dev; +- } +- +- hievent_class = class_create(HIEVENT_DEV_NAME); +- if (IS_ERR(hievent_class)) { +- pr_err("class_create failed"); +- goto del_buffer; +- } +- +- dev_ret = device_create(hievent_class, 0, hievent_devno, 0, HIEVENT_DEV_NAME); +- if (IS_ERR(dev_ret)) { +- pr_err("device_create failed"); +- goto del_class; +- } +- +- return 0; +- +-del_class: +- class_destroy(hievent_class); +-del_buffer: +- kfree(hievent_dev.buffer); +-del_dev: +- cdev_del(&hievent_dev.devm); +-unreg_dev: +- unregister_chrdev_region(hievent_devno, HIEVENT_DEV_NR); +- +- return -ENODEV; +-} +- +-static void __exit hievent_exit_module(void) +-{ +- device_destroy(hievent_class, hievent_devno); +- class_destroy(hievent_class); +- kfree(hievent_dev.buffer); +- cdev_del(&hievent_dev.devm); +- unregister_chrdev_region(hievent_devno, HIEVENT_DEV_NR); +-} +- +-static int __init hievent_init_module(void) +-{ +- int state; +- +- state = hieventdev_init(); +- return 0; +-} +- +-module_init(hievent_init_module); +-module_exit(hievent_exit_module); +- 
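For reference on the write path being removed here: hievent_write_iter() accepts exactly two iovec segments, segment 0 carrying the CHECK_CODE magic (0x7BCDABCD, defined in hievent_driver.h below) and segment 1 the event payload. A minimal userspace sketch of that calling convention, with the device path taken from HIEVENT_DRIVER and the payload text invented for illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define CHECK_CODE 0x7BCDABCD /* must match the driver's magic */

    int main(void)
    {
        int check = CHECK_CODE;
        char payload[] = "eventid 901000000 -t 1700000000"; /* invented event text */
        struct iovec vec[2] = {
            { .iov_base = &check,  .iov_len = sizeof(check)   }, /* seg 0: check code */
            { .iov_base = payload, .iov_len = sizeof(payload) }, /* seg 1: event data */
        };
        int fd = open("/dev/bbox", O_WRONLY);

        if (fd < 0) {
            perror("open /dev/bbox");
            return 1;
        }
        /* writev() reaches hievent_write_iter(); any segment count
         * other than 2 is rejected with -EINVAL. */
        if (writev(fd, vec, 2) < 0)
            perror("writev");
        close(fd);
        return 0;
    }

On success the driver reports iov[0].iov_len plus the payload length as the bytes written, i.e. the total consumed from both segments.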
+-MODULE_AUTHOR("OHOS"); +-MODULE_DESCRIPTION("User mode hievent device interface"); +-MODULE_LICENSE("GPL"); +-MODULE_ALIAS("hievent"); +diff --git a/drivers/staging/hievent/hievent_driver.h b/drivers/staging/hievent/hievent_driver.h +deleted file mode 100644 +index 83c67d9d2..000000000 +--- a/drivers/staging/hievent/hievent_driver.h ++++ /dev/null +@@ -1,22 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#ifndef HIEVENT_DRIVER_H +-#define HIEVENT_DRIVER_H +- +-#include +- +-#define CHECK_CODE 0x7BCDABCD +- +-struct idap_header { +- char level; +- char category; +- char log_type; +- char sn; +-}; +- +-int hievent_write_internal(const char *buffer, size_t buf_len); +- +-#endif /* HIEVENT_DRIVER_H */ +diff --git a/drivers/staging/hievent/hiview_hievent.c b/drivers/staging/hievent/hiview_hievent.c +deleted file mode 100644 +index 4533b6fbb..000000000 +--- a/drivers/staging/hievent/hiview_hievent.c ++++ /dev/null +@@ -1,488 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#include "hiview_hievent.h" +-#include "hievent_driver.h" +- +-#include +-#include +- +-#define INT_TYPE_MAX_LEN 21 +- +-#define MAX_PATH_LEN 256 +-#define MAX_STR_LEN (10 * 1024) +- +-/* CONFIG_BBOX_BUFFER_SIZE is max length of /dev/bbox */ +-#define EVENT_INFO_BUF_LEN ((size_t)CONFIG_BBOX_BUFFER_SIZE) +-#define EVENT_INFO_PACK_BUF_LEN min((size_t)CONFIG_BBOX_BUFFER_SIZE, 2048) +- +-#define BUF_POINTER_FORWARD \ +-do { \ +- if (tmplen < len) { \ +- tmp += tmplen; \ +- len -= tmplen; \ +- } else { \ +- tmp += len; \ +- len = 0; \ +- } \ +-} while (0) +- +-struct hievent_payload { +- char *key; +- char *value; +- struct hievent_payload *next; +-}; +- +-static int hievent_convert_string(struct hiview_hievent *event, char **pbuf); +- +-static struct hievent_payload *hievent_payload_create(void); +- +-static void hievent_payload_destroy(struct hievent_payload *p); +- +-static struct hievent_payload *hievent_get_payload(struct hievent_payload *head, +- const char *key); +- +-static void hievent_add_payload(struct hiview_hievent *obj, +- struct hievent_payload *payload); +- +-static struct hievent_payload *hievent_payload_create(void) +-{ +- struct hievent_payload *payload = NULL; +- +- payload = kmalloc(sizeof(*payload), GFP_KERNEL); +- if (!payload) +- return NULL; +- +- payload->key = NULL; +- payload->value = NULL; +- payload->next = NULL; +- +- return payload; +-} +- +-static void hievent_payload_destroy(struct hievent_payload *p) +-{ +- if (!p) +- return; +- +- kfree(p->value); +- kfree(p->key); +- kfree(p); +-} +- +-static struct hievent_payload *hievent_get_payload(struct hievent_payload *head, +- const char *key) +-{ +- struct hievent_payload *p = head; +- +- while (p) { +- if (key && p->key) { +- if (strcmp(p->key, key) == 0) +- return p; +- } +- p = p->next; +- } +- +- return NULL; +-} +- +-static void hievent_add_payload(struct hiview_hievent *obj, +- struct hievent_payload *payload) +-{ +- if (!obj->head) { +- obj->head = payload; +- } else { +- struct hievent_payload *p = obj->head; +- +- while (p->next) +- p = p->next; +- p->next = payload; +- } +-} +- +-struct hiview_hievent *hievent_create(unsigned int eventid) +-{ +- struct hiview_hievent *event = NULL; +- +- /* combined event obj struct */ +- event = kmalloc(sizeof(*event), GFP_KERNEL); +- if (!event) +- return NULL; +- +- memset(event, 0, sizeof(*event)); +- event->eventid = eventid; 
+- pr_debug("%s : %u\n", __func__, eventid); +- +- return (void *)event; +-} +- +-int hievent_put_integer(struct hiview_hievent *event, +- const char *key, long value) +-{ +- int ret; +- struct hievent_payload *payload = NULL; +- +- if ((!event) || (!key)) { +- pr_err("Bad input event or key for %s", __func__); +- return -EINVAL; +- } +- +- payload = hievent_get_payload(event->head, key); +- if (!payload) { +- payload = hievent_payload_create(); +- if (!payload) +- return -ENOMEM; +- payload->key = kstrdup(key, GFP_KERNEL); +- hievent_add_payload(event, payload); +- } +- +- kfree(payload->value); +- +- payload->value = kmalloc(INT_TYPE_MAX_LEN, GFP_KERNEL); +- if (!payload->value) +- return -ENOMEM; +- +- (void)memset(payload->value, 0, INT_TYPE_MAX_LEN); +- ret = snprintf(payload->value, INT_TYPE_MAX_LEN, "%d", (int)value); +- if (ret < 0) +- return -ENOMEM; +- +- return 0; +-} +- +-int hievent_put_string(struct hiview_hievent *event, +- const char *key, const char *value) +-{ +- struct hievent_payload *payload = NULL; +- int len; +- +- if ((!event) || (!key) || (!value)) { +- pr_err("Bad key for %s", __func__); +- return -EINVAL; +- } +- +- payload = hievent_get_payload(event->head, key); +- if (!payload) { +- payload = hievent_payload_create(); +- if (!payload) +- return -ENOMEM; +- +- payload->key = kstrdup(key, GFP_KERNEL); +- hievent_add_payload(event, payload); +- } +- +- kfree(payload->value); +- +- len = strlen(value); +- /* prevent length larger than MAX_STR_LEN */ +- if (len > MAX_STR_LEN) +- len = MAX_STR_LEN; +- +- payload->value = kmalloc(len + 1, GFP_KERNEL); +- if (!payload->value) +- return -ENOMEM; +- +- (void)memset(payload->value, 0, len + 1); +- if (strncpy(payload->value, value, len) > 0) +- payload->value[len] = '\0'; +- +- return 0; +-} +- +-int hievent_set_time(struct hiview_hievent *event, long long seconds) +-{ +- if ((!event) || (seconds == 0)) { +- pr_err("Bad input for %s", __func__); +- return -EINVAL; +- } +- event->time = seconds; +- return 0; +-} +- +-static int append_array_item(char **pool, int pool_len, const char *path) +-{ +- int i; +- +- if ((!path) || (path[0] == 0)) { +- pr_err("Bad path %s", __func__); +- return -EINVAL; +- } +- +- if (strlen(path) > MAX_PATH_LEN) { +- pr_err("file path over max: %d", MAX_PATH_LEN); +- return -EINVAL; +- } +- +- for (i = 0; i < pool_len; i++) { +- if (pool[i] != 0) +- continue; +- +- pool[i] = kstrdup(path, GFP_KERNEL); +- if (!pool[i]) +- return -ENOMEM; +- +- break; +- } +- +- if (i == MAX_PATH_NUMBER) { +- pr_err("Too many paths"); +- return -EINVAL; +- } +- +- return 0; +-} +- +-int hievent_add_filepath(struct hiview_hievent *event, const char *path) +-{ +- if (!event) { +- pr_err("Bad path %s", __func__); +- return -EINVAL; +- } +- return append_array_item(event->file_path, MAX_PATH_NUMBER, path); +-} +- +-/* make string ":" to "::", ";" to ";;", and remove newline character +- * for example: "abc:def;ghi" transfer to "abc::def;;ghi" +- */ +-static char *hievent_make_regular(char *value) +-{ +- int count = 0; +- int len = 0; +- char *temp = value; +- char *regular = NULL; +- char *regular_tmp = NULL; +- size_t regular_len; +- +- while (*temp != '\0') { +- if (*temp == ':') +- count++; +- else if (*temp == ';') +- count++; +- else if ((*temp == '\n') || (*temp == '\r')) +- *temp = ' '; +- +- temp++; +- len++; +- } +- +- /* no need to transfer, just return old value */ +- if (count == 0) +- return value; +- +- regular_len = len + count * 2 + 1; // 2 char in a byte +- regular = kmalloc(regular_len, GFP_KERNEL); 
+- if (!regular) +- return NULL; +- +- (void)memset(regular, 0, regular_len); +- regular_tmp = regular; +- temp = value; +- while (*temp != 0) { +- if ((*temp == ':') || (*temp == ';')) +- *regular_tmp++ = *temp; +- +- *regular_tmp++ = *temp; +- temp++; +- } +- *regular_tmp = '\0'; +- +- return regular; +-} +- +-int logbuff_to_exception(char category, int level, char log_type, +- char sn, const char *msg, int msglen) +-{ +- struct idap_header *hdr = NULL; +- size_t buf_len = sizeof(int) + sizeof(struct idap_header) + msglen; +- int ret; +- int *check_code = NULL; +- char *buffer = kmalloc(buf_len, GFP_KERNEL); +- +- if (!buffer) +- return -ENOMEM; +- +- check_code = (int *)buffer; +- *check_code = CHECK_CODE; +- +- hdr = (struct idap_header *)(buffer + sizeof(int)); +- hdr->level = level; +- hdr->category = category; +- hdr->log_type = log_type; +- hdr->sn = sn; +- +- memcpy(buffer + sizeof(int) + sizeof(struct idap_header), msg, msglen); +- +- ret = hievent_write_internal(buffer, buf_len); +- +- kfree(buffer); +- +- return ret; +-} +- +-static int hievent_fill_payload(struct hiview_hievent *event, char **pbuf, +- char *tmp, int length) +-{ +- struct hievent_payload *p = event->head; +- int len = length; +- int tmplen; +- unsigned int keycount = 0; +- +- while (p) { +- char *value = NULL; +- char *regular_value = NULL; +- int need_free = 1; +- +- if (!p->value) { +- p = p->next; +- continue; +- } +- if (keycount == 0) { +- tmplen = snprintf(tmp, len - 1, " --extra "); +- BUF_POINTER_FORWARD; +- } +- keycount++; +- +- /* fill key */ +- if (p->key) +- tmplen = snprintf(tmp, len - 1, "%s:", p->key); +- +- BUF_POINTER_FORWARD; +- /* fill value */ +- tmplen = 0; +- +- value = p->value; +- regular_value = hievent_make_regular(value); +- if (!regular_value) { +- regular_value = "NULL"; +- need_free = 0; +- } +- tmplen = snprintf(tmp, len - 1, "%s;", regular_value); +- if ((value != regular_value) && need_free) +- kfree(regular_value); +- +- BUF_POINTER_FORWARD; +- p = p->next; +- } +- return len; +-} +- +-static int hievent_convert_string(struct hiview_hievent *event, char **pbuf) +-{ +- int len; +- char *tmp = NULL; +- int tmplen; +- unsigned int i; +- +- char *buf = kmalloc(EVENT_INFO_BUF_LEN, GFP_KERNEL); +- +- if (!buf) { +- *pbuf = NULL; +- return 0; +- } +- +- (void)memset(buf, 0, EVENT_INFO_BUF_LEN); +- len = EVENT_INFO_BUF_LEN; +- tmp = buf; +- +- /* fill eventid */ +- tmplen = snprintf(tmp, len - 1, "eventid %d", event->eventid); +- BUF_POINTER_FORWARD; +- +- /* fill the path */ +- for (i = 0; i < MAX_PATH_NUMBER; i++) { +- if (!event->file_path[i]) +- break; +- +- tmplen = snprintf(tmp, len - 1, " -i %s", event->file_path[i]); +- BUF_POINTER_FORWARD; +- } +- +- /* fill time */ +- if (event->time) { +- tmplen = snprintf(tmp, len - 1, " -t %lld", event->time); +- BUF_POINTER_FORWARD; +- } +- +- /* fill the payload info */ +- len = hievent_fill_payload(event, pbuf, tmp, len); +- *pbuf = buf; +- return (EVENT_INFO_BUF_LEN - len); +-} +- +-#define IDAP_LOGTYPE_CMD 1 +-static int hievent_write_logexception(char *str, const int strlen) +-{ +- char tempchr; +- char *strptr = str; +- int left_buf_len = strlen + 1; +- int sent_cnt = 0; +- +- while (left_buf_len > 0) { +- if (left_buf_len > EVENT_INFO_PACK_BUF_LEN) { +- tempchr = strptr[EVENT_INFO_PACK_BUF_LEN - 1]; +- strptr[EVENT_INFO_PACK_BUF_LEN - 1] = '\0'; +- logbuff_to_exception(0, 0, IDAP_LOGTYPE_CMD, 1, strptr, +- EVENT_INFO_PACK_BUF_LEN); +- left_buf_len -= (EVENT_INFO_PACK_BUF_LEN - 1); +- strptr += (EVENT_INFO_PACK_BUF_LEN - 1); +- 
strptr[0] = tempchr; +- sent_cnt++; +- } else { +- logbuff_to_exception(0, 0, IDAP_LOGTYPE_CMD, 0, strptr, +- left_buf_len); +- sent_cnt++; +- break; +- } +- } +- +- return sent_cnt; +-} +- +-int hievent_report(struct hiview_hievent *obj) +-{ +- char *str = NULL; +- int buf_len; +- int sent_packet; +- +- if (!obj) { +- pr_err("Bad event %s", __func__); +- return -EINVAL; +- } +- +- buf_len = hievent_convert_string(obj, &str); +- if (!str) +- return -EINVAL; +- +- sent_packet = hievent_write_logexception(str, buf_len); +- pr_err("report: %s", str); +- kfree(str); +- +- return sent_packet; +-} +- +-void hievent_destroy(struct hiview_hievent *event) +-{ +- int i; +- struct hievent_payload *p = NULL; +- +- if (!event) +- return; +- +- p = event->head; +- while (p) { +- struct hievent_payload *del = p; +- +- p = p->next; +- hievent_payload_destroy(del); +- } +- +- event->head = NULL; +- for (i = 0; i < MAX_PATH_NUMBER; i++) { +- kfree(event->file_path[i]); +- event->file_path[i] = NULL; +- } +- +- kfree(event); +-} +diff --git a/drivers/staging/hievent/hiview_hievent.h b/drivers/staging/hievent/hiview_hievent.h +deleted file mode 100644 +index c1c003510..000000000 +--- a/drivers/staging/hievent/hiview_hievent.h ++++ /dev/null +@@ -1,34 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#ifndef HIVIEW_HIEVENT_H +-#define HIVIEW_HIEVENT_H +- +-#define MAX_PATH_NUMBER 10 +- +-/* hievent struct */ +-struct hiview_hievent { +- unsigned int eventid; +- +- long long time; +- +- /* payload linked list */ +- struct hievent_payload *head; +- +- /* file path needs uploaded */ +- char *file_path[MAX_PATH_NUMBER]; +-}; +- +-struct hiview_hievent *hievent_create(unsigned int eventid); +-int hievent_put_integer(struct hiview_hievent *event, +- const char *key, long value); +-int hievent_put_string(struct hiview_hievent *event, +- const char *key, const char *value); +-int hievent_set_time(struct hiview_hievent *event, long long seconds); +-int hievent_add_filepath(struct hiview_hievent *event, const char *path); +-int hievent_report(struct hiview_hievent *obj); +-void hievent_destroy(struct hiview_hievent *event); +- +-#endif /* HIVIEW_HIEVENT_H */ +diff --git a/drivers/staging/hilog/Kconfig b/drivers/staging/hilog/Kconfig +deleted file mode 100644 +index 243934c4c..000000000 +--- a/drivers/staging/hilog/Kconfig ++++ /dev/null +@@ -1,22 +0,0 @@ +-# +-# Sensor device configuration +-# +- +-config HILOG +- tristate "Hilog support" +- default n +- help +- hilog buffer manager. +- +- Hilog is a simple log manager for OpenHarmonyOS. +- log string write to /dev/hilog, and the hilog driver copy it +- to the ring buffer. Ring buffer can be read from userspace. +- +- If unsure, say N. +- +-config HILOG_BUFFER_SIZE +- int "hilog buffer size" +- depends on HILOG +- default 4096 +- help +- Define the default ring buffer size of hilog +diff --git a/drivers/staging/hilog/Makefile b/drivers/staging/hilog/Makefile +deleted file mode 100644 +index e53c86a5d..000000000 +--- a/drivers/staging/hilog/Makefile ++++ /dev/null +@@ -1,5 +0,0 @@ +-# +-# Makefile for the hi hilog drivers. +-# +- +-obj-$(CONFIG_HILOG) += hilog.o +diff --git a/drivers/staging/hilog/hilog.c b/drivers/staging/hilog/hilog.c +deleted file mode 100644 +index 178dcb6dd..000000000 +--- a/drivers/staging/hilog/hilog.c ++++ /dev/null +@@ -1,408 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. 
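The hiview_hievent.h prototypes below spell out the intended in-kernel call sequence. A hypothetical caller sketch, assuming the usual kernel headers plus hiview_hievent.h; the event ID, keys, and file path are invented for illustration:

    /* Hypothetical caller of the hievent API removed in this patch. */
    static void report_example(void)
    {
        struct hiview_hievent *event = hievent_create(901000000); /* made-up id */

        if (!event)
            return;
        hievent_put_integer(event, "ERROR_CODE", -5);        /* serialized as "KEY:value;" */
        hievent_put_string(event, "MODULE", "sample");
        hievent_set_time(event, ktime_get_real_seconds());   /* must be non-zero */
        hievent_add_filepath(event, "/data/log/sample.log"); /* up to MAX_PATH_NUMBER paths */
        if (hievent_report(event) < 0)
            pr_err("hievent report failed\n");
        hievent_destroy(event);
    }

hievent_report() flattens this into the "eventid ... -i path -t time --extra key:value;" string built by hievent_convert_string() above and ships it through hievent_write_internal() in EVENT_INFO_PACK_BUF_LEN chunks.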
+- * +- * This software is licensed under the terms of the GNU General Public +- * License version 2, as published by the Free Software Foundation, and +- * may be copied, distributed, and modified under those terms. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#ifndef HILOGDEV_MAJOR +-#define HILOGDEV_MAJOR 245 +-#endif +- +-#ifndef HILOG_NR_DEVS +-#define HILOG_NR_DEVS 2 +-#endif +- +-#ifndef MEMDEV_SIZE +-#define MEMDEV_SIZE 4096 +-#endif +- +-static int hilog_major = HILOGDEV_MAJOR; +- +-module_param(hilog_major, int, 0444); +- +-struct cdev g_hilog_cdev; +- +-#define HILOG_BUFFER ((size_t)CONFIG_HILOG_BUFFER_SIZE) +-#define HILOG_DRIVER "/dev/hilog" +- +-struct hilog_entry { +- unsigned int len; +- unsigned int header_size; +- unsigned int pid : 16; +- unsigned int task_id : 16; +- unsigned int sec; +- unsigned int nsec; +- unsigned int reserved; +- char msg[0]; +-}; +- +-static ssize_t hilog_write(struct file *file, +- const char __user *user_buf, +- size_t count, loff_t *ppos); +-static ssize_t hilog_read(struct file *file, +- char __user *user_buf, size_t count, loff_t *ppos); +- +-static const struct file_operations hilog_fops = { +- .read = hilog_read, +- .write = hilog_write, +-}; +- +-struct hilog_char_device { +- int flag; +- struct mutex mtx; /* lock to protect read/write buffer */ +- unsigned char *buffer; +- wait_queue_head_t wq; +- size_t wr_off; +- size_t hdr_off; +- size_t size; +- size_t count; +-} hilog_dev; +- +-static inline unsigned char *hilog_buffer_head(void) +-{ +- return hilog_dev.buffer + hilog_dev.hdr_off; +-} +- +-static void hilog_buffer_inc(size_t sz) +-{ +- if (hilog_dev.size + sz <= HILOG_BUFFER) { +- hilog_dev.size += sz; +- hilog_dev.wr_off += sz; +- hilog_dev.wr_off %= HILOG_BUFFER; +- hilog_dev.count++; +- } +-} +- +-static void hilog_buffer_dec(size_t sz) +-{ +- if (hilog_dev.size >= sz) { +- hilog_dev.size -= sz; +- hilog_dev.hdr_off += sz; +- hilog_dev.hdr_off %= HILOG_BUFFER; +- hilog_dev.count--; +- } +-} +- +-static int hilog_read_ring_buff(unsigned char __user *buffer, size_t buf_len) +-{ +- size_t retval; +- size_t buf_left = HILOG_BUFFER - hilog_dev.hdr_off; +- +- if (buf_left > buf_len) { +- retval = copy_to_user(buffer, hilog_buffer_head(), buf_len); +- } else { +- size_t mem_len = (buf_len > buf_left) ? buf_left : buf_len; +- +- retval = copy_to_user(buffer, hilog_buffer_head(), mem_len); +- if (retval < 0) +- return retval; +- +- retval = copy_to_user(buffer + buf_left, hilog_dev.buffer, +- buf_len - buf_left); +- } +- return retval; +-} +- +-static int hilog_read_ring_head_buffer(unsigned char *buffer, size_t buf_len) +-{ +- size_t buf_left = HILOG_BUFFER - hilog_dev.hdr_off; +- +- if (buf_left > buf_len) { +- memcpy(buffer, hilog_buffer_head(), buf_len); +- } else { +- size_t mem_len = (buf_len > buf_left) ? 
buf_left : buf_len; +- +- memcpy(buffer, hilog_buffer_head(), mem_len); +- memcpy(buffer + buf_left, hilog_dev.buffer, buf_len - buf_left); +- } +- +- return 0; +-} +- +-static ssize_t hilog_read(struct file *file, +- char __user *user_buf, size_t count, loff_t *ppos) +-{ +- size_t retval; +- struct hilog_entry header; +- +- (void)file; +- if (wait_event_interruptible(hilog_dev.wq, (hilog_dev.size > 0))) +- return -EINVAL; +- +- (void)mutex_lock(&hilog_dev.mtx); +- +- if (hilog_dev.size == 0) { +- retval = 0; +- goto out; +- } +- +- retval = hilog_read_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval < 0) { +- retval = -EINVAL; +- goto out; +- } +- +- if (count < header.len + sizeof(header)) { +- pr_err("buffer too small,buf_len=%d, header.len=%d,%d\n", +- (int)count, header.len, header.header_size); +- retval = -ENOMEM; +- goto out; +- } +- +- hilog_buffer_dec(sizeof(header)); +- retval = copy_to_user((unsigned char *)user_buf, +- (unsigned char *)&header, +- min(count, sizeof(header))); +- +- if (retval < 0) { +- retval = -EINVAL; +- goto out; +- } +- +- retval = hilog_read_ring_buff((unsigned char *) +- (user_buf + sizeof(header)), +- header.len); +- if (retval < 0) { +- retval = -EINVAL; +- goto out; +- } +- +- hilog_buffer_dec(header.len); +- retval = header.len + sizeof(header); +-out: +- if (retval == -ENOMEM) { +- // clean ring buffer +- hilog_dev.wr_off = 0; +- hilog_dev.hdr_off = 0; +- hilog_dev.size = 0; +- hilog_dev.count = 0; +- } +- (void)mutex_unlock(&hilog_dev.mtx); +- +- return retval; +-} +- +-static int hilog_write_ring_buffer(unsigned char __user *buffer, size_t buf_len) +-{ +- int retval; +- size_t buf_left = HILOG_BUFFER - hilog_dev.wr_off; +- +- if (buf_len > buf_left) { +- retval = copy_from_user(hilog_dev.buffer + hilog_dev.wr_off, +- buffer, buf_left); +- if (retval) +- return -1; +- retval = copy_from_user(hilog_dev.buffer, buffer + buf_left, +- min(HILOG_BUFFER, buf_len - buf_left)); +- } else { +- retval = copy_from_user(hilog_dev.buffer + hilog_dev.wr_off, +- buffer, min(buf_left, buf_len)); +- } +- +- if (retval < 0) +- return -1; +- +- return 0; +-} +- +-static int hilog_write_ring_head_buffer(unsigned char *buffer, size_t buf_len) +-{ +- size_t buf_left = HILOG_BUFFER - hilog_dev.wr_off; +- +- if (buf_len > buf_left) { +- memcpy(hilog_dev.buffer + hilog_dev.wr_off, +- buffer, buf_left); +- memcpy(hilog_dev.buffer, buffer + buf_left, +- min(HILOG_BUFFER, buf_len - buf_left)); +- } else { +- memcpy(hilog_dev.buffer + hilog_dev.wr_off, +- buffer, min(buf_left, buf_len)); +- } +- +- return 0; +-} +- +-static void hilog_head_init(struct hilog_entry *header, size_t len) +-{ +-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) +-#define NANOSEC_PER_MIRCOSEC 1000 +- struct timeval now = { 0 }; +- +- do_gettimeofday(&now); +- +- header->sec = now.tv_sec; +- header->nsec = now.tv_usec * NANOSEC_PER_MIRCOSEC; +-#else +- struct timespec64 now = { 0 }; +- +- ktime_get_real_ts64(&now); +- +- header->sec = now.tv_sec; +- header->nsec = now.tv_nsec; +-#endif +- +- header->len = len; +- header->pid = current->pid; +- header->task_id = current->tgid; +- header->header_size = sizeof(struct hilog_entry); +-} +- +-static void hilog_cover_old_log(size_t buf_len) +-{ +- int retval; +- struct hilog_entry header; +- size_t total_size = buf_len + sizeof(struct hilog_entry); +- static int drop_log_lines; +- static bool is_last_time_full; +- bool is_this_time_full = false; +- +- while (total_size + hilog_dev.size > HILOG_BUFFER) { +- retval = 
hilog_read_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval < 0) +- break; +- +- drop_log_lines++; +- is_this_time_full = true; +- is_last_time_full = true; +- hilog_buffer_dec(sizeof(header) + header.len); +- } +- if (is_last_time_full && !is_this_time_full) { +- /* so we can only print one log if hilog ring buffer is full in a short time */ +- if (drop_log_lines > 0) +- pr_info("hilog ringbuffer full, drop %d line(s) log\n", +- drop_log_lines); +- is_last_time_full = false; +- drop_log_lines = 0; +- } +-} +- +-int hilog_write_internal(const char __user *buffer, size_t buf_len) +-{ +- struct hilog_entry header; +- int retval; +- +- (void)mutex_lock(&hilog_dev.mtx); +- hilog_cover_old_log(buf_len); +- hilog_head_init(&header, buf_len); +- +- retval = hilog_write_ring_head_buffer((unsigned char *)&header, +- sizeof(header)); +- if (retval) { +- retval = -ENODATA; +- goto out; +- } +- hilog_buffer_inc(sizeof(header)); +- +- retval = hilog_write_ring_buffer((unsigned char *)(buffer), header.len); +- if (retval) { +- retval = -ENODATA; +- goto out; +- } +- +- hilog_buffer_inc(header.len); +- +- retval = header.len; +- +-out: +- (void)mutex_unlock(&hilog_dev.mtx); +- if (retval > 0) +- wake_up_interruptible(&hilog_dev.wq); +- else if (retval < 0) +- pr_err("write fail retval=%d\n", retval); +- +- return retval; +-} +- +-static ssize_t hilog_write(struct file *file, +- const char __user *user_buf, +- size_t count, loff_t *ppos) +-{ +- (void)file; +- if (count + sizeof(struct hilog_entry) > HILOG_BUFFER) { +- pr_err("input too large\n"); +- return -ENOMEM; +- } +- +- return hilog_write_internal(user_buf, count); +-} +- +-static void hilog_device_init(void) +-{ +- hilog_dev.buffer = kmalloc(HILOG_BUFFER, GFP_KERNEL); +- if (!hilog_dev.buffer) +- return; +- +- init_waitqueue_head(&hilog_dev.wq); +- mutex_init(&hilog_dev.mtx); +- hilog_dev.wr_off = 0; +- hilog_dev.hdr_off = 0; +- hilog_dev.size = 0; +- hilog_dev.count = 0; +-} +- +-static int __init hilogdev_init(void) +-{ +- int result; +- dev_t devno = MKDEV(hilog_major, 0); +- +- result = register_chrdev_region(devno, 2, "hilog"); +- if (result < 0) { +- pr_emerg("\t register hilog error %d\n", result); +- return result; +- } +- +- cdev_init(&g_hilog_cdev, &hilog_fops); +- g_hilog_cdev.owner = THIS_MODULE; +- g_hilog_cdev.ops = &hilog_fops; +- +- cdev_add(&g_hilog_cdev, MKDEV(hilog_major, 0), HILOG_NR_DEVS); +- +- hilog_device_init(); +- return 0; +-} +- +-static void __exit hilog_exit_module(void) +-{ +- cdev_del(&g_hilog_cdev); +- unregister_chrdev_region(MKDEV(hilog_major, 0), HILOG_NR_DEVS); +-} +- +-static int __init hilog_init_module(void) +-{ +- int state = hilogdev_init(); +- +- pr_info("\t hilog_init Start%d\n", state); +- return 0; +-} +- +-module_init(hilog_init_module); +-module_exit(hilog_exit_module); +- +-MODULE_AUTHOR("OHOS"); +-MODULE_DESCRIPTION("User mode hilog device interface"); +-MODULE_LICENSE("GPL"); +-MODULE_ALIAS("hilog"); +diff --git a/drivers/staging/hisysevent/Kconfig b/drivers/staging/hisysevent/Kconfig +deleted file mode 100644 +index a40621cb8..000000000 +--- a/drivers/staging/hisysevent/Kconfig ++++ /dev/null +@@ -1,6 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config HISYSEVENT +- tristate "Enable hisysevent" +- depends on HIEVENT +- help +- Say Y here to enable hisysevent feature support. 
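On the hilog framing removed above: each read() on /dev/hilog returns one struct hilog_entry header immediately followed by header.len message bytes, and a buffer smaller than one entry gets -ENOMEM plus a ring-buffer reset. A userspace reader sketch under those assumptions; the struct mirrors the driver's layout, and the 8 KiB buffer comfortably exceeds the 4096-byte CONFIG_HILOG_BUFFER_SIZE default:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Userspace mirror of the driver's struct hilog_entry; the
     * message bytes follow immediately after this header. */
    struct hilog_entry {
        unsigned int len;
        unsigned int header_size;
        unsigned int pid : 16;
        unsigned int task_id : 16;
        unsigned int sec;
        unsigned int nsec;
        unsigned int reserved;
    };

    int main(void)
    {
        char buf[8192]; /* larger than the whole 4 KiB default ring */
        struct hilog_entry header;
        int fd = open("/dev/hilog", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/hilog");
            return 1;
        }
        for (;;) { /* the driver blocks until a log line arrives */
            ssize_t n = read(fd, buf, sizeof(buf));

            if (n < (ssize_t)sizeof(header))
                break;
            memcpy(&header, buf, sizeof(header));
            printf("[%u.%09u] pid=%u: %.*s\n", header.sec, header.nsec,
                   (unsigned int)header.pid, (int)header.len,
                   buf + sizeof(header));
        }
        close(fd);
        return 0;
    }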
+diff --git a/drivers/staging/hisysevent/Makefile b/drivers/staging/hisysevent/Makefile +deleted file mode 100644 +index 025a2349b..000000000 +--- a/drivers/staging/hisysevent/Makefile ++++ /dev/null +@@ -1,6 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-obj-$(CONFIG_HISYSEVENT) += hiview_hisysevent.o +-# add lightweight writing feature +-obj-$(CONFIG_HISYSEVENT) += hisysevent_builder.o +-obj-$(CONFIG_HISYSEVENT) += hisysevent_raw_data_encoder.o +-obj-$(CONFIG_HISYSEVENT) += hisysevent_raw_data.o +diff --git a/drivers/staging/hisysevent/hisysevent_builder.c b/drivers/staging/hisysevent/hisysevent_builder.c +deleted file mode 100644 +index 297f6e10a..000000000 +--- a/drivers/staging/hisysevent/hisysevent_builder.c ++++ /dev/null +@@ -1,363 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#include "hisysevent_builder.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define MAX_PARAM_NAME_LENGTH 48 +- +-#define PARAM_STR_MAX_LEN 1536 // 1.5KB +-#define HISYSEVENT_INFO_BUF_LEN (2048 - 6) // 2KB - 6 (read_gap) +- +-#define TIME_ZONE_LEN 6 +-#define TIME_ZONE_TOTAL_CNT 38 +-#define DEFAULT_TZ_POS 14 +- +-#define MINUTE_TO_SECS 60 +-#define SEC_TO_MILLISEC 1000 +-#define MILLISEC_TO_NANOSEC (1000 * 1000) +- +-#define MAX_PARAM_NUMBER 128 +- +-#define HISYSEVENT_HEADER_SIZE sizeof(struct hisysevent_header) +- +-enum value_type { +- /* int64_t */ +- INT64 = 8, +- +- /* string */ +- STRING = 12, +-}; +- +-static int parse_time_zone(const char *time_zone_formatted) +-{ +- int ret; +- +- static const char *const time_zone_list[] = { +- "-0100", "-0200", "-0300", "-0330", "-0400", "-0500", "-0600", +- "-0700", "-0800", "-0900", "-0930", "-1000", "-1100", "-1200", +- "+0000", "+0100", "+0200", "+0300", "+0330", "+0400", "+0430", +- "+0500", "+0530", "+0545", "+0600", "+0630", "+0700", "+0800", +- "+0845", "+0900", "+0930", "+1000", "+1030", "+1100", "+1200", +- "+1245", "+1300", "+1400" +- }; +- if (!time_zone_formatted) +- return DEFAULT_TZ_POS; +- +- ret = match_string(time_zone_list, ARRAY_SIZE(time_zone_list), time_zone_formatted); +- if (ret < 0) +- return DEFAULT_TZ_POS; +- +- return ret; +-} +- +-static void hisysevent_builder_set_time(struct hisysevent_header *header) +-{ +- struct timespec64 ts; +- struct timezone tz = sys_tz; +- int tz_index = 0; +- char time_zone[TIME_ZONE_LEN]; +- int tz_hour; +- int tz_min; +- long long millisecs = 0; +- +- ktime_get_real_ts64(&ts); +- millisecs = ts.tv_sec * SEC_TO_MILLISEC + ts.tv_nsec / MILLISEC_TO_NANOSEC; +- header->timestamp = (u64)millisecs; +- +- tz_hour = (-tz.tz_minuteswest) / MINUTE_TO_SECS; +- time_zone[tz_index++] = tz_hour >= 0 ? 
'+' : '-'; +- tz_min = (-tz.tz_minuteswest) % MINUTE_TO_SECS; +- sprintf(&time_zone[tz_index], "%02u%02u", abs(tz_hour), abs(tz_min)); +- time_zone[TIME_ZONE_LEN - 1] = '\0'; +- header->time_zone = (u8)parse_time_zone(time_zone); +-} +- +-static bool is_valid_num_of_param(struct hisysevent_params *params) +-{ +- if (!params) +- return false; +- +- return params->total_cnt < MAX_PARAM_NUMBER; +-} +- +-static bool is_valid_string(const char *str, unsigned int max_len) +-{ +- unsigned int len = 0; +- unsigned int i; +- +- if (!str) +- return false; +- +- len = strlen(str); +- if (len == 0 || len > max_len) +- return false; +- +- if (!isalpha(str[0])) +- return false; +- +- for (i = 1; i < len; i++) { +- if (!isalnum(str[i]) && str[i] != '_') +- return false; +- } +- return true; +-} +- +-static int hisysevent_init_header(struct hisysevent_header *header, const char *domain, +- const char *name, enum hisysevent_type type) +-{ +- if (!is_valid_string(domain, MAX_DOMAIN_LENGTH) || +- !is_valid_string(name, MAX_EVENT_NAME_LENGTH)) { +- pr_err("domain or name is invalid"); +- return -EINVAL; +- } +- +- strcpy(header->domain, domain); +- strcpy(header->name, name); +- +- header->type = (u8)(type - 1); +- header->pid = (u32)current->pid; +- header->tid = (u32)current->tgid; +- header->uid = (u32)current_uid().val; +- header->is_open_trace = 0; // in kernel, this value is always 0 +- +- hisysevent_builder_set_time(header); +- if (!(header->time_zone)) { +- pr_err("failed to parse the time zone"); +- goto init_error; +- } +- +- pr_info("create hisysevent succeed, domain=%s, name=%s, type=%d", +- header->domain, header->name, (header->type + 1)); +- +- return 0; +- +-init_error: +- memset(header, 0, sizeof(*header)); +- return -EINVAL; +-} +- +-static int hisysevent_init_params(struct hisysevent_params *params) +-{ +- if (!params) { +- pr_err("params is null"); +- return -EINVAL; +- } +- +- params->raw_data = raw_data_create(); +- if (!(params->raw_data)) +- return -EINVAL; +- +- params->total_cnt = 0; +- return 0; +-} +- +-static void hisysevent_params_destroy(struct hisysevent_params *params) +-{ +- if (!params) { +- pr_err("params is null"); +- return; +- } +- raw_data_destroy(params->raw_data); +-} +- +-static bool hisysevent_check_params_validity(struct hisysevent_builder *builder) +-{ +- if (!builder) { +- pr_err("builder is null"); +- return false; +- } +- +- if (!is_valid_num_of_param(&builder->params)) { +- pr_err("number of param is invalid"); +- return false; +- } +- +- return true; +-} +- +-struct hisysevent_builder* +-hisysevent_builder_create(const char *domain, const char *name, enum hisysevent_type type) +-{ +- struct hisysevent_builder *builder; +- +- builder = kzalloc(sizeof(*builder), GFP_KERNEL); +- if (!builder) +- return NULL; +- +- // header struct initialize +- if (hisysevent_init_header(&builder->header, domain, name, type) != 0) +- goto create_err; +- +- // parameters struct initialize +- if (hisysevent_init_params(&builder->params) != 0) +- goto create_err; +- +- return builder; +- +-create_err: +- hisysevent_builder_destroy(builder); +- return NULL; +-} +-EXPORT_SYMBOL_GPL(hisysevent_builder_create); +- +-void hisysevent_builder_destroy(struct hisysevent_builder *builder) +-{ +- if (!builder) { +- pr_err("try to destroy an invalid builder"); +- return; +- } +- +- // destroy hisysevent parameters +- hisysevent_params_destroy(&builder->params); +- +- kfree(builder); +-} +-EXPORT_SYMBOL_GPL(hisysevent_builder_destroy); +- +-int hisysevent_builder_put_integer(struct 
hisysevent_builder *builder, const char *key, +- s64 value) +-{ +- int ret; +- struct hisysevent_raw_data *raw_data; +- +- if (!is_valid_string(key, MAX_PARAM_NAME_LENGTH)) { +- pr_err("try to put an invalid key"); +- return -EINVAL; +- } +- if (!hisysevent_check_params_validity(builder)) +- return -EINVAL; +- +- raw_data = raw_data_create(); +- if (!raw_data) { +- pr_err("failed to create raw data for an new integer parameter"); +- return -ENOMEM; +- } +- +- ret = -EINVAL; +- if ((str_length_delimited_encode(raw_data, key) != 0) || +- (key_value_type_encode(raw_data, (u8)0, (u8)INT64, (u8)0) != 0) || +- (int64_t_varint_encode(raw_data, value) != 0)) { +- pr_err("failed to encode an integer parameter"); +- goto put_int_err; +- } +- +- if (raw_data_append(builder->params.raw_data, raw_data->data, raw_data->len) != 0) { +- pr_err("failed to append a raw data"); +- goto put_int_err; +- } +- +- builder->params.total_cnt++; +- ret = 0; +- +-put_int_err: +- raw_data_destroy(raw_data); +- return ret; +-} +-EXPORT_SYMBOL_GPL(hisysevent_builder_put_integer); +- +-int hisysevent_builder_put_string(struct hisysevent_builder *builder, const char *key, +- const char *value) +-{ +- int ret; +- struct hisysevent_raw_data *raw_data; +- +- if (!is_valid_string(key, MAX_PARAM_NAME_LENGTH)) { +- pr_err("try to put an invalid key"); +- return -EINVAL; +- } +- if (!value || strlen(value) > PARAM_STR_MAX_LEN) { +- pr_err("string length exceeds limit"); +- return -EINVAL; +- } +- if (!hisysevent_check_params_validity(builder)) +- return -EINVAL; +- +- raw_data = raw_data_create(); +- if (!raw_data) { +- pr_err("failed to create raw data for a new string parameter"); +- return -ENOMEM; +- } +- +- ret = -EINVAL; +- if ((str_length_delimited_encode(raw_data, key) != 0) || +- (key_value_type_encode(raw_data, 0, (u8)STRING, 0) != 0) || +- (str_length_delimited_encode(raw_data, value) != 0)) { +- pr_err("failed to encode a string parameter"); +- goto put_str_err; +- } +- +- if (raw_data_append(builder->params.raw_data, raw_data->data, raw_data->len) != 0) { +- pr_err("failed to append a raw data"); +- goto put_str_err; +- } +- +- builder->params.total_cnt++; +- ret = 0; +- +-put_str_err: +- raw_data_destroy(raw_data); +- return ret; +-} +-EXPORT_SYMBOL_GPL(hisysevent_builder_put_string); +- +-int hisysevent_builder_build(struct hisysevent_builder *builder, +- struct hisysevent_raw_data *raw_data) +-{ +- s32 blockSize; +- struct hisysevent_raw_data *params_raw_data; +- +- if (!hisysevent_check_params_validity(builder)) +- return -EINVAL; +- +- blockSize = 0; +- // copy block size at first +- if (raw_data_append(raw_data, (u8 *)(&blockSize), sizeof(s32)) != 0) { +- pr_err("fialed to append block size"); +- return -ENOMEM; +- } +- // copy header +- if (raw_data_append(raw_data, (u8 *)(&builder->header), +- sizeof(struct hisysevent_header)) != 0) { +- pr_err("fialed to append sys event header"); +- return -ENOMEM; +- } +- // copy total count of parameter +- if (raw_data_append(raw_data, (u8 *)(&builder->params.total_cnt), +- sizeof(s32)) != 0) { +- pr_err("fialed to append total count of parameters"); +- return -ENOMEM; +- } +- // copy customized parameters +- params_raw_data = builder->params.raw_data; +- if (!params_raw_data) { +- pr_err("this sys event doesn't have any parameter"); +- return -EINVAL; +- } +- if (raw_data_append(raw_data, params_raw_data->data, params_raw_data->len) != 0) { +- pr_err("fialed to append encoded raw data of parameters"); +- return -ENOMEM; +- } +- // update block size +- blockSize = 
raw_data->len; +- if (raw_data_update(raw_data, (u8 *)(&blockSize), sizeof(s32), 0) != 0) { +- pr_err("fialed to update block size"); +- return -ENOMEM; +- } +- return 0; +-} +-EXPORT_SYMBOL_GPL(hisysevent_builder_build); +diff --git a/drivers/staging/hisysevent/hisysevent_builder.h b/drivers/staging/hisysevent/hisysevent_builder.h +deleted file mode 100644 +index 6570b8499..000000000 +--- a/drivers/staging/hisysevent/hisysevent_builder.h ++++ /dev/null +@@ -1,87 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#ifndef HISYSEVENT_BUILDER_H +-#define HISYSEVENT_BUILDER_H +- +-#include +- +-#include +-#include +- +-#include "hisysevent_raw_data_encoder.h" +-#include "hisysevent_raw_data.h" +- +-#define MAX_DOMAIN_LENGTH 16 +-#define MAX_EVENT_NAME_LENGTH 32 +- +-#pragma pack(1) +- +-struct hisysevent_header { +- /* event domain */ +- char domain[MAX_DOMAIN_LENGTH + 1]; +- +- /* event name */ +- char name[MAX_EVENT_NAME_LENGTH + 1]; +- +- /* event timestamp */ +- u64 timestamp; +- +- /* time zone */ +- u8 time_zone; +- +- /* user id */ +- u32 uid; +- +- /* process id */ +- u32 pid; +- +- /* thread id */ +- u32 tid; +- +- /* event hash code*/ +- u64 id; +- +- /* event type */ +- u8 type: 2; // enum hisysevent_type. +- +- /* trace info flag*/ +- u8 is_open_trace: 1; +-}; +- +-#pragma pack() +- +-struct hisysevent_params { +- /* total count of parameters */ +- s32 total_cnt; +- +- /* content of parameters */ +- struct hisysevent_raw_data *raw_data; +-}; +- +-/* hisysevent builder struct */ +-struct hisysevent_builder { +- /* common header */ +- struct hisysevent_header header; +- +- /* customized parameters*/ +- struct hisysevent_params params; +-}; +- +-struct hisysevent_builder * +-hisysevent_builder_create(const char *domain, const char *name, enum hisysevent_type type); +- +-void hisysevent_builder_destroy(struct hisysevent_builder *builder); +- +-int hisysevent_builder_put_integer(struct hisysevent_builder *builder, const char *key, +- s64 value); +- +-int hisysevent_builder_put_string(struct hisysevent_builder *builder, const char *key, +- const char *value); +- +-int hisysevent_builder_build(struct hisysevent_builder *builder, +- struct hisysevent_raw_data *raw_data); +- +-#endif /* HISYSEVENT_BUILDER_H */ +diff --git a/drivers/staging/hisysevent/hisysevent_raw_data.c b/drivers/staging/hisysevent/hisysevent_raw_data.c +deleted file mode 100644 +index 5e37ad726..000000000 +--- a/drivers/staging/hisysevent/hisysevent_raw_data.c ++++ /dev/null +@@ -1,117 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. 
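Reconstructed from the raw_data_append() sequence in hisysevent_builder_build() above, a serialized event record is [s32 block size][packed hisysevent_header][s32 param count][encoded params], with the block size patched in last once the total length is known. A hypothetical kernel-side consumer sketch, assuming <linux/types.h>, <linux/string.h>, <linux/errno.h> and the packed struct hisysevent_header from hisysevent_builder.h:

    /* Hypothetical consumer of one serialized event record. */
    static int parse_event_record(const u8 *buf, int buf_len)
    {
        s32 block_size;
        s32 param_cnt;
        struct hisysevent_header header;
        size_t off = 0;

        if (buf_len < (int)(2 * sizeof(s32) + sizeof(header)))
            return -EINVAL;
        memcpy(&block_size, buf + off, sizeof(s32)); /* patched in last by build() */
        off += sizeof(s32);
        memcpy(&header, buf + off, sizeof(header));  /* packed common header */
        off += sizeof(header);
        memcpy(&param_cnt, buf + off, sizeof(s32));  /* key/value parameter count */
        off += sizeof(s32);
        /* bytes [off, block_size) hold the varint/length-delimited params */
        return (block_size >= (s32)off && block_size <= buf_len) ? param_cnt : -EINVAL;
    }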
+- */ +- +-#include "hisysevent_raw_data.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define EXPAND_BUF_SIZE 100 +- +-static int raw_data_init(struct hisysevent_raw_data *raw_data) +-{ +- if (!raw_data) { +- pr_err("raw data is null"); +- return -EINVAL; +- } +- +- raw_data->data = kzalloc(EXPAND_BUF_SIZE, GFP_KERNEL); +- if (!(raw_data->data)) { +- pr_err("failed to allocate memory for raw data"); +- return -ENOMEM; +- } +- +- raw_data->capacity = EXPAND_BUF_SIZE; +- raw_data->len = 0; +- +- return 0; +-} +- +-int raw_data_update(struct hisysevent_raw_data *dest, u8 *src, unsigned int len, +- unsigned int pos) +-{ +- if (!dest) { +- pr_err("try to update a data which is null"); +- return -EINVAL; +- } +- if (!src || len == 0) { +- pr_info("do nothing"); +- return 0; +- } +- if (dest->len < pos) { +- pr_err("try to update on an invalid position"); +- return -EINVAL; +- } +- if ((pos + len) > dest->capacity) { +- unsigned int expanded_size; +- u8 *resize_data; +- +- expanded_size = (len > EXPAND_BUF_SIZE) ? len : EXPAND_BUF_SIZE; +- resize_data = kmalloc(dest->capacity + expanded_size, GFP_KERNEL); +- if (!resize_data) { +- pr_err("failed to expand memory for raw data"); +- return -ENOMEM; +- } +- if (dest->data) { +- memcpy(resize_data, dest->data, dest->len); +- dest->capacity += expanded_size; +- kfree(dest->data); +- } +- dest->data = resize_data; +- } +- +- // append new data +- memcpy(dest->data + pos, src, len); +- if ((pos + len) > dest->len) +- dest->len = pos + len; +- return 0; +-} +-EXPORT_SYMBOL_GPL(raw_data_update); +- +-int raw_data_append(struct hisysevent_raw_data *dest, u8 *src, unsigned int len) +-{ +- return raw_data_update(dest, src, len, dest->len); +-} +-EXPORT_SYMBOL_GPL(raw_data_append); +- +-struct hisysevent_raw_data* +-raw_data_create(void) +-{ +- struct hisysevent_raw_data *raw_data; +- +- raw_data = kzalloc(sizeof(*raw_data), GFP_KERNEL); +- if (!raw_data) +- return NULL; +- +- if (raw_data_init(raw_data) != 0) +- goto create_err; +- +- return raw_data; +- +-create_err: +- raw_data_destroy(raw_data); +- return NULL; +-} +-EXPORT_SYMBOL_GPL(raw_data_create); +- +-void raw_data_destroy(struct hisysevent_raw_data *raw_data) +-{ +- if (!raw_data) { +- pr_err("try to destroy an invalid raw data"); +- return; +- } +- +- if (raw_data->data) +- kfree(raw_data->data); +- +- kfree(raw_data); +-} +-EXPORT_SYMBOL_GPL(raw_data_destroy); +diff --git a/drivers/staging/hisysevent/hisysevent_raw_data.h b/drivers/staging/hisysevent/hisysevent_raw_data.h +deleted file mode 100644 +index 93a9b9706..000000000 +--- a/drivers/staging/hisysevent/hisysevent_raw_data.h ++++ /dev/null +@@ -1,33 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. 
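A short usage sketch for the raw_data helpers above: raw_data_create() starts with a 100-byte (EXPAND_BUF_SIZE) buffer, raw_data_append() is raw_data_update() at the current end, and the buffer grows on demand. The reserve-then-patch pattern below mirrors how hisysevent_builder_build() uses these helpers; raw_data_demo is an invented name and kernel context is assumed:

    /* Invented demo of the raw_data helpers. */
    static int raw_data_demo(void)
    {
        struct hisysevent_raw_data *data = raw_data_create(); /* 100-byte initial buffer */
        u8 payload[] = { 0x01, 0x02, 0x03 };
        s32 size = 0;

        if (!data)
            return -ENOMEM;
        raw_data_append(data, (u8 *)&size, sizeof(size));    /* reserve a length slot */
        raw_data_append(data, payload, sizeof(payload));     /* append the body */
        size = data->len;                                    /* total bytes written */
        raw_data_update(data, (u8 *)&size, sizeof(size), 0); /* patch the slot in place */
        raw_data_destroy(data);
        return 0;
    }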
+- */ +- +-#ifndef HISYSEVENT_RAW_DATA_H +-#define HISYSEVENT_RAW_DATA_H +- +-#include +-#include +- +-struct hisysevent_raw_data { +- /* pointer to raw data */ +- u8 *data; +- +- /* length of data wrote */ +- int len; +- +- /* total allocated memory */ +- int capacity; +-}; +- +-struct hisysevent_raw_data * +-raw_data_create(void); +- +-int raw_data_append(struct hisysevent_raw_data *dest, u8 *src, unsigned int len); +- +-int raw_data_update(struct hisysevent_raw_data *dest, u8 *src, unsigned int len, +- unsigned int offset); +- +-void raw_data_destroy(struct hisysevent_raw_data *raw_data); +- +-#endif /* HISYSEVENT_RAW_DATA_H */ +diff --git a/drivers/staging/hisysevent/hisysevent_raw_data_encoder.c b/drivers/staging/hisysevent/hisysevent_raw_data_encoder.c +deleted file mode 100644 +index a206950aa..000000000 +--- a/drivers/staging/hisysevent/hisysevent_raw_data_encoder.c ++++ /dev/null +@@ -1,123 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#include "hisysevent_raw_data_encoder.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define TAG_BYTE_OFFSET 5 +-#define TAG_BYTE_BOUND (BIT(TAG_BYTE_OFFSET)) +-#define TAG_BYTE_MASK (TAG_BYTE_BOUND - 1) +- +-#define NON_TAG_BYTE_OFFSET 7 +-#define NON_TAG_BYTE_BOUND (BIT(NON_TAG_BYTE_OFFSET)) +-#define NON_TAG_BYTE_MASK (NON_TAG_BYTE_BOUND - 1) +- +-enum hisysevent_encode_type { +- // zigzag varint +- VARINT = 0, +- +- // length delimited +- LENGTH_DELIMITED = 1, +-}; +- +-#pragma pack(1) +- +-struct param_value_type { +- /* array flag */ +- u8 is_array: 1; +- +- /* type of parameter value */ +- u8 value_type: 4; +- +- /* byte count of parameter value */ +- u8 value_byte_cnt: 3; +-}; +- +-#pragma pack() +- +-static u8 encode_tag(u8 type) +-{ +- return type << (TAG_BYTE_OFFSET + 1); +-} +- +-static int unsigned_varint_code(struct hisysevent_raw_data *data, +- enum hisysevent_encode_type type, u64 val) +-{ +- u8 cpy_val; +- +- cpy_val = encode_tag((u8)type) | +- ((val < TAG_BYTE_BOUND) ? 0 : TAG_BYTE_BOUND) | +- (u8)(val & TAG_BYTE_MASK); +- if (raw_data_append(data, (u8 *)(&cpy_val), sizeof(u8)) != 0) +- return -EINVAL; +- +- val >>= TAG_BYTE_OFFSET; +- while (val > 0) { +- cpy_val = ((val < NON_TAG_BYTE_BOUND) ? 
0 : NON_TAG_BYTE_BOUND) | +- (u8)(val & NON_TAG_BYTE_MASK); +- if (raw_data_append(data, (u8 *)(&cpy_val), sizeof(u8)) != 0) +- return -EINVAL; +- +- val >>= NON_TAG_BYTE_OFFSET; +- } +- return 0; +-} +- +-static int signed_varint_encode(struct hisysevent_raw_data *data, +- enum hisysevent_encode_type type, s64 val) +-{ +- u64 uval; +- +- uval = (val << 1) ^ (val >> ((sizeof(val) << 3) - 1)); // zigzag encode +- return unsigned_varint_code(data, type, uval); +-} +- +-int key_value_type_encode(struct hisysevent_raw_data *data, u8 is_array, u8 type, +- u8 count) +-{ +- struct param_value_type value_type; +- +- value_type.is_array = is_array; +- value_type.value_type = type; +- value_type.value_byte_cnt = count; +- +- if (raw_data_append(data, (u8 *)(&value_type), +- sizeof(struct param_value_type)) != 0) +- return -EINVAL; +- +- return 0; +-} +-EXPORT_SYMBOL_GPL(key_value_type_encode); +- +-int str_length_delimited_encode(struct hisysevent_raw_data *data, const char *str) +-{ +- u64 length; +- +- length = (u64)strlen(str); +- if (unsigned_varint_code(data, LENGTH_DELIMITED, length) != 0) +- return -EINVAL; +- +- if (raw_data_append(data, (u8 *)(str), length) != 0) +- return -EINVAL; +- +- return 0; +-} +-EXPORT_SYMBOL_GPL(str_length_delimited_encode); +- +-int int64_t_varint_encode(struct hisysevent_raw_data *raw_data, s64 val) +-{ +- return signed_varint_encode(raw_data, VARINT, val); +-} +-EXPORT_SYMBOL_GPL(int64_t_varint_encode); +diff --git a/drivers/staging/hisysevent/hisysevent_raw_data_encoder.h b/drivers/staging/hisysevent/hisysevent_raw_data_encoder.h +deleted file mode 100644 +index 75cb7c327..000000000 +--- a/drivers/staging/hisysevent/hisysevent_raw_data_encoder.h ++++ /dev/null +@@ -1,21 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2023 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#ifndef HISYSEVENT_RAW_DATA_ENCODER_H +-#define HISYSEVENT_RAW_DATA_ENCODER_H +- +-#include +-#include +- +-#include "hisysevent_raw_data.h" +- +-int key_value_type_encode(struct hisysevent_raw_data *data, u8 is_array, u8 type, +- u8 count); +- +-int str_length_delimited_encode(struct hisysevent_raw_data *data, const char *str); +- +-int int64_t_varint_encode(struct hisysevent_raw_data *data, s64 val); +- +-#endif /* HISYSEVENT_RAW_DATA_ENCODER_H */ +diff --git a/drivers/staging/hisysevent/hiview_hisysevent.c b/drivers/staging/hisysevent/hiview_hisysevent.c +deleted file mode 100644 +index af2236fa8..000000000 +--- a/drivers/staging/hisysevent/hiview_hisysevent.c ++++ /dev/null +@@ -1,145 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2022-2023 Huawei Technologies Co., Ltd. All rights reserved. 
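On the varint scheme above: signed_varint_encode() zigzag-maps signed values onto unsigned ones (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...) so small magnitudes become small codes, and unsigned_varint_code() reserves the top bits of the first byte for the tag and continuation flag. A standalone round-trip sketch of just the zigzag step, with invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* Same mapping as signed_varint_encode() above. */
    static uint64_t zigzag_encode(int64_t val)
    {
        return ((uint64_t)val << 1) ^ (uint64_t)(val >> 63); /* sign fills the low bit */
    }

    static int64_t zigzag_decode(uint64_t uval)
    {
        return (int64_t)(uval >> 1) ^ -(int64_t)(uval & 1);
    }

    int main(void)
    {
        int64_t samples[] = { 0, -1, 1, -2, 2, -64, 64 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            uint64_t z = zigzag_encode(samples[i]);

            printf("%lld -> %llu -> %lld\n", (long long)samples[i],
                   (unsigned long long)z, (long long)zigzag_decode(z));
        }
        return 0;
    }

In the driver's encoding the tagged first byte carries only TAG_BYTE_OFFSET (5) value bits, so zigzag codes below 32 fit in a single byte; each continuation byte then carries NON_TAG_BYTE_OFFSET (7) more bits.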
+- */ +- +-#include +- +-#ifdef CONFIG_HISYSEVENT +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "hisysevent_builder.h" +-#include "hisysevent_raw_data.h" +- +-#define HISYSEVENT_WRITER_DEV "/dev/bbox" +- +-static int CHECK_CODE = 0x7BCDABCD; +- +-#define HISYSEVENT_INFO_BUF_LEN (2048 - 6) // 2KB - 6 (read_gap) +- +-int hievent_write_internal(const char *buffer, size_t buf_len); +- +-/* hisysevent struct */ +-struct hiview_hisysevent { +- /* hisysevent builder */ +- struct hisysevent_builder *builder; +-}; +- +-struct hiview_hisysevent * +-hisysevent_create(const char *domain, const char *name, enum hisysevent_type type) +-{ +- struct hiview_hisysevent *event; +- +- event = kzalloc(sizeof(*event), GFP_KERNEL); +- if (!event) +- return NULL; +- +- event->builder = hisysevent_builder_create(domain, name, type); +- if (!event->builder) +- goto create_err; +- return event; +- +-create_err: +- hisysevent_destroy(&event); +- return NULL; +-} +-EXPORT_SYMBOL_GPL(hisysevent_create); +- +-void hisysevent_destroy(struct hiview_hisysevent **event) +-{ +- if (!event || !*event) { +- pr_err("invalid event"); +- return; +- } +- +- hisysevent_builder_destroy((*event)->builder); +- +- kfree(*event); +- *event = NULL; +-} +-EXPORT_SYMBOL_GPL(hisysevent_destroy); +- +-int hisysevent_put_integer(struct hiview_hisysevent *event, const char *key, long long value) +-{ +- if (!event) { +- pr_err("invalid event"); +- return -EINVAL; +- } +- return hisysevent_builder_put_integer(event->builder, key, value); +-} +-EXPORT_SYMBOL_GPL(hisysevent_put_integer); +- +-int hisysevent_put_string(struct hiview_hisysevent *event, const char *key, const char *value) +-{ +- if (!event) { +- pr_err("invalid event"); +- return -EINVAL; +- } +- return hisysevent_builder_put_string(event->builder, key, value); +-} +-EXPORT_SYMBOL_GPL(hisysevent_put_string); +- +-int hisysevent_write(struct hiview_hisysevent *event) +-{ +- struct hisysevent_raw_data *raw_data; +- int ret; +- int retval; +- struct file *filp; +- unsigned long vcount; +- struct iovec vec[3]; +- struct iov_iter iter; +- +- if (!event) { +- pr_err("invalid event"); +- return -EINVAL; +- } +- +- raw_data = raw_data_create(); +- if (!raw_data) { +- pr_err("failed to create a new raw data"); +- return -EINVAL; +- } +- +- ret = hisysevent_builder_build(event->builder, raw_data); +- if (ret != 0) { +- pr_err("hisysevent builder build failed"); +- goto event_wrote_err; +- } +- pr_info("total block size of hisysevent data is %d", raw_data->len); +- +- if (raw_data->len > HISYSEVENT_INFO_BUF_LEN) { +- pr_err("content of sysevent exceeds limit"); +- goto event_wrote_err; +- } +- +- if (!current->fs) { +- pr_err("file system is null"); +- goto event_wrote_err; +- } +- +- retval = hievent_write_internal(raw_data->data, raw_data->len + 1); +- if (retval < 0) { +- retval = -EIO; +- goto event_wrote_err; +- } +- +-event_wrote_err: +- raw_data_destroy(raw_data); +- return ret; +-} +-EXPORT_SYMBOL_GPL(hisysevent_write); +- +-#endif /* CONFIG_HISYSEVENT */ +diff --git a/drivers/staging/hungtask/Kconfig b/drivers/staging/hungtask/Kconfig +deleted file mode 100644 +index 4e80dc9fc..000000000 +--- a/drivers/staging/hungtask/Kconfig ++++ /dev/null +@@ -1,14 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config DFX_HUNGTASK +- bool "DFX hungtask" +- depends on DETECT_HUNG_TASK +- default n +- help +- Base DFX hungtask module +- +-config DFX_HUNGTASK_USER +- bool "DFX hungtask user 
watchdog module" +- depends on DFX_HUNGTASK +- default n +- help +- DFX hungtask user watchdog module +\ No newline at end of file +diff --git a/drivers/staging/hungtask/Makefile b/drivers/staging/hungtask/Makefile +deleted file mode 100644 +index 12def220e..000000000 +--- a/drivers/staging/hungtask/Makefile ++++ /dev/null +@@ -1,3 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-obj-$(CONFIG_DFX_HUNGTASK) += hungtask_base.o +-obj-$(CONFIG_DFX_HUNGTASK_USER) += hungtask_user.o +diff --git a/drivers/staging/hungtask/hungtask_base.c b/drivers/staging/hungtask/hungtask_base.c +deleted file mode 100644 +index 30408c0ba..000000000 +--- a/drivers/staging/hungtask/hungtask_base.c ++++ /dev/null +@@ -1,1031 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#define pr_fmt(fmt) "hungtask_base " fmt +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef CONFIG_DFX_ZEROHUNG +-#include +-#endif +-#include +-#include "hungtask_user.h" +- +-static struct rb_root list_tasks = RB_ROOT; +-static DEFINE_SPINLOCK(list_tasks_lock); +-static struct hlist_head whitelist[WHITELIST_LEN]; +-static struct whitelist_item whitetmplist[WHITELIST_LEN]; +-static bool whitelist_empty = true; +-static int remove_cnt; +-static struct task_item *remove_list[MAX_REMOVE_LIST_NUM + 1]; +-static unsigned long __read_mostly hungtask_timeout_secs = +- CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; +-static int did_panic; +-static unsigned int hungtask_enable = HT_DISABLE; +-static unsigned int whitelist_type = WHITE_LIST; +-static int whitelist_dump_cnt = DEFAULT_WHITE_DUMP_CNT; +-static int whitelist_panic_cnt = DEFAULT_WHITE_PANIC_CNT; +-static int appspawn_pid; +-static int dump_and_upload; +-static int time_since_upload; +-static int hung_task_must_panic; +-static int report_zrhung_id; +-static struct task_hung_upload upload; +-static int do_refresh; +-static char frozen_buf[FROZEN_BUF_LEN]; +-static int frozen_used; +-static bool frozed_head; +-static unsigned long cur_heartbeat; +-static struct work_struct send_work; +-static char report_buf_text[REPORT_MSGLENGTH]; +- +-bool hashlist_find(struct hlist_head *head, int count, pid_t tgid) +-{ +- struct hashlist_node *hnode = NULL; +- +- if (count <= 0) +- return false; +- if (hlist_empty(&head[tgid % count])) +- return false; +- hlist_for_each_entry(hnode, &head[tgid % count], list) { +- if (hnode->pid == tgid) +- return true; +- } +- return false; +-} +- +-void hashlist_clear(struct hlist_head *head, int count) +-{ +- int i = 0; +- struct hlist_node *n = NULL; +- struct hashlist_node *hnode = NULL; +- +- for (i = 0; i < count; i++) { +- hlist_for_each_entry_safe(hnode, n, &head[i], list) { +- hlist_del(&hnode->list); +- kfree(hnode); +- hnode = NULL; +- } +- } +- for (i = 0; i < count; i++) +- INIT_HLIST_HEAD(&head[i]); +-} +- +-bool hashlist_insert(struct hlist_head *head, int count, pid_t tgid) +-{ +- struct hashlist_node *hnode = NULL; +- +- if (hashlist_find(head, count, tgid)) +- return false; +- hnode = kmalloc(sizeof(struct hashlist_node), GFP_ATOMIC); +- if (!hnode) +- return false; +- INIT_HLIST_NODE(&hnode->list); +- hnode->pid = tgid; +- hlist_add_head(&hnode->list, &head[tgid % count]); +- return true; +-} +- +-static bool rcu_lock_break(struct task_struct *g, struct task_struct *t) +-{ +- bool can_cont = false; +- +- get_task_struct(g); +- get_task_struct(t); +- rcu_read_unlock(); +- cond_resched(); +- rcu_read_lock(); +- 
can_cont = pid_alive(g) && pid_alive(t); +- put_task_struct(t); +- put_task_struct(g); +- return can_cont; +-} +- +-static bool rcu_break(int *max_count, int *batch_count, +- struct task_struct *g, +- struct task_struct *t) +-{ +- if (!(*max_count)--) +- return true; +- if (!--(*batch_count)) { +- *batch_count = HUNG_TASK_BATCHING; +- if (!rcu_lock_break(g, t)) +- return true; +- } +- return false; +-} +- +-static pid_t get_pid_by_name(const char *name) +-{ +- int max_count = PID_MAX_LIMIT; +- int batch_count = HUNG_TASK_BATCHING; +- struct task_struct *g = NULL; +- struct task_struct *t = NULL; +- int pid = 0; +- +- rcu_read_lock(); +- do_each_thread(g, t) { +- if (rcu_break(&max_count, &batch_count, g, t)) +- goto unlock; +- if (!strncmp(t->comm, name, TASK_COMM_LEN)) { +- pid = t->tgid; +- goto unlock; +- } +- } while_each_thread(g, t); +- +-unlock: +- rcu_read_unlock(); +- return pid; +-} +- +-static unsigned int get_task_type(pid_t pid, pid_t tgid, struct task_struct *parent) +-{ +- unsigned int flag = TASK_TYPE_IGNORE; +- /* check tgid of it's parent as PPID */ +- if (parent) { +- pid_t ppid = parent->tgid; +- +- if (ppid == PID_KTHREAD) +- flag |= TASK_TYPE_KERNEL; +- else if (ppid == appspawn_pid) +- flag |= TASK_TYPE_APP; +- else if (ppid == PID_INIT) +- flag |= TASK_TYPE_NATIVE; +- } +- if (!whitelist_empty && hashlist_find(whitelist, WHITELIST_LEN, tgid)) +- flag |= TASK_TYPE_WHITE | TASK_TYPE_JANK; +- +- return flag; +-} +- +-static void refresh_appspawn_pids(void) +-{ +- int max_count = PID_MAX_LIMIT; +- int batch_count = HUNG_TASK_BATCHING; +- struct task_struct *g = NULL; +- struct task_struct *t = NULL; +- +- rcu_read_lock(); +- do_each_thread(g, t) { +- if (rcu_break(&max_count, &batch_count, g, t)) +- goto unlock; +- if (!strncmp(t->comm, "appspawn", TASK_COMM_LEN)) +- appspawn_pid = t->tgid; +- } while_each_thread(g, t); +-unlock: +- rcu_read_unlock(); +-} +- +-static void refresh_task_type(pid_t pid, int task_type) +-{ +- struct task_item *item = NULL; +- struct rb_node *p = NULL; +- +- spin_lock(&list_tasks_lock); +- for (p = rb_first(&list_tasks); p; p = rb_next(p)) { +- item = rb_entry(p, struct task_item, node); +- if (item->tgid == pid) +- item->task_type = task_type; +- } +- spin_unlock(&list_tasks_lock); +-} +- +-static void refresh_whitelist_pids(void) +-{ +- int i; +- +- hashlist_clear(whitelist, WHITELIST_LEN); +- for (i = 0; i < WHITELIST_LEN; i++) { +- if (!strlen(whitetmplist[i].name)) +- continue; +- whitetmplist[i].pid = +- get_pid_by_name(whitetmplist[i].name); +- if (!whitetmplist[i].pid) +- continue; +- refresh_task_type(whitetmplist[i].pid, +- TASK_TYPE_WHITE | TASK_TYPE_JANK); +- if (hashlist_insert(whitelist, WHITELIST_LEN, +- whitetmplist[i].pid)) +- pr_info("whitelist[%d]-%s-%d\n", i, +- whitetmplist[i].name, whitetmplist[i].pid); +- else +- pr_info("can't find %s\n", whitetmplist[i].name); +- } +- refresh_appspawn_pids(); +-} +- +-static struct task_item *find_task(pid_t pid, struct rb_root *root) +-{ +- struct rb_node **p = &root->rb_node; +- struct task_item *cur = NULL; +- struct rb_node *parent = NULL; +- +- while (*p) { +- parent = *p; +- cur = rb_entry(parent, struct task_item, node); +- if (!cur) +- return NULL; +- if (pid < cur->pid) +- p = &(*p)->rb_left; +- else if (pid > cur->pid) +- p = &(*p)->rb_right; +- else +- return cur; +- } +- return NULL; +-} +- +-static bool insert_task(struct task_item *item, struct rb_root *root) +-{ +- struct rb_node **p = &root->rb_node; +- struct rb_node *parent = NULL; +- struct task_item *cur = NULL; 
+- +- while (*p) { +- parent = *p; +- +- cur = rb_entry(parent, struct task_item, node); +- if (!cur) +- return false; +- if (item->pid < cur->pid) { +- p = &(*p)->rb_left; +- } else if (item->pid > cur->pid) { +- p = &(*p)->rb_right; +- } else { +- pr_info("insert pid=%d,tgid=%d,name=%s,type=%d fail\n", +- item->pid, item->tgid, +- item->name, item->task_type); +- return false; +- } +- } +- rb_link_node(&item->node, parent, p); +- rb_insert_color(&item->node, root); +- return true; +-} +- +-void show_block_task(struct task_item *taskitem, struct task_struct *p) +-{ +- unsigned long last_arrival; +- unsigned long last_queued; +- +-#ifdef CONFIG_SCHED_INFO +- last_arrival = p->sched_info.last_arrival; +- last_queued = p->sched_info.last_queued; +-#else +- last_arrival = 0; +- last_queued = 0; +-#endif /* CONFIG_SCHED_INFO */ +- if (unlikely(p->flags & PF_FROZEN)) { +- if (taskitem) +- pr_err("name=%s,PID=%d,tgid=%d,tgname=%s," +- "FROZEN for %ds,type=%d,la%lu/lq%lu\n", +- p->comm, p->pid, p->tgid, +- p->group_leader->comm, +- taskitem->d_state_time * HEARTBEAT_TIME, +- taskitem->task_type, +- last_arrival, last_queued); +- else +- pr_err("name=%s,PID=%d,tgid=%d,tgname=%s," +- "just FROZE,la%lu/lq%lu\n", +- p->comm, p->pid, p->tgid, +- p->group_leader->comm, +- last_arrival, last_queued); +- } else { +- if (taskitem) +- pr_err("name=%s,PID=%d,tgid=%d,prio=%d,cpu=%d,tgname=%s," +- "type=%d,blocked for %ds,la%lu/lq%lu\n", +- taskitem->name, taskitem->pid, p->tgid, p->prio, +- task_cpu(p), p->group_leader->comm, taskitem->task_type, +- taskitem->d_state_time * HEARTBEAT_TIME, +- last_arrival, last_queued); +- else +- pr_err("name=%s,PID=%d,tgid=%d,prio=%d,cpu=%d," +- "tgname=%s,la%lu/lq%lu\n", +- p->comm, p->pid, p->tgid, p->prio, task_cpu(p), +- p->group_leader->comm, +- last_arrival, last_queued); +- +- sched_show_task(p); +- } +-} +- +-void htbase_show_state_filter(unsigned long state_filter) +-{ +- struct task_struct *g = NULL; +- struct task_struct *p = NULL; +- struct task_item *taskitem = NULL; +- +-#if BITS_PER_LONG == 32 +- pr_info(" task PC stack pid father\n"); +-#else +- pr_info(" task PC stack pid father\n"); +-#endif +- rcu_read_lock(); +- for_each_process_thread(g, p) { +- /* +- * reset the NMI-timeout, listing all files on a slow +- * console might take a lot of time: +- */ +- touch_nmi_watchdog(); +- if ((p->__state == TASK_RUNNING) || (p->__state & state_filter)) { +- spin_lock(&list_tasks_lock); +- taskitem = find_task(p->pid, &list_tasks); +- spin_unlock(&list_tasks_lock); +- show_block_task(taskitem, p); +- } +- } +- touch_all_softlockup_watchdogs(); +- rcu_read_unlock(); +- /* Show locks if hungtask happen */ +- if ((state_filter == TASK_UNINTERRUPTIBLE) || !state_filter) +- debug_show_all_locks(); +-} +- +-void hungtask_show_state_filter(unsigned long state_filter) +-{ +- pr_err("BinderChain_SysRq start\n"); +- htbase_show_state_filter(state_filter); +- pr_err("BinderChain_SysRq end\n"); +-} +- +-void do_dump_task(struct task_struct *task) +-{ +- sched_show_task(task); +- debug_show_held_locks(task); +-} +- +-void do_show_task(struct task_struct *task, unsigned int flag, int d_state_time) +-{ +- pr_err("%s, flag=%d\n", __func__, flag); +- rcu_read_lock(); +- if (!pid_alive(task)) { +- rcu_read_unlock(); +- return; +- } +- if (flag & (FLAG_DUMP_WHITE | FLAG_DUMP_APP)) { +- int cnt = 0; +- +- trace_sched_process_hang(task); +- cnt = d_state_time; +- pr_err("INFO: task %s:%d tgid:%d blocked for %ds in %s\n", +- task->comm, task->pid, task->tgid, +- (HEARTBEAT_TIME * cnt), +- 
(flag & FLAG_DUMP_WHITE) ? "whitelist" : "applist");
+- pr_err(" %s %s %.*s\n",
+- print_tainted(), init_utsname()->release,
+- (int)strcspn(init_utsname()->version, " "),
+- init_utsname()->version);
+- do_dump_task(task);
+- touch_nmi_watchdog();
+- if (flag & FLAG_DUMP_WHITE && (!dump_and_upload)) {
+- dump_and_upload++;
+- upload.pid = task->pid;
+- upload.tgid = task->tgid;
+- upload.duration = d_state_time;
+- memset(upload.name, 0, sizeof(upload.name));
+- strncpy(upload.name, task->comm, sizeof(upload.name));
+- upload.flag = flag;
+- if (task->flags & PF_FROZEN)
+- upload.flag = (upload.flag | FLAG_PF_FROZEN);
+- }
+- }
+- rcu_read_unlock();
+-}
+-
+-static void do_panic(void)
+-{
+- if (sysctl_hung_task_panic) {
+- trigger_all_cpu_backtrace();
+- panic("hungtask: blocked tasks");
+- }
+-}
+-
+-static void create_taskitem(struct task_item *taskitem,
+- struct task_struct *task)
+-{
+- taskitem->pid = task->pid;
+- taskitem->tgid = task->tgid;
+- memset(taskitem->name, 0, sizeof(taskitem->name));
+- strncpy(taskitem->name, task->comm, sizeof(taskitem->name));
+- taskitem->switch_count = task->nvcsw + task->nivcsw;
+- taskitem->dump_wa = 0; /* whitelist or applist task dump times */
+- taskitem->panic_wa = 0; /* whitelist or applist task panic times */
+- taskitem->d_state_time = -1;
+- taskitem->isdone_wa = true; /* whether a task in the white/app list has been dealt with */
+-}
+-
+-static bool refresh_task(struct task_item *taskitem, struct task_struct *task)
+-{
+- bool is_called = false;
+-
+- if (taskitem->switch_count != (task->nvcsw + task->nivcsw)) {
+- taskitem->switch_count = task->nvcsw + task->nivcsw;
+- is_called = true;
+- return is_called;
+- }
+- if (taskitem->task_type & TASK_TYPE_WHITE) {
+- taskitem->isdone_wa = false;
+- taskitem->dump_wa++;
+- taskitem->panic_wa++;
+- }
+- taskitem->d_state_time++;
+- if (task->flags & PF_FROZEN)
+- taskitem->task_type |= TASK_TYPE_FROZEN;
+- return is_called;
+-}
+-
+-static void remove_list_tasks(struct task_item *item)
+-{
+- rb_erase(&item->node, &list_tasks);
+- kfree(item);
+-}
+-
+-static void shrink_process_item(struct task_item *item, bool *is_finish)
+-{
+- if (remove_cnt >= MAX_REMOVE_LIST_NUM) {
+- int i;
+-
+- remove_list[remove_cnt++] = item;
+- for (i = 0; i < remove_cnt; i++)
+- remove_list_tasks(remove_list[i]);
+- remove_cnt = 0;
+- *is_finish = false;
+- } else {
+- remove_list[remove_cnt++] = item;
+- }
+-}
+-
+-static void shrink_list_tasks(void)
+-{
+- int i;
+- bool is_finish = false;
+- struct rb_node *n = NULL;
+- struct task_item *item = NULL;
+-
+- spin_lock(&list_tasks_lock);
+- while (!is_finish) {
+- is_finish = true;
+- for (n = rb_first(&list_tasks); n != NULL; n = rb_next(n)) {
+- item = rb_entry(n, struct task_item, node);
+- if (!item)
+- continue;
+- if (item->isdone_wa) {
+- shrink_process_item(item, &is_finish);
+- if (!is_finish)
+- break;
+- }
+- }
+- }
+- for (i = 0; i < remove_cnt; i++)
+- remove_list_tasks(remove_list[i]);
+- remove_cnt = 0;
+- spin_unlock(&list_tasks_lock);
+-}
+-
+-static void check_parameters(void)
+-{
+- if ((whitelist_dump_cnt < 0) ||
+- (whitelist_dump_cnt > DEFAULT_WHITE_DUMP_CNT))
+- whitelist_dump_cnt = DEFAULT_WHITE_DUMP_CNT;
+- if ((whitelist_panic_cnt <= 0) ||
+- (whitelist_panic_cnt > DEFAULT_WHITE_PANIC_CNT))
+- whitelist_panic_cnt = DEFAULT_WHITE_PANIC_CNT;
+-}
+-
+-static void send_work_handler(struct work_struct *data)
+-{
+-#ifdef CONFIG_DFX_ZEROHUNG
+- zrhung_send_event(HUNGTASK_DOMAIN, HUNGTASK_NAME,
+- report_buf_text);
+-#endif
+-}
+-
+-static void
htbase_report_zrhung_event(const char *report_buf_tag) +-{ +- htbase_show_state_filter(TASK_UNINTERRUPTIBLE); +- pr_err("%s end\n", report_buf_tag); +- schedule_work(&send_work); +- report_zrhung_id++; +-} +- +-static void htbase_report_zrhung(unsigned int event) +-{ +- bool report_load = false; +- char report_buf_tag[REPORT_MSGLENGTH] = {0}; +- char report_name[TASK_COMM_LEN + 1] = {0}; +- int report_pid = 0; +- int report_hungtime = 0; +- int report_tasktype = 0; +- +- if (!event) +- return; +- if (event & HUNGTASK_EVENT_WHITELIST) { +- snprintf(report_buf_tag, sizeof(report_buf_tag), +- "hungtask_whitelist_%d", report_zrhung_id); +- strncpy(report_name, upload.name, TASK_COMM_LEN); +- report_pid = upload.pid; +- report_tasktype = TASK_TYPE_WHITE; +- report_hungtime = whitelist_dump_cnt * HEARTBEAT_TIME; +- report_load = true; +- } else { +- pr_err("No such event report to zerohung!"); +- } +- pr_err("%s start\n", report_buf_tag); +- if (event & HUNGTASK_EVENT_WHITELIST) +- pr_err("report HUNGTASK_EVENT_WHITELIST to zrhung\n"); +- if (upload.flag & FLAG_PF_FROZEN) +- snprintf(report_buf_text, sizeof(report_buf_text), +- "Task %s(%s) pid %d type %d blocked %ds.", +- report_name, "FROZEN", report_pid, report_tasktype, report_hungtime); +- else +- snprintf(report_buf_text, sizeof(report_buf_text), +- "Task %s pid %d type %d blocked %ds.", +- report_name, report_pid, report_tasktype, report_hungtime); +- if (report_load) +- htbase_report_zrhung_event(report_buf_tag); +-} +- +-static int print_frozen_list_item(int pid) +-{ +- int tmp; +- +- if (!frozed_head) { +- tmp = snprintf(frozen_buf, FROZEN_BUF_LEN, "%s", "FROZEN Pid:"); +- if (tmp < 0) +- return -1; +- frozen_used += min(tmp, FROZEN_BUF_LEN - 1); +- frozed_head = true; +- } +- tmp = snprintf(frozen_buf + frozen_used, FROZEN_BUF_LEN - frozen_used, "%d,", +- pid); +- if (tmp < 0) +- return -1; +- frozen_used += min(tmp, FROZEN_BUF_LEN - frozen_used - 1); +- return frozen_used; +-} +- +-int dump_task_wa(struct task_item *item, int dump_cnt, +- struct task_struct *task, unsigned int flag) +-{ +- int ret = 0; +- +- if ((item->d_state_time > TWO_MINUTES) && +- (item->d_state_time % TWO_MINUTES != 0)) +- return ret; +- if ((item->d_state_time > HUNG_TEN_MINUTES) && +- (item->d_state_time % HUNG_TEN_MINUTES != 0)) +- return ret; +- if ((item->d_state_time > HUNG_ONE_HOUR) && +- (item->d_state_time % HUNG_ONE_HOUR != 0)) +- return ret; +- if (dump_cnt && (item->dump_wa > dump_cnt)) { +- item->dump_wa = 1; +- if (!dump_and_upload && task->flags & PF_FROZEN) { +- int tmp = print_frozen_list_item(item->pid); +- if (tmp < 0) +- return ret; +- if (tmp >= FROZEN_BUF_LEN - 1) { +- pr_err("%s", frozen_buf); +- memset(frozen_buf, 0, sizeof(frozen_buf)); +- frozen_used = 0; +- frozed_head = false; +- print_frozen_list_item(item->pid); +- } +- } else if (!dump_and_upload) { +- pr_err("Ready to dump a task %s\n", item->name); +- do_show_task(task, flag, item->d_state_time); +- ret++; +- } +- } +- return ret; +-} +- +-static void update_panic_task(struct task_item *item) +-{ +- if (upload.pid != 0) +- return; +- +- upload.pid = item->pid; +- upload.tgid = item->tgid; +- memset(upload.name, 0, sizeof(upload.name)); +- strncpy(upload.name, item->name, sizeof(upload.name)); +-} +- +-static void deal_task(struct task_item *item, struct task_struct *task, bool is_called) +-{ +- int any_dumped_num = 0; +- +- if (is_called) { +- item->dump_wa = 1; +- item->panic_wa = 1; +- item->d_state_time = 0; +- return; +- } +- if (item->task_type & TASK_TYPE_WHITE) +- 
any_dumped_num = dump_task_wa(item, whitelist_dump_cnt, task, +- FLAG_DUMP_WHITE); +- if (!is_called && (item->task_type & TASK_TYPE_WHITE)) { +- if (whitelist_panic_cnt && item->panic_wa > whitelist_panic_cnt) { +- pr_err("Task %s is causing panic\n", item->name); +- update_panic_task(item); +- item->panic_wa = 0; +- hung_task_must_panic++; +- } else { +- item->isdone_wa = false; +- } +- } +- if (item->isdone_wa) +- remove_list_tasks(item); +-} +- +-static bool check_conditions(struct task_struct *task, unsigned int task_type) +-{ +- bool no_check = true; +- +- if (task->flags & PF_FROZEN) +- return no_check; +- if (task_type & TASK_TYPE_WHITE && +- (whitelist_dump_cnt || whitelist_panic_cnt)) +- no_check = false; +- return no_check; +-} +- +-static void htbase_check_one_task(struct task_struct *t) +-{ +- unsigned int task_type = TASK_TYPE_IGNORE; +- unsigned long switch_count = t->nvcsw + t->nivcsw; +- struct task_item *taskitem = NULL; +- bool is_called = false; +- +- if (unlikely(!switch_count)) { +- pr_info("skip one's switch_count is zero\n"); +- return; +- } +- +- taskitem = find_task(t->pid, &list_tasks); +- if (taskitem) { +- if (check_conditions(t, taskitem->task_type)) +- return; +- is_called = refresh_task(taskitem, t); +- } else { +- task_type = get_task_type(t->pid, t->tgid, t->real_parent); +- if (check_conditions(t, task_type)) +- return; +- taskitem = kmalloc(sizeof(*taskitem), GFP_ATOMIC); +- if (!taskitem) { +- pr_err("kmalloc failed"); +- return; +- } +- memset(taskitem, 0, sizeof(*taskitem)); +- taskitem->task_type = task_type; +- create_taskitem(taskitem, t); +- is_called = refresh_task(taskitem, t); +- insert_task(taskitem, &list_tasks); +- } +- deal_task(taskitem, t, is_called); +-} +- +-static void htbase_pre_process(void) +-{ +- htbase_set_timeout_secs(sysctl_hung_task_timeout_secs); +- cur_heartbeat++; +- if ((cur_heartbeat % REFRESH_INTERVAL) == 0) +- do_refresh = 1; +- else +- do_refresh = 0; +- if (do_refresh || (cur_heartbeat < TIME_REFRESH_PIDS)) { +- refresh_whitelist_pids(); +- check_parameters(); +- } +-} +- +-static void htbase_post_process(void) +-{ +- struct rb_node *n = NULL; +- unsigned int hungevent = 0; +- +- if (frozen_used) { +- pr_err("%s", frozen_buf); +- memset(frozen_buf, 0, sizeof(frozen_buf)); +- frozen_used = 0; +- frozed_head = false; +- } +- if (dump_and_upload == HUNG_TASK_UPLOAD_ONCE) { +- hungevent |= HUNGTASK_EVENT_WHITELIST; +- dump_and_upload++; +- } +- if (dump_and_upload > 0) { +- time_since_upload++; +- if (time_since_upload > (whitelist_panic_cnt - whitelist_dump_cnt)) { +- dump_and_upload = 0; +- time_since_upload = 0; +- } +- } +- if (hung_task_must_panic) { +- htbase_show_state_filter(TASK_UNINTERRUPTIBLE); +- hung_task_must_panic = 0; +- pr_err("Task %s:%d blocked for %ds is causing panic\n", +- upload.name, upload.pid, +- whitelist_panic_cnt * HEARTBEAT_TIME); +- do_panic(); +- } +- htuser_post_process_userlist(); +- shrink_list_tasks(); +- for (n = rb_first(&list_tasks); n != NULL; n = rb_next(n)) { +- struct task_item *item = rb_entry(n, struct task_item, node); +- item->isdone_wa = true; +- } +- +- if (hungevent) +- htbase_report_zrhung(hungevent); +-} +- +-void htbase_check_tasks(unsigned long timeout) +-{ +- int max_count = PID_MAX_LIMIT; +- int batch_count = HUNG_TASK_BATCHING; +- struct task_struct *g = NULL; +- struct task_struct *t = NULL; +- +- if (!hungtask_enable) +- return; +- if (test_taint(TAINT_DIE) || did_panic) { +- pr_err("already in doing panic\n"); +- return; +- } +- +- htbase_pre_process(); +- 
rcu_read_lock(); +- for_each_process_thread(g, t) { +- if (!max_count--) +- goto unlock; +- if (!--batch_count) { +- batch_count = HUNG_TASK_BATCHING; +- if (!rcu_lock_break(g, t)) +- goto unlock; +- } +- if ((t->__state == TASK_UNINTERRUPTIBLE) || +- (t->__state == TASK_KILLABLE)) +- htbase_check_one_task(t); +- } +-unlock: +- rcu_read_unlock(); +- htbase_post_process(); +-} +- +-static ssize_t htbase_enable_show(struct kobject *kobj, +- struct kobj_attribute *attr, +- char *buf) +-{ +- if (hungtask_enable) +- return snprintf(buf, ENABLE_SHOW_LEN, "on\n"); +- else +- return snprintf(buf, ENABLE_SHOW_LEN, "off\n"); +-} +- +-static ssize_t htbase_enable_store(struct kobject *kobj, +- struct kobj_attribute *attr, +- const char *buf, size_t count) +-{ +- char tmp[6]; /* only storage "on" "off" "kick" and enter */ +- size_t len; +- char *p = NULL; +- +- if (!buf) +- return -EINVAL; +- if ((count < 2) || (count > (sizeof(tmp) - 1))) { +- pr_err("string too long or too short\n"); +- return -EINVAL; +- } +- +- p = memchr(buf, '\n', count); +- len = p ? (size_t)(p - buf) : count; +- memset(tmp, 0, sizeof(tmp)); +- strncpy(tmp, buf, len); +- if (!strncmp(tmp, "on", strlen(tmp))) { +- hungtask_enable = HT_ENABLE; +- pr_info("set hungtask_enable to enable\n"); +- } else if (!strncmp(tmp, "off", strlen(tmp))) { +- hungtask_enable = HT_DISABLE; +- pr_info("set hungtask_enable to disable\n"); +- } else { +- pr_err("only accept on or off\n"); +- } +- return (ssize_t) count; +-} +- +-static ssize_t htbase_monitorlist_show(struct kobject *kobj, +- struct kobj_attribute *attr, +- char *buf) +-{ +- int i; +- char *start = buf; +- char all_buf[WHITELIST_STORE_LEN - 20]; /* exclude extra header len 20*/ +- unsigned long len = 0; +- +- memset(all_buf, 0, sizeof(all_buf)); +- for (i = 0; i < WHITELIST_LEN; i++) { +- if (whitetmplist[i].pid > 0) { +- len += snprintf(all_buf + len, sizeof(all_buf) - len, +- "%s-%d,", whitetmplist[i].name, whitetmplist[i].pid); +- if (!(len < sizeof(all_buf))) { +- len = sizeof(all_buf) - 1; +- break; +- } +- } +- } +- if (len > 0) +- all_buf[len] = 0; +- if (whitelist_type == WHITE_LIST) +- buf += snprintf(buf, WHITELIST_STORE_LEN, "whitelist:[%s]\n", all_buf); +- else if (whitelist_type == BLACK_LIST) +- buf += snprintf(buf, WHITELIST_STORE_LEN, "blacklist:[%s]\n", all_buf); +- else +- buf += snprintf(buf, WHITELIST_STORE_LEN, "\n"); +- return buf - start; +-} +- +-static void htbase_monitorlist_update(char **cur) +-{ +- int index = 0; +- char *token = NULL; +- +- hashlist_clear(whitelist, WHITELIST_LEN); +- memset(whitetmplist, 0, sizeof(whitetmplist)); +- /* generate the new whitelist */ +- for (; ; ) { +- token = strsep(cur, ","); +- if (token && strlen(token)) { +- strncpy(whitetmplist[index].name, token, TASK_COMM_LEN); +- if (strlen(whitetmplist[index].name) > 0) +- whitelist_empty = false; +- index++; +- if (index >= WHITELIST_LEN) +- break; +- } +- if (!(*cur)) +- break; +- } +-} +- +-/* +- * monitorlist_store - Called when 'write/echo' method is +- * used on entry '/sys/kernel/hungtask/monitorlist'. 
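+- * For example (process names are illustrative):
+- * echo "whitelist,system_server,surfaceflinger" > /sys/kernel/hungtask/monitorlist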
+- */
+-static ssize_t htbase_monitorlist_store(struct kobject *kobj,
+- struct kobj_attribute *attr,
+- const char *buf, size_t n)
+-{
+- size_t len;
+- char *p = NULL;
+- char all_buf[WHITELIST_STORE_LEN];
+- char *cur = all_buf;
+-
+-
+- if ((n < 2) || (n > (sizeof(all_buf) - 1))) {
+- pr_err("whitelist input string is illegal\n");
+- return -EINVAL;
+- }
+- if (!buf)
+- return -EINVAL;
+- /*
+- * input format:
+- * write /sys/kernel/hungtask/monitorlist "whitelist,
+- * system_server,surfaceflinger"
+- */
+- p = memchr(buf, '\n', n);
+- len = p ? (size_t)(p - buf) : n; /* exclude the '\n' */
+-
+- memset(all_buf, 0, sizeof(all_buf));
+- len = len > WHITELIST_STORE_LEN ? WHITELIST_STORE_LEN : len;
+- strncpy(all_buf, buf, len);
+- p = strsep(&cur, ",");
+- if (!cur) {
+- pr_err("string is not correct\n");
+- return -EINVAL;
+- }
+- if (!strncmp(p, "whitelist", n)) {
+- whitelist_type = WHITE_LIST;
+- } else {
+- if (!strncmp(p, "blacklist", n))
+- pr_err("blacklist is not supported\n");
+- else
+- pr_err("wrong list type is set\n");
+- return -EINVAL;
+- }
+- if (!strlen(cur)) {
+- pr_err("at least one process needs to be set\n");
+- return -EINVAL;
+- }
+- pr_err("whitelist is %s\n", cur);
+-
+- htbase_monitorlist_update(&cur);
+- /* check again in case user input "whitelist,,,,,," */
+- if (whitelist_empty) {
+- pr_err("at least one process needs to be set\n");
+- return -EINVAL;
+- }
+- return (ssize_t) n;
+-}
+-
+-/* used for sysctl at "/proc/sys/kernel/hung_task_timeout_secs" */
+-void htbase_set_timeout_secs(unsigned long new_hungtask_timeout_secs)
+-{
+- if ((new_hungtask_timeout_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) ||
+- (new_hungtask_timeout_secs % HEARTBEAT_TIME))
+- return;
+- hungtask_timeout_secs = new_hungtask_timeout_secs;
+- /*
+- * if the user changes the panic timeout value, we sync it to the
+- * dump value by default; the user can set it differently
+- */
+- whitelist_panic_cnt = (int)(hungtask_timeout_secs / HEARTBEAT_TIME);
+- if (whitelist_panic_cnt > THIRTY_SECONDS)
+- whitelist_dump_cnt = whitelist_panic_cnt / HT_DUMP_IN_PANIC_LOOSE;
+- else
+- whitelist_dump_cnt = whitelist_panic_cnt / HT_DUMP_IN_PANIC_STRICT;
+-}
+-
+-void htbase_set_panic(int new_did_panic)
+-{
+- did_panic = new_did_panic;
+-}
+-
+-static struct kobj_attribute timeout_attribute = {
+- .attr = {
+- .name = "enable",
+- .mode = 0640,
+- },
+- .show = htbase_enable_show,
+- .store = htbase_enable_store,
+-};
+-
+-static struct kobj_attribute monitorlist_attr = {
+- .attr = {
+- .name = "monitorlist",
+- .mode = 0640,
+- },
+- .show = htbase_monitorlist_show,
+- .store = htbase_monitorlist_store,
+-};
+-
+-#ifdef CONFIG_DFX_HUNGTASK_USER
+-static struct kobj_attribute userlist_attr = {
+- .attr = {
+- .name = "userlist",
+- .mode = 0640,
+- },
+- .show = htuser_list_show,
+- .store = htuser_list_store,
+-};
+-#endif
+-
+-static struct attribute *attrs[] = {
+- &timeout_attribute.attr,
+- &monitorlist_attr.attr,
+-#ifdef CONFIG_DFX_HUNGTASK_USER
+- &userlist_attr.attr,
+-#endif
+- NULL
+-};
+-
+-static struct attribute_group hungtask_attr_group = {
+- .attrs = attrs,
+-};
+-
+-static struct kobject *hungtask_kobj;
+-int htbase_create_sysfs(void)
+-{
+- int i;
+- int ret;
+-
+- /* sleep 1000ms and wait for /sys/kernel to be ready */
+- while (!kernel_kobj)
+- msleep(1000);
+-
+- /* Create kobject named "hungtask" located at /sys/kernel/hungtask */
+- hungtask_kobj = kobject_create_and_add("hungtask", kernel_kobj);
+- if (!hungtask_kobj)
+- return -ENOMEM;
+- ret = sysfs_create_group(hungtask_kobj, &hungtask_attr_group);
+- if
(ret) +- kobject_put(hungtask_kobj); +- +- for (i = 0; i < WHITELIST_LEN; i++) +- INIT_HLIST_HEAD(&whitelist[i]); +- memset(whitetmplist, 0, sizeof(whitetmplist)); +- +- INIT_WORK(&send_work, send_work_handler); +- +- return ret; +-} +diff --git a/drivers/staging/hungtask/hungtask_user.c b/drivers/staging/hungtask/hungtask_user.c +deleted file mode 100644 +index b66905672..000000000 +--- a/drivers/staging/hungtask/hungtask_user.c ++++ /dev/null +@@ -1,260 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#define pr_fmt(fmt) "hungtask_user " fmt +- +-#include +-#include +-#include +-#include +-#include +- +-#include +- +-#define CMD_MIN_LEN 3 +-#define CMD_MAX_LEN 20 +-#define USERLIST_NUM 10 +-#define MAX_USER_TIMEOUT 120 +-#define MAX_SHOW_LEN 512 +- +-struct user_item { +- pid_t pid; +- int cur_cnt; +- int panic_cnt; +-}; +- +-static struct user_item userlist[USERLIST_NUM]; +-static int userlist_count; +-static DEFINE_SPINLOCK(userlist_lock); +-static bool is_registered; +-static bool need_panic; +-static bool need_dump; +-static int block_time; +-static int block_pid; +- +-static void htuser_show_task(int pid) +-{ +- struct task_struct *p = NULL; +- +- p = pid_task(find_vpid(pid), PIDTYPE_PID); +- if (p == NULL) { +- pr_err("can not find pid %d\n", pid); +- return; +- } +- +- if (p->flags & PF_FROZEN) { +- pr_info("process %d is frozen\n", pid); +- return; +- } +- if (p->__state == TASK_UNINTERRUPTIBLE) { +- pr_err("UserList_KernelStack start\n"); +- sched_show_task(p); +- pr_err("UserList_KernelStack end\n"); +- } +-} +- +-static void htuser_list_insert(int pid, int count) +-{ +- spin_lock(&userlist_lock); +- if (userlist_count >= USERLIST_NUM) { +- pr_err("list is full\n"); +- spin_unlock(&userlist_lock); +- return; +- } +- userlist[userlist_count].pid = pid; +- userlist[userlist_count].cur_cnt = 0; +- userlist[userlist_count].panic_cnt = count; +- userlist_count++; +- spin_unlock(&userlist_lock); +-} +- +-static int htuser_list_remove(int pid) +-{ +- int i; +- +- spin_lock(&userlist_lock); +- for (i = 0; i < userlist_count; i++) { +- if (userlist[i].pid == pid) { +- if (i == userlist_count - 1) { +- memset(&userlist[i], 0, sizeof(userlist[i])); +- } else { +- int len = sizeof(userlist[0]) * (userlist_count - i - 1); +- memmove(&userlist[i], &userlist[i + 1], len); +- } +- userlist_count--; +- spin_unlock(&userlist_lock); +- return 0; +- } +- } +- spin_unlock(&userlist_lock); +- return -ENOENT; +-} +- +-static void htuser_list_update(void) +-{ +- int i; +- +- need_panic = false; +- need_dump = false; +- spin_lock(&userlist_lock); +- for (i = 0; i < userlist_count; i++) { +- userlist[i].cur_cnt++; +- if ((userlist[i].cur_cnt >= userlist[i].panic_cnt) || +- (userlist[i].cur_cnt == userlist[i].panic_cnt / 2)) { +- htuser_show_task(userlist[i].pid); +- pr_err("process %d not scheduled for %ds\n", +- userlist[i].pid, +- userlist[i].cur_cnt * HEARTBEAT_TIME); +- } +- if (userlist[i].cur_cnt == userlist[i].panic_cnt) { +- need_dump = true; +- need_panic = true; +- block_time = userlist[i].cur_cnt * HEARTBEAT_TIME; +- block_pid = userlist[i].pid; +- } +- } +- spin_unlock(&userlist_lock); +-} +- +-static void htuser_list_kick(int pid) +-{ +- int i; +- +- spin_lock(&userlist_lock); +- for (i = 0; i < userlist_count; i++) { +- if (userlist[i].pid == pid) { +- userlist[i].cur_cnt = 0; +- spin_unlock(&userlist_lock); +- return; +- } +- } +- spin_unlock(&userlist_lock); +-} +- +-void 
htuser_post_process_userlist(void)
+-{
+- htuser_list_update();
+- if (need_dump) {
+- pr_err("print all cpu stack and D state stack\n");
+- hungtask_show_state_filter(TASK_UNINTERRUPTIBLE);
+- }
+- if (need_panic)
+- panic("UserList Process %d blocked for %ds causing panic", block_pid, block_time);
+-}
+-
+-static int htuser_process_notifier(struct notifier_block *self,
+- unsigned long cmd, void *v)
+-{
+- struct task_struct *task = v;
+-
+- if (task == NULL)
+- return NOTIFY_OK;
+-
+- if ((task->tgid == task->pid) && (!htuser_list_remove(task->tgid)))
+- pr_err("remove succeeded because process %d died\n", task->tgid);
+-
+- return NOTIFY_OK;
+-}
+-
+-static struct notifier_block htuser_process_notify = {
+- .notifier_call = htuser_process_notifier,
+-};
+-
+-ssize_t htuser_list_show(struct kobject *kobj,
+- struct kobj_attribute *attr, char *buf)
+-{
+- int i;
+- char tmp[MAX_SHOW_LEN] = {0};
+- int len = 0;
+-
+- len += snprintf(tmp + len, MAX_SHOW_LEN - len,
+- " Pid Current(sec) Expired(sec)\n");
+-
+- spin_lock(&userlist_lock);
+- for (i = 0; i < userlist_count; i++) {
+- len += snprintf(tmp + len, MAX_SHOW_LEN - len,
+- "%5d %5d %5d", userlist[i].pid,
+- userlist[i].cur_cnt * HEARTBEAT_TIME,
+- userlist[i].panic_cnt * HEARTBEAT_TIME);
+- if (len >= MAX_SHOW_LEN) {
+- len = MAX_SHOW_LEN - 1;
+- break;
+- }
+- }
+- spin_unlock(&userlist_lock);
+- pr_info("%s\n", tmp);
+- strncpy(buf, tmp, len);
+-
+- return len;
+-}
+-
+-static int htuser_list_store_on(char *tmp, size_t len, int pid)
+-{
+- unsigned long sec = 0;
+-
+- if (kstrtoul(tmp + 3, 10, &sec)) {
+- pr_err("invalid timeout value\n");
+- return -EINVAL;
+- }
+- if ((sec > MAX_USER_TIMEOUT) || !sec) {
+- pr_err("invalid timeout value, should be in 1-%d\n", MAX_USER_TIMEOUT);
+- return -EINVAL;
+- }
+- if (sec % HEARTBEAT_TIME) {
+- pr_err("invalid timeout value, should be divisible by %d\n", HEARTBEAT_TIME);
+- return -EINVAL;
+- }
+- pr_info("process %d set to enable, timeout=%ld\n", pid, sec);
+- htuser_list_insert(pid, sec / HEARTBEAT_TIME);
+- if (!is_registered) {
+- profile_event_register(PROFILE_TASK_EXIT,
+- &htuser_process_notify);
+- is_registered = true;
+- }
+-
+- return 0;
+-}
+-
+-ssize_t htuser_list_store(struct kobject *kobj,
+- struct kobj_attribute *attr,
+- const char *buf, size_t count)
+-{
+- char tmp[CMD_MAX_LEN]; /* on/off/kick */
+- size_t len;
+- char *p = NULL;
+- int pid = current->tgid;
+- int uid = current->cred->euid.val;
+-
+- if (uid >= 10000)
+- pr_err("non-system process %d(uid=%d) cannot be added to hungtask userlist\n",
+- pid, uid);
+- if ((count < CMD_MIN_LEN) || (count > CMD_MAX_LEN)) {
+- pr_err("string too long or too short\n");
+- return -EINVAL;
+- }
+- if (!buf)
+- return -EINVAL;
+-
+- memset(tmp, 0, sizeof(tmp));
+- p = memchr(buf, '\n', count);
+- len = p ?
(size_t)(p - buf) : count;
+- strncpy(tmp, buf, len);
+-
+- if (strncmp(tmp, "on", CMD_MIN_LEN) == 0) {
+- if (htuser_list_store_on(tmp, len, pid))
+- return -EINVAL;
+- } else if (unlikely(strncmp(tmp, "off", CMD_MIN_LEN) == 0)) {
+- pr_info("process %d set to disable\n", pid);
+- if (!htuser_list_remove(pid))
+- pr_err("remove succeeded because process %d called off\n", pid);
+- } else if (likely(strncmp(tmp, "kick", CMD_MIN_LEN) == 0)) {
+- pr_info("process %d is kicked\n", pid);
+- htuser_list_kick(pid);
+- } else {
+- pr_err("only accept on off or kick\n");
+- }
+- return (ssize_t)count;
+-}
+-
+diff --git a/drivers/staging/hungtask/hungtask_user.h b/drivers/staging/hungtask/hungtask_user.h
+deleted file mode 100644
+index 17ea7212b..000000000
+--- a/drivers/staging/hungtask/hungtask_user.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved.
+- */
+-
+-#ifndef DFX_HUNGTASK_USER_H
+-#define DFX_HUNGTASK_USER_H
+-
+-#include
+-
+-#ifdef CONFIG_DFX_HUNGTASK_USER
+-void htuser_post_process_userlist(void);
+-ssize_t htuser_list_store(struct kobject *kobj,
+- struct kobj_attribute *attr, const char *buf, size_t count);
+-ssize_t htuser_list_show(struct kobject *kobj,
+- struct kobj_attribute *attr, char *buf);
+-#else
+-static inline void htuser_post_process_userlist(void)
+-{
+-}
+-
+-static inline ssize_t htuser_list_store(struct kobject *kobj,
+- struct kobj_attribute *attr,
+- const char *buf, size_t count)
+-{
+- return 0;
+-}
+-static inline ssize_t htuser_list_show(struct kobject *kobj,
+- struct kobj_attribute *attr,
+- char *buf)
+-{
+- return 0;
+-}
+-
+-#endif
+-
+-#endif /* DFX_HUNGTASK_USER_H */
+diff --git a/drivers/staging/zerohung/Kconfig b/drivers/staging/zerohung/Kconfig
+deleted file mode 100644
+index 913d28efb..000000000
+--- a/drivers/staging/zerohung/Kconfig
++++ /dev/null
+@@ -1,7 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-config DFX_ZEROHUNG
+- bool "zerohung driver"
+- default n
+- depends on HISYSEVENT
+- help
+- This feature supports catching hung logs
+diff --git a/drivers/staging/zerohung/Makefile b/drivers/staging/zerohung/Makefile
+deleted file mode 100644
+index 3727a0e91..000000000
+--- a/drivers/staging/zerohung/Makefile
++++ /dev/null
+@@ -1,2 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_DFX_ZEROHUNG) += zrhung_event.o watchpoint/
+diff --git a/drivers/staging/zerohung/watchpoint/Makefile b/drivers/staging/zerohung/watchpoint/Makefile
+deleted file mode 100644
+index 1cb8d7f99..000000000
+--- a/drivers/staging/zerohung/watchpoint/Makefile
++++ /dev/null
+@@ -1,2 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_DFX_ZEROHUNG) += hung_wp_screen.o
+diff --git a/drivers/staging/zerohung/watchpoint/hung_wp_screen.c b/drivers/staging/zerohung/watchpoint/hung_wp_screen.c
+deleted file mode 100644
+index 3b5f2d6da..000000000
+--- a/drivers/staging/zerohung/watchpoint/hung_wp_screen.c
++++ /dev/null
+@@ -1,299 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved.
+- */ +- +-#define pr_fmt(fmt) "zrhung " fmt +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define TIME_CONVERT_UNIT 1000 +-#define DEFAULT_TIMEOUT 10 +- +-#define LPRESSEVENT_TIME 5 +-#define POWERKEYEVENT_MAX_COUNT 10 +-#define POWERKEYEVENT_DEFAULT_COUNT 3 +-#define POWERKEYEVENT_DEFAULT_TIMEWINDOW 5 +-#define POWERKEYEVENT_DEFAULT_LIMIT_MS 300 +-#define POWERKEYEVENT_DEFAULT_REPORT_MIN 2 +-#define POWERKEYEVENT_TIME_LEN (POWERKEYEVENT_MAX_COUNT + 2) +- +-struct hung_wp_screen_data { +- struct timer_list timer; +- struct timer_list long_press_timer; +- struct workqueue_struct *workq; +- struct work_struct send_work; +- spinlock_t lock; +- int fb_blank; +- int check_id; +- int tag_id; +-}; +- +-static bool init_done; +-static struct hung_wp_screen_data g_hung_data; +-static unsigned int lastreport_time; +-static unsigned int lastprkyevt_time; +-static unsigned int powerkeyevent_time[POWERKEYEVENT_TIME_LEN] = {0}; +-static unsigned int newevt; +-static unsigned int headevt; +-static int *check_off_point; +-struct work_struct powerkeyevent_sendwork; +-struct work_struct lpressevent_sendwork; +-static struct notifier_block hung_wp_screen_setblank_ncb; +- +-static void zrhung_lpressevent_send_work(struct work_struct *work) +-{ +- pr_info("LONGPRESS_EVENT send to zerohung\n"); +- zrhung_send_event(WP_SCREEN_DOMAIN, WP_SCREEN_LPRESS_NAME, "none"); +-} +- +-static void zrhung_wp_lpress_send(struct timer_list *t) +-{ +- int *check_off = check_off_point; +- +- del_timer(&g_hung_data.long_press_timer); +- *check_off = 0; +- queue_work(g_hung_data.workq, &lpressevent_sendwork); +-} +- +-static void zrhung_powerkeyevent_send_work(struct work_struct *work) +-{ +- pr_info("POWERKEY_EVENT send to zerohung\n"); +- zrhung_send_event(WP_SCREEN_DOMAIN, WP_SCREEN_PWK_NAME, "none"); +-} +- +-static void zrhung_powerkeyevent_report(unsigned int dur, unsigned int end) +-{ +- unsigned int send_interval; +- +- send_interval = end > lastreport_time ? +- ((end - lastreport_time) / TIME_CONVERT_UNIT) : POWERKEYEVENT_DEFAULT_REPORT_MIN; +- if (unlikely(lastreport_time == 0)) { +- lastreport_time = end; +- } else if (send_interval < POWERKEYEVENT_DEFAULT_REPORT_MIN) { +- pr_info("powerkeyevent too fast to report: %d\n", end); +- return; +- } +- lastreport_time = end; +- queue_work(g_hung_data.workq, &powerkeyevent_sendwork); +-} +- +-static unsigned int refresh_prkyevt_index(unsigned int event) +-{ +- unsigned int evt = event; +- +- if (evt < POWERKEYEVENT_MAX_COUNT) +- evt++; +- else +- evt = 0; +- return evt; +-} +- +-static void zrhung_new_powerkeyevent(unsigned int tmescs) +-{ +- unsigned int prkyevt_interval; +- unsigned int evt_index; +- int diff; +- +- powerkeyevent_time[newevt] = tmescs; +- evt_index = (newevt >= headevt) ? 
+- (newevt - headevt) : (newevt + POWERKEYEVENT_MAX_COUNT + 1 - headevt); +- if (evt_index < (POWERKEYEVENT_DEFAULT_COUNT - 1)) { +- pr_info("powerkeyevent not enough-%d\n", POWERKEYEVENT_DEFAULT_COUNT); +- } else { +- diff = powerkeyevent_time[newevt] - powerkeyevent_time[headevt]; +- if (diff < 0) { +- pr_info("powerkeyevent sth wrong in record time\n"); +- return; +- } +- +- prkyevt_interval = (unsigned int)(diff / TIME_CONVERT_UNIT); +- if (prkyevt_interval <= POWERKEYEVENT_DEFAULT_TIMEWINDOW) +- zrhung_powerkeyevent_report(prkyevt_interval, tmescs); +- headevt = refresh_prkyevt_index(headevt); +- } +- newevt = refresh_prkyevt_index(newevt); +-} +- +-static void zrhung_powerkeyevent_handler(void) +-{ +- unsigned int curtime; +- unsigned long curjiff; +- +- pr_info("powerkeyevent check start"); +- curjiff = jiffies; +- curtime = jiffies_to_msecs(curjiff); +- if (unlikely(lastprkyevt_time > curtime)) { +- pr_info("powerkeyevent check but time overflow"); +- lastprkyevt_time = curtime; +- return; +- } else if ((curtime - lastprkyevt_time) < POWERKEYEVENT_DEFAULT_LIMIT_MS) { +- pr_info("powerkeyevent user press powerkey too fast-time:%d", curtime); +- return; +- } +- lastprkyevt_time = curtime; +- zrhung_new_powerkeyevent(curtime); +-} +- +-static int hung_wp_screen_setblank(struct notifier_block *self, unsigned long event, void *data) +-{ +- unsigned long flags; +- struct fb_event *evdata = data; +- int blank; +- +- if (!init_done) +- return 0; +- +- if (event != FB_EVENT_BLANK) +- return 0; +- +- blank = *(int *)evdata->data; +- spin_lock_irqsave(&(g_hung_data.lock), flags); +- g_hung_data.fb_blank = blank; +- if (((g_hung_data.check_id == ZRHUNG_WP_SCREENON) && (blank == 0)) || +- ((g_hung_data.check_id == ZRHUNG_WP_SCREENOFF) && (blank != 0))) { +- pr_info("check_id=%d, blank=%d", g_hung_data.check_id, g_hung_data.fb_blank); +- del_timer(&g_hung_data.timer); +- g_hung_data.check_id = ZRHUNG_WP_NONE; +- } +- spin_unlock_irqrestore(&(g_hung_data.lock), flags); +- +- return 0; +-} +- +-static void hung_wp_screen_send_work(struct work_struct *work) +-{ +- unsigned long flags = 0; +- +- show_state_filter(TASK_UNINTERRUPTIBLE); +- +- if (g_hung_data.check_id == 1) +- zrhung_send_event(WP_SCREEN_DOMAIN, WP_SCREEN_ON_NAME, "none"); +- else +- zrhung_send_event(WP_SCREEN_DOMAIN, WP_SCREEN_OFF_NAME, "none"); +- pr_info("send event: %d\n", g_hung_data.check_id); +- spin_lock_irqsave(&(g_hung_data.lock), flags); +- g_hung_data.check_id = ZRHUNG_WP_NONE; +- spin_unlock_irqrestore(&(g_hung_data.lock), flags); +-} +- +-static void hung_wp_screen_send(struct timer_list *t) +-{ +- del_timer(&g_hung_data.timer); +- pr_info("hung_wp_screen_%d end\n", g_hung_data.tag_id); +- queue_work(g_hung_data.workq, &g_hung_data.send_work); +-} +- +-static void hung_wp_screen_start(int check_id) +-{ +- if (g_hung_data.check_id != ZRHUNG_WP_NONE) { +- pr_info("already in check_id: %d\n", g_hung_data.check_id); +- return; +- } +- +- g_hung_data.check_id = check_id; +- if (timer_pending(&g_hung_data.timer)) +- del_timer(&g_hung_data.timer); +- +- g_hung_data.timer.expires = jiffies + msecs_to_jiffies(DEFAULT_TIMEOUT * TIME_CONVERT_UNIT); +- add_timer(&g_hung_data.timer); +- pr_info("going to check ID=%d timeout=%d\n", check_id, DEFAULT_TIMEOUT); +-} +- +-void hung_wp_screen_powerkey_ncb(int event) +-{ +- static int check_off; +- unsigned long flags = 0; +- +- if (!init_done) +- return; +- +- spin_lock_irqsave(&(g_hung_data.lock), flags); +- if (event == WP_SCREEN_PWK_PRESS) { +- pr_info("hung_wp_screen_%d start! 
fb_blank=%d", +- ++g_hung_data.tag_id, g_hung_data.fb_blank); +- check_off = 0; +- if (g_hung_data.fb_blank != 0) { +- hung_wp_screen_start(ZRHUNG_WP_SCREENON); +- } else { +- check_off = 1; +- pr_info("start longpress test timer\n"); +- check_off_point = &check_off; +- g_hung_data.long_press_timer.expires = jiffies + +- msecs_to_jiffies(LPRESSEVENT_TIME * TIME_CONVERT_UNIT); +- if (!timer_pending(&g_hung_data.long_press_timer)) +- add_timer(&g_hung_data.long_press_timer); +- } +- zrhung_powerkeyevent_handler(); +- } else if (check_off) { +- check_off = 0; +- del_timer(&g_hung_data.long_press_timer); +- if (event == WP_SCREEN_PWK_RELEASE && g_hung_data.fb_blank == 0) +- hung_wp_screen_start(ZRHUNG_WP_SCREENOFF); +- } +- spin_unlock_irqrestore(&(g_hung_data.lock), flags); +-} +- +-static int __init hung_wp_screen_init(void) +-{ +- init_done = false; +- pr_info("%s start\n", __func__); +- g_hung_data.fb_blank = 0; +- g_hung_data.tag_id = 0; +- g_hung_data.check_id = ZRHUNG_WP_NONE; +- spin_lock_init(&(g_hung_data.lock)); +- +- timer_setup(&g_hung_data.timer, hung_wp_screen_send, 0); +- timer_setup(&g_hung_data.long_press_timer, zrhung_wp_lpress_send, 0); +- +- g_hung_data.workq = create_workqueue("hung_wp_screen_workq"); +- if (g_hung_data.workq == NULL) { +- pr_err("create workq failed\n"); +- return -EFAULT; +- } +- INIT_WORK(&g_hung_data.send_work, hung_wp_screen_send_work); +- INIT_WORK(&powerkeyevent_sendwork, zrhung_powerkeyevent_send_work); +- INIT_WORK(&lpressevent_sendwork, zrhung_lpressevent_send_work); +- +- hung_wp_screen_setblank_ncb.notifier_call = hung_wp_screen_setblank; +- fb_register_client(&hung_wp_screen_setblank_ncb); +- +- init_done = true; +- pr_info("%s done\n", __func__); +- return 0; +-} +- +-static void __exit hung_wp_screen_exit(void) +-{ +- fb_unregister_client(&hung_wp_screen_setblank_ncb); +- +- cancel_work_sync(&lpressevent_sendwork); +- cancel_work_sync(&powerkeyevent_sendwork); +- cancel_work_sync(&g_hung_data.send_work); +- +- destroy_workqueue(g_hung_data.workq); +- +- del_timer_sync(&g_hung_data.timer); +- del_timer_sync(&g_hung_data.long_press_timer); +-} +- +-module_init(hung_wp_screen_init); +-module_exit(hung_wp_screen_exit); +- +-MODULE_AUTHOR("OHOS"); +-MODULE_DESCRIPTION("Reporting the frozen screen alarm event"); +-MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/zerohung/zrhung_event.c b/drivers/staging/zerohung/zrhung_event.c +deleted file mode 100644 +index 739d8bbbb..000000000 +--- a/drivers/staging/zerohung/zrhung_event.c ++++ /dev/null +@@ -1,61 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. 
+- */
+-
+-#define pr_fmt(fmt) "zrhung " fmt
+-
+-#include
+-#include
+-
+-#include
+-#include
+-
+-int zrhung_send_event_bbox(const char *domain, const char *event_name, const char *timestamp, const char *reset)
+-{
+- struct hiview_hisysevent *event = NULL;
+- int ret = 0;
+-
+- event = hisysevent_create(domain, event_name, FAULT);
+- if (!event) {
+- pr_err("failed to create event");
+- return -EINVAL;
+- }
+- ret = hisysevent_put_string(event, "BBOX_TIME", timestamp);
+- if (ret != 0) {
+- pr_err("failed to put BBOX_TIME to event, ret=%d", ret);
+- goto hisysevent_end;
+- }
+- ret = hisysevent_put_string(event, "BBOX_SYSRESET", reset);
+- if (ret != 0) {
+- pr_err("failed to put BBOX_SYSRESET to event, ret=%d", ret);
+- goto hisysevent_end;
+- }
+- ret = hisysevent_write(event);
+-
+-hisysevent_end:
+- hisysevent_destroy(&event);
+- return ret;
+-}
+-
+-int zrhung_send_event(const char *domain, const char *event_name, const char *msg_buf)
+-{
+- struct hiview_hisysevent *event = NULL;
+- int ret = 0;
+-
+- event = hisysevent_create(domain, event_name, FAULT);
+- if (!event) {
+- pr_err("failed to create event");
+- return -EINVAL;
+- }
+- ret = hisysevent_put_string(event, "MSG", msg_buf);
+- if (ret != 0) {
+- pr_err("failed to put string to event, ret=%d", ret);
+- goto hisysevent_end;
+- }
+- ret = hisysevent_write(event);
+-
+-hisysevent_end:
+- hisysevent_destroy(&event);
+- return ret;
+-}
+diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
+index 322a543b8..d0f397c90 100644
+--- a/drivers/tee/optee/supp.c
++++ b/drivers/tee/optee/supp.c
+@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_supp *supp = &optee->supp;
+ struct optee_supp_req *req;
+- bool interruptable;
+ u32 ret;
+
+ /*
+@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ /*
+ * Wait for supplicant to process and return result, once we've
+ * returned from wait_for_completion(&req->c) successfully we have
+- * exclusive access again.
++ * exclusive access again. Allow the wait to be killable such that
++ * the wait doesn't turn into an indefinite state if the supplicant
++ * gets hung for some reason.
+ */
+- while (wait_for_completion_interruptible(&req->c)) {
++ if (wait_for_completion_killable(&req->c)) {
+ mutex_lock(&supp->mutex);
+- interruptable = !supp->ctx;
+- if (interruptable) {
+- /*
+- * There's no supplicant available and since the
+- * supp->mutex currently is held none can
+- * become available until the mutex released
+- * again.
+- *
+- * Interrupting an RPC to supplicant is only
+- * allowed as a way of slightly improving the user
+- * experience in case the supplicant hasn't been
+- * started yet. During normal operation the supplicant
+- * will serve all requests in a timely manner and
+- * interrupting then wouldn't make sense.
+- */ +- if (req->in_queue) { +- list_del(&req->link); +- req->in_queue = false; +- } ++ if (req->in_queue) { ++ list_del(&req->link); ++ req->in_queue = false; + } + mutex_unlock(&supp->mutex); +- +- if (interruptable) { +- req->ret = TEEC_ERROR_COMMUNICATION; +- break; +- } ++ req->ret = TEEC_ERROR_COMMUNICATION; + } + + ret = req->ret; +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c +index 2476774af..dad909547 100644 +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -587,7 +587,6 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id) + mutex_lock(&thermal_list_lock); + list_for_each_entry(tz, &thermal_tz_list, node) { + if (tz->id == id) { +- get_device(&tz->device); + match = tz; + break; + } +diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h +index cf88b7d0c..024e82ebf 100644 +--- a/drivers/thermal/thermal_core.h ++++ b/drivers/thermal/thermal_core.h +@@ -56,9 +56,6 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), + + struct thermal_zone_device *thermal_zone_get_by_id(int id); + +-DEFINE_CLASS(thermal_zone_get_by_id, struct thermal_zone_device *, +- if (_T) put_device(&_T->device), thermal_zone_get_by_id(id), int id) +- + struct thermal_attr { + struct device_attribute attr; + char name[THERMAL_NAME_LENGTH]; +diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c +index 8d07531d9..08bc46c3e 100644 +--- a/drivers/thermal/thermal_netlink.c ++++ b/drivers/thermal/thermal_netlink.c +@@ -450,6 +450,7 @@ static int thermal_genl_cmd_tz_get_id(struct param *p) + static int thermal_genl_cmd_tz_get_trip(struct param *p) + { + struct sk_buff *msg = p->msg; ++ struct thermal_zone_device *tz; + struct nlattr *start_trip; + struct thermal_trip trip; + int ret, i, id; +@@ -459,7 +460,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + +- CLASS(thermal_zone_get_by_id, tz)(id); ++ tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + +@@ -497,6 +498,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) + static int thermal_genl_cmd_tz_get_temp(struct param *p) + { + struct sk_buff *msg = p->msg; ++ struct thermal_zone_device *tz; + int temp, ret, id; + + if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) +@@ -504,7 +506,7 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p) + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + +- CLASS(thermal_zone_get_by_id, tz)(id); ++ tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + +@@ -522,6 +524,7 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p) + static int thermal_genl_cmd_tz_get_gov(struct param *p) + { + struct sk_buff *msg = p->msg; ++ struct thermal_zone_device *tz; + int id, ret = 0; + + if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) +@@ -529,7 +532,7 @@ static int thermal_genl_cmd_tz_get_gov(struct param *p) + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + +- CLASS(thermal_zone_get_by_id, tz)(id); ++ tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index bb31ac9ca..9f7ab0403 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -42,6 +42,10 @@ + #include + #include + ++#ifdef CONFIG_ARCH_BSP ++#include ++#endif ++ + #define UART_NR 14 + + #define SERIAL_AMBA_MAJOR 204 +@@ -53,6 +57,8 @@ + #define UART_DR_ERROR 
(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) + #define UART_DUMMY_DR_RX (1 << 16) + ++#define PL011_DMA_BUFFER_SIZE (PAGE_SIZE * 4) ++ + enum { + REG_DR, + REG_ST_DMAWM, +@@ -2794,6 +2800,19 @@ static const struct serial_rs485 pl011_rs485_supported = { + .delay_rts_after_send = 1, + }; + ++#ifdef CONFIG_ARCH_BSP ++static void try_deassert_uart_reset(struct amba_device *adev) ++{ ++ struct reset_control *uart_rst = NULL; ++ uart_rst = devm_reset_control_get(&adev->dev, "bsp_uart_rst"); ++ if (IS_ERR_OR_NULL(uart_rst)) ++ return; ++ /* deassert reset if "resets" property is set */ ++ dev_info(&adev->dev, "deassert reset\n"); ++ reset_control_deassert(uart_rst); ++} ++#endif ++ + static int pl011_probe(struct amba_device *dev, const struct amba_id *id) + { + struct uart_amba_port *uap; +@@ -2822,6 +2841,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) + uap->port.ops = &amba_pl011_pops; + uap->port.rs485_config = pl011_rs485_config; + uap->port.rs485_supported = pl011_rs485_supported; ++ ++#ifdef CONFIG_ARCH_BSP ++ try_deassert_uart_reset(dev); ++#endif + snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); + + if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) { +diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig +index 4eeb4d299..c7fa0ec74 100644 +--- a/drivers/usb/gadget/Kconfig ++++ b/drivers/usb/gadget/Kconfig +@@ -505,13 +505,6 @@ config USB_CONFIGFS_F_TCM + Both protocols can work on USB2.0 and USB3.0. + UAS utilizes the USB 3.0 feature called streams support. + +-config DRIVERS_HDF_USB_F_GENERIC +- bool "Enable F_GENERIC driver" +- default y +- depends on DRIVERS_HDF +- help +- Answer Y to choice HDF USB F_GENERIC driver. +- + source "drivers/usb/gadget/legacy/Kconfig" + + endif # USB_GADGET +diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c +index ed5a92c47..d87e9149d 100644 +--- a/drivers/usb/gadget/epautoconf.c ++++ b/drivers/usb/gadget/epautoconf.c +@@ -75,10 +75,24 @@ struct usb_ep *usb_ep_autoconfig_ss( + } + + /* Second, look at endpoints until an unclaimed one looks usable */ ++#ifdef CONFIG_ARCH_BSP ++ if ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { ++ list_for_each_entry_reverse(ep, &gadget->ep_list, ep_list) { ++ if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp)) ++ goto found_ep; ++ } ++ } else { ++ list_for_each_entry (ep, &gadget->ep_list, ep_list) { ++ if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp)) ++ goto found_ep; ++ } ++ } ++#else + list_for_each_entry (ep, &gadget->ep_list, ep_list) { + if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp)) + goto found_ep; + } ++#endif + + /* Fail */ + return NULL; +diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile +index 44e1e9941..87917a7d4 100644 +--- a/drivers/usb/gadget/function/Makefile ++++ b/drivers/usb/gadget/function/Makefile +@@ -52,8 +52,3 @@ usb_f_printer-y := f_printer.o + obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o + usb_f_tcm-y := f_tcm.o + obj-$(CONFIG_USB_F_TCM) += usb_f_tcm.o +-obj-$(CONFIG_USB_F_ACC) += usb_f_accessory.o +-usb_f_audio_source-y := f_audio_source.o +-obj-$(CONFIG_USB_F_AUDIO_SRC) += usb_f_audio_source.o +-usb_f_generic-y := f_generic.o +-obj-$(CONFIG_DRIVERS_HDF_USB_F_GENERIC) += usb_f_generic.o +diff --git a/drivers/usb/gadget/function/f_generic.c b/drivers/usb/gadget/function/f_generic.c +deleted file mode 100644 +index 34596696b..000000000 +--- 
a/drivers/usb/gadget/function/f_generic.c ++++ /dev/null +@@ -1,3789 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0+ +-/* +- * f_fs.c -- user mode file system API for USB composite function controllers +- * +- * Copyright (C) 2010 Samsung Electronics +- * Author: Michal Nazarewicz +- * +- * Based on inode.c (GadgetFS) which was: +- * Copyright (C) 2003-2004 David Brownell +- * Copyright (C) 2003 Agilent Technologies +- */ +- +-/* #define DEBUG */ +-/* #define VERBOSE_DEBUG */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "u_generic.h" +-#include "u_f.h" +-#include "u_os_desc.h" +-#include "configfs.h" +- +-#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */ +- +-/* Reference counter handling */ +-static void ffs_data_get(struct ffs_data *ffs); +-static void ffs_data_put(struct ffs_data *ffs); +-/* Creates new ffs_data object. */ +-static struct ffs_data *__must_check ffs_data_new(const char *dev_name) +- __attribute__((malloc)); +- +-/* Called with ffs->mutex held; take over ownership of data. */ +-static int __must_check +-__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); +-static int __must_check +-__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); +- +-/* The function structure ***************************************************/ +- +-struct ffs_ep; +- +-struct ffs_function { +- struct usb_configuration *conf; +- struct usb_gadget *gadget; +- struct ffs_data *ffs; +- +- struct ffs_ep *eps; +- u8 eps_revmap[16]; +- short *interfaces_nums; +- +- struct usb_function function; +-}; +-static struct ffs_function *ffs_func_from_usb(struct usb_function *f) +-{ +- return container_of(f, struct ffs_function, function); +-} +-static inline enum ffs_setup_state ffs_setup_state_clear_cancelled(struct ffs_data *ffs) +-{ +- return (enum ffs_setup_state) +- cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP); +-} +-static void ffs_func_eps_disable(struct ffs_function *func); +-static int __must_check ffs_func_eps_enable(struct ffs_function *func); +- +-static int ffs_func_bind(struct usb_configuration *, +- struct usb_function *); +-static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned); +-static void ffs_func_disable(struct usb_function *); +-static int ffs_func_setup(struct usb_function *, +- const struct usb_ctrlrequest *); +-static bool ffs_func_req_match(struct usb_function *, +- const struct usb_ctrlrequest *, +- bool config0); +-static void ffs_func_suspend(struct usb_function *); +-static void ffs_func_resume(struct usb_function *); +- +-static int ffs_func_revmap_ep(struct ffs_function *func, u8 num); +-static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf); +- +-/* The endpoints structures *************************************************/ +-struct ffs_ep { +- struct usb_ep *ep; /* P: ffs->eps_lock */ +- struct usb_request *req; /* P: epfile->mutex */ +- +- /* [0]: full speed, [1]: high speed, [2]: super speed */ +- struct usb_endpoint_descriptor *descs[3]; +- +- u8 num; +- +- int status; /* P: epfile->mutex */ +-}; +- +-struct ffs_epfile { +- /* Protects ep->ep and ep->req. */ +- struct mutex mutex; +- struct list_head memory_list; +- struct ffs_data *ffs; +- struct ffs_ep *ep; /* P: ffs->eps_lock */ +- /* +- * Buffer for holding data from partial reads which may happen since +- * we’re rounding user read requests to a multiple of a max packet size. 
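+- * For example, a 100-byte user read on an endpoint whose max packet
+- * size is 512 must be issued to the UDC as a 512-byte request;
+- * whatever arrives beyond the first 100 bytes is parked here for the
+- * caller's next read.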
+- * +- * The pointer is initialised with NULL value and may be set by +- * __ffs_epfile_read_data function to point to a temporary buffer. +- * +- * In normal operation, calls to __ffs_epfile_read_buffered will consume +- * data from said buffer and eventually free it. Importantly, while the +- * function is using the buffer, it sets the pointer to NULL. This is +- * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered +- * can never run concurrently (they are synchronised by epfile->mutex) +- * so the latter will not assign a new value to the pointer. +- * +- * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is +- * valid) and sets the pointer to READ_BUFFER_DROP value. This special +- * value is crux of the synchronisation between ffs_func_eps_disable and +- * __ffs_epfile_read_data. +- * +- * Once __ffs_epfile_read_data is about to finish it will try to set the +- * pointer back to its old value (as described above), but seeing as the +- * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free +- * the buffer. +- * +- * == State transitions == +- * +- * • ptr == NULL: (initial state) +- * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP +- * ◦ __ffs_epfile_read_buffered: nop +- * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf +- * ◦ reading finishes: n/a, not in ‘and reading’ state +- * • ptr == DROP: +- * ◦ __ffs_epfile_read_buffer_free: nop +- * ◦ __ffs_epfile_read_buffered: go to ptr == NULL +- * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop +- * ◦ reading finishes: n/a, not in ‘and reading’ state +- * • ptr == buf: +- * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP +- * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading +- * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered +- * is always called first +- * ◦ reading finishes: n/a, not in ‘and reading’ state +- * • ptr == NULL and reading: +- * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading +- * ◦ __ffs_epfile_read_buffered: n/a, mutex is held +- * ◦ __ffs_epfile_read_data: n/a, mutex is held +- * ◦ reading finishes and … +- * … all data read: free buf, go to ptr == NULL +- * … otherwise: go to ptr == buf and reading +- * • ptr == DROP and reading: +- * ◦ __ffs_epfile_read_buffer_free: nop +- * ◦ __ffs_epfile_read_buffered: n/a, mutex is held +- * ◦ __ffs_epfile_read_data: n/a, mutex is held +- * ◦ reading finishes: free buf, go to ptr == DROP +- */ +- struct ffs_buffer *read_buffer; +-#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN)) +- +- char name[MAX_NAMELEN]; +- dev_t devno; +- struct cdev cdev; +- struct device *device; +- +- unsigned char in; /* P: ffs->eps_lock */ +- unsigned char isoc; /* P: ffs->eps_lock */ +- +- struct kfifo reqEventFifo; +- wait_queue_head_t wait_que; +- +- unsigned char _pad; +-}; +- +-struct ffs_buffer { +- size_t length; +- char *data; +- char storage[]; +-}; +- +-/* ffs_io_data structure ***************************************************/ +- +-struct ffs_io_data { +- uint32_t aio; +- uint32_t read; +- uint32_t len; +- uint32_t timeout; +- uint64_t buf; +- uint32_t actual; +- int status; +- struct tasklet_struct task; +- struct usb_ep *ep; +- struct usb_request *req; +- struct ffs_epfile *epfile; +- struct ffs_data *ffs; +-}; +- +-struct ffs_desc_helper { +- struct ffs_data *ffs; +- unsigned interfaces_count; +- unsigned eps_count; +-}; +- +-static int __must_check ffs_epfiles_create(struct ffs_data *ffs); +-static void ffs_epfiles_destroy(struct 
ffs_epfile *epfiles, unsigned count); +- +-/* Devices management *******************************************************/ +- +-DEFINE_MUTEX(ffs_lock_adapter); +-EXPORT_SYMBOL_GPL(ffs_lock_adapter); +- +-static struct ffs_dev *_ffs_find_dev(const char *name); +-static struct ffs_dev *_ffs_alloc_dev(void); +-static void _ffs_free_dev(struct ffs_dev *dev); +-static void *ffs_acquire_dev(const char *dev_name); +-static void ffs_release_dev(struct ffs_data *ffs_data); +-static int ffs_ready(struct ffs_data *ffs); +-static void ffs_closed(struct ffs_data *ffs); +- +-/* Misc helper functions ****************************************************/ +- +-static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) +- __attribute__((warn_unused_result, nonnull)); +-static char *ffs_prepare_buffer(const char __user *buf, size_t len) +- __attribute__((warn_unused_result, nonnull)); +- +-struct class *ffs_class; +- +-static char *ffs_devnode(const struct device *dev, umode_t *mode) +-{ +- if (mode) +- *mode = 0666; +- return kasprintf(GFP_KERNEL, "functionfs/%s", dev_name(dev)); +-} +- +-/* Control file aka ep0 *****************************************************/ +-static struct ffs_memory *generic_find_ep0_memory_area(struct ffs_data *ffs, uint64_t buf, uint32_t len) +-{ +- struct ffs_memory *ffsm = NULL; +- struct ffs_memory *iter = NULL; +- uint64_t buf_start = buf; +- unsigned long flags; +- +- spin_lock_irqsave(&ffs->mem_lock, flags); +- list_for_each_entry(iter, &ffs->memory_list, memlist) { +- if (buf_start >= iter->vm_start && +- buf_start < iter->vm_start + iter->size) { +- if (len <= iter->vm_start + iter->size - buf_start) { +- ffsm = iter; +- break; +- } +- } +- } +- spin_unlock_irqrestore(&ffs->mem_lock, flags); +- return ffsm; +-} +- +-static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req) +-{ +- struct ffs_data *ffs = req->context; +- +- complete(&ffs->ep0req_completion); +- +- ffs->setup_state = FFS_NO_SETUP; +-} +- +-static void ffs_ep0_async_io_complete(struct usb_ep *_ep, struct usb_request *req) +-{ +- struct ffs_io_data *io_data = req->context; +- struct ffs_data *ffs = io_data->ffs; +- ENTER(); +- +- io_data->status = io_data->req->status; +- io_data->actual = io_data->req->actual; +- kfifo_in(&ffs->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent)); +- wake_up_all(&ffs->wait_que); +- +- list_del(&req->list); +- usb_ep_free_request(io_data->ep, io_data->req); +- kfree(io_data); +- +-} +- +-static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) +- __releases(&ffs->ev.waitq.lock) +-{ +- struct usb_request *req = ffs->ep0req; +- int ret; +- +- req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); +- +- spin_unlock_irq(&ffs->ev.waitq.lock); +- +- req->buf = data; +- req->length = len; +- +- /* +- * UDC layer requires to provide a buffer even for ZLP, but should +- * not use it at all. Let's provide some poisoned pointer to catch +- * possible bug in the driver. +- */ +- if (req->buf == NULL) +- req->buf = (void *)0xDEADBABE; +- +- reinit_completion(&ffs->ep0req_completion); +- +- ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); +- if (unlikely(ret < 0)) +- return ret; +- +- ret = wait_for_completion_interruptible(&ffs->ep0req_completion); +- if (unlikely(ret)) { +- usb_ep_dequeue(ffs->gadget->ep0, req); +- return -EINTR; +- } +- +- ffs->setup_state = FFS_NO_SETUP; +- return req->status ? 
req->status : req->actual; +-} +- +-static int __ffs_ep0_stall(struct ffs_data *ffs) +-{ +- if (ffs->ev.can_stall) { +- pr_vdebug("ep0 stall\n"); +- usb_ep_set_halt(ffs->gadget->ep0); +- ffs->setup_state = FFS_NO_SETUP; +- return -EL2HLT; +- } else { +- pr_debug("bogus ep0 stall!\n"); +- return -ESRCH; +- } +-} +- +-static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr) +-{ +- struct ffs_data *ffs = file->private_data; +- ssize_t ret; +- char *data = NULL; +- +- ENTER(); +- +- /* Fast check if setup was canceled */ +- if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) +- return -EIDRM; +- +- /* Acquire mutex */ +- ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); +- if (unlikely(ret < 0)) +- return ret; +- +- /* Check state */ +- switch (ffs->state) { +- case FFS_READ_DESCRIPTORS: +- case FFS_READ_STRINGS: +- /* Copy data */ +- if (unlikely(len < 16)) { +- ret = -EINVAL; +- break; +- } +- +- data = ffs_prepare_buffer(buf, len); +- if (IS_ERR(data)) { +- ret = PTR_ERR(data); +- break; +- } +- +- /* Handle data */ +- if (ffs->state == FFS_READ_DESCRIPTORS) { +- pr_info("read descriptors\n"); +- ret = __ffs_data_got_descs(ffs, data, len); +- if (unlikely(ret < 0)) +- break; +- +- ffs->state = FFS_READ_STRINGS; +- ret = len; +- } else { +- pr_info("read strings\n"); +- ret = __ffs_data_got_strings(ffs, data, len); +- if (unlikely(ret < 0)) +- break; +- +- ret = ffs_epfiles_create(ffs); +- if (unlikely(ret)) { +- ffs->state = FFS_CLOSING; +- break; +- } +- +- ffs->state = FFS_ACTIVE; +- mutex_unlock(&ffs->mutex); +- +- ret = ffs_ready(ffs); +- if (unlikely(ret < 0)) { +- ffs->state = FFS_CLOSING; +- return ret; +- } +- +- return len; +- } +- break; +- +- case FFS_ACTIVE: +- data = NULL; +- /* +- * We're called from user space, we can use _irq +- * rather then _irqsave +- */ +- spin_lock_irq(&ffs->ev.waitq.lock); +- switch (ffs_setup_state_clear_cancelled(ffs)) { +- case FFS_SETUP_CANCELLED: +- ret = -EIDRM; +- goto done_spin; +- +- case FFS_NO_SETUP: +- ret = -ESRCH; +- goto done_spin; +- +- case FFS_SETUP_PENDING: +- break; +- } +- +- /* FFS_SETUP_PENDING */ +- if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) { +- spin_unlock_irq(&ffs->ev.waitq.lock); +- ret = __ffs_ep0_stall(ffs); +- break; +- } +- +- /* FFS_SETUP_PENDING and not stall */ +- len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); +- +- spin_unlock_irq(&ffs->ev.waitq.lock); +- +- data = ffs_prepare_buffer(buf, len); +- if (IS_ERR(data)) { +- ret = PTR_ERR(data); +- break; +- } +- +- spin_lock_irq(&ffs->ev.waitq.lock); +- +- /* +- * We are guaranteed to be still in FFS_ACTIVE state +- * but the state of setup could have changed from +- * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need +- * to check for that. If that happened we copied data +- * from user space in vain but it's unlikely. +- * +- * For sure we are not in FFS_NO_SETUP since this is +- * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP +- * transition can be performed and it's protected by +- * mutex. +- */ +- if (ffs_setup_state_clear_cancelled(ffs) == +- FFS_SETUP_CANCELLED) { +- ret = -EIDRM; +-done_spin: +- spin_unlock_irq(&ffs->ev.waitq.lock); +- } else { +- /* unlocks spinlock */ +- ret = __ffs_ep0_queue_wait(ffs, data, len); +- } +- kfree(data); +- break; +- +- default: +- ret = -EBADFD; +- break; +- } +- +- mutex_unlock(&ffs->mutex); +- return ret; +-} +- +-/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. 
*/ +-static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, size_t n) +- __releases(&ffs->ev.waitq.lock) +-{ +- /* +- * n cannot be bigger than ffs->ev.count, which cannot be bigger than +- * size of ffs->ev.types array (which is four) so that's how much space +- * we reserve. +- */ +- struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)]; +- const size_t size = n * sizeof *events; +- unsigned i = 0; +- +- memset(events, 0, size); +- +- do { +- events[i].type = ffs->ev.types[i]; +- if (events[i].type == FUNCTIONFS_SETUP) { +- events[i].u.setup = ffs->ev.setup; +- ffs->setup_state = FFS_SETUP_PENDING; +- } +- } while (++i < n); +- +- ffs->ev.count -= n; +- if (ffs->ev.count) +- memmove(ffs->ev.types, ffs->ev.types + n, ffs->ev.count * sizeof *ffs->ev.types); +- +- spin_unlock_irq(&ffs->ev.waitq.lock); +- mutex_unlock(&ffs->mutex); +- +- return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size; +-} +- +-static ssize_t ffs_ep0_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) +-{ +- struct ffs_data *ffs = file->private_data; +- char *data = NULL; +- size_t n; +- int ret; +- +- ENTER(); +- +- /* Fast check if setup was canceled */ +- if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) +- return -EIDRM; +- +- /* Acquire mutex */ +- ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); +- if (unlikely(ret < 0)) +- return ret; +- +- /* Check state */ +- if (ffs->state != FFS_ACTIVE) { +- ret = -EBADFD; +- goto done_mutex; +- } +- +- /* +- * We're called from user space, we can use _irq rather then +- * _irqsave +- */ +- spin_lock_irq(&ffs->ev.waitq.lock); +- +- switch (ffs_setup_state_clear_cancelled(ffs)) { +- case FFS_SETUP_CANCELLED: +- ret = -EIDRM; +- break; +- +- case FFS_NO_SETUP: +- n = len / sizeof(struct usb_functionfs_event); +- if (unlikely(!n)) { +- ret = -EINVAL; +- break; +- } +- +- if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) { +- ret = -EAGAIN; +- break; +- } +- +- if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, +- ffs->ev.count)) { +- ret = -EINTR; +- break; +- } +- +- /* unlocks spinlock */ +- return __ffs_ep0_read_events(ffs, buf, +- min(n, (size_t)ffs->ev.count)); +- +- case FFS_SETUP_PENDING: +- if (ffs->ev.setup.bRequestType & USB_DIR_IN) { +- spin_unlock_irq(&ffs->ev.waitq.lock); +- ret = __ffs_ep0_stall(ffs); +- goto done_mutex; +- } +- +- len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); +- +- spin_unlock_irq(&ffs->ev.waitq.lock); +- +- if (likely(len)) { +- data = kmalloc(len, GFP_KERNEL); +- if (unlikely(!data)) { +- ret = -ENOMEM; +- goto done_mutex; +- } +- } +- +- spin_lock_irq(&ffs->ev.waitq.lock); +- +- /* See ffs_ep0_write() */ +- if (ffs_setup_state_clear_cancelled(ffs) == +- FFS_SETUP_CANCELLED) { +- ret = -EIDRM; +- break; +- } +- +- /* unlocks spinlock */ +- ret = __ffs_ep0_queue_wait(ffs, data, len); +- if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len))) +- ret = -EFAULT; +- goto done_mutex; +- +- default: +- ret = -EBADFD; +- break; +- } +- +- spin_unlock_irq(&ffs->ev.waitq.lock); +-done_mutex: +- mutex_unlock(&ffs->mutex); +- kfree(data); +- return ret; +-} +- +-static int ffs_ep0_open(struct inode *inode, struct file *file) +-{ +- struct ffs_data *ffs = container_of(inode->i_cdev, struct ffs_data, cdev); +- ENTER(); +- +- if (unlikely(ffs->state == FFS_CLOSING)) +- return -EBUSY; +- +- file->private_data = ffs; +- return 0; +-} +- +-static int ffs_ep0_release(struct inode *inode, struct file *file) +-{ +- ENTER(); +- return 0; +-} +- 
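For orientation before the bulk I/O paths that follow: the ep0 handlers above implement a three-step bring-up, FFS_READ_DESCRIPTORS -> FFS_READ_STRINGS -> FFS_ACTIVE, on a character device that this adapter exposes as /dev/functionfs/<name>.ep0 (see ffs_devnode() above and the FUNCTIONFS_NEWFN ioctl further down). A minimal userspace sketch of that sequence, not part of the patch: the instance name "fn" is assumed to have been created beforehand, and the descriptor/string blobs (FUNCTIONFS_DESCRIPTORS_MAGIC_V2 and FUNCTIONFS_STRINGS_MAGIC layouts) are elided.

#include <fcntl.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

/* Bring one FunctionFS instance to FFS_ACTIVE and wait for the host. */
static int ffs_ep0_bringup(const void *descs, size_t descs_len,
			   const void *strs, size_t strs_len)
{
	struct usb_functionfs_event ev;
	int fd = open("/dev/functionfs/fn.ep0", O_RDWR);	/* name "fn" assumed */

	if (fd < 0)
		return -1;
	/* FFS_READ_DESCRIPTORS: the first write must carry the descriptor blob. */
	if (write(fd, descs, descs_len) != (ssize_t)descs_len)
		goto fail;
	/* FFS_READ_STRINGS: the second write activates the function (fn.ep<n> nodes appear). */
	if (write(fd, strs, strs_len) != (ssize_t)strs_len)
		goto fail;
	/* FFS_ACTIVE: read() now drains queued usb_functionfs_event entries. */
	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type == FUNCTIONFS_ENABLE)
			return fd;	/* host enabled the function; endpoint I/O may start */
	}
fail:
	close(fd);
	return -1;
}

Unlike mainline f_fs, where ep0 appears by mounting a functionfs filesystem, this variant creates the ep0 and ep<n> nodes as chardevs under /dev/functionfs through the usbfn misc device and its FUNCTIONFS_NEWFN/FUNCTIONFS_DELFN ioctls shown later in this patch.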
+-static ssize_t ffs_ep0_iorw(struct file *file, struct ffs_io_data *io_data) +-{ +- struct ffs_data *ffs = file->private_data; +- struct usb_request *req = NULL; +- ssize_t ret, data_len = io_data->len; +- bool interrupted = false; +- struct ffs_memory *ffsm = NULL; +- +- /* Are we still active? */ +- if (WARN_ON(ffs->state != FFS_ACTIVE)) +- return -ENODEV; +- ffsm = generic_find_ep0_memory_area(ffs, io_data->buf, data_len); +- if (ffsm == NULL) +- { +- return -ENODEV; +- } +- if (!io_data->aio) { +- reinit_completion(&ffs->ep0req_completion); +- +- req = ffs->ep0req; +- req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start); +- req->length = data_len; +- req->complete = ffs_ep0_complete; +- +- ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); +- if (unlikely(ret < 0)) +- goto error; +- +- if (io_data->timeout > 0) { +- ret = wait_for_completion_interruptible_timeout(&ffs->ep0req_completion, io_data->timeout); +- if (ret < 0) { +- /* +- * To avoid race condition with ffs_epfile_io_complete, +- * dequeue the request first then check +- * status. usb_ep_dequeue API should guarantee no race +- * condition with req->complete callback. +- */ +- usb_ep_dequeue(ffs->gadget->ep0, req); +- wait_for_completion(&ffs->ep0req_completion); +- interrupted = req->status < 0; +- } else if (ret == 0) { +- ret = -EBUSY; +- usb_ep_dequeue(ffs->gadget->ep0, req); +- wait_for_completion(&ffs->ep0req_completion); +- goto error; +- } +- } else { +- ret = wait_for_completion_interruptible(&ffs->ep0req_completion); +- if (ret < 0) { +- usb_ep_dequeue(ffs->gadget->ep0, req); +- wait_for_completion(&ffs->ep0req_completion); +- interrupted = req->status < 0; +- } +- } +- +- if (interrupted) { +- ret = -EINTR; +- } else { +- ret = req->actual; +- } +- goto error; +- } +- else if (!(req = usb_ep_alloc_request(ffs->gadget->ep0, GFP_ATOMIC))) { +- ret = -ENOMEM; +- } +- else { +- req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start); +- req->length = data_len; +- +- io_data->ep = ffs->gadget->ep0; +- io_data->req = req; +- io_data->ffs = ffs; +- +- req->context = io_data; +- req->complete = ffs_ep0_async_io_complete; +- list_add(&req->list, &ffs->ep0req->list); +- ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); +- if (unlikely(ret)) { +- usb_ep_free_request(ffs->gadget->ep0, req); +- goto error; +- } +- +- ret = -EIOCBQUEUED; +- } +- +-error: +- return ret; +-} +- +-static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) +-{ +- struct ffs_data *ffs = file->private_data; +- long ret = 0; +- unsigned int copied = 0; +- struct ffs_memory *ffsm = NULL; +- struct generic_memory mem; +- +- ENTER(); +- +- switch (code) { +- case FUNCTIONFS_ENDPOINT_QUEUE_INIT: +- ret = kfifo_alloc(&ffs->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL); +- break; +- case FUNCTIONFS_ENDPOINT_QUEUE_DEL: +- kfifo_free(&ffs->reqEventFifo); +- break; +- case FUNCTIONFS_ENDPOINT_RELEASE_BUF: +- if (copy_from_user(&mem, (void __user *)value, sizeof(mem))) +- { +- pr_info("copy from user failed\n"); +- return -EFAULT; +- } +- ffsm = generic_find_ep0_memory_area(ffs, mem.buf, mem.size); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_del(&ffsm->memlist); +- kfree((void *)ffsm->mem); +- kfree(ffsm); +- break; +- case FUNCTIONFS_ENDPOINT_READ: +- case FUNCTIONFS_ENDPOINT_WRITE: +- { +- struct IoData myIoData; +- struct ffs_io_data io_data, *p = &io_data; +- ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- return 
-EFAULT; +- } +- if (myIoData.aio) { +- p = kmalloc(sizeof(io_data), GFP_KERNEL); +- if (unlikely(!p)) +- return -ENOMEM; +- } else { +- memset(p, 0, sizeof(*p)); +- } +- memcpy(p, &myIoData, sizeof(struct IoData)); +- +- ret = ffs_ep0_iorw(file, p); +- if (ret == -EIOCBQUEUED) { +- return 0; +- } +- if (p->aio) +- kfree(p); +- return ret; +- } +- case FUNCTIONFS_ENDPOINT_RW_CANCEL: +- { +- struct usb_request *req; +- struct IoData myIoData; +- ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- return -EFAULT; +- } +- ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_for_each_entry(req, &ffs->ep0req->list, list) { +- if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) { +- usb_ep_dequeue(ffs->gadget->ep0, req); +- return 0; +- } +- } +- return -EFAULT; +- } +- case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS: +- { +- struct usb_request *req; +- struct IoData myIoData; +- ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- return -EFAULT; +- } +- ffsm = generic_find_ep0_memory_area(ffs, myIoData.buf, myIoData.len); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_for_each_entry(req, &ffs->ep0req->list, list) { +- if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) { +- return req->status; +- } +- } +- return -EFAULT; +- } +- case FUNCTIONFS_ENDPOINT_GET_EP0_EVENT: +- if (!kfifo_is_empty(&ffs->reqEventFifo)) { +- ret = kfifo_to_user(&ffs->reqEventFifo, (void __user *)value, +- sizeof(struct UsbFnReqEvent), &copied) == 0 ? copied : -1; +- if (ret > 0) { +- ffs->setup_state = FFS_NO_SETUP; +- return ret; +- } +- } +- +- return -EFAULT; +- } +- +- return ret; +-} +- +-#ifdef CONFIG_COMPAT +-static long ffs_ep0_compat_ioctl(struct file *file, unsigned code, +- unsigned long value) +-{ +- return ffs_ep0_ioctl(file, code, value); +-} +-#endif +- +-static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait) +-{ +- struct ffs_data *ffs = file->private_data; +- __poll_t mask = EPOLLWRNORM; +- int ret; +- +- ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); +- if (unlikely(ret < 0)) +- return mask; +- +- switch (ffs->state) { +- case FFS_READ_DESCRIPTORS: +- case FFS_READ_STRINGS: +- mask |= EPOLLOUT; +- break; +- +- case FFS_ACTIVE: +- switch (ffs->setup_state) { +- case FFS_NO_SETUP: +- poll_wait(file, &ffs->ev.waitq, wait); +- if (ffs->ev.count) +- mask |= EPOLLIN; +- break; +- +- case FFS_SETUP_PENDING: +- case FFS_SETUP_CANCELLED: +- poll_wait(file, &ffs->wait_que, wait); +- if (!kfifo_is_empty(&ffs->reqEventFifo)) +- { +- mask |= EPOLLOUT; +- } +- break; +- } +- case FFS_CLOSING: +- break; +- case FFS_DEACTIVATED: +- break; +- } +- +- mutex_unlock(&ffs->mutex); +- +- return mask; +-} +- +-static int ffs_ep0_mmap(struct file *file, struct vm_area_struct *vma) +-{ +- struct ffs_data *ffs = file->private_data; +- size_t size = vma->vm_end - vma->vm_start; +- unsigned long flags; +- struct ffs_memory *ffsm = NULL; +- void *virt_mem = NULL; +- +- if (ffs == NULL) { +- pr_info("Invalid private parameter!\n"); +- return -EINVAL; +- } +- virt_mem = kmalloc(size, GFP_KERNEL); +- if (virt_mem == NULL) +- { +- pr_info("%s alloc memory failed!\n", __FUNCTION__); +- return -ENOMEM; +- } +- ffsm = kmalloc(sizeof(struct ffs_memory), GFP_KERNEL); +- if (ffsm == NULL) +- { +- pr_info("%s alloc memory failed!\n", __FUNCTION__); +- goto error_free_mem; +- } +- if 
(remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT, +- vma->vm_end - vma->vm_start, vma->vm_page_prot)) { +- goto error_free_ffsm; +- } +- ffsm->mem = (uint64_t)virt_mem; +- ffsm->size = size; +- ffsm->vm_start = vma->vm_start; +- INIT_LIST_HEAD(&ffsm->memlist); +- spin_lock_irqsave(&ffs->mem_lock, flags); +- list_add_tail(&ffsm->memlist, &ffs->memory_list); +- spin_unlock_irqrestore(&ffs->mem_lock, flags); +- return 0; +-error_free_ffsm: +- kfree(ffsm); +-error_free_mem: +- kfree(virt_mem); +- return -1; +-} +- +-static const struct file_operations ffs_ep0_operations = { +- .owner = THIS_MODULE, +- .llseek = no_llseek, +- .open = ffs_ep0_open, +- .write = ffs_ep0_write, +- .read = ffs_ep0_read, +- .release = ffs_ep0_release, +- .unlocked_ioctl = ffs_ep0_ioctl, +-#ifdef CONFIG_COMPAT +- .compat_ioctl = ffs_ep0_compat_ioctl, +-#endif +- .poll = ffs_ep0_poll, +- .mmap = ffs_ep0_mmap, +-}; +- +-/* "Normal" endpoints operations ********************************************/ +-static struct ffs_memory *generic_find_memory_area(struct ffs_epfile *epfile, uint64_t buf, uint32_t len) +-{ +- struct ffs_memory *ffsm = NULL, *iter = NULL; +- uint64_t buf_start = buf; +- +- list_for_each_entry(iter, &epfile->memory_list, memlist) { +- if (buf_start >= iter->vm_start && +- buf_start < iter->vm_start + iter->size) { +- if (len <= iter->vm_start + iter->size - buf_start) { +- ffsm = iter; +- break; +- } +- } +- } +- return ffsm; +-} +- +-static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) +-{ +- ENTER(); +- if (likely(req->context)) { +- struct ffs_ep *ep = _ep->driver_data; +- ep->status = req->status ? req->status : req->actual; +- complete(req->context); +- } +-} +- +-static void epfile_task_proc(unsigned long context) +-{ +- struct ffs_io_data *io_data = (struct ffs_io_data *)context; +- struct ffs_epfile *epfile = io_data->epfile; +- unsigned long flags; +- +- spin_lock_irqsave(&epfile->ffs->eps_lock, flags); +- io_data->status = io_data->req->status; +- io_data->actual = io_data->req->actual; +- kfifo_in(&epfile->reqEventFifo, &io_data->buf, sizeof(struct UsbFnReqEvent)); +- list_del(&io_data->req->list); +- usb_ep_free_request(io_data->ep, io_data->req); +- kfree(io_data); +- spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); +- wake_up_all(&epfile->wait_que); +-} +- +-static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct usb_request *req) +-{ +- struct ffs_io_data *io_data = req->context; +- +- tasklet_init(&io_data->task, epfile_task_proc, (uintptr_t)io_data); +- tasklet_schedule(&io_data->task); +- +-} +- +-static int ffs_epfile_open(struct inode *inode, struct file *file) +-{ +- struct ffs_epfile *epfile = container_of(inode->i_cdev, struct ffs_epfile, cdev); +- ENTER(); +- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) +- return -ENODEV; +- +- file->private_data = epfile; +- return 0; +-} +- +-static int ffs_epfile_release(struct inode *inode, struct file *file) +-{ +- ENTER(); +- return 0; +-} +- +-static int ffs_epfile_mmap(struct file *file, struct vm_area_struct *vma) +-{ +- struct ffs_epfile *epfile = file->private_data; +- size_t size = vma->vm_end - vma->vm_start; +- struct ffs_memory *ffsm = NULL; +- unsigned long flags; +- void *virt_mem = NULL; +- +- if (epfile == NULL) +- { +- pr_info("Invalid private parameter!\n"); +- return -EINVAL; +- } +- virt_mem = kmalloc(size, GFP_KERNEL); +- if (virt_mem == NULL) +- { +- pr_info("%s alloc memory failed!\n", __FUNCTION__); +- return -ENOMEM; +- } +- ffsm = 
kmalloc(sizeof(struct ffs_memory), GFP_KERNEL); +- if (ffsm == NULL) +- { +- pr_info("%s alloc memory failed!\n", __FUNCTION__); +- goto error_free_mem; +- } +- if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(virt_mem)>>PAGE_SHIFT, +- vma->vm_end - vma->vm_start, vma->vm_page_prot)) +- { +- goto error_free_ffsm; +- } +- ffsm->mem = (uint64_t)virt_mem; +- ffsm->size = size; +- ffsm->vm_start = vma->vm_start; +- INIT_LIST_HEAD(&ffsm->memlist); +- spin_lock_irqsave(&epfile->ffs->eps_lock, flags); +- list_add_tail(&ffsm->memlist, &epfile->memory_list); +- spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); +- +- return 0; +-error_free_ffsm: +- kfree(ffsm); +-error_free_mem: +- kfree(virt_mem); +- +- return -1; +-} +- +-static ssize_t ffs_epfile_iorw(struct file *file, struct ffs_io_data *io_data) +-{ +- struct ffs_epfile *epfile = file->private_data; +- struct usb_request *req = NULL; +- struct ffs_ep *ep = NULL; +- struct ffs_memory *ffsm = NULL; +- ssize_t ret, data_len = -EINVAL; +- int halt; +- +- /* Are we still active? */ +- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) +- return -ENODEV; +- +- /* Wait for endpoint to be enabled */ +- ep = epfile->ep; +- if (!ep) { +- if (file->f_flags & O_NONBLOCK) +- return -EAGAIN; +- +- ret = wait_event_interruptible( +- epfile->ffs->wait, (ep = epfile->ep)); +- if (ret) +- return -EINTR; +- } +- +- /* Do we halt? */ +- halt = (!io_data->read == !epfile->in); +- if (halt && epfile->isoc) +- return -EINVAL; +- +- /* We will be using request and read_buffer */ +- ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK); +- if (unlikely(ret)) +- goto error; +- +- /* Allocate & copy */ +- if (!halt) { +- struct usb_gadget *gadget; +- /* +- * if we _do_ wait above, the epfile->ffs->gadget might be NULL +- * before the waiting completes, so do not assign to 'gadget' +- * earlier +- */ +- gadget = epfile->ffs->gadget; +- +- spin_lock_irq(&epfile->ffs->eps_lock); +- /* In the meantime, endpoint got disabled or changed. */ +- if (epfile->ep != ep) { +- ret = -ESHUTDOWN; +- goto error_lock; +- } +- data_len = io_data->len; +- /* +- * Controller may require buffer size to be aligned to +- * maxpacketsize of an out endpoint. +- */ +- if (io_data->read) +- data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); +- spin_unlock_irq(&epfile->ffs->eps_lock); +- } +- +- spin_lock_irq(&epfile->ffs->eps_lock); +- ffsm = generic_find_memory_area(epfile, io_data->buf, io_data->len); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- if (epfile->ep != ep) { +- /* In the meantime, endpoint got disabled or changed. */ +- ret = -ESHUTDOWN; +- } +- else if (halt) { +- ret = usb_ep_set_halt(ep->ep); +- if (!ret) +- ret = -EBADMSG; +- } +- else if (!io_data->aio) { +- DECLARE_COMPLETION_ONSTACK(done); +- bool interrupted = false; +- +- req = ep->req; +- req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start); +- req->length = data_len; +- +- req->context = &done; +- req->complete = ffs_epfile_io_complete; +- +- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); +- if (unlikely(ret < 0)) +- goto error_lock; +- +- spin_unlock_irq(&epfile->ffs->eps_lock); +- if (io_data->timeout > 0) { +- ret = wait_for_completion_interruptible_timeout(&done, io_data->timeout); +- if (ret < 0) { +- /* +- * To avoid race condition with ffs_epfile_io_complete, +- * dequeue the request first then check +- * status. usb_ep_dequeue API should guarantee no race +- * condition with req->complete callback. 
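+-				 * The extra wait_for_completion() below ensures the
+-				 * callback has finished before ep->status is read.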
+- */ +- usb_ep_dequeue(ep->ep, req); +- wait_for_completion(&done); +- interrupted = ep->status < 0; +- } else if (ret == 0) { +- ret = -EBUSY; +- usb_ep_dequeue(ep->ep, req); +- wait_for_completion(&done); +- goto error_mutex; +- } +- } else { +- ret = wait_for_completion_interruptible(&done); +- if (ret < 0) { +- usb_ep_dequeue(ep->ep, req); +- wait_for_completion(&done); +- interrupted = ep->status < 0; +- } +- } +- +- if (interrupted) { +- ret = -EINTR; +- } else { +- ret = req->actual; +- } +- goto error_mutex; +- } +- else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { +- ret = -ENOMEM; +- } +- else { +- req->buf = (void *)(ffsm->mem + io_data->buf - ffsm->vm_start); +- req->length = data_len; +- +- io_data->ep = ep->ep; +- io_data->req = req; +- io_data->epfile = epfile; +- +- req->context = io_data; +- req->complete = ffs_epfile_async_io_complete; +- list_add(&req->list, &ep->req->list); +- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); +- if (unlikely(ret)) { +- usb_ep_free_request(ep->ep, req); +- goto error_lock; +- } +- +- ret = -EIOCBQUEUED; +- } +- +-error_lock: +- spin_unlock_irq(&epfile->ffs->eps_lock); +-error_mutex: +- mutex_unlock(&epfile->mutex); +-error: +- return ret; +-} +- +-static long ffs_epfile_ioctl(struct file *file, unsigned code, unsigned long value) +-{ +- struct ffs_epfile *epfile = file->private_data; +- struct ffs_ep *ep = epfile->ep; +- int ret = 0; +- struct generic_memory mem; +- struct ffs_memory *ffsm = NULL; +- +- ENTER(); +- +- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) +- return -ENODEV; +- +- spin_lock_irq(&epfile->ffs->eps_lock); +- +- switch (code) { +- case FUNCTIONFS_ENDPOINT_QUEUE_INIT: +- ret = kfifo_alloc(&epfile->reqEventFifo, MAX_REQUEST * sizeof(struct UsbFnReqEvent), GFP_KERNEL); +- break; +- case FUNCTIONFS_ENDPOINT_QUEUE_DEL: +- kfifo_free(&epfile->reqEventFifo); +- break; +- case FUNCTIONFS_ENDPOINT_RELEASE_BUF: +- if (copy_from_user(&mem, (void __user *)value, sizeof(mem))) +- { +- pr_info("copy from user failed\n"); +- return -EFAULT; +- } +- ffsm = generic_find_memory_area(epfile, mem.buf, mem.size); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_del(&ffsm->memlist); +- kfree((void *)ffsm->mem); +- kfree(ffsm); +- break; +- case FUNCTIONFS_ENDPOINT_READ: +- case FUNCTIONFS_ENDPOINT_WRITE: +- { +- struct IoData myIoData; +- struct ffs_io_data io_data, *p = &io_data; +- ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- if (myIoData.aio) { +- p = kmalloc(sizeof(io_data), GFP_KERNEL); +- if (unlikely(!p)) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -ENOMEM; +- } +- } else { +- memset(p, 0, sizeof(*p)); +- } +- memcpy(p, &myIoData, sizeof(struct IoData)); +- +- spin_unlock_irq(&epfile->ffs->eps_lock); +- ret = ffs_epfile_iorw(file, p); +- if (ret == -EIOCBQUEUED) { +- return 0; +- } +- if (p->aio) +- kfree(p); +- return ret; +- } +- case FUNCTIONFS_ENDPOINT_RW_CANCEL: +- { +- struct usb_request *req; +- struct IoData myIoData; +- if (!ep) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- ret = copy_from_user(&myIoData, (void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_for_each_entry(req, &epfile->ep->req->list, list) { +- if (req->buf == 
(void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) { +- usb_ep_dequeue(epfile->ep->ep, req); +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return 0; +- } +- } +- if (epfile->ep->req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) { +- usb_ep_dequeue(epfile->ep->ep, epfile->ep->req); +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return 0; +- } +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- case FUNCTIONFS_ENDPOINT_GET_REQ_STATUS: +- { +- struct usb_request *req; +- struct IoData myIoData; +- if (!ep) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- ret = copy_from_user(&myIoData,(void __user *)value, sizeof(struct IoData)); +- if (unlikely(ret)) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- ffsm = generic_find_memory_area(epfile, myIoData.buf, myIoData.len); +- if (ffsm == NULL) +- { +- return -EFAULT; +- } +- list_for_each_entry(req, &epfile->ep->req->list, list) { +- if (req->buf == (void *)(ffsm->mem + myIoData.buf - ffsm->vm_start)) { +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return req->status; +- } +- } +- spin_unlock_irq(&epfile->ffs->eps_lock); +- return -EFAULT; +- } +- case FUNCTIONFS_FIFO_STATUS: +- ret = usb_ep_fifo_status(epfile->ep->ep); +- break; +- case FUNCTIONFS_FIFO_FLUSH: +- usb_ep_fifo_flush(epfile->ep->ep); +- ret = 0; +- break; +- case FUNCTIONFS_CLEAR_HALT: +- ret = usb_ep_clear_halt(epfile->ep->ep); +- break; +- case FUNCTIONFS_ENDPOINT_REVMAP: +- ret = epfile->ep->num; +- break; +- case FUNCTIONFS_ENDPOINT_DESC: +- { +- int desc_idx; +- int i; +- struct usb_endpoint_descriptor *desc; +- +- switch (epfile->ffs->speed) { +- case USB_SPEED_SUPER: +- desc_idx = 2; +- break; +- case USB_SPEED_HIGH: +- desc_idx = 1; +- break; +- default: +- desc_idx = 1; +- } +- for (i = 0; i < epfile->ffs->eps_count; i++) { +- if (epfile->ffs->epfiles + i == epfile) +- break; +- } +- ep = epfile->ffs->eps + i; +- desc = ep->descs[desc_idx]; +- spin_unlock_irq(&epfile->ffs->eps_lock); +- ret = copy_to_user((void __user *)value, desc, desc->bLength); +- if (ret) +- ret = -EFAULT; +- return ret; +- } +- default: +- ret = -ENOTTY; +- } +- spin_unlock_irq(&epfile->ffs->eps_lock); +- +- return ret; +-} +- +-static ssize_t ffs_epfile_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) +-{ +- int status = 0; +- unsigned int copied = 0; +- unsigned long flags; +- struct ffs_epfile *epfile = file->private_data; +- ENTER(); +- if (kfifo_is_empty(&epfile->reqEventFifo)) { +- return 0; +- } +- spin_lock_irqsave(&epfile->ffs->eps_lock, flags); +- status = kfifo_to_user(&epfile->reqEventFifo, buf, count, &copied) == 0 ? 
copied : -1; +- spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags); +- +- return status; +-} +- +-static ssize_t ffs_epfile_write(struct file *file, const char __user *buf, size_t count, loff_t *f_pos) +-{ +- return count; +-} +- +-static unsigned int ffs_epfile_poll(struct file *file, struct poll_table_struct * wait) +-{ +- unsigned int mask = 0; +- struct ffs_epfile *epfile = file->private_data; +- ENTER(); +- poll_wait(file, &epfile->wait_que, wait); +- if (!kfifo_is_empty(&epfile->reqEventFifo)) { +- mask |= POLLIN; +- } +- return mask; +-} +- +-#ifdef CONFIG_COMPAT +-static long ffs_epfile_compat_ioctl(struct file *file, unsigned code, +- unsigned long value) +-{ +- return ffs_epfile_ioctl(file, code, value); +-} +-#endif +- +-static const struct file_operations ffs_epfile_operations = { +- .owner = THIS_MODULE, +- .llseek = no_llseek, +- .mmap = ffs_epfile_mmap, +- .read = ffs_epfile_read, +- .write = ffs_epfile_write, +- .poll = ffs_epfile_poll, +- .open = ffs_epfile_open, +- .release = ffs_epfile_release, +- .unlocked_ioctl = ffs_epfile_ioctl, +-#ifdef CONFIG_COMPAT +- .compat_ioctl = ffs_epfile_compat_ioctl, +-#endif +-}; +- +-/* ffs_data and ffs_function construction and destruction code **************/ +-static void ffs_data_clear(struct ffs_data *ffs); +-static void ffs_data_reset(struct ffs_data *ffs); +-static dev_t g_dev; +-#define MAX_EP_DEV 10 +-static long usbfn_ioctl(struct file *file, unsigned int cmd, unsigned long value) +-{ +- long ret; +- ENTER(); +- switch(cmd) +- { +- case FUNCTIONFS_NEWFN: +- { +- struct ffs_dev *ffs_dev; +- struct ffs_data *ffs; +- struct FuncNew newfn; +- char nameEp0[MAX_NAMELEN]; +- ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew )); +- if (unlikely(ret)) { +- return -EFAULT; +- } +- ffs = ffs_data_new(newfn.name); +- if (unlikely(!ffs)) { +- return (-ENOMEM); +- } +- +- if (newfn.nameLen > MAX_NAMELEN) { +- return -EPERM; +- } +- memcpy(ffs->dev_name, newfn.name, newfn.nameLen); +- +- if (unlikely(!ffs->dev_name)) { +- ffs_data_put(ffs); +- return (-ENOMEM); +- } +- +- if (sprintf(nameEp0, "%s.ep%u", ffs->dev_name, 0) < 0) { +- ffs_data_put(ffs); +- return -EFAULT; +- } +- ffs_dev = ffs_acquire_dev(newfn.name); +- if (IS_ERR(ffs_dev)) { +- ffs_data_put(ffs); +- return (-ENODEV); +- } +- ffs->private_data = ffs_dev; +- +- ret = alloc_chrdev_region(&g_dev, 0, MAX_EP_DEV, nameEp0); +- if (ret < 0) { +- ffs_release_dev(ffs); +- ffs_data_put(ffs); +- return -EBUSY; +- } +- cdev_init(&ffs->cdev, &ffs_ep0_operations); +- ffs->devno = MKDEV(MAJOR(g_dev), 0); +- ret = cdev_add(&ffs->cdev, ffs->devno, 1); +- if (ret) { +- ffs_release_dev(ffs); +- ffs_data_put(ffs); +- return -EBUSY; +- } +- +- ffs->fn_device = device_create(ffs_class, NULL, ffs->devno, NULL, nameEp0); +- if (IS_ERR(ffs->fn_device)) { +- cdev_del(&ffs->cdev); +- ffs_release_dev(ffs); +- ffs_data_put(ffs); +- return -EBUSY; +- } +- return 0; +- } +- case FUNCTIONFS_DELFN: +- { +- struct FuncNew newfn; +- struct ffs_data *ffs; +- struct ffs_dev *ffs_dev; +- ret = copy_from_user(&newfn, (void __user *)value, sizeof(struct FuncNew )); +- if (unlikely(ret)) { +- return -EFAULT; +- } +- +- ffs_dev = _ffs_find_dev(newfn.name); +- if (IS_ERR(ffs_dev)) { +- return -EFAULT; +- } +- ffs = ffs_dev->ffs_data; +- device_destroy(ffs_class, ffs->devno); +- cdev_del(&ffs->cdev); +- unregister_chrdev_region(g_dev, MAX_EP_DEV); +- ffs_release_dev(ffs); +- ffs_data_clear(ffs); +- destroy_workqueue(ffs->io_completion_wq); +- kfree(ffs); +- return 0; +- } +- default: +- ret = 
-ENOTTY; +- } +- +- return ret; +-} +- +-static int usbfn_open(struct inode *inode, struct file *file) +-{ +- return 0; +-} +- +-static int usbfn_release(struct inode *inode, struct file *file) +-{ +- return 0; +-} +- +-static struct file_operations usbfn_fops = { +- .owner = THIS_MODULE, +- .unlocked_ioctl = usbfn_ioctl, +- .open = usbfn_open, +- .release = usbfn_release, +-#ifdef CONFIG_COMPAT +- .compat_ioctl = usbfn_ioctl, +-#endif +-}; +- +-static struct miscdevice usbfn_misc = { +- .minor = MISC_DYNAMIC_MINOR, +- .name = "usbfn", +- .fops = &usbfn_fops, +-}; +- +-/* Driver's main init/cleanup functions *************************************/ +-static int functionfs_init(void) +-{ +- int ret; +- +- ENTER(); +- ret = misc_register(&usbfn_misc); +- if (likely(!ret)) +- pr_info("file system registered\n"); +- else +- pr_err("failed registering file system (%d)\n", ret); +- +- //ffs_class = class_create(THIS_MODULE, "functionfs"); +- ffs_class = class_create("functionfs"); +- if (IS_ERR(ffs_class)) +- return PTR_ERR(ffs_class); +- +- ffs_class->devnode = ffs_devnode; +- +- return ret; +-} +- +-static void functionfs_cleanup(void) +-{ +- ENTER(); +- class_destroy(ffs_class); +- misc_deregister(&usbfn_misc); +-} +- +-static void ffs_data_get(struct ffs_data *ffs) +-{ +- ENTER(); +- refcount_inc(&ffs->ref); +-} +- +-static void ffs_data_put(struct ffs_data *ffs) +-{ +- ENTER(); +- if (unlikely(refcount_dec_and_test(&ffs->ref))) { +- pr_info("%s(): freeing\n", __func__); +- ffs_data_clear(ffs); +- BUG_ON(waitqueue_active(&ffs->ev.waitq) || +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0) +- swait_active(&ffs->ep0req_completion.wait) || +-#else +- waitqueue_active(&ffs->ep0req_completion.wait) || +-#endif +- waitqueue_active(&ffs->wait) || +- waitqueue_active(&ffs->wait_que)); +- destroy_workqueue(ffs->io_completion_wq); +- kfree(ffs); +- } +-} +- +-static struct ffs_data *ffs_data_new(const char *dev_name) +-{ +- struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); +- if (unlikely(!ffs)) +- return NULL; +- +- ENTER(); +- +- ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name); +- if (!ffs->io_completion_wq) { +- kfree(ffs); +- return NULL; +- } +- +- refcount_set(&ffs->ref, 1); +- atomic_set(&ffs->opened, 0); +- ffs->state = FFS_READ_DESCRIPTORS; +- mutex_init(&ffs->mutex); +- spin_lock_init(&ffs->eps_lock); +- spin_lock_init(&ffs->mem_lock); +- init_waitqueue_head(&ffs->ev.waitq); +- init_waitqueue_head(&ffs->wait); +- init_waitqueue_head(&ffs->wait_que); +- init_completion(&ffs->ep0req_completion); +- INIT_LIST_HEAD(&ffs->memory_list); +- ffs->ev.can_stall = 1; +- +- return ffs; +-} +- +-static void ffs_data_clear(struct ffs_data *ffs) +-{ +- ENTER(); +- +- ffs_closed(ffs); +- +- BUG_ON(ffs->gadget); +- +- if (ffs->epfiles) +- ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); +- +- if (ffs->ffs_eventfd) +- eventfd_ctx_put(ffs->ffs_eventfd); +- +- kfree(ffs->raw_descs_data); +- kfree(ffs->raw_strings); +- kfree(ffs->stringtabs); +-} +- +-static void ffs_data_reset(struct ffs_data *ffs) +-{ +- ENTER(); +- +- ffs_data_clear(ffs); +- +- ffs->epfiles = NULL; +- ffs->raw_descs_data = NULL; +- ffs->raw_descs = NULL; +- ffs->raw_strings = NULL; +- ffs->stringtabs = NULL; +- +- ffs->raw_descs_length = 0; +- ffs->fs_descs_count = 0; +- ffs->hs_descs_count = 0; +- ffs->ss_descs_count = 0; +- +- ffs->strings_count = 0; +- ffs->interfaces_count = 0; +- ffs->eps_count = 0; +- +- ffs->ev.count = 0; +- +- ffs->state = FFS_READ_DESCRIPTORS; +- ffs->setup_state = FFS_NO_SETUP; +- 
ffs->flags = 0; +-} +- +-static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) +-{ +- struct usb_gadget_strings **lang; +- int first_id; +- +- ENTER(); +- +- if (WARN_ON(ffs->state != FFS_ACTIVE +- || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) +- return -EBADFD; +- +- first_id = usb_string_ids_n(cdev, ffs->strings_count); +- if (unlikely(first_id < 0)) +- return first_id; +- +- ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); +- if (unlikely(!ffs->ep0req)) +- return -ENOMEM; +- ffs->ep0req->complete = ffs_ep0_complete; +- ffs->ep0req->context = ffs; +- INIT_LIST_HEAD(&ffs->ep0req->list); +- +- lang = ffs->stringtabs; +- if (lang) { +- for (; *lang; ++lang) { +- struct usb_string *str = (*lang)->strings; +- int id = first_id; +- for (; str->s; ++id, ++str) +- str->id = id; +- } +- } +- +- ffs->gadget = cdev->gadget; +- ffs->speed = cdev->gadget->speed; +- ffs_data_get(ffs); +- return 0; +-} +- +-static void functionfs_unbind(struct ffs_data *ffs) +-{ +- ENTER(); +- +- if (!WARN_ON(!ffs->gadget)) { +- usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); +- ffs->ep0req = NULL; +- ffs->gadget = NULL; +- clear_bit(FFS_FL_BOUND, &ffs->flags); +- ffs_data_put(ffs); +- } +-} +- +-static int ffs_epfiles_create(struct ffs_data *ffs) +-{ +- struct ffs_epfile *epfile = NULL, *epfiles = NULL; +- unsigned int i, count ,ret; +- +- ENTER(); +- +- count = ffs->eps_count; +- epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL); +- if (!epfiles) +- return -ENOMEM; +- +- epfile = epfiles; +- for (i = 1; i <= count; ++i, ++epfile) { +- epfile->ffs = ffs; +- mutex_init(&epfile->mutex); +- INIT_LIST_HEAD(&epfile->memory_list); +- init_waitqueue_head(&epfile->wait_que); +- if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) { +- if (sprintf(epfile->name, "%s.ep%02x", ffs->dev_name, ffs->eps_addrmap[i]) < 0) { +- return -EFAULT; +- } +- } else { +- if (sprintf(epfile->name, "%s.ep%u", ffs->dev_name, i) < 0) { +- return -EFAULT; +- } +- } +- +- cdev_init(&epfile->cdev, &ffs_epfile_operations); +- epfile->devno=MKDEV(MAJOR(ffs->devno), i); +- ret = cdev_add(&epfile->cdev, epfile->devno, 1); +- if (ret) +- { +- ffs_epfiles_destroy(epfiles, i - 1); +- return -EBUSY; +- } +- +- epfile->device = device_create(ffs_class, NULL, epfile->devno, NULL, epfile->name); +- if (IS_ERR(epfile->device)) +- { +- cdev_del(&epfile->cdev); +- ffs_epfiles_destroy(epfiles, i - 1); +- return -EBUSY; +- } +- } +- +- ffs->epfiles = epfiles; +- return 0; +-} +- +-static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) +-{ +- struct ffs_epfile *epfile = epfiles; +- +- ENTER(); +- +- for (; count; --count, ++epfile) { +- BUG_ON(mutex_is_locked(&epfile->mutex)); +- device_destroy(ffs_class, epfile->devno); +- cdev_del(&epfile->cdev); +- } +- +- kfree(epfiles); +-} +- +-static void ffs_func_eps_disable(struct ffs_function *func) +-{ +- struct ffs_ep *ep = func->eps; +- struct ffs_epfile *epfile = func->ffs->epfiles; +- unsigned count = func->ffs->eps_count; +- unsigned long flags; +- +- spin_lock_irqsave(&func->ffs->eps_lock, flags); +- while (count--) { +- /* pending requests get nuked */ +- if (likely(ep->ep)) +- usb_ep_disable(ep->ep); +- ++ep; +- +- if (epfile) { +- epfile->ep = NULL; +- ++epfile; +- } +- } +- spin_unlock_irqrestore(&func->ffs->eps_lock, flags); +-} +- +-static int ffs_func_eps_enable(struct ffs_function *func) +-{ +- struct ffs_data *ffs = func->ffs; +- struct ffs_ep *ep = func->eps; +- struct ffs_epfile *epfile = ffs->epfiles; +- unsigned count = 
ffs->eps_count; +- unsigned long flags; +- int ret = 0; +- +- spin_lock_irqsave(&func->ffs->eps_lock, flags); +- while(count--) { +- ep->ep->driver_data = ep; +- +- ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); +- if (ret) { +- pr_err("%s: config_ep_by_speed(%s) returned %d\n", +- __func__, ep->ep->name, ret); +- break; +- } +- +- ret = usb_ep_enable(ep->ep); +- if (likely(!ret)) { +- epfile->ep = ep; +- epfile->in = usb_endpoint_dir_in(ep->ep->desc); +- epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); +- } else { +- break; +- } +- +- ++ep; +- ++epfile; +- } +- +- wake_up_interruptible(&ffs->wait); +- spin_unlock_irqrestore(&func->ffs->eps_lock, flags); +- +- return ret; +-} +- +-/* Parsing and building descriptors and strings *****************************/ +- +-/* +- * This validates if data pointed by data is a valid USB descriptor as +- * well as record how many interfaces, endpoints and strings are +- * required by given configuration. Returns address after the +- * descriptor or NULL if data is invalid. +- */ +-enum ffs_entity_type { +- FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT +-}; +- +-enum ffs_os_desc_type { +- FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP +-}; +- +-typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, u8 *valuep, +- struct usb_descriptor_header *desc, +- void *priv); +- +-typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity, +- struct usb_os_desc_header *h, void *data, +- unsigned len, void *priv); +- +-static int __must_check ffs_do_single_desc(char *data, unsigned len, +- ffs_entity_callback entity, +- void *priv) +-{ +- struct usb_descriptor_header *_ds = (void *)data; +- u8 length; +- int ret; +- +- ENTER(); +- +- /* At least two bytes are required: length and type */ +- if (len < 2) { +- pr_vdebug("descriptor too short\n"); +- return -EINVAL; +- } +- +- /* If we have at least as many bytes as the descriptor takes? */ +- length = _ds->bLength; +- if (len < length) { +- pr_vdebug("descriptor longer then available data\n"); +- return -EINVAL; +- } +- +-#define __entity_check_INTERFACE(val) 1 +-#define __entity_check_STRING(val) (val) +-#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) +-#define __entity(type, val) do { \ +- pr_vdebug("entity " #type "(%02x)\n", (val)); \ +- if (unlikely(!__entity_check_ ##type(val))) { \ +- pr_vdebug("invalid entity's value\n"); \ +- return -EINVAL; \ +- } \ +- ret = entity(FFS_ ##type, &val, _ds, priv); \ +- if (unlikely(ret < 0)) { \ +- pr_debug("entity " #type "(%02x); ret = %d\n", \ +- (val), ret); \ +- return ret; \ +- } \ +- } while (0) +- +- /* Parse descriptor depending on type. 
*/ +- switch (_ds->bDescriptorType) { +- case USB_DT_DEVICE: +- case USB_DT_CONFIG: +- case USB_DT_STRING: +- case USB_DT_DEVICE_QUALIFIER: +- /* function can't have any of those */ +- pr_vdebug("descriptor reserved for gadget: %d\n", +- _ds->bDescriptorType); +- return -EINVAL; +- +- case USB_DT_INTERFACE: { +- struct usb_interface_descriptor *ds = (void *)_ds; +- pr_vdebug("interface descriptor\n"); +- if (length != sizeof *ds) +- goto inv_length; +- +- __entity(INTERFACE, ds->bInterfaceNumber); +- if (ds->iInterface) +- __entity(STRING, ds->iInterface); +- } +- break; +- +- case USB_DT_ENDPOINT: { +- struct usb_endpoint_descriptor *ds = (void *)_ds; +- pr_vdebug("endpoint descriptor\n"); +- if (length != USB_DT_ENDPOINT_SIZE && +- length != USB_DT_ENDPOINT_AUDIO_SIZE) +- goto inv_length; +- __entity(ENDPOINT, ds->bEndpointAddress); +- } +- break; +- +- case HID_DT_HID: +- pr_vdebug("hid descriptor\n"); +- if (length != sizeof(struct hid_descriptor)) +- goto inv_length; +- break; +- +- case USB_DT_OTG: +- if (length != sizeof(struct usb_otg_descriptor)) +- goto inv_length; +- break; +- +- case USB_DT_INTERFACE_ASSOCIATION: { +- struct usb_interface_assoc_descriptor *ds = (void *)_ds; +- pr_vdebug("interface association descriptor\n"); +- if (length != sizeof *ds) +- goto inv_length; +- if (ds->iFunction) +- __entity(STRING, ds->iFunction); +- } +- break; +- +- case USB_DT_SS_ENDPOINT_COMP: +- pr_vdebug("EP SS companion descriptor\n"); +- if (length != sizeof(struct usb_ss_ep_comp_descriptor)) +- goto inv_length; +- break; +- +- case USB_DT_OTHER_SPEED_CONFIG: +- case USB_DT_INTERFACE_POWER: +- case USB_DT_DEBUG: +- case USB_DT_SECURITY: +- case USB_DT_CS_RADIO_CONTROL: +- pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType); +- break; +- default: +- /* We should never be here */ +- pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType); +- break; +-inv_length: +- pr_vdebug("invalid length: %d (descriptor %d)\n", +- _ds->bLength, _ds->bDescriptorType); +- return -EINVAL; +- } +- +-#undef __entity +-#undef __entity_check_DESCRIPTOR +-#undef __entity_check_INTERFACE +-#undef __entity_check_STRING +-#undef __entity_check_ENDPOINT +- +- return length; +-} +- +-static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, +- ffs_entity_callback entity, void *priv) +-{ +- const unsigned _len = len; +- uintptr_t num = 0; +- +- ENTER(); +- +- for (;;) { +- int ret; +- +- if (num == count) +- data = NULL; +- +- /* Record "descriptor" entity */ +- ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv); +- if (unlikely(ret < 0)) { +- pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n", +- num, ret); +- return ret; +- } +- +- if (!data) +- return _len - len; +- +- ret = ffs_do_single_desc(data, len, entity, priv); +- if (unlikely(ret < 0)) { +- pr_debug("%s returns %d\n", __func__, ret); +- return ret; +- } +- +- len -= ret; +- data += ret; +- ++num; +- } +-} +- +-static int __ffs_data_do_entity(enum ffs_entity_type type, +- u8 *valuep, struct usb_descriptor_header *desc, +- void *priv) +-{ +- struct ffs_desc_helper *helper = priv; +- struct usb_endpoint_descriptor *d = NULL; +- +- ENTER(); +- +- switch (type) { +- case FFS_DESCRIPTOR: +- break; +- +- case FFS_INTERFACE: +- /* +- * Interfaces are indexed from zero so if we +- * encountered interface "n" then there are at least +- * "n+1" interfaces. 
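+-		 * The running maximum kept below therefore ends up equal
+-		 * to the number of interfaces the configuration needs.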
+- */ +- if (*valuep >= helper->interfaces_count) +- helper->interfaces_count = *valuep + 1; +- break; +- +- case FFS_STRING: +- /* +- * Strings are indexed from 1 (0 is reserved +- * for languages list) +- */ +- if (*valuep > helper->ffs->strings_count) +- helper->ffs->strings_count = *valuep; +- break; +- +- case FFS_ENDPOINT: +- d = (void *)desc; +- helper->eps_count++; +- if (helper->eps_count >= FFS_MAX_EPS_COUNT) +- return -EINVAL; +- /* Check if descriptors for any speed were already parsed */ +- if (!helper->ffs->eps_count && !helper->ffs->interfaces_count) +- helper->ffs->eps_addrmap[helper->eps_count] = +- d->bEndpointAddress; +- else if (helper->ffs->eps_addrmap[helper->eps_count] != +- d->bEndpointAddress) +- return -EINVAL; +- break; +- } +- +- return 0; +-} +- +-static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, +- struct usb_os_desc_header *desc) +-{ +- u16 bcd_version = le16_to_cpu(desc->bcdVersion); +- u16 w_index = le16_to_cpu(desc->wIndex); +- +- if (bcd_version != 1) { +- pr_vdebug("unsupported os descriptors version: %d", +- bcd_version); +- return -EINVAL; +- } +- switch (w_index) { +- case 0x4: +- *next_type = FFS_OS_DESC_EXT_COMPAT; +- break; +- case 0x5: +- *next_type = FFS_OS_DESC_EXT_PROP; +- break; +- default: +- pr_vdebug("unsupported os descriptor type: %d", w_index); +- return -EINVAL; +- } +- +- return sizeof(*desc); +-} +- +-/* +- * Process all extended compatibility/extended property descriptors +- * of a feature descriptor +- */ +-static int __must_check ffs_do_single_os_desc(char *data, unsigned len, +- enum ffs_os_desc_type type, +- u16 feature_count, +- ffs_os_desc_callback entity, +- void *priv, +- struct usb_os_desc_header *h) +-{ +- int ret; +- const unsigned _len = len; +- +- ENTER(); +- +- /* loop over all ext compat/ext prop descriptors */ +- while (feature_count--) { +- ret = entity(type, h, data, len, priv); +- if (unlikely(ret < 0)) { +- pr_debug("bad OS descriptor, type: %d\n", type); +- return ret; +- } +- data += ret; +- len -= ret; +- } +- return _len - len; +-} +- +-/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */ +-static int __must_check ffs_do_os_descs(unsigned count, +- char *data, unsigned len, +- ffs_os_desc_callback entity, void *priv) +-{ +- const unsigned _len = len; +- unsigned long num = 0; +- +- ENTER(); +- +- for (num = 0; num < count; ++num) { +- int ret; +- enum ffs_os_desc_type type; +- u16 feature_count; +- struct usb_os_desc_header *desc = (void *)data; +- +- if (len < sizeof(*desc)) +- return -EINVAL; +- +- /* +- * Record "descriptor" entity. +- * Process dwLength, bcdVersion, wIndex, get b/wCount. +- * Move the data pointer to the beginning of extended +- * compatibilities proper or extended properties proper +- * portions of the data +- */ +- if (le32_to_cpu(desc->dwLength) > len) +- return -EINVAL; +- +- ret = __ffs_do_os_desc_header(&type, desc); +- if (unlikely(ret < 0)) { +- pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n", +- num, ret); +- return ret; +- } +- /* +- * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??" 
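+-		 * so reading the 16-bit wCount and rejecting ext-compat values
+-		 * above 255 (or a non-zero Reserved byte) also covers headers
+-		 * that store an 8-bit bCount in the same location.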
+- */ +- feature_count = le16_to_cpu(desc->wCount); +- if (type == FFS_OS_DESC_EXT_COMPAT && +- (feature_count > 255 || desc->Reserved)) +- return -EINVAL; +- len -= ret; +- data += ret; +- +- /* +- * Process all function/property descriptors +- * of this Feature Descriptor +- */ +- ret = ffs_do_single_os_desc(data, len, type, +- feature_count, entity, priv, desc); +- if (unlikely(ret < 0)) { +- pr_debug("%s returns %d\n", __func__, ret); +- return ret; +- } +- +- len -= ret; +- data += ret; +- } +- return _len - len; +-} +- +-/** +- * Validate contents of the buffer from userspace related to OS descriptors. +- */ +-static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, +- struct usb_os_desc_header *h, void *data, +- unsigned len, void *priv) +-{ +- struct ffs_data *ffs = priv; +- u8 length; +- +- ENTER(); +- +- switch (type) { +- case FFS_OS_DESC_EXT_COMPAT: { +- struct usb_ext_compat_desc *d = data; +- int i; +- +- if (len < sizeof(*d) || +- d->bFirstInterfaceNumber >= ffs->interfaces_count) +- return -EINVAL; +- if (d->Reserved1 != 1) { +- /* +- * According to the spec, Reserved1 must be set to 1 +- * but older kernels incorrectly rejected non-zero +- * values. We fix it here to avoid returning EINVAL +- * in response to values we used to accept. +- */ +- pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n"); +- d->Reserved1 = 1; +- } +- for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) +- if (d->Reserved2[i]) +- return -EINVAL; +- +- length = sizeof(struct usb_ext_compat_desc); +- } +- break; +- case FFS_OS_DESC_EXT_PROP: { +- struct usb_ext_prop_desc *d = data; +- u32 type, pdl; +- u16 pnl; +- +- if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) +- return -EINVAL; +- length = le32_to_cpu(d->dwSize); +- if (len < length) +- return -EINVAL; +- type = le32_to_cpu(d->dwPropertyDataType); +- if (type < USB_EXT_PROP_UNICODE || +- type > USB_EXT_PROP_UNICODE_MULTI) { +- pr_vdebug("unsupported os descriptor property type: %d", +- type); +- return -EINVAL; +- } +- pnl = le16_to_cpu(d->wPropertyNameLength); +- if (length < 14 + pnl) { +- pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n", +- length, pnl, type); +- return -EINVAL; +- } +- pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl)); +- if (length != 14 + pnl + pdl) { +- pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", +- length, pnl, pdl, type); +- return -EINVAL; +- } +- ++ffs->ms_os_descs_ext_prop_count; +- /* property name reported to the host as "WCHAR"s */ +- ffs->ms_os_descs_ext_prop_name_len += pnl * 2; +- ffs->ms_os_descs_ext_prop_data_len += pdl; +- } +- break; +- default: +- pr_vdebug("unknown descriptor: %d\n", type); +- return -EINVAL; +- } +- return length; +-} +- +-static int __ffs_data_got_descs(struct ffs_data *ffs, +- char *const _data, size_t len) +-{ +- char *data = _data, *raw_descs = NULL; +- unsigned os_descs_count = 0, counts[3], flags; +- int ret = -EINVAL, i; +- struct ffs_desc_helper helper; +- +- ENTER(); +- +- if (get_unaligned_le32(data + 4) != len) +- goto error; +- +- switch (get_unaligned_le32(data)) { +- case FUNCTIONFS_DESCRIPTORS_MAGIC: +- flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC; +- data += 8; +- len -= 8; +- break; +- case FUNCTIONFS_DESCRIPTORS_MAGIC_V2: +- flags = get_unaligned_le32(data + 8); +- ffs->user_flags = flags; +- if (flags & ~(FUNCTIONFS_HAS_FS_DESC | +- FUNCTIONFS_HAS_HS_DESC | +- FUNCTIONFS_HAS_SS_DESC | +- FUNCTIONFS_HAS_MS_OS_DESC | +- FUNCTIONFS_VIRTUAL_ADDR | +- FUNCTIONFS_EVENTFD | +- 
FUNCTIONFS_ALL_CTRL_RECIP | +- FUNCTIONFS_CONFIG0_SETUP)) { +- ret = -ENOSYS; +- goto error; +- } +- data += 12; +- len -= 12; +- break; +- default: +- goto error; +- } +- +- if (flags & FUNCTIONFS_EVENTFD) { +- if (len < 4) +- goto error; +- ffs->ffs_eventfd = +- eventfd_ctx_fdget((int)get_unaligned_le32(data)); +- if (IS_ERR(ffs->ffs_eventfd)) { +- ret = PTR_ERR(ffs->ffs_eventfd); +- ffs->ffs_eventfd = NULL; +- goto error; +- } +- data += 4; +- len -= 4; +- } +- +- /* Read fs_count, hs_count and ss_count (if present) */ +- for (i = 0; i < 3; ++i) { +- if (!(flags & (1 << i))) { +- counts[i] = 0; +- } else if (len < 4) { +- goto error; +- } else { +- counts[i] = get_unaligned_le32(data); +- data += 4; +- len -= 4; +- } +- } +- if (flags & (1 << i)) { +- if (len < 4) { +- goto error; +- } +- os_descs_count = get_unaligned_le32(data); +- data += 4; +- len -= 4; +- } +- +- /* Read descriptors */ +- raw_descs = data; +- helper.ffs = ffs; +- for (i = 0; i < 3; ++i) { +- if (!counts[i]) +- continue; +- helper.interfaces_count = 0; +- helper.eps_count = 0; +- ret = ffs_do_descs(counts[i], data, len, +- __ffs_data_do_entity, &helper); +- if (ret < 0) +- goto error; +- if (!ffs->eps_count && !ffs->interfaces_count) { +- ffs->eps_count = helper.eps_count; +- ffs->interfaces_count = helper.interfaces_count; +- } else { +- if (ffs->eps_count != helper.eps_count) { +- ret = -EINVAL; +- goto error; +- } +- if (ffs->interfaces_count != helper.interfaces_count) { +- ret = -EINVAL; +- goto error; +- } +- } +- data += ret; +- len -= ret; +- } +- if (os_descs_count) { +- ret = ffs_do_os_descs(os_descs_count, data, len, +- __ffs_data_do_os_desc, ffs); +- if (ret < 0) +- goto error; +- data += ret; +- len -= ret; +- } +- +- if (raw_descs == data || len) { +- ret = -EINVAL; +- goto error; +- } +- +- ffs->raw_descs_data = _data; +- ffs->raw_descs = raw_descs; +- ffs->raw_descs_length = data - raw_descs; +- ffs->fs_descs_count = counts[0]; +- ffs->hs_descs_count = counts[1]; +- ffs->ss_descs_count = counts[2]; +- ffs->ms_os_descs_count = os_descs_count; +- +- return 0; +- +-error: +- kfree(_data); +- return ret; +-} +- +-static int __ffs_data_got_strings(struct ffs_data *ffs, +- char *const _data, size_t len) +-{ +- u32 str_count, needed_count, lang_count; +- struct usb_gadget_strings **stringtabs = NULL, *t = NULL; +- const char *data = _data; +- struct usb_string *s = NULL; +- +- ENTER(); +- +- if (unlikely(len < 16 || +- get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || +- get_unaligned_le32(data + 4) != len)) +- goto error; +- str_count = get_unaligned_le32(data + 8); +- lang_count = get_unaligned_le32(data + 12); +- +- /* if one is zero the other must be zero */ +- if (unlikely(!str_count != !lang_count)) +- goto error; +- +- /* Do we have at least as many strings as descriptors need? */ +- needed_count = ffs->strings_count; +- if (unlikely(str_count < needed_count)) +- goto error; +- +- /* +- * If we don't need any strings just return and free all +- * memory. +- */ +- if (!needed_count) { +- kfree(_data); +- return 0; +- } +- +- /* Allocate everything in one chunk so there's less maintenance. 
*/ +- { +- unsigned i = 0; +- vla_group(d); +- vla_item(d, struct usb_gadget_strings *, stringtabs, +- lang_count + 1); +- vla_item(d, struct usb_gadget_strings, stringtab, lang_count); +- vla_item(d, struct usb_string, strings, +- lang_count*(needed_count+1)); +- +- char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL); +- +- if (unlikely(!vlabuf)) { +- kfree(_data); +- return -ENOMEM; +- } +- +- /* Initialize the VLA pointers */ +- stringtabs = vla_ptr(vlabuf, d, stringtabs); +- t = vla_ptr(vlabuf, d, stringtab); +- i = lang_count; +- do { +- *stringtabs++ = t++; +- } while (--i); +- *stringtabs = NULL; +- +- /* stringtabs = vlabuf = d_stringtabs for later kfree */ +- stringtabs = vla_ptr(vlabuf, d, stringtabs); +- t = vla_ptr(vlabuf, d, stringtab); +- s = vla_ptr(vlabuf, d, strings); +- } +- +- /* For each language */ +- data += 16; +- len -= 16; +- +- do { /* lang_count > 0 so we can use do-while */ +- unsigned needed = needed_count; +- +- if (unlikely(len < 3)) +- goto error_free; +- t->language = get_unaligned_le16(data); +- t->strings = s; +- ++t; +- +- data += 2; +- len -= 2; +- +- /* For each string */ +- do { /* str_count > 0 so we can use do-while */ +- size_t length = strnlen(data, len); +- +- if (unlikely(length == len)) +- goto error_free; +- +- /* +- * User may provide more strings then we need, +- * if that's the case we simply ignore the +- * rest +- */ +- if (likely(needed)) { +- /* +- * s->id will be set while adding +- * function to configuration so for +- * now just leave garbage here. +- */ +- s->s = data; +- --needed; +- ++s; +- } +- +- data += length + 1; +- len -= length + 1; +- } while (--str_count); +- +- s->id = 0; /* terminator */ +- s->s = NULL; +- ++s; +- +- } while (--lang_count); +- +- /* Some garbage left? */ +- if (unlikely(len)) +- goto error_free; +- +- /* Done! */ +- ffs->stringtabs = stringtabs; +- ffs->raw_strings = _data; +- +- return 0; +- +-error_free: +- kfree(stringtabs); +-error: +- kfree(_data); +- return -EINVAL; +-} +- +-/* Events handling and management *******************************************/ +-static void __ffs_event_add(struct ffs_data *ffs, +- enum usb_functionfs_event_type type) +-{ +- enum usb_functionfs_event_type rem_type1, rem_type2 = type; +- int neg = 0; +- +- /* +- * Abort any unhandled setup +- * +- * We do not need to worry about some cmpxchg() changing value +- * of ffs->setup_state without holding the lock because when +- * state is FFS_SETUP_PENDING cmpxchg() in several places in +- * the source does nothing. +- */ +- if (ffs->setup_state == FFS_SETUP_PENDING) +- ffs->setup_state = FFS_SETUP_CANCELLED; +- +- /* +- * Logic of this function guarantees that there are at most four pending +- * evens on ffs->ev.types queue. This is important because the queue +- * has space for four elements only and __ffs_ep0_read_events function +- * depends on that limit as well. If more event types are added, those +- * limits have to be revisited or guaranteed to still hold. +- */ +- switch (type) { +- case FUNCTIONFS_RESUME: +- rem_type2 = FUNCTIONFS_SUSPEND; +- /* FALL THROUGH */ +- case FUNCTIONFS_SUSPEND: +- case FUNCTIONFS_SETUP: +- rem_type1 = type; +- /* Discard all similar events */ +- break; +- +- case FUNCTIONFS_BIND: +- case FUNCTIONFS_UNBIND: +- case FUNCTIONFS_DISABLE: +- case FUNCTIONFS_ENABLE: +- /* Discard everything other then power management. 
*/ +- rem_type1 = FUNCTIONFS_SUSPEND; +- rem_type2 = FUNCTIONFS_RESUME; +- neg = 1; +- break; +- +- default: +- WARN(1, "%d: unknown event, this should not happen\n", type); +- return; +- } +- +- { +- u8 *ev = ffs->ev.types, *out = ev; +- unsigned n = ffs->ev.count; +- for (; n; --n, ++ev) +- if ((*ev == rem_type1 || *ev == rem_type2) == neg) +- *out++ = *ev; +- else +- pr_vdebug("purging event %d\n", *ev); +- ffs->ev.count = out - ffs->ev.types; +- } +- +- pr_vdebug("adding event %d\n", type); +- ffs->ev.types[ffs->ev.count++] = type; +- wake_up_locked(&ffs->ev.waitq); +- if (ffs->ffs_eventfd) +- eventfd_signal(ffs->ffs_eventfd, 1); +-} +- +-static void ffs_event_add(struct ffs_data *ffs, +- enum usb_functionfs_event_type type) +-{ +- unsigned long flags; +- spin_lock_irqsave(&ffs->ev.waitq.lock, flags); +- __ffs_event_add(ffs, type); +- spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); +-} +- +-/* Bind/unbind USB function hooks *******************************************/ +- +-static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address) +-{ +- int i; +- +- for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i) +- if (ffs->eps_addrmap[i] == endpoint_address) +- return i; +- return -ENOENT; +-} +- +-static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, +- struct usb_descriptor_header *desc, +- void *priv) +-{ +- struct usb_endpoint_descriptor *ds = (void *)desc; +- struct ffs_function *func = priv; +- struct ffs_ep *ffs_ep = NULL; +- unsigned ep_desc_id; +- int idx; +- static const char *speed_names[] = { "full", "high", "super" }; +- +- if (type != FFS_DESCRIPTOR) +- return 0; +- +- /* +- * If ss_descriptors is not NULL, we are reading super speed +- * descriptors; if hs_descriptors is not NULL, we are reading high +- * speed descriptors; otherwise, we are reading full speed +- * descriptors. +- */ +- if (func->function.ss_descriptors) { +- ep_desc_id = 2; +- func->function.ss_descriptors[(uintptr_t)valuep] = desc; +- } else if (func->function.hs_descriptors) { +- ep_desc_id = 1; +- func->function.hs_descriptors[(uintptr_t)valuep] = desc; +- } else { +- ep_desc_id = 0; +- func->function.fs_descriptors[(uintptr_t)valuep] = desc; +- } +- +- if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) +- return 0; +- +- idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1; +- if (idx < 0) +- return idx; +- +- ffs_ep = func->eps + idx; +- +- if (unlikely(ffs_ep->descs[ep_desc_id])) { +- pr_err("two %sspeed descriptors for EP %d\n", +- speed_names[ep_desc_id], +- ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); +- return -EINVAL; +- } +- ffs_ep->descs[ep_desc_id] = ds; +- +- ffs_dump_mem(": Original ep desc", ds, ds->bLength); +- if (ffs_ep->ep) { +- ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress; +- if (!ds->wMaxPacketSize) +- ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize; +- } else { +- struct usb_request *req = NULL; +- struct usb_ep *ep = NULL; +- u8 bEndpointAddress; +- +- /* +- * We back up bEndpointAddress because autoconfig overwrites +- * it with physical endpoint address. 
+- */ +- bEndpointAddress = ds->bEndpointAddress; +- pr_vdebug("autoconfig\n"); +- ep = usb_ep_autoconfig(func->gadget, ds); +- if (unlikely(!ep)) +- return -ENOTSUPP; +- ep->driver_data = func->eps + idx; +- +- req = usb_ep_alloc_request(ep, GFP_KERNEL); +- if (unlikely(!req)) +- return -ENOMEM; +- +- ffs_ep->ep = ep; +- ffs_ep->req = req; +- INIT_LIST_HEAD(&ffs_ep->req->list); +- func->eps_revmap[ds->bEndpointAddress & +- USB_ENDPOINT_NUMBER_MASK] = idx + 1; +- /* +- * If we use virtual address mapping, we restore +- * original bEndpointAddress value. +- */ +- if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) +- ds->bEndpointAddress = bEndpointAddress; +- } +- ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); +- +- return 0; +-} +- +-static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, +- struct usb_descriptor_header *desc, +- void *priv) +-{ +- struct ffs_function *func = priv; +- unsigned idx; +- u8 newValue; +- +- switch (type) { +- default: +- case FFS_DESCRIPTOR: +- /* Handled in previous pass by __ffs_func_bind_do_descs() */ +- return 0; +- +- case FFS_INTERFACE: +- idx = *valuep; +- if (func->interfaces_nums[idx] < 0) { +- int id = usb_interface_id(func->conf, &func->function); +- if (unlikely(id < 0)) +- return id; +- func->interfaces_nums[idx] = id; +- } +- newValue = func->interfaces_nums[idx]; +- break; +- +- case FFS_STRING: +- /* String' IDs are allocated when fsf_data is bound to cdev */ +- newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id; +- break; +- +- case FFS_ENDPOINT: +- /* +- * USB_DT_ENDPOINT are handled in +- * __ffs_func_bind_do_descs(). +- */ +- if (desc->bDescriptorType == USB_DT_ENDPOINT) +- return 0; +- +- idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1; +- if (unlikely(!func->eps[idx].ep)) +- return -EINVAL; +- +- { +- struct usb_endpoint_descriptor **descs; +- descs = func->eps[idx].descs; +- newValue = descs[descs[0] ? 
0 : 1]->bEndpointAddress; +- } +- break; +- } +- +- pr_vdebug("%02x -> %02x\n", *valuep, newValue); +- *valuep = newValue; +- return 0; +-} +- +-static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type, +- struct usb_os_desc_header *h, void *data, +- unsigned len, void *priv) +-{ +- struct ffs_function *func = priv; +- u8 length = 0; +- +- switch (type) { +- case FFS_OS_DESC_EXT_COMPAT: { +- struct usb_ext_compat_desc *desc = data; +- struct usb_os_desc_table *t; +- +- t = &func->function.os_desc_table[desc->bFirstInterfaceNumber]; +- t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber]; +- memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID, +- ARRAY_SIZE(desc->CompatibleID) + ARRAY_SIZE(desc->SubCompatibleID)); +- length = sizeof(*desc); +- } +- break; +- case FFS_OS_DESC_EXT_PROP: { +- struct usb_ext_prop_desc *desc = data; +- struct usb_os_desc_table *t; +- struct usb_os_desc_ext_prop *ext_prop; +- char *ext_prop_name; +- char *ext_prop_data; +- +- t = &func->function.os_desc_table[h->interface]; +- t->if_id = func->interfaces_nums[h->interface]; +- +- ext_prop = func->ffs->ms_os_descs_ext_prop_avail; +- func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop); +- +- ext_prop->type = le32_to_cpu(desc->dwPropertyDataType); +- ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength); +- ext_prop->data_len = le32_to_cpu(*(__le32 *) +- usb_ext_prop_data_len_ptr(data, ext_prop->name_len)); +- length = ext_prop->name_len + ext_prop->data_len + 14; +- +- ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail; +- func->ffs->ms_os_descs_ext_prop_name_avail += +- ext_prop->name_len; +- +- ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail; +- func->ffs->ms_os_descs_ext_prop_data_avail += +- ext_prop->data_len; +- memcpy(ext_prop_data, usb_ext_prop_data_ptr(data, ext_prop->name_len), +- ext_prop->data_len); +- /* unicode data reported to the host as "WCHAR"s */ +- switch (ext_prop->type) { +- case USB_EXT_PROP_UNICODE: +- case USB_EXT_PROP_UNICODE_ENV: +- case USB_EXT_PROP_UNICODE_LINK: +- case USB_EXT_PROP_UNICODE_MULTI: +- ext_prop->data_len *= 2; +- break; +- } +- ext_prop->data = ext_prop_data; +- +- memcpy(ext_prop_name, usb_ext_prop_name_ptr(data), +- ext_prop->name_len); +- /* property name reported to the host as "WCHAR"s */ +- ext_prop->name_len *= 2; +- ext_prop->name = ext_prop_name; +- +- t->os_desc->ext_prop_len += +- ext_prop->name_len + ext_prop->data_len + 14; +- ++t->os_desc->ext_prop_count; +- list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop); +- } +- break; +- default: +- pr_vdebug("unknown descriptor: %d\n", type); +- } +- +- return length; +-} +- +-static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f, +- struct usb_configuration *c) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- struct f_fs_opts *ffs_opts = +- container_of(f->fi, struct f_fs_opts, func_inst); +- int ret; +- +- ENTER(); +- +- /* +- * Legacy gadget triggers binding in functionfs_ready_callback, +- * which already uses locking; taking the same lock here would +- * cause a deadlock. +- * +- * Configfs-enabled gadgets however do need ffs_dev_lock. +- */ +- if (!ffs_opts->no_configfs) +- ffs_dev_lock(); +- ret = ffs_opts->dev->desc_ready ? 
0 : -ENODEV; +- func->ffs = ffs_opts->dev->ffs_data; +- if (!ffs_opts->no_configfs) +- ffs_dev_unlock(); +- if (ret) +- return ERR_PTR(ret); +- +- func->conf = c; +- func->gadget = c->cdev->gadget; +- +- /* +- * in drivers/usb/gadget/configfs.c:configfs_composite_bind() +- * configurations are bound in sequence with list_for_each_entry, +- * in each configuration its functions are bound in sequence +- * with list_for_each_entry, so we assume no race condition +- * with regard to ffs_opts->bound access +- */ +- if (!ffs_opts->refcnt) { +- ret = functionfs_bind(func->ffs, c->cdev); +- if (ret) +- return ERR_PTR(ret); +- } +- ffs_opts->refcnt++; +- func->function.strings = func->ffs->stringtabs; +- +- return ffs_opts; +-} +- +-static int _ffs_func_bind(struct usb_configuration *c, struct usb_function *f) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- struct ffs_data *ffs = func->ffs; +- +- const int full = !!func->ffs->fs_descs_count; +- const int high = !!func->ffs->hs_descs_count; +- const int super = !!func->ffs->ss_descs_count; +- +- int fs_len, hs_len, ss_len, ret, i; +- struct ffs_ep *eps_ptr = NULL; +- struct usb_descriptor_header *des_head = NULL; +- struct usb_interface_descriptor *intf_ctl = NULL; +- struct usb_interface_descriptor *intf_data = NULL; +- /* Make it a single chunk, less management later on */ +- vla_group(d); +- vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count); +- vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs, +- full ? ffs->fs_descs_count + 1 : 0); +- vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs, +- high ? ffs->hs_descs_count + 1 : 0); +- vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs, +- super ? ffs->ss_descs_count + 1 : 0); +- vla_item_with_sz(d, short, inums, ffs->interfaces_count); +- vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table, +- c->cdev->use_os_string ? ffs->interfaces_count : 0); +- vla_item_with_sz(d, char[16], ext_compat, +- c->cdev->use_os_string ? ffs->interfaces_count : 0); +- vla_item_with_sz(d, struct usb_os_desc, os_desc, +- c->cdev->use_os_string ? 
ffs->interfaces_count : 0); +- vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop, +- ffs->ms_os_descs_ext_prop_count); +- vla_item_with_sz(d, char, ext_prop_name, +- ffs->ms_os_descs_ext_prop_name_len); +- vla_item_with_sz(d, char, ext_prop_data, +- ffs->ms_os_descs_ext_prop_data_len); +- vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length); +- char *vlabuf = NULL; +- +- ENTER(); +- +- /* Has descriptors only for speeds gadget does not support */ +- if (unlikely(!(full | high | super))) +- return -ENOTSUPP; +- +- /* Allocate a single chunk, less management later on */ +- vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL); +- if (unlikely(!vlabuf)) +- return -ENOMEM; +- +- ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop); +- ffs->ms_os_descs_ext_prop_name_avail = +- vla_ptr(vlabuf, d, ext_prop_name); +- ffs->ms_os_descs_ext_prop_data_avail = +- vla_ptr(vlabuf, d, ext_prop_data); +- +- /* Copy descriptors */ +- memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs, ffs->raw_descs_length); +- +- memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); +- +- eps_ptr = vla_ptr(vlabuf, d, eps); +- for (i = 0; i < ffs->eps_count; i++) +- eps_ptr[i].num = -1; +- +- /* Save pointers +- * d_eps == vlabuf, func->eps used to kfree vlabuf later +- */ +- func->eps = vla_ptr(vlabuf, d, eps); +- func->interfaces_nums = vla_ptr(vlabuf, d, inums); +- +- /* +- * Go through all the endpoint descriptors and allocate +- * endpoints first, so that later we can rewrite the endpoint +- * numbers without worrying that it may be described later on. +- */ +- if (likely(full)) { +- func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs); +- fs_len = ffs_do_descs(ffs->fs_descs_count, +- vla_ptr(vlabuf, d, raw_descs), +- d_raw_descs__sz, +- __ffs_func_bind_do_descs, func); +- if (unlikely(fs_len < 0)) { +- ret = fs_len; +- goto error; +- } +- } else { +- fs_len = 0; +- } +- if (likely(high)) { +- func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs); +- hs_len = ffs_do_descs(ffs->hs_descs_count, +- vla_ptr(vlabuf, d, raw_descs) + fs_len, +- d_raw_descs__sz - fs_len, +- __ffs_func_bind_do_descs, func); +- if (unlikely(hs_len < 0)) { +- ret = hs_len; +- goto error; +- } +- } else { +- hs_len = 0; +- } +- if (likely(super)) { +- func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs); +- ss_len = ffs_do_descs(ffs->ss_descs_count, +- vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len, +- d_raw_descs__sz - fs_len - hs_len, +- __ffs_func_bind_do_descs, func); +- if (unlikely(ss_len < 0)) { +- ret = ss_len; +- goto error; +- } +- } else { +- ss_len = 0; +- } +- /* +- * Now handle interface numbers allocation and interface and +- * endpoint numbers rewriting. We can do that in one go +- * now. +- */ +- ret = ffs_do_descs(ffs->fs_descs_count + +- (high ? ffs->hs_descs_count : 0) + +- (super ? 
ffs->ss_descs_count : 0), +- vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz, +- __ffs_func_bind_do_nums, func); +- if (unlikely(ret < 0)) +- goto error; +- +- func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); +- if (c->cdev->use_os_string) { +- for (i = 0; i < ffs->interfaces_count; ++i) { +- struct usb_os_desc *desc; +- +- desc = func->function.os_desc_table[i].os_desc = +- vla_ptr(vlabuf, d, os_desc) + +- i * sizeof(struct usb_os_desc); +- desc->ext_compat_id = +- vla_ptr(vlabuf, d, ext_compat) + i * 16; +- INIT_LIST_HEAD(&desc->ext_prop); +- } +- ret = ffs_do_os_descs(ffs->ms_os_descs_count, +- vla_ptr(vlabuf, d, raw_descs) + +- fs_len + hs_len + ss_len, +- d_raw_descs__sz - fs_len - hs_len - +- ss_len, +- __ffs_func_bind_do_os_desc, func); +- if (unlikely(ret < 0)) +- goto error; +- } +- func->function.os_desc_n = +- c->cdev->use_os_string ? ffs->interfaces_count : 0; +- +- for (i = 0; i< func->ffs->fs_descs_count; i++) { +- des_head = func->function.fs_descriptors[i]; +- if (des_head->bDescriptorType == USB_DT_INTERFACE) { +- struct usb_interface_descriptor *intf = (struct usb_interface_descriptor *)des_head; +- if (intf->bNumEndpoints > 0) { +- if (intf_ctl == NULL) { +- intf_ctl = intf; +- } else { +- intf_data = intf; +- break; +- } +- } +- } +- } +- for (i = 0; i< func->ffs->fs_descs_count; i++) { +- des_head = func->function.fs_descriptors[i]; +- if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { +- struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head; +- a_dec->bFirstInterface = intf_ctl->bInterfaceNumber; +- } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) { +- struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head; +- if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) { +- struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head; +- mgmt_des->bDataInterface = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) { +- struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head; +- union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber; +- union_des->bSlaveInterface0 = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) { +- struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head; +- ether_des->iMACAddress = intf_ctl->iInterface + 1; +- } +- } +- } +- for (i = 0; i< func->ffs->hs_descs_count; i++) { +- des_head = func->function.hs_descriptors[i]; +- if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { +- struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head; +- a_dec->bFirstInterface = intf_ctl->bInterfaceNumber; +- } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) { +- struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head; +- if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) { +- struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head; +- mgmt_des->bDataInterface = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) { +- struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head; +- union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber; +- union_des->bSlaveInterface0 = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) { +- struct usb_cdc_ether_desc 
*ether_des = (struct usb_cdc_ether_desc *)des_head; +- ether_des->iMACAddress = intf_ctl->iInterface + 1; +- } +- } +- } +- for (i = 0; i< func->ffs->ss_descs_count; i++) { +- des_head = func->function.ss_descriptors[i]; +- if (des_head->bDescriptorType == USB_DT_INTERFACE_ASSOCIATION) { +- struct usb_interface_assoc_descriptor *a_dec = (struct usb_interface_assoc_descriptor *)des_head; +- a_dec->bFirstInterface = intf_ctl->bInterfaceNumber; +- } else if (des_head->bDescriptorType == USB_DT_CS_INTERFACE) { +- struct usb_cdc_header_desc *cs_des = (struct usb_cdc_header_desc *)des_head; +- if (cs_des->bDescriptorSubType == USB_CDC_CALL_MANAGEMENT_TYPE) { +- struct usb_cdc_call_mgmt_descriptor *mgmt_des = (struct usb_cdc_call_mgmt_descriptor *)des_head; +- mgmt_des->bDataInterface = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_UNION_TYPE) { +- struct usb_cdc_union_desc *union_des = (struct usb_cdc_union_desc *)des_head; +- union_des->bMasterInterface0 = intf_ctl->bInterfaceNumber; +- union_des->bSlaveInterface0 = intf_data->bInterfaceNumber; +- } else if (cs_des->bDescriptorSubType == USB_CDC_ETHERNET_TYPE) { +- struct usb_cdc_ether_desc *ether_des = (struct usb_cdc_ether_desc *)des_head; +- ether_des->iMACAddress = intf_ctl->iInterface + 1; +- } +- } +- } +- /* And we're done */ +- ffs->eps = func->eps; +- ffs_event_add(ffs, FUNCTIONFS_BIND); +- return 0; +- +-error: +- /* XXX Do we need to release all claimed endpoints here? */ +- return ret; +-} +- +-static int ffs_func_bind(struct usb_configuration *c, struct usb_function *f) +-{ +- struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c); +- struct ffs_function *func = ffs_func_from_usb(f); +- int ret; +- +- if (IS_ERR(ffs_opts)) +- return PTR_ERR(ffs_opts); +- +- ret = _ffs_func_bind(c, f); +- if (ret && !--ffs_opts->refcnt) +- functionfs_unbind(func->ffs); +- +- return ret; +-} +- +-/* Other USB function hooks *************************************************/ +-static void ffs_reset_work(struct work_struct *work) +-{ +- struct ffs_data *ffs = container_of(work, +- struct ffs_data, reset_work); +- ffs_data_reset(ffs); +-} +- +-static int ffs_func_set_alt(struct usb_function *f, +- unsigned interface, unsigned alt) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- struct ffs_data *ffs = func->ffs; +- int ret = 0, intf; +- +- if (alt != (unsigned)-1) { +- intf = ffs_func_revmap_intf(func, interface); +- if (unlikely(intf < 0)) +- return intf; +- } +- +- if (ffs->func) +- ffs_func_eps_disable(ffs->func); +- +- if (ffs->state == FFS_DEACTIVATED) { +- ffs->state = FFS_CLOSING; +- INIT_WORK(&ffs->reset_work, ffs_reset_work); +- schedule_work(&ffs->reset_work); +- return -ENODEV; +- } +- +- if (ffs->state != FFS_ACTIVE) +- return -ENODEV; +- +- if (alt == (unsigned)-1) { +- ffs->func = NULL; +- ffs_event_add(ffs, FUNCTIONFS_DISABLE); +- return 0; +- } +- +- ffs->func = func; +- ret = ffs_func_eps_enable(func); +- if (likely(ret >= 0)) +- ffs_event_add(ffs, FUNCTIONFS_ENABLE); +- return ret; +-} +- +-static void ffs_func_disable(struct usb_function *f) +-{ +- ffs_func_set_alt(f, 0, (unsigned)-1); +-} +- +-static int ffs_func_setup(struct usb_function *f, const struct usb_ctrlrequest *creq) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- struct ffs_data *ffs = func->ffs; +- unsigned long flags; +- int ret; +- +- ENTER(); +- +- pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType); +- pr_vdebug("creq->bRequest = %02x\n", creq->bRequest); +- pr_vdebug("creq->wValue = %04x\n", 
le16_to_cpu(creq->wValue)); +- pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex)); +- pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength)); +- +- /* +- * Most requests directed to interface go through here +- * (notable exceptions are set/get interface) so we need to +- * handle them. All other either handled by composite or +- * passed to usb_configuration->setup() (if one is set). No +- * matter, we will handle requests directed to endpoint here +- * as well (as it's straightforward). Other request recipient +- * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP +- * is being used. +- */ +- if (ffs->state != FFS_ACTIVE) +- return -ENODEV; +- +- switch (creq->bRequestType & USB_RECIP_MASK) { +- case USB_RECIP_INTERFACE: +- ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex)); +- if (unlikely(ret < 0)) +- return ret; +- break; +- +- case USB_RECIP_ENDPOINT: +- ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex)); +- if (unlikely(ret < 0)) +- return ret; +- if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) +- ret = func->ffs->eps_addrmap[ret]; +- break; +- +- default: +- if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP) +- ret = le16_to_cpu(creq->wIndex); +- else +- return -EOPNOTSUPP; +- } +- +- spin_lock_irqsave(&ffs->ev.waitq.lock, flags); +- ffs->ev.setup = *creq; +- ffs->ev.setup.wIndex = cpu_to_le16(ret); +- __ffs_event_add(ffs, FUNCTIONFS_SETUP); +- spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); +- +- return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; +-} +- +-static bool ffs_func_req_match(struct usb_function *f, +- const struct usb_ctrlrequest *creq, +- bool config0) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- +- if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP)) +- return false; +- +- switch (creq->bRequestType & USB_RECIP_MASK) { +- case USB_RECIP_INTERFACE: +- return (ffs_func_revmap_intf(func, +- le16_to_cpu(creq->wIndex)) >= 0); +- case USB_RECIP_ENDPOINT: +- return (ffs_func_revmap_ep(func, +- le16_to_cpu(creq->wIndex)) >= 0); +- default: +- return (bool) (func->ffs->user_flags & +- FUNCTIONFS_ALL_CTRL_RECIP); +- } +-} +- +-static void ffs_func_suspend(struct usb_function *f) +-{ +- ENTER(); +- ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); +-} +- +-static void ffs_func_resume(struct usb_function *f) +-{ +- ENTER(); +- ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); +-} +- +-/* Endpoint and interface numbers reverse mapping ***************************/ +-static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) +-{ +- num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK]; +- return num ? 
num : -EDOM; +-} +- +-static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) +-{ +- short *nums = func->interfaces_nums; +- unsigned count = func->ffs->interfaces_count; +- +- for (; count; --count, ++nums) { +- if (*nums >= 0 && *nums == intf) +- return nums - func->interfaces_nums; +- } +- +- return -EDOM; +-} +- +-/* Devices management *******************************************************/ +-static LIST_HEAD(ffs_devices); +- +-static struct ffs_dev *_ffs_do_find_dev(const char *name) +-{ +- struct ffs_dev *dev = NULL; +- +- if (!name) +- return NULL; +- +- list_for_each_entry(dev, &ffs_devices, entry) { +- if (!dev->name) +- return NULL; +- if (strcmp(dev->name, name) == 0) +- return dev; +- } +- +- return NULL; +-} +- +-/* +- * ffs_lock must be taken by the caller of this function +- */ +-static struct ffs_dev *_ffs_get_single_dev(void) +-{ +- struct ffs_dev *dev = NULL; +- +- if (list_is_singular(&ffs_devices)) { +- dev = list_first_entry(&ffs_devices, struct ffs_dev, entry); +- if (dev->single) +- return dev; +- } +- +- return NULL; +-} +- +-/* +- * ffs_lock must be taken by the caller of this function +- */ +-static struct ffs_dev *_ffs_find_dev(const char *name) +-{ +- struct ffs_dev *dev; +- +- dev = _ffs_get_single_dev(); +- if (dev) +- return dev; +- +- return _ffs_do_find_dev(name); +-} +- +-/* Configfs support *********************************************************/ +-static inline struct f_fs_opts *to_ffs_opts(struct config_item *item) +-{ +- return container_of(to_config_group(item), struct f_fs_opts, +- func_inst.group); +-} +- +-static void ffs_attr_release(struct config_item *item) +-{ +- struct f_fs_opts *opts = to_ffs_opts(item); +- +- usb_put_function_instance(&opts->func_inst); +-} +- +-static struct configfs_item_operations ffs_item_ops = { +- .release = ffs_attr_release, +-}; +- +-static const struct config_item_type ffs_func_type = { +- .ct_item_ops = &ffs_item_ops, +- .ct_owner = THIS_MODULE, +-}; +- +-/* Function registration interface ******************************************/ +-static void ffs_free_inst(struct usb_function_instance *f) +-{ +- struct f_fs_opts *opts; +- +- opts = to_f_fs_opts(f); +- ffs_dev_lock(); +- _ffs_free_dev(opts->dev); +- ffs_dev_unlock(); +- kfree(opts); +-} +- +-static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) +-{ +- char name_dev[MAX_NAMELEN] = {0}; +- if (snprintf(name_dev, MAX_NAMELEN - 1, "%s.%s", FUNCTION_GENERIC, name) < 0) { +- return -EFAULT; +- } +- if (strlen(name_dev) >= sizeof_field(struct ffs_dev, name)) +- return -ENAMETOOLONG; +- return ffs_name_dev_adapter(to_f_fs_opts(fi)->dev, name_dev); +-} +- +-static struct usb_function_instance *ffs_alloc_inst(void) +-{ +- struct f_fs_opts *opts = NULL; +- struct ffs_dev *dev = NULL; +- +- opts = kzalloc(sizeof(*opts), GFP_KERNEL); +- if (!opts) +- return ERR_PTR(-ENOMEM); +- +- opts->func_inst.set_inst_name = ffs_set_inst_name; +- opts->func_inst.free_func_inst = ffs_free_inst; +- ffs_dev_lock(); +- dev = _ffs_alloc_dev(); +- ffs_dev_unlock(); +- if (IS_ERR(dev)) { +- kfree(opts); +- return ERR_CAST(dev); +- } +- opts->dev = dev; +- dev->opts = opts; +- +- config_group_init_type_name(&opts->func_inst.group, "", +- &ffs_func_type); +- return &opts->func_inst; +-} +- +-static void ffs_free(struct usb_function *f) +-{ +- kfree(ffs_func_from_usb(f)); +-} +- +-static void ffs_func_unbind(struct usb_configuration *c, +- struct usb_function *f) +-{ +- struct ffs_function *func = ffs_func_from_usb(f); +- struct ffs_data *ffs = func->ffs; 
+- struct f_fs_opts *opts = +- container_of(f->fi, struct f_fs_opts, func_inst); +- struct ffs_ep *ep = func->eps; +- unsigned count = ffs->eps_count; +- unsigned long flags; +- +- ENTER(); +- if (ffs->func == func) { +- ffs_func_eps_disable(func); +- ffs->func = NULL; +- } +- +- if (!--opts->refcnt) +- functionfs_unbind(ffs); +- +- /* cleanup after autoconfig */ +- spin_lock_irqsave(&func->ffs->eps_lock, flags); +- while (count--) { +- if (ep->ep && ep->req) +- usb_ep_free_request(ep->ep, ep->req); +- ep->req = NULL; +- ++ep; +- } +- spin_unlock_irqrestore(&func->ffs->eps_lock, flags); +- kfree(func->eps); +- func->eps = NULL; +- /* +- * eps, descriptors and interfaces_nums are allocated in the +- * same chunk so only one free is required. +- */ +- func->function.fs_descriptors = NULL; +- func->function.hs_descriptors = NULL; +- func->function.ss_descriptors = NULL; +- func->interfaces_nums = NULL; +- +- ffs_event_add(ffs, FUNCTIONFS_UNBIND); +-} +- +-static int ffs_func_get_alt(struct usb_function *f, unsigned intf) +-{ +- if (intf == 0) +- return 0; +- return 1; +-} +- +-static struct usb_function *ffs_alloc(struct usb_function_instance *fi) +-{ +- struct ffs_function *func = NULL; +- +- ENTER(); +- +- func = kzalloc(sizeof(*func), GFP_KERNEL); +- if (unlikely(!func)) +- return ERR_PTR(-ENOMEM); +- +- func->function.name = "FunctionFS Adapter"; +- +- func->function.bind = ffs_func_bind; +- func->function.unbind = ffs_func_unbind; +- func->function.set_alt = ffs_func_set_alt; +- func->function.get_alt = ffs_func_get_alt; +- func->function.disable = ffs_func_disable; +- func->function.setup = ffs_func_setup; +- func->function.req_match = ffs_func_req_match; +- func->function.suspend = ffs_func_suspend; +- func->function.resume = ffs_func_resume; +- func->function.free_func = ffs_free; +- +- return &func->function; +-} +- +-/* +- * ffs_lock must be taken by the caller of this function +- */ +-static struct ffs_dev *_ffs_alloc_dev(void) +-{ +- struct ffs_dev *dev = NULL; +- int ret; +- +- if (_ffs_get_single_dev()) +- return ERR_PTR(-EBUSY); +- +- dev = kzalloc(sizeof(*dev), GFP_KERNEL); +- if (!dev) +- return ERR_PTR(-ENOMEM); +- +- if (list_empty(&ffs_devices)) { +- ret = functionfs_init(); +- if (ret) { +- kfree(dev); +- return ERR_PTR(ret); +- } +- } +- +- list_add(&dev->entry, &ffs_devices); +- +- return dev; +-} +- +-int ffs_name_dev_adapter(struct ffs_dev *dev, const char *name) +-{ +- struct ffs_dev *existing = NULL; +- int ret = 0; +- +- ffs_dev_lock(); +- +- existing = _ffs_do_find_dev(name); +- if (!existing) +- strlcpy(dev->name, name, ARRAY_SIZE(dev->name)); +- else if (existing != dev) +- ret = -EBUSY; +- +- ffs_dev_unlock(); +- +- return ret; +-} +-EXPORT_SYMBOL_GPL(ffs_name_dev_adapter); +- +-int ffs_single_dev_adapter(struct ffs_dev *dev) +-{ +- int ret; +- +- ret = 0; +- ffs_dev_lock(); +- +- if (!list_is_singular(&ffs_devices)) +- ret = -EBUSY; +- else +- dev->single = true; +- +- ffs_dev_unlock(); +- return ret; +-} +-EXPORT_SYMBOL_GPL(ffs_single_dev_adapter); +-/* +- * ffs_lock must be taken by the caller of this function +- */ +-static void _ffs_free_dev(struct ffs_dev *dev) +-{ +- list_del(&dev->entry); +- +- /* Clear the private_data pointer to stop incorrect dev access */ +- if (dev->ffs_data) +- dev->ffs_data->private_data = NULL; +- +- kfree(dev); +- if (list_empty(&ffs_devices)) +- functionfs_cleanup(); +-} +- +-static void *ffs_acquire_dev(const char *dev_name) +-{ +- struct ffs_dev *ffs_dev = NULL; +- +- ENTER(); +- ffs_dev_lock(); +- +- ffs_dev = 
_ffs_find_dev(dev_name); +- if (!ffs_dev) +- ffs_dev = ERR_PTR(-ENOENT); +- else if (ffs_dev->mounted) +- ffs_dev = ERR_PTR(-EBUSY); +- else if (ffs_dev->ffs_acquire_dev_callback && +- ffs_dev->ffs_acquire_dev_callback(ffs_dev)) +- ffs_dev = ERR_PTR(-ENOENT); +- else +- ffs_dev->mounted = true; +- +- ffs_dev_unlock(); +- return ffs_dev; +-} +- +-static void ffs_release_dev(struct ffs_data *ffs_data) +-{ +- struct ffs_dev *ffs_dev = NULL; +- +- ENTER(); +- ffs_dev_lock(); +- +- ffs_dev = ffs_data->private_data; +- if (ffs_dev) { +- ffs_dev->mounted = false; +- +- if (ffs_dev->ffs_release_dev_callback) +- ffs_dev->ffs_release_dev_callback(ffs_dev); +- } +- +- ffs_dev_unlock(); +-} +- +-static int ffs_ready(struct ffs_data *ffs) +-{ +- struct ffs_dev *ffs_obj = NULL; +- int ret = 0; +- +- ENTER(); +- ffs_dev_lock(); +- +- ffs_obj = ffs->private_data; +- if (!ffs_obj) { +- ret = -EINVAL; +- goto done; +- } +- if (WARN_ON(ffs_obj->desc_ready)) { +- ret = -EBUSY; +- goto done; +- } +- +- ffs_obj->desc_ready = true; +- ffs_obj->ffs_data = ffs; +- +- if (ffs_obj->ffs_ready_callback) { +- ret = ffs_obj->ffs_ready_callback(ffs); +- if (ret) +- goto done; +- } +- +- set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); +-done: +- ffs_dev_unlock(); +- return ret; +-} +- +-static void ffs_closed(struct ffs_data *ffs) +-{ +- struct ffs_dev *ffs_obj = NULL; +- struct f_fs_opts *opts = NULL; +- struct config_item *ci = NULL; +- +- ENTER(); +- ffs_dev_lock(); +- +- ffs_obj = ffs->private_data; +- if (!ffs_obj) +- goto done; +- +- ffs_obj->desc_ready = false; +- ffs_obj->ffs_data = NULL; +- +- if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && +- ffs_obj->ffs_closed_callback) +- ffs_obj->ffs_closed_callback(ffs); +- +- if (ffs_obj->opts) +- opts = ffs_obj->opts; +- else +- goto done; +- +- if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent +- || !kref_read(&opts->func_inst.group.cg_item.ci_kref)) +- goto done; +- +- ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; +- ffs_dev_unlock(); +- +- if (test_bit(FFS_FL_BOUND, &ffs->flags)) +- unregister_gadget_item(ci); +- return; +-done: +- ffs_dev_unlock(); +-} +- +-/* Misc helper functions ****************************************************/ +-static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) +-{ +- return nonblock +- ? likely(mutex_trylock(mutex)) ? 
0 : -EAGAIN +- : mutex_lock_interruptible(mutex); +-} +- +-static char *ffs_prepare_buffer(const char __user *buf, size_t len) +-{ +- char *data = NULL; +- +- if (unlikely(!len)) +- return NULL; +- +- data = kmalloc(len, GFP_KERNEL); +- if (unlikely(!data)) +- return ERR_PTR(-ENOMEM); +- +- if (unlikely(copy_from_user(data, buf, len))) { +- kfree(data); +- return ERR_PTR(-EFAULT); +- } +- +- pr_vdebug("Buffer from user space:\n"); +- ffs_dump_mem("", data, len); +- +- return data; +-} +- +-DECLARE_USB_FUNCTION_INIT(f_generic, ffs_alloc_inst, ffs_alloc); +-MODULE_LICENSE("GPL"); +diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c +index c265a1f62..c779e71ca 100644 +--- a/drivers/usb/gadget/function/f_mass_storage.c ++++ b/drivers/usb/gadget/function/f_mass_storage.c +@@ -279,6 +279,9 @@ struct fsg_common { + unsigned int bad_lun_okay:1; + unsigned int running:1; + unsigned int sysfs:1; ++#ifdef CONFIG_ARCH_BSP ++ unsigned int actived:1; ++#endif + + struct completion thread_notifier; + struct task_struct *thread_task; +@@ -1403,7 +1406,11 @@ static int do_start_stop(struct fsg_common *common) + + up_read(&common->filesem); + down_write(&common->filesem); ++#ifdef CONFIG_ARCH_BSP ++ common->actived = 0; ++#else + fsg_lun_close(curlun); ++#endif + up_write(&common->filesem); + down_read(&common->filesem); + +@@ -1840,7 +1847,11 @@ static int check_command(struct fsg_common *common, int cmnd_size, + + /* If the medium isn't mounted and the command needs to access + * it, return an error. */ ++#ifdef CONFIG_ARCH_BSP ++ if (curlun && !common->actived && needs_medium) { ++#else + if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { ++#endif + curlun->sense_data = SS_MEDIUM_NOT_PRESENT; + return -EINVAL; + } +@@ -2340,6 +2351,9 @@ static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) + } + + common->running = 0; ++#ifdef CONFIG_ARCH_BSP ++ common->actived = 0; ++#endif + if (!new_fsg || rc) + return rc; + +@@ -2385,6 +2399,9 @@ static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) + } + + common->running = 1; ++#ifdef CONFIG_ARCH_BSP ++ common->actived = 1; ++#endif + for (i = 0; i < ARRAY_SIZE(common->luns); ++i) + if (common->luns[i]) + common->luns[i]->unit_attention_data = +diff --git a/drivers/usb/gadget/function/u_generic.h b/drivers/usb/gadget/function/u_generic.h +deleted file mode 100644 +index 24429c7fd..000000000 +--- a/drivers/usb/gadget/function/u_generic.h ++++ /dev/null +@@ -1,356 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * u_fs.h +- * +- * Utility definitions for the FunctionFS +- * +- * Copyright (c) 2013 Samsung Electronics Co., Ltd. +- * http://www.samsung.com +- * +- * Author: Andrzej Pietrasiewicz +- */ +- +-#ifndef U_GENERIC_H +-#define U_GENERIC_H +- +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef VERBOSE_DEBUG +-#ifndef pr_vdebug +-# define pr_vdebug pr_debug +-#endif /* pr_vdebug */ +-# define ffs_dump_mem(prefix, ptr, len) \ +- print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len) +-#else +-#ifndef pr_vdebug +-# define pr_vdebug(...) 
do { } while (0) +-#endif /* pr_vdebug */ +-# define ffs_dump_mem(prefix, ptr, len) do { } while (0) +-#endif /* VERBOSE_DEBUG */ +- +-#define ENTER() pr_vdebug("%s()\n", __func__) +- +-#define MAX_REQUEST 64 +-#define MAX_NAMELEN 64 +-#define FUNCTION_GENERIC "f_generic" +- +-struct FuncNew { +- uint32_t nameLen; +- char name[MAX_NAMELEN]; +-}; +- +-struct IoData { +- uint32_t aio; +- uint32_t read; +- uint32_t len; +- uint32_t timeout; +- uint64_t buf; +-}; +- +-struct UsbFnReqEvent { +- uint64_t buf; +- uint32_t actual; +- int status; +-}; +- +-struct ffs_memory{ +- uint64_t mem; +- uint64_t vm_start; +- uint32_t size; +- struct list_head memlist; +-}; +- +-struct generic_memory{ +- uint32_t size; +- uint64_t buf; +-}; +- +- +-#define FUNCTIONFS_NEWFN _IOW('g', 60, struct FuncNew) +-#define FUNCTIONFS_DELFN _IOW('g', 61, struct FuncNew) +-#define FUNCTIONFS_ENDPOINT_GET_REQ_STATUS _IOW('g', 48, struct IoData) +-#define FUNCTIONFS_ENDPOINT_WRITE _IOW('g', 49, struct IoData) +-#define FUNCTIONFS_ENDPOINT_READ _IOW('g', 50, struct IoData) +-#define FUNCTIONFS_ENDPOINT_RW_CANCEL _IOW('g', 51, struct IoData) +-#define FUNCTIONFS_ENDPOINT_QUEUE_INIT _IO('g', 52) +-#define FUNCTIONFS_ENDPOINT_QUEUE_DEL _IO('g', 53) +-#define FUNCTIONFS_ENDPOINT_RELEASE_BUF _IOR('g', 54, struct generic_memory) +-#define FUNCTIONFS_ENDPOINT_GET_EP0_EVENT _IOR('g', 56, struct UsbFnReqEvent) +- +-struct f_fs_opts; +- +-struct ffs_dev { +- struct ffs_data *ffs_data; +- struct f_fs_opts *opts; +- struct list_head entry; +- +- char name[MAX_NAMELEN]; +- +- bool mounted; +- bool desc_ready; +- bool single; +- +- int (*ffs_ready_callback)(struct ffs_data *ffs); +- void (*ffs_closed_callback)(struct ffs_data *ffs); +- void *(*ffs_acquire_dev_callback)(struct ffs_dev *dev); +- void (*ffs_release_dev_callback)(struct ffs_dev *dev); +-}; +- +-extern struct mutex ffs_lock_adapter; +- +-static inline void ffs_dev_lock(void) +-{ +- mutex_lock(&ffs_lock_adapter); +-} +- +-static inline void ffs_dev_unlock(void) +-{ +- mutex_unlock(&ffs_lock_adapter); +-} +- +-int ffs_name_dev_adapter(struct ffs_dev *dev, const char *name); +-int ffs_single_dev_adapter(struct ffs_dev *dev); +- +-struct ffs_epfile; +-struct ffs_function; +- +-enum ffs_state { +- /* +- * Waiting for descriptors and strings. +- * +- * In this state no open(2), read(2) or write(2) on epfiles +- * may succeed (which should not be the problem as there +- * should be no such files opened in the first place). +- */ +- FFS_READ_DESCRIPTORS, +- FFS_READ_STRINGS, +- +- /* +- * We've got descriptors and strings. We are or have called +- * functionfs_ready_callback(). functionfs_bind() may have +- * been called but we don't know. +- * +- * This is the only state in which operations on epfiles may +- * succeed. +- */ +- FFS_ACTIVE, +- +- /* +- * Function is visible to host, but it's not functional. All +- * setup requests are stalled and transfers on another endpoints +- * are refused. All epfiles, except ep0, are deleted so there +- * is no way to perform any operations on them. +- * +- * This state is set after closing all functionfs files, when +- * mount parameter "no_disconnect=1" has been set. Function will +- * remain in deactivated state until filesystem is umounted or +- * ep0 is opened again. In the second case functionfs state will +- * be reset, and it will be ready for descriptors and strings +- * writing. 
+- * +- * This is useful only when functionfs is composed to gadget +- * with another function which can perform some critical +- * operations, and it's strongly desired to have this operations +- * completed, even after functionfs files closure. +- */ +- FFS_DEACTIVATED, +- +- /* +- * All endpoints have been closed. This state is also set if +- * we encounter an unrecoverable error. The only +- * unrecoverable error is situation when after reading strings +- * from user space we fail to initialise epfiles or +- * functionfs_ready_callback() returns with error (<0). +- * +- * In this state no open(2), read(2) or write(2) (both on ep0 +- * as well as epfile) may succeed (at this point epfiles are +- * unlinked and all closed so this is not a problem; ep0 is +- * also closed but ep0 file exists and so open(2) on ep0 must +- * fail). +- */ +- FFS_CLOSING +-}; +- +-enum ffs_setup_state { +- /* There is no setup request pending. */ +- FFS_NO_SETUP, +- /* +- * User has read events and there was a setup request event +- * there. The next read/write on ep0 will handle the +- * request. +- */ +- FFS_SETUP_PENDING, +- /* +- * There was event pending but before user space handled it +- * some other event was introduced which canceled existing +- * setup. If this state is set read/write on ep0 return +- * -EIDRM. This state is only set when adding event. +- */ +- FFS_SETUP_CANCELLED +-}; +- +-struct ffs_data { +- struct usb_gadget *gadget; +- struct list_head entry; +- struct list_head memory_list; +- /* +- * Protect access read/write operations, only one read/write +- * at a time. As a consequence protects ep0req and company. +- * While setup request is being processed (queued) this is +- * held. +- */ +- struct mutex mutex; +- +- /* +- * Protect access to endpoint related structures (basically +- * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for +- * endpoint zero. +- */ +- spinlock_t eps_lock; +- spinlock_t mem_lock; +- +- /* +- * XXX REVISIT do we need our own request? Since we are not +- * handling setup requests immediately user space may be so +- * slow that another setup will be sent to the gadget but this +- * time not to us but another function and then there could be +- * a race. Is that the case? Or maybe we can use cdev->req +- * after all, maybe we just need some spinlock for that? +- */ +- struct usb_request *ep0req; /* P: mutex */ +- struct completion ep0req_completion; /* P: mutex */ +- +- /* reference counter */ +- refcount_t ref; +- /* how many files are opened (EP0 and others) */ +- atomic_t opened; +- +- /* EP0 state */ +- enum ffs_state state; +- +- /* +- * Possible transitions: +- * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock +- * happens only in ep0 read which is P: mutex +- * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock +- * happens only in ep0 i/o which is P: mutex +- * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELLED -- P: ev.waitq.lock +- * + FFS_SETUP_CANCELLED -> FFS_NO_SETUP -- cmpxchg +- * +- * This field should never be accessed directly and instead +- * ffs_setup_state_clear_cancelled function should be used. +- */ +- enum ffs_setup_state setup_state; +- +- /* Events & such. */ +- struct { +- u8 types[4]; +- unsigned short count; +- /* XXX REVISIT need to update it in some places, or do we? 
*/ +- unsigned short can_stall; +- struct usb_ctrlrequest setup; +- +- wait_queue_head_t waitq; +- } ev; /* the whole structure, P: ev.waitq.lock */ +- +- /* Flags */ +- unsigned long flags; +-#define FFS_FL_CALL_CLOSED_CALLBACK 0 +-#define FFS_FL_BOUND 1 +- +- /* For waking up blocked threads when function is enabled. */ +- wait_queue_head_t wait; +- +- /* Active function */ +- struct ffs_function *func; +- +- +- char dev_name[MAX_NAMELEN]; +- struct cdev cdev; +- dev_t devno; +- struct device *fn_device; +- +- struct kfifo reqEventFifo; +- wait_queue_head_t wait_que; +- /* Private data for our user (ie. gadget). Managed by user. */ +- void *private_data; +- /* filled by __ffs_data_got_descs() */ +- /* +- * raw_descs is what you kfree, real_descs points inside of raw_descs, +- * where full speed, high speed and super speed descriptors start. +- * real_descs_length is the length of all those descriptors. +- */ +- const void *raw_descs_data; +- const void *raw_descs; +- unsigned raw_descs_length; +- unsigned fs_descs_count; +- unsigned hs_descs_count; +- unsigned ss_descs_count; +- unsigned ms_os_descs_count; +- unsigned ms_os_descs_ext_prop_count; +- unsigned ms_os_descs_ext_prop_name_len; +- unsigned ms_os_descs_ext_prop_data_len; +- void *ms_os_descs_ext_prop_avail; +- void *ms_os_descs_ext_prop_name_avail; +- void *ms_os_descs_ext_prop_data_avail; +- +- unsigned user_flags; +- +-#define FFS_MAX_EPS_COUNT 31 +- u8 eps_addrmap[FFS_MAX_EPS_COUNT]; +- +- unsigned short strings_count; +- unsigned short interfaces_count; +- unsigned short eps_count; +- unsigned short _pad1; +- +- /* filled by __ffs_data_got_strings() */ +- /* ids in stringtabs are set in functionfs_bind() */ +- const void *raw_strings; +- struct usb_gadget_strings **stringtabs; +- +- /* +- * File system's super block, write once when file system is +- * mounted. +- */ +- struct super_block *sb; +- +- /* File permissions, written once when fs is mounted */ +- struct ffs_file_perms { +- umode_t mode; +- kuid_t uid; +- kgid_t gid; +- } file_perms; +- +- struct eventfd_ctx *ffs_eventfd; +- struct workqueue_struct *io_completion_wq; +- bool no_disconnect; +- struct work_struct reset_work; +- +- /* +- * The endpoint files, filled by ffs_epfiles_create(), +- * destroyed by ffs_epfiles_destroy(). 
+- */ +- struct ffs_epfile *epfiles; +- struct ffs_ep *eps; +- enum usb_device_speed speed; +-}; +- +- +-struct f_fs_opts { +- struct usb_function_instance func_inst; +- struct ffs_dev *dev; +- unsigned refcnt; +- bool no_configfs; +-}; +- +-static inline struct f_fs_opts *to_f_fs_opts(struct usb_function_instance *fi) +-{ +- return container_of(fi, struct f_fs_opts, func_inst); +-} +- +-#endif /* U_GENERIC_H */ +\ No newline at end of file +diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c +index e61bad79d..0195625be 100644 +--- a/drivers/usb/gadget/function/uvc_v4l2.c ++++ b/drivers/usb/gadget/function/uvc_v4l2.c +@@ -264,29 +264,15 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) + if (!uframe) + return -EINVAL; + +- if (uformat->type == UVCG_UNCOMPRESSED) { +- struct uvcg_uncompressed *u = +- to_uvcg_uncompressed(&uformat->group.cg_item); +- if (!u) +- return 0; +- +- v4l2_fill_pixfmt(&fmt->fmt.pix, fmt->fmt.pix.pixelformat, +- uframe->frame.w_width, uframe->frame.w_height); +- +- if (fmt->fmt.pix.sizeimage != (uvc_v4l2_get_bytesperline(uformat, uframe) * +- uframe->frame.w_height)) +- return -EINVAL; +- } else { +- fmt->fmt.pix.width = uframe->frame.w_width; +- fmt->fmt.pix.height = uframe->frame.w_height; +- fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe); +- fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe); +- fmtdesc = to_uvc_format(uformat); +- if (IS_ERR(fmtdesc)) +- return PTR_ERR(fmtdesc); +- fmt->fmt.pix.pixelformat = fmtdesc->fcc; +- } ++ fmt->fmt.pix.width = uframe->frame.w_width; ++ fmt->fmt.pix.height = uframe->frame.w_height; + fmt->fmt.pix.field = V4L2_FIELD_NONE; ++ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe); ++ fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe); ++ fmtdesc = to_uvc_format(uformat); ++ if (IS_ERR(fmtdesc)) ++ return PTR_ERR(fmtdesc); ++ fmt->fmt.pix.pixelformat = fmtdesc->fcc; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + fmt->fmt.pix.priv = 0; + +diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c +index 0017f6e96..1a9369148 100644 +--- a/drivers/usb/serial/usb_wwan.c ++++ b/drivers/usb/serial/usb_wwan.c +@@ -228,7 +228,11 @@ static void usb_wwan_indat_callback(struct urb *urb) + __func__, status, endpoint); + + /* don't resubmit on fatal errors */ ++#ifdef CONFIG_ARCH_BSP ++ if (status == -ESHUTDOWN || status == -ENOENT || status == -EPROTO) ++#else + if (status == -ESHUTDOWN || status == -ENOENT) ++#endif + return; + } else { + if (urb->actual_length) { +diff --git a/drivers/vendor/Kconfig b/drivers/vendor/Kconfig +new file mode 100644 +index 000000000..ff521b5f2 +--- /dev/null ++++ b/drivers/vendor/Kconfig +@@ -0,0 +1,10 @@ ++menu "Vendor driver support" ++ ++source "drivers/vendor/usb/Kconfig" ++source "drivers/vendor/usb_phy/Kconfig" ++source "drivers/vendor/basedrv_clk/Kconfig" ++source "drivers/vendor/mmc/Kconfig" ++source "drivers/vendor/cma/Kconfig" ++source "drivers/vendor/npu/Kconfig" ++ ++endmenu +diff --git a/drivers/vendor/Makefile b/drivers/vendor/Makefile +new file mode 100644 +index 000000000..d7be16682 +--- /dev/null ++++ b/drivers/vendor/Makefile +@@ -0,0 +1,6 @@ ++obj-$(CONFIG_USB_WING) += usb/ ++obj-$(CONFIG_WING_UPS_PHY) += usb_phy/ ++obj-$(CONFIG_BASEDRV_CLK) += basedrv_clk/ ++obj-$(CONFIG_CMA) += cma/ ++obj-$(CONFIG_MMC) += mmc/ ++obj-$(CONFIG_VENDOR_NPU) += npu/ +diff --git a/drivers/vendor/basedrv_clk/Kconfig b/drivers/vendor/basedrv_clk/Kconfig +new file 
mode 100644
+index 000000000..7fbbbbc69
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/Kconfig
+@@ -0,0 +1,3 @@
++config BASEDRV_CLK
++	bool "Basedrv IP Clock"
++	default y
+diff --git a/drivers/vendor/basedrv_clk/Makefile b/drivers/vendor/basedrv_clk/Makefile
+new file mode 100644
+index 000000000..75cdce04e
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/Makefile
+@@ -0,0 +1,7 @@
++KBUILD_CFLAGS += -Werror
++
++obj-$(CONFIG_ARCH_SS928V100) += ss928v100/
++obj-$(CONFIG_ARCH_SS927V100) += ss928v100/
++
++obj-$(CONFIG_BASEDRV_CLK) += basedrv-clk.o
++basedrv-clk-y += basedrv_clk.o
+diff --git a/drivers/vendor/basedrv_clk/basedrv-clock.h b/drivers/vendor/basedrv_clk/basedrv-clock.h
+new file mode 100644
+index 000000000..3873aac7d
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/basedrv-clock.h
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: driver for clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-05
++ */
++
++#ifndef __DT_BINDINGS_UPS_CLOCK_H
++#define __DT_BINDINGS_UPS_CLOCK_H
++
++#define PERI_CRG3664_USB30_CTRL0 0x0000
++#define PERI_CRG3672_USB30_CTRL1 0x0004
++#define PERI_CRG3632_USB2_PHY0 0x0008
++#define PERI_CRG3640_USB2_PHY1 0x000C
++#define PERI_CRG3665_COMBPHY0_CLK 0x0010
++#define PERI_CRG3673_COMBPHY1_CLK 0x0014
++
++#define CLK_MAX 0x0800
++
++#endif /* __DT_BINDINGS_UPS_CLOCK_H */
+diff --git a/drivers/vendor/basedrv_clk/basedrv_clk.c b/drivers/vendor/basedrv_clk/basedrv_clk.c
+new file mode 100644
+index 000000000..214849886
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/basedrv_clk.c
+@@ -0,0 +1,341 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: driver for clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-05
++ */
++
++#define DRVNAME "basedrv-clk"
++#define pr_fmt(fmt) DRVNAME ": " fmt
++
++#include
++#include
++#include
++#include
++
++#include "basedrv_clk.h"
++
++static void ins_clk_enable(struct basedrv_clk_hw *clk)
++{
++	if (clk->value) {
++		u32 val = readl(clk->peri_crgx);
++		val &= ~clk->mask;
++		val |= clk->value;
++		writel(val, clk->peri_crgx);
++
++		val = readl(clk->peri_crgx);
++		if ((val & clk->mask) != clk->value) {
++			pr_warn("failed to enable '%s' clock: want:%#x, real:%#x\n",
++				clk->name, clk->value, val);
++		}
++	}
++
++	clk->flags |= CLKHW_ENABLE;
++}
++
++static void ins_clk_disable(struct basedrv_clk_hw *clk)
++{
++	if (clk->mask) {
++		u32 val = readl(clk->peri_crgx);
++		val &= ~clk->mask;
++		writel(val, clk->peri_crgx);
++	}
++
++	clk->flags &= ~CLKHW_ENABLE;
++}
++
++static void ins_clk_reset(struct basedrv_clk_hw *clk)
++{
++	if (clk->rstbit) {
++		u32 val = readl(clk->peri_crgx);
++		val |= clk->rstbit;
++		writel(val, clk->peri_crgx);
++	}
++
++	clk->flags |= CLKHW_RESET;
++}
++
++static void ins_clk_unreset(struct basedrv_clk_hw *clk)
++{
++	if (clk->rstbit) {
++		u32 val = readl(clk->peri_crgx);
++		val &= ~clk->rstbit;
++		writel(val, clk->peri_crgx);
++	}
++
++	clk->flags &= ~CLKHW_RESET;
++}
++
++int basedrv_clk_enable(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return -1;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	ins_clk_enable(clk);
++
++	if (clk->flags & CLKHW_RESET) {
++		ins_clk_unreset(clk);
++	}
++
++	return 0;
++}
++
++void basedrv_clk_disable(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	ins_clk_disable(clk);
++}
++
++unsigned long basedrv_clk_recalc_rate(struct clk_hw *hw,
++	unsigned long parent_rate)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return parent_rate;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	return clk->rate;
++}
++
++static long basedrv_clk_round_rate(struct clk_hw *hw, unsigned long rate,
++	unsigned long *parent_rate)
++{
++	return rate;
++}
++
++int basedrv_clk_prepare(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return -1;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	ins_clk_enable(clk);
++	ins_clk_unreset(clk);
++	ins_clk_reset(clk);
++
++	return 0;
++}
++
++void basedrv_clk_unprepare(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	clk->flags |= CLKHW_RESET;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
++int basedrv_clk_init(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return -1;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	ins_clk_enable(clk);
++	ins_clk_reset(clk);
++	ins_clk_disable(clk);
++
++	return 0;
++}
++#else
++void basedrv_clk_init(struct clk_hw *hw)
++{
++	struct basedrv_clk_hw *clk = NULL;
++
++	if (hw == NULL)
++		return;
++
++	clk = to_basedrv_clk_hw(hw);
++
++	ins_clk_enable(clk);
++	ins_clk_reset(clk);
++	ins_clk_disable(clk);
++}
++#endif
++
++struct clk * __init basedrv_clk_register(struct basedrv_clk_hw *hw,
++	struct clk_ops *clkops)
++{
++	struct clk *clk = NULL;
++	struct clk_init_data init;
++
++	if (hw == NULL || clkops == NULL)
++		return NULL;
++
++	init.name = hw->name;
++	init.flags = CLK_GET_RATE_NOCACHE;
++#ifdef CONFIG_ARCH_SHAOLINSWORD
++	init.flags |= CLK_IS_ROOT;
++#endif
++	init.parent_names = NULL;
++	init.num_parents = 0;
++
++	if (hw->ops == NULL)
++		hw->ops = clkops;
++
++	if (hw->ops->init == NULL)
++		hw->ops->init = basedrv_clk_init;
++
++	if (hw->ops->prepare == NULL)
++		hw->ops->prepare = basedrv_clk_prepare;
++
++	if (hw->ops->unprepare == NULL)
++		hw->ops->unprepare = basedrv_clk_unprepare;
++
++	if (hw->ops->enable == NULL)
++		hw->ops->enable = basedrv_clk_enable;
++
++	if (hw->ops->disable == NULL)
++		hw->ops->disable = basedrv_clk_disable;
++
++	if (hw->ops->recalc_rate == NULL)
++		hw->ops->recalc_rate = basedrv_clk_recalc_rate;
++
++	if (hw->ops->round_rate == NULL)
++		hw->ops->round_rate = basedrv_clk_round_rate;
++
++	init.ops = hw->ops;
++
++	hw->hw.init = &init;
++
++	clk = clk_register(NULL, &hw->hw);
++	if (IS_ERR(clk)) {
++		pr_err("%s: register clock fail.\n", __func__);
++		return NULL;
++	}
++
++	clk_register_clkdev(clk, hw->name, NULL);
++
++	return clk;
++}
++
++static struct clk *basedrv_clk_src_get(struct of_phandle_args *clkspec, void *data)
++{
++	struct clk_onecell_data *clk_data = NULL;
++	unsigned int idx = 0;
++
++	if ((data == NULL) || (clkspec == NULL)) {
++		return NULL;
++	}
++
++	clk_data = data;
++	idx = clkspec->args[0];
++
++	if (idx >= (clk_data->clk_num << IDX_TO_CLK_NUM)) {
++		pr_err("%s: invalid clock index %d\n", __func__, idx);
++		return ERR_PTR(-EINVAL);
++	}
++
++	return clk_data->clks[idx >> IDX_TO_CLK_NUM];
++}
++
++#define PERI_CRG_NODE_IDX 0
++#define PERI_CTRL_NODE_IDX 1
++
++void __init basedrv_clocks_init(struct device_node *node,
++	struct basedrv_clk_hw *clks_hw, int nr_hw, unsigned int clk_num,
++	struct clk_ops *clkops)
++{
++	int ix;
++	int ret;
++	void __iomem *peri_crg_base = NULL;
++	void __iomem *peri_ctrl_base = NULL;
++	struct clk_onecell_data *clk_data = NULL;
++
++	if ((nr_hw > clk_num) || (clks_hw == NULL)) {
++		basedrv_clk_err("invalid argument\n");
++		return;
++	}
++
++	peri_crg_base = of_iomap(node, PERI_CRG_NODE_IDX);
++	if (peri_crg_base == NULL) {
++		basedrv_clk_err("failed to remap peri crg base\n");
++		return;
++	}
++
++	peri_ctrl_base = of_iomap(node, PERI_CTRL_NODE_IDX);
++	if (peri_ctrl_base == NULL) {
++		basedrv_clk_err("failed to remap peri ctrl base\n");
++		goto exit;
++	}
++
++	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
++	if (clk_data == NULL) {
++		basedrv_clk_err("failed to allocate clk_data\n");
++		goto exit;
++	}
++
++	clk_data->clk_num = clk_num;
++	clk_data->clks = kzalloc(sizeof(struct clk *) * clk_num, GFP_KERNEL);
++	if (clk_data->clks == NULL) {
++		basedrv_clk_err("failed to allocate clks\n");
++		goto exit;
++	}
++
++	for (ix = 0; ix < nr_hw; ix++) {
++		struct basedrv_clk_hw *hw = &clks_hw[ix];
++
++		hw->peri_crg_base = peri_crg_base;
++		hw->peri_crgx = hw->peri_crg_base + hw->offset;
++		hw->peri_ctrl_base = peri_ctrl_base;
++
++		if ((hw->id >> IDX_TO_CLK_NUM) >= clk_num) {
++			basedrv_clk_info("clk id exceeds CLK_MAX.\n");
++			continue;
++		}
++
++		clk_data->clks[hw->id >> IDX_TO_CLK_NUM] = basedrv_clk_register(hw, clkops);
++	}
++
++	ret = of_clk_add_provider(node, basedrv_clk_src_get, clk_data);
++	if (ret != 0) {
++		basedrv_clk_err("add clk provider failed\n");
++		goto exit;
++	}
++
++	return;
++
++exit:
++	if (peri_crg_base != NULL) {
++		iounmap(peri_crg_base);
++		peri_crg_base = NULL;
++	}
++
++	if (peri_ctrl_base != NULL) {
++		iounmap(peri_ctrl_base);
++		peri_ctrl_base = NULL;
++	}
++
++	if (clk_data != NULL && clk_data->clks != NULL) {
++		kfree(clk_data->clks);
++		clk_data->clks = NULL;
++	}
++
++	if (clk_data != NULL) {
++		kfree(clk_data);
++		clk_data = NULL;
++	}
++}
++
+diff --git a/drivers/vendor/basedrv_clk/basedrv_clk.h b/drivers/vendor/basedrv_clk/basedrv_clk.h
+new file mode 100644
+index 000000000..e4fb4ba76
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/basedrv_clk.h
+@@ -0,0 +1,106 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: driver for clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-05
++ */
++
++#ifndef BASEDRV_CLK_H
++#define BASEDRV_CLK_H
++
++#include
++#include
++#include
++#include
++
++#define BASEDRV_CLK_DEBUG 0
++
++#define basedrv_clk_dbg(format, arg...) \
++	do { \
++		if (BASEDRV_CLK_DEBUG) \
++			printk(KERN_INFO "[UPS-CLK][%s]" format, __func__, ##arg); \
++	} while (0)
++
++#define basedrv_clk_info(format, arg...) \
++	printk(KERN_INFO "[UPS-CLK][%s]" format, __func__, ##arg)
++
++#define basedrv_clk_err(format, arg...) \
++    printk(KERN_ERR "[UPS-CLK][%s]"format, __func__, ##arg)
++
++struct clk_rate_reg {
++    unsigned long rate;
++    u32 regval;
++};
++
++struct basedrv_clk_hw {
++    unsigned int id;
++    const char *name;
++    u32 offset;
++    u32 mask;
++    u32 value;
++    u32 rstbit;
++
++    unsigned long rate;
++    struct clk_ops *ops;
++    struct clk_hw hw;
++    void __iomem *peri_crgx;
++    void __iomem *peri_crg_base;
++    void __iomem *peri_ctrl_base;
++
++#define CLKHW_RESET 0x01
++#define CLKHW_ENABLE 0x02
++    u32 flags;
++};
++
++/* clk ids are 4-byte aligned, so shift right 2 bits to get an array index */
++#define IDX_TO_CLK_NUM 2
++
++#define clk(_id, _mask, _value, _rstbit, _rate, _ops) { \
++    .id = (_id), \
++    .name = #_id, \
++    .offset = (_id), \
++    .mask = (_mask), \
++    .value = (_value), \
++    .rstbit = (_rstbit), \
++    .rate = (_rate), \
++    .ops = (_ops), \
++}
++
++#define clk_shared(_id, _off, _mask, _value, _rstbit, _rate, _ops) { \
++    .id = (_id), \
++    .name = #_id, \
++    .offset = (_off), \
++    .mask = (_mask), \
++    .value = (_value), \
++    .rstbit = (_rstbit), \
++    .rate = (_rate), \
++    .ops = (_ops), \
++}
++
++#define to_basedrv_clk_hw(_hw) container_of(_hw, struct basedrv_clk_hw, hw)
++
++struct clk *basedrv_clk_register(struct basedrv_clk_hw *hw, struct clk_ops *clkops);
++
++struct clk *basedrv_of_clk_src_get(struct of_phandle_args *clkspec, void *data);
++
++void basedrv_clocks_init(struct device_node *node, struct basedrv_clk_hw *clks_hw,
++    int nr_hw, unsigned int clk_num, struct clk_ops *clkops);
++
++int basedrv_clk_enable(struct clk_hw *hw);
++
++void basedrv_clk_disable(struct clk_hw *hw);
++
++unsigned long basedrv_clk_recalc_rate(struct clk_hw *hw,
++    unsigned long parent_rate);
++
++int basedrv_clk_prepare(struct clk_hw *hw);
++
++void basedrv_clk_unprepare(struct clk_hw *hw);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
++int basedrv_clk_init(struct clk_hw *hw);
++#else
++void basedrv_clk_init(struct clk_hw *hw);
++#endif
++
++#endif /* BASEDRV_CLK_H */
+diff --git a/drivers/vendor/basedrv_clk/ss928v100/Makefile b/drivers/vendor/basedrv_clk/ss928v100/Makefile
+new file mode 100644
+index 000000000..f1ef46280
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/ss928v100/Makefile
+@@ -0,0 +1,6 @@
++KBUILD_CFLAGS += -Werror
++
++obj-$(CONFIG_ARCH_SS928V100) += soc_clk_ss928v100.o
++obj-$(CONFIG_ARCH_SS927V100) += soc_clk_ss928v100.o
++soc_clk_ss928v100-y += clk_ss928v100.o clk_ups.o
++soc_clk_ss927v100-y += clk_ss928v100.o clk_ups.o
+diff --git a/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.c b/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.c
+new file mode 100644
+index 000000000..0d3123a8a
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.c
+@@ -0,0 +1,98 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: driver for clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-05
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/clk-provider.h>
++
++#include "../basedrv_clk.h"
++#include "clk_ss928v100.h"
++
++
++#define REG_PERI_CRG3665_USB3_PHY0 0x3944
++#define REG_PERI_CRG3673_USB3_PHY1 0x3964
++
++static struct basedrv_clk_hw g_clks_hw[] = {
++    clk(PERI_CRG3664_USB30_CTRL0, 0x0, 0x0, 0x0, 0, &g_clk_ops_usb30_host),
++    clk(PERI_CRG3672_USB30_CTRL1, 0x0, 0x0, 0x0, 0, &g_clk_ops_usb30_drd),
++    clk(PERI_CRG3632_USB2_PHY0, 0x0, 0x0, 0x0, 0, &g_clk_ops_xvpphy0),
++    clk(PERI_CRG3640_USB2_PHY1, 0x0, 0x0, 0x0, 0, &g_clk_ops_xvpphy1),
++    clk_shared(PERI_CRG3665_COMBPHY0_CLK, REG_PERI_CRG3665_USB3_PHY0, 0x0, 0x0,
++        0x0, 0, &g_clk_ops_combophy0),
++    clk_shared(PERI_CRG3673_COMBPHY1_CLK, REG_PERI_CRG3673_USB3_PHY1, 0x0, 0x0,
++        0x0, 0, &g_clk_ops_combophy1),
++};
++
++static unsigned long ss928v100_basedrv_clk_recalc_rate(struct clk_hw *hw,
++    unsigned long parent_rate)
++{
++    return 0;
++}
++
++static int ss928v100_basedrv_clk_set_rate(struct clk_hw *hw, unsigned long drate,
++    unsigned long parent_rate)
++{
++    return 0;
++}
++
++static int ss928v100_basedrv_clk_get_phase(struct clk_hw *hw)
++{
++    return 0;
++}
++
++static int ss928v100_basedrv_clk_set_phase(struct clk_hw *hw, int degrees)
++{
++    return 0;
++}
++
++static int ss928v100_basedrv_clk_enable(struct clk_hw *hw)
++{
++    basedrv_clk_enable(hw);
++    ss928v100_basedrv_clk_recalc_rate(hw, 0);
++    return 0;
++}
++
++static struct clk_ops g_clk_ops = {
++    .enable = ss928v100_basedrv_clk_enable,
++    .recalc_rate = ss928v100_basedrv_clk_recalc_rate,
++    .set_rate = ss928v100_basedrv_clk_set_rate,
++    .get_phase = ss928v100_basedrv_clk_get_phase,
++    .set_phase = ss928v100_basedrv_clk_set_phase,
++};
++
++static void __init ss928v100_basedrv_clocks_init(struct device_node *np)
++{
++    int ix;
++
++    for (ix = 0; ix < ARRAY_SIZE(g_clks_hw); ix++) {
++        struct basedrv_clk_hw *hw = &g_clks_hw[ix];
++        struct clk_ops *ops = hw->ops;
++
++        if (ops == NULL) {
++            continue;
++        }
++
++        if (ops->enable == NULL) {
++            ops->enable = ss928v100_basedrv_clk_enable;
++        }
++
++        if (ops->recalc_rate == NULL) {
++            ops->recalc_rate = ss928v100_basedrv_clk_recalc_rate;
++        }
++    }
++
++    basedrv_clocks_init(np, g_clks_hw, ARRAY_SIZE(g_clks_hw),
++        CLK_MAX >> IDX_TO_CLK_NUM, &g_clk_ops);
++}
++CLK_OF_DECLARE(basedrv_clk, "basedrv-ip,clock", ss928v100_basedrv_clocks_init);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("huanglong");
+diff --git a/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.h b/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.h
+new file mode 100644
+index 000000000..33c0ea86d
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/ss928v100/clk_ss928v100.h
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: driver for clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-05
++ */
++
++#ifndef CLK_SS928V100_H
++#define CLK_SS928V100_H
++
++extern struct clk_ops g_clk_ops_xvpphy0;
++extern struct clk_ops g_clk_ops_combophy0;
++extern struct clk_ops g_clk_ops_usb30_host;
++
++extern struct clk_ops g_clk_ops_xvpphy1;
++extern struct clk_ops g_clk_ops_combophy1;
++extern struct clk_ops g_clk_ops_usb30_drd;
++
++#endif
++
+diff --git a/drivers/vendor/basedrv_clk/ss928v100/clk_ups.c b/drivers/vendor/basedrv_clk/ss928v100/clk_ups.c
+new file mode 100644
+index 000000000..b6bfe6b88
+--- /dev/null
++++ b/drivers/vendor/basedrv_clk/ss928v100/clk_ups.c
+@@ -0,0 +1,532 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved.
++ * Description: usb clk
++ * Author: AuthorNameMagicTag
++ * Create: 2022.12.05
++ */
++
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++
++#include "../basedrv_clk.h"
++
++#define REG_PERI_CRG3664_USB30_CTRL 0x3940
++#define REG_PERI_CRG3672_USB30_CTRL1 0x3960
++#define REG_PERI_CRG3632_USB2_PHY0 0x38C0
++#define REG_PERI_CRG3640_USB2_PHY1 0x38E0
++#define REG_PERI_CRG3665_USB3_PHY0 0x3944
++#define REG_PERI_CRG3673_USB3_PHY1 0x3964
++
++#define USB3_CTRL_CRG_DEFAULT_VALUE 0x30001
++#define USB2_PHY_CRG_DEFAULT_VALUE 0x57
++#define USB3_PHY_CRG_DEFAULT_VALUE 0x13
++
++#define USB3_CRG_PCLK_OCC_SEL (0x1 << 18)
++#define USB3_CRG_PIPE_CKEN (0x1 << 12)
++#define USB3_CRG_UTMI_CKEN (0x1 << 8)
++#define USB3_CRG_SUSPEND_CKEN (0x1 << 6)
++#define USB3_CRG_REF_CKEN (0x1 << 5)
++#define USB3_CRG_BUS_CKEN (0x1 << 4)
++#define USB3_CRG_SRST_REQ (0x1 << 0)
++
++#define USB2_PHY_CRG_APB_SREQ (0x1 << 2)
++#define USB2_PHY_CRG_TREQ (0x1 << 1)
++#define USB2_PHY_CRG_REQ (0x1 << 0)
++
++#define USB3_PHY_CRG_TREQ (0x1 << 1)
++#define USB3_PHY_CRG_REQ (0x1 << 0)
++
++#define COMBPHY_REF_CKEN (0x1 << 24)
++#define COMBPHY_SRST_REQ (0x1 << 16)
++
++#define USB3_VCC_SRST_REQ (0x1 << 0)
++#define USB3_UTMI_CKSEL (0x1 << 13)
++#define USB3_PCLK_OCC_SEL (0x1 << 14)
++
++#define PCIE_X2_MODE (0x0 << 16)
++#define USB3_X2_MODE (0x1 << 16)
++#define PORT0U2_PORT1U3_MODE (0x2 << 16)
++#define COMBPHY_MODE_MASK (0x3 << 16)
++#define SYSSTAT 0x18
++
++#define PINOUT_REG_BASE 0x10230000
++#define PINOUT_CTRL0_PWREN_OFFSET 0x44
++#define PINOUT_CTRL1_PWREN_OFFSET 0x3C
++#define PINOUT_CTRL1_VBUS_OFFSET 0x38
++#define PINOUT_USB_VAL 0x1201
++
++static unsigned int basedrv_clk_readl(const void __iomem *addr)
++{
++    unsigned int reg = readl(addr);
++
++    basedrv_clk_dbg("readl(0x%lx) = %#08X\n", (uintptr_t)addr, reg);
++    return reg;
++}
++
++static void basedrv_clk_writel(unsigned int v, void __iomem *addr)
++{
++    writel(v, addr);
++    basedrv_clk_dbg("writel(0x%lx) = %#08X\n", (uintptr_t)addr, v);
++}
++
++typedef enum mode {
++    PCIE_X2 = 0,
++    USB3_X2,
++    USB3_X1,
++    UNKOWN_MODE
++} combphy_mode;
++
++static void usb_pinout_cfg(void)
++{
++    void __iomem *addr = NULL;
++
++    addr = ioremap(PINOUT_REG_BASE, 0x1000);
++    if (addr == NULL) {
++        basedrv_clk_info("map pinout register failed\n");
++        return;
++    }
++
++    basedrv_clk_writel(PINOUT_USB_VAL, addr + PINOUT_CTRL0_PWREN_OFFSET);
++    basedrv_clk_writel(PINOUT_USB_VAL, addr + PINOUT_CTRL1_PWREN_OFFSET);
++    basedrv_clk_writel(PINOUT_USB_VAL, addr + PINOUT_CTRL1_VBUS_OFFSET);
++    udelay(200); /* delay 200 us to wait vbus stable */
++
++    iounmap(addr);
++    addr = NULL;
++}
++
++static combphy_mode get_combphy_mode(struct basedrv_clk_hw *clk)
++{
++    u32 val;
++
++    val = basedrv_clk_readl(clk->peri_ctrl_base + SYSSTAT);
++    val &=
COMBPHY_MODE_MASK; ++ ++ switch (val) { ++ case PCIE_X2_MODE: ++ return PCIE_X2; ++ case PORT0U2_PORT1U3_MODE: ++ return USB3_X1; ++ case USB3_X2_MODE: ++ return USB3_X2; ++ default: ++ break; ++ } ++ ++ return UNKOWN_MODE; ++} ++ ++static int xvpphy0_clk_prepare(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB2_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ val &= ~(USB2_PHY_CRG_APB_SREQ | USB2_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void xvpphy0_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB2_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int xvpphy0_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ val &= ~(USB2_PHY_CRG_TREQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void xvpphy0_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ val |= USB2_PHY_CRG_TREQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3632_USB2_PHY0); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int xvpphy1_clk_prepare(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB2_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ val &= ~(USB2_PHY_CRG_APB_SREQ | USB2_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void xvpphy1_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB2_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int xvpphy1_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ val &= ~(USB2_PHY_CRG_TREQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void xvpphy1_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ val |= USB2_PHY_CRG_TREQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3640_USB2_PHY1); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int combophy0_clk_prepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ 
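++    /*
++     * Restore the PHY CRG register to its documented power-on default
++     * (reset bits asserted) so that combophy0_clk_enable() later
++     * deasserts the resets from a known state.
++     */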
basedrv_clk_writel(USB3_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void combophy0_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB3_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int combophy0_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++ val &= ~(USB3_PHY_CRG_TREQ | USB3_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++ ++ return 0; ++} ++ ++static void combophy0_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++ val |= (USB3_PHY_CRG_TREQ | USB3_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3665_USB3_PHY0); ++} ++ ++static int combophy1_clk_prepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB3_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void combophy1_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB3_PHY_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int combophy1_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++ val &= ~(USB3_PHY_CRG_TREQ | USB3_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++ ++ return 0; ++} ++ ++static void combophy1_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++ val |= (USB3_PHY_CRG_TREQ | USB3_PHY_CRG_REQ); ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3673_USB3_PHY1); ++} ++ ++static int usb30_host_clk_prepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ usb_pinout_cfg(); ++ ++ basedrv_clk_writel(USB3_CTRL_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void usb30_host_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB3_CTRL_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++#define USB3_SUSPENDUSB20_PHY (0x1 << 6) ++#define USB3_CTRL0_BASE 0x10300000 ++#define USB3_GUSB2PHYCFGN 0xc200 ++ ++static int usb30_host_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ combphy_mode mode; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ void __iomem *ctrl_base = ioremap(USB3_CTRL0_BASE, 0x10000); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ mode = get_combphy_mode(clk); ++ if (mode == USB3_X2) { ++ val &= 
~(USB3_CRG_PCLK_OCC_SEL); ++ } else { ++ val |= USB3_CRG_PCLK_OCC_SEL; ++ } ++ val |= (USB3_CRG_PIPE_CKEN | USB3_CRG_UTMI_CKEN | ++ USB3_CRG_SUSPEND_CKEN | USB3_CRG_REF_CKEN | USB3_CRG_BUS_CKEN); ++ val &= ~USB3_CRG_SRST_REQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ ++ if (ctrl_base == NULL) ++ return 0; ++ ++ val = basedrv_clk_readl(ctrl_base + USB3_GUSB2PHYCFGN); ++ if (mode == USB3_X2) { ++ val |= (USB3_SUSPENDUSB20_PHY); ++ } else { ++ val &= ~(USB3_SUSPENDUSB20_PHY); ++ } ++ basedrv_clk_writel(val, ctrl_base + USB3_GUSB2PHYCFGN); ++ ++ iounmap(ctrl_base); ++ ctrl_base = NULL; ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void usb30_host_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ val |= USB3_CRG_SRST_REQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3664_USB30_CTRL); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int usb30_drd_clk_prepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ usb_pinout_cfg(); ++ ++ basedrv_clk_writel(USB3_CTRL_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void usb30_drd_clk_unprepare(struct clk_hw *hw) ++{ ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ basedrv_clk_writel(USB3_CTRL_CRG_DEFAULT_VALUE, ++ clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++static int usb30_drd_clk_enable(struct clk_hw *hw) ++{ ++ u32 val; ++ combphy_mode mode; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ mode = get_combphy_mode(clk); ++ if (mode == USB3_X2 || mode == USB3_X1) { ++ val &= ~(USB3_CRG_PCLK_OCC_SEL); ++ } else { ++ val |= USB3_CRG_PCLK_OCC_SEL; ++ } ++ val |= (USB3_CRG_PIPE_CKEN | USB3_CRG_UTMI_CKEN | ++ USB3_CRG_SUSPEND_CKEN | USB3_CRG_REF_CKEN | USB3_CRG_BUS_CKEN); ++ val &= ~USB3_CRG_SRST_REQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ ++ basedrv_clk_dbg("---"); ++ ++ return 0; ++} ++ ++static void usb30_drd_clk_disable(struct clk_hw *hw) ++{ ++ u32 val; ++ struct basedrv_clk_hw *clk = to_basedrv_clk_hw(hw); ++ ++ basedrv_clk_dbg("+++"); ++ ++ val = basedrv_clk_readl(clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ val |= USB3_CRG_SRST_REQ; ++ basedrv_clk_writel(val, clk->peri_crg_base + REG_PERI_CRG3672_USB30_CTRL1); ++ ++ basedrv_clk_dbg("---"); ++} ++ ++struct clk_ops g_clk_ops_xvpphy0 = { ++ .prepare = xvpphy0_clk_prepare, ++ .unprepare = xvpphy0_clk_unprepare, ++ .enable = xvpphy0_clk_enable, ++ .disable = xvpphy0_clk_disable, ++}; ++ ++struct clk_ops g_clk_ops_combophy0 = { ++ .prepare = combophy0_clk_prepare, ++ .unprepare = combophy0_clk_unprepare, ++ .enable = combophy0_clk_enable, ++ .disable = combophy0_clk_disable, ++}; ++ ++struct clk_ops g_clk_ops_usb30_host = { ++ .prepare = usb30_host_clk_prepare, ++ .unprepare = usb30_host_clk_unprepare, ++ .enable = usb30_host_clk_enable, ++ .disable = usb30_host_clk_disable, ++}; ++ ++struct clk_ops g_clk_ops_xvpphy1 = { ++ .prepare = xvpphy1_clk_prepare, ++ .unprepare = xvpphy1_clk_unprepare, ++ .enable = xvpphy1_clk_enable, ++ .disable = xvpphy1_clk_disable, ++}; ++ 
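++/*
++ * Illustrative sketch only, not part of the original driver: a consumer
++ * reaches the op tables in this file through the common clock framework.
++ * The "usb30" connection id and the probe function name are hypothetical;
++ * clk_prepare_enable() ends up invoking the prepare and enable callbacks
++ * defined above.
++ */
++#if 0
++static int usb30_consumer_probe_sketch(struct device *dev)
++{
++    struct clk *clk = devm_clk_get(dev, "usb30"); /* hypothetical id */
++
++    if (IS_ERR(clk))
++        return PTR_ERR(clk);
++
++    return clk_prepare_enable(clk);
++}
++#endif
++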
++struct clk_ops g_clk_ops_combophy1 = {
++    .prepare = combophy1_clk_prepare,
++    .unprepare = combophy1_clk_unprepare,
++    .enable = combophy1_clk_enable,
++    .disable = combophy1_clk_disable,
++};
++
++struct clk_ops g_clk_ops_usb30_drd = {
++    .prepare = usb30_drd_clk_prepare,
++    .unprepare = usb30_drd_clk_unprepare,
++    .enable = usb30_drd_clk_enable,
++    .disable = usb30_drd_clk_disable,
++};
++
+diff --git a/drivers/vendor/cma/Kconfig b/drivers/vendor/cma/Kconfig
+new file mode 100644
+index 000000000..7472dccd8
+--- /dev/null
++++ b/drivers/vendor/cma/Kconfig
+@@ -0,0 +1,16 @@
++
++config CMA_MEM_SHARED
++    bool "Support sharing CMA memory with the heap"
++    depends on CMA && DMA_CMA
++    default n
++    help
++      Support sharing CMA memory with the heap.
++
++config CMA_ADVANCE_SHARE
++    bool "Support CMA advance share"
++    depends on CMA && DMA_CMA
++    select CMA_MEM_SHARED
++    default n
++    help
++      Support advance sharing of CMA memory with the heap.
++      The CMA multiplex ratio improves when this option is enabled.
+diff --git a/drivers/vendor/cma/Makefile b/drivers/vendor/cma/Makefile
+new file mode 100644
+index 000000000..97d573979
+--- /dev/null
++++ b/drivers/vendor/cma/Makefile
+@@ -0,0 +1,2 @@
++
++obj-$(CONFIG_CMA) += cma.o
+diff --git a/drivers/vendor/cma/cma.c b/drivers/vendor/cma/cma.c
+new file mode 100644
+index 000000000..52454f377
+--- /dev/null
++++ b/drivers/vendor/cma/cma.c
+@@ -0,0 +1,176 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */
++#include
++#include
++
++static u32 num_zones;
++static struct cma_zone bsp_zone[ZONE_MAX];
++static int use_bootargs;
++
++unsigned int get_cma_size(void)
++{
++    u32 i;
++    u64 total = 0;
++
++    for (i = 0; i < num_zones; i++)
++        total += bsp_zone[i].nbytes;
++
++    return (unsigned int)(total >> 20); /* unit M: shift right 20 bits */
++}
++
++int is_cma_address(phys_addr_t phys, unsigned long size)
++{
++    phys_addr_t start, end;
++    u32 i;
++
++    for (i = 0; i < num_zones; i++) {
++        start = bsp_zone[i].phys_start;
++        end = bsp_zone[i].phys_start + bsp_zone[i].nbytes;
++
++        if ((phys >= start) && ((phys + size) <= end))
++            return 1; /* Yes, found!
*/ ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(is_cma_address); ++ ++static int __init bsp_mmz_parse_cmdline(char *s) ++{ ++ char *line = NULL, *tmp = NULL; ++ char tmpline[256]; ++ int ret; ++ ++ if (s == NULL) { ++ pr_info("There is no cma zone!\n"); ++ return 0; ++ } ++ ret = strncpy_s(tmpline, sizeof(tmpline), s, sizeof(tmpline) - 1); ++ if (ret) { ++ printk("%s:strncpy_s failed\n", __func__); ++ return ret; ++ } ++ ++ tmpline[sizeof(tmpline) - 1] = '\0'; ++ tmp = tmpline; ++ ++ while ((line = strsep(&tmp, ":")) != NULL) { ++ int i; ++ char *argv[6]; ++ ++ for (i = 0; (argv[i] = strsep(&line, ",")) != NULL;) ++ if (++i == ARRAY_SIZE(argv)) ++ break; ++ ++ if (num_zones >= ZONE_MAX) ++ return 0; ++ bsp_zone[num_zones].pdev.coherent_dma_mask = DMA_BIT_MASK(64); ++ if (i == 4) { ++ strlcpy(bsp_zone[num_zones].name, argv[0], NAME_LEN_MAX); ++ bsp_zone[num_zones].gfp = (uintptr_t)memparse(argv[1], NULL); ++ bsp_zone[num_zones].phys_start = (uintptr_t)memparse(argv[2], NULL); ++ bsp_zone[num_zones].nbytes = (uintptr_t)memparse(argv[3], NULL); ++ } ++ ++ else if (i == 6) { ++ strlcpy(bsp_zone[num_zones].name, argv[0], NAME_LEN_MAX); ++ bsp_zone[num_zones].gfp = (uintptr_t)memparse(argv[1], NULL); ++ bsp_zone[num_zones].phys_start = (uintptr_t)memparse(argv[2], NULL); ++ bsp_zone[num_zones].nbytes = (uintptr_t)memparse(argv[3], NULL); ++ bsp_zone[num_zones].alloc_type = (uintptr_t)memparse(argv[4], NULL); ++ bsp_zone[num_zones].block_align = (uintptr_t)memparse(argv[5], NULL); ++ } else { ++ pr_err("ion parameter is not correct\n"); ++ continue; ++ } ++ ++ num_zones++; ++ } ++ if (num_zones != 0) ++ use_bootargs = 1; ++ ++ return 0; ++} ++early_param("mmz", bsp_mmz_parse_cmdline); ++ ++phys_addr_t get_zones_start(void) ++{ ++ u32 i; ++ phys_addr_t lowest_zone_base = memblock_end_of_DRAM(); ++ ++ for (i = 0; i < num_zones; i++) { ++ if (lowest_zone_base > bsp_zone[i].phys_start) ++ lowest_zone_base = bsp_zone[i].phys_start; ++ } ++ ++ return lowest_zone_base; ++} ++EXPORT_SYMBOL(get_zones_start); ++ ++struct cma_zone *get_cma_zone(const char *name) ++{ ++ u32 i; ++ ++ if (name == NULL) ++ return NULL; ++ for (i = 0; i < num_zones; i++) ++ if (strcmp(bsp_zone[i].name, name) == 0) ++ break; ++ ++ if (i == num_zones) ++ return NULL; ++ ++ return &bsp_zone[i]; ++} ++EXPORT_SYMBOL(get_cma_zone); ++ ++struct device *get_cma_device(const char *name) ++{ ++ u32 i; ++ ++ if (name == NULL) ++ return NULL; ++ ++ for (i = 0; i < num_zones; i++) ++ if (strcmp(bsp_zone[i].name, name) == 0) ++ break; ++ ++ if (i == num_zones) ++ return NULL; ++ ++ return &bsp_zone[i].pdev; ++} ++EXPORT_SYMBOL(get_cma_device); ++ ++int __init declare_heap_memory(void) ++{ ++ u32 i; ++ int ret = 0; ++ ++ if (use_bootargs == 0) { ++ pr_info("cma zone is not set!\n"); ++ return ret; ++ } ++ ++ for (i = 0; i < num_zones; i++) { ++ ret = dma_contiguous_reserve_area(bsp_zone[i].nbytes, ++ bsp_zone[i].phys_start, 0, &bsp_zone[i].pdev.cma_area, true); ++ if (ret) ++ panic("declare cma zone %s base: 0x%lx size:%lu MB failed. 
ret:%d", ++ bsp_zone[i].name, (unsigned long)bsp_zone[i].phys_start, ++ (unsigned long)bsp_zone[i].nbytes >> 20, ret); ++ ++ bsp_zone[i].phys_start = cma_get_base(bsp_zone[i].pdev.cma_area); ++ bsp_zone[i].nbytes = cma_get_size(bsp_zone[i].pdev.cma_area); ++ } ++ ++ return ret; ++} ++ ++static int bsp_mmz_setup(struct reserved_mem *rmem) ++{ ++ return 0; ++} ++RESERVEDMEM_OF_DECLARE(cma, "bsp-mmz", bsp_mmz_setup); ++ +diff --git a/drivers/vendor/mmc/CMakeLists.txt b/drivers/vendor/mmc/CMakeLists.txt +new file mode 100644 +index 000000000..99d9e6544 +--- /dev/null ++++ b/drivers/vendor/mmc/CMakeLists.txt +@@ -0,0 +1,26 @@ ++set(MODULE_NAME sdhci_fbb) ++set(LIST_MOD_SOURCES ++ sdhci_nebula.c ++ nebula_intf.c ++ adapter/nebula_fmea.c ++ adapter/nebula_adapter.c ++ adapter/nebula_quick.c ++ platform/platform_comm.c ++ platform/sdhci_${CONFIG_CHIP_NAME}.c ++ dfx/nebula_dfx.c ++ dfx/mci_proc.c ++) ++ ++set(LIST_MOD_PRIVATE_INC ++ ${CMAKE_CURRENT_SOURCE_DIR} ++ ${CMAKE_CURRENT_SOURCE_DIR}/dfx ++ ${CMAKE_CURRENT_SOURCE_DIR}/adapter ++ ${CMAKE_CURRENT_SOURCE_DIR}/platform ++ ${CONFIG_LINUX_KERNEL_DIR}/drivers/mmc/host ++ ${CONFIG_LINUX_KERNEL_DIR}/drivers/mmc/core ++) ++ ++list(APPEND LIST_MOD_PRIVATE_DEF 'SDHCI_NEBULA_KERNEL_VERSION=\"MMC_KERNEL 1.0.1\"') ++list(APPEND LIST_MOD_PUBLIC_DEF CONFIG_LINUX_DRV_FBB_MMC) ++ ++build_kernel_module() +diff --git a/drivers/vendor/mmc/ChangLog b/drivers/vendor/mmc/ChangLog +new file mode 100644 +index 000000000..1ea066b31 +--- /dev/null ++++ b/drivers/vendor/mmc/ChangLog +@@ -0,0 +1,7 @@ ++2023-02-01 ++ ++ * version 1.0.0 ++ * feature: 2023-03-03 support emmc quick boot up ++ * feature: 2023-01-31 add dfx support, support card status dump, card init command backtrace if init failed. ++ * feature: 2023-01-13 add command queuing support ++ * feature: 2022-11-15 support emmc, sdio basic function +diff --git a/drivers/vendor/mmc/Kconfig b/drivers/vendor/mmc/Kconfig +new file mode 100644 +index 000000000..79f68e381 +--- /dev/null ++++ b/drivers/vendor/mmc/Kconfig +@@ -0,0 +1,64 @@ ++# ++# MMC/SD host controller drivers ++# ++ ++config MMC_SDHCI_SOCT ++ tristate "SDHCI support on the Huanglong wudangstick SoC" ++ depends on ARCH_WUDANGSTICK || ARCH_SHAOLINGUN || ARCH_HENGSHANV200 || ARCH_KONGTONGV100 || ARCH_SHAOLINSPEAR || ARCH_EMEISWORD ++ depends on MMC_SDHCI_PLTFM ++ help ++ This selects the SDHCI support for wudangstick System-on-Chip devices. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. ++ ++config MMC_SDHCI_SHAOLINSWORD ++ tristate "SDHCI support on the Huanglong shaolinsword SoC" ++ depends on ARCH_SHAOLINSWORD || ARCH_SHAOLINKNIVE ++ help ++ This selects the SDHCI support for CNS3xxx System-on-Chip devices. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. ++ ++config MMC_SDHCI_NEBULA ++ tristate "SDHCI nebula support" ++ depends on MMC && MMC_SDHCI ++ help ++ This selects the SDHCI nebula support. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. ++ ++config MMC_QUICKBOOT ++ tristate "SDHCI support quick boot on the Vendor SoC" ++ depends on MMC_SDHCI_PLTFM && ((MMC_SDHCI_NEBULA = y) || (MMC_SDHCI_SOCT = y)) ++ help ++ This selects the SDHCI quick boot support for Vendor System-on-Chip devices. ++ ++ If you want to support this feature, say Y here. ++ ++ If unsure, say N. 
++ ++config MMC_CARD_INFO ++ bool "SDHCI show card info on the Vendor SoC" ++ depends on MMC_SDHCI_PLTFM ++ help ++ This selects the SDHCI card info support for Vendor System-on-Chip devices. ++ ++ If you want to support this feature, say Y here. ++ ++ If unsure, say N. ++ ++config MMC_SDHCI_ANT ++ bool "SDHCI ANT controller on the Vendor SoC" ++ depends on MMC_SDHCI_PLTFM ++ help ++ This selects the SDHCI ant for Vendor System-on-Chip devices. ++ ++ If you want to support this feature, say Y here. ++ ++ If unsure, say N. +\ No newline at end of file +diff --git a/drivers/vendor/mmc/Makefile b/drivers/vendor/mmc/Makefile +new file mode 100644 +index 000000000..2cca2b5ce +--- /dev/null ++++ b/drivers/vendor/mmc/Makefile +@@ -0,0 +1,53 @@ ++include $(src)/version.mak ++ccflags-y += -DSDHCI_NEBULA_KERNEL_VERSION=\"$(SDHCI_NEBULA_KERNEL_VERSION)\" ++ ++ifeq ($(CONFIG_SOCT_FPGA_SUPPORT),y) ++ccflags-y += -DCONFIG_NEBULA_SDHCI_FPGA_SUPPORT=y ++endif ++ ++ccflags-y += -I$(src)/ ++ccflags-y += -I$(src)/dfx ++ccflags-y += -I$(srctree)/drivers/mmc/host ++ccflags-y += -I$(srctree)/drivers/mmc/core ++ccflags-y += -I$(srctree)/include/huanglong/utils/ ++ccflags-y += -I$(srctree)/include/huanglong/ ++ccflags-y += -I$(srctree)/../../huanglong/linux/include/generic/ ++ccflags-y += -I$(srctree)/drivers/drv/ext_inc/dftevent/ ++ ++mmc_src := sdhci_nebula.o \ ++ adapter/nebula_adapter.o \ ++ adapter/nebula_quick.o \ ++ adapter/nebula_fmea.o \ ++ dfx/nebula_dfx.o \ ++ platform/platform_comm.o \ ++ dfx/mci_proc.o \ ++ nebula_intf.o ++ ++mod_name := sdhcinebula ++ ++mmc_plat_src-$(CONFIG_ARCH_WUDANGSTICK) += platform/sdhci_wudangstick.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINGUN) += platform/sdhci_wudangstick.o ++mmc_plat_src-$(CONFIG_ARCH_SS928V100) += platform/sdhci_ss928v100.o ++mmc_plat_src-$(CONFIG_ARCH_HI3519DV500_FAMILY) += platform/sdhci_hi3519dv500.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINSWORD) += platform/sdhci_shaolinsword_c.o ++mmc_plat_src-$(CONFIG_ARCH_HIWING) += platform/sdhci_hiwing.o ++mmc_plat_src-$(CONFIG_ARCH_EMEISWORD) += platform/sdhci_wudangstick.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINSWORD) += sdhci_shaolinsword.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINAXE) += platform/sdhci_shaolinaxe.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINSPEAR) += platform/sdhci_shaolinspear.o ++mmc_plat_src-$(CONFIG_ARCH_SHAOLINFIST) += platform/sdhci_shaolinfist.o ++mmc_plat_src-$(CONFIG_ARCH_HI3751V811) += platform/sdhci_hi3751v811_c.o ++ ++ifeq ($(CONFIG_SOCT_DRV_BUILD_KO),y) ++CONFIG_MMC_SDHCI_SOCT := m ++endif ++ ++obj-y += ${mod_name}.o ++ ++ifneq ($(CONFIG_MMC_SDHCI_SOCT)$(CONFIG_MMC_SDHCI_NEBULA),) ++${mod_name}-objs := ${mmc_src} ${mmc_plat_src-y} ++else ++${mod_name}-objs := sdhci_shaolinsword.o adapter/nebula_fmea.o ++endif ++ ++#test +diff --git a/drivers/vendor/mmc/adapter/nebula_adapter.c b/drivers/vendor/mmc/adapter/nebula_adapter.c +new file mode 100644 +index 000000000..30b6b0417 +--- /dev/null ++++ b/drivers/vendor/mmc/adapter/nebula_adapter.c +@@ -0,0 +1,1256 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI adapter ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "cqhci.h" ++#include "nebula_quirk_ids.h" ++#include "nebula_quick.h" ++#include "sdhci_nebula.h" ++#include "core.h" ++#include "card.h" ++ ++static u32 __read_mostly g_mmc_flag = 0; ++ ++#define sdhci_nebula_dump(f, x...) 
\ ++ pr_err("%s: sdhci: " f, mmc_hostname(host->mmc), ## x) ++ ++#ifdef CONFIG_MMC_SDHCI_ANT ++static void sdhci_nebula_dump_vendor_regs_ant(struct sdhci_host *host) ++{ ++ sdhci_nebula_dump("tim0 ctl: 0x%08x | tim1 ctl: 0x%08x\n", ++ sdhci_readl(host, SDEMMC_TIMING_CTRL_0), ++ sdhci_readl(host, SDEMMC_TIMING_CTRL_1)); ++ sdhci_nebula_dump("tim2 ctl: 0x%08x | crc stat: 0x%08x\n", ++ sdhci_readl(host, SDEMMC_TIMING_CTRL_2), ++ sdhci_readl(host, CRC_STATUS_CTRL)); ++ sdhci_nebula_dump("bclk ctl: 0x%08x | ckg stat: 0x%08x\n", ++ sdhci_readl(host, BUF_CLK_CTRL_AND_STS), ++ sdhci_readl(host, CKG_CTRL_AND_STS)); ++ sdhci_nebula_dump("pol ctl: 0x%08x | dphy ctl: 0x%08x\n", ++ sdhci_readl(host, POLARITY_CTRL), ++ sdhci_readl(host, DPHY_CTRL)); ++ sdhci_nebula_dump("axi cfg: 0x%08x | rft cfg: 0x%08x\n", ++ sdhci_readl(host, AXI_CAPACITY_CFG), ++ sdhci_readl(host, C28_RFT_RAM_CFG)); ++} ++#endif ++ ++void __maybe_unused sdhci_nebula_dump_vendor_regs(struct sdhci_host *host) ++{ ++ u32 reg0, reg1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ sdhci_nebula_dump("========= SDHCI NEBULA DEBUG DUMP ==========\n"); ++#ifdef CONFIG_MMC_SDHCI_ANT ++ sdhci_nebula_dump_vendor_regs_ant(host); ++#else ++ sdhci_nebula_dump("Mshc ctl: 0x%08x | Ahb ctl: 0x%08x\n", ++ sdhci_readl(host, SDHCI_MSHC_CTRL_R), ++ sdhci_readl(host, SDHCI_AXI_MBIU_CTRL)); ++ sdhci_nebula_dump("Debug1: 0x%08x | Debug2: 0x%08x\n", ++ sdhci_readl(host, SDHCI_DEBUG1_PORT), ++ sdhci_readl(host, SDHCI_DEBUG2_PORT)); ++ sdhci_nebula_dump("eMMC ctl: 0x%08x | eMMC rst: 0x%08x\n", ++ sdhci_readl(host, SDHCI_EMMC_CTRL), ++ sdhci_readl(host, SDHCI_EMMC_HW_RESET)); ++ sdhci_nebula_dump("AT ctl: 0x%08x | AT stat: 0x%08x\n", ++ sdhci_readl(host, SDHCI_AT_CTRL), ++ sdhci_readl(host, SDHCI_AT_STAT)); ++ sdhci_nebula_dump("eMMC reg: 0x%08x | Mutl cyl: 0x%08x\n", ++ sdhci_readl(host, SDHCI_EMAX_R), ++ sdhci_readl(host, SDHCI_MUTLI_CYCLE_EN)); ++#endif ++ ++ /* Crg */ ++ regmap_read(nebula->crg_regmap, nebula->info->crg_ofs[CRG_CLK_RST], ®0); ++ regmap_read(nebula->crg_regmap, nebula->info->crg_ofs[CRG_DLL_RST], ®1); ++ sdhci_nebula_dump("CRG ctl: 0x%08x | Dll rst: 0x%08x\n", reg0, reg1); ++ ++ regmap_read(nebula->crg_regmap, nebula->info->crg_ofs[CRG_DRV_DLL], ®0); ++ regmap_read(nebula->crg_regmap, nebula->info->crg_ofs[CRG_DLL_STA], ®1); ++ sdhci_nebula_dump("Drv dll: 0x%08x | Dll sta: 0x%08x\n", reg0, reg1); ++ ++ plat_dump_io_info(host); ++} ++ ++static int __maybe_unused __init nebula_mmc_setup(char *str) ++{ ++ /* cqe off */ ++ if (strcasecmp(str, "cqeoff") == 0) { ++ g_mmc_flag |= MMC_CMDQ_FORCE_OFF; ++ } ++ ++ /* no whitelist */ ++ if (strcasecmp(str, "nowhitelist") == 0) { ++ g_mmc_flag |= MMC_CMDQ_DIS_WHITELIST; ++ } ++ ++ return 1; ++} ++__setup("mmc=", nebula_mmc_setup); ++ ++#ifdef CONFIG_MMC_SDHCI_ANT ++unsigned int sdhci_nebula_get_max_clock(struct sdhci_host *host) ++{ ++ return MAX_FREQ; ++} ++ ++static void nebula_set_emmc_card(struct sdhci_host *host) ++{ ++} ++ ++static void nebula_enable_sample(struct sdhci_host *host) ++{ ++} ++ ++static void nebula_set_sample_phase(struct sdhci_host *host, unsigned int phase) ++{ ++ plat_set_sample_phase(host, phase); ++} ++#else ++static void nebula_set_emmc_card(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHCI_EMMC_CTRL); ++ reg |= SDHCI_CARD_IS_EMMC; ++ sdhci_writel(host, reg, SDHCI_EMMC_CTRL); ++ } ++} ++ ++static void nebula_enable_sample(struct 
sdhci_host *host) ++{ ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHCI_AT_CTRL); ++ reg |= SDHCI_SAMPLE_EN; ++ sdhci_writel(host, reg, SDHCI_AT_CTRL); ++} ++ ++static void nebula_set_sample_phase(struct sdhci_host *host, unsigned int phase) ++{ ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHCI_AT_STAT); ++ reg &= ~SDHCI_PHASE_SEL_MASK; ++ reg |= phase; ++ sdhci_writel(host, reg, SDHCI_AT_STAT); ++} ++#endif ++ ++static void nebula_disable_card_clk(struct sdhci_host *host) ++{ ++ u16 clk; ++ ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ clk &= ~SDHCI_CLOCK_CARD_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++} ++ ++static void nebula_enable_card_clk(struct sdhci_host *host) ++{ ++ u16 clk; ++ ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ clk |= SDHCI_CLOCK_CARD_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++} ++ ++static void nebula_disable_internal_clk(struct sdhci_host *host) ++{ ++ u16 clk; ++ ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ clk &= ~SDHCI_CLOCK_INT_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++} ++ ++static void nebula_enable_internal_clk(struct sdhci_host *host) ++{ ++ __maybe_unused u16 clk, timeout; ++ ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ clk |= SDHCI_CLOCK_INT_EN | SDHCI_CLOCK_PLL_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++ ++#ifndef CONFIG_MMC_SDHCI_ANT ++ /* Wait max 20 ms */ ++ timeout = 20; ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ while ((clk & SDHCI_CLOCK_INT_STABLE) == 0) { ++ if (timeout == 0) { ++ pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); ++ return; ++ } ++ timeout--; ++ udelay(1000); /* delay 1000 us */ ++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); ++ } ++#endif ++} ++ ++void sdhci_nebula_set_clock(struct sdhci_host *host, unsigned int clk) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ ++ nebula_disable_card_clk(host); ++ udelay(25); /* delay 25 us */ ++ nebula_disable_internal_clk(host); ++ ++ host->mmc->actual_clock = clk; ++ if (clk == 0) ++ return; ++ ++ clk_set_rate(pltfm_host->clk, clk); ++ ++ host->mmc->actual_clock = (unsigned int)clk_get_rate(pltfm_host->clk); ++ ++ plat_get_drv_samp_phase(host); ++ plat_set_drv_phase(host, nebula->drv_phase); ++ nebula_enable_sample(host); ++ nebula_set_sample_phase(host, nebula->sample_phase); ++ udelay(5); /* delay 5 us */ ++ nebula_enable_internal_clk(host); ++ ++ if ((host->timing == MMC_TIMING_MMC_HS400) || ++ (host->timing == MMC_TIMING_MMC_HS200) || ++ (host->timing == MMC_TIMING_UHS_SDR104) || ++ (host->timing == MMC_TIMING_UHS_SDR50)) { ++ if (nebula->priv_cap & NEBULA_CAP_RST_IN_DRV) { ++ plat_dll_reset_assert(host); ++ plat_dll_reset_deassert(host); ++ } else { ++ reset_control_assert(nebula->dll_rst); ++ reset_control_deassert(nebula->dll_rst); ++ } ++ plat_wait_p4_dll_lock(host); ++ plat_wait_sample_dll_ready(host); ++ } ++ ++ if (host->timing == MMC_TIMING_MMC_HS400) ++ plat_wait_ds_dll_ready(host); ++ ++ nebula_enable_card_clk(host); ++ udelay(75); /* delay 75 us */ ++} ++ ++static void nebula_select_sample_phase(struct sdhci_host *host, unsigned int phase) ++{ ++ nebula_disable_card_clk(host); ++ nebula_set_sample_phase(host, phase); ++ plat_wait_sample_dll_ready(host); ++ nebula_enable_card_clk(host); ++ udelay(1); ++} ++ ++static int nebula_send_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ int count, err; ++ ++ count = 0; ++ do { ++ err = mmc_send_tuning(host->mmc, opcode, NULL); ++ if (err) { 
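++            /*
++             * One failed pass aborts the loop: honour the optional
++             * inter-tuning delay, then send the abort command so the
++             * card leaves tuning state before the error is returned.
++             */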
++ if (host->tuning_delay > 0) ++ mdelay(host->tuning_delay); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) ++ mmc_send_abort_tuning(host->mmc, MMC_SEND_TUNING_BLOCK_HS200); ++#else ++ mmc_abort_tuning(host->mmc, MMC_SEND_TUNING_BLOCK_HS200); ++#endif ++ break; ++ } ++ ++ count++; ++ } while (count < MAX_TUNING_NUM); ++ ++ return err; ++} ++ ++static void nebula_pre_tuning(struct sdhci_host *host) ++{ ++ sdhci_writel(host, host->ier | SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier | SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); ++ ++ nebula_enable_sample(host); ++#ifdef CONFIG_MMC_CARD_INFO ++ host->is_tuning = 1; ++#endif ++} ++ ++static void nebula_post_tuning(struct sdhci_host *host) ++{ ++ unsigned short ctrl; ++ ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ ctrl |= SDHCI_CTRL_TUNED_CLK; ++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); ++ ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++#ifdef CONFIG_MMC_CARD_INFO ++ host->is_tuning = 0; ++#endif ++} ++ ++static int nebula_get_best_sample(u32 candidates) ++{ ++ int rise = NOT_FOUND; ++ int fall, i, win; ++ int win_max_r = NOT_FOUND; ++ int win_max_f = NOT_FOUND; ++ int end_fall = NOT_FOUND; ++ int found = NOT_FOUND; ++ int win_max = 0; ++ ++ for (i = 0; i < PHASE_SCALE; i++) { ++ if ((candidates & WIN_MASK) == WIN_RISE) ++ rise = (i + 1) % PHASE_SCALE; ++ ++ if ((candidates & WIN_MASK) == WIN_FALL) { ++ fall = i; ++ win = fall - rise + 1; ++ if (rise == NOT_FOUND) { ++ end_fall = fall; ++ } else if ((rise != NOT_FOUND) && (win > win_max)) { ++ win_max = win; ++ found = (fall + rise) / WIN_DIV; ++ win_max_r = rise; ++ win_max_f = fall; ++ rise = NOT_FOUND; ++ fall = NOT_FOUND; ++ } ++ } ++ candidates = ror32(candidates, 1); ++ } ++ ++ if (end_fall != NOT_FOUND && rise != NOT_FOUND) { ++ fall = end_fall; ++ if (end_fall < rise) ++ end_fall += PHASE_SCALE; ++ ++ win = end_fall - rise + 1; ++ ++ if (win > win_max) { ++ found = (rise + (win / WIN_DIV)) % PHASE_SCALE; ++ win_max_r = rise; ++ win_max_f = fall; ++ } ++ } ++ ++ if (found != NOT_FOUND) ++ pr_err("valid phase shift [%d, %d] Final Phase:%d\n", ++ win_max_r, win_max_f, found); ++ ++ return found; ++} ++ ++static int nebula_exec_sample_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ unsigned int sample; ++ int err, phase; ++ unsigned int candidates = 0; ++ ++ nebula_pre_tuning(host); ++ ++ for (sample = 0; sample < PHASE_SCALE; sample++) { ++ nebula_select_sample_phase(host, sample); ++ ++ err = nebula_send_tuning(host, opcode); ++ if (err) ++ pr_debug("%s: send tuning CMD%u fail! phase:%d err:%d\n", ++ mmc_hostname(host->mmc), opcode, sample, err); ++ else ++ candidates |= (0x1 << sample); ++ } ++ ++ pr_info("%s: tuning done! candidates 0x%X: ", ++ mmc_hostname(host->mmc), candidates); ++ ++ phase = nebula_get_best_sample(candidates); ++ if (phase == NOT_FOUND) { ++ phase = nebula->sample_phase; ++ pr_err("%s: no valid phase shift! 
use default %d\n", mmc_hostname(host->mmc), phase); ++ } ++ ++ nebula->tuning_phase = (unsigned int)phase; ++ nebula_select_sample_phase(host, nebula->tuning_phase); ++ nebula_post_tuning(host); ++ ++ return ERET_SUCCESS; ++} ++ ++static void nebula_enable_edge_tuning(struct sdhci_host *host) ++{ ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHCI_MULTI_CYCLE); ++ reg |= SDHCI_EDGE_DETECT_EN; ++ sdhci_writel(host, reg, SDHCI_MULTI_CYCLE); ++} ++ ++static void nebula_disable_edge_tuning(struct sdhci_host *host) ++{ ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHCI_MULTI_CYCLE); ++ reg &= ~SDHCI_EDGE_DETECT_EN; ++ sdhci_writel(host, reg, SDHCI_MULTI_CYCLE); ++} ++ ++static int nebula_get_best_edges(struct sdhci_host *host, u32 opcode, ++ unsigned int *edge_p2f_ptr, unsigned int *edge_f2p_ptr) ++{ ++ int err; ++ unsigned int index, val; ++ bool found = false; ++ bool prev_found = false; ++ unsigned int edge_p2f, edge_f2p, start, end; ++ ++ start = 0; ++ end = PHASE_SCALE / EDGE_TUNING_PHASE_STEP; ++ ++ edge_p2f = start; ++ edge_f2p = end; ++ for (index = 0; index <= end; index++) { ++ nebula_select_sample_phase(host, index * EDGE_TUNING_PHASE_STEP); ++ ++ err = nebula_send_tuning(host, opcode); ++ if (err == 0) { ++ val = sdhci_readl(host, SDHCI_EDGE_DETECT_STAT); ++ found = ((val & SDHCI_FOUND_EDGE) != 0); ++ } else { ++ found = true; ++ } ++ ++ if (prev_found && !found) { ++ edge_f2p = index; ++ } else if (!prev_found && found) { ++ edge_p2f = index; ++ } ++ ++ if ((edge_p2f != start) && (edge_f2p != end)) ++ break; ++ ++ prev_found = found; ++ found = false; ++ } ++ ++ if ((edge_p2f == start) && (edge_f2p == end)) ++ return NOT_FOUND; ++ ++ *edge_p2f_ptr = edge_p2f; ++ *edge_f2p_ptr = edge_f2p; ++ ++ return ERET_SUCCESS; ++} ++ ++static unsigned int nebula_get_best_phase(struct sdhci_host *host, u32 opcode, ++ unsigned int *edge_p2f_ptr, unsigned int *edge_f2p_ptr) ++{ ++ unsigned int index, start, end; ++ unsigned int phase, fall, rise; ++ bool fall_updat_flag = false; ++ int err; ++ int prev_err = 0; ++ ++ start = *edge_p2f_ptr * EDGE_TUNING_PHASE_STEP; ++ end = *edge_f2p_ptr * EDGE_TUNING_PHASE_STEP; ++ if (end <= start) ++ end += PHASE_SCALE; ++ ++ fall = start; ++ rise = end; ++ for (index = start; index <= end; index++) { ++ nebula_select_sample_phase(host, index % PHASE_SCALE); ++ ++ err = nebula_send_tuning(host, opcode); ++ if (err) { ++ pr_debug("%s: send tuning CMD%u fail! phase:%d err:%d\n", ++ mmc_hostname(host->mmc), opcode, index, err); ++ } ++ ++ if ((err != 0) && (index == start)) { ++ if (!fall_updat_flag) { ++ fall_updat_flag = true; ++ fall = start; ++ } ++ } else if ((prev_err == 0) && (err != 0)) { ++ if (!fall_updat_flag) { ++ fall_updat_flag = true; ++ fall = index; ++ } ++ } ++ ++ if ((prev_err != 0) && (err == 0)) ++ rise = index; ++ ++ if ((err != 0) && (index == end)) { ++ rise = end; ++ } ++ ++ prev_err = err; ++ } ++ ++ /* Calculate the center value by devide 2 */ ++ phase = ((fall + rise) / 2 + PHASE_SCALE / 2) % PHASE_SCALE; ++ ++ pr_info("%s: tuning done! 
valid phase shift [%d, %d] Final Phase:%d\n", ++ mmc_hostname(host->mmc), rise % PHASE_SCALE, ++ fall % PHASE_SCALE, phase); ++ ++ return phase; ++} ++ ++static int nebula_exec_edge_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ int ret; ++ unsigned int phase, edge_p2f, edge_f2p; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula_pre_tuning(host); ++ nebula_enable_edge_tuning(host); ++ ++ ret = nebula_get_best_edges(host, opcode, &edge_p2f, &edge_f2p); ++ if (ret == NOT_FOUND) { ++ pr_err("%s: tuning failed! can not found edge!\n", mmc_hostname(host->mmc)); ++ return ret; ++ } ++ ++ nebula_disable_edge_tuning(host); ++ ++ phase = nebula_get_best_phase(host, opcode, &edge_p2f, &edge_f2p); ++ ++ nebula->tuning_phase = phase; ++ nebula_select_sample_phase(host, phase); ++ nebula_post_tuning(host); ++ ++ return ERET_SUCCESS; ++} ++ ++int sdhci_nebula_execute_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->priv_quirk & NEBULA_QUIRK_FPGA) ++ return ERET_SUCCESS; ++ ++ if (nebula->priv_quirk & NEBULA_QUIRK_SAMPLE_TURNING) { ++ return nebula_exec_sample_tuning(host, opcode); ++ } else { ++ return nebula_exec_edge_tuning(host, opcode); ++ } ++} ++ ++static void nebula_parse_priv_cap(struct sdhci_host *host, ++ struct device_node *np) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (of_get_property(np, "reset_out_drv", NULL) == NULL) ++ nebula->priv_cap |= NEBULA_CAP_RST_IN_DRV; ++ ++ if (of_get_property(np, "pm_runtime_enable", NULL)) ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ ++ if (of_get_property(np, "nm_card", NULL)) ++ nebula->priv_cap |= NEBULA_CAP_NM_CARD; ++} ++ ++static void nebula_parse_priv_quirk(struct sdhci_host *host, ++ const struct device_node *np) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (of_get_property(np, "sample_turning", NULL)) ++ nebula->priv_quirk |= NEBULA_QUIRK_SAMPLE_TURNING; ++ ++ if (of_get_property(np, "fpga", NULL)) ++ nebula->priv_quirk |= NEBULA_QUIRK_FPGA; ++ ++#ifdef CONFIG_NEBULA_SDHCI_FPGA_SUPPORT ++ nebula->priv_quirk |= NEBULA_QUIRK_FPGA; ++#endif ++ ++ if (of_get_property(np, "cd_inverted", NULL)) ++ nebula->priv_quirk |= NEBULA_QUIRK_CD_INVERTED; ++ ++ if (of_get_property(np, "pwr_en_inverted", NULL)) ++ nebula->priv_quirk |= NEBULA_QUIRK_PWR_EN_INVERTED; ++} ++ ++static void nebula_of_parse(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ u32 bus_width; ++ struct device *dev = &pdev->dev; ++ ++ if (device_property_present(dev, "mmc-broken-cmd23")) ++ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; ++ ++ if (device_property_present(dev, "sdhci,1-bit-only") || ++ (device_property_read_u32(dev, "bus-width", &bus_width) == 0 && ++ bus_width == 1)) ++ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; ++ ++ if (device_property_present(dev, "broken-cd")) ++ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; ++} ++ ++static int nebula_parse_comm_dt(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ int ret; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ struct device_node *np = pdev->dev.of_node; ++ ++ ret = mmc_of_parse(host->mmc); ++ if (ret) { ++ dev_err(mmc_dev(host->mmc), "parse comm dt failed.\n"); ++ return ret; ++ } ++ ++ if ((of_property_read_u32(np, "devid", &nebula->devid) != 0) || ++ (nebula->devid >= MMC_DEV_TYPE_MAX)) { ++ return -EINVAL; ++ } ++ ++ if (of_property_read_bool(np, "fix-1v8")) ++ host->flags = (unsigned 
int)host->flags & ~SDHCI_SIGNALING_330; ++ ++ nebula_of_parse(pdev, host); ++ ++ nebula_parse_priv_cap(host, np); ++ ++ nebula_parse_priv_quirk(host, np); ++ ++#ifdef CONFIG_MMC_CQHCI ++ if (of_get_property(np, "mmc-cmdq", NULL)) ++ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; ++#endif ++ ++ if (of_property_read_u32(np, "max-frequency", &host->mmc->f_max)) ++ host->mmc->f_max = MAX_FREQ; ++ ++ pltfm_host->clk = devm_clk_get(mmc_dev(host->mmc), "mmc_clk"); ++ if (IS_ERR_OR_NULL(pltfm_host->clk)) { ++ dev_err(mmc_dev(host->mmc), "get clk failed\n"); ++ return -EINVAL; ++ } ++ ++ nebula->hclk = devm_clk_get(mmc_dev(host->mmc), "mmc_hclk"); ++ if (IS_ERR_OR_NULL(nebula->hclk)) { ++ dev_warn(mmc_dev(host->mmc), "dts no hclk\n"); ++ } else { ++ ret = clk_prepare_enable(nebula->hclk); ++ if (ret) { ++ dev_err(mmc_dev(host->mmc), "enable hclk failed.\n"); ++ return -EINVAL; ++ } ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++static int nebula_parse_reset_dt(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if ((nebula->priv_cap & NEBULA_CAP_RST_IN_DRV) == 0) { ++ nebula->crg_rst = devm_reset_control_get(&pdev->dev, "crg_reset"); ++ if (IS_ERR_OR_NULL(nebula->crg_rst)) { ++ dev_err(&pdev->dev, "get crg_rst failed. %ld\n", \ ++ PTR_ERR(nebula->crg_rst)); ++ return (int)PTR_ERR(nebula->crg_rst); ++ } ++ ++ nebula->dll_rst = devm_reset_control_get(&pdev->dev, "dll_reset"); ++ if (IS_ERR_OR_NULL(nebula->dll_rst)) { ++ dev_err(&pdev->dev, "get dll_reset failed. %ld\n", \ ++ PTR_ERR(nebula->dll_rst)); ++ return (int)PTR_ERR(nebula->dll_rst); ++ } ++ ++ /* below crg rst not must */ ++ nebula->crg_tx = devm_reset_control_get(&pdev->dev, "crg_tx"); ++ if (IS_ERR_OR_NULL(nebula->crg_tx)) { ++ dev_warn(&pdev->dev, "crg tx rst not found with dts\n"); ++ nebula->crg_tx = nebula->crg_rst; ++ } ++ ++ nebula->crg_rx = devm_reset_control_get(&pdev->dev, "crg_rx"); ++ if (IS_ERR_OR_NULL(nebula->crg_rx)) { ++ dev_warn(&pdev->dev, "crg rx rst not found with dts\n"); ++ nebula->crg_rx = nebula->crg_rst; ++ } ++ ++ nebula->samp_rst = devm_reset_control_get(&pdev->dev, "samp_rst"); ++ if (IS_ERR_OR_NULL(nebula->samp_rst)) { ++ dev_warn(&pdev->dev, "crg samp rst not found with dts\n"); ++ nebula->samp_rst = NULL; ++ } ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++static int nebula_parse_regmap_dt(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ struct device_node *np = pdev->dev.of_node; ++ ++ nebula->crg_regmap = syscon_regmap_lookup_by_phandle(np, ++ "crg_regmap"); ++ if (IS_ERR(nebula->crg_regmap)) { ++ dev_err(&pdev->dev, "get crg regmap failed. %ld\n", \ ++ PTR_ERR(nebula->crg_regmap)); ++ return (int)PTR_ERR(nebula->crg_regmap); ++ } ++ ++ nebula->iocfg_regmap = syscon_regmap_lookup_by_phandle(np, ++ "iocfg_regmap"); ++ if (IS_ERR(nebula->iocfg_regmap)) { ++ dev_err(&pdev->dev, "get iocfg regmap failed. %ld\n", \ ++ PTR_ERR(nebula->iocfg_regmap)); ++ return (int)PTR_ERR(nebula->iocfg_regmap); ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++int sdhci_nebula_pltfm_init(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ int ret; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ ret = nebula_parse_comm_dt(pdev, host); ++ if (ret) { ++ dev_err(&pdev->dev, "parse comm dt failed\n"); ++ return ret; ++ } ++ ++ ret = plat_host_pre_init(pdev, host); ++ if (ret) { ++ dev_err(&pdev->dev, "pltfm pre init failed\n"); ++ return ret; ++ } ++ ++ /* check nebula *info and *mask valid? 
*/
++    if (nebula->info == NULL || nebula->mask == NULL) {
++        dev_err(&pdev->dev, "info or mask data invalid\n");
++        return -EINVAL;
++    }
++
++    ret = nebula_parse_reset_dt(pdev, host);
++    if (ret)
++        return ret;
++
++    ret = nebula_parse_regmap_dt(pdev, host);
++    if (ret)
++        return ret;
++
++#ifdef CONFIG_MMC_QUICKBOOT
++    ret = mmc_fast_boot_init(host);
++    if (ret)
++        return ret;
++#endif
++
++    /* Do ZQ calibration */
++    ret = plat_resistance_calibration(host);
++    if (ret)
++        return ret;
++
++    ret = plat_crg_init(host);
++    if (ret)
++        return ret;
++
++#ifdef CONFIG_SOC_RPMB_MMC
++    host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
++#endif
++
++    host->tuning_delay = 1;
++    plat_caps_quirks_init(host);
++    host->mmc_host_ops.hs400_enhanced_strobe = plat_hs400_enhanced_strobe;
++
++    return ERET_SUCCESS;
++}
++
++void sdhci_nebula_set_uhs_signaling(struct sdhci_host *host,
++    unsigned int timing)
++{
++    nebula_set_emmc_card(host);
++    sdhci_set_uhs_signaling(host, timing);
++    host->timing = timing;
++#ifndef CONFIG_NEBULA_SDHCI_FPGA_SUPPORT
++    plat_set_drv_cap(host);
++#endif
++}
++
++void sdhci_nebula_hw_reset(struct sdhci_host *host)
++{
++    unsigned int reg;
++
++    reg = sdhci_readl(host, SDHCI_EMMC_HW_RESET);
++    reg &= ~SDHCI_EMMC_RST_N;
++    sdhci_writel(host, reg, SDHCI_EMMC_HW_RESET);
++
++    udelay(10); /* delay 10 us */
++
++    reg = sdhci_readl(host, SDHCI_EMMC_HW_RESET);
++    reg |= SDHCI_EMMC_RST_N;
++    sdhci_writel(host, reg, SDHCI_EMMC_HW_RESET);
++
++    udelay(200); /* delay 200 us */
++}
++
++int sdhci_nebula_runtime_suspend(struct device *dev)
++{
++    struct sdhci_host *host = dev_get_drvdata(dev);
++
++    nebula_disable_card_clk(host);
++
++    return ERET_SUCCESS;
++}
++
++int sdhci_nebula_runtime_resume(struct device *dev)
++{
++    struct sdhci_host *host = dev_get_drvdata(dev);
++
++    nebula_enable_card_clk(host);
++
++    return ERET_SUCCESS;
++}
++
++int sdhci_nebula_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios)
++{
++    struct sdhci_nebula *nebula = nebula_priv(host);
++
++    if (nebula->ops.plat_voltage_switch != NULL)
++        return nebula->ops.plat_voltage_switch(host, ios);
++
++    return plat_voltage_switch(host, ios);
++}
++
++void sdhci_nebula_extra_init(struct sdhci_host *host)
++{
++#ifdef CONFIG_MMC_CARD_INFO
++    host->error_count = 0;
++#endif
++    return plat_extra_init(host);
++}
++
++#ifdef CONFIG_MMC_CQHCI
++static void nebula_init_card(struct mmc_host *host, struct mmc_card *card)
++{
++    u32 idx;
++    /* eMMC spec: cid product name offset: 0, 7, 6, 5, 4, 11 */
++    const u8 cid_pnm_offset[] = {0, 7, 6, 5, 4, 11};
++
++    if (host == NULL || card == NULL) {
++        pr_err("sdhci-nebula: null card or host\n");
++        return;
++    }
++
++    if ((card->type == MMC_TYPE_MMC) && (host->caps2 & MMC_CAP2_CQE)) {
++        u8 *raw_cid = (u8 *)card->raw_cid;
++
++        /* Skip whitelist */
++        if (g_mmc_flag & MMC_CMDQ_DIS_WHITELIST) {
++            return;
++        }
++
++        /* Clear MMC CQE capability */
++        host->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
++
++        /* Manufacturer ID: b[127:120] (eMMC v2.0 and upper), 0/24: idx/bit offset */
++        card->cid.manfid = ((card->raw_cid[0] >> 24) & 0xFF);
++
++        /* Decode CID with eMMC v2.0 and upper */
++        for (idx = 0; idx < sizeof(cid_pnm_offset); idx++) {
++            card->cid.prod_name[idx] = raw_cid[cid_pnm_offset[idx]];
++        }
++        card->cid.prod_name[++idx] = 0;
++        mmc_fixup_device(card, mmc_cmdq_whitelist);
++    }
++}
++
++static void nebula_controller_v4_enable(struct sdhci_host *host, bool enable)
++{
++    u16 ctrl;
++
++    ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++    if (enable)
++        ctrl |= SDHCI_CTRL_V4_ENABLE;
++    else
++        ctrl &= ~SDHCI_CTRL_V4_ENABLE;
++
++    if (host->flags & SDHCI_USE_64_BIT_DMA)
++        ctrl |= SDHCI_CTRL_64BIT_ADDR;
++
++    sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
++}
++
++static void nebula_cqe_enable(struct mmc_host *mmc)
++{
++    struct sdhci_host *host = mmc_priv(mmc);
++    u32 timeout = CQE_MAX_TIMEOUT;
++    u16 reg, clk;
++    u8 ctrl;
++
++    /* SW_RST_DAT */
++    sdhci_reset(host, SDHCI_RESET_DATA);
++
++    nebula_controller_v4_enable(host, true);
++
++    /* Set the DMA boundary value and block size */
++    sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary,
++        MMC_BLOCK_SIZE), SDHCI_BLOCK_SIZE);
++
++    /* need to set multi-transfer for cmdq */
++    reg = sdhci_readw(host, SDHCI_TRANSFER_MODE);
++    reg |= SDHCI_TRNS_MULTI;
++    reg |= SDHCI_TRNS_BLK_CNT_EN;
++    sdhci_writew(host, reg, SDHCI_TRANSFER_MODE);
++
++    /* ADMA2 only */
++    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++    ctrl &= ~SDHCI_CTRL_DMA_MASK;
++#ifdef CONFIG_MMC_SDHCI_ANT
++    ctrl |= SDHCI_CTRL_ADMA64;
++#else
++    ctrl |= SDHCI_CTRL_ADMA32;
++#endif
++    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++
++    clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++    clk |= SDHCI_CLOCK_PLL_EN;
++    sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++    while (mmc->ops->card_busy(mmc)) {
++        timeout--;
++        if (!timeout) {
++            pr_err("%s: cqe enable wait busy timeout\n", mmc_hostname(mmc));
++            break;
++        }
++        udelay(1);
++    }
++
++    sdhci_cqe_enable(mmc);
++}
++
++static void nebula_cqe_disable(struct mmc_host *mmc, bool recovery)
++{
++    u32 timeout = CQE_MAX_TIMEOUT;
++
++    while (mmc->ops->card_busy(mmc)) {
++        timeout--;
++        if (!timeout) {
++            pr_err("%s: cqe disable wait busy timeout\n", mmc_hostname(mmc));
++            break;
++        }
++        udelay(1);
++    }
++
++    nebula_controller_v4_enable(mmc_priv(mmc), false);
++
++    sdhci_cqe_disable(mmc, recovery);
++}
++
++static void nebula_dumpregs(struct mmc_host *mmc)
++{
++    sdhci_dumpregs(mmc_priv(mmc));
++}
++
++static const struct cqhci_host_ops sdhci_nebula_cqe_ops = {
++    .enable = nebula_cqe_enable,
++    .disable = nebula_cqe_disable,
++    .dumpregs = nebula_dumpregs,
++};
++
++static int nebula_cqe_add_host(struct sdhci_host *host)
++{
++    int ret;
++    struct cqhci_host *cq_host = NULL;
++    bool dma64 = false;
++
++    if (g_mmc_flag & MMC_CMDQ_FORCE_OFF) {
++        /* cmdq forced off via bootargs */
++        host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
++        return sdhci_add_host(host);
++    }
++
++    ret = sdhci_setup_host(host);
++    if (ret)
++        return ret;
++
++    cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host), GFP_KERNEL);
++    if (cq_host == NULL) {
++        pr_err("%s: allocate cqe host failed.\n", mmc_hostname(host->mmc));
++        ret = -ENOMEM;
++        goto cleanup;
++    }
++
++    cq_host->mmio = host->ioaddr + NEBULA_CQE_OFS;
++    cq_host->ops = &sdhci_nebula_cqe_ops;
++
++    /*
++     * the synopsys controller has a 128M DMA alignment limit,
++     * so the transfer descriptors may be split
++     */
++    cq_host->quirks |= CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT;
++    host->mmc->max_segs *= CQHCI_MAX_SEGS_MUL;
++
++    dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
++    if (dma64)
++        cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
++
++    ret = cqhci_init(cq_host, host->mmc, dma64);
++    if (ret) {
++        pr_err("%s: cqe init fail\n", mmc_hostname(host->mmc));
++        goto cleanup;
++    }
++
++    ret = __sdhci_add_host(host);
++    if (ret)
++        return ret;
++
++    host->mmc_host_ops.init_card = nebula_init_card;
++
++    return ERET_SUCCESS;
++
++cleanup:
++    sdhci_cleanup_host(host);
++    return ret;
++}
++
++static u32 nebula_cqe_irq(struct sdhci_host *host, u32 intmask)
++{
++    int cmd_error = 0;
++    int
data_error = 0; ++ ++ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) ++ return intmask; ++ ++ cqhci_irq(host->mmc, intmask, cmd_error, data_error); ++ ++ return ERET_SUCCESS; ++} ++#else ++static int nebula_cqe_add_host(struct sdhci_host *host) ++{ ++ /* Rollback to no cqe mode */ ++ host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD); ++ ++ return sdhci_add_host(host); ++} ++ ++static u32 nebula_cqe_irq(struct sdhci_host *host, u32 intmask) ++{ ++ return intmask; ++} ++#endif ++ ++u32 sdhci_nebula_irq(struct sdhci_host *host, u32 intmask) ++{ ++#ifdef CONFIG_SDHCI_NEBULA_DFX ++ sdhci_nebula_dfx_irq(host, intmask); ++#endif ++ ++ if (host->mmc->caps2 & MMC_CAP2_CQE) ++ return nebula_cqe_irq(host, intmask); ++ ++ return intmask; ++} ++ ++int sdhci_nebula_add_host(struct sdhci_host *host) ++{ ++ int ret; ++ ++#ifdef CONFIG_MMC_QUICKBOOT ++ if (mmc_is_fast_boot(host)) { ++ host->mmc->rescan_entered = 1; ++ /* Do not do repowerup before scan */ ++ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; ++ /* Skip reset device for fast boot */ ++ host->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; ++ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); ++ } ++#endif ++ ++ if (host->mmc->caps2 & MMC_CAP2_CQE) { ++ ret = nebula_cqe_add_host(host); ++ } else { ++ ret = sdhci_add_host(host); ++ } ++ if (ret != ERET_SUCCESS) ++ return ret; ++ ++#ifdef CONFIG_MMC_QUICKBOOT ++ if (mmc_is_fast_boot(host)) { ++ /* Clear SDHCI_QUIRK_NO_CARD_NO_RESET for normal reset */ ++ host->quirks &= ~SDHCI_QUIRK_NO_CARD_NO_RESET; ++ ++ mmc_parameter_init(host->mmc); ++ ret = mmc_quick_init_card(host->mmc, mmc_get_rocr(host), NULL); ++ if (ret) { ++ host->mmc->rescan_entered = 0; ++ mmc_detect_change(host->mmc, 0); ++ } ++ host->mmc->card_status = MMC_CARD_INIT; ++ if (host->mmc->ops->card_info_save) ++ host->mmc->ops->card_info_save(host->mmc); ++ } ++#endif ++ return ret; ++} ++ ++void sdhci_nebula_set_bus_width(struct sdhci_host *host, int width) ++{ ++ u8 ctrl; ++ ++ if (width <= MMC_BUS_WIDTH_4) { ++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ++ ctrl &= ~SDHCI_CTRL_8BITBUS; ++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); ++ } ++ ++ return sdhci_set_bus_width(host, width); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++void sdhci_nebula_adma_write_desc(struct sdhci_host *host, void **desc, ++ dma_addr_t addr, int len, unsigned int cmd) ++{ ++ int split_len; ++ ++ /* work around for buffer across 128M boundary, split the buffer */ ++ if (((addr & (SZ_128M - 1)) + len) > SZ_128M) { ++ split_len = SZ_128M - (int)(addr & (SZ_128M - 1)); ++ sdhci_adma_write_desc(host, desc, addr, split_len, ADMA2_TRAN_VALID); ++ addr += split_len; ++ len -= split_len; ++ } ++ ++ sdhci_adma_write_desc(host, desc, addr, len, cmd); ++} ++#endif ++ ++void sdhci_nebula_reset(struct sdhci_host *host, u8 mask) ++{ ++ u8 ctrl; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++#ifdef CONFIG_MMC_QUICKBOOT ++ /* eMMC quick boot up no need reset */ ++ if ((nebula->devid == MMC_DEV_TYPE_MMC_0) && \ ++ ((host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) != 0)) { ++ return; ++ } ++#endif ++ sdhci_reset(host, mask); ++ ++ ctrl = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); ++ ++ /* eMMC card detect inverted */ ++ if (nebula->priv_quirk & NEBULA_QUIRK_CD_INVERTED) ++ ctrl |= SDHCI_DETECT_POLARITY; ++ ++ /* eMMC power en inverted */ ++ if (nebula->priv_quirk & NEBULA_QUIRK_PWR_EN_INVERTED) ++ ctrl &= ~SDHCI_PWR_EN_POLARITY; ++ ++ sdhci_writeb(host, ctrl, SDHCI_WAKE_UP_CONTROL); ++} ++ ++#ifdef CONFIG_PM ++int 
sdhci_nebula_pltfm_suspend(struct device *dev) ++{ ++ int ret; ++ struct sdhci_host *host = dev_get_drvdata(dev); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++#ifdef CONFIG_MMC_QUICKBOOT ++ struct mmc_card *card = host->mmc->card; ++ ++ if (mmc_is_fast_boot(host)) { ++ if ((card != NULL) && (mmc_card_suspended(card) != 0)) ++ mmc_set_cur_mode(host, SLEEP_MODE); ++ } ++#endif ++ ret = sdhci_pltfm_suspend(dev); ++ if (ret != 0) { ++ pr_err("%s: pltfm suspend fail\n", mmc_hostname(host->mmc)); ++ return ret; ++ } ++ ++ if (!IS_ERR_OR_NULL(nebula->hclk)) ++ clk_disable_unprepare(nebula->hclk); ++ ++ return ERET_SUCCESS; ++} ++ ++int sdhci_nebula_pltfm_resume(struct device *dev) ++{ ++ int ret; ++ struct sdhci_host *host = dev_get_drvdata(dev); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ ++#ifdef CONFIG_MMC_QUICKBOOT ++ struct mmc_card *card = host->mmc->card; ++ ++ if (mmc_is_fast_boot(host)) { ++ if ((card != NULL) && (mmc_card_suspended(card) == 0)) ++ mmc_set_cur_mode(host, INIT_MODE); ++ } ++#endif ++ if (!IS_ERR_OR_NULL(nebula->hclk)) { ++ ret = clk_prepare_enable(nebula->hclk); ++ if (ret != 0) { ++ pr_err("%s: resume hclk enable fail\n", mmc_hostname(host->mmc)); ++ return ret; ++ } ++ } ++ ++ ret = plat_crg_init(host); ++ if (ret != 0) { ++ pr_err("%s: failed to reset crg\n", mmc_hostname(host->mmc)); ++ goto disabled_hclk; ++ } ++ ++ ret = sdhci_resume_host(host); ++ if (ret != 0) { ++ pr_err("%s: pltfm resume fail\n", mmc_hostname(host->mmc)); ++ goto disabled_clk; ++ } ++ ++ return ret; ++ ++disabled_clk: ++ clk_disable_unprepare(pltfm_host->clk); ++ ++disabled_hclk: ++ if (!IS_ERR_OR_NULL(nebula->hclk)) ++ clk_disable_unprepare(nebula->hclk); ++ ++ return ret; ++} ++#endif +diff --git a/drivers/vendor/mmc/adapter/nebula_fmea.c b/drivers/vendor/mmc/adapter/nebula_fmea.c +new file mode 100644 +index 000000000..ec5193033 +--- /dev/null ++++ b/drivers/vendor/mmc/adapter/nebula_fmea.c +@@ -0,0 +1,116 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. 
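++ *
++ * How it works: a freezable delayed work ticks once per second (HZ) and,
++ * every DEVICE_LIFE_CHECK_INTERVAL ticks, re-reads the eMMC EXT_CSD
++ * life-time estimates (DEVICE_LIFE_TIME_EST_TYP_A/B, eMMC 5.0 and later).
++ * When either estimate reaches DEVICE_LIFE_TIME_EST_VAL, a DFT event is
++ * reported. A sketch of how a probe path might wire this up (the `fmea`
++ * member name is an assumption, not taken from this patch):
++ *
++ *	sdhci_nebula_fmea_init(host, &nebula->fmea);
++ *	...
++ *	sdhci_nebula_fmea_deinit(&nebula->fmea);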
++ * Description: Nebula SDHCI fmea ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++#ifdef CONFIG_ANDROID_PRODUCT ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "core.h" ++#include "mmc_ops.h" ++#include "drv_dft_event.h" ++#include "nebula_fmea.h" ++ ++static int g_check_immediately = 0; ++module_param(g_check_immediately, int, 0644); ++ ++static int nebula_fmea_update_extcsd(struct mmc_card *card) ++{ ++ int ret; ++ u8 *ext_csd; ++ ++ mmc_claim_host(card->host); ++ ret = mmc_get_ext_csd(card, &ext_csd); ++ mmc_release_host(card->host); ++ if (ret) ++ return ret; ++ ++ card->ext_csd.device_life_time_est_typ_a = ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A]; ++ card->ext_csd.device_life_time_est_typ_b = ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B]; ++ kfree(ext_csd); ++ ++ return 0; ++} ++ ++static noinline bool nebula_fmea_lifettime_exceed(struct mmc_card *card) ++{ ++ u8 type_a, type_b; ++ ++ type_a = card->ext_csd.device_life_time_est_typ_a; ++ type_b = card->ext_csd.device_life_time_est_typ_b; ++ ++ if ((type_a >= DEVICE_LIFE_TIME_EST_VAL) || (type_b >= DEVICE_LIFE_TIME_EST_VAL)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++static void nebula_fmea_check_lifetime(struct work_struct *work) ++{ ++ int ret; ++ td_handle handle; ++ nebula_fmea *fmea = ++ container_of(work, struct sdhci_nebula_fmea, mmc_lifecheck_work.work); ++ struct mmc_card *card = fmea->host->mmc->card; ++ ++ if ((card == NULL) || (card->type != MMC_TYPE_MMC)) { ++ goto out; ++ } ++ ++ if ((fmea->life_check_interval > 0) && (g_check_immediately == 0)) { ++ fmea->life_check_interval--; ++ goto out; ++ } ++ ++ fmea->life_check_interval = DEVICE_LIFE_CHECK_INTERVAL; ++ ret = nebula_fmea_update_extcsd(card); ++ if (ret != 0) { ++ goto out; ++ } ++ ++ if (nebula_fmea_lifettime_exceed(card)) { ++ ret = dft_drv_event_create(SDHCI_FMEA_LIFE_TIME_EXCEED, &handle); ++ if (ret == TD_SUCCESS) { ++ dft_drv_event_put_string(handle, "PNAME", current->comm); ++ dft_drv_event_put_string(handle, "F1NAME", __func__); ++ dft_drv_event_put_string(handle, "FLASH_TYPE", "EMMC"); ++ dft_drv_event_put_integral(handle, "DEVICE_LIFE_TIME_EST_TYP_A", \ ++ card->ext_csd.device_life_time_est_typ_a); ++ dft_drv_event_put_integral(handle, "DEVICE_LIFE_TIME_EST_TYP_B", \ ++ card->ext_csd.device_life_time_est_typ_b); ++ dft_drv_event_report(handle); ++ dft_drv_event_destroy(handle); ++ } ++ } ++ ++out: ++ queue_delayed_work(system_freezable_wq, (struct delayed_work *)work, HZ); ++} ++ ++int sdhci_nebula_fmea_init(struct sdhci_host *host, nebula_fmea *fmea) ++{ ++ struct delayed_work *work = &fmea->mmc_lifecheck_work; ++ ++ fmea->host = host; ++ fmea->life_check_interval = DEVICE_LIFE_CHECK_INTERVAL; ++ INIT_DELAYED_WORK(work, nebula_fmea_check_lifetime); ++ queue_delayed_work(system_freezable_wq, work, HZ); ++ ++ return 0; ++} ++ ++int sdhci_nebula_fmea_deinit(nebula_fmea *fmea) ++{ ++ cancel_delayed_work(&fmea->mmc_lifecheck_work); ++ ++ return 0; ++} ++#endif +diff --git a/drivers/vendor/mmc/adapter/nebula_quick.c b/drivers/vendor/mmc/adapter/nebula_quick.c +new file mode 100644 +index 000000000..05cce73a3 +--- /dev/null ++++ b/drivers/vendor/mmc/adapter/nebula_quick.c +@@ -0,0 +1,254 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
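++ *
++ * How it works: the boot loader leaves the eMMC operating point
++ * (quick-boot mode, timing, bus width, phases) in a parameter block at
++ * info->qboot_phy_addr; mmc_fast_boot_init() ioremaps it and the helpers
++ * below read and update the packed fields, e.g. (a sketch based on the
++ * accessors in this file):
++ *
++ *	param1.u32 = readl(nebula->qboot_virt_addr + info->qboot_param1_ofs);
++ *	if ((emmc_qboot_u)param1.bits.emmc_qboot_mode != QUICK_BOOT_DIS)
++ *		... skip the full card re-initialisation ...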
++ * Description: Nebula SDHCI quick boot ++ * Author: AuthorNameMagicTag ++ * Create: 2023-02-27 ++ */ ++#ifdef CONFIG_MMC_QUICKBOOT ++#include ++ ++#include "sdhci.h" ++#include "sdhci_nebula.h" ++#include "nebula_quick.h" ++ ++static emmc_qboot_u mmc_get_qboot_mode(struct sdhci_host *host) ++{ ++ u32 param1_offset; ++ emmc_param_gen1_u param1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ BUG_ON(nebula->qboot_virt_addr == NULL); ++ param1_offset = nebula->info->qboot_param1_ofs; ++ param1.u32 = readl(nebula->qboot_virt_addr + param1_offset); ++ return (emmc_qboot_u)param1.bits.emmc_qboot_mode; ++ } ++ ++ return QUICK_BOOT_DIS; ++} ++ ++bool mmc_is_fast_boot(struct sdhci_host *host) ++{ ++ return (mmc_get_qboot_mode(host) != QUICK_BOOT_DIS); ++} ++EXPORT_SYMBOL(mmc_is_fast_boot); ++ ++int mmc_fast_boot_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ if (nebula->info->qboot_phy_addr == INVALID_DATA) { ++ pr_err("%s: invalid quick phy addr.\n", mmc_hostname(host->mmc)); ++ return -EINVAL; ++ } ++ ++ nebula->qboot_virt_addr = ioremap(nebula->info->qboot_phy_addr, PAGE_SIZE); ++ if (nebula->qboot_virt_addr == NULL) { ++ pr_err("%s: quick boot remap failed.\n", mmc_hostname(host->mmc)); ++ return -ENOMEM; ++ } ++ ++ if (mmc_is_fast_boot(host)) { ++ nebula->priv_cap |= NEBULA_CAP_QUICK_BOOT; ++ } ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++static emmc_mode_u mmc_get_cur_mode(struct sdhci_host *host) ++{ ++ u32 param1_offset; ++ emmc_param_gen1_u param1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0 && ++ (mmc_get_qboot_mode(host) == QUICK_BOOT_WARM)) { ++ param1_offset = nebula->info->qboot_param1_ofs; ++ param1.u32 = readl(nebula->qboot_virt_addr + param1_offset); ++ return (emmc_mode_u)param1.bits.emmc_cur_mode; ++ } ++ ++ return INIT_MODE; ++} ++ ++void mmc_set_cur_mode(struct sdhci_host *host, emmc_mode_u mode) ++{ ++ u32 param1_offset; ++ emmc_param_gen1_u param1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0 && ++ (mmc_get_qboot_mode(host) == QUICK_BOOT_WARM)) { ++ BUG_ON(nebula->qboot_virt_addr == NULL); ++ param1_offset = nebula->info->qboot_param1_ofs; ++ param1.u32 = readl(nebula->qboot_virt_addr + param1_offset); ++ param1.bits.emmc_cur_mode = mode; ++ writel(param1.u32, nebula->qboot_virt_addr + param1_offset); ++ } ++} ++ ++void mmc_reset_init_mode(struct sdhci_host *host) ++{ ++ return mmc_set_cur_mode(host, INIT_MODE); ++} ++EXPORT_SYMBOL(mmc_reset_init_mode); ++ ++void mmc_parameter_init(struct mmc_host *mmc) ++{ ++ u8 reg8; ++ u32 param1_offset; ++ emmc_param_gen1_u param1; ++ struct sdhci_host *host = mmc_priv(mmc); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const u8 bus_width[] = {MMC_BUS_WIDTH_1, MMC_BUS_WIDTH_4, \ ++ MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_1}; ++ const u8 uhs_mode[] = {MMC_TIMING_LEGACY, MMC_TIMING_MMC_HS, 0, \ ++ MMC_TIMING_MMC_HS200, MMC_TIMING_MMC_DDR52, 0, 0, MMC_TIMING_MMC_HS400}; ++ ++ if (nebula->devid != MMC_DEV_TYPE_MMC_0) { ++ return; ++ } ++ ++ BUG_ON(nebula->qboot_virt_addr == NULL); ++ param1_offset = nebula->info->qboot_param1_ofs; ++ param1.u32 = readl(nebula->qboot_virt_addr + param1_offset); ++ ++ if (mmc_get_qboot_mode(host) == QUICK_BOOT_COLD) { ++ /* Cold boot choice from host controller */ ++ param1.bits.emmc_uhs_mode_sel = \ ++ sdhci_readb(host, SDHCI_HOST_CONTROL2) & 
SDHCI_CTRL_UHS_MASK; ++ param1.bits.emmc_enh_strobe = \ ++ ((sdhci_readw(host, SDHCI_EMMC_CTRL) & SDHCI_ENH_STROBE_EN) != 0); ++ reg8 = sdhci_readb(host, SDHCI_HOST_CONTROL); ++ param1.bits.emmc_bus_width = ((reg8 & SDHCI_CTRL_8BITBUS) != 0) ? BUS_8BIT_IDX : \ ++ (((reg8 & SDHCI_CTRL_4BITBUS) != 0) ? BUS_4BIT_IDX : BUS_1BIT_IDX); ++ } ++ ++ mmc->ios.bus_width = bus_width[param1.bits.emmc_bus_width]; ++ mmc->ios.timing = uhs_mode[param1.bits.emmc_uhs_mode_sel]; ++ host->timing = mmc->ios.timing; ++#ifdef CONFIG_MMC_SDHCI_ANT ++ nebula->tuning_phase = plat_get_sample_phase(host); ++#else ++ nebula->tuning_phase = sdhci_readl(host, SDHCI_AT_STAT) & SDHCI_PHASE_SEL_MASK; ++#endif ++ mmc->ios.enhanced_strobe = param1.bits.emmc_enh_strobe; ++ ++ mmc->ios.vdd = (unsigned short)fls(mmc->ocr_avail) - 1; ++ mmc->ios.power_mode = MMC_POWER_ON; ++ /* OPENDRAIN: Identify process, PUSHPULL: Transfer */ ++ mmc->ios.bus_mode = MMC_BUSMODE_PUSHPULL; ++ mmc->ios.drv_type = 0; ++ mmc->ios.clock = mmc->f_max; ++ mmc->actual_clock = mmc->f_max; ++} ++ ++u32 mmc_get_rocr(struct sdhci_host *host) ++{ ++ u32 rocr = 0; ++ u32 param1_offset; ++ emmc_param_gen1_u param1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ BUG_ON(nebula->qboot_virt_addr == NULL); ++ param1_offset = nebula->info->qboot_param1_ofs; ++ param1.u32 = readl(nebula->qboot_virt_addr + param1_offset); ++ rocr = MMC_VDD_165_195; ++ rocr |= MMC_VDD_32_33 | MMC_VDD_33_34; ++ /* for >= 2GiB Device, hcs mode assign from ext csd */ ++ rocr |= (param1.bits.emmc_hcs_mode << HCS_BIT); ++ } ++ ++ return rocr; ++} ++ ++static u32 mmc_get_io_info(struct sdhci_host *host) ++{ ++ u32 timing; ++ emmc_param_gen0_u param0; ++ nebula_timing *timing_data = NULL; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const nebula_info *info = nebula->info; ++ ++ param0.u32 = 0; ++ ++ /* choice host timing data */ ++ timing_data = info->timing + host->timing; ++ if (!timing_data->data_valid) { ++ /* choice legacy mode timing */ ++ timing_data = info->timing; ++ } ++ ++ timing = timing_data->timing[IO_TYPE_CMD]; ++ param0.bits.emmc_cmd_drv = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ param0.bits.emmc_cmd_sl = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ ++ timing = timing_data->timing[IO_TYPE_CLK]; ++ param0.bits.emmc_clk_drv = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ param0.bits.emmc_clk_sl = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ ++ timing = timing_data->timing[IO_TYPE_DATA]; ++ param0.bits.emmc_data_drv = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ param0.bits.emmc_data_sl = \ ++ (timing & info->io_drv_str_mask) >> info->io_drv_str_bit_ofs; ++ ++ return param0.u32; ++} ++ ++int mmc_save_parameters(struct mmc_host *mmc) ++{ ++ unsigned int reg; ++ emmc_param_gen0_u param0; ++ emmc_param_gen1_u param1; ++ struct sdhci_host *host = mmc_priv(mmc); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const nebula_info *info = nebula->info; ++ ++ /* eMMC work parameters store by uboot, only eMMC support, cold boot no need */ ++ if ((nebula->devid != MMC_DEV_TYPE_MMC_0) || \ ++ mmc_get_qboot_mode(host) == QUICK_BOOT_COLD || \ ++ (mmc_get_cur_mode(host) == TRAN_MODE)) { ++ return ERET_SUCCESS; ++ } ++ ++ BUG_ON(nebula->qboot_virt_addr == NULL); ++ param0.u32 = readl(nebula->qboot_virt_addr); ++ ++ regmap_read(nebula->crg_regmap, info->crg_ofs[CRG_CLK_RST], ®); ++ 
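++	/*
++	 * param0 mirrors emmc_param_gen0_u: the CRG clock select, the
++	 * drive/sample phases and the IO drive strengths are packed into
++	 * one 32-bit word so the boot loader can restore the bus without
++	 * re-tuning. Only the clock-select field of the CRG register read
++	 * above is folded in below.
++	 */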
param0.bits.emmc_clk_sel = \ ++ ((reg & nebula->mask->crg_clk_sel_mask) >> nebula->mask->crg_clk_sel_ofs); ++ param0.bits.emmc_clk_ph_sel = nebula->drv_phase; ++#ifdef CONFIG_MMC_SDHCI_ANT ++ param0.bits.emmc_sw_clk_ph = plat_get_sample_phase(host); ++#else ++ param0.bits.emmc_sw_clk_ph = sdhci_readb(host, SDHCI_AT_STAT); ++#endif ++ ++ /* eMMC IO info */ ++ param0.u32 |= mmc_get_io_info(host); ++ ++ writel(param0.u32, nebula->qboot_virt_addr); ++ ++ param1.u32 = readl(nebula->qboot_virt_addr + info->qboot_param1_ofs); ++ param1.bits.emmc_uhs_mode_sel = \ ++ sdhci_readb(host, SDHCI_HOST_CONTROL2) & SDHCI_CTRL_UHS_MASK; ++ param1.bits.emmc_enh_strobe = mmc->ios.enhanced_strobe; ++ param1.bits.emmc_bus_width = (mmc->ios.bus_width == MMC_BUS_WIDTH_8) ? BUS_8BIT_IDX : \ ++ ((mmc->ios.bus_width == MMC_BUS_WIDTH_4) ? BUS_4BIT_IDX : BUS_1BIT_IDX); ++ param1.bits.emmc_hcs_mode = mmc_card_is_blockaddr(mmc->card); ++ writel(param1.u32, nebula->qboot_virt_addr + info->qboot_param1_ofs); ++ ++ mmc_set_cur_mode(host, TRAN_MODE); ++ ++ return ERET_SUCCESS; ++} ++EXPORT_SYMBOL(mmc_save_parameters); ++#endif +diff --git a/drivers/vendor/mmc/adapter/nebula_quick.h b/drivers/vendor/mmc/adapter/nebula_quick.h +new file mode 100644 +index 000000000..19bcb242c +--- /dev/null ++++ b/drivers/vendor/mmc/adapter/nebula_quick.h +@@ -0,0 +1,87 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: Nebula SDHCI driver header ++ * Author: AuthorNameMagicTag ++ * Create: 2023-02-27 ++ */ ++ ++#ifndef _MMC_NEBULA_QUICK_H_ ++#define _MMC_NEBULA_QUICK_H_ ++ ++#include ++ ++#include "nebula_quick.h" ++ ++typedef enum { ++ BUS_1BIT_IDX = 0, ++ BUS_4BIT_IDX, ++ BUS_8BIT_IDX, ++} emmc_bus_idx_u; ++ ++#define MMC_CMD_SLEEP_AWAKE 5 ++#define MMC_SAWAKE_SHIFT_BIT 15 ++#define MMC_RCA_SHIFT_BIT 16 ++ ++#define HCS_BIT 30 ++ ++#define MMC_CID_MAGIC 0x45239867 ++ ++/* emmc parameters type */ ++typedef union { ++ struct { ++ u32 emmc_clk_ph_sel : 5; // [4:0] ++ u32 emmc_clk_sel : 3; // [7:5] ++ u32 emmc_sw_clk_ph : 8; // [15:8] ++ u32 emmc_cmd_drv : 4; // [19:16] ++ u32 emmc_cmd_sl : 1; // [20] ++ u32 emmc_clk_drv : 4; // [24:21] ++ u32 emmc_clk_sl : 1; // [25] ++ u32 emmc_data_drv : 4; // [29:26] ++ u32 emmc_data_sl : 1; // [30] ++ u32 reserved_0 : 1; // [31] ++ } bits; ++ u32 u32; ++} emmc_param_gen0_u; ++ ++/* emmc parameters type */ ++typedef union { ++ struct { ++ u32 emmc_uhs_mode_sel : 3; // [2:0] ++ u32 emmc_enh_strobe : 1; // [3] ++ u32 emmc_bus_width : 2; // [5:4] ++ u32 emmc_hcs_mode : 1; // [6] ++ u32 emmc_spec_ver : 4; // [10:7] ++ u32 emmc_chip_size : 11; // [21:11] ++ u32 emmc_qboot_mode : 2; // [23:22] ++ u32 emmc_cur_mode : 8; // [31:24] ++ } bits; ++ u32 u32; ++} emmc_param_gen1_u; ++ ++typedef enum { ++ INIT_MODE = 0x0, ++ SLEEP_MODE = 0x5A, ++ BOOT_MODE, ++ DS_MODE, ++ TRAN_MODE ++} emmc_mode_u; ++ ++typedef enum { ++ QUICK_BOOT_DIS = 0x0, ++ QUICK_BOOT_WARM, ++ QUICK_BOOT_COLD, ++ QUICK_BOOT_MAX ++} emmc_qboot_u; ++ ++struct mmc_host; ++struct sdhci_host; ++int mmc_fast_boot_init(struct sdhci_host *host); ++void mmc_parameter_init(struct mmc_host *mmc); ++void mmc_set_cur_mode(struct sdhci_host *host, emmc_mode_u mode); ++u32 mmc_get_rocr(struct sdhci_host *host); ++ ++int mmc_save_parameters(struct mmc_host *mmc); ++void mmc_reset_init_mode(struct sdhci_host *host); ++bool mmc_is_fast_boot(struct sdhci_host *host); ++ ++#endif /* _MMC_NEBULA_QUICK_H_ */ +diff --git a/drivers/vendor/mmc/adapter/nebula_quirk_ids.h b/drivers/vendor/mmc/adapter/nebula_quirk_ids.h +new file mode 
100644 +index 000000000..c3b994f42 +--- /dev/null ++++ b/drivers/vendor/mmc/adapter/nebula_quirk_ids.h +@@ -0,0 +1,57 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: Nebula SDHCI driver header ++ * Author: AuthorNameMagicTag ++ * Create: 2023-01-05 ++ */ ++ ++#ifndef _DRIVERS_MMC_NEBULA_QUIRK_IDS_H ++#define _DRIVERS_MMC_NEBULA_QUIRK_IDS_H ++ ++#define MMC_CMDQ_FORCE_OFF 0x1 ++#define MMC_CMDQ_DIS_WHITELIST 0x2 ++ ++#ifdef CONFIG_MMC_CQHCI ++#include "card.h" ++#include "host.h" ++#include "quirks.h" ++ ++#define CID_MANFID_SANDISK_F 0x45 ++/* ++ * Quirk cmdq for MMC products. ++ */ ++static inline void __maybe_unused nebula_cmdq_quirk_mmc(struct mmc_card *card, int data) ++{ ++ struct mmc_host *host = card->host; ++ ++ if (host != NULL) { ++ host->caps2 |= (MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD); ++ pr_debug("Whitelist: match device %s\n", card->cid.prod_name); ++ } ++} ++ ++static const struct mmc_fixup mmc_cmdq_whitelist[] = { ++ /* Toshiba */ ++ MMC_FIXUP("008GB0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("016G30", CID_MANFID_TOSHIBA, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("016GB0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("008G30", CID_MANFID_TOSHIBA, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ /* Samsung */ ++ MMC_FIXUP("BJTD4R", CID_MANFID_SAMSUNG, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("CKTA42", CID_MANFID_SAMSUNG, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("8GTF4R", CID_MANFID_SAMSUNG, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("AJTD4R", CID_MANFID_SAMSUNG, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ /* Sandisk */ ++ MMC_FIXUP("DF4032", CID_MANFID_SANDISK_F, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("DG4016", CID_MANFID_SANDISK_F, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("DF4128", CID_MANFID_SANDISK_F, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ MMC_FIXUP("DG4008", CID_MANFID_SANDISK_F, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ /* Kingston */ ++ MMC_FIXUP("TB2816", CID_MANFID_KINGSTON, CID_OEMID_ANY, nebula_cmdq_quirk_mmc, 0), ++ /* null, no remove */ ++ END_FIXUP ++}; ++ ++#endif /* CONFIG_MMC_CQHCI */ ++ ++#endif /* _DRIVERS_MMC_BSP_QUIRK_IDS_H */ +diff --git a/drivers/vendor/mmc/dfx/mci_proc.c b/drivers/vendor/mmc/dfx/mci_proc.c +new file mode 100644 +index 000000000..58bf49030 +--- /dev/null ++++ b/drivers/vendor/mmc/dfx/mci_proc.c +@@ -0,0 +1,309 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: mci driver ++ * Author: AuthorNameMagicTag ++ * Create: 2023-04-20 ++ */ ++#include ++#include ++#include ++#include ++#include "card.h" ++#include "core.h" ++#include "sdhci_nebula.h" ++#include "mci_proc.h" ++ ++#define MCI_PARENT "mci" ++#define MCI_STATS_PROC "mci_info" ++#define MAX_CLOCK_SCALE 4 ++ ++static struct proc_dir_entry *g_proc_mci_dir; ++ ++static char *g_card_type[MAX_CARD_TYPE + 1] = { ++ "MMC card", ++ "SD card", ++ "SDIO card", ++ "SD combo (IO+mem) card", ++ "unknown" ++}; ++static char *g_clock_unit[MAX_CLOCK_SCALE] = { ++ "Hz", ++ "KHz", ++ "MHz", ++ "GHz" ++}; ++ ++#define BIT_WIDTH 32 ++static unsigned int unstuff_bits(const u32 *resp, u32 start, u32 size) ++{ ++ const u32 mask = ((size < BIT_WIDTH) ? 
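++	/*
++	 * resp[] holds the 128-bit response as four big-endian u32 words and
++	 * `start` counts bits from the LSB of the whole field; for a full
++	 * 32-bit extract the shift below would be undefined, so the ternary
++	 * falls back to 0 and the trailing `- 1` yields an all-ones mask.
++	 */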
1 << size : 0) - 1; ++ const u32 off = 0x3 - ((start) / BIT_WIDTH); ++ const u32 shft = (start) & 31; /* max shift value 31 */ ++ u32 res; ++ ++ res = resp[off] >> shft; ++ if (size + shft > BIT_WIDTH) ++ res |= resp[off - 1] << ((BIT_WIDTH - shft) % BIT_WIDTH); ++ res = res & mask; ++ ++ return res; ++} ++ ++static char *mci_get_card_type(unsigned int sd_type) ++{ ++ if (sd_type >= MAX_CARD_TYPE) ++ return g_card_type[MAX_CARD_TYPE]; ++ else ++ return g_card_type[sd_type]; ++} ++ ++static unsigned int analyze_clock_scale(unsigned int clock, ++ unsigned int *clock_val) ++{ ++ unsigned int scale = 0; ++ unsigned int tmp = clock; ++ ++ while (1) { ++ tmp = tmp / 1000; /* Cal freq by dividing 1000 */ ++ if (tmp > 0) { ++ *clock_val = tmp; ++ scale++; ++ } else { ++ break; ++ } ++ } ++ return scale; ++} ++ ++static inline int is_card_uhs(unsigned char timing) ++{ ++ return timing >= MMC_TIMING_UHS_SDR12 && ++ timing <= MMC_TIMING_UHS_DDR50; ++}; ++ ++static inline int is_card_hs(unsigned char timing) ++{ ++ return timing == MMC_TIMING_SD_HS || timing == MMC_TIMING_MMC_HS; ++}; ++ ++static void mci_stats_seq_speed(struct seq_file *s, struct mmc_host *mmc) ++{ ++ unsigned int speed_class, grade_speed_uhs; ++ struct mmc_card *card = mmc->card; ++ const char *uhs_bus_speed_mode = ""; ++ static const char * const uhs_speeds[] = { ++ [UHS_SDR12_BUS_SPEED] = "SDR12 ", ++ [UHS_SDR25_BUS_SPEED] = "SDR25 ", ++ [UHS_SDR50_BUS_SPEED] = "SDR50 ", ++ [UHS_SDR104_BUS_SPEED] = "SDR104 ", ++ [UHS_DDR50_BUS_SPEED] = "DDR50 ", ++ }; ++ ++ if (is_card_uhs(mmc->ios.timing) && ++ card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)) ++ uhs_bus_speed_mode = ++ uhs_speeds[card->sd_bus_speed]; ++ ++ seq_printf(s, "\tMode: %s %s\n", ++ is_card_uhs(mmc->ios.timing) ? "UHS" : ++ is_card_hs(mmc->ios.timing) ? "HS" : ++ (mmc->ios.enhanced_strobe == true) ? "HS400ES" : ++ (mmc->ios.timing == MMC_TIMING_MMC_HS400) ? "HS400" : ++ (mmc->ios.timing == MMC_TIMING_MMC_HS200) ? "HS200" : ++ (mmc->ios.timing == MMC_TIMING_MMC_DDR52) ? "DDR" : ++ "DS", uhs_bus_speed_mode); ++ ++ speed_class = unstuff_bits(card->raw_ssr, 56, 8); /* 56 = 440 - 384 */ ++ grade_speed_uhs = unstuff_bits(card->raw_ssr, 12, 4); /* 12 = 396 - 384 */ ++ seq_printf(s, "\tSpeed Class: Class %s\n", ++ (speed_class == 0x00) ? "0" : ++ (speed_class == 0x01) ? "2" : ++ (speed_class == 0x02) ? "4" : ++ (speed_class == 0x03) ? "6" : ++ (speed_class == 0x04) ? "10" : ++ "Reserved"); ++ seq_printf(s, "\tUhs Speed Grade: %s\n", ++ (grade_speed_uhs == 0x00) ? ++ "Less than 10MB/sec(0h)" : ++ (grade_speed_uhs == 0x01) ? 
++ "10MB/sec and above(1h)" : ++ "Reserved"); ++} ++ ++static void mci_stats_seq_clock(struct seq_file *s, struct mmc_host *mmc) ++{ ++ unsigned int clock, clock_scale; ++ unsigned int clock_value = 0; ++ ++ clock = mmc->ios.clock; ++ clock_scale = analyze_clock_scale(clock, &clock_value); ++ seq_printf(s, "\tHost work clock: %d%s\n", ++ clock_value, g_clock_unit[clock_scale]); ++ ++ clock = mmc->ios.clock; ++ clock_scale = analyze_clock_scale(clock, &clock_value); ++ seq_printf(s, "\tCard support clock: %d%s\n", ++ clock_value, g_clock_unit[clock_scale]); ++ ++ clock = mmc->actual_clock; ++ clock_scale = analyze_clock_scale(clock, &clock_value); ++ seq_printf(s, "\tCard work clock: %d%s\n", ++ clock_value, g_clock_unit[clock_scale]); ++} ++ ++static void mci_stats_seq_printout(struct seq_file *s) ++{ ++ unsigned int index_mci; ++ const char *type = NULL; ++ struct mmc_host *mmc = NULL; ++ struct mmc_card *card = NULL; ++ int present; ++ struct sdhci_host *host = NULL; ++ struct sdhci_nebula *priv = NULL; ++ ++ for (index_mci = 0; index_mci < MCI_SLOT_NUM; index_mci++) { ++ mmc = g_mci_host[index_mci]; ++ ++ if (mmc == NULL) { ++ seq_printf(s, "MCI%d: invalid\n", index_mci); ++ continue; ++ } else { ++ seq_printf(s, "MCI%d", index_mci); ++ } ++ ++ mmc_claim_host(mmc); ++ host = mmc_priv(mmc); ++ priv = nebula_priv(host); ++ ++ present = host->mmc->ops->get_cd(host->mmc); ++ if (present != 0) ++ seq_puts(s, ": pluged"); ++ else ++ seq_puts(s, ": unplugged"); ++ ++ card = host->mmc->card; ++ if (present == 0) { ++ seq_puts(s, "_disconnected\n"); ++ } else if ((present != 0) && (card == NULL)) { ++ seq_puts(s, "_init_failed\n"); ++ } else if (card != NULL) { ++ seq_puts(s, "_connected\n"); ++ ++ seq_printf(s, "\tType: %s", ++ mci_get_card_type(card->type)); ++ ++ if (card->state & MMC_STATE_BLOCKADDR) { ++ type = ((card->state & MMC_CARD_SDXC) != 0) ? ++ "SDXC" : "SDHC"; ++ seq_printf(s, "(%s)\n", type); ++ } ++ ++ mci_stats_seq_speed(s, mmc); ++ mci_stats_seq_clock(s, mmc); ++ ++#ifdef CONFIG_MMC_CARD_INFO ++ /* add card read/write error count */ ++ seq_printf(s, "\tCard error count: %d\n", host->error_count); ++#endif ++ } ++ mmc_release_host(mmc); ++ } ++} ++ ++/* proc interface setup */ ++static void *mci_seq_start(struct seq_file *s, loff_t *pos) ++{ ++ /* counter is used to tracking multi proc interfaces ++ * We have only one interface so return zero ++ * pointer to start the sequence. 
++ */ ++ static unsigned long counter; ++ ++ if (*pos == 0) ++ return &counter; ++ ++ return NULL; ++} ++ ++/* proc interface next */ ++static void *mci_seq_next(struct seq_file *s, void *v, loff_t *pos) ++{ ++ (*pos)++; ++ ++ return mci_seq_start(s, pos); ++} ++ ++/* define parameters where showed in proc file */ ++static int mci_stats_seq_show(struct seq_file *s, void *v) ++{ ++ mci_stats_seq_printout(s); ++ return 0; ++} ++ ++/* proc interface stop */ ++static void mci_seq_stop(struct seq_file *s, void *v) ++{ ++} ++ ++/* proc interface operation */ ++static const struct seq_operations mci_stats_seq_ops = { ++ .start = mci_seq_start, ++ .next = mci_seq_next, ++ .stop = mci_seq_stop, ++ .show = mci_stats_seq_show ++}; ++ ++/* proc file open */ ++static int mci_stats_proc_open(struct inode *inode, struct file *file) ++{ ++ return seq_open(file, &mci_stats_seq_ops); ++}; ++ ++/* proc file operation */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) ++static const struct file_operations mci_stats_proc_ops = { ++ .open = mci_stats_proc_open, ++ .read = seq_read, ++ .release = seq_release ++}; ++#else ++static const struct proc_ops mci_stats_proc_ops = { ++ .proc_open = mci_stats_proc_open, ++ .proc_read = seq_read, ++ .proc_release = seq_release ++}; ++#endif ++ ++int mci_proc_init(void) ++{ ++ struct proc_dir_entry *proc_stats_entry = NULL; ++ ++ g_proc_mci_dir = proc_mkdir(MCI_PARENT, NULL); ++ if (g_proc_mci_dir == NULL) { ++ pr_err("%s: failed to create proc file %s\n", ++ __func__, MCI_PARENT); ++ return 1; ++ } ++ ++ proc_stats_entry = proc_create(MCI_STATS_PROC, ++ 0, g_proc_mci_dir, &mci_stats_proc_ops); ++ if (proc_stats_entry == NULL) { ++ pr_err("%s: failed to create proc file %s\n", ++ __func__, MCI_STATS_PROC); ++ remove_proc_entry(MCI_PARENT, NULL); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int mci_proc_shutdown(void) ++{ ++ if (g_proc_mci_dir != NULL) { ++ remove_proc_entry(MCI_STATS_PROC, g_proc_mci_dir); ++ remove_proc_entry(MCI_PARENT, NULL); ++ g_proc_mci_dir = NULL; ++ } ++ ++ return 0; ++} +diff --git a/drivers/vendor/mmc/dfx/mci_proc.h b/drivers/vendor/mmc/dfx/mci_proc.h +new file mode 100644 +index 000000000..7cc38c94f +--- /dev/null ++++ b/drivers/vendor/mmc/dfx/mci_proc.h +@@ -0,0 +1,19 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: mci header ++ * Author: AuthorNameMagicTag ++ * Create: 2023-04-20 ++ */ ++ ++#ifndef __MCI_PROC_H__ ++#define __MCI_PROC_H__ ++ ++#include "mci_proc.h" ++ ++#define MAX_CARD_TYPE 4 ++#define MAX_SPEED_MODE 5 ++ ++int mci_proc_init(void); ++int mci_proc_shutdown(void); ++ ++#endif /* __MCI_PROC_H__ */ +diff --git a/drivers/vendor/mmc/dfx/nebula_dfx.c b/drivers/vendor/mmc/dfx/nebula_dfx.c +new file mode 100644 +index 000000000..821107123 +--- /dev/null ++++ b/drivers/vendor/mmc/dfx/nebula_dfx.c +@@ -0,0 +1,531 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
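++ *
++ * DFX exposes one procfs node per host, /proc/<mmc_hostname>/status:
++ * reading it prints card/host state, writing it drives the debug
++ * actions parsed below, e.g. (shell sketch, assuming the host is
++ * named mmc0):
++ *
++ *	echo "rescan hs200,8" > /proc/mmc0/status
++ *	echo "lvl=2" > /proc/mmc0/status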
++ * Description: Nebula SDHCI driver header ++ * Author: AuthorNameMagicTag ++ * Create: 2023-01-05 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "card.h" ++#include "core.h" ++#include "sdhci_nebula.h" ++#include "nebula_dfx.h" ++ ++#ifdef CONFIG_SDHCI_NEBULA_DFX ++ ++#define BITS_PER_U32 32 ++#define BYTES_PER_U32 4 ++ ++#define SD_SSR_BITS_OFFSET 384 ++#define SD_SSR_SPEED_CLASS_WIDTH 8 ++#define SD_SSR_SPEED_CLASS_OFS (440 - SD_SSR_BITS_OFFSET) ++#define SD_SSR_UHS_SPEED_GRADE_WIDTH 4 ++#define SD_SSR_UHS_SPEED_GRADE_OFS (396 - SD_SSR_BITS_OFFSET) ++ ++/** ++ * R1_OUT_OF_RANGE - Command argument out of range ++ * R1_ADDRESS_ERROR - Misaligned address ++ * R1_BLOCK_LEN_ERROR - Transferred block length incorrect ++ * R1_WP_VIOLATION - Tried to write to protected block ++ * R1_CC_ERROR - Card controller error ++ * R1_ERROR - General/unknown error ++ */ ++#define CMD_R1_ERRORS \ ++ (R1_OUT_OF_RANGE | R1_ADDRESS_ERROR | \ ++ R1_BLOCK_LEN_ERROR | R1_WP_VIOLATION | \ ++ R1_CC_ERROR | R1_ERROR) ++ ++#ifdef CONFIG_MMC_CARD_INFO ++static char *g_card_type_string[] = { ++ "MMC card", ++ "SD card", ++ "SDIO card", ++ "SD combo (IO+mem) card", ++ "unknown" ++}; ++#endif ++ ++static char *g_timing_str[] = { ++ "DS", ++ "HS", ++ "HS", ++ "UHS SDR12", ++ "UHS SDR25", ++ "UHS SDR50", ++ "UHS SDR104", ++ "DDR50", ++ "DDR52", ++ "HS200", ++ "HS400" ++}; ++ ++static void nebula_show_str(struct seq_file *m, const char *key, const char *val) ++{ ++ seq_printf(m, "%-25s:%-35s|\n", key, val); ++} ++ ++static void nebula_show_u32(struct seq_file *m, const char *key, u32 val) ++{ ++ seq_printf(m, "%-25s:%-35u|\n", key, val); ++} ++ ++static void nebula_show_hex(struct seq_file *m, const char *key, u32 val) ++{ ++ seq_printf(m, "%-25s:0x%-33x|\n", key, val); ++} ++ ++static void nebula_help_show(struct seq_file *m) ++{ ++ seq_puts(m, "#\n" ++ "# echo \"help \" > /proc/mmc[x]/status\n" ++ "# echo \"rescan \" > /proc/mmc[x]/status\n" ++ "# mode format: ,; example: \"hs200,4\"\n" ++ "# speed: ds, hs, hs200, hs400, hs400es, sdr12, sdr25, sdr50, sdr104\n" ++ "# bus_width: 1, 4, 8\n" ++ "# echo \"lvl=\" > /proc/mmc[x]/status\n" ++ "# log_level: 0, 1, 2\n" ++ "#\n"); ++} ++ ++static const char *nebula_get_timing_type(struct sdhci_host *host) ++{ ++ const char *timing_str = "unknow"; ++ ++ if (host->mmc->ios.timing < ARRAY_SIZE(g_timing_str)) { ++ timing_str = g_timing_str[host->mmc->ios.timing]; ++ if (host->mmc->ios.enhanced_strobe) ++ timing_str = "HS400ES"; ++ } ++ ++ return timing_str; ++} ++ ++#ifdef CONFIG_MMC_CARD_INFO ++static u32 unstuff_bits(const u32 *resp, u32 start, u32 size) ++{ ++ const u32 mask = ((size < BITS_PER_U32) ? (1 << size) : 0) - 1; ++ const u32 off = (BYTES_PER_U32 - 1) - ((start) / BITS_PER_U32); ++ const u32 shft = (start) & (BITS_PER_U32 - 1); ++ u32 res; ++ ++ res = resp[off] >> shft; ++ if ((size + shft) > BITS_PER_U32) ++ res |= resp[off - 1] << ((BITS_PER_U32 - shft) % BITS_PER_U32); ++ res = res & mask; ++ ++ return res; ++} ++ ++static char *nebula_get_card_type(u32 card_type) ++{ ++ if (card_type >= ARRAY_SIZE(g_card_type_string)) ++ return g_card_type_string[ARRAY_SIZE(g_card_type_string) - 1]; ++ else ++ return g_card_type_string[card_type]; ++} ++ ++static void nebula_seq_sd_info(struct seq_file *m) ++{ ++ struct sdhci_host *host = (struct sdhci_host *)m->private; ++ struct card_info *info = &host->c_info; ++ u32 speed_class, grade_speed_uhs; ++ ++ nebula_show_str(m, " capacity_type", ++ ((info->card_state & MMC_STATE_BLOCKADDR) == 0) ? 
"SD (<=2GB)" : ++ ((info->card_state & MMC_CARD_SDXC) != 0) ? "SDXC (<=2TB)" : "SDHC (<=32GB)"); ++ ++ speed_class = unstuff_bits(info->ssr, SD_SSR_SPEED_CLASS_OFS, ++ SD_SSR_SPEED_CLASS_WIDTH); ++ nebula_show_str(m, " speed_class", ++ (speed_class == SD_SPEED_CLASS0) ? "0" : ++ (speed_class == SD_SPEED_CLASS1) ? "2" : ++ (speed_class == SD_SPEED_CLASS2) ? "4" : ++ (speed_class == SD_SPEED_CLASS3) ? "6" : ++ (speed_class == SD_SPEED_CLASS4) ? "10" : ++ "Reserved"); ++ ++ grade_speed_uhs = unstuff_bits(info->ssr, SD_SSR_UHS_SPEED_GRADE_OFS, ++ SD_SSR_UHS_SPEED_GRADE_WIDTH); ++ nebula_show_str(m, " uhs_speed_grade", ++ (grade_speed_uhs == SD_SPEED_GRADE0) ? ++ "Less than 10MB/sec(0h)" : ++ (grade_speed_uhs == SD_SPEED_GRADE1) ? ++ "10MB/sec and above(1h)" : ++ "Reserved"); ++} ++ ++static void nebula_cmd_backtrace_show(struct seq_file *m) ++{ ++ int idx, sp; ++ struct sdhci_host *host = (struct sdhci_host *)m->private; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula_show_str(m, "command_trace", "idx command"); ++ sp = (nebula->cmd_bt.sp % NEBULA_DFX_BT_MAX_NUM) + 1; ++ for (idx = 0; idx < NEBULA_DFX_BT_MAX_NUM; idx++) { ++ seq_printf(m, "%-25s: %d CMD%-27u|\n", "", idx, ++ nebula->cmd_bt.opcode[sp++]); ++ sp %= NEBULA_DFX_BT_MAX_NUM; ++ } ++} ++#endif ++ ++static void nebula_seq_common_info(struct seq_file *m) ++{ ++ struct sdhci_host *host = (struct sdhci_host *)m->private; ++ ++ nebula_show_hex(m, "host_quirk1", host->quirks); ++ nebula_show_hex(m, "host_quirk2", host->quirks2); ++} ++ ++static void nebula_seq_card_info(struct seq_file *m) ++{ ++ struct sdhci_host *host = (struct sdhci_host *)m->private; ++#ifdef CONFIG_MMC_CARD_INFO ++ bool card_insert = true; ++ struct card_info *info = &host->c_info; ++ ++ if ((mmc_card_is_removable(host->mmc) != 0) && host->mmc->ops->get_cd != NULL) ++ card_insert = host->mmc->ops->get_cd(host->mmc); ++ ++ if (card_insert && (info->card_connect != CARD_CONNECT) && (host->mmc->card_status == MMC_CARD_INIT_FAIL)) { ++ nebula_show_str(m, "card_status", "pluged_init_failed"); ++ nebula_cmd_backtrace_show(m); ++ } else if (card_insert && (info->card_connect == CARD_CONNECT)) { ++ nebula_show_str(m, "card_status", "plugged"); ++ } else { ++ nebula_show_str(m, "card_status", "unplugged"); ++ } ++ ++ if (card_insert && (info->card_connect == CARD_CONNECT)) { ++ nebula_show_str(m, " card_type", nebula_get_card_type(info->card_type)); ++ nebula_show_str(m, " work_mode", nebula_get_timing_type(host)); ++ if (info->card_type != MMC_TYPE_MMC) ++ nebula_seq_sd_info(m); ++ ++ nebula_show_str(m, " cmdq_enable", (host->cqe_on) ? 
"true" : "false"); ++ nebula_show_u32(m, " bus_width", (1 << host->mmc->ios.bus_width)); ++ nebula_show_u32(m, " host_work_clock", info->card_support_clock); ++ nebula_show_u32(m, " card_work_clock", host->mmc->actual_clock); ++ } ++#else ++ if (host->mmc->ios.clock != 0) { ++ nebula_show_str(m, "work_mode", nebula_get_timing_type(host)); ++ nebula_show_u32(m, "bus_width", (1 << host->mmc->ios.bus_width)); ++ nebula_show_u32(m, "host_work_clock", host->mmc->ios.clock); ++ } ++#endif ++} ++ ++static int nebula_stats_show(struct seq_file *m, void *v) ++{ ++ struct sdhci_host *host = (struct sdhci_host *)m->private; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->dfx_cap.help) { ++ nebula_help_show(m); ++ nebula->dfx_cap.help = false; ++ return 0; ++ } ++ ++ if (nebula->dfx_cap.log_level >= DEBUG_LVL_VERBOSE) ++ sdhci_dumpregs(host); ++ ++ seq_printf(m, "========================%s=================================\n", ++ mmc_hostname(host->mmc)); ++ nebula_show_str(m, "version", SDHCI_NEBULA_KERNEL_VERSION); ++ nebula_show_str(m, "mmc_device", host->hw_name); ++ ++ nebula_seq_card_info(m); ++ ++ nebula_show_hex(m, "host_caps1", host->mmc->caps); ++ nebula_show_hex(m, "host_caps2", host->mmc->caps2); ++#ifdef CONFIG_MMC_CARD_INFO ++ nebula_show_u32(m, "error_count", host->error_count); ++#endif ++ nebula_seq_common_info(m); ++ ++ seq_puts(m, "=============================================================\n"); ++ ++ return 0; ++} ++ ++/* proc file open */ ++static int nebula_stats_proc_open(struct inode *inode, struct file *file) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) ++ return single_open(file, nebula_stats_show, pde_data(inode)); ++#else ++ return single_open(file, nebula_stats_show, PDE_DATA(inode)); ++#endif ++}; ++ ++static void nebula_parse_bus_width(struct sdhci_host *host, const char *mode) ++{ ++ host->mmc->caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); ++ ++ if (strstr(mode, ",4") != NULL) { ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, ",8") != NULL) { ++ host->mmc->caps |= MMC_CAP_8_BIT_DATA; ++ } ++} ++ ++static void nebula_parse_mmc_mode(struct sdhci_host *host, const char *mode) ++{ ++ u32 avail_type = 0; ++ struct mmc_card *card = host->mmc->card; ++ ++ if (mmc_card_is_removable(card->host)) { ++ return; ++ } ++ ++ nebula_parse_bus_width(host, mode); ++ ++ if (strstr(mode, "hs400es") != NULL) { ++ avail_type |= EXT_CSD_CARD_TYPE_HS400ES | EXT_CSD_CARD_TYPE_HS400 | \ ++ EXT_CSD_CARD_TYPE_HS200 | EXT_CSD_CARD_TYPE_HS; ++ host->mmc->caps |= MMC_CAP_8_BIT_DATA; ++ } else if (strstr(mode, "hs400") != NULL) { ++ avail_type |= EXT_CSD_CARD_TYPE_HS400 | EXT_CSD_CARD_TYPE_HS200 | \ ++ EXT_CSD_CARD_TYPE_HS; ++ host->mmc->caps |= MMC_CAP_8_BIT_DATA; ++ } else if (strstr(mode, "hs200") != NULL) { ++ avail_type |= EXT_CSD_CARD_TYPE_HS200 | EXT_CSD_CARD_TYPE_HS; ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, "hs") != NULL) { ++ avail_type |= EXT_CSD_CARD_TYPE_HS; ++ } ++ ++ card->mmc_avail_type = avail_type; ++} ++ ++static void nebula_parse_sd_mode(struct sdhci_host *host, const char *mode) ++{ ++ u32 sd_bus_speed = 0; ++ ++ host->mmc->caps &= ~(MMC_CAP_UHS | MMC_CAP_SD_HIGHSPEED); ++ ++ nebula_parse_bus_width(host, mode); ++ ++ if (strstr(mode, "sdr12")) { ++ sd_bus_speed |= MMC_CAP_UHS_SDR12; ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, "sdr25") != NULL) { ++ sd_bus_speed |= MMC_CAP_UHS_SDR25; ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, "sdr50") != NULL) 
{ ++ sd_bus_speed |= MMC_CAP_UHS_SDR50; ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, "sdr104") != NULL) { ++ sd_bus_speed |= MMC_CAP_UHS_SDR104; ++ host->mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (strstr(mode, "hs") != NULL) { ++ sd_bus_speed |= MMC_CAP_SD_HIGHSPEED; ++ } ++ ++ host->mmc->caps |= sd_bus_speed; ++} ++ ++static void nebula_trigger_rescan(struct sdhci_host *host, const char *mode) ++{ ++ int ret; ++ struct mmc_host *mmc = host->mmc; ++ struct mmc_card *card = mmc->card; ++ ++ if (card != NULL) { ++ mmc_claim_host(mmc); ++ nebula_parse_mmc_mode(host, mode); ++ nebula_parse_sd_mode(host, mode); ++#ifdef CONFIG_MMC_CARD_INFO ++ mmc->card_status = MMC_CARD_UNINIT; ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) ++ ret = mmc_hw_reset(mmc->card); ++#else ++ ret = mmc_hw_reset(mmc); ++#endif ++#ifdef CONFIG_MMC_CARD_INFO ++ mmc->card_status = (ret == 0) ? MMC_CARD_INIT : MMC_CARD_INIT_FAIL; ++ if (mmc->ops->card_info_save) ++ mmc->ops->card_info_save(mmc); ++#endif ++ mmc_release_host(mmc); ++ } ++} ++ ++static void nebula_trigger_detect(struct sdhci_host *host, const char *mode) ++{ ++ u8 val; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid != MMC_DEV_TYPE_MMC_0) { ++ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); ++ val |= SDHCI_DETECT_POLARITY; ++ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); ++ } ++} ++ ++static void nebula_trigger_help(struct sdhci_host *host, const char *mode) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (strstr(mode, "on") != NULL) { ++ nebula->dfx_cap.help = true; ++ } else if (strstr(mode, "off") != NULL) { ++ nebula->dfx_cap.help = false; ++ } ++} ++ ++static void nebula_trigger_log_level(struct sdhci_host *host, const char *mode) ++{ ++ u32 lvl = DEBUG_LVL_INFO; ++ char *ptr = NULL; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ ptr = strstr(mode, "="); ++ if (ptr == NULL) { ++ return; ++ } ++ ++ ptr++; ++ ++ if (get_option(&ptr, &lvl) == 0) { ++ return; ++ } ++ ++ if (lvl >= DEBUG_LVL_INFO && lvl < DEBUG_LVL_MAX) { ++ nebula->dfx_cap.log_level = lvl; ++ } ++} ++ ++static ssize_t nebula_proc_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *pos) ++{ ++ char kbuf[MAX_STR_LEN] = {0}; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) ++ struct sdhci_host *host = (struct sdhci_host *)pde_data(file_inode(file)); ++#else ++ struct sdhci_host *host = (struct sdhci_host *)PDE_DATA(file_inode(file)); ++#endif ++ ++ if (count == 0) ++ return -EINVAL; ++ ++ if (count > sizeof(kbuf)) ++ count = sizeof(kbuf) - 1; ++ ++ if (copy_from_user(kbuf, buf, count)) ++ return -EFAULT; ++ ++ /* Strip trailing '\n' and terminate string */ ++ kbuf[count - 1] = 0; ++ ++ if (strstr(kbuf, "rescan") != NULL) { ++ nebula_trigger_rescan(host, kbuf); ++ } else if (strstr(kbuf, "help") != NULL) { ++ nebula_trigger_help(host, kbuf); ++ } else if (strstr(kbuf, "lvl") != NULL) { ++ nebula_trigger_log_level(host, kbuf); ++ } else if (strstr(kbuf, "detect") != NULL) { ++ nebula_trigger_detect(host, kbuf); ++ } ++ ++ return (ssize_t)count; ++} ++/* proc file operation */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++static const struct proc_ops nebula_stats_proc_ops = { ++ .proc_open = nebula_stats_proc_open, ++ .proc_read = seq_read, ++ .proc_write = nebula_proc_write, ++ .proc_release = single_release, ++}; ++#else ++static const struct file_operations nebula_stats_proc_ops = { ++ .owner = THIS_MODULE, ++ .open = nebula_stats_proc_open, ++ .read = 
seq_read, ++ .release = single_release, ++ .write = nebula_proc_write, ++}; ++#endif ++ ++void sdhci_nebula_dfx_irq(struct sdhci_host *host, u32 intmask) ++{ ++ struct mmc_command *cmd = host->cmd; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (cmd == NULL || ((intmask & SDHCI_INT_CMD_MASK) == 0)) { ++ return; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) ++ if (host->mmc->doing_init_tune != 0) { ++ return; ++ } ++#endif ++ ++ nebula->cmd_bt.sp++; ++ nebula->cmd_bt.sp %= NEBULA_DFX_BT_MAX_NUM; ++ nebula->cmd_bt.opcode[nebula->cmd_bt.sp] = cmd->opcode; ++} ++ ++int sdhci_nebula_proc_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->proc_root = proc_mkdir(mmc_hostname(host->mmc), NULL); ++ if (nebula->proc_root == NULL) { ++ pr_err("%s: create proc file failed\n", mmc_hostname(host->mmc)); ++ return -ENOMEM; ++ } ++ ++ nebula->proc_stat = proc_create_data("status", 0, nebula->proc_root, ++ &nebula_stats_proc_ops, (void *)host); ++ if (nebula->proc_stat == NULL) { ++ pr_err("%s: create status file failed\n", mmc_hostname(host->mmc)); ++ remove_proc_entry(mmc_hostname(host->mmc), NULL); ++ nebula->proc_root = NULL; ++ return -ENOMEM; ++ } ++ ++ nebula->dfx_cap.log_level = DEBUG_LVL_INFO; ++ nebula->dfx_cap.help = false; ++ ++ return ERET_SUCCESS; ++} ++ ++int sdhci_nebula_proc_shutdown(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->proc_root != NULL) { ++ if (nebula->proc_stat != NULL) { ++ remove_proc_entry("status", nebula->proc_root); ++ nebula->proc_stat = NULL; ++ } ++ remove_proc_entry(mmc_hostname(host->mmc), NULL); ++ nebula->proc_root = NULL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++#else ++int sdhci_nebula_proc_init(struct sdhci_host *host) ++{ ++ return ERET_SUCCESS; ++} ++ ++int sdhci_nebula_proc_shutdown(struct sdhci_host *host) ++{ ++ return ERET_SUCCESS; ++}; ++#endif +diff --git a/drivers/vendor/mmc/dfx/nebula_dfx.h b/drivers/vendor/mmc/dfx/nebula_dfx.h +new file mode 100644 +index 000000000..2ba490454 +--- /dev/null ++++ b/drivers/vendor/mmc/dfx/nebula_dfx.h +@@ -0,0 +1,38 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
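++ *
++ * The log level gates how verbose the status node is: from
++ * DEBUG_LVL_VERBOSE upwards a read of the node additionally dumps the
++ * SDHCI registers. The level is changed at runtime with the "lvl=<n>"
++ * write command handled in nebula_dfx.c.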
++ * Description: Nebula SDHCI driver header
++ * Author: AuthorNameMagicTag
++ * Create: 2023-01-05
++ */
++#ifndef __NEBULA_DFX_H__
++#define __NEBULA_DFX_H__
++
++#include "nebula_dfx.h"
++
++typedef enum {
++	DEBUG_LVL_INFO = 0,
++	DEBUG_LVL_NOTICE,
++	DEBUG_LVL_VERBOSE,
++	DEBUG_LVL_MAX,
++} nebula_dfx_log_lvl;
++
++#define MAX_STR_LEN 20
++#define MAX_NAMELEN 10
++#define CLOCK_KHZ 1000
++
++enum sd_speed_class {
++	SD_SPEED_CLASS0 = 0,
++	SD_SPEED_CLASS1,
++	SD_SPEED_CLASS2,
++	SD_SPEED_CLASS3,
++	SD_SPEED_CLASS4,
++	SD_SPEED_CLASS_MAX,
++};
++
++enum sd_uhs_speed_grade {
++	SD_SPEED_GRADE0 = 0,
++	SD_SPEED_GRADE1,
++	SD_SPEED_GRADE_MAX,
++};
++
++#endif
+diff --git a/drivers/vendor/mmc/driver_obj.mk b/drivers/vendor/mmc/driver_obj.mk
+new file mode 100644
+index 000000000..f11fa4c3a
+--- /dev/null
++++ b/drivers/vendor/mmc/driver_obj.mk
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_MMC_SDHCI_SOCT) += huanglong/mmc/
++obj-$(CONFIG_MMC_SDHCI_SHAOLINSWORD) += huanglong/mmc/
++
+diff --git a/drivers/vendor/mmc/dtsi_usage.txt b/drivers/vendor/mmc/dtsi_usage.txt
+new file mode 100644
+index 000000000..a6faf8569
+--- /dev/null
++++ b/drivers/vendor/mmc/dtsi_usage.txt
+@@ -0,0 +1,58 @@
++* Nebula SDHCI Controller
++
++The Nebula SDHCI Controller acts as an MMC controller
++to support MMC, SD, and SDIO types of memory cards.
++
++This file documents differences between the core properties in mmc.txt
++and the properties used by the nebula driver.
++
++Refer to mmc.txt for standard MMC bindings.
++
++Required properties:
++- compatible: value should be "nebula,sdhci" for nebula controllers
++
++- reg: physical base address of the controller and length
++- interrupts: Should contain the controller interrupt number
++- clocks: Should contain phandle for the clock feeding the MMC controller
++- clock-names: Should contain the following:
++	"mmc_clk" - source clock (required)
++- resets: Array of resets required for the Nebula host.
++- reset-names: Names of the resets above. Should contain the following:
++	"crg_reset" - source reset (mandatory)
++	"dll_reset" - source reset (mandatory)
++	"crg_tx" - crg tx reset (optional)
++	"crg_rx" - crg rx reset (optional)
++	"samp_rst" - sample reset (optional)
++
++Optional properties:
++- devid: device id (emmc: 0, sdio0: 1, sdio1: 2).
++- fpga: nebula driver takes the FPGA branch: no tuning, no iocfg.
++- reset_out_drv: reset the host outside the driver, via the resets node.
++- pm_runtime_enable: device supports runtime power management.
++- sample_turning: if the device does not support edge tuning, enable sample tuning.
++- crg_regmap: crg register handle.
++- iocfg_regmap: iocfg register handle.
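++
++A driver would typically resolve the two regmap handles through the
++generic syscon helpers; a minimal sketch (only the property strings
++above come from this binding, the surrounding code is illustrative):
++
++	struct regmap *crg;
++	u32 devid = 0;
++
++	crg = syscon_regmap_lookup_by_phandle(dev->of_node, "crg_regmap");
++	if (IS_ERR(crg))
++		return PTR_ERR(crg);
++	of_property_read_u32(dev->of_node, "devid", &devid);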
++ ++Examples: ++emmc:emmc@0x01710000 { ++ compatible = "nebula,sdhci"; ++ reg = <0x01710000 0x1000>; ++ interrupts = <0 221 4>; ++ clocks = <&huanglong_clock PERI_CRG426_EMMC>; ++ clock-names = "mmc_clk"; ++ resets = <&clock 0x34c0 16>, <&clock 0x34c0 17>, <&clock 0x34c0 18>, <&clock 0x34c4 1>; ++ reset-names = "crg_reset", "crg_tx", "crg_rx", "dll_reset"; ++ crg_regmap = <&huanglong_clock>; ++ iocfg_regmap = <&huanglong_iocfg>; ++ max-frequency = <196000000>; ++ disable-wp; ++ non-removable; ++ bus-width = <8>; ++ mmc-hs200-1_8v; ++ mmc-hs400-1_8v; ++ mmc-hs400-enhanced-strobe; ++ cap-mmc-highspeed; ++ cap-mmc-hw-reset; ++ devid = <0>; ++ status = "okay"; ++}; +\ No newline at end of file +diff --git a/drivers/vendor/mmc/nebula_fmea.h b/drivers/vendor/mmc/nebula_fmea.h +new file mode 100644 +index 000000000..c3a733122 +--- /dev/null ++++ b/drivers/vendor/mmc/nebula_fmea.h +@@ -0,0 +1,28 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI fmea header ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++#ifndef __SDHCI_FMEA_H__ ++#define __SDHCI_FMEA_H__ ++ ++#include "sdhci.h" ++ ++#include "nebula_fmea.h" ++ ++#define SDHCI_FMEA_LIFE_TIME_EXCEED 955465100 ++ ++#define DEVICE_LIFE_CHECK_INTERVAL (3600) ++#define DEVICE_LIFE_TIME_EST_VAL (0x5) ++ ++typedef struct sdhci_nebula_fmea { ++ struct sdhci_host *host; ++ struct delayed_work mmc_lifecheck_work; ++ int life_check_interval; ++} nebula_fmea; ++ ++int sdhci_nebula_fmea_init(struct sdhci_host *host, nebula_fmea *fmea); ++int sdhci_nebula_fmea_deinit(nebula_fmea *fmea); ++#endif +diff --git a/drivers/vendor/mmc/nebula_intf.c b/drivers/vendor/mmc/nebula_intf.c +new file mode 100644 +index 000000000..6cec1dd00 +--- /dev/null ++++ b/drivers/vendor/mmc/nebula_intf.c +@@ -0,0 +1,100 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
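++ *
++ * These exported helpers let out-of-tree (typically WiFi) drivers
++ * re-probe an SDIO function after power-cycling the module, e.g.:
++ *
++ *	bsp_sdio_rescan(1);            (slot index, bounds-checked below)
++ *	sdhci_nebula_sdio_rescan(1);   (resets the card if one is present)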
++ * Description: nebula intf ++ * Author: AuthorNameMagicTag ++ * Create: 2023-04-20 ++ */ ++ ++#include ++#include ++#include ++#include ++#include "core.h" ++#include "sdhci_nebula.h" ++#include "nebula_intf.h" ++ ++/* ++ * This api is for wifi driver rescan the sdio device ++ */ ++int bsp_sdio_rescan(int slot) ++{ ++ struct mmc_host *mmc = NULL; ++ ++ if ((slot >= MCI_SLOT_NUM) || (slot < 0)) { ++ pr_err("invalid mmc slot, please check the argument\n"); ++ return -EINVAL; ++ } ++ ++ mmc = g_mci_host[slot]; ++ if (mmc == NULL) { ++ pr_err("invalid mmc, please check the argument\n"); ++ return -EINVAL; ++ } ++ ++ mmc->rescan_entered = 0; ++ mmc_detect_change(mmc, 0); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(bsp_sdio_rescan); ++ ++int hl_drv_sdio_rescan(int index) ++{ ++ struct mmc_host *mmc = NULL; ++ int i; ++ ++ if ((index < 0) || (index >= MCI_SLOT_NUM)) { ++ pr_err("invalid mmc_host index for sdio %d\n", index); ++ return -EINVAL; ++ } ++ ++ for (i = MMC_DEV_TYPE_SDIO_0; i <= MMC_DEV_TYPE_SDIO_1; i++) { ++ mmc = g_mmc_host[i]; ++ ++ if ((mmc == NULL) || (mmc->card != NULL)) { ++ continue; ++ } ++ printk("Trigger sdio%d scanning card successfully\n", i); ++ mmc_detect_change(mmc, 0); ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(hl_drv_sdio_rescan); ++ ++int sdhci_nebula_sdio_rescan(int index) ++{ ++ int ret = 0; ++ struct mmc_host *mmc = NULL; ++ ++ if ((index < 0) || (index >= MCI_SLOT_NUM)) { ++ pr_err("invalid mmc_host index for sdio %d\n", index); ++ return -EINVAL; ++ } ++ ++ mmc = g_mmc_host[index]; ++ if (mmc == NULL) { ++ pr_err("sdio %d not init\n", index); ++ return -EINVAL; ++ } ++ ++ pr_info("Trigger sdio%d rescan\n", index); ++ if (mmc->card == NULL) { ++ mmc->rescan_entered = 0; ++ mmc_detect_change(mmc, 0); ++ } else { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) ++ mmc_claim_host(mmc); ++ ret = mmc_hw_reset(mmc->card); ++ mmc_release_host(mmc); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ mmc_claim_host(mmc); ++ ret = mmc_hw_reset(mmc); ++ mmc_release_host(mmc); ++#else ++ ret = mmc_hw_reset(mmc); ++#endif ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(sdhci_nebula_sdio_rescan); +\ No newline at end of file +diff --git a/drivers/vendor/mmc/nebula_intf.h b/drivers/vendor/mmc/nebula_intf.h +new file mode 100644 +index 000000000..8662c2d8d +--- /dev/null ++++ b/drivers/vendor/mmc/nebula_intf.h +@@ -0,0 +1,17 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: nebula intf header ++ * Author: AuthorNameMagicTag ++ * Create: 2023-04-20 ++ */ ++ ++#ifndef NEBULA_INTF_H ++#define NEBULA_INTF_H ++ ++#include "nebula_intf.h" ++ ++int bsp_sdio_rescan(int slot); ++int hl_drv_sdio_rescan(int index); ++int sdhci_nebula_sdio_rescan(int index); ++ ++#endif +diff --git a/drivers/vendor/mmc/platform/platform_comm.c b/drivers/vendor/mmc/platform/platform_comm.c +new file mode 100644 +index 000000000..31ff144c0 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/platform_comm.c +@@ -0,0 +1,609 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI platform comm ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++ ++#define sdhci_nebula_dump(f, x...) 
\ ++ pr_err("%s: sdhci: " f, mmc_hostname(host->mmc), ## x) ++ ++static void comm_set_regmap(struct regmap *regmap, ++ u32 offset, u32 mask, u32 data) ++{ ++ u32 reg; ++ ++ regmap_read(regmap, offset, ®); ++ reg &= ~mask; ++ data &= mask; ++ reg |= data; ++ regmap_write(regmap, offset, reg); ++} ++ ++static void comm_set_regmap_byte(struct regmap *regmap, u32 offset, u32 mask, u32 data) ++{ ++ u32 bits_ofs; ++ ++ bits_ofs = (offset & 0x3) * BITS_PER_BYTE; /* 0x3: 4 bytes align */ ++ ++ /* 4 bytes align */ ++ regmap_write_bits(regmap, round_down(offset, 4), (mask << bits_ofs), (data << bits_ofs)); ++} ++ ++static void comm_get_io_data(struct sdhci_host *host, u32 offset, u32 *reg_data) ++{ ++ u32 reg, bits_ofs; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->priv_quirk & NEBULA_QUIRK_IO_CFG_WIDTH_BYTE) { ++ bits_ofs = (offset & 0x3) * BITS_PER_BYTE; /* 0x3: 4 bytes align */ ++ ++ /* 4 bytes align */ ++ regmap_read(nebula->iocfg_regmap, round_down(offset, 4), ®); ++ reg >>= bits_ofs; ++ reg &= 0xFF; ++ } else { ++ regmap_read(nebula->iocfg_regmap, offset, ®); ++ } ++ ++ *reg_data = reg; ++} ++ ++static int comm_wait_dll_timeout(struct sdhci_host *host, ++ u32 offset, u32 mask, u32 timeout) ++{ ++ u32 reg, save_timeout; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ save_timeout = timeout; ++ ++ do { ++ reg = 0; ++ regmap_read(nebula->crg_regmap, offset, ®); ++ if (reg & mask) ++ return ERET_SUCCESS; ++ ++ mdelay(1); ++ timeout--; ++ } while (timeout > 0); ++ ++ pr_err("%s: wait ofs 0x%x mask 0x%x timeout after %d ms\n", ++ mmc_hostname(host->mmc), offset, mask, save_timeout); ++ return -ETIMEDOUT; ++} ++ ++int plat_wait_sample_dll_ready(struct sdhci_host *host) ++{ ++ unsigned int offset, mask; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ offset = nebula->info->crg_ofs[CRG_DLL_STA]; ++ mask = nebula->mask->samp_ready_mask; ++ return comm_wait_dll_timeout(host, offset, mask, WAIT_MAX_TIMEOUT); ++} ++ ++int plat_wait_p4_dll_lock(struct sdhci_host *host) ++{ ++ unsigned int offset, mask; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ offset = nebula->info->crg_ofs[CRG_DLL_STA]; ++ mask = nebula->mask->p4_lock_mask; ++ return comm_wait_dll_timeout(host, offset, mask, WAIT_MAX_TIMEOUT); ++} ++ ++int plat_wait_ds_dll_ready(struct sdhci_host *host) ++{ ++ unsigned int offset, mask; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ offset = nebula->info->crg_ofs[CRG_DLL_STA]; ++ mask = nebula->mask->dll_ready_mask; ++ return comm_wait_dll_timeout(host, offset, mask, WAIT_MAX_TIMEOUT); ++} ++ ++void plat_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ u16 ctrl; ++ struct sdhci_host *host = mmc_priv(mmc); ++ ++ ctrl = sdhci_readw(host, SDHCI_EMMC_CTRL); ++ if (ios->enhanced_strobe) ++ ctrl |= SDHCI_ENH_STROBE_EN; ++ else ++ ctrl &= ~SDHCI_ENH_STROBE_EN; ++ ++ sdhci_writew(host, ctrl, SDHCI_EMMC_CTRL); ++} ++ ++void plat_get_drv_samp_phase(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const nebula_info *info = nebula->info; ++ nebula_timing *timing_data = NULL; ++ ++ if ((info->timing_size == 0) || host->timing > info->timing_size) { ++ pr_err("%s: warning: check fixed timing %d\n", ++ mmc_hostname(host->mmc), host->timing); ++ return; ++ } ++ ++ /* choice host timing data */ ++ timing_data = info->timing + host->timing; ++ if (timing_data->data_valid == false) { ++ /* choice legacy mode timing */ ++ timing_data = info->timing; ++ } ++ ++ if (timing_data->data_valid == true) { 
++		nebula->drv_phase = (timing_data->phase[DRV_PHASE] & TIMING_MASK);
++		if (is_timing_valid(timing_data->phase[SAMP_PHASE])) {
++			nebula->sample_phase = (timing_data->phase[SAMP_PHASE] & TIMING_MASK);
++		} else {
++			nebula->sample_phase = nebula->tuning_phase;
++		}
++	} else {
++		pr_err("%s: warning: default timing data invalid\n",
++			mmc_hostname(host->mmc));
++	}
++}
++
++void plat_set_drv_phase(struct sdhci_host *host, u32 phase)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_DRV_DLL];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg &= ~(nebula->mask->drv_phase_mask);
++	reg |= phase;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++#ifdef CONFIG_MMC_SDHCI_ANT
++u32 plat_get_sample_phase(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_CLK_RST] + CRG_SAMP_DLL_OFFSET;
++	regmap_read(nebula->crg_regmap, offset, &reg);
++
++	return (reg >> COMM_PHASE_SEL_SHIFT) & COMM_PHASE_SEL_MASK;
++}
++
++void plat_set_sample_phase(struct sdhci_host *host, unsigned int phase)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_CLK_RST] + CRG_SAMP_DLL_OFFSET;
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg &= ~(COMM_PHASE_SEL_MASK);
++	reg |= phase << COMM_PHASE_SEL_SHIFT;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++#endif
++
++static void priv_set_drv_cap_byte(struct sdhci_host *host, nebula_timing *timing_data)
++{
++	int idx, bus_width;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++	const nebula_info *info = nebula->info;
++	struct mmc_ios *ios = &host->mmc->ios;
++
++	/* clk/cmd/rst timing setting */
++	for (idx = 0; idx < IO_TYPE_MAX; idx++) {
++		if (is_timing_valid(timing_data->timing[idx])) {
++			comm_set_regmap_byte(nebula->iocfg_regmap, info->io_offset[idx], \
++				info->io_drv_mask, timing_data->timing[idx] & TIMING_MASK);
++		}
++	}
++
++	/* apply the data-line drive strength across the active bus width */
++	if (is_timing_valid(timing_data->timing[IO_TYPE_DATA])) {
++		/* data0 line set already */
++		bus_width = (1 << ios->bus_width) - 1;
++		for (idx = IO_TYPE_D1; (idx < IO_TYPE_DMAX) && (bus_width != 0); idx++) {
++			bus_width--;
++			comm_set_regmap_byte(nebula->iocfg_regmap, info->io_offset[idx], \
++				info->io_drv_mask, timing_data->timing[IO_TYPE_DATA] & TIMING_MASK);
++		}
++	}
++}
++
++void plat_set_drv_cap(struct sdhci_host *host)
++{
++	int idx, bus_width;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++	const nebula_info *info = nebula->info;
++	nebula_timing *timing_data = NULL;
++	struct mmc_ios *ios = &host->mmc->ios;
++
++	if (nebula->priv_quirk & NEBULA_QUIRK_FPGA) {
++		return;
++	}
++
++	if ((info->timing_size == 0) || host->timing >= info->timing_size) {
++		pr_err("%s: warning: invalid fixed timing %d\n",
++			mmc_hostname(host->mmc), host->timing);
++		return;
++	}
++
++	/* choose the timing data for the current host timing */
++	timing_data = info->timing + host->timing;
++	if (timing_data->data_valid == false) {
++		/* fall back to the legacy mode timing */
++		timing_data = info->timing;
++	}
++
++	if (nebula->priv_quirk & NEBULA_QUIRK_IO_CFG_WIDTH_BYTE) {
++		priv_set_drv_cap_byte(host, timing_data);
++		return;
++	}
++
++	/* clk/cmd/rst timing setting */
++	for (idx = 0; idx < IO_TYPE_MAX; idx++) {
++		if (is_timing_valid(timing_data->timing[idx])) {
++			comm_set_regmap(nebula->iocfg_regmap, info->io_offset[idx], \
++				info->io_drv_mask, timing_data->timing[idx] & TIMING_MASK);
++		}
++	}
++
++	/* apply the data-line drive strength across the active bus width */
++	if (is_timing_valid(timing_data->timing[IO_TYPE_DATA])) {
++		/* data0 line set already */
++		bus_width = (1 << ios->bus_width) - 1;
++		for (idx = IO_TYPE_D1; (idx < IO_TYPE_DMAX) && (bus_width != 0); idx++) {
++			bus_width--;
++			comm_set_regmap(nebula->iocfg_regmap, info->io_offset[idx], \
++				info->io_drv_mask, timing_data->timing[IO_TYPE_DATA] & TIMING_MASK);
++		}
++	}
++}
++
++void plat_dll_reset_assert(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_DLL_RST];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg |= nebula->mask->dll_srst_mask;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++void plat_dll_reset_deassert(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_DLL_RST];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg &= ~nebula->mask->dll_srst_mask;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++static void comm_crg_enable_clock(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_CLK_RST];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg |= nebula->mask->crg_cken_mask;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++static void comm_crg_reset_assert(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_CLK_RST];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg |= nebula->mask->crg_srst_mask;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++static void comm_crg_reset_deassert(struct sdhci_host *host)
++{
++	unsigned int reg, offset;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	offset = nebula->info->crg_ofs[CRG_CLK_RST];
++	regmap_read(nebula->crg_regmap, offset, &reg);
++	reg &= ~nebula->mask->crg_srst_mask;
++	regmap_write(nebula->crg_regmap, offset, reg);
++}
++
++int plat_crg_init(struct sdhci_host *host)
++{
++	int ret;
++	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	ret = clk_prepare_enable(pltfm_host->clk);
++	if (ret) {
++		pr_err("%s: enable mmc clk failed\n", mmc_hostname(host->mmc));
++		return ret;
++	}
++
++	if ((nebula->priv_cap & NEBULA_CAP_QUICK_BOOT) != 0) {
++		return ERET_SUCCESS;
++	}
++
++	if (nebula->priv_cap & NEBULA_CAP_RST_IN_DRV) {
++		comm_crg_enable_clock(host);
++		comm_crg_reset_assert(host);
++		plat_dll_reset_assert(host);
++
++		udelay(25); /* delay 25 us */
++		comm_crg_reset_deassert(host);
++		udelay(10); /* delay 10 us */
++	} else {
++		reset_control_assert(nebula->crg_rst);
++		reset_control_assert(nebula->crg_tx);
++		reset_control_assert(nebula->crg_rx);
++
++		reset_control_assert(nebula->dll_rst);
++
++		udelay(25); /* delay 25 us */
++		reset_control_deassert(nebula->crg_rst);
++		reset_control_deassert(nebula->crg_tx);
++		reset_control_deassert(nebula->crg_rx);
++
++		udelay(10); /* delay 10 us */
++	}
++
++	return ERET_SUCCESS;
++}
++
++/* Do ZQ resistance calibration for eMMC PHY IO */
++static int comm_resistance_calibration(struct sdhci_host *host)
++{
++	int i;
++	u32 reg_val;
++	void __iomem *viraddr = NULL;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	if (nebula->info->zq_phy_addr == 0) {
++		pr_err("%s: zq_phy_addr is invalid.\n", mmc_hostname(host->mmc));
++		return -EINVAL;
++	}
++
++	viraddr = ioremap(nebula->info->zq_phy_addr, sizeof(u32));
++	if (viraddr == NULL) {
++		pr_err("%s: io calibration ioremap error.\n", mmc_hostname(host->mmc));
++		return -ENOMEM;
++	}
++
++	reg_val = readl(viraddr);
++	reg_val |= EMMC_ZQ_INIT_EN | EMMC_ZQ_ZCAL_EN;
++	writel(reg_val, viraddr);
++
++	for (i = 0; i < EMMC_ZQ_CHECK_TIMES; i++) {
++		reg_val = readl(viraddr);
++		if ((reg_val & (EMMC_ZQ_INIT_EN | EMMC_ZQ_ZCAL_EN)) == 0) {
++			iounmap(viraddr);
++			return ERET_SUCCESS;
++		}
++		udelay(10); /* delay 10 us */
++	}
++
++	iounmap(viraddr);
++	return -ETIMEDOUT;
++}
++
++int plat_resistance_calibration(struct sdhci_host *host)
++{
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	if ((nebula->priv_cap & NEBULA_CAP_QUICK_BOOT) != 0) {
++		return ERET_SUCCESS;
++	}
++
++	if (nebula->priv_cap & NEBULA_CAP_ZQ_CALB) {
++		return comm_resistance_calibration(host);
++	}
++
++	return ERET_SUCCESS;
++}
++
++#ifdef CONFIG_MMC_SDHCI_ANT
++static int comm_voltage_switch(struct sdhci_host *host, const struct mmc_ios *ios)
++{
++	u32 reg;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	regmap_read(nebula->crg_regmap, nebula->info->crg_ofs[CRG_CLK_RST], &reg);
++	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
++		reg |= BIT(9); /* bit9: voltage switch */
++	else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
++		reg &= ~BIT(9); /* bit9: voltage switch */
++	regmap_write(nebula->crg_regmap, nebula->info->crg_ofs[CRG_CLK_RST], reg);
++
++	return ERET_SUCCESS;
++}
++#else
++static int comm_voltage_switch(struct sdhci_host *host, const struct mmc_ios *ios)
++{
++	u32 ctrl;
++	void __iomem *viraddr;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	if (ios == NULL || nebula->info->volt_sw_phy_addr == 0) {
++		pr_err("%s: ios or volt_sw_phy_addr is invalid.\n",
++			mmc_hostname(host->mmc));
++		return -EINVAL;
++	}
++
++	viraddr = ioremap(nebula->info->volt_sw_phy_addr, sizeof(u32));
++	if (viraddr == NULL) {
++		pr_err("%s: volt switch ioremap error.\n", mmc_hostname(host->mmc));
++		return -ENOMEM;
++	}
++
++	ctrl = readl(viraddr);
++	ctrl |= nebula->mask->volt_sw_en_mask;
++	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
++		ctrl |= nebula->mask->volt_sw_1v8_mask;
++	else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
++		ctrl &= ~nebula->mask->volt_sw_1v8_mask;
++
++	writel(ctrl, viraddr);
++
++	usleep_range(1000, 2000); /* Sleep between 1000 and 2000 us */
++
++	iounmap(viraddr);
++
++	return ERET_SUCCESS;
++}
++#endif
++
++int plat_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios)
++{
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	if (nebula->priv_cap & NEBULA_CAP_VOLT_SW) {
++		return comm_voltage_switch(host, ios);
++	}
++
++	return ERET_SUCCESS;
++}
++
++void __weak plat_caps_quirks_init(struct sdhci_host *host)
++{
++}
++
++void __weak plat_extra_init(struct sdhci_host *host)
++{
++}
++
++void __weak plat_dump_io_info(struct sdhci_host *host)
++{
++	int idx, bus_width;
++	u32 reg0, reg1;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++	const nebula_info *info = nebula->info;
++
++	/* Clk / Cmd */
++	comm_get_io_data(host, info->io_offset[IO_TYPE_CLK], &reg0);
++	comm_get_io_data(host, info->io_offset[IO_TYPE_CMD], &reg1);
++	sdhci_nebula_dump("Clk io: 0x%08x | Cmd io: 0x%08x\n", reg0, reg1);
++
++	if (info->io_offset[IO_TYPE_RST] != INVALID_DATA && \
++		info->io_offset[IO_TYPE_DQS] != INVALID_DATA) {
++		/* Rst/Detect Dqs/Power_en */
++		comm_get_io_data(host, info->io_offset[IO_TYPE_RST], &reg0);
++		comm_get_io_data(host, info->io_offset[IO_TYPE_DQS], &reg1);
++		sdhci_nebula_dump("Rst/Det: 0x%08x | Dqs/Pwen: 0x%08x\n", reg0, reg1);
++	}
++
++	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_1) {
++		comm_get_io_data(host, info->io_offset[IO_TYPE_D0], &reg0);
++		sdhci_nebula_dump("Data0 io: 0x%08x\n", reg0);
++		return;
++	}
++
++	bus_width = (1 << host->mmc->ios.bus_width);
++	for (idx = 0; idx < bus_width; idx += DUMP_DATA_IO_STEP) {
++		comm_get_io_data(host, info->io_offset[idx + IO_TYPE_D0], &reg0);
++		comm_get_io_data(host, info->io_offset[idx + IO_TYPE_D0 + 1], &reg1);
++		sdhci_nebula_dump("Data%d io: 0x%08x | Data%d io: 0x%08x\n",
++			idx, reg0, idx + 1, reg1);
++	}
++}
++
++static u32 comm_get_mmc_bus_width(struct sdhci_host *host)
++{
++	void __iomem *sys_stat_reg;
++	unsigned int sys_stat;
++	unsigned int bus_width;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	if (nebula->info->bus_width_phy_addr == 0) {
++		pr_err("%s: bus_width_phy_addr is invalid.\n", mmc_hostname(host->mmc));
++		return -EINVAL;
++	}
++
++	sys_stat_reg = ioremap(nebula->info->bus_width_phy_addr, sizeof(u32));
++	if (sys_stat_reg == NULL) {
++		pr_err("%s: bus width ioremap error.\n", mmc_hostname(host->mmc));
++		return -ENOMEM;
++	}
++
++	sys_stat = readl(sys_stat_reg);
++	iounmap(sys_stat_reg);
++
++	if ((sys_stat & BOOT_FLAG_MASK) == BOOT_MEDIA_EMMC) {
++		bus_width = ((sys_stat & EMMC_BOOT_8BIT) != 0) ?
++			MMC_BUS_WIDTH_8 : MMC_BUS_WIDTH_4;
++	} else {
++		/* at most 4-bit mode is supported when booting from spi nand */
++		bus_width = MMC_BUS_WIDTH_4;
++	}
++
++	return bus_width;
++}
++
++void __maybe_unused plat_set_mmc_bus_width(struct sdhci_host *host)
++{
++	u32 bus_width;
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	/* for eMMC devices only */
++	if (nebula->devid == MMC_DEV_TYPE_MMC_0) {
++		bus_width = comm_get_mmc_bus_width(host);
++		if (bus_width == MMC_BUS_WIDTH_8) {
++			host->mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
++		} else {
++			host->mmc->caps |= MMC_CAP_4_BIT_DATA;
++			host->mmc->caps &= ~MMC_CAP_8_BIT_DATA;
++		}
++	}
++}
++
++void __maybe_unused plat_comm_caps_quirks_init(struct sdhci_host *host)
++{
++	struct sdhci_nebula *nebula = nebula_priv(host);
++
++	/*
++	 * only eMMC has a hw reset; eMMC and NM cards are fixed at 1.8V
++	 */
++	if ((host->mmc->caps & MMC_CAP_HW_RESET)
++		|| (nebula->priv_cap & NEBULA_CAP_NM_CARD)) {
++		host->flags &= ~SDHCI_SIGNALING_330;
++		host->flags |= SDHCI_SIGNALING_180;
++	}
++
++	/*
++	 * we parse the supported timings from the DTS, so we read the
++	 * host capabilities early and clear the timing capabilities;
++	 * SDHCI_QUIRK_MISSING_CAPS is set so that the sdhci driver
++	 * does not read them again
++	 */
++	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++	host->caps &= ~(SDHCI_CAN_DO_HISPD | SDHCI_CAN_VDD_300);
++	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++	host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
++		SDHCI_SUPPORT_DDR50 | SDHCI_CAN_DO_ADMA3);
++	host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
++		SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
++		SDHCI_QUIRK_SINGLE_POWER_WRITE;
++	host->quirks2 &= ~SDHCI_QUIRK2_ACMD23_BROKEN;
++}
++
++void plat_set_emmc_type(struct sdhci_host *host)
++{
++#ifndef CONFIG_MMC_SDHCI_ANT
++	u32 ctrl;
++
++	ctrl = sdhci_readl(host, SDHCI_EMMC_CTRL);
++	ctrl |= SDHCI_CARD_IS_EMMC;
++	sdhci_writel(host, ctrl, SDHCI_EMMC_CTRL);
++#endif
++}
+diff --git a/drivers/vendor/mmc/platform/platform_priv.h b/drivers/vendor/mmc/platform/platform_priv.h
+new file mode 100644
+index 000000000..97ff3c3bc
+--- /dev/null
++++ b/drivers/vendor/mmc/platform/platform_priv.h
+@@ -0,0 +1,224 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved.
++ * Description: Nebula SDHCI platform priv header
++ * Author: AuthorNameMagicTag
++ * Create: 2022-11-16
++ */
++
++#ifndef _DRIVERS_MMC_NEBULA_PLATFORM_H
++#define _DRIVERS_MMC_NEBULA_PLATFORM_H
++
++/* the CRG and IO offset macros used below are defined by the including .c file */
++
++#define DUMP_DATA_IO_STEP 2
++
++#define WAIT_MAX_TIMEOUT 20
++
++/* ZQ resistance calibration configuration */
++#define EMMC_ZQ_INIT_EN 0x1
++#define EMMC_ZQ_ZCAL_EN (0x1 << 3)
++#define EMMC_ZQ_CHECK_TIMES 100
++
++/* Voltage switch configuration */
++#ifdef NEBULA_VOLT_SW_BVT
++#define PWR_CTRL_BY_MISC BIT(0)
++#define PWR_CTRL_BY_MISC_EN BIT(4)
++#define PWR_CRTL_EN (PWR_CTRL_BY_MISC | PWR_CTRL_BY_MISC_EN)
++#define IO_MODE_SEL_1V8 BIT(1)
++#define PWRSW_SEL_1V8 BIT(5)
++#define PWR_CRTL_1V8 (IO_MODE_SEL_1V8 | PWRSW_SEL_1V8)
++#else
++#define PWR_CRTL_EN BIT(0)
++#define PWR_CRTL_1V8 BIT(2)
++#endif
++
++#define BOOT_MEDIA_EMMC 0xC
++#define EMMC_BOOT_8BIT BIT(11)
++#define BOOT_FLAG_MASK (0x3 << 2)
++
++#ifndef CRG_CLK_BIT_OFS
++#define CRG_CLK_BIT_OFS INVALID_DATA
++#define CRG_CLK_SEL_MASK INVALID_DATA
++#endif
++
++/* Helper macro to create the nebula crg mask info struct */
++#if defined(CRG_SRST_REQ) && defined(CRG_DLL_SRST_REQ)
++#define NEBULA_CRG_MASK_DESC \
++{ \
++	.crg_srst_mask = CRG_SRST_REQ, \
++	.crg_clk_sel_ofs = CRG_CLK_BIT_OFS, \
++	.crg_clk_sel_mask = CRG_CLK_SEL_MASK, \
++	.crg_cken_mask = CRG_CLK_EN_MASK, \
++	.dll_srst_mask = CRG_DLL_SRST_REQ, \
++	.p4_lock_mask = CRG_P4_DLL_LOCKED, \
++	.dll_ready_mask = CRG_DS_DLL_READY, \
++	.samp_ready_mask = CRG_SAM_DLL_READY, \
++	.drv_phase_mask = CRG_DRV_PHASE_MASK, \
++	.volt_sw_en_mask = PWR_CRTL_EN, \
++	.volt_sw_1v8_mask = PWR_CRTL_1V8, \
++}
++#endif
++
++#ifndef EMMC_ZQ_CTRL_PHY_ADDR
++#define EMMC_ZQ_CTRL_PHY_ADDR 0x0
++#endif
++#ifndef EMMC_VOLT_SW_PHY_ADDR
++#define EMMC_VOLT_SW_PHY_ADDR 0x0
++#endif
++#ifndef EMMC_BUS_WIDTH_PHY_ADDR
++#define EMMC_BUS_WIDTH_PHY_ADDR 0x0
++#endif
++
++#ifndef EMMC_QUICK_BOOT_PHY_ADDR
++#define EMMC_QUICK_BOOT_PHY_ADDR INVALID_DATA
++#define EMMC_QUICK_BOOT_PARAM1_OFS INVALID_DATA
++#endif
++
++/* Helper macro to create the nebula emmc info description struct */
++#if defined(EMMC_CRG_CLK_OFS) && defined(EMMC_D7_GPIO_OFS)
++#define nebula_emmc_info_desc(_timing) \
++{ \
++	.io_offset = {EMMC_CLK_GPIO_OFS, EMMC_CMD_GPIO_OFS, \
++		EMMC_RSTN_GPIO_OFS, EMMC_DQS_GPIO_OFS, \
++		EMMC_D0_GPIO_OFS, EMMC_D1_GPIO_OFS, \
++		EMMC_D2_GPIO_OFS, EMMC_D3_GPIO_OFS, \
++		EMMC_D4_GPIO_OFS, EMMC_D5_GPIO_OFS, \
++		EMMC_D6_GPIO_OFS, EMMC_D7_GPIO_OFS}, \
++	.io_drv_mask = DRV_STR_MASK_GPIO | SR_STR_MASK_GPIO, \
++	.io_drv_str_bit_ofs = DRV_STR_SHIFT, \
++	.io_drv_str_mask = DRV_STR_MASK_GPIO, \
++	.io_drv_sr_bit_ofs = SR_STR_SHIFT, \
++	.io_drv_sr_mask = SR_STR_MASK_GPIO, \
++	.crg_ofs = {EMMC_CRG_CLK_OFS, EMMC_DLL_RST_OFS, EMMC_DRV_DLL_OFS, EMMC_DLL_STA_OFS}, \
++	.zq_phy_addr = EMMC_ZQ_CTRL_PHY_ADDR, \
++	.volt_sw_phy_addr = EMMC_VOLT_SW_PHY_ADDR, \
++	.bus_width_phy_addr = EMMC_BUS_WIDTH_PHY_ADDR, \
++	.qboot_phy_addr = EMMC_QUICK_BOOT_PHY_ADDR, \
++	.qboot_param1_ofs = EMMC_QUICK_BOOT_PARAM1_OFS, \
++	.timing_size = ARRAY_SIZE(_timing), \
++	.timing = (_timing), \
++}
++#endif
++
++#ifndef SDIO0_ZQ_CTRL_PHY_ADDR
++#define SDIO0_ZQ_CTRL_PHY_ADDR 0x0
++#endif
++
++#ifndef SDIO0_VOLT_SW_PHY_ADDR
++#define SDIO0_VOLT_SW_PHY_ADDR 0x0
++#endif
++
++#ifndef SDIO0_DETECT_OFS
++#define SDIO0_DETECT_OFS INVALID_DATA
++#endif
++
++#ifndef SDIO0_PWEN_OFS
++#define SDIO0_PWEN_OFS INVALID_DATA
++#endif
++
++/* Helper macro to create the nebula sdio0 info description struct */
++#if defined(SDIO0_CLK_OFS) && defined(SDIO0_CRG_CLK_OFS)
++#define nebula_sdio0_info_desc(_timing) \
++{ \
++	.io_offset = {SDIO0_CLK_OFS, SDIO0_CMD_OFS, SDIO0_DETECT_OFS, \
++		SDIO0_PWEN_OFS, SDIO0_D0_OFS, \
++		SDIO0_D1_OFS, SDIO0_D2_OFS, SDIO0_D3_OFS}, \
++	.io_drv_mask = DRV_STR_MASK_GPIO | SR_STR_MASK_GPIO, \
++	.crg_ofs = {SDIO0_CRG_CLK_OFS, SDIO0_DLL_RST_OFS, SDIO0_DRV_DLL_OFS, SDIO0_DLL_STA_OFS}, \
++	.zq_phy_addr = SDIO0_ZQ_CTRL_PHY_ADDR, \
++	.volt_sw_phy_addr = SDIO0_VOLT_SW_PHY_ADDR, \
++	.timing_size = ARRAY_SIZE(_timing), \
++	.timing = (_timing), \
++}
++#endif
++
++#ifndef SDIO1_ZQ_CTRL_PHY_ADDR
++#define SDIO1_ZQ_CTRL_PHY_ADDR 0x0
++#endif
++
++#ifndef SDIO1_VOLT_SW_PHY_ADDR
++#define SDIO1_VOLT_SW_PHY_ADDR 0x0
++#endif
++
++#ifndef SDIO1_DETECT_OFS
++#define SDIO1_DETECT_OFS INVALID_DATA
++#endif
++
++#ifndef SDIO1_PWEN_OFS
++#define SDIO1_PWEN_OFS INVALID_DATA
++#endif
++
++/* Helper macro to create the nebula sdio1 info description struct */
++#if defined(SDIO1_CLK_OFS) && defined(SDIO1_CRG_CLK_OFS)
++#define nebula_sdio1_info_desc(_timing) \
++{ \
++	.io_offset = {SDIO1_CLK_OFS, SDIO1_CMD_OFS, SDIO1_DETECT_OFS, \
++		SDIO1_PWEN_OFS, SDIO1_D0_OFS, \
++		SDIO1_D1_OFS, SDIO1_D2_OFS, SDIO1_D3_OFS}, \
++	.io_drv_mask = DRV_STR_MASK_GPIO | SR_STR_MASK_GPIO, \
++	.crg_ofs = {SDIO1_CRG_CLK_OFS, SDIO1_DLL_RST_OFS, SDIO1_DRV_DLL_OFS, SDIO1_DLL_STA_OFS}, \
++	.zq_phy_addr = SDIO1_ZQ_CTRL_PHY_ADDR, \
++	.volt_sw_phy_addr = SDIO1_VOLT_SW_PHY_ADDR, \
++	.timing_size = ARRAY_SIZE(_timing), \
++	.timing = (_timing), \
++}
++#endif
++
++/* Helper macro to create the nebula emmc high speed info description struct */
++#if defined(EMMC_D0_IO_OFS) && defined(EMMC_CRG_CLK_OFS)
++#define nebula_emmc_hsio_info_desc(_timing) \
++{ \
++	.io_offset = {EMMC_CLK_IO_OFS, EMMC_CMD_IO_OFS, \
++		EMMC_RSTN_IO_OFS, EMMC_DQS_IO_OFS, \
++		EMMC_D0_IO_OFS, EMMC_D1_IO_OFS, \
++		EMMC_D2_IO_OFS, EMMC_D3_IO_OFS, \
++		EMMC_D4_IO_OFS, EMMC_D5_IO_OFS, \
++		EMMC_D6_IO_OFS, EMMC_D7_IO_OFS}, \
++	.io_drv_mask = DRV_STR_MASK_IO, \
++	.io_drv_str_bit_ofs = DRV_STR_SHIFT, \
++	.io_drv_str_mask = DRV_STR_MASK_IO, \
++	.io_drv_sr_bit_ofs = SR_STR_SHIFT, \
++	.io_drv_sr_mask = SR_STR_MASK_GPIO, \
++	.crg_ofs = {EMMC_CRG_CLK_OFS, EMMC_DLL_RST_OFS, EMMC_DRV_DLL_OFS, EMMC_DLL_STA_OFS}, \
++	.zq_phy_addr = EMMC_ZQ_CTRL_PHY_ADDR, \
++	.volt_sw_phy_addr = EMMC_VOLT_SW_PHY_ADDR, \
++	.qboot_phy_addr = EMMC_QUICK_BOOT_PHY_ADDR, \
++	.qboot_param1_ofs = EMMC_QUICK_BOOT_PARAM1_OFS, \
++	.timing_size = ARRAY_SIZE(_timing), \
++	.timing = (_timing), \
++}
++#endif
++
++#define TIMING_MASK 0x7FFFFFFF
++#define TIMING_VALID 0x80000000
++#define is_timing_valid(x) (((x) & TIMING_VALID) == TIMING_VALID)
++
++/* General GPIO CFG */
++#if defined(DRV_STR_SHIFT) && defined(SR_STR_SHIFT)
++#define gpio_drv_sel(str) ((str) << DRV_STR_SHIFT)
++#define gpio_sr_sel(sr) ((sr) << SR_STR_SHIFT)
++#define fixed_gpio_drv(drv_sel, sr_sel) \
++	(TIMING_VALID | gpio_drv_sel(drv_sel) | gpio_sr_sel(sr_sel))
++#endif
++
++/* High speed IO CFG */
++#if defined(DRV_STR_SHIFT)
++#define hsio_drv_sel(str) ((str) << DRV_STR_SHIFT)
++#define fixed_hsio_drv(drv_sel) (TIMING_VALID | hsio_drv_sel(drv_sel))
++#endif
++
++#ifdef CRG_DRV_PHASE_SHIFT
++/* Set drv phase only */
++#define fixed_drv_phase_only(drv_phase) \
++	.phase[DRV_PHASE] = (TIMING_VALID | ((drv_phase) << CRG_DRV_PHASE_SHIFT))
++
++/* Set drv and sample phase */
++#define fixed_drv_samp_phase(drv_phase, samp_phase) \
++	.phase[DRV_PHASE] = (TIMING_VALID | ((drv_phase) << CRG_DRV_PHASE_SHIFT)), \
++	.phase[SAMP_PHASE] = (TIMING_VALID | (samp_phase))
++#endif
++
++struct sdhci_host;
++void plat_comm_caps_quirks_init(struct sdhci_host *host);
++
++#endif /* _DRIVERS_MMC_NEBULA_PLATFORM_H */
+diff --git a/drivers/vendor/mmc/platform/platform_timing.h b/drivers/vendor/mmc/platform/platform_timing.h
+new file mode 100644
+index 000000000..b72fe5b41
+--- /dev/null
++++ b/drivers/vendor/mmc/platform/platform_timing.h
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved.
++ * Description: Nebula SDHCI platform priv timing header
++ * Author: AuthorNameMagicTag
++ * Create: 2022-12-28
++ */
++
++#ifndef _DRIVERS_MMC_NEBULA_PLATFORM_TIMING_H
++#define _DRIVERS_MMC_NEBULA_PLATFORM_TIMING_H
++
++enum drv_timing_lvl {
++	TM_LVL_0 = 0,
++	TM_LVL_1,
++	TM_LVL_2,
++	TM_LVL_3,
++	TM_LVL_4,
++	TM_LVL_5,
++	TM_LVL_6,
++	TM_LVL_7,
++	TM_LVL_8,
++	TM_LVL_9,
++	TM_LVL_10,
++	TM_LVL_11,
++	TM_LVL_12,
++	TM_LVL_13,
++	TM_LVL_14,
++	TM_LVL_15,
++	TM_LVL_MAX
++};
++
++enum sr_lvl {
++	SR_LVL_0 = 0,
++	SR_LVL_1,
++	SR_LVL_2,
++	SR_LVL_3,
++	SR_LVL_MAX
++};
++
++enum phase_lvl {
++	PHASE_LVL_0,	/* 0 degrees */
++	PHASE_LVL_1,	/* 11.25 degrees */
++	PHASE_LVL_2,	/* 22.5 degrees */
++	PHASE_LVL_3,	/* 33.75 degrees */
++	PHASE_LVL_4,	/* 45 degrees */
++	PHASE_LVL_5,	/* 56.25 degrees */
++	PHASE_LVL_6,	/* 67.5 degrees */
++	PHASE_LVL_7,	/* 78.75 degrees */
++	PHASE_LVL_8,	/* 90 degrees */
++	PHASE_LVL_9,	/* 101.25 degrees */
++	PHASE_LVL_10,	/* 112.5 degrees */
++	PHASE_LVL_11,	/* 123.75 degrees */
++	PHASE_LVL_12,	/* 135 degrees */
++	PHASE_LVL_13,	/* 146.25 degrees */
++	PHASE_LVL_14,	/* 157.5 degrees */
++	PHASE_LVL_15,	/* 168.75 degrees */
++	PHASE_LVL_16,	/* 180 degrees */
++	PHASE_LVL_17,	/* 191.25 degrees */
++	PHASE_LVL_18,	/* 202.5 degrees */
++	PHASE_LVL_19,	/* 213.75 degrees */
++	PHASE_LVL_20,	/* 225 degrees */
++	PHASE_LVL_21,	/* 236.25 degrees */
++	PHASE_LVL_22,	/* 247.5 degrees */
++	PHASE_LVL_23,	/* 258.75 degrees */
++	PHASE_LVL_24,	/* 270 degrees */
++	PHASE_LVL_25,	/* 281.25 degrees */
++	PHASE_LVL_26,	/* 292.5 degrees */
++	PHASE_LVL_27,	/* 303.75 degrees */
++	PHASE_LVL_28,	/* 315 degrees */
++	PHASE_LVL_29,	/* 326.25 degrees */
++	PHASE_LVL_30,	/* 337.5 degrees */
++	PHASE_LVL_31,	/* 348.75 degrees */
++};
++
++#endif
+diff --git a/drivers/vendor/mmc/platform/sdhci_hi3519dv500.c b/drivers/vendor/mmc/platform/sdhci_hi3519dv500.c
+new file mode 100644
+index 000000000..72d24736b
+--- /dev/null
++++ b/drivers/vendor/mmc/platform/sdhci_hi3519dv500.c
+@@ -0,0 +1,354 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved.
++ * Description: Nebula SDHCI for hi3519dv500 ++ * Author: AuthorNameMagicTag ++ * Create: 2023-2-16 ++ */ ++#include ++ ++#include "sdhci.h" ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x34c0 ++#define SDIO0_CRG_CLK_OFS 0x35c0 ++#define SDIO1_CRG_CLK_OFS 0x36c0 ++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18)) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1)) ++#define CRG_CLK_BIT_OFS 24 ++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x34c4 ++#define SDIO0_DLL_RST_OFS 0x35c4 ++#define SDIO1_DLL_RST_OFS 0x36c4 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x34c8 ++#define SDIO0_DRV_DLL_OFS 0x35c8 ++#define SDIO1_DRV_DLL_OFS 0x36c8 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x34d8 ++#define SDIO0_DLL_STA_OFS 0x35d8 ++#define SDIO1_DLL_STA_OFS 0x36d8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_GPIO (0xF << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 10 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++#define NEBULA_VOLT_SW_BVT 1 ++ ++/* EMMC IO register offset */ ++#define EMMC_CLK_GPIO_OFS 0x34 ++#define EMMC_CMD_GPIO_OFS 0x38 ++#define EMMC_RSTN_GPIO_OFS 0x4c ++#define EMMC_DQS_GPIO_OFS 0x50 ++#define EMMC_D0_GPIO_OFS 0x3c ++#define EMMC_D1_GPIO_OFS 0x40 ++#define EMMC_D2_GPIO_OFS 0x44 ++#define EMMC_D3_GPIO_OFS 0x48 ++#define EMMC_D4_GPIO_OFS 0x24 ++#define EMMC_D5_GPIO_OFS 0x28 ++#define EMMC_D6_GPIO_OFS 0x2c ++#define EMMC_D7_GPIO_OFS 0x30 ++ ++#define EMMC_BUS_WIDTH_PHY_ADDR 0x11020018 ++#define EMMC_QUICK_BOOT_PHY_ADDR 0x11120020 ++#define EMMC_QUICK_BOOT_PARAM1_OFS 0x4 ++ ++/* SDIO0 IO register offset */ ++#define SDIO0_DETECT_OFS 0xd0 ++#define SDIO0_PWEN_OFS 0xd4 ++#define SDIO0_CLK_OFS 0xcc ++#define SDIO0_CMD_OFS 0xb8 ++#define SDIO0_D0_OFS 0xbc ++#define SDIO0_D1_OFS 0xc0 ++#define SDIO0_D2_OFS 0xc4 ++#define SDIO0_D3_OFS 0xc8 ++ ++/* Voltage switch physical address */ ++#define SDIO0_VOLT_SW_PHY_ADDR 0x11024700 ++ ++/* SDIO1 IO register offset */ ++#define SDIO1_CLK_OFS 0xb0 ++#define SDIO1_CMD_OFS 0xb4 ++#define SDIO1_D0_OFS 0xa0 ++#define SDIO1_D1_OFS 0xa4 ++#define SDIO1_D2_OFS 0xa8 ++#define SDIO1_D3_OFS 0xac ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* EMMC fixed timing parameter */ ++static nebula_timing g_timing_gpio_emmc[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = 
fixed_gpio_drv(TM_LVL_12, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_12, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_18), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_9), ++ }, ++}; ++ ++/* EMMC info struct */ ++static const nebula_info g_gpio_emmc_info = \ ++ nebula_emmc_info_desc(g_timing_gpio_emmc); ++ ++/* SDIO0 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio0[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_19, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_6, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_6, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_12, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_6, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_6, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++}; ++ ++/* SDIO0 info struct */ ++static const nebula_info g_gpio_sdio0_info = \ ++ nebula_sdio0_info_desc(g_timing_gpio_sdio0); ++ ++/* SDIO1 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio1[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = 
fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_11, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_11, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_12, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_19), ++ }, ++}; ++ ++/* SDIO1 info struct */ ++static const nebula_info g_gpio_sdio1_info = \ ++ nebula_sdio1_info_desc(g_timing_gpio_sdio1); ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->info = &g_gpio_sdio0_info; ++ nebula->priv_cap |= NEBULA_CAP_VOLT_SW; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_1) { ++ nebula->info = &g_gpio_sdio1_info; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_extra_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ plat_set_emmc_type(host); ++ } ++} ++ ++static void priv_plat_mux_init(struct sdhci_host *host) ++{ ++ u32 i; ++ u32 bus_width = 1; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const nebula_info *info = nebula->info; ++ ++ if (host->mmc->caps & MMC_CAP_8_BIT_DATA) { ++ bus_width = (1 << MMC_BUS_WIDTH_8); ++ } else if (host->mmc->caps & MMC_CAP_4_BIT_DATA) { ++ bus_width = (1 << 
MMC_BUS_WIDTH_4); ++ } ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_CLK], \ ++ 0x11e1); /* 0x11e1: pinmux value */ ++ ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_CMD], \ ++ 0x1391); /* 0x1391: pinmux value */ ++ ++ for (i = IO_TYPE_D0; i < (IO_TYPE_D0 + bus_width); i++) ++ regmap_write(nebula->iocfg_regmap, info->io_offset[i], \ ++ (i < IO_TYPE_D4) ? 0x1391 : 0x1393); /* 0x1391, 0x1393: pinmux value */ ++ ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_RST], \ ++ 0x1301); /* 0x1301: pinmux value */ ++ ++ if (host->mmc->caps & MMC_CAP_8_BIT_DATA) ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_DQS], \ ++ 0x1101); /* 0x1101: pinmux value */ ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0 || \ ++ nebula->devid == MMC_DEV_TYPE_SDIO_1) { ++ if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_DET], \ ++ 0x1901); /* 0x1901: pinmux value */ ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_PWE], \ ++ 0x1501); /* 0x1501: pinmux value */ ++ } ++ ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_CLK], \ ++ 0x1101); /* 0x1101: pinmux value */ ++ regmap_write(nebula->iocfg_regmap, info->io_offset[IO_TYPE_CMD], \ ++ 0x1301); /* 0x1301: pinmux value */ ++ ++ for (i = IO_TYPE_D0; i < (IO_TYPE_D0 + bus_width); i++) ++ regmap_write(nebula->iocfg_regmap, info->io_offset[i], \ ++ 0x1301); /* 0x1301: pinmux value */ ++ } ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ plat_comm_caps_quirks_init(host); ++ ++ plat_set_mmc_bus_width(host); ++ if ((nebula->priv_cap & NEBULA_CAP_QUICK_BOOT) == 0) ++ priv_plat_mux_init(host); ++ ++ if ((host->mmc->caps2 & MMC_CAP2_NO_SDIO) == 0) ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_hi3751v811_c.c b/drivers/vendor/mmc/platform/sdhci_hi3751v811_c.c +new file mode 100644 +index 000000000..bb87d4021 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_hi3751v811_c.c +@@ -0,0 +1,117 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2024. All rights reserved. 
++ * Description: Nebula SDHCI for v811 ++ * Author: AuthorNameMagicTag ++ * Create: 2024-3-16 ++ */ ++#include ++ ++#include "sdhci.h" ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x5e0 ++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18)) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1)) ++#define CRG_CLK_BIT_OFS 24 ++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x5e4 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x5e8 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x5f8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 0 ++#define DRV_STR_MASK_GPIO (0xF << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 4 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_DQS_GPIO_OFS 0x1C ++#define EMMC_CLK_GPIO_OFS 0x1D ++#define EMMC_CMD_GPIO_OFS 0x1E ++#define EMMC_D0_GPIO_OFS 0x1F ++#define EMMC_D1_GPIO_OFS 0x20 ++#define EMMC_D2_GPIO_OFS 0x21 ++#define EMMC_D3_GPIO_OFS 0x22 ++#define EMMC_D4_GPIO_OFS 0x23 ++#define EMMC_D5_GPIO_OFS 0x24 ++#define EMMC_D6_GPIO_OFS 0x25 ++#define EMMC_D7_GPIO_OFS 0x26 ++#define EMMC_RSTN_GPIO_OFS 0x27 ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* EMMC fixed timing parameter */ ++static nebula_timing g_timing_gpio_emmc[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_19), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_1, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_1, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_8), ++ }, ++}; ++ ++/* EMMC info struct */ ++static const nebula_info g_gpio_emmc_info = \ ++ nebula_emmc_info_desc(g_timing_gpio_emmc); ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ nebula->priv_quirk |= NEBULA_QUIRK_IO_CFG_WIDTH_BYTE; ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ 
plat_comm_caps_quirks_init(host); ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_hiwing.c b/drivers/vendor/mmc/platform/sdhci_hiwing.c +new file mode 100644 +index 000000000..444c61548 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_hiwing.c +@@ -0,0 +1,188 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. ++ * Description: Nebula SDHCI for hi3519dv500 ++ * Author: AuthorNameMagicTag ++ * Create: 2023-2-16 ++ */ ++#include ++ ++#include "sdhci.h" ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x80c ++#define SDIO0_CRG_CLK_OFS 0x7ec ++#define CRG_SRST_REQ (BIT(1) | BIT(3) | BIT(5)) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(2) | BIT(4)) ++#define CRG_CLK_BIT_OFS 11 ++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x7f4 ++#define SDIO0_DLL_RST_OFS 0x7d4 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x7f8 ++#define SDIO0_DRV_DLL_OFS 0x7d8 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1f << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x808 ++#define SDIO0_DLL_STA_OFS 0x7e8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_GPIO (0xf << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 10 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_CLK_GPIO_OFS 0x50 ++#define EMMC_CMD_GPIO_OFS 0x54 ++#define EMMC_RSTN_GPIO_OFS 0x68 ++#define EMMC_DQS_GPIO_OFS 0x58 ++#define EMMC_D0_GPIO_OFS 0x30 ++#define EMMC_D1_GPIO_OFS 0x34 ++#define EMMC_D2_GPIO_OFS 0x38 ++#define EMMC_D3_GPIO_OFS 0x3c ++#define EMMC_D4_GPIO_OFS 0x40 ++#define EMMC_D5_GPIO_OFS 0x44 ++#define EMMC_D6_GPIO_OFS 0x48 ++#define EMMC_D7_GPIO_OFS 0x4c ++ ++/* SDIO0 IO register offset */ ++#define SDIO0_DETECT_OFS 0x14 ++#define SDIO0_PWEN_OFS 0x10 ++#define SDIO0_CLK_OFS 0x18 ++#define SDIO0_CMD_OFS 0x1c ++#define SDIO0_D0_OFS 0x20 ++#define SDIO0_D1_OFS 0x24 ++#define SDIO0_D2_OFS 0x28 ++#define SDIO0_D3_OFS 0x2c ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* EMMC fixed timing parameter */ ++static nebula_timing g_timing_gpio_emmc[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, 
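++	/*
++	 * HS400 keeps the HS200 pad drive levels but additionally needs a
++	 * valid DQS drive strength and an earlier drive phase
++	 * (PHASE_LVL_8, i.e. 90 degrees, instead of PHASE_LVL_20).
++	 */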
++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_8), ++ }, ++}; ++ ++/* EMMC info struct */ ++static const nebula_info g_gpio_emmc_info = \ ++ nebula_emmc_info_desc(g_timing_gpio_emmc); ++ ++/* SDIO0 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio0[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_20, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++}; ++ ++/* SDIO0 info struct */ ++static const nebula_info g_gpio_sdio0_info = \ ++ nebula_sdio0_info_desc(g_timing_gpio_sdio0); ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->info = &g_gpio_sdio0_info; ++ nebula->priv_cap |= NEBULA_CAP_VOLT_SW; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ plat_comm_caps_quirks_init(host); ++ host->quirks2 |= SDHCI_QUIRK2_DELAY_BEFORE_POWER; ++ host->alloc_desc_sz = SDHCI_ADMA2_DESC_LEN; ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_shaolinaxe.c b/drivers/vendor/mmc/platform/sdhci_shaolinaxe.c +new file mode 100644 +index 000000000..07fa79731 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_shaolinaxe.c +@@ -0,0 +1,147 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 
2024. All rights reserved.
++ * Description: Nebula SDHCI for shaolinaxe
++ * Author: AuthorNameMagicTag
++ * Create: 2024-3-16
++ */
++#include
++
++#include "sdhci.h"
++
++/* Host controller CRG */
++#define EMMC_CRG_CLK_OFS 0x528
++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18))
++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1))
++#define CRG_CLK_BIT_OFS 24
++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS)
++
++/* Host dll reset register */
++#define EMMC_DLL_RST_OFS 0x52C
++#define CRG_DLL_SRST_REQ BIT(1)
++
++/* Host dll phase register */
++#define EMMC_DRV_DLL_OFS 0x530
++#define CRG_DRV_PHASE_SHIFT 15
++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT)
++
++/* Host dll state register */
++#define EMMC_DLL_STA_OFS 0x540
++#define CRG_P4_DLL_LOCKED BIT(9)
++#define CRG_DS_DLL_READY BIT(10)
++#define CRG_SAM_DLL_READY BIT(12)
++
++/* Host drv cap config */
++#define DRV_STR_SHIFT 4
++#define DRV_STR_MASK_GPIO (0xF << DRV_STR_SHIFT)
++#define SR_STR_SHIFT 8
++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT)
++
++/* EMMC IO register offset */
++#define EMMC_CLK_GPIO_OFS 0xC04
++#define EMMC_CMD_GPIO_OFS 0xC08
++#define EMMC_RSTN_GPIO_OFS 0xC2C
++#define EMMC_DQS_GPIO_OFS 0xC00
++#define EMMC_D0_GPIO_OFS 0xC0C
++#define EMMC_D1_GPIO_OFS 0xC10
++#define EMMC_D2_GPIO_OFS 0xC14
++#define EMMC_D3_GPIO_OFS 0xC18
++#define EMMC_D4_GPIO_OFS 0xC1C
++#define EMMC_D5_GPIO_OFS 0xC20
++#define EMMC_D6_GPIO_OFS 0xC24
++#define EMMC_D7_GPIO_OFS 0xC28
++
++/* reversion_b phy eye related */
++#define SYSVERSION_ADDR 0xf8000ee0
++#define REVERSION_B_VAL 0xc3030100
++
++#include "sdhci_nebula.h"
++#include "platform_priv.h"
++#include "platform_timing.h"
++
++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC;
++
++/* EMMC fixed timing parameter */
++static nebula_timing g_timing_gpio_emmc[] = {
++	[MMC_TIMING_LEGACY] = {
++		.data_valid = true,
++		.timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_14, SR_LVL_0),
++		.timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0),
++		.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_14, SR_LVL_0),
++		fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0),
++	},
++	[MMC_TIMING_MMC_HS] = {
++		.data_valid = true,
++		.timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_14, SR_LVL_1),
++		.timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_14, SR_LVL_1),
++		.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_14, SR_LVL_1),
++		fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4),
++	},
++	[MMC_TIMING_MMC_HS200] = {
++		.data_valid = true,
++		.timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_6, SR_LVL_0),
++		.timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_12, SR_LVL_0),
++		.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_12, SR_LVL_0),
++		fixed_drv_phase_only(PHASE_LVL_23),
++	},
++	[MMC_TIMING_MMC_HS400] = {
++		.data_valid = true,
++		.timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_11, SR_LVL_0),
++		.timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0),
++		.timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_15, SR_LVL_1),
++		.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_9, SR_LVL_0),
++		fixed_drv_phase_only(PHASE_LVL_6),
++	},
++};
++
++/* EMMC info struct */
++static const nebula_info g_gpio_emmc_info = \
++	nebula_emmc_info_desc(g_timing_gpio_emmc);
++
++static bool priv_is_reversion_b(void)
++{
++	static u32 sys_version;
++	static bool init_already = false;
++	void __iomem *sysversion_addr = NULL;
++
++	if (init_already)
++		return (sys_version == REVERSION_B_VAL);
++
++	sysversion_addr = ioremap(SYSVERSION_ADDR, PAGE_SIZE);
++	if (sysversion_addr == NULL) {
++		pr_err("failed to remap sys ver addr.\n");
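++		/* treat a failed remap as "not reversion B"; the check is retried on the next call */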
++ return false; ++ } ++ ++ sys_version = readl_relaxed(sysversion_addr); ++ ++ iounmap(sysversion_addr); ++ sysversion_addr = NULL; ++ ++ init_already = true; ++ ++ return (sys_version == REVERSION_B_VAL); ++} ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ if (!priv_is_reversion_b()) ++ g_timing_gpio_emmc[MMC_TIMING_MMC_HS400].phase[DRV_PHASE] = \ ++ TIMING_VALID | (PHASE_LVL_7 << CRG_DRV_PHASE_SHIFT); ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ plat_comm_caps_quirks_init(host); ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_shaolinfist.c b/drivers/vendor/mmc/platform/sdhci_shaolinfist.c +new file mode 100644 +index 000000000..8a6414c82 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_shaolinfist.c +@@ -0,0 +1,123 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2024. All rights reserved. ++ * Description: Nebula SDHCI for shaolinfist ++ * Author: AuthorNameMagicTag ++ * Create: 2024-3-16 ++ */ ++#include ++ ++#include "sdhci.h" ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x528 ++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18)) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1) | BIT(27)) ++#define CRG_CLK_BIT_OFS 24 ++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x52C ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x530 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x540 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 0 ++#define DRV_STR_MASK_GPIO (0xF << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 7 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_DQS_GPIO_OFS 0x0 ++#define EMMC_CLK_GPIO_OFS 0x4 ++#define EMMC_CMD_GPIO_OFS 0x8 ++#define EMMC_D0_GPIO_OFS 0xC ++#define EMMC_D1_GPIO_OFS 0x10 ++#define EMMC_D2_GPIO_OFS 0x14 ++#define EMMC_D3_GPIO_OFS 0x18 ++#define EMMC_D4_GPIO_OFS 0x1C ++#define EMMC_D5_GPIO_OFS 0x20 ++#define EMMC_D6_GPIO_OFS 0x24 ++#define EMMC_D7_GPIO_OFS 0x28 ++#define EMMC_RSTN_GPIO_OFS 0x2C ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* EMMC fixed timing parameter */ ++static nebula_timing g_timing_gpio_emmc[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_10, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = 
fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_19), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_15, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_9), ++ }, ++}; ++ ++/* EMMC info struct */ ++static const nebula_info g_gpio_emmc_info = \ ++ nebula_emmc_info_desc(g_timing_gpio_emmc); ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_extra_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) ++ plat_set_emmc_type(host); ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ plat_comm_caps_quirks_init(host); ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_shaolinspear.c b/drivers/vendor/mmc/platform/sdhci_shaolinspear.c +new file mode 100644 +index 000000000..6186de3a7 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_shaolinspear.c +@@ -0,0 +1,232 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2024. All rights reserved. ++ * Description: Nebula SDHCI for shaolinspear ++ * Author: AuthorNameMagicTag ++ * Create: 2024-3-16 ++ */ ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x6A8 ++#define SDIO0_CRG_CLK_OFS 0x6C4 ++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18)) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1)) ++#define CRG_CLK_BIT_OFS 24 ++#define CRG_CLK_SEL_MASK (0x7 << CRG_CLK_BIT_OFS) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x6AC ++#define SDIO0_DLL_RST_OFS 0x6C8 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x6B0 ++#define SDIO0_DRV_DLL_OFS 0x6CC ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x6C0 ++#define SDIO0_DLL_STA_OFS 0x6D8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_IO (0xf << DRV_STR_SHIFT) ++#define DRV_STR_MASK_GPIO (0xf << DRV_STR_SHIFT) ++ ++/* no SR bit */ ++#define SR_STR_SHIFT 8 ++#define SR_STR_MASK_GPIO (0x0 << SR_STR_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_DQS_IO_OFS 0x28 ++#define EMMC_CLK_IO_OFS 0x20 ++#define EMMC_CMD_IO_OFS 0x24 ++#define EMMC_D0_IO_OFS 0x8 ++#define EMMC_D1_IO_OFS 0x10 ++#define EMMC_D2_IO_OFS 0x18 ++#define EMMC_D3_IO_OFS 0x0 ++#define EMMC_D4_IO_OFS 0x4 ++#define EMMC_D5_IO_OFS 0xC ++#define EMMC_D6_IO_OFS 0x14 ++#define EMMC_D7_IO_OFS 0x1C ++#define EMMC_RSTN_IO_OFS 0x2C ++ ++/* SDIO0(SD card) IO register offset */ ++#define SDIO0_DETECT_OFS 0x790 ++#define SDIO0_PWEN_OFS 0x794 ++#define SDIO0_CLK_OFS 0x7AC ++#define SDIO0_CMD_OFS 0x7A8 ++#define SDIO0_D0_OFS 0x7A4 ++#define SDIO0_D1_OFS 0x7A0 ++#define SDIO0_D2_OFS 0x79C ++#define SDIO0_D3_OFS 
0x798 ++ ++/* High speed IO PHY */ ++#define REG_BASE_EMMC_PHY 0x01720000 ++#define EMMC_ZQ_CTRL_PHY_ADDR (REG_BASE_EMMC_PHY + 0x4) ++ ++/* only used for sdio0 power switch */ ++#define SDIO0_VOLT_SW_PHY_ADDR 0x00A10A80 ++#define SDIO0_PWR_SEL_OFFSET 0x00A10B80 ++#define PWR_SEL_1V8 BIT(7) ++ ++#include "sdhci_nebula.h" ++#include "cputable.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* eMMC high speed IO fixed timing parameter */ ++static nebula_timing g_mmc_hsio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(TM_LVL_5), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(TM_LVL_5), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(TM_LVL_5), ++ fixed_drv_phase_only(PHASE_LVL_18), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(TM_LVL_5), ++ .timing[IO_TYPE_DQS] = fixed_hsio_drv(TM_LVL_5), ++ fixed_drv_phase_only(PHASE_LVL_8), ++ } ++}; ++ ++static const nebula_info g_mmc_hsio_info = \ ++ nebula_emmc_hsio_info_desc(g_mmc_hsio_timing); ++ ++/* SDIO0 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio0[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_2, SR_LVL_0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ .timing[IO_TYPE_DATA] = 
fixed_gpio_drv(TM_LVL_3, SR_LVL_0), ++ fixed_drv_phase_only(PHASE_LVL_22), ++ }, ++}; ++ ++/* SDIO0 info struct */ ++static const nebula_info g_gpio_sdio0_info = \ ++ nebula_sdio0_info_desc(g_timing_gpio_sdio0); ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_mmc_hsio_info; ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ nebula->priv_cap |= NEBULA_CAP_ZQ_CALB; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->info = &g_gpio_sdio0_info; ++ nebula->priv_cap |= NEBULA_CAP_VOLT_SW; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++static void priv_prepare_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios) ++{ ++ u32 power_sel; ++ void __iomem *viraddr; ++ ++ viraddr = ioremap(SDIO0_PWR_SEL_OFFSET, PAGE_SIZE); ++ if (viraddr == NULL) { ++ pr_err("%s: volt power sel ioremap error.\n", mmc_hostname(host->mmc)); ++ return; ++ } ++ ++ power_sel = readl(viraddr); ++ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) ++ power_sel |= PWR_SEL_1V8; ++ else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) ++ power_sel &= ~PWR_SEL_1V8; ++ ++ writel(power_sel, viraddr); ++ ++ usleep_range(1000, 2000); /* Sleep between 1000 and 2000us */ ++ ++ iounmap(viraddr); ++} ++ ++static int priv_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ if (nebula->priv_cap & NEBULA_CAP_VOLT_SW) { ++ priv_prepare_voltage_switch(host, ios); ++ return plat_voltage_switch(host, ios); ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ plat_comm_caps_quirks_init(host); ++ host->quirks2 |= SDHCI_QUIRK2_DELAY_BEFORE_POWER; ++ nebula->ops.plat_voltage_switch = priv_voltage_switch; ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_shaolinsword_c.c b/drivers/vendor/mmc/platform/sdhci_shaolinsword_c.c +new file mode 100644 +index 000000000..29ec61ead +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_shaolinsword_c.c +@@ -0,0 +1,118 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. 
++ * Description: Nebula SDHCI for shaolinsword c ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x5e0 ++ ++/* Host reset bits */ ++#define MMC_SRST_REQ BIT(16) ++#define MMC_RX_SRST_REQ BIT(17) ++#define MMC_TX_SRST_REQ BIT(18) ++#define CRG_SRST_REQ (MMC_SRST_REQ | MMC_RX_SRST_REQ | MMC_TX_SRST_REQ) ++ ++/* Host clock bits */ ++#define CRG_CKEN BIT(0) ++#define CRG_AHB_CKEN BIT(1) ++#define CRG_CLK_EN_MASK (CRG_CKEN | CRG_AHB_CKEN) ++ ++#define EMMC_DLL_RST_OFS 0x5e4 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* DRV DLL OFS */ ++#define EMMC_DRV_DLL_OFS 0x5e8 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++#define EMMC_DLL_STA_OFS 0x5f8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_GPIO (0xf << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 8 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++/* EMMC GPIO register offset */ ++#define EMMC_DQS_GPIO_OFS 0xC00 ++#define EMMC_CLK_GPIO_OFS 0xC04 ++#define EMMC_CMD_GPIO_OFS 0xC08 ++#define EMMC_D0_GPIO_OFS 0xC0C ++#define EMMC_D1_GPIO_OFS 0xC10 ++#define EMMC_D2_GPIO_OFS 0xC14 ++#define EMMC_D3_GPIO_OFS 0xC18 ++#define EMMC_D4_GPIO_OFS 0xC1C ++#define EMMC_D5_GPIO_OFS 0xC20 ++#define EMMC_D6_GPIO_OFS 0xC24 ++#define EMMC_D7_GPIO_OFS 0xC28 ++#define EMMC_RSTN_GPIO_OFS 0xC2C ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* eMMC GPIO fixed timing parameter */ ++static nebula_timing g_mmc_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xE, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xF, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xE, 0x0), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xE, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xE, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xE, 0x1), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_MMC_DDR52] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xE, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xE, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xE, 0x1), ++ fixed_drv_samp_phase(0x8, 0x8), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xC, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xC, 0x0), ++ fixed_drv_phase_only(0x15), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xB, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x9, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x9, 0x0), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(0xF, 0x1), ++ fixed_drv_phase_only(0x5), ++ } ++}; ++ ++static const nebula_info g_mmc_gpio_info = \ ++ nebula_emmc_info_desc(g_mmc_gpio_timing); ++ ++int plat_host_pre_init(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_mmc_gpio_info; ++ } else { /* match the sibling platform files: reject unknown devids instead of leaving info NULL */ ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ plat_comm_caps_quirks_init(host); ++} +\ No newline at end of file +diff --git 
a/drivers/vendor/mmc/platform/sdhci_ss928v100.c b/drivers/vendor/mmc/platform/sdhci_ss928v100.c +new file mode 100644 +index 000000000..110dbde8d +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_ss928v100.c +@@ -0,0 +1,346 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI for ss928v100 ++ * Author: AuthorNameMagicTag ++ * Create: 2022-12-16 ++ */ ++ ++#define EMMC_BUS_WIDTH_PHY_ADDR 0x11020018 ++ ++#define IO_CFG_SDIO_MUX 0x1 ++#define IO_CFG_EMMC_MUX 0x2 ++#define IO_CFG_MUX_MASK 0xF ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x34C0 ++#define SDIO0_CRG_CLK_OFS 0x35C0 ++#define SDIO1_CRG_CLK_OFS 0x36C0 ++#define CRG_SRST_REQ BIT(16) ++#define CRG_CLK_EN_MASK (BIT(0) | BIT(1)) ++ ++/* Host dll reset register */ ++#define EMMC_DLL_RST_OFS 0x34C4 ++#define SDIO0_DLL_RST_OFS 0x35C4 ++#define SDIO1_DLL_RST_OFS 0x36C4 ++#define CRG_DLL_SRST_REQ BIT(1) ++ ++/* Host dll phase register */ ++#define EMMC_DRV_DLL_OFS 0x34C8 ++#define SDIO0_DRV_DLL_OFS 0x35C8 ++#define SDIO1_DRV_DLL_OFS 0x36C8 ++#define CRG_DRV_PHASE_SHIFT 15 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++/* Host dll state register */ ++#define EMMC_DLL_STA_OFS 0x34D8 ++#define SDIO0_DLL_STA_OFS 0x35D8 ++#define SDIO1_DLL_STA_OFS 0x36D8 ++#define CRG_P4_DLL_LOCKED BIT(9) ++#define CRG_DS_DLL_READY BIT(10) ++#define CRG_SAM_DLL_READY BIT(12) ++ ++/* Host drv cap config */ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_GPIO (0xF << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 8 ++#define SR_STR_MASK_GPIO (0x3 << SR_STR_SHIFT) ++#define NEBULA_VOLT_SW_BVT 1 ++ ++/* EMMC IO register offset */ ++#define EMMC_CLK_GPIO_OFS 0x00 ++#define EMMC_CMD_GPIO_OFS 0x04 ++#define EMMC_RSTN_GPIO_OFS 0x2C ++#define EMMC_DQS_GPIO_OFS 0x28 ++#define EMMC_D0_GPIO_OFS 0x08 ++#define EMMC_D1_GPIO_OFS 0x0C ++#define EMMC_D2_GPIO_OFS 0x10 ++#define EMMC_D3_GPIO_OFS 0x14 ++#define EMMC_D4_GPIO_OFS 0x18 ++#define EMMC_D5_GPIO_OFS 0x1C ++#define EMMC_D6_GPIO_OFS 0x20 ++#define EMMC_D7_GPIO_OFS 0x24 ++ ++/* ZQ calibration physical address */ ++#define EMMC_ZQ_CTRL_PHY_ADDR 0x10010004 ++ ++/* SDIO0 IO register offset */ ++#define SDIO0_DETECT_OFS 0x80 ++#define SDIO0_PWEN_OFS 0x84 ++#define SDIO0_CLK_OFS 0x9C ++#define SDIO0_CMD_OFS 0x88 ++#define SDIO0_D0_OFS 0x8C ++#define SDIO0_D1_OFS 0x90 ++#define SDIO0_D2_OFS 0x94 ++#define SDIO0_D3_OFS 0x98 ++ ++/* Voltage switch physical address */ ++#define SDIO0_VOLT_SW_PHY_ADDR 0x102E0010 ++ ++/* SDIO1 IO register offset */ ++#define SDIO1_CLK_OFS 0x50 ++#define SDIO1_CMD_OFS 0x54 ++#define SDIO1_D0_OFS 0x40 ++#define SDIO1_D1_OFS 0x44 ++#define SDIO1_D2_OFS 0x48 ++#define SDIO1_D3_OFS 0x4C ++ ++#include "sdhci_nebula.h" ++#include "platform_priv.h" ++#include "platform_timing.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* EMMC fixed timing parameter */ ++static nebula_timing g_timing_gpio_emmc[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_3), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_3), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_3), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ 
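/* the RST pad keeps the weakest drive level (TM_LVL_0) in every eMMC mode of this table */ ++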
.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_3), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_3), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_3), ++ fixed_drv_phase_only(PHASE_LVL_18), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_5, SR_LVL_3), ++ .timing[IO_TYPE_RST] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_5, SR_LVL_3), ++ fixed_drv_phase_only(PHASE_LVL_7), ++ }, ++}; ++ ++/* EMMC info struct */ ++static const nebula_info g_gpio_emmc_info = \ ++ nebula_emmc_info_desc(g_timing_gpio_emmc); ++ ++/* SDIO0 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio0[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_10, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_4, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_18, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_5, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_0, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_7, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_1, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_1, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_7, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_13, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_7, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_7, SR_LVL_1), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++}; ++ ++/* SDIO0 info struct */ ++static const nebula_info g_gpio_sdio0_info = \ ++ nebula_sdio0_info_desc(g_timing_gpio_sdio0); ++ ++/* SDIO1 fixed timing parameter */ ++static nebula_timing g_timing_gpio_sdio1[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_8, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_9, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ 
.timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_3, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_4, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_1, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_1, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_6, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ fixed_drv_samp_phase(PHASE_LVL_16, PHASE_LVL_4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_6, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_2, SR_LVL_1), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_11, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_6, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_6, SR_LVL_1), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(TM_LVL_11, SR_LVL_2), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(TM_LVL_6, SR_LVL_1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(TM_LVL_6, SR_LVL_1), ++ fixed_drv_phase_only(PHASE_LVL_20), ++ }, ++}; ++ ++/* SDIO1 info struct */ ++static const nebula_info g_gpio_sdio1_info = \ ++ nebula_sdio1_info_desc(g_timing_gpio_sdio1); ++ ++static inline void priv_set_io_mux(struct sdhci_host *host, ++ unsigned int offset, unsigned int pin_mux) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ regmap_write_bits(nebula->iocfg_regmap, offset, ++ IO_CFG_MUX_MASK, pin_mux); ++} ++ ++static void priv_io_mux_config(struct sdhci_host *host) ++{ ++ u32 idx, bus_width, pin_mux; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ const nebula_info *info = nebula->info; ++ ++ if (nebula->priv_quirk & NEBULA_QUIRK_FPGA) { ++ return; ++ } ++ ++ pin_mux = (nebula->devid == MMC_DEV_TYPE_MMC_0) ? 
\ ++ IO_CFG_EMMC_MUX : IO_CFG_SDIO_MUX; ++ ++ priv_set_io_mux(host, info->io_offset[IO_TYPE_CLK], pin_mux); ++ priv_set_io_mux(host, info->io_offset[IO_TYPE_CMD], pin_mux); ++ ++ if (host->mmc->caps & MMC_CAP_8_BIT_DATA) { ++ bus_width = (1 << MMC_BUS_WIDTH_8); ++ } else if (host->mmc->caps & MMC_CAP_4_BIT_DATA) { ++ bus_width = (1 << MMC_BUS_WIDTH_4); ++ } else { ++ bus_width = (1 << MMC_BUS_WIDTH_1); ++ } ++ for (idx = IO_TYPE_D0; (idx < IO_TYPE_DMAX) && (bus_width != 0); idx++) { ++ bus_width--; ++ priv_set_io_mux(host, info->io_offset[idx], pin_mux); ++ } ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { /* eMMC device */ ++ priv_set_io_mux(host, info->io_offset[IO_TYPE_RST], pin_mux); ++ /* bus_width has been counted down to zero by the loop above, so test the caps directly */ ++ if (host->mmc->caps & MMC_CAP_8_BIT_DATA) ++ priv_set_io_mux(host, info->io_offset[IO_TYPE_DQS], pin_mux); ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ priv_set_io_mux(host, SDIO0_DETECT_OFS, pin_mux); ++ priv_set_io_mux(host, SDIO0_PWEN_OFS, pin_mux); ++ } ++} ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ nebula->info = &g_gpio_emmc_info; ++ nebula->priv_cap |= NEBULA_CAP_ZQ_CALB; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->info = &g_gpio_sdio0_info; ++ nebula->priv_cap |= NEBULA_CAP_VOLT_SW; ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_1) { ++ nebula->info = &g_gpio_sdio1_info; ++ } else { ++ pr_err("error: invalid device\n"); ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ struct mmc_host *mmc = host->mmc; ++ ++ plat_comm_caps_quirks_init(host); ++ ++ plat_set_mmc_bus_width(host); ++ ++ /* Initialize pin multiplexing first */ ++ priv_io_mux_config(host); ++ ++ if (!(mmc->caps2 & MMC_CAP2_NO_SDIO)) ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++} ++ ++void plat_extra_init(struct sdhci_host *host) ++{ ++ u32 ctrl; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ ctrl = sdhci_readl(host, SDHCI_AXI_MBIU_CTRL); ++ ctrl &= ~SDHCI_UNDEFL_INCR_EN; ++ sdhci_writel(host, ctrl, SDHCI_AXI_MBIU_CTRL); ++ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ ctrl = sdhci_readl(host, SDHCI_EMMC_CTRL); ++ ctrl |= SDHCI_CARD_IS_EMMC; ++ sdhci_writel(host, ctrl, SDHCI_EMMC_CTRL); ++ } ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_tianhe.c b/drivers/vendor/mmc/platform/sdhci_tianhe.c +new file mode 100644 +index 000000000..9e64665a2 +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_tianhe.c +@@ -0,0 +1,371 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2025. All rights reserved. ++ * Description: Nebula SDHCI for tianhe ++ * Author: AuthorNameMagicTag ++ * Create: 2025-02-21 ++ */ ++ ++#include "sdhci_tianhe.h" ++#include "sdhci_nebula.h" ++ ++/* platform_priv.h must be included AFTER sdhci_tianhe.h. 
*/ ++#include "platform_priv.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* eMMC GPIO fixed timing parameter */ ++static nebula_timing g_mmc_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x0), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x0), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_MMC_DDR52] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x0), ++ fixed_drv_samp_phase(0x8, 0x4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xc, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x8, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x8, 0x1), ++ fixed_drv_phase_only(0x12), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(0x0, 0x0), ++ fixed_drv_phase_only(0x8), ++ }, ++}; ++ ++static const nebula_info g_mmc_gpio_info = \ ++ nebula_emmc_info_desc(g_mmc_gpio_timing); ++ ++static const nebula_timing g_mmc_gpio_timing_es[] = { ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* SDIO0 GPIO fixed timing parameter */ ++static nebula_timing g_sdio0_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x8, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x14, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0xc, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xd, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x7, 0x1), ++ 
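/* drive phase is fixed here; the SDR104 sample phase is expected to come from tuning */ ++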
fixed_drv_phase_only(0x15), ++ } ++}; ++ ++static const nebula_info g_sdio0_gpio_info = \ ++ nebula_sdio0_info_desc(g_sdio0_gpio_timing); ++ ++static const nebula_timing g_sdio0_gpio_timing_es[] = { ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xc, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x7, 0x1), ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* SDIO1 GPIO fixed timing parameter */ ++static nebula_timing g_sdio1_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ fixed_drv_samp_phase(0x14, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x0), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ fixed_drv_samp_phase(0xc, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x0), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x9, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x9, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x9, 0x1), ++ fixed_drv_phase_only(0x12), ++ } ++}; ++ ++static const nebula_info g_sdio1_gpio_info = \ ++ nebula_sdio1_info_desc(g_sdio1_gpio_timing); ++ ++static const nebula_timing g_sdio1_gpio_timing_es[] = { ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x1), ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* eMMC high speed IO 
fixed timing parameter */ ++static nebula_timing g_mmc_hsio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_MMC_DDR52] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x8, 0x4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x12), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x6), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DQS] = fixed_hsio_drv(0x3), ++ .timing[IO_TYPE_RST] = fixed_hsio_drv(0x0), ++ fixed_drv_phase_only(0x7), ++ } ++}; ++ ++static const nebula_info g_mmc_hsio_info = \ ++ nebula_emmc_hsio_info_desc(g_mmc_hsio_timing); ++ ++static const nebula_timing g_mmc_hsio_timing_es[] = { ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x9), ++ } ++}; ++ ++/* Read REG_PERI_START_MODE to determine eMMC IO type */ ++static int __maybe_unused priv_get_io_type(struct sdhci_nebula *nebula, bool force_io_type) ++{ ++ void __iomem *viraddr; ++ unsigned int io_type; ++ ++ if (force_io_type) { ++ nebula->io_type = MMC_IO_TYPE_IO; ++ return ERET_SUCCESS; ++ } ++ ++ viraddr = ioremap(REG_PERI_START_MODE, sizeof(u32)); ++ if (!viraddr) { ++ pr_err("%s ioremap error.\n", __func__); ++ return -ENOMEM; ++ } ++ io_type = readl(viraddr); ++ iounmap(viraddr); ++ if ((io_type & START_MODE_MASK) == START_MODE_EMMC) ++ nebula->io_type = MMC_IO_TYPE_GPIO; ++ else ++ nebula->io_type = MMC_IO_TYPE_IO; ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_extra_init(struct sdhci_host *host) ++{ ++ u32 ctrl; ++ ++ ctrl = sdhci_readl(host, SDHCI_AXI_MBIU_CTRL); ++ ctrl &= ~SDHCI_UNDEFL_INCR_EN; ++ sdhci_writel(host, ctrl, SDHCI_AXI_MBIU_CTRL); ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ plat_comm_caps_quirks_init(host); ++ ++ host->quirks2 |= SDHCI_QUIRK2_DELAY_BEFORE_POWER; ++ ++ /* ++ * For eMMC with GPIO, max frequency is limited to 150MHz. ++ * Reset f_max if dts requires clock frequency higher than 150M. ++ * For eMMC with eMMC combo phy, there is no such limit. 
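++ * e.g. a dts "max-frequency = <200000000>" on a GPIO-routed eMMC is capped to EMMC_GPIO_MAX_FREQ (150 MHz) below.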
++ */ ++ if ((nebula->devid == MMC_DEV_TYPE_MMC_0) && ++ (nebula->io_type == MMC_IO_TYPE_GPIO) && ++ (host->mmc->f_max > EMMC_GPIO_MAX_FREQ)) ++ host->mmc->f_max = EMMC_GPIO_MAX_FREQ; ++ ++ /* enable emmc & sd devices low power support only */ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0 || nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ } ++} ++ ++static void __maybe_unused priv_refixed_info(nebula_timing *timing, int timing_size, ++ const nebula_timing *refixed, int refixed_size) ++{ ++ int idx, i; ++ ++ if (timing == NULL || refixed == NULL || (timing_size < refixed_size)) { ++ pr_err("refixed invalid\n"); ++ return; ++ } ++ ++ for (idx = 0; idx < refixed_size; timing++, refixed++, idx++) { ++ if (refixed->data_valid == false) { ++ continue; ++ } ++ /* refixed io timing */ ++ for (i = 0; i < IO_TYPE_MAX; i++) { ++ if (is_timing_valid(refixed->timing[i])) { ++ timing->timing[i] = refixed->timing[i]; ++ } ++ } ++ /* refixed phase timing */ ++ for (i = 0; i < PHASE_MAX; i++) { ++ if (is_timing_valid(refixed->phase[i])) { ++ timing->phase[i] = refixed->phase[i]; ++ } ++ } ++ } ++} ++ ++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ return ERET_SUCCESS; ++} +diff --git a/drivers/vendor/mmc/platform/sdhci_tianhe.h b/drivers/vendor/mmc/platform/sdhci_tianhe.h +new file mode 100644 +index 000000000..b29f0b20d +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_tianhe.h +@@ -0,0 +1,101 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2025. All rights reserved. ++ * Description: Nebula SDHCI for tianhe header ++ * Author: AuthorNameMagicTag ++ * Create: 2025-02-21 ++ */ ++ ++#ifndef _DRIVERS_MMC_SDHCI_TIANHE_H ++#define _DRIVERS_MMC_SDHCI_TIANHE_H ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x6A8 ++#define SDIO0_CRG_CLK_OFS 0x6BC ++#define SDIO1_CRG_CLK_OFS 0x6D0 ++#define EMMC_DLL_RST_OFS EMMC_CRG_CLK_OFS ++#define SDIO0_DLL_RST_OFS SDIO0_CRG_CLK_OFS ++#define SDIO1_DLL_RST_OFS SDIO1_CRG_CLK_OFS ++#define CRG_SRST_REQ BIT(27) ++#define CRG_CKEN BIT(28) ++#define CRG_AHB_CKEN BIT(21) ++#define CRG_CLK_EN_MASK (CRG_CKEN | CRG_AHB_CKEN) ++#define CRG_DLL_SRST_REQ BIT(29) ++ ++#define EMMC_DRV_DLL_OFS 0x6B0 ++#define SDIO0_DRV_DLL_OFS 0x6C4 ++#define SDIO1_DRV_DLL_OFS 0x6D8 ++#define CRG_DRV_PHASE_SHIFT 24 ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++#define EMMC_DLL_STA_OFS 0x6B8 ++#define SDIO0_DLL_STA_OFS 0x6CC ++#define SDIO1_DLL_STA_OFS 0x6E0 ++#define CRG_P4_DLL_LOCKED BIT(7) ++#define CRG_DS_DLL_READY BIT(1) ++#define CRG_SAM_DLL_READY BIT(0) ++ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_IO (0x7 << DRV_STR_SHIFT) ++#define DRV_STR_MASK_GPIO (0xf << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 8 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++#define REG_PERI_START_MODE 0x00A10100 ++#define START_MODE_SHIFT 9 ++#define START_MODE_MASK (0x7 << START_MODE_SHIFT) ++#define START_MODE_EMMC (0x2 << START_MODE_SHIFT) ++#define START_MODE_EMMC_PHY (0x3 << START_MODE_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_DQS_IO_OFS 0x28 ++#define EMMC_CLK_IO_OFS 0x20 ++#define EMMC_CMD_IO_OFS 0x24 ++#define EMMC_D0_IO_OFS 0x8 ++#define EMMC_D1_IO_OFS 0x10 ++#define EMMC_D2_IO_OFS 0x18 ++#define EMMC_D3_IO_OFS 0x0 ++#define EMMC_D4_IO_OFS 0x4 ++#define EMMC_D5_IO_OFS 0xC ++#define EMMC_D6_IO_OFS 0x14 ++#define EMMC_D7_IO_OFS 0x1C ++#define EMMC_RSTN_IO_OFS 0x2C ++ ++/* EMMC GPIO register offset */ ++#define EMMC_GPIO_MAX_FREQ 150000000 ++#define EMMC_DQS_GPIO_OFS 0x8C ++#define 
EMMC_CLK_GPIO_OFS 0x7C ++#define EMMC_CMD_GPIO_OFS 0x84 ++#define EMMC_D0_GPIO_OFS 0x64 ++#define EMMC_D1_GPIO_OFS 0x6C ++#define EMMC_D2_GPIO_OFS 0x74 ++#define EMMC_D3_GPIO_OFS 0x5C ++#define EMMC_D4_GPIO_OFS 0x60 ++#define EMMC_D5_GPIO_OFS 0x68 ++#define EMMC_D6_GPIO_OFS 0x70 ++#define EMMC_D7_GPIO_OFS 0x78 ++#define EMMC_RSTN_GPIO_OFS 0x88 ++ ++/* SDIO0(SD card) IO register offset */ ++#define SDIO0_CLK_OFS 0xAB4 ++#define SDIO0_CMD_OFS 0xAB8 ++#define SDIO0_D0_OFS 0xAB0 ++#define SDIO0_D1_OFS 0xAAC ++#define SDIO0_D2_OFS 0xAC0 ++#define SDIO0_D3_OFS 0xABC ++ ++/* SDIO1(SDIO wifi) IO register offset */ ++#define SDIO1_CLK_OFS 0x540 ++#define SDIO1_CMD_OFS 0x544 ++#define SDIO1_D0_OFS 0x53C ++#define SDIO1_D1_OFS 0x538 ++#define SDIO1_D2_OFS 0x54C ++#define SDIO1_D3_OFS 0x548 ++ ++/* High speed IO PHY */ ++#define REG_BASE_EMMC_PHY 0x01720000 ++#define EMMC_ZQ_CTRL_PHY_ADDR (REG_BASE_EMMC_PHY + 0x4) ++ ++/* only used for sdio0 power switch */ ++#define SDIO0_VOLT_SW_PHY_ADDR 0x00A10A80 ++ ++#endif +diff --git a/drivers/vendor/mmc/platform/sdhci_wudangstick.c b/drivers/vendor/mmc/platform/sdhci_wudangstick.c +new file mode 100644 +index 000000000..5a7ed140c +--- /dev/null ++++ b/drivers/vendor/mmc/platform/sdhci_wudangstick.c +@@ -0,0 +1,511 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI for wudangstick ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++/* Host controller CRG */ ++#define EMMC_CRG_CLK_OFS 0x6A8 ++#define SDIO0_CRG_CLK_OFS 0x6BC ++#define SDIO1_CRG_CLK_OFS 0x6D0 ++#define EMMC_DLL_RST_OFS EMMC_CRG_CLK_OFS ++#define SDIO0_DLL_RST_OFS SDIO0_CRG_CLK_OFS ++#define SDIO1_DLL_RST_OFS SDIO1_CRG_CLK_OFS ++#if defined(CONFIG_ARCH_SHAOLINSPEAR) ++#define CRG_SRST_REQ (BIT(16) | BIT(17) | BIT(18)) ++#else ++#define CRG_SRST_REQ BIT(27) ++#endif ++#define CRG_CKEN BIT(28) ++#define CRG_AHB_CKEN BIT(21) ++#define CRG_CLK_EN_MASK (CRG_CKEN | CRG_AHB_CKEN) ++#define CRG_DLL_SRST_REQ BIT(29) ++ ++#define EMMC_DRV_DLL_OFS 0x6B0 ++#define SDIO0_DRV_DLL_OFS 0x6C4 ++#define SDIO1_DRV_DLL_OFS 0x6D8 ++#if defined(CONFIG_ARCH_SHAOLINSPEAR) ++#define CRG_DRV_PHASE_SHIFT 15 ++#else ++#define CRG_DRV_PHASE_SHIFT 24 ++#endif ++#define CRG_DRV_PHASE_MASK (0x1F << CRG_DRV_PHASE_SHIFT) ++ ++#define EMMC_DLL_STA_OFS 0x6B8 ++#define SDIO0_DLL_STA_OFS 0x6CC ++#define SDIO1_DLL_STA_OFS 0x6E0 ++#define CRG_P4_DLL_LOCKED BIT(7) ++#define CRG_DS_DLL_READY BIT(1) ++#define CRG_SAM_DLL_READY BIT(0) ++ ++#define DRV_STR_SHIFT 4 ++#define DRV_STR_MASK_IO (0x7 << DRV_STR_SHIFT) ++#define DRV_STR_MASK_GPIO (0xf << DRV_STR_SHIFT) ++#define SR_STR_SHIFT 8 ++#define SR_STR_MASK_GPIO (0x1 << SR_STR_SHIFT) ++ ++#define REG_PERI_START_MODE 0x00A10100 ++#define START_MODE_SHIFT 9 ++#define START_MODE_MASK (0x7 << START_MODE_SHIFT) ++#define START_MODE_EMMC (0x2 << START_MODE_SHIFT) ++#define START_MODE_EMMC_PHY (0x3 << START_MODE_SHIFT) ++ ++/* EMMC IO register offset */ ++#define EMMC_DQS_IO_OFS 0x28 ++#define EMMC_CLK_IO_OFS 0x20 ++#define EMMC_CMD_IO_OFS 0x24 ++#define EMMC_D0_IO_OFS 0x8 ++#define EMMC_D1_IO_OFS 0x10 ++#define EMMC_D2_IO_OFS 0x18 ++#define EMMC_D3_IO_OFS 0x0 ++#define EMMC_D4_IO_OFS 0x4 ++#define EMMC_D5_IO_OFS 0xC ++#define EMMC_D6_IO_OFS 0x14 ++#define EMMC_D7_IO_OFS 0x1C ++#define EMMC_RSTN_IO_OFS 0x2C ++ ++/* EMMC GPIO register offset */ ++#define EMMC_GPIO_MAX_FREQ 150000000 ++#define EMMC_DQS_GPIO_OFS 0x8C ++#define EMMC_CLK_GPIO_OFS 0x7C ++#define EMMC_CMD_GPIO_OFS 0x84 ++#define EMMC_D0_GPIO_OFS 0x64 ++#define 
EMMC_D1_GPIO_OFS 0x6C ++#define EMMC_D2_GPIO_OFS 0x74 ++#define EMMC_D3_GPIO_OFS 0x5C ++#define EMMC_D4_GPIO_OFS 0x60 ++#define EMMC_D5_GPIO_OFS 0x68 ++#define EMMC_D6_GPIO_OFS 0x70 ++#define EMMC_D7_GPIO_OFS 0x78 ++#define EMMC_RSTN_GPIO_OFS 0x88 ++ ++/* SDIO0(SD card) IO register offset */ ++#define SDIO0_CLK_OFS 0xAB4 ++#define SDIO0_CMD_OFS 0xAB8 ++#define SDIO0_D0_OFS 0xAB0 ++#define SDIO0_D1_OFS 0xAAC ++#define SDIO0_D2_OFS 0xAC0 ++#define SDIO0_D3_OFS 0xABC ++ ++/* SDIO1(SDIO wifi) IO register offset */ ++#define SDIO1_CLK_OFS 0x540 ++#define SDIO1_CMD_OFS 0x544 ++#define SDIO1_D0_OFS 0x53C ++#define SDIO1_D1_OFS 0x538 ++#define SDIO1_D2_OFS 0x54C ++#define SDIO1_D3_OFS 0x548 ++ ++/* High speed IO PHY */ ++#define REG_BASE_EMMC_PHY 0x01720000 ++#define EMMC_ZQ_CTRL_PHY_ADDR (REG_BASE_EMMC_PHY + 0x4) ++ ++/* only used for sdio0 power switch */ ++#define SDIO0_VOLT_SW_PHY_ADDR 0x00A10A80 ++ ++#include "sdhci_nebula.h" ++#include "cputable.h" ++#include "platform_priv.h" ++ ++static const nebula_crg_mask g_crg_mask = NEBULA_CRG_MASK_DESC; ++ ++/* eMMC GPIO fixed timing parameter */ ++static nebula_timing g_mmc_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x0), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x0), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_MMC_DDR52] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x0), ++ fixed_drv_samp_phase(0x8, 0x4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xc, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x8, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x8, 0x1), ++ fixed_drv_phase_only(0x12), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_DQS] = fixed_gpio_drv(0x0, 0x0), ++ fixed_drv_phase_only(0x8), ++ }, ++}; ++ ++static const nebula_info g_mmc_gpio_info = \ ++ nebula_emmc_info_desc(g_mmc_gpio_timing); ++ ++static const nebula_timing g_mmc_gpio_timing_es[] = { ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* SDIO0 GPIO fixed timing parameter */ ++static nebula_timing g_sdio0_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x8, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x14, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ 
[MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x2, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x2, 0x1), ++ fixed_drv_samp_phase(0xc, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xd, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x7, 0x1), ++ fixed_drv_phase_only(0x15), ++ } ++}; ++ ++static const nebula_info g_sdio0_gpio_info = \ ++ nebula_sdio0_info_desc(g_sdio0_gpio_timing); ++ ++static const nebula_timing g_sdio0_gpio_timing_es[] = { ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xc, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x7, 0x1), ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* SDIO1 GPIO fixed timing parameter */ ++static nebula_timing g_sdio1_gpio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x4, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_SD_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0xb, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x5, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x5, 0x1), ++ fixed_drv_samp_phase(0x14, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR12] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_UHS_SDR25] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x0), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_UHS_DDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x4, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x3, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x3, 0x1), ++ 
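/* DDR50 presumably runs without tuning, so the sample phase stays fixed as well */ ++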
fixed_drv_samp_phase(0xc, 0x4), ++ }, ++ [MMC_TIMING_UHS_SDR50] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x0), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x0), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x9, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x9, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x9, 0x1), ++ fixed_drv_phase_only(0x12), ++ } ++}; ++ ++static const nebula_info g_sdio1_gpio_info = \ ++ nebula_sdio1_info_desc(g_sdio1_gpio_timing); ++ ++static const nebula_timing g_sdio1_gpio_timing_es[] = { ++ [MMC_TIMING_UHS_SDR104] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_gpio_drv(0x7, 0x1), ++ .timing[IO_TYPE_CMD] = fixed_gpio_drv(0x6, 0x1), ++ .timing[IO_TYPE_DATA] = fixed_gpio_drv(0x6, 0x1), ++ fixed_drv_phase_only(0x14), ++ } ++}; ++ ++/* eMMC high speed IO fixed timing parameter */ ++static nebula_timing g_mmc_hsio_timing[] = { ++ [MMC_TIMING_LEGACY] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x10, 0x0), ++ }, ++ [MMC_TIMING_MMC_HS] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x10, 0x4), ++ }, ++ [MMC_TIMING_MMC_DDR52] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x4), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x4), ++ fixed_drv_samp_phase(0x8, 0x4), ++ }, ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x12), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x6), ++ .timing[IO_TYPE_CMD] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DATA] = fixed_hsio_drv(0x5), ++ .timing[IO_TYPE_DQS] = fixed_hsio_drv(0x3), ++ .timing[IO_TYPE_RST] = fixed_hsio_drv(0x0), ++ fixed_drv_phase_only(0x7), ++ } ++}; ++ ++static const nebula_info g_mmc_hsio_info = \ ++ nebula_emmc_hsio_info_desc(g_mmc_hsio_timing); ++ ++static const nebula_timing g_mmc_hsio_timing_es[] = { ++ [MMC_TIMING_MMC_HS200] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x14), ++ }, ++ [MMC_TIMING_MMC_HS400] = { ++ .data_valid = true, ++ .timing[IO_TYPE_CLK] = fixed_hsio_drv(0x5), ++ fixed_drv_phase_only(0x9), ++ } ++}; ++ ++/* Read REG_PERI_START_MODE to determine eMMC IO type */ ++static int priv_get_io_type(struct sdhci_nebula *nebula, bool force_io_type) ++{ ++ void __iomem *viraddr; ++ unsigned int io_type; ++ ++ if (force_io_type) { ++ nebula->io_type = MMC_IO_TYPE_IO; ++ return ERET_SUCCESS; ++ } ++ ++ viraddr = ioremap(REG_PERI_START_MODE, sizeof(u32)); ++ if (!viraddr) { ++ pr_err("%s ioremap error.\n", __func__); ++ return -ENOMEM; ++ } ++ io_type = readl(viraddr); ++ iounmap(viraddr); ++ if ((io_type & START_MODE_MASK) == START_MODE_EMMC) ++ nebula->io_type = MMC_IO_TYPE_GPIO; ++ else ++ nebula->io_type = MMC_IO_TYPE_IO; ++ ++ return ERET_SUCCESS; ++} ++ ++void plat_extra_init(struct sdhci_host *host) ++{ ++ u32 ctrl; ++ ++ ctrl = sdhci_readl(host, SDHCI_AXI_MBIU_CTRL); ++ ctrl &= 
~SDHCI_UNDEFL_INCR_EN; ++ sdhci_writel(host, ctrl, SDHCI_AXI_MBIU_CTRL); ++} ++ ++void plat_caps_quirks_init(struct sdhci_host *host) ++{ ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ plat_comm_caps_quirks_init(host); ++ ++ host->quirks2 |= SDHCI_QUIRK2_DELAY_BEFORE_POWER; ++ ++ /* ++ * For eMMC with GPIO, max frequency is limited to 150MHz. ++ * Reset f_max if dts requires clock frequency higher than 150M. ++ * For eMMC with eMMC combo phy, there is no such limit. ++ */ ++ if ((nebula->devid == MMC_DEV_TYPE_MMC_0) && ++ (nebula->io_type == MMC_IO_TYPE_GPIO) && ++ (host->mmc->f_max > EMMC_GPIO_MAX_FREQ)) ++ host->mmc->f_max = EMMC_GPIO_MAX_FREQ; ++ ++ /* enable emmc & sd devices low power support only */ ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0 || nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->priv_cap |= NEBULA_CAP_PM_RUNTIME; ++ } ++} ++ ++static void priv_refixed_info(nebula_timing *timing, int timing_size, ++ const nebula_timing *refixed, int refixed_size) ++{ ++ int idx, i; ++ ++ if (timing == NULL || refixed == NULL || (timing_size < refixed_size)) { ++ pr_err("refixed invalid\n"); ++ return; ++ } ++ ++ for (idx = 0; idx < refixed_size; timing++, refixed++, idx++) { ++ if (refixed->data_valid == false) { ++ continue; ++ } ++ /* refixed io timing */ ++ for (i = 0; i < IO_TYPE_MAX; i++) { ++ if (is_timing_valid(refixed->timing[i])) { ++ timing->timing[i] = refixed->timing[i]; ++ } ++ } ++ /* refixed phase timing */ ++ for (i = 0; i < PHASE_MAX; i++) { ++ if (is_timing_valid(refixed->phase[i])) { ++ timing->phase[i] = refixed->phase[i]; ++ } ++ } ++ } ++} ++ ++int plat_host_pre_init(struct platform_device *pdev, ++ struct sdhci_host *host) ++{ ++ int ret; ++ bool is_es_soc; ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++ nebula->mask = &g_crg_mask; ++ is_es_soc = ((get_chipid(0) == WUDANGSTICK) || (get_chipid(0) == SHAOLINGUN)); ++ if (nebula->devid == MMC_DEV_TYPE_MMC_0) { ++ ret = priv_get_io_type(nebula, !is_es_soc); ++ if (ret) ++ return ret; ++ ++ if (nebula->io_type == MMC_IO_TYPE_IO) { ++ nebula->info = &g_mmc_hsio_info; ++ if (is_es_soc) { ++ priv_refixed_info(nebula->info->timing, nebula->info->timing_size, \ ++ g_mmc_hsio_timing_es, ARRAY_SIZE(g_mmc_hsio_timing_es)); ++ } ++ nebula->priv_cap |= NEBULA_CAP_ZQ_CALB; ++ } else { ++ nebula->info = &g_mmc_gpio_info; ++ if (is_es_soc) { ++ priv_refixed_info(nebula->info->timing, nebula->info->timing_size, \ ++ g_mmc_gpio_timing_es, ARRAY_SIZE(g_mmc_gpio_timing_es)); ++ } ++ } ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_0) { ++ nebula->info = &g_sdio0_gpio_info; ++ nebula->priv_cap |= NEBULA_CAP_VOLT_SW; ++ if (is_es_soc) { ++ priv_refixed_info(nebula->info->timing, nebula->info->timing_size, \ ++ g_sdio0_gpio_timing_es, ARRAY_SIZE(g_sdio0_gpio_timing_es)); ++ } ++ } else if (nebula->devid == MMC_DEV_TYPE_SDIO_1) { ++ nebula->info = &g_sdio1_gpio_info; ++ if (is_es_soc) { ++ priv_refixed_info(nebula->info->timing, nebula->info->timing_size, \ ++ g_sdio1_gpio_timing_es, ARRAY_SIZE(g_sdio1_gpio_timing_es)); ++ } ++ } else { ++ return -EINVAL; ++ } ++ ++ return ERET_SUCCESS; ++} ++ +diff --git a/drivers/vendor/mmc/sdhci_nebula.c b/drivers/vendor/mmc/sdhci_nebula.c +new file mode 100644 +index 000000000..b017148ec +--- /dev/null ++++ b/drivers/vendor/mmc/sdhci_nebula.c +@@ -0,0 +1,198 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. 
++ * Description: Nebula SDHCI driver ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++#include ++#include ++#include ++ ++#include "dfx/mci_proc.h" ++#include "nebula_fmea.h" ++#include "sdhci_nebula.h" ++ ++static unsigned int g_slot_index; ++struct mmc_host *g_mci_host[MCI_SLOT_NUM] = {NULL}; ++struct mmc_host *g_mmc_host[MCI_SLOT_NUM] = {NULL}; ++ ++static const struct of_device_id g_sdhci_nebula_match[] = { ++ { .compatible = "nebula,sdhci" }, ++ { .compatible = "huanglong,sdhci" }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, g_sdhci_nebula_match); ++ ++static struct sdhci_ops g_sdhci_nebula_ops = { ++ .platform_execute_tuning = sdhci_nebula_execute_tuning, ++ .reset = sdhci_nebula_reset, ++ .set_clock = sdhci_nebula_set_clock, ++ .irq = sdhci_nebula_irq, ++ .set_bus_width = sdhci_nebula_set_bus_width, ++ .set_uhs_signaling = sdhci_nebula_set_uhs_signaling, ++ .hw_reset = sdhci_nebula_hw_reset, ++ .signal_voltage_switch = sdhci_nebula_voltage_switch, ++ .init = sdhci_nebula_extra_init, ++#ifdef CONFIG_MMC_SDHCI_ANT ++ .get_max_clock = sdhci_nebula_get_max_clock, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) ++ .dump_vendor_regs = sdhci_nebula_dump_vendor_regs, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ .adma_write_desc = sdhci_nebula_adma_write_desc, ++#endif ++ ++}; ++ ++static const struct sdhci_pltfm_data g_sdhci_nebula_pdata = { ++ .ops = &g_sdhci_nebula_ops, ++ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, ++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, ++}; ++ ++static int sdhci_nebula_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct sdhci_host *host; ++ struct sdhci_nebula *nebula = NULL; ++ ++ host = sdhci_pltfm_init(pdev, &g_sdhci_nebula_pdata, ++ sizeof(struct sdhci_nebula)); ++ if (IS_ERR(host)) ++ return (int)PTR_ERR(host); ++ ++ ret = sdhci_nebula_pltfm_init(pdev, host); ++ if (ret) ++ goto pltfm_free; ++ ++ nebula = nebula_priv(host); ++ if (nebula->priv_cap & NEBULA_CAP_PM_RUNTIME) { ++ pm_runtime_get_noresume(&pdev->dev); ++ pm_runtime_set_autosuspend_delay(&pdev->dev, ++ MMC_AUTOSUSPEND_DELAY_MS); ++ pm_runtime_use_autosuspend(&pdev->dev); ++ pm_runtime_set_active(&pdev->dev); ++ pm_runtime_enable(&pdev->dev); ++ } ++ ++ ret = sdhci_nebula_add_host(host); ++ if (ret) ++ goto pm_runtime_disable; ++ ++ if (nebula->priv_cap & NEBULA_CAP_PM_RUNTIME) { ++ pm_runtime_mark_last_busy(&pdev->dev); ++ pm_runtime_put_autosuspend(&pdev->dev); ++ } ++ ++ ret = sdhci_nebula_proc_init(host); ++ if (ret) ++ goto pm_runtime_disable; ++ ++ g_mci_host[g_slot_index++] = host->mmc; ++ ++ if ((nebula->devid >= 0) && (nebula->devid < MCI_SLOT_NUM)) ++ g_mmc_host[nebula->devid] = host->mmc; ++ ++#ifdef CONFIG_ANDROID_PRODUCT ++ sdhci_nebula_fmea_init(host, &nebula->fmea); ++#endif ++ ++ return ERET_SUCCESS; ++ ++pm_runtime_disable: ++ if (nebula->priv_cap & NEBULA_CAP_PM_RUNTIME) { ++ pm_runtime_disable(&pdev->dev); ++ pm_runtime_set_suspended(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); ++ } ++ ++pltfm_free: ++ sdhci_pltfm_free(pdev); ++ return ret; ++} ++ ++static int sdhci_nebula_remove(struct platform_device *pdev) ++{ ++ int ret; ++ struct sdhci_host *host = platform_get_drvdata(pdev); ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_nebula *nebula = nebula_priv(host); ++ ++#ifdef CONFIG_ANDROID_PRODUCT ++ sdhci_nebula_fmea_deinit(&nebula->fmea); ++#endif ++ ++ if (nebula->priv_cap & NEBULA_CAP_PM_RUNTIME) { ++ pm_runtime_get_sync(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ 
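/* drop the usage count taken by pm_runtime_get_sync() above */ ++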
pm_runtime_put_noidle(&pdev->dev); ++ } ++ ++ sdhci_remove_host(host, true); ++ ++ ret = sdhci_nebula_proc_shutdown(host); ++ if (ret != ERET_SUCCESS) { ++ pr_err("failed to shutdown proc.\n"); ++ return ret; ++ } ++ ++ if (!IS_ERR_OR_NULL(nebula->hclk)) ++ clk_disable_unprepare(nebula->hclk); ++ ++ clk_disable_unprepare(pltfm_host->clk); ++ ++ sdhci_pltfm_free(pdev); ++ ++ return ret; ++} ++ ++static const struct dev_pm_ops g_sdhci_nebula_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(sdhci_nebula_pltfm_suspend, ++ sdhci_nebula_pltfm_resume) ++ SET_RUNTIME_PM_OPS(sdhci_nebula_runtime_suspend, ++ sdhci_nebula_runtime_resume, ++ NULL) ++}; ++ ++static struct platform_driver g_sdhci_nebula_driver = { ++ .probe = sdhci_nebula_probe, ++ .remove = sdhci_nebula_remove, ++ .driver = { ++ .name = "sdhci_nebula", ++ .of_match_table = g_sdhci_nebula_match, ++ .pm = &g_sdhci_nebula_pm_ops, ++ }, ++}; ++ ++static int __init sdhci_bsp_init(void) ++{ ++ int ret; ++ ++ ret = platform_driver_register(&g_sdhci_nebula_driver); ++ if (ret) { ++ pr_err("failed to register sdhci drv.\n"); ++ return ret; ++ } ++ ++ ret = mci_proc_init(); ++ if (ret) ++ platform_driver_unregister(&g_sdhci_nebula_driver); ++ ++ return ret; ++} ++ ++static void __exit sdhci_bsp_exit(void) ++{ ++ mci_proc_shutdown(); ++ ++ platform_driver_unregister(&g_sdhci_nebula_driver); ++} ++ ++module_init(sdhci_bsp_init); ++module_exit(sdhci_bsp_exit); ++ ++MODULE_DESCRIPTION("SDHCI driver for vendor"); ++MODULE_AUTHOR("CompanyNameMagicTag."); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/vendor/mmc/sdhci_nebula.h b/drivers/vendor/mmc/sdhci_nebula.h +new file mode 100644 +index 000000000..81935c8c1 +--- /dev/null ++++ b/drivers/vendor/mmc/sdhci_nebula.h +@@ -0,0 +1,381 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI driver header ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++#ifndef _DRIVERS_MMC_SDHCI_NEBULA_H ++#define _DRIVERS_MMC_SDHCI_NEBULA_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "sdhci-pltfm.h" ++#include "nebula_fmea.h" ++#include "sdhci_nebula.h" ++ ++/* The operation completed successfully. */ ++#define ERET_SUCCESS 0 ++ ++#define SDHCI_HL_EDGE_TUNING /* enable edge tuning */ ++ ++#define PHASE_SCALE 32 ++#define EDGE_TUNING_PHASE_STEP 4 ++#define NOT_FOUND (-1) ++#define MAX_TUNING_NUM 1 ++#define WIN_DIV 2 ++#define WIN_MASK 0x3 ++#define WIN_RISE 0x2 ++#define WIN_FALL 0x1 ++ ++#define MAX_FREQ 200000000 ++#define MMC_BLOCK_SIZE 512 ++ ++#ifndef CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT ++#define CQHCI_QUIRK_TXFR_DESC_SZ_SPLIT 0x2 ++#endif ++#define CQHCI_MAX_SEGS_MUL 2 ++#define SDHCI_CTRL_64BIT_ADDR 0x2000 ++#define SDHCI_CTRL_V4_ENABLE 0x1000 ++ ++#define CQE_MAX_TIMEOUT 10000 ++/* Software auto suspend delay */ ++#define MMC_AUTOSUSPEND_DELAY_MS 50 ++ ++#ifdef CONFIG_MMC_SDHCI_ANT ++#define SDEMMC_TIMING_CTRL_0 0x100 ++#define SDEMMC_TIMING_CTRL_1 0x104 ++#define SDEMMC_TIMING_CTRL_2 0x108 ++#define CRC_STATUS_CTRL 0x10c ++#define BUF_CLK_CTRL_AND_STS 0x120 ++#define CKG_CTRL_AND_STS 0x124 ++#define POLARITY_CTRL 0x138 ++#define DPHY_CTRL 0x144 ++#define AXI_CAPACITY_CFG 0x158 ++#define C28_RFT_RAM_CFG 0x160 ++ ++/* ++ * Ant extended host controller registers. 
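++ * Note: these offsets differ from the plain Nebula register map in the #else branch below. ++ */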
++ */ ++#define SDHCI_EMMC_CTRL 0x128 ++#define SDHCI_ENH_STROBE_EN BIT(1) ++ ++#define SDHCI_EMMC_HW_RESET SDHCI_EMMC_CTRL ++#define SDHCI_EMMC_RST_N BIT(0) ++ ++#define SDHCI_AT_STAT 0x32c ++#define SDHCI_PHASE_SEL_MASK 0x0000001F ++ ++#define SDHCI_MULTI_CYCLE 0x130 ++#define SDHCI_EDGE_DETECT_STAT 0x134 ++#define SDHCI_FOUND_EDGE BIT(31) ++#define SDHCI_EDGE_DETECT_EN BIT(0) ++ ++#define NEBULA_CQE_OFS 0xC00 ++ ++#define SDHCI_ADMA2_DESC_LEN 16 ++ ++#define CRG_SAMP_DLL_OFFSET 0xC ++#define COMM_PHASE_SEL_SHIFT 15 ++#define COMM_PHASE_SEL_WIDTH 0x1F ++#define COMM_PHASE_SEL_MASK (COMM_PHASE_SEL_WIDTH << COMM_PHASE_SEL_SHIFT) ++ ++#else ++/* ++ * Nebula extended host controller registers. ++ */ ++#define SDHCI_EMMC_CTRL 0x52C ++#define SDHCI_CARD_IS_EMMC 0x0001 ++#define SDHCI_ENH_STROBE_EN 0x0100 ++ ++#define SDHCI_EMMC_HW_RESET 0x534 ++#define SDHCI_EMMC_RST_N BIT(0) ++ ++#define SDHCI_AT_CTRL 0x540 ++#define SDHCI_SAMPLE_EN 0x00000010 ++ ++#define SDHCI_AXI_MBIU_CTRL 0x510 ++#define SDHCI_UNDEFL_INCR_EN 0x1 ++ ++#define SDHCI_AT_STAT 0x544 ++#define SDHCI_PHASE_SEL_MASK 0x000000FF ++ ++#define SDHCI_MULTI_CYCLE 0x54C ++#define SDHCI_EDGE_DETECT_STAT SDHCI_MULTI_CYCLE ++#define SDHCI_FOUND_EDGE (0x1 << 11) ++#define SDHCI_EDGE_DETECT_EN (0x1 << 8) ++#define SDHCI_DOUT_EN_F_EDGE (0x1 << 6) ++#define SDHCI_DATA_DLY_EN (0x1 << 3) ++#define SDHCI_CMD_DLY_EN (0x1 << 2) ++ ++#define SDHCI_MSHC_CTRL_R 0x508 ++#define SDHCI_DEBUG1_PORT 0x520 ++#define SDHCI_DEBUG2_PORT 0x524 ++#define SDHCI_GP_OUT_R 0x534 ++#define SDHCI_EMAX_R 0x548 ++#define SDHCI_MUTLI_CYCLE_EN 0x54C ++ ++#define NEBULA_CQE_OFS 0x180 ++#endif ++ ++#define SDHCI_DETECT_POLARITY BIT(3) ++#define SDHCI_PWR_EN_POLARITY BIT(5) ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++#define SDHCI_CAN_DO_ADMA3 0x08000000 ++#define SDHCI_CLOCK_PLL_EN 0x0008 ++#endif ++ ++#define CONFIG_SDHCI_NEBULA_DFX ++#define NEBULA_DFX_BT_MAX_NUM 8 ++ ++#define INVALID_DATA 0xFFFFFFFF ++ ++#define MCI_SLOT_NUM 4 ++ ++enum mmc_crg_type { ++ CRG_CLK_RST = 0, ++ CRG_DLL_RST, ++ CRG_DRV_DLL, ++ CRG_DLL_STA, ++ CRG_TYPE_MAX, ++}; ++ ++enum phase_type { ++ DRV_PHASE = 0, ++ SAMP_PHASE, ++ PHASE_MAX, ++}; ++ ++enum mmc_io_type { ++ MMC_IO_TYPE_IO, ++ MMC_IO_TYPE_GPIO, ++ MMC_IO_TYPE_MAX, ++}; ++ ++enum mmc_dev_type { ++ MMC_DEV_TYPE_MMC_0 = 0, ++ MMC_DEV_TYPE_SDIO_0, ++ MMC_DEV_TYPE_SDIO_1, ++ MMC_DEV_TYPE_MAX, ++}; ++ ++/** ++ * mmc spec define 5 io types, ++ * data line with 8 bit width ++ */ ++enum io_type { ++ IO_TYPE_CLK = 0, ++ IO_TYPE_CMD, ++ IO_TYPE_RST, ++ IO_TYPE_DET = IO_TYPE_RST, ++ IO_TYPE_DQS, ++ IO_TYPE_PWE = IO_TYPE_DQS, ++ IO_TYPE_DATA, ++ IO_TYPE_MAX, ++ IO_TYPE_D0 = IO_TYPE_DATA, ++ IO_TYPE_D1 = IO_TYPE_MAX, ++ IO_TYPE_D2, ++ IO_TYPE_D3, ++ IO_TYPE_D4, ++ IO_TYPE_D5, ++ IO_TYPE_D6, ++ IO_TYPE_D7, ++ IO_TYPE_DMAX, ++}; ++ ++/** ++ * struct mmc_timing_s - nebula host timing data array ++ * @data_valid: this timing valid? 
++ * @timing: io timing array
++ * @phase: phase timing array
++ */
++typedef struct mmc_timing_s {
++    bool data_valid;
++    u32 timing[IO_TYPE_MAX];
++    u32 phase[PHASE_MAX];
++} nebula_timing;
++
++/**
++ * struct nebula_info_s - nebula host info data array
++ * @io_offset: io pin register offset array
++ * @io_drv_mask: io pin driver cap configure mask
++ * @io_drv_str_bit_ofs: io pin driver strength bit offset
++ * @io_drv_str_mask: io pin driver strength mask
++ * @io_drv_sr_bit_ofs: io pin slew rate bit offset
++ * @io_drv_sr_mask: io pin slew rate mask
++ * @crg_ofs: host crg register offset array
++ * @zq_phy_addr: zq resistance calibration physical addr
++ * @volt_sw_phy_addr: voltage switch ctrl physical addr
++ * @bus_width_phy_addr: get bus width physical addr
++ * @qboot_phy_addr: emmc quick boot parameters physical addr
++ * @qboot_param1_ofs: emmc quick boot parameter1 offset
++ * @timing_size: host fixed timing size
++ * @timing: host fixed timing pointer
++ */
++typedef struct nebula_info_s {
++    u32 io_offset[IO_TYPE_DMAX];
++    u32 io_drv_mask;
++    u32 io_drv_str_bit_ofs;
++    u32 io_drv_str_mask;
++    u32 io_drv_sr_bit_ofs;
++    u32 io_drv_sr_mask;
++    u32 crg_ofs[CRG_TYPE_MAX];
++    phys_addr_t zq_phy_addr;
++    phys_addr_t volt_sw_phy_addr;
++    phys_addr_t bus_width_phy_addr;
++    phys_addr_t qboot_phy_addr;
++    u32 qboot_param1_ofs;
++    u32 timing_size;
++    nebula_timing *timing;
++} nebula_info;
++
++/**
++ * struct nebula_crg_mask_s - nebula host crg mask info
++ * @crg_srst_mask: reset/unreset mmc controller mask
++ * @crg_cken_mask: mmc controller clock enable mask
++ * @crg_clk_sel_ofs: mmc controller clock select bit offset
++ * @crg_clk_sel_mask: mmc controller clock select mask
++ * @dll_srst_mask: dll reset/unreset mask
++ * @p4_lock_mask: wait p4 lock status mask
++ * @dll_ready_mask: wait dll ready mask
++ * @samp_ready_mask: wait sample ready mask
++ * @drv_phase_mask: driver phase setting mask
++ */
++typedef struct nebula_crg_mask_s {
++    u32 crg_srst_mask;
++    u32 crg_clk_sel_ofs;
++    u32 crg_clk_sel_mask;
++    u32 crg_cken_mask;
++    u32 dll_srst_mask;
++    u32 p4_lock_mask;
++    u32 dll_ready_mask;
++    u32 samp_ready_mask;
++    u32 drv_phase_mask;
++    u32 volt_sw_en_mask;
++    u32 volt_sw_1v8_mask;
++} nebula_crg_mask;
++
++typedef struct nebula_cmd_info {
++    u32 opcode[NEBULA_DFX_BT_MAX_NUM];
++    u32 sp;
++} nebula_cmd_bt;
++
++typedef struct nebula_cap_s {
++    u32 help : 1;      // [0]
++    u32 log_level : 7; // [7:1]
++} nebula_cap;
++
++typedef struct nebula_plat_ops_s {
++    int (*plat_voltage_switch)(struct sdhci_host *host, struct mmc_ios *ios);
++} nebula_plat_ops;
++
++struct sdhci_nebula {
++    enum mmc_io_type io_type; /* io type: gpio or high speed io */
++    struct reset_control *crg_rst; /* reset handle for host controller */
++    struct reset_control *crg_tx;
++    struct reset_control *crg_rx;
++    struct reset_control *dll_rst;
++    struct reset_control *samp_rst;
++    struct regmap *crg_regmap; /* regmap for host controller */
++    struct regmap *iocfg_regmap;
++    const nebula_crg_mask *mask;
++    const nebula_info *info; /* io, timing, crg info */
++    struct clk *hclk; /* AHB clock */
++
++    u32 priv_cap;
++#define NEBULA_CAP_PM_RUNTIME (1 << 0) /* Support PM runtime */
++#define NEBULA_CAP_QUICK_BOOT (1 << 1) /* Support quick boot */
++#define NEBULA_CAP_RST_IN_DRV (1 << 2) /* Support reset in driver */
++#define NEBULA_CAP_VOLT_SW (1 << 3) /* Support voltage switch */
++#define NEBULA_CAP_ZQ_CALB (1 << 4) /* Support ZQ resistance calibration */
++#define NEBULA_CAP_NM_CARD (1 << 5) /* This controller has an NM
card */
++
++    u32 priv_quirk; /* Deviations from the SoC. */
++#define NEBULA_QUIRK_FPGA (1 << 0) /* FPGA board */
++#define NEBULA_QUIRK_SAMPLE_TURNING (1 << 1) /* edge tuning not supported */
++#define NEBULA_QUIRK_CD_INVERTED (1 << 2) /* card-detect signal is inverted */
++#define NEBULA_QUIRK_PWR_EN_INVERTED (1 << 3) /* power-enable signal is inverted */
++#define NEBULA_QUIRK_IO_CFG_WIDTH_BYTE (1 << 4) /* IO configuration width is one byte */
++
++    unsigned int devid; /* device id, mapping to enum mmc_dev_type */
++    unsigned int drv_phase;
++    unsigned int sample_phase;
++    unsigned int tuning_phase;
++    nebula_cmd_bt cmd_bt;
++    nebula_cap dfx_cap;
++    struct proc_dir_entry *proc_root;
++    struct proc_dir_entry *proc_stat;
++    void __iomem *qboot_virt_addr;
++    struct sdhci_nebula_fmea fmea;
++    nebula_plat_ops ops;
++};
++
++static inline void *nebula_priv(struct sdhci_host *host)
++{
++    return sdhci_pltfm_priv(sdhci_priv(host));
++}
++
++extern struct mmc_host *g_mci_host[MCI_SLOT_NUM];
++extern struct mmc_host *g_mmc_host[MCI_SLOT_NUM];
++
++/* APIs exported by the platform layer */
++void plat_extra_init(struct sdhci_host *host);
++void plat_set_drv_cap(struct sdhci_host *host);
++void plat_get_drv_samp_phase(struct sdhci_host *host);
++void plat_set_drv_phase(struct sdhci_host *host, u32 phase);
++void plat_dll_reset_assert(struct sdhci_host *host);
++void plat_dll_reset_deassert(struct sdhci_host *host);
++void plat_caps_quirks_init(struct sdhci_host *host);
++void plat_dump_io_info(struct sdhci_host *host);
++void plat_set_mmc_bus_width(struct sdhci_host *host);
++void plat_set_emmc_type(struct sdhci_host *host);
++void plat_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios);
++void plat_set_sample_phase(struct sdhci_host *host, unsigned int phase);
++u32 plat_get_sample_phase(struct sdhci_host *host);
++int plat_crg_init(struct sdhci_host *host);
++int plat_wait_sample_dll_ready(struct sdhci_host *host);
++int plat_wait_p4_dll_lock(struct sdhci_host *host);
++int plat_wait_ds_dll_ready(struct sdhci_host *host);
++int plat_host_init(struct platform_device *pdev, struct sdhci_host *host);
++int plat_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios);
++int plat_host_pre_init(struct platform_device *pdev, struct sdhci_host *host);
++int plat_resistance_calibration(struct sdhci_host *host);
++
++/*
++ * APIs exported for sdhci ops (by the adapter)
++ */
++void sdhci_nebula_extra_init(struct sdhci_host *host);
++void sdhci_nebula_set_uhs_signaling(struct sdhci_host *host, unsigned int timing);
++void sdhci_nebula_hw_reset(struct sdhci_host *host);
++void sdhci_nebula_set_clock(struct sdhci_host *host, unsigned int clk);
++int sdhci_nebula_execute_tuning(struct sdhci_host *host, u32 opcode);
++int sdhci_nebula_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios);
++unsigned int sdhci_nebula_get_max_clock(struct sdhci_host *host);
++int sdhci_nebula_pltfm_init(struct platform_device *pdev, struct sdhci_host *host);
++int sdhci_nebula_runtime_suspend(struct device *dev);
++int sdhci_nebula_runtime_resume(struct device *dev);
++int sdhci_nebula_add_host(struct sdhci_host *host);
++int sdhci_nebula_pltfm_suspend(struct device *dev);
++int sdhci_nebula_pltfm_resume(struct device *dev);
++u32 sdhci_nebula_irq(struct sdhci_host *host, u32 intmask);
++void sdhci_nebula_dump_vendor_regs(struct sdhci_host *host);
++void sdhci_nebula_set_bus_width(struct sdhci_host *host, int width);
++void sdhci_nebula_adma_write_desc(struct sdhci_host *host, void **desc,
++    dma_addr_t addr, int len, unsigned int cmd);
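++/* Note: sdhci_nebula_dump_vendor_regs and sdhci_nebula_adma_write_desc are
++ * only wired into g_sdhci_nebula_ops on kernels >= 5.10 and >= 5.4
++ * respectively (see the LINUX_VERSION_CODE guards where the ops struct is
++ * initialised).
++ */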
++void sdhci_nebula_reset(struct sdhci_host *host, u8 mask); ++ ++/* ++ * Export api by dfx ++ */ ++int sdhci_nebula_proc_init(struct sdhci_host *host); ++int sdhci_nebula_proc_shutdown(struct sdhci_host *host); ++void sdhci_nebula_dfx_irq(struct sdhci_host *host, u32 intmask); ++ ++#endif /* _DRIVERS_MMC_SDHCI_NEBULA_H */ +diff --git a/drivers/vendor/mmc/sdhci_shaolinsword.c b/drivers/vendor/mmc/sdhci_shaolinsword.c +new file mode 100644 +index 000000000..2946f8948 +--- /dev/null ++++ b/drivers/vendor/mmc/sdhci_shaolinsword.c +@@ -0,0 +1,826 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: Nebula SDHCI for shaolinsword ++ * Author: AuthorNameMagicTag ++ * Create: 2022-11-16 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "sdhci.h" ++#include "sdhci-pltfm.h" ++#include "nebula_fmea.h" ++ ++#define REG_BASE_EMMC_PHY 0xf8c50000 ++#define EMMC_PHY_INITCTRL (REG_BASE_EMMC_PHY + 0x4) ++#define EMMC_INIT_EN 0x1 ++#define EMMC_ZCAL_EN (0x1 << 3) ++#define INITCTRL_CHECK_TIMES 100 ++ ++#define MAX_TUNING_NUM 31 ++ ++#define SDHCI_EMMC_CTRL 0x52C ++#define SDHCI_EMMC_HW_RESET 0x534 ++#define SDHCI_AT_CTRL 0x540 ++#define SDHCI_AT_STAT 0X544 ++ ++#define MMC_CLK_200M 200000000 ++#define MMC_CLK_150M 150000000 ++#define MMC_CLK_120M 120000000 ++#define MMC_CLK_100M 100000000 ++#define MMC_CLK_50M 50000000 ++#define MMC_CLK_25M 25000000 ++#define MMC_CLK_400K 400000 ++#define MMC_CLK_100K 100000 ++#define MMC_CLK_NUM 8 ++ ++#define SDIO_CLK_50M 50000000 ++#define SDIO_CLK_25M 25000000 ++#define SDIO_CLK_400K 400000 ++#define SDIO_CLK_100K 100000 ++#define SDIO_CLK_NUM 4 ++ ++#define MMC_CLK_SEL_SHIFT 25 ++ ++#define EFUSE_FLAG0 0x840 ++#define MISC_CTRL20 0x978 ++#define MISC_CTRL28 0x1c ++ ++#define EMMC_PHY_INIT_CTRL 0x4 ++#define EMMC_PHY_DLY_CTL1 0x254 ++ ++#define EMMC_PHY_IOCTL_PUPD 0x260 ++#define RG_EMMC_PUPD_EN 0xbff ++#define RG_EMMC_PULL_UP 0xaff ++#define EMMC_PHY_IOCTL_RONSEL_1_0 0x264 ++#define RG_EMMC_RONSEL1 0xfff ++#define RG_EMMC_RONSEL0 0xfff ++#define EMMC_PHY_IOCTL_OD_RONSEL_2 0x268 ++#define EMMC_PHY_IOCTL_IOE 0x26c ++#define DA_EMMC_IE 0xfff ++#define DA_EMMC_E 0xeff ++ ++// MPLL Spread Spectrum Control register ++#define PERI_CRG110 0x1b8 ++ ++#define PERI_CRG231 0x39c ++#define PERI_CRG232 0x3a0 ++#define PERI_CRG233 0x3a4 ++#define PERI_CRG234 0x3a8 ++#define PERI_CRG237 0x3b4 ++#define PERI_CRG238 0x3b8 ++#define PERI_CRG239 0x3bc ++#define PERI_CRG240 0x3c0 ++#define PERI_CRG241 0x3c4 ++#define PERI_CRG242 0x3c8 ++ ++#define PERI_CRG211 0x34C ++#define PERI_CRG221 0x374 ++#define PERI_CRG222 0x378 ++ ++struct phase_param { ++ unsigned int value; ++ unsigned int length; ++}; ++ ++struct sdhci_hl_host { ++ struct platform_device *pdev; ++ void __iomem *core_mem; /* MSM SDCC mapped address */ ++ struct clk *clk; /* main SD/MMC bus clock */ ++ struct clk *pclk; /* SDHC peripheral bus clock */ ++ struct clk *bus_clk; /* SDHC bus voter clock */ ++ struct mmc_host *mmc; ++ struct sdhci_pltfm_data sdhci_hl_pdata; ++}; ++ ++struct sdhci_hl_priv { ++ void *crg_reg; ++ void *phy_reg; ++ void *freq_mem; ++ void *phase_mem; ++ unsigned int tuning_flag; ++ unsigned int emmc_resume; ++ int crg_tuning_val; ++ int last_mode; ++ int last_clk; ++ unsigned int devid; ++ nebula_fmea fmea; ++}; ++ ++struct loop_index { ++ int n; ++ int tuning_count; ++ int i; ++ int j; ++ int k; ++}; ++ ++static inline void *sdhci_get_pltfm_priv(struct sdhci_host *host) ++{ ++ return 
sdhci_pltfm_priv(sdhci_priv(host)); ++} ++ ++static int mmc_clock_index(struct sdhci_hl_priv *priv, int clock) ++{ ++ int i; ++ int mmc_clocks[MMC_CLK_NUM] = {MMC_CLK_200M, MMC_CLK_150M, ++ MMC_CLK_120M, MMC_CLK_100M, MMC_CLK_50M, ++ MMC_CLK_25M, MMC_CLK_400K, MMC_CLK_100K}; ++ ++ for (i = 0; i < MMC_CLK_NUM; i++) { ++ if (clock >= mmc_clocks[i]) { ++ return i; ++ } ++ } ++ ++ return -1; ++} ++ ++static int sdio_clock_index(struct sdhci_hl_priv *priv, int clock) ++{ ++ int i; ++ int sdio_clocks[SDIO_CLK_NUM] = {SDIO_CLK_50M, SDIO_CLK_25M, ++ SDIO_CLK_400K, SDIO_CLK_100K}; ++ ++ for (i = 0; i < SDIO_CLK_NUM; i++) { ++ if (clock >= sdio_clocks[i]) { ++ return i; ++ } ++ } ++ ++ return -1; ++} ++ ++static int get_clock_index(struct sdhci_hl_priv *priv, int clock) ++{ ++ if (priv->devid == 0) { ++ return mmc_clock_index(priv, clock); ++ } else { ++ return sdio_clock_index(priv, clock); ++ } ++} ++ ++static void set_sample_phase(struct sdhci_host *host, unsigned int sam_phase) ++{ ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(sdhci_priv(host)); ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHCI_AT_CTRL); ++ reg |= 1<<4; /* 4: Left Shift Value */ ++ sdhci_writel(host, reg, SDHCI_AT_CTRL); ++ ++ if (host->timing == MMC_TIMING_MMC_HS400) { ++ reg = sdhci_readl(host, SDHCI_AT_STAT); ++ reg &= ~0xff; ++ reg |= sam_phase; ++ sdhci_writel(host, reg, SDHCI_AT_STAT); ++ ++ reg = readl(priv->crg_reg + PERI_CRG237); ++ reg |= 1<<3; /* 3: Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG237); ++ ++ reg = readl(priv->crg_reg + PERI_CRG239); ++ reg |= 1<<3; /* 3: Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG239); ++ ++ reg = readl(priv->crg_reg + PERI_CRG233); ++ reg |= 1<<16; /* 16: Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG233); ++ } else if (priv->devid == 0) { ++ reg = sdhci_readl(host, SDHCI_AT_STAT); ++ reg &= ~0xFF; ++ sdhci_writel(host, reg, SDHCI_AT_STAT); ++ } else { ++ reg = sdhci_readl(host, SDHCI_AT_STAT); ++ reg &= ~0xFF; ++ reg |= sam_phase; ++ sdhci_writel(host, reg, SDHCI_AT_STAT); ++ } ++} ++ ++static int set_clock_control(struct sdhci_host *host) ++{ ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(sdhci_priv(host)); ++ unsigned long wait_time = 20; /* 20: delay */ ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg |= 1<<0; ++ sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); ++ ++ if (host->timing == MMC_TIMING_MMC_HS400) { ++ do { ++ reg = readl(priv->crg_reg + PERI_CRG240); ++ if (wait_time == 0) { ++ pr_err("%s: P4 DLL master no locked.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg & BIT(0)) == 0); ++ ++ wait_time = 20; /* 20: max timeout */ ++ do { ++ reg = readl(priv->crg_reg + PERI_CRG238); ++ if (wait_time == 0) { ++ pr_err("%s: Slave no Ready.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg & BIT(1)) == 0); ++ } ++ ++ reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg |= 1<<2; /* 2:delay */ ++ sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); ++ if (host->timing == MMC_TIMING_MMC_HS400) { ++ udelay(2); /* 2:delay */ ++ wait_time = 20; /* 20: max timeout */ ++ do { ++ reg = readl(priv->crg_reg + PERI_CRG234); ++ if (wait_time == 0) { ++ pr_err("%s: DS DLL master no locked.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg & BIT(0)) == 0); ++ ++ sdhci_writel(host, sdhci_readl(host, SDHCI_EMMC_CTRL) | 0x1, SDHCI_EMMC_CTRL); ++ ++ reg = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ reg |= 
0x7; ++ sdhci_writew(host, reg, SDHCI_HOST_CONTROL2); ++ } ++ ++ return 0; ++} ++ ++static void sdhci_set_clk(const struct sdhci_hl_priv *priv, u32 phase, u32 clk) ++{ ++ u32 reg; ++ ++ reg = readl(priv->freq_mem); ++ reg &= ~(0xF << MMC_CLK_SEL_SHIFT); ++ reg |= clk << MMC_CLK_SEL_SHIFT; ++ writel(reg, priv->freq_mem); // set clock ++ ++ reg = readl(priv->phase_mem); ++ reg &= ~(0xffffffff); ++ reg |= (phase | (0x10 << 8) | (0x8 << 16) | (0x8 << 24)); /* 8,16,24:Left Shift Value */ ++ writel(reg, priv->phase_mem); // set driver phase ++ return; ++} ++static int sdhci_hl_select_param(struct sdhci_host *host, int clock, u32 phase, u32 sam_phase, unsigned long timeout) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ int clk; ++ u32 reg; ++ ++ clk = get_clock_index(priv, clock); ++ ++ host->mmc->actual_clock = clock; ++ ++ if (priv->last_mode != host->timing || priv->last_clk != clk) ++ pr_debug("%s: current clock: %dHz, clk:0x%x, host->timing:0x%x\n", ++ mmc_hostname(host->mmc), clock, clk, host->timing); ++ ++ reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg &= ~(1<<0x2); ++ sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); ++ ++ udelay(timeout); // 200ns ++ ++ reg &= ~(1<<0); ++ sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); ++ ++ reg = readl(priv->freq_mem); ++ reg |= 1<<17 | 1<<18; /* 17,18: Left Shift Value */ ++ writel(reg, priv->freq_mem); ++ ++ if (host->timing == MMC_TIMING_LEGACY) ++ udelay(25); /* 25:delay */ ++ ++ sdhci_set_clk(priv, phase, clk); ++ ++ set_sample_phase(host, sam_phase); // set sample phase ++ udelay(timeout); ++ ++ if ((host->timing == MMC_TIMING_MMC_HS400) || (host->timing == MMC_TIMING_MMC_HS200)) { ++ reg = readl(priv->crg_reg + PERI_CRG242); ++ reg &= ~(1<<17 | 1<<18); /* 17,18:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG242); ++ } ++ ++ if (set_clock_control(host)) ++ return -1; ++ ++ reg = sdhci_readl(host, 0x54c); ++ reg &= ~(1<<2 | 1<<3); /* 2,3:Left Shift Value */ ++ sdhci_writel(host, reg, 0x54c); ++ ++ if (priv->devid == 0) { ++ reg = readl(priv->crg_reg + PERI_CRG231); ++ reg |= 1 << 2; /* 2:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG231); ++ } else { ++ reg = readl(priv->crg_reg + PERI_CRG211); ++ reg |= 1 << 2; /* 2:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG211); ++ } ++ ++ priv->last_mode = host->timing; ++ priv->last_clk = clk; ++ ++ return 0; ++} ++ ++static inline u32 mci_ror32(u32 word, unsigned int shift) ++{ ++ unsigned int c; ++ ++ if (shift >= 32) { /* 32:Comparison Value */ ++ shift = 0; ++ } ++ c = (u32)(word >> shift) & 0x1; ++ return (u8)c; ++} ++ ++static int get_tuning_candiates(struct sdhci_host *host, u32 opcode, u32 *candt) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ ++ int tuning_loop_counter = MAX_TUNING_NUM; ++ int wait_time = 20; /* 20:delay */ ++ u32 candiates = 0; ++ u32 reg_data; ++ u8 smpl = 0; ++ ++ do { ++ reg_data = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg_data &= ~(1<<2); /* 2:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_CLOCK_CONTROL); ++ ++ reg_data = sdhci_readl(host, SDHCI_AT_CTRL); ++ reg_data |= (1<<4); /* 4:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_AT_CTRL); ++ ++ reg_data = sdhci_readl(host, SDHCI_AT_STAT); ++ reg_data = MAX_TUNING_NUM - tuning_loop_counter; ++ sdhci_writel(host, reg_data, SDHCI_AT_STAT); ++ ++ wait_time = 20; /* 20: max timeout */ ++ do { ++ reg_data = 
readl(priv->crg_reg + PERI_CRG238); ++ if (wait_time == 0) { ++ pr_err("%s: SAM DLL Slave no Ready.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg_data & BIT(1)) == 0); ++ ++ reg_data = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg_data |= (1<<2); /* 2:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_CLOCK_CONTROL); ++ udelay(1); ++ ++ if (!mmc_send_tuning(host->mmc, opcode, NULL)) ++ candiates |= (1 << smpl); ++ smpl++; ++ } while (tuning_loop_counter--); ++ ++ *candt = candiates; ++ pr_info("%s: candiates: 0x%x\n", mmc_hostname(host->mmc), candiates); ++ ++ return 0; ++} ++ ++static int get_tuning_phase(struct sdhci_host *host, u32 candiates) ++{ ++ struct phase_param tuning_phase[16]; /* 16:arry size */ ++ struct phase_param tmp[1]; ++ ++ int tuning_val, tuning_next_val, tuning_sel; ++ int raise = -1; ++ int fall = -1; ++ int offset = 0; ++ struct loop_index tuning_loop_index = {0}; ++ ++ for (;;) { ++ tuning_val = mci_ror32(candiates, 0); ++ if (tuning_val) { ++ candiates = (candiates >> 1) | 1U << 31; /* 31:Left Shift Value */ ++ offset++; ++ } else { ++ break; ++ } ++ } ++ pr_debug("%s: fixed candiates: 0x%x\n", mmc_hostname(host->mmc), candiates); ++ for (; tuning_loop_index.tuning_count <= MAX_TUNING_NUM; tuning_loop_index.tuning_count++) { ++ tuning_val = mci_ror32(candiates, tuning_loop_index.tuning_count); ++ tuning_next_val = mci_ror32(candiates, tuning_loop_index.tuning_count + 1); ++ if (tuning_val > tuning_next_val) ++ fall = tuning_loop_index.tuning_count; ++ else if (tuning_next_val > tuning_val) ++ raise = tuning_loop_index.tuning_count; ++ ++ if (fall != -1 && raise != -1) { ++ tuning_phase[tuning_loop_index.n].length = (unsigned int)(fall - raise); ++ tuning_phase[tuning_loop_index.n].value = (unsigned int)(fall + raise + 1) / 2; /* 2:halved value */ ++ tuning_loop_index.n++; ++ fall = -1; ++ raise = -1; ++ } ++ } ++ ++ for (; tuning_loop_index.i < tuning_loop_index.n - 1; ++tuning_loop_index.i) { ++ for (; tuning_loop_index.j < tuning_loop_index.n - tuning_loop_index.i - 1; ++tuning_loop_index.j) { ++ if (tuning_phase[tuning_loop_index.j].length > tuning_phase[tuning_loop_index.j + 1].length) { ++ tmp[0] = tuning_phase[tuning_loop_index.j]; ++ tuning_phase[tuning_loop_index.j] = tuning_phase[tuning_loop_index.j + 1]; ++ tuning_phase[tuning_loop_index.j + 1] = tmp[0]; ++ } ++ } ++ } ++ ++ for (; tuning_loop_index.k < tuning_loop_index.n; tuning_loop_index.k++) ++ pr_debug("%d len: 0x%x val: 0x%x\n", ++ tuning_loop_index.k, tuning_phase[tuning_loop_index.k].length, tuning_phase[tuning_loop_index.k].value); ++ ++ tuning_sel = (tuning_phase[tuning_loop_index.n - 1].value + offset) % (MAX_TUNING_NUM + 1); ++ ++ pr_info("%s: tuning_sel: 0x%x\n", mmc_hostname(host->mmc), tuning_sel); ++ ++ return tuning_sel; ++} ++ ++static int sdhci_hl_tuning_normal_mode(struct sdhci_host *host, u32 opcode) ++{ ++ struct sdhci_hl_priv *priv = sdhci_get_pltfm_priv(host); ++ struct mmc_ios ios = host->mmc->ios; ++ ++ int wait_time = 20; /* 20:Left Shift Value */ ++ int tuning_sel; ++ u32 candiates = 0; ++ u32 reg_data; ++ ++ /* ++ * Tuning is required for SDR104, HS200 and HS400 cards and ++ * if clock frequency is greater than 100MHz in these modes. 
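++ * Note: only HS200 and SDR104 pass the check below; HS400 is not listed
++ * because its tuning runs while the bus is still in HS200 mode.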
++ */ ++ if (host->clock < 100 * 1000 * 1000 || /* 100,1000,1000:100MHz */ ++ !((ios.timing == MMC_TIMING_MMC_HS200) || (ios.timing == MMC_TIMING_UHS_SDR104))) ++ return 0; ++ ++ sdhci_writel(host, 0xffffffff, SDHCI_INT_STATUS); ++ ++ do { ++ reg_data = readl(priv->crg_reg + PERI_CRG240); ++ if (wait_time == 0) { ++ pr_err("%s: P4 DLL master no locked.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg_data & BIT(0)) == 0); ++ ++ /* 3:Left Shift Value */ ++ writel((readl(priv->crg_reg + PERI_CRG237) | (unsigned int)(1<<3)), priv->crg_reg + PERI_CRG237); ++ ++ if (get_tuning_candiates(host, opcode, &candiates)) ++ return -1; ++ ++ tuning_sel = get_tuning_phase(host, candiates); ++ ++ reg_data = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg_data &= ~(1<<2); /* 2:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_CLOCK_CONTROL); ++ ++ reg_data = sdhci_readl(host, SDHCI_AT_CTRL); ++ reg_data |= (1<<4); /* 4:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_AT_CTRL); ++ ++ reg_data = sdhci_readl(host, SDHCI_AT_STAT); ++ reg_data &= ~0xFF; ++ reg_data |= tuning_sel; ++ sdhci_writel(host, reg_data, SDHCI_AT_STAT); ++ ++ priv->crg_tuning_val = reg_data; ++ ++ wait_time = 20; /* 20: max timeout count */ ++ do { ++ reg_data = readl(priv->crg_reg + PERI_CRG238); ++ if (wait_time == 0) { ++ pr_err("%s: SAM DLL Slave no ready after sel tuning.\n", mmc_hostname(host->mmc)); ++ return -1; ++ } ++ wait_time--; ++ mdelay(1); ++ } while ((reg_data & BIT(1)) == 0); ++ ++ reg_data = sdhci_readl(host, SDHCI_CLOCK_CONTROL); ++ reg_data |= 1 << 2; /* 2:Left Shift Value */ ++ sdhci_writel(host, reg_data, SDHCI_CLOCK_CONTROL); ++ ++ return 0; ++} ++ ++static int sdhci_hl_execute_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ return sdhci_hl_tuning_normal_mode(host, opcode); ++} ++ ++/* Do ZQ resistance calibration for eMMC PHY IO */ ++static int hl_resistance_calibration(void) ++{ ++ int i = 0; ++ u32 reg_val; ++ void __iomem *viraddr; ++ ++ viraddr = ioremap(EMMC_PHY_INITCTRL, sizeof(u32)); ++ if (!viraddr) { ++ pr_err("resistance_calibration ioremap error.\n"); ++ return -ENOMEM; ++ } ++ reg_val = readl(viraddr); ++ reg_val |= EMMC_INIT_EN | EMMC_ZCAL_EN; ++ writel(reg_val, viraddr); ++ ++ for (i = 0; i < INITCTRL_CHECK_TIMES; i++) { ++ reg_val = readl(viraddr); ++ if ((reg_val & (EMMC_INIT_EN | EMMC_ZCAL_EN)) == 0) { ++ iounmap(viraddr); ++ return 0; ++ } ++ udelay(10); /* delay 10 us */ ++ } ++ ++ iounmap(viraddr); ++ return -ETIMEDOUT; ++} ++ ++static int sdhci_hl_priv_attr_init(struct sdhci_hl_priv *priv, struct sdhci_host *host) ++{ ++ priv->crg_tuning_val = -1; ++ priv->last_mode = -1; ++ priv->last_clk = -1; ++ ++ priv->crg_reg = ioremap(0xf8a22000, 0x1000); ++ if (priv->crg_reg == NULL) { ++ pr_err("%s: Failed to map crg_reg\n", mmc_hostname(host->mmc)); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static int sdhci_hl_pltfm_init(struct sdhci_host *host) ++{ ++ struct device *dev = mmc_dev(host->mmc); ++ struct device_node *np = dev->of_node; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ u32 reg; ++ ++ if (sdhci_hl_priv_attr_init(priv, host)) ++ return -EINVAL; ++ ++ if (of_property_read_u32(np, "devid", &priv->devid)) { ++ pr_err("%s: Failed to get devid\n", mmc_hostname(host->mmc)); ++ return -EINVAL; ++ } ++ ++ if (priv->devid == 1) { ++ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; ++ } else { ++ priv->phy_reg = ioremap(0xf8c50000, 0x1000); ++ if (priv->phy_reg == 
NULL) { ++ pr_err("%s: Failed to map phy_reg\n", mmc_hostname(host->mmc)); ++ return -EINVAL; ++ } ++ host->quirks2 &= ~SDHCI_QUIRK2_ACMD23_BROKEN; ++ } ++ ++ if (priv->devid == 0) { ++ hl_resistance_calibration(); ++ ++ priv->freq_mem = priv->crg_reg + PERI_CRG242; ++ priv->phase_mem = priv->crg_reg + PERI_CRG241; ++ ++ // Enable Spread Spectrum here to deal with clock pulse before tuning ++ // Disable SSMOD ++ reg = readl(priv->crg_reg + PERI_CRG110); ++ reg |= 1 << 2; /* 2:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG110); ++ ++ // Set clk disable and clk reset ++ reg = readl(priv->crg_reg + PERI_CRG110); ++ reg &= ~(1 << 0); ++ reg |= 1 << 1; ++ writel(reg, priv->crg_reg + PERI_CRG110); ++ ++ // Set ssmod parameters ++ reg = readl(priv->crg_reg + PERI_CRG110); ++ reg |= (2 << 9) | (1 << 3); /* 2,3,9:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG110); ++ ++ // Set clk enable and clk reset release ++ reg = readl(priv->crg_reg + PERI_CRG110); ++ reg |= 1 << 0; ++ reg &= ~(1 << 1); ++ writel(reg, priv->crg_reg + PERI_CRG110); ++ ++ // Enable SSMOD ++ reg = readl(priv->crg_reg + PERI_CRG110); ++ reg &= ~(1 << 2); /* 2:Left Shift Value */ ++ writel(reg, priv->crg_reg + PERI_CRG110); ++ } else { ++ priv->freq_mem = priv->crg_reg + PERI_CRG222; ++ priv->phase_mem = priv->crg_reg + PERI_CRG221; ++ } ++ ++ return 0; ++} ++ ++static void sdhci_hl_set_clock(struct sdhci_host *host, unsigned int clock) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ ++ if (clock == 0) ++ return; ++ switch (host->timing) { ++ case MMC_TIMING_MMC_HS: ++ sdhci_hl_select_param(host, clock, 0x10, 0x4, 25); /* 25:Actual parameter value */ ++ break; ++ case MMC_TIMING_SD_HS: ++ sdhci_hl_select_param(host, clock, 0x10, 0x4, 25); /* 25:Actual parameter value */ ++ break; ++ case MMC_TIMING_MMC_HS200: ++ sdhci_hl_select_param(host, clock, 0x10, 0, 25); /* 25:Actual parameter value */ ++ break; ++ case MMC_TIMING_MMC_HS400: ++ if (priv->crg_tuning_val >= 0) { ++ sdhci_hl_select_param(host, clock, 0x8, priv->crg_tuning_val, 10); /* 10:Actual parameter value */ ++ } else { ++ pr_err("%s: tuning value is wrong\n", mmc_hostname(host->mmc)); ++ return; ++ } ++ break; ++ default: ++ sdhci_hl_select_param(host, clock, 0x10, 0, 25); /* 25:Actual parameter value */ ++ udelay(75); /* 75:delay */ ++ break; ++ } ++} ++ ++static void sdhci_hl_set_drv_cap(struct sdhci_host *host) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ ++ /* driver capacity */ ++ if (priv->devid == 0) { ++ if ((host->timing == MMC_TIMING_MMC_HS400) || (host->timing == MMC_TIMING_MMC_HS200)) { ++ /* DRV28 */ ++ writel((RG_EMMC_RONSEL1 << 16) | RG_EMMC_RONSEL0, /* 16:Left Shift Value */ ++ priv->phy_reg + EMMC_PHY_IOCTL_RONSEL_1_0); ++ /* DRV28, OD = 0 */ ++ writel(0xfff, priv->phy_reg + EMMC_PHY_IOCTL_OD_RONSEL_2); ++ } else { ++ /* DRV50 */ ++ writel((RG_EMMC_RONSEL1 << 16) | RG_EMMC_RONSEL0, /* 16:Left Shift Value */ ++ priv->phy_reg + EMMC_PHY_IOCTL_RONSEL_1_0); ++ /* DRV50, OD = 0 */ ++ writel(0x0, priv->phy_reg + EMMC_PHY_IOCTL_OD_RONSEL_2); ++ } ++ } ++} ++ ++static void sdhci_hl_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ++{ ++ sdhci_set_uhs_signaling(host, timing); ++ host->timing = timing; ++ ++ sdhci_hl_set_drv_cap(host); ++} ++ ++static void sdhci_hl_hw_reset(struct sdhci_host *host) ++{ ++ sdhci_writel(host, 0x0, SDHCI_EMMC_HW_RESET); ++ udelay(10); /* 
10:delay */ ++ sdhci_writel(host, 0x1, SDHCI_EMMC_HW_RESET); ++ udelay(200); /* 200:delay */ ++} ++ ++static const struct of_device_id sdhci_hl_dt_match[] = { ++ { .compatible = "huanglong,sdhci" }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, sdhci_hl_dt_match); ++ ++static struct sdhci_ops sdhci_hl_ops = { ++ .platform_execute_tuning = sdhci_hl_execute_tuning, ++ .reset = sdhci_reset, ++ .set_clock = sdhci_hl_set_clock, ++ .set_bus_width = sdhci_set_bus_width, ++ .set_uhs_signaling = sdhci_hl_set_uhs_signaling, ++ .hw_reset = sdhci_hl_hw_reset, ++}; ++ ++static const struct sdhci_pltfm_data sdhci_hl_pdata = { ++ .ops = &sdhci_hl_ops, ++ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, ++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | ++ SDHCI_QUIRK2_BROKEN_DDR50, ++}; ++ ++static int sdhci_hl_probe(struct platform_device *pdev) ++{ ++ struct sdhci_host *host = NULL; ++ int ret = 0; ++ struct sdhci_pltfm_host *pltfm_host = NULL; ++ struct sdhci_hl_priv *priv = NULL; ++ struct sdhci_hl_host *hl_host; ++ ++ hl_host = devm_kzalloc(&pdev->dev, sizeof(*hl_host), GFP_KERNEL); ++ if (hl_host == NULL) ++ return -ENOMEM; ++ ++ hl_host->sdhci_hl_pdata.ops = &sdhci_hl_ops; ++ hl_host->sdhci_hl_pdata.quirks = sdhci_hl_pdata.quirks; ++ hl_host->sdhci_hl_pdata.quirks2 = sdhci_hl_pdata.quirks2; ++ host = sdhci_pltfm_init(pdev, &hl_host->sdhci_hl_pdata, sizeof(struct sdhci_hl_priv)); ++ if (IS_ERR(host)) ++ return PTR_ERR(host); ++ ret = sdhci_hl_pltfm_init(host); ++ if (ret) ++ goto pltfm_free; ++ pltfm_host = sdhci_priv(host); ++ priv = sdhci_pltfm_priv(pltfm_host); ++ ++ ret = mmc_of_parse(host->mmc); ++ if (ret) ++ goto pltfm_free; ++ ++ sdhci_get_of_property(pdev); ++ ++ ret = sdhci_add_host(host); ++ if (ret) ++ goto pltfm_free; ++ ++#ifdef CONFIG_ANDROID_PRODUCT ++ sdhci_nebula_fmea_init(host, &priv->fmea); ++#endif ++ return 0; ++ ++pltfm_free: ++ sdhci_pltfm_free(pdev); ++ return ret; ++} ++ ++static int sdhci_hl_remove(struct platform_device *pdev) ++{ ++ struct sdhci_host *host = platform_get_drvdata(pdev); ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_hl_priv *priv = sdhci_pltfm_priv(pltfm_host); ++ int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == ++ 0xffffffff); ++#ifdef CONFIG_ANDROID_PRODUCT ++ sdhci_nebula_fmea_deinit(&priv->fmea); ++#endif ++ ++ iounmap(priv->crg_reg); ++ iounmap(priv->phy_reg); ++ sdhci_remove_host(host, dead); ++ sdhci_pltfm_free(pdev); ++ ++ return 0; ++} ++ ++static struct platform_driver sdhci_hl_driver = { ++ .probe = sdhci_hl_probe, ++ .remove = sdhci_hl_remove, ++ .driver = { ++ .name = "sdhci_hl", ++ .of_match_table = sdhci_hl_dt_match, ++ }, ++}; ++ ++module_platform_driver(sdhci_hl_driver); ++ ++MODULE_DESCRIPTION("SDHCI driver for HL"); ++MODULE_AUTHOR("CompanyNameMagicTag."); ++MODULE_LICENSE("GPL v2"); ++ +diff --git a/drivers/vendor/mmc/version.mak b/drivers/vendor/mmc/version.mak +new file mode 100644 +index 000000000..9b8d9c604 +--- /dev/null ++++ b/drivers/vendor/mmc/version.mak +@@ -0,0 +1 @@ ++SDHCI_NEBULA_KERNEL_VERSION="MMC_KERNEL 1.0.1" +diff --git a/drivers/vendor/npu/Kconfig b/drivers/vendor/npu/Kconfig +new file mode 100644 +index 000000000..f98678403 +--- /dev/null ++++ b/drivers/vendor/npu/Kconfig +@@ -0,0 +1,7 @@ ++ ++config VENDOR_NPU ++ bool "Vendor NPU Feature" ++ default n ++ help ++ Support Vendor NPU. 
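++	  This builds the npu_svm, npu_misc and smmu_power_on objects
++	  (see drivers/vendor/npu/Makefile).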
++ +diff --git a/drivers/vendor/npu/Makefile b/drivers/vendor/npu/Makefile +new file mode 100644 +index 000000000..e6f29554a +--- /dev/null ++++ b/drivers/vendor/npu/Makefile +@@ -0,0 +1,6 @@ ++ ++KBUILD_CFLAGS += -Werror ++ ++obj-$(CONFIG_VENDOR_NPU) += npu_svm.o ++obj-$(CONFIG_VENDOR_NPU) += npu_misc.o ++obj-$(CONFIG_VENDOR_NPU) += smmu_power_on.o +diff --git a/drivers/vendor/npu/npu_misc.c b/drivers/vendor/npu/npu_misc.c +new file mode 100644 +index 000000000..879365801 +--- /dev/null ++++ b/drivers/vendor/npu/npu_misc.c +@@ -0,0 +1,770 @@ ++/* ++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2021. All rights reserved. ++ * Description: npu misc ++ * Version: Initial Draft ++ * Create: 2020-01-16 ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "linux/securec.h" ++ ++#define NPU_MISC_DEVICE_NAME "nmsc" ++ ++#define NPU_MISC_IOCTL_GET_PHYS 0xfff4 ++#define NPU_MISC_IOCTL_ADD_DBF 0xfff5 ++#define NPU_MISC_IOCTL_RMV_DBF 0xfff6 ++ ++#ifdef MISC_MEM_DBG ++#define NPU_MISC_IOCTL_DATA_COPY_TEST 0xfff8 ++#define NPU_MISC_IOCTL_SHOW_MEM 0xfff9 ++#endif ++ ++#define NPU_DBF_INDEX_VALID_FLAG 0x0 ++ ++#define NPU_MODULE_ID 0xAA ++#define NPU_MISC_SIG_INDEX_VALUE 2048 ++ ++#define NPU_MISC_MAX_DMA_BUF_TABLE 10 ++#define NPU_MISC_MAX_INDEX_VALUE (NPU_MISC_SIG_INDEX_VALUE * NPU_MISC_MAX_DMA_BUF_TABLE) ++ ++struct nmsc_device { ++ unsigned long long id; ++ struct miscdevice miscdev; ++ struct device *dev; ++}; ++ ++/* dbf(dma buffer fd) process */ ++struct dbf_process { ++ unsigned int flag; ++ unsigned int index; ++ struct dma_buf *dma_buf; ++ struct rb_node rb_node; ++}; ++ ++struct data_copy_info { ++ unsigned long long src_addr_info; ++ unsigned long long dst_addr_info; ++ unsigned int src_size; ++ unsigned int dst_size; ++}; ++ ++struct kva_map_params { ++ unsigned long long user_va; /* addr info from user space */ ++ void *kva; /* kernel virtual addr */ ++ void *buf_handle; /* buffer handle, e.g. dma buffer handle */ ++}; ++ ++struct mem_show_params { ++ unsigned long long user_addr_info; ++ unsigned int size; ++ unsigned int flag; /* 0: show vitual mem, 1: show physical mem */ ++}; ++ ++static struct mutex dbf_process_mutex; ++ ++struct dma_buf_table_index { ++ unsigned int valid_index; ++ unsigned int cur_table_index; ++ struct dma_buf** buff_index_table[NPU_MISC_MAX_DMA_BUF_TABLE]; // index for dma_buf table . 
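++    /* tables beyond index 0 are allocated on demand in get_valid_index() */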
++}; ++ ++struct dfb_manager { ++ struct rb_root dfb_process_root; // rb tree to store dma_buffer info ++ struct dma_buf_table_index buf_table; ++}; ++ ++static struct dfb_manager dfb_man; ++ ++static char *nmsc_cmd_to_string(unsigned int cmd) ++{ ++ switch (cmd) { ++ case NPU_MISC_IOCTL_ADD_DBF: ++ return "add dma buffer fd"; ++ case NPU_MISC_IOCTL_RMV_DBF: ++ return "remove dma buffer fd"; ++#ifdef MISC_MEM_DBG ++ case NPU_MISC_IOCTL_GET_PHYS: ++ return "get phys"; ++ case NPU_MISC_IOCTL_DATA_COPY_TEST: ++ return "data copy test"; ++#endif ++ default: ++ return "unsupported"; ++ } ++} ++static struct nmsc_device *file_to_sdev(struct file *file) ++{ ++ return container_of(file->private_data, struct nmsc_device, miscdev); ++} ++#ifdef USE_ION ++ ++static void *kal_mem_handle_get(long fd, unsigned int module_id) ++{ ++ struct dma_buf *dmabuf = NULL; ++ ++ dmabuf = dma_buf_get(fd); ++ if (IS_ERR_OR_NULL(dmabuf)) { ++ pr_err("osal get handle failed!\n"); ++ return NULL; ++ } ++ ++ pr_debug("%s: module_id=%d get handle,ref:%pa,!\n", __func__, ++ module_id, &(dmabuf->file->f_count.counter)); ++ ++ return (void *)dmabuf; ++} ++ ++static void kal_mem_ref_put(void *handle, unsigned int module_id) ++{ ++ struct dma_buf *dmabuf = NULL; ++ ++ if (IS_ERR_OR_NULL(handle)) { ++ pr_err("%s, osal err args!\n", __func__); ++ return; ++ } ++ ++ dmabuf = (struct dma_buf *)handle; ++ dma_buf_put(dmabuf); ++ pr_debug("%s: module_id=%d put handle,ref:%pa,!\n", __func__, ++ module_id, &(dmabuf->file->f_count.counter)); ++ return; ++} ++ ++/* map cpu addr */ ++static void *kal_mem_kmap(void *handle, unsigned long offset, int cache) ++{ ++ void *virt = NULL; ++ struct dma_buf *dmabuf = NULL; ++ int ret; ++ ++ if (IS_ERR_OR_NULL(handle)) { ++ pr_err("%s, osal err args!\n", __func__); ++ return NULL; ++ } ++ dmabuf = (struct dma_buf *)handle; ++ ret = set_buffer_cached(dmabuf, cache); ++ if (ret) { ++ pr_err("osal set cache attr failed!\n"); ++ return NULL; ++ } ++ ++ virt = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT); ++ if (virt == NULL) { ++ set_buffer_cached(dmabuf, !cache); ++ pr_err("osal map failed!\n"); ++ return NULL; ++ } ++ ++ return virt; ++} ++ ++/* unmap cpu addr */ ++static void kal_mem_kunmap(void *handle, void *virt, unsigned long offset) ++{ ++ struct dma_buf *dmabuf = NULL; ++ ++ if (IS_ERR_OR_NULL(handle) || virt == NULL) { ++ pr_err("%s, osal err args!\n", __func__); ++ return; ++ } ++ ++ dmabuf = (struct dma_buf *)handle; ++ dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, virt); ++} ++struct dma_buf *npu_misc_get_dma_buf(unsigned int db_idx) ++{ ++ struct dma_buf *temp = NULL; ++ unsigned int table_index; ++ unsigned int dma_index; ++ if (db_idx >= NPU_MISC_MAX_INDEX_VALUE) { ++ pr_err("%s, db_idx err args!\n", __func__); ++ return NULL; ++ } ++ ++ mutex_lock(&dbf_process_mutex); ++ table_index = db_idx / NPU_MISC_SIG_INDEX_VALUE; ++ dma_index = db_idx % NPU_MISC_SIG_INDEX_VALUE; ++ if (table_index > dfb_man.buf_table.cur_table_index) { ++ pr_err("%s, db_idx = %d is out of range! 
current table index = %d.\n", ++ __func__, db_idx, dfb_man.buf_table.cur_table_index); ++ return NULL; ++ } ++ temp = dfb_man.buf_table.buff_index_table[table_index][dma_index]; ++ if (temp == NULL) ++ pr_err("ERROR: db_idx = %d, has no dmabuff stored!!!\n", db_idx); ++ ++ mutex_unlock(&dbf_process_mutex); ++ return temp; ++} ++EXPORT_SYMBOL_GPL(npu_misc_get_dma_buf); ++ ++ ++int npu_kva_map(struct kva_map_params *kva_para) ++{ ++ unsigned int db_idx; ++ unsigned int buf_offset; ++ void *dma_buf = NULL; ++ void *kva = NULL; ++ if (kva_para == NULL) { ++ pr_err("%s[%d]: kva_para is illegal\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ db_idx = (unsigned int)(kva_para->user_va >> 32); // high 32 bit is used to save dfb index ++ if ((db_idx >> 24) != NPU_DBF_INDEX_VALID_FLAG) { // flag in in offset 24 bit ++ pr_err("%s[%d]: invalid user addr info \n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ buf_offset = (unsigned int)(kva_para->user_va & 0xFFFFFFFF); ++ ++ dma_buf = (void *)npu_misc_get_dma_buf(db_idx & 0x00FFFFFF); ++ if (dma_buf == NULL) { ++ pr_err("%s[%d]: fail get dma buf handle\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ kva_para->buf_handle = dma_buf; ++ ++ kva = kal_mem_kmap(dma_buf, buf_offset, 0); ++ if (kva == NULL) { ++ pr_err("%s[%d]: fail to map src address \n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ kva_para->kva = kva + buf_offset % PAGE_SIZE; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(npu_kva_map); ++ ++ ++int npu_kva_unmap(struct kva_map_params *kva_para) ++{ ++ void *kva = NULL; ++ unsigned int buf_offset; ++ ++ if (kva_para->kva == NULL || kva_para->buf_handle == NULL) { ++ pr_err("%s[%d]: invalid parameters\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ buf_offset = (unsigned int)(kva_para->user_va & 0xFFFFFFFF); ++ kva = kva_para->kva - buf_offset % PAGE_SIZE; ++ kal_mem_kunmap((void *)kva_para->buf_handle, kva, buf_offset); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(npu_kva_unmap); ++ ++static struct dbf_process *find_dfb_process(struct dma_buf *dma_buf) ++{ ++ struct rb_node *node = dfb_man.dfb_process_root.rb_node; ++ ++ while (node != NULL) { ++ struct dbf_process *process = NULL; ++ process = rb_entry(node, struct dbf_process, rb_node); ++ if (dma_buf < process->dma_buf) ++ node = node->rb_left; ++ else if (dma_buf > process->dma_buf) ++ node = node->rb_right; ++ else ++ return process; ++ } ++ ++ return NULL; ++} ++ ++static void delete_dfb_process(struct dbf_process *process) ++{ ++ rb_erase(&process->rb_node, &dfb_man.dfb_process_root); ++ RB_CLEAR_NODE(&process->rb_node); ++} ++ ++static unsigned int get_valid_index(struct dma_buf_table_index *buff_table) ++{ ++ unsigned int index; ++ unsigned int update_index; ++ unsigned int table_index; ++ unsigned int dma_index; ++ ++ if (buff_table == NULL) { ++ pr_err("%s[%d], buff_table_index is null!!\n", __func__, __LINE__); ++ return -1; ++ } ++ ++ index = buff_table->valid_index; ++ update_index = index + 1; ++ while (update_index < NPU_MISC_MAX_INDEX_VALUE) { ++ table_index = update_index / NPU_MISC_SIG_INDEX_VALUE; ++ dma_index = update_index % NPU_MISC_SIG_INDEX_VALUE; ++ if (table_index <= buff_table->cur_table_index && buff_table->buff_index_table[table_index][dma_index] == NULL) { ++ break; ++ } else if (table_index > buff_table->cur_table_index) { ++ // malloc ++ dfb_man.buf_table.cur_table_index = table_index; ++ dfb_man.buf_table.buff_index_table[table_index] = ++ kzalloc(sizeof(struct dma_buf*) * NPU_MISC_SIG_INDEX_VALUE, GFP_ATOMIC); ++ if 
(dfb_man.buf_table.buff_index_table[table_index] == NULL) { ++ pr_err("devm_kzalloc failed, unable to malloc buff_index_table space.\n"); ++ return -1; ++ } ++ break; ++ } ++ ++ update_index++; ++ } ++ ++ if (index >= NPU_MISC_MAX_INDEX_VALUE) { ++ pr_err("no valid index can be used, current index = %d, max index = %d!!!\n", ++ index, NPU_MISC_MAX_INDEX_VALUE); ++ return -1; ++ } ++ ++ buff_table->valid_index = update_index; ++ return index; ++} ++ ++static int reset_index(unsigned int index) ++{ ++ unsigned int table_index; ++ unsigned int dma_index; ++ ++ table_index = index / NPU_MISC_SIG_INDEX_VALUE; ++ dma_index = index % NPU_MISC_SIG_INDEX_VALUE; ++ if (table_index > dfb_man.buf_table.cur_table_index) { ++ pr_err("%s, db_idx = %d is out of range! current table index = %d.\n", ++ __func__, index, dfb_man.buf_table.cur_table_index); ++ return -1; ++ } ++ ++ dfb_man.buf_table.buff_index_table[table_index][dma_index] = 0; ++ if (dfb_man.buf_table.valid_index > index) ++ dfb_man.buf_table.valid_index = index; ++ ++ return 0; ++} ++static int add_dfb_node(struct dma_buf *dma_buf, unsigned int *dfb_idx) ++{ ++ struct rb_node **p = &dfb_man.dfb_process_root.rb_node; ++ struct rb_node *parent = NULL; ++ struct dbf_process *process = NULL; ++ unsigned int table_index; ++ unsigned int dma_index; ++ ++ mutex_lock(&dbf_process_mutex); ++ while (*p) { ++ struct dbf_process *tmp_process = NULL; ++ parent = *p; ++ tmp_process = rb_entry(parent, struct dbf_process, rb_node); ++ if (dma_buf < tmp_process->dma_buf) { ++ p = &(*p)->rb_left; ++ } else if (dma_buf > tmp_process->dma_buf) { ++ p = &(*p)->rb_right; ++ } else { ++ *dfb_idx = tmp_process->index; // asid already in the tree. ++ mutex_unlock(&dbf_process_mutex); ++ return 0; ++ } ++ } ++ ++ process = kzalloc(sizeof(*process), GFP_ATOMIC); ++ if (process == NULL) { ++ pr_err("%s, Fail to kzalloc memory for dfb node!\n", __func__); ++ mutex_unlock(&dbf_process_mutex); ++ return -1; ++ } ++ ++ process->flag = 0xA5A5A5A5; ++ process->dma_buf = dma_buf; ++ ++ process->index = get_valid_index(&dfb_man.buf_table); ++ if (process->index == -1) { ++ mutex_unlock(&dbf_process_mutex); ++ pr_err("%s, line: %d, Fail to get valid index!\n", __func__, __LINE__); ++ return -1; ++ } ++ ++ table_index = process->index / NPU_MISC_SIG_INDEX_VALUE; ++ dma_index = process->index % NPU_MISC_SIG_INDEX_VALUE; ++ dfb_man.buf_table.buff_index_table[table_index][dma_index] = dma_buf; ++ ++ rb_link_node(&process->rb_node, parent, p); ++ rb_insert_color(&process->rb_node, &dfb_man.dfb_process_root); ++ *dfb_idx = process->index; ++ mutex_unlock(&dbf_process_mutex); ++ return 0; ++} ++ ++static int rmv_dfb_node(struct dma_buf *dma_buf) ++{ ++ int ret; ++ struct dbf_process *temp_process = NULL; ++ ++ mutex_lock(&dbf_process_mutex); ++ temp_process = find_dfb_process(dma_buf); ++ if (temp_process == NULL) { ++ pr_err("%s, Fail to find dfb process, no such dma buff!\n", __func__); ++ mutex_unlock(&dbf_process_mutex); ++ return -1; ++ } ++ ret = reset_index(temp_process->index); ++ if (ret != 0) { ++ pr_err("%s, Fail to reset index!\n", __func__); ++ mutex_unlock(&dbf_process_mutex); ++ return ret; ++ } ++ ++ delete_dfb_process(temp_process); ++ kfree(temp_process); ++ temp_process = NULL; ++ ++ mutex_unlock(&dbf_process_mutex); ++ return ret; ++} ++ ++static int npu_misc_dbf_add(unsigned long __user *arg) ++{ ++ int err, db_fd; ++ unsigned int db_idx; ++ void *dma_buf = NULL; ++ unsigned long user_addr_info; ++ ++ if (arg == NULL) ++ return -EINVAL; ++ ++ if 
(get_user(user_addr_info, arg)) ++ return -EFAULT; ++ ++ db_fd = (int)(user_addr_info & 0xFFFFFFFF); ++ dma_buf = kal_mem_handle_get(db_fd, NPU_MODULE_ID); ++ if (dma_buf == NULL) { ++ pr_err("%s[%d]: call osal_mem_handle_get failure\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ err = add_dfb_node(dma_buf, &db_idx); ++ if (err < 0) { ++ pr_err("%s[%d]: fail to add dfb node, err = %d\n", __FUNCTION__, __LINE__, err); ++ kal_mem_ref_put(dma_buf, NPU_MODULE_ID); ++ return -EFAULT; ++ } ++ ++ db_idx |= (NPU_DBF_INDEX_VALID_FLAG << 24); ++ ++ if (dma_buf != NULL) { ++ kal_mem_ref_put(dma_buf, NPU_MODULE_ID); ++ } ++ ++ return put_user(db_idx, arg); ++} ++ ++static int npu_misc_dbf_rmv(unsigned long __user *arg) ++{ ++ int err, db_fd; ++ void *dma_buf = NULL; ++ unsigned long dbf_value; ++ ++ if (arg == NULL) ++ return -EINVAL; ++ ++ if (get_user(dbf_value, arg)) ++ return -EFAULT; ++ ++ db_fd = (int)(dbf_value & 0xFFFFFFFF); ++ ++ dma_buf = kal_mem_handle_get(db_fd, NPU_MODULE_ID); ++ if (dma_buf == NULL) { ++ pr_err("%s[%d]: call osal_mem_handle_get failure\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ err = rmv_dfb_node((struct dma_buf *)dma_buf); ++ if (err < 0) { ++ pr_err("%s[%d]: fail to rmv dfb node\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ if (dma_buf != NULL) { ++ kal_mem_ref_put(dma_buf, NPU_MODULE_ID); ++ } ++ ++ return err; ++} ++#endif ++ ++#ifdef MISC_MEM_DBG ++static int npu_misc_data_copy_test(struct data_copy_info *copy_info) ++{ ++ int err; ++ struct kva_map_params src_kva_para = {0}; ++ struct kva_map_params dst_kva_para = {0}; ++ ++ src_kva_para.user_va = copy_info->src_addr_info; ++ err = npu_kva_map(&src_kva_para); ++ if (err != 0) { ++ pr_err("%s[%d]: Error: fail to kmap source address \n", __FUNCTION__, __LINE__); ++ err = -EFAULT; ++ goto __err_exit; ++ } ++ ++ dst_kva_para.user_va = copy_info->dst_addr_info; ++ err = npu_kva_map(&dst_kva_para); ++ if (err != 0) { ++ pr_err("%s[%d]: Error: fail to kmap source address \n", __FUNCTION__, __LINE__); ++ err = -EFAULT; ++ goto __err_exit; ++ } ++ ++ if (src_kva_para.kva + copy_info->src_size >= dst_kva_para.kva) { ++ pr_err("%s[%d]: Error: copy address override \n", __FUNCTION__, __LINE__); ++ err = -EFAULT; ++ goto __err_exit; ++ } ++ ++ err = memcpy_s(dst_kva_para.kva, copy_info->dst_size, src_kva_para.kva, copy_info->src_size); ++ ++__err_exit: ++ if (src_kva_para.kva != NULL) ++ npu_kva_unmap(&src_kva_para); ++ if (dst_kva_para.kva != NULL) ++ npu_kva_unmap(&dst_kva_para); ++ return err; ++} ++ ++static int npu_misc_show_mem(struct device *dev, struct mem_show_params *mem_params) ++{ ++ int err, db_fd; ++ unsigned int buf_offset, i; ++ void *dma_buf = NULL; ++ void *kva = NULL; ++ char *ptr = NULL; ++ ++ db_fd = (int)(mem_params->user_addr_info >> 32); // high 32 bit is used to save dma buffer fd ++ buf_offset = (unsigned int)(mem_params->user_addr_info & 0xFFFFFFFF); ++ ++ dma_buf = kal_mem_handle_get(db_fd, NPU_MODULE_ID); ++ if (dma_buf == NULL) { ++ pr_err("%s[%d]: call osal_mem_handle_get failure\n", __FUNCTION__, __LINE__); ++ return -EFAULT; ++ } ++ ++ kva = kal_mem_kmap(dma_buf, buf_offset, 0); ++ if (kva == NULL) { ++ pr_err("%s[%d]: fail to map src address \n", __FUNCTION__, __LINE__); ++ err = -EFAULT; ++ goto __err_exit; ++ } ++ ++ ptr = (char *)(uintptr_t)kva; ++ for (i = 0; i < mem_params->size; i++) { ++ if (i % 16 == 0) /* 16 bytes align for print output */ ++ dev_info(dev, "\n"); ++ ++ dev_info(dev, "0x%x ", ptr[i]); ++ } ++ dev_info(dev, "\n"); ++ ++ err = 0; 
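++    /* fall through to the common unmap/unref exit path */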
++__err_exit:
++    if (kva != NULL)
++        kal_mem_kunmap(dma_buf, kva, buf_offset);
++    if (dma_buf != NULL)
++        kal_mem_ref_put(dma_buf, NPU_MODULE_ID);
++    return err;
++}
++#endif
++
++static long npu_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++    int err = -EINVAL;
++    struct nmsc_device *sdev = file_to_sdev(file);
++#ifdef MISC_MEM_DBG
++    struct data_copy_info params;
++    struct mem_show_params mem_show;
++#endif
++
++    if (arg == 0)
++        return -EINVAL;
++
++    switch (cmd) {
++#ifdef USE_ION
++    case NPU_MISC_IOCTL_ADD_DBF:
++        err = npu_misc_dbf_add((unsigned long __user *)arg);
++        break;
++    case NPU_MISC_IOCTL_RMV_DBF:
++        err = npu_misc_dbf_rmv((unsigned long __user *)arg);
++        break;
++#endif
++#ifdef MISC_MEM_DBG
++    case NPU_MISC_IOCTL_DATA_COPY_TEST:
++        err = copy_from_user(&params, (void __user *)arg, sizeof(params));
++        if (err) {
++            dev_err(sdev->dev, "fail to copy params\n");
++            return -EFAULT;
++        }
++        err = npu_misc_data_copy_test(&params);
++        break;
++    case NPU_MISC_IOCTL_SHOW_MEM:
++        err = copy_from_user(&mem_show, (void __user *)arg, sizeof(mem_show));
++        if (err) {
++            dev_err(sdev->dev, "fail to copy params\n");
++            return -EFAULT;
++        }
++        err = npu_misc_show_mem(sdev->dev, &mem_show);
++        break;
++#endif
++    default:
++        err = -EINVAL;
++        break;
++    }
++
++    if (err)
++        dev_err(sdev->dev, "%s: %s failed err = %d\n", __func__, nmsc_cmd_to_string(cmd), err);
++
++    return err;
++}
++
++static int npu_misc_open(struct inode *inode, struct file *file)
++{
++    return 0;
++}
++
++static const struct file_operations npu_misc_fops = {
++    .owner = THIS_MODULE,
++    .open = npu_misc_open,
++    .unlocked_ioctl = npu_misc_ioctl,
++};
++
++static int npu_misc_device_probe(struct platform_device *pdev)
++{
++    int err;
++    struct device *dev = &pdev->dev;
++    struct nmsc_device *sdev = NULL;
++
++    sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
++    if (sdev == NULL)
++        return -ENOMEM;
++
++    sdev->dev = dev;
++    sdev->miscdev.minor = MISC_DYNAMIC_MINOR;
++    sdev->miscdev.fops = &npu_misc_fops;
++    sdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s", NPU_MISC_DEVICE_NAME);
++    if (sdev->miscdev.name == NULL)
++        return -ENOMEM;
++
++    dev_set_drvdata(dev, sdev);
++    err = misc_register(&sdev->miscdev);
++    if (err) {
++        dev_err(dev, "Unable to register misc device\n");
++        return err;
++    }
++
++    dfb_man.dfb_process_root = RB_ROOT;
++    dfb_man.buf_table.cur_table_index = 0;
++    dfb_man.buf_table.buff_index_table[0] = kzalloc(sizeof(struct dma_buf *) * NPU_MISC_SIG_INDEX_VALUE, GFP_ATOMIC);
++    if (dfb_man.buf_table.buff_index_table[0] == NULL) {
++        dev_err(dev, "kzalloc failed, unable to allocate buff_index_table space\n");
++        misc_deregister(&sdev->miscdev);
++        return -ENOMEM;
++    }
++    mutex_init(&dbf_process_mutex);
++
++    return err;
++}
++
++static int npu_misc_device_remove(struct platform_device *pdev)
++{
++    struct device *dev = &pdev->dev;
++    struct nmsc_device *sdev = dev_get_drvdata(dev);
++    int i;
++
++    for (i = 0; i <= dfb_man.buf_table.cur_table_index; i++)
++        kfree(dfb_man.buf_table.buff_index_table[i]);
++
++    misc_deregister(&sdev->miscdev);
++
++    return 0;
++}
++
++static const struct of_device_id npu_misc_of_match[] = {
++    { .compatible = "vendor,npu_misc_device_drv" },
++    { }
++};
++MODULE_DEVICE_TABLE(of, npu_misc_of_match);
++
++static struct platform_driver npu_misc_driver = {
++    .probe = npu_misc_device_probe,
++    .remove = npu_misc_device_remove,
++    .driver = {
++        .name = "npu_misc_device_drv",
++        .of_match_table = npu_misc_of_match,
++    },
++};
++
++static void npu_misc_dev_release(struct device *dev)
++{
++    return;
++}
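++/*
++ * The empty release callback above only keeps the driver core from warning
++ * about a missing release(); npu_misc_device below is static, so there is
++ * nothing to free.
++ */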
++
++static struct platform_device npu_misc_device = {
++    .name = "npu_misc_device_drv",
++    .id = -1,
++    .dev = {
++        .platform_data = NULL,
++        .release = npu_misc_dev_release,
++    },
++};
++
++static int __init npu_drv_misc_platform_init(void)
++{
++    int ret;
++
++    ret = platform_device_register(&npu_misc_device);
++    if (ret < 0) {
++        pr_err("failed to register npu misc platform device\n");
++        return ret;
++    }
++
++    ret = platform_driver_register(&npu_misc_driver);
++    if (ret) {
++        platform_device_unregister(&npu_misc_device);
++        pr_err("failed to register npu misc platform driver, ret=%d\n", ret);
++        return ret;
++    }
++
++    return ret;
++}
++module_init(npu_drv_misc_platform_init);
++
++static void __exit npu_drv_misc_platform_exit(void)
++{
++    platform_driver_unregister(&npu_misc_driver);
++    platform_device_unregister(&npu_misc_device);
++}
++module_exit(npu_drv_misc_platform_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("NPU MISC DRIVER");
++MODULE_VERSION("V1.0");
++
+diff --git a/drivers/vendor/npu/npu_svm.c b/drivers/vendor/npu/npu_svm.c
+new file mode 100644
+index 000000000..a5a5dda0a
+--- /dev/null
++++ b/drivers/vendor/npu/npu_svm.c
+@@ -0,0 +1,1413 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2021. All rights reserved.
++ * Description: npu svm
++ * Version: Initial Draft
++ * Create: 2020-01-16
++ */
++
++#include
++#include
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++#include "linux/vendor/sva_ext.h"
++
++#define SVM_DEVICE_NAME "svm"
++#define ASID_SHIFT 48
++#define CORE_SID 0 /* for core sid register */
++
++#define SVM_IOCTL_PROCESS_BIND 0xffff
++#define SVM_IOCTL_PAGE_TABLE_SYNC 0xfffd
++
++struct core_device {
++    struct device dev;
++    struct iommu_group *group;
++    struct iommu_domain *domain;
++    u8 smmu_bypass;
++    struct list_head entry;
++};
++
++struct svm_device {
++    unsigned long long id;
++    struct miscdevice miscdev;
++    struct device *dev;
++    phys_addr_t l2buff;
++    unsigned long l2size;
++};
++
++struct svm_bind_process {
++    pid_t vpid;
++    u64 ttbr;
++    u64 tcr;
++    int pasid;
++#define SVM_BIND_PID (1 << 0)
++    u32 flags;
++};
++
++struct svm_pg_sync_para {
++    u64 vaddr;
++    u32 len;
++};
++
++/*
++ * svm_process is released in svm_notifier_release() when the mm refcnt
++ * goes down to 0. We should access svm_process only in a context where
++ * the mm_struct is valid, which means we should always take an mm
++ * refcnt first (unless we are operating on the current task).
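++ * The rcu head embedded in the struct below supports this postponed
++ * release.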
++ */ ++struct svm_process { ++ struct pid *pid; ++ struct mm_struct *mm; ++ unsigned long asid; ++ struct rb_node rb_node; ++ struct mmu_notifier notifier; ++ /* For postponed release */ ++ struct rcu_head rcu; ++ int pasid; ++ struct mutex mutex; ++ struct svm_device *sdev; ++ struct iommu_sva *handle; ++}; ++ ++typedef void (*smmu_clk_live_func)(void); ++ ++static smmu_clk_live_func g_smmu_clk_live_enter = NULL; ++static smmu_clk_live_func g_smmu_clk_live_exit = NULL; ++ ++#define SVM_DEV_MAX 2 ++static struct rb_root svm_process_root[SVM_DEV_MAX] = {RB_ROOT, RB_ROOT}; ++ ++static struct mutex svm_process_mutex; ++ ++static DECLARE_RWSEM(svm_sem); ++ ++static unsigned int probe_index = 0; ++ ++static void *npu_dts_sys_peri = NULL; ++#define NPU_SVM_DEV_NAME "svm_npu" ++#define NPU_SMMU_DEV_NAME "smmu_npu" ++#define NPU_CRG_NAME "npu_crg_6560" ++ ++#if defined(CONFIG_ARCH_SS928V100) || defined(CONFIG_ARCH_SS927V100) ++#define SVP_NPU_SVM_DEV_NAME "svm_pqp" ++#define SVP_NPU_SMMU_DEV_NAME "smmu_pqp" ++#define SVP_NPU_CRG_NAME "pqp_crg_6592" ++#endif ++ ++#define SVM_DEV_NAME_LEN 64 ++#define CRG_NAME_LEN 16 ++#define CLK_EN_BIT 4 ++ ++struct svm_dev_wl_mng { ++ char svm_dev_name[SVM_DEV_NAME_LEN]; ++ char smmu_dev_name[SVM_DEV_NAME_LEN]; ++ char crg_name[CRG_NAME_LEN]; ++ int crg_offset; ++ bool is_inited; ++ bool is_suspend; ++ void *dev; ++}; ++static struct mutex svm_dev_pm_mutex; ++static struct mutex svm_open_bind_mutex; ++ ++static struct svm_dev_wl_mng svm_dev_white_list[SVM_DEV_MAX] = { ++ { NPU_SVM_DEV_NAME, NPU_SMMU_DEV_NAME, NPU_CRG_NAME, 0, false, false, NULL }, ++ { SVP_NPU_SVM_DEV_NAME, SVP_NPU_SMMU_DEV_NAME, SVP_NPU_CRG_NAME, 0, false, false, NULL } ++}; ++ ++static char *svm_cmd_to_string(unsigned int cmd) ++{ ++ switch (cmd) { ++ case SVM_IOCTL_PROCESS_BIND: ++ return "bind"; ++ case SVM_IOCTL_PAGE_TABLE_SYNC: ++ return "sync page table"; ++ default: ++ return "unsupported"; ++ } ++} ++ ++static int svm_device_get_smmu_devno(struct svm_device *sdev) ++{ ++ int i; ++ const char *device_name = dev_name(sdev->dev); ++ ++ if (device_name == NULL) ++ return -1; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ if (strnstr(device_name, svm_dev_white_list[i].svm_dev_name, strlen(device_name)) != NULL) ++ return i; ++ } ++ return -1; ++} ++ ++static struct svm_process *find_svm_process(unsigned long asid, int smmu_devid) ++{ ++ struct rb_node *node = svm_process_root[smmu_devid].rb_node; ++ ++ while (node != NULL) { ++ struct svm_process *process = NULL; ++ ++ process = rb_entry(node, struct svm_process, rb_node); ++ if (asid < process->asid) { ++ node = node->rb_left; ++ } else if (asid > process->asid) { ++ node = node->rb_right; ++ } else { ++ return process; ++ } ++ } ++ ++ return NULL; ++} ++ ++static void insert_svm_process(struct svm_process *process, int smmu_devid) ++{ ++ struct rb_node **p = &svm_process_root[smmu_devid].rb_node; ++ struct rb_node *parent = NULL; ++ ++ while (*p) { ++ struct svm_process *tmp_process = NULL; ++ ++ parent = *p; ++ tmp_process = rb_entry(parent, struct svm_process, rb_node); ++ if (process->asid < tmp_process->asid) { ++ p = &(*p)->rb_left; ++ } else if (process->asid > tmp_process->asid) { ++ p = &(*p)->rb_right; ++ } else { ++ WARN_ON_ONCE(1); ++ return; ++ } ++ } ++ ++ rb_link_node(&process->rb_node, parent, p); ++ rb_insert_color(&process->rb_node, &svm_process_root[smmu_devid]); ++} ++ ++static void delete_svm_process(struct svm_process *process) ++{ ++ int smmu_devid; ++ ++ smmu_devid = svm_device_get_smmu_devno(process->sdev); ++ if 
(smmu_devid < 0) {
++		pr_err("failed to get smmu dev number\n");
++		return;
++	}
++
++	rb_erase(&process->rb_node, &svm_process_root[smmu_devid]);
++	RB_CLEAR_NODE(&process->rb_node);
++}
++
++struct bus_type svm_bus_type = {
++	.name = "svm-bus",
++};
++
++static inline struct core_device *to_core_device(struct device *d)
++{
++	return container_of(d, struct core_device, dev);
++}
++
++static int svm_unbind_core(struct device *dev, void *data)
++{
++	struct svm_process *process = data;
++	struct core_device *cdev = to_core_device(dev);
++
++	if (cdev->smmu_bypass)
++		return 0;
++	if (!process->handle)
++		return -EINVAL;
++
++	iommu_sva_unbind_device(process->handle);
++	process->handle = NULL;
++	return 0;
++}
++
++static int svm_bind_core(struct device *dev, void *data)
++{
++	struct task_struct *task = NULL;
++	struct svm_process *process = data;
++	struct core_device *cdev = to_core_device(dev);
++	struct iommu_sva *handle;
++
++	if (cdev->smmu_bypass)
++		return 0;
++
++	task = get_pid_task(process->pid, PIDTYPE_PID);
++	if (task == NULL) {
++		pr_err("failed to get task_struct\n");
++		return -ESRCH;
++	}
++
++	handle = iommu_sva_bind_device(&cdev->dev, task->mm);
++	if (IS_ERR(handle)) {
++		pr_err("failed to bind the device\n");
++		put_task_struct(task);
++		return PTR_ERR(handle);
++	}
++
++	process->pasid = iommu_sva_get_pasid(handle);
++	if (process->pasid == IOMMU_PASID_INVALID) {
++		iommu_sva_unbind_device(handle);
++		put_task_struct(task);
++		return -ENODEV;
++	}
++
++	process->handle = handle;
++	put_task_struct(task);
++
++	return 0;
++}
++
++static void svm_bind_cores(struct svm_process *process)
++{
++	mutex_lock(&svm_open_bind_mutex);
++	device_for_each_child(process->sdev->dev, process, svm_bind_core);
++	mutex_unlock(&svm_open_bind_mutex);
++}
++
++static void svm_unbind_cores(struct svm_process *process)
++{
++	mutex_lock(&svm_open_bind_mutex);
++	device_for_each_child(process->sdev->dev, process, svm_unbind_core);
++	mutex_unlock(&svm_open_bind_mutex);
++}
++
++static void cdev_device_release(struct device *dev)
++{
++	struct core_device *cdev = to_core_device(dev);
++
++	kfree(cdev);
++}
++
++static int svm_remove_core(struct device *dev, void *data)
++{
++	int err;
++	struct core_device *cdev = to_core_device(dev);
++
++	if (cdev->smmu_bypass == 0) {
++		err = iommu_dev_disable_feature(&cdev->dev, IOMMU_DEV_FEAT_SVA);
++		if (err) {
++			dev_err(&cdev->dev, "failed to disable sva feature, %d\n", err);
++		}
++		err = iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
++		if (err) {
++			dev_err(&cdev->dev, "failed to disable iopf feature, %d\n", err);
++		}
++		iommu_detach_group(cdev->domain, cdev->group);
++		iommu_group_put(cdev->group);
++		iommu_domain_free(cdev->domain);
++	}
++	device_unregister(&cdev->dev);
++
++	return 0;
++}
++
++static int svm_register_device(struct svm_device *sdev, struct device_node *np, struct core_device **pcdev)
++{
++	int err;
++	char *name = NULL;
++	struct core_device *cdev = NULL;
++
++	name = devm_kasprintf(sdev->dev, GFP_KERNEL, "svm%llu_%s", sdev->id, np->name);
++	if (name == NULL)
++		return -ENOMEM;
++
++	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
++	if (cdev == NULL)
++		return -ENOMEM;
++
++	cdev->dev.of_node = np;
++	cdev->dev.parent = sdev->dev;
++	cdev->dev.bus = &svm_bus_type;
++	cdev->dev.release = cdev_device_release;
++	cdev->smmu_bypass = of_property_read_bool(np, "vendor,smmu_bypass");
++	dev_set_name(&cdev->dev, "%s", name);
++
++	err = device_register(&cdev->dev);
++	if (err) {
++		dev_err(&cdev->dev, "core_device register failed\n");
++		/* device_register() took a reference; put_device() drops it and frees cdev via release */
++		put_device(&cdev->dev);
++		return err;
++	}
++	*pcdev = cdev;
++	return 0;
++}
++
++static int svm_iommu_attach_group(struct svm_device *sdev, struct core_device *cdev)
++{
++	int err;
++
++	cdev->group = iommu_group_get(&cdev->dev);
++	if (IS_ERR_OR_NULL(cdev->group)) {
++		dev_err(&cdev->dev, "smmu is not configured correctly\n");
++		return -ENXIO;
++	}
++
++	cdev->domain = iommu_domain_alloc(sdev->dev->bus);
++	if (cdev->domain == NULL) {
++		dev_err(&cdev->dev, "failed to alloc domain\n");
++		iommu_group_put(cdev->group);
++		return -ENOMEM;
++	}
++
++	err = iommu_attach_group(cdev->domain, cdev->group);
++	if (err) {
++		dev_err(&cdev->dev, "failed to attach group to domain\n");
++		iommu_group_put(cdev->group);
++		iommu_domain_free(cdev->domain);
++		return err;
++	}
++
++	return 0;
++}
++
++int iommu_request_dm_for_dev(struct device *dev);
++static int svm_of_add_core(struct svm_device *sdev, struct device_node *np)
++{
++	int err;
++	struct resource res;
++	struct core_device *cdev = NULL;
++
++	err = svm_register_device(sdev, np, &cdev);
++	if (err) {
++		/* cdev is not valid on failure, so report against the parent */
++		dev_err(sdev->dev, "failed to register svm core device\n");
++		return err;
++	}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
++	err = of_dma_configure(&cdev->dev, np, true);
++#else
++	err = of_dma_configure(&cdev->dev, np);
++#endif
++	if (err) {
++		dev_dbg(&cdev->dev, "of_dma_configure failed\n");
++		goto err_unregister_dev;
++	}
++
++	err = of_address_to_resource(np, 0, &res);
++	if (err)
++		dev_info(&cdev->dev, "no reg, FW should install the sid\n");
++
++	/* If core device is smmu bypass, request direct map. */
++	if (cdev->smmu_bypass) {
++		err = iommu_request_dm_for_dev(&cdev->dev);
++		if (err)
++			dev_err(&cdev->dev, "request domain for dev error\n");
++
++		return err;
++	}
++	err = svm_iommu_attach_group(sdev, cdev);
++	if (err) {
++		dev_err(&cdev->dev, "failed to init sva device\n");
++		goto err_unregister_dev;
++	}
++
++	err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_IOPF);
++	if (err) {
++		dev_err(&cdev->dev, "failed to enable iopf feature, %d\n", err);
++		goto err_detach_group;
++	}
++
++	err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_SVA);
++	if (err) {
++		dev_err(&cdev->dev, "failed to enable sva feature, %d\n", err);
++		goto err_detach_group;
++	}
++
++	return 0;
++err_detach_group:
++	iommu_detach_group(cdev->domain, cdev->group);
++	iommu_domain_free(cdev->domain);
++	iommu_group_put(cdev->group);
++err_unregister_dev:
++	device_unregister(&cdev->dev);
++	return err;
++}
++
++static void svm_notifier_free(struct mmu_notifier *mn)
++{
++	struct svm_process *process = NULL;
++
++	process = container_of(mn, struct svm_process, notifier);
++	arm64_mm_context_put(process->mm);
++	kfree(process);
++}
++
++static void svm_process_release(struct svm_process *process)
++{
++	delete_svm_process(process);
++	put_pid(process->pid);
++	mmu_notifier_put(&process->notifier);
++}
++
++static void svm_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm)
++{
++	struct svm_process *process = NULL;
++	process = container_of(mn, struct svm_process, notifier);
++
++	svm_smmu_clk_live_enter();
++	svm_unbind_cores(process);
++	svm_smmu_clk_live_exit();
++
++	mutex_lock(&svm_process_mutex);
++	svm_process_release(process);
++	mutex_unlock(&svm_process_mutex);
++}
++
++/*
++ * The device CPU supports DVM: when the control CPU flushes its TLB, the
++ * device is notified in hardware rather than through the mmu_notifier.
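++ * Because of that, svm_process_mmu_notifier below registers no
++ * invalidate_range callback: it only handles address-space teardown in
++ * .release and deferred freeing in .free_notifier.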
++ */ ++static struct mmu_notifier_ops svm_process_mmu_notifier = { ++ .release = svm_notifier_release, ++ .free_notifier = svm_notifier_free, ++}; ++ ++static struct svm_process *svm_process_alloc(struct svm_device *sdev, struct pid *pid, ++ struct mm_struct *mm, unsigned long asid) ++{ ++ struct svm_process *process = kzalloc(sizeof(*process), GFP_ATOMIC); ++ if (process == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ process->sdev = sdev; ++ process->pid = pid; ++ process->mm = mm; ++ process->asid = asid; ++ mutex_init(&process->mutex); ++ process->notifier.ops = &svm_process_mmu_notifier; ++ ++ return process; ++} ++ ++static int get_task_info(struct task_struct *task, struct pid **ppid, struct mm_struct **pmm, unsigned long *pasid) ++{ ++ unsigned long asid; ++ struct pid *pid = NULL; ++ struct mm_struct *mm = NULL; ++ ++ pid = get_task_pid(task, PIDTYPE_PID); ++ if (pid == NULL) ++ return -EINVAL; ++ ++ mm = get_task_mm(task); ++ if (mm == NULL) { ++ put_pid(pid); ++ return -EINVAL; ++ } ++ ++ asid = arm64_mm_context_get(mm); ++ if (asid == 0) { ++ mmput(mm); ++ put_pid(pid); ++ return -ENOSPC; ++ } ++ ++ *ppid = pid; ++ *pmm = mm; ++ *pasid = asid; ++ return 0; ++} ++ ++static int svm_process_bind(struct task_struct *task, struct svm_device *sdev, ++ u64 *ttbr, u64 *tcr, int *pasid) ++{ ++ int err; ++ unsigned long asid; ++ struct pid *pid = NULL; ++ struct svm_process *process = NULL; ++ struct mm_struct *mm = NULL; ++ int smmu_devid = svm_device_get_smmu_devno(sdev); ++ if ((ttbr == NULL) || (tcr == NULL) || (pasid == NULL) || smmu_devid < 0) ++ return -EINVAL; ++ ++ err = get_task_info(task, &pid, &mm, &asid); ++ if (err != 0) ++ return err; ++ ++ /* If a svm_process already exists, use it */ ++ mutex_lock(&svm_process_mutex); ++ process = find_svm_process(asid, smmu_devid); ++ if (process == NULL) { ++ process = svm_process_alloc(sdev, pid, mm, asid); ++ if (IS_ERR(process)) { ++ err = PTR_ERR(process); ++ mutex_unlock(&svm_process_mutex); ++ goto err_put_mm_context; ++ } ++ ++ err = mmu_notifier_register(&process->notifier, mm); ++ if (err) { ++ mutex_unlock(&svm_process_mutex); ++ goto err_free_svm_process; ++ } ++ ++ insert_svm_process(process, smmu_devid); ++ svm_bind_cores(process); ++ mutex_unlock(&svm_process_mutex); ++ } else { ++ mutex_unlock(&svm_process_mutex); ++ arm64_mm_context_put(mm); ++ put_pid(pid); ++ } ++ ++ *ttbr = virt_to_phys(mm->pgd) | (asid << ASID_SHIFT); ++ *tcr = read_sysreg(tcr_el1); ++ *pasid = process->pasid; ++ ++ mmput(mm); ++ return 0; ++ ++err_free_svm_process: ++ kfree(process); ++ process = NULL; ++err_put_mm_context: ++ arm64_mm_context_put(mm); ++ mmput(mm); ++ put_pid(pid); ++ ++ return err; ++} ++ ++static struct svm_device *file_to_sdev(struct file *file) ++{ ++ return container_of(file->private_data, struct svm_device, miscdev); ++} ++ ++static struct svm_dev_wl_mng *svm_device_get_mng(const char *device_name) ++{ ++ int i; ++ int svm_name_len; ++ if (device_name == NULL) ++ return NULL; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ svm_name_len = strlen(svm_dev_white_list[i].svm_dev_name); ++ if (strncmp(device_name, svm_dev_white_list[i].svm_dev_name, svm_name_len) == 0) { ++ pr_debug("strncmp will return i = %d, svm_dev_name = %s, smmu_dev_name = %s\n", ++ i, svm_dev_white_list[i].svm_dev_name, svm_dev_white_list[i].smmu_dev_name); ++ return &svm_dev_white_list[i]; ++ } ++ } ++ return NULL; ++} ++ ++static bool svm_device_is_power_on(const char *device_name) ++{ ++ int i; ++ unsigned int svm_name_len; ++ unsigned int smmu_name_len; ++ 
unsigned int device_name_len; ++ unsigned int smmu_name_offset; ++ unsigned int read_val; ++ if (device_name == NULL || npu_dts_sys_peri == NULL || strlen(device_name) >= PATH_MAX) ++ return false; ++ device_name_len = strlen(device_name); ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ svm_name_len = strlen(svm_dev_white_list[i].svm_dev_name); ++ smmu_name_len = strlen(svm_dev_white_list[i].smmu_dev_name); ++ smmu_name_offset = device_name_len > smmu_name_len ? device_name_len - smmu_name_len : 0; ++ if (strncmp(device_name, svm_dev_white_list[i].svm_dev_name, svm_name_len) == 0 || ++ strncmp(device_name + smmu_name_offset, svm_dev_white_list[i].smmu_dev_name, smmu_name_len) == 0) { ++ if (npu_dts_sys_peri == NULL) { ++ pr_err("error : npu_dts_sys_peri is illegal\n"); ++ return false; ++ } ++ read_val = readl_relaxed(npu_dts_sys_peri + svm_dev_white_list[i].crg_offset); ++ pr_debug("npu_dts_sys_peri = 0x%llx offset = 0x%x val = 0x%x, smmu_name_offset = %u\n", ++ (uint64_t)(uintptr_t)npu_dts_sys_peri, svm_dev_white_list[i].crg_offset, read_val, smmu_name_offset); ++ if ((read_val & BIT(CLK_EN_BIT)) != 0) ++ return true; ++ } ++ } ++ pr_err("error : device name = %s ,smmu_name_offset = %u, svm is not powner on , please powner on svm first.\n", ++ device_name, smmu_name_offset); ++ return false; ++} ++ ++static int svm_device_post_probe(const char *device_name); ++static int svm_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ struct svm_dev_wl_mng *dev_mng = NULL; ++ struct svm_device *sdev = file_to_sdev(file); ++ const char *device_name = dev_name(sdev->dev); ++ ++ if (!svm_device_is_power_on(device_name)) { ++ dev_err(sdev->dev, "svm_open: svm is not power on\n"); ++ return -EFAULT; ++ } ++ ++ dev_mng = svm_device_get_mng(device_name); ++ if (dev_mng == NULL) { ++ dev_err(sdev->dev, "fail to get svm device mng\n"); ++ return -EFAULT; ++ } ++ mutex_lock(&svm_open_bind_mutex); ++ mutex_lock(&svm_dev_pm_mutex); ++ if (dev_mng->is_inited == false) { ++ ret = arm_smmu_device_post_probe(dev_mng->smmu_dev_name); ++ if (ret != 0) { ++ dev_err(sdev->dev, "fail to do smmu post probe\n"); ++ goto err_exit; ++ } ++ ++ ret = svm_device_post_probe(dev_mng->svm_dev_name); ++ if (ret != 0) { ++ dev_err(sdev->dev, "fail to do svm post probe\n"); ++ goto err_exit; ++ } ++ dev_mng->is_inited = true; ++ } else { ++ if (dev_mng->is_suspend == true) { ++ ret = arm_smmu_device_resume(dev_mng->smmu_dev_name); ++ if (ret != 0) { ++ dev_err(sdev->dev, "fail to resume smmu\n"); ++ goto err_exit; ++ } ++ dev_mng->is_suspend = false; ++ } ++ } ++ mutex_unlock(&svm_dev_pm_mutex); ++ mutex_unlock(&svm_open_bind_mutex); ++ return 0; ++ ++err_exit: ++ mutex_unlock(&svm_dev_pm_mutex); ++ mutex_unlock(&svm_open_bind_mutex); ++ return -EFAULT; ++} ++ ++static struct task_struct *svm_get_task(struct svm_bind_process params) ++{ ++ struct task_struct *task = NULL; ++ ++ if (params.flags & ~SVM_BIND_PID) ++ return ERR_PTR(-EINVAL); ++ ++ if (params.flags & SVM_BIND_PID) { ++ struct mm_struct *mm = NULL; ++ ++ rcu_read_lock(); ++ task = find_task_by_vpid(params.vpid); ++ if (task != NULL) ++ get_task_struct(task); ++ rcu_read_unlock(); ++ if (task == NULL) ++ return ERR_PTR(-ESRCH); ++ ++ /* check the permission */ ++ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); ++ if (IS_ERR_OR_NULL(mm)) { ++ pr_err("cannot access mm\n"); ++ put_task_struct(task); ++ return ERR_PTR(-ESRCH); ++ } ++ ++ mmput(mm); ++ } else { ++ get_task_struct(current); ++ task = current; ++ } ++ ++ return task; ++} ++ ++int svm_get_pasid(pid_t 
vpid, int dev_id __maybe_unused) ++{ ++ int pasid; ++ unsigned long asid; ++ struct task_struct *task = NULL; ++ struct mm_struct *mm = NULL; ++ struct svm_process *process = NULL; ++ struct svm_bind_process params; ++ ++ params.flags = SVM_BIND_PID; ++ params.vpid = vpid; ++ params.pasid = -1; ++ params.ttbr = 0; ++ params.tcr = 0; ++ task = svm_get_task(params); ++ if (IS_ERR(task)) ++ return PTR_ERR(task); ++ ++ mm = get_task_mm(task); ++ if (mm == NULL) { ++ pasid = -EINVAL; ++ goto put_task; ++ } ++ ++ asid = arm64_mm_context_get(mm); ++ if (asid == 0) { ++ pasid = -ENOSPC; ++ goto put_mm; ++ } ++ ++ mutex_lock(&svm_process_mutex); ++ process = find_svm_process(asid, dev_id); ++ mutex_unlock(&svm_process_mutex); ++ if (process != NULL) ++ pasid = process->pasid; ++ else ++ pasid = -ESRCH; ++ ++ arm64_mm_context_put(mm); ++put_mm: ++ mmput(mm); ++put_task: ++ put_task_struct(task); ++ ++ return pasid; ++} ++EXPORT_SYMBOL_GPL(svm_get_pasid); ++ ++static inline void svm_dcache_clean_inval_poc(void *start, unsigned long size) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) ++ __flush_dcache_area(start, size); ++#else ++ dcache_clean_inval_poc((unsigned long)(uintptr_t)start, (unsigned long)(uintptr_t)start + size); ++#endif ++} ++ ++static void pte_flush_range(pmd_t *pmd, unsigned long addr, unsigned long end) ++{ ++ pte_t *pte = NULL; ++ pte_t *pte4k = NULL; ++ ++ pte = pte_offset_map(pmd, addr); ++ if (!pte_present(*pte)) ++ return; ++ ++ pte4k = (pte_t *)round_down((u64)pte, PAGE_SIZE); ++ svm_dcache_clean_inval_poc((void *)pte4k, PAGE_SIZE); ++ ++ pte_unmap(pte); ++} ++ ++static void pmd_flush_range(pud_t *pud, unsigned long addr, unsigned long end) ++{ ++ pmd_t *pmd = NULL; ++ pmd_t *pmd4k = NULL; ++ unsigned long next; ++ ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ return; ++ ++ pmd4k = (pmd_t *)round_down((u64)pmd, PAGE_SIZE); ++ ++ do { ++ next = pmd_addr_end(addr, end); ++ pte_flush_range(pmd, addr, next); ++ pmd++; ++ addr = next; ++ } while (addr != end); ++ ++ svm_dcache_clean_inval_poc((void *)pmd4k, PAGE_SIZE); ++} ++ ++static void pud_flush_range(pgd_t *pgd, unsigned long addr, unsigned long end) ++{ ++ p4d_t *p4d = NULL; ++ pud_t *pud = NULL; ++#if CONFIG_PGTABLE_LEVELS > 3 ++ pud_t *pud4k = NULL; ++#endif ++ unsigned long next; ++ ++ p4d = p4d_offset(pgd, addr); ++ if (!p4d_present(*p4d)) ++ return; ++ ++ pud = pud_offset(p4d, addr); ++ if (!pud_present(*pud)) ++ return; ++#if CONFIG_PGTABLE_LEVELS > 3 ++ pud4k = (pud_t *)round_down((u64)pud, PAGE_SIZE); ++#endif ++ ++ do { ++ next = pud_addr_end(addr, end); ++ pmd_flush_range(pud, addr, next); ++ pud++; ++ addr = next; ++ } while (addr != end); ++ ++#if CONFIG_PGTABLE_LEVELS > 3 ++ svm_dcache_clean_inval_poc((void *)pud4k, PAGE_SIZE); ++#endif ++} ++ ++static void svm_flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) ++{ ++ pgd_t *pgd = NULL; ++ pgd_t *pgd4k = NULL; ++ unsigned long next; ++ ++ spin_lock(&mm->page_table_lock); ++ pgd = pgd_offset(mm, start); ++ if (!pgd_present(*pgd)) { ++ spin_unlock(&mm->page_table_lock); ++ return; ++ } ++ pgd4k = (pgd_t *)round_down((u64)pgd, PAGE_SIZE); ++ ++ do { ++ next = pgd_addr_end(start, end); ++ pud_flush_range(pgd, start, next); ++ pgd++; ++ start = next; ++ } while (start != end); ++ ++ svm_dcache_clean_inval_poc((void *)pgd4k, PAGE_SIZE); ++ spin_unlock(&mm->page_table_lock); ++} ++ ++int svm_flush_cache(struct mm_struct *mm, unsigned long addr, size_t size) ++{ ++ unsigned long start = round_down(addr, PAGE_SIZE); ++ 
unsigned long end = round_up(addr + size, PAGE_SIZE);
++	const char *device_name = NULL;
++	unsigned long asid;
++	struct device *dev = NULL;
++	struct iommu_domain *domain = NULL;
++	int i = 0;
++
++	if (mm == NULL) {
++		pr_err("mm is null\n");
++		return -1;
++	}
++
++	asid = arm64_mm_context_get(mm);
++	if (asid == 0) {
++		pr_err("get asid failed\n");
++		return -1;
++	}
++
++	/* flush the page-table pages covering the whole rounded range */
++	svm_flush_range(mm, start, end);
++
++	mutex_lock(&svm_dev_pm_mutex);
++	for (; i < SVM_DEV_MAX; i++) {
++		struct svm_process *process = find_svm_process(asid, i);
++		if (process == NULL || process->handle == NULL || process->handle->dev == NULL) {
++			continue;
++		}
++		dev = process->handle->dev;
++		domain = iommu_get_domain_for_dev(dev);
++		device_name = arm_smmu_get_device_name(domain);
++		if (svm_device_is_power_on(device_name) == true) {
++			domain->ops->inv_iotlb_range(domain, mm, start, end - start);
++		}
++	}
++	mutex_unlock(&svm_dev_pm_mutex);
++	arm64_mm_context_put(mm);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(svm_flush_cache);
++
++static int svm_vma_check(const struct vm_area_struct *pvma1, const struct vm_area_struct *pvma2,
++	unsigned long vm_start, unsigned long vm_end)
++{
++	if (pvma1 != pvma2) {
++		pr_err("ERROR: pvma1:[0x%lx,0x%lx) and pvma2:[0x%lx,0x%lx) are not equal\n",
++			pvma1->vm_start, pvma1->vm_end, pvma2->vm_start, pvma2->vm_end);
++		return -1;
++	}
++
++	if ((pvma1->vm_flags & VM_WRITE) == 0) {
++		pr_err("ERROR vma flag:0x%lx\n", pvma1->vm_flags);
++		return -1;
++	}
++
++	if (pvma1->vm_start > vm_start) {
++		pr_err("cannot find corresponding vma, vm[%lx, %lx], user range[%lx,%lx]\n",
++			pvma1->vm_start, pvma1->vm_end, vm_start, vm_end);
++		return -1;
++	}
++
++	if (pvma1->vm_ops == NULL || pvma1->vm_file == NULL) {
++		pr_err("pvma1->vm_flags = 0x%lx, pvma2->vm_flags = 0x%lx, vm_ops = 0x%lx, vm_file = 0x%lx\n",
++			pvma1->vm_flags, pvma2->vm_flags, (uintptr_t)pvma1->vm_ops, (uintptr_t)pvma1->vm_file);
++		return -1;
++	}
++	return 0;
++}
++
++static long svm_page_table_sync(unsigned long __user *arg)
++{
++	int ret = -EINVAL;
++	struct svm_pg_sync_para remap_para;
++	struct vm_area_struct *pvma1 = NULL;
++	struct vm_area_struct *pvma2 = NULL;
++	struct mm_struct *mm = current->mm;
++	unsigned long end;
++
++	if (arg == NULL) {
++		pr_err("arg is invalid.\n");
++		return ret;
++	}
++
++	ret = copy_from_user(&remap_para, (void __user *)arg, sizeof(remap_para));
++	if (ret) {
++		pr_err("failed to copy args from user space.\n");
++		return -EFAULT;
++	}
++
++	if (U64_MAX - remap_para.len < remap_para.vaddr) {
++		pr_err("vaddr or len is too large.\n");
++		return -1;
++	}
++	end = remap_para.vaddr + remap_para.len;
++	down_read(&mm->mmap_lock);
++	pvma1 = find_vma(mm, remap_para.vaddr);
++	if (pvma1 == NULL) {
++		up_read(&mm->mmap_lock);
++		pr_err("ERROR: pvma1 is null, vaddr = 0x%llx or len = %u is illegal.\n", remap_para.vaddr, remap_para.len);
++		return -1;
++	}
++
++	pvma2 = find_vma(mm, end - 1);
++	if (pvma2 == NULL) {
++		up_read(&mm->mmap_lock);
++		pr_err("ERROR: pvma2 is null, vaddr = 0x%llx or len = %u is illegal.\n", remap_para.vaddr, remap_para.len);
++		return -1;
++	}
++
++	ret = svm_vma_check(pvma1, pvma2, remap_para.vaddr, end);
++	if (ret != 0) {
++		up_read(&mm->mmap_lock);
++		pr_err("ERROR: vma check failed, vaddr = 0x%llx or len = %u is illegal.\n", remap_para.vaddr, remap_para.len);
++		return -ESRCH;
++	}
++
++	if (end > pvma1->vm_end || end < remap_para.vaddr) {
++		up_read(&mm->mmap_lock);
++		ret = -EINVAL;
++		pr_err("memory length is out of range, vaddr:%pK, len:%u.\n", (void
*)remap_para.vaddr, remap_para.len); ++ return ret; ++ } ++ ++ svm_flush_cache(pvma1->vm_mm, remap_para.vaddr, remap_para.len); ++ up_read(&mm->mmap_lock); ++ return 0; ++} ++ ++static long svm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ int err = -EINVAL; ++ struct svm_bind_process params; ++ struct task_struct *task = NULL; ++ struct svm_device *sdev = file_to_sdev(file); ++ if (arg == 0 || sdev == NULL) ++ return -EINVAL; ++ ++ if (cmd == SVM_IOCTL_PROCESS_BIND) { ++ if (copy_from_user(¶ms, (void __user *)arg, sizeof(params))) { ++ dev_err(sdev->dev, "fail to copy params\n"); ++ return -EFAULT; ++ } ++ } ++ if (!svm_device_is_power_on(dev_name(sdev->dev))) { ++ dev_err(sdev->dev, "svm_ioctl: svm is not power on\n"); ++ return -EFAULT; ++ } ++ ++ switch (cmd) { ++ case SVM_IOCTL_PROCESS_BIND: ++ task = svm_get_task(params); ++ if (IS_ERR(task)) { ++ dev_err(sdev->dev, "failed to get task\n"); ++ return PTR_ERR(task); ++ } ++ err = svm_process_bind(task, sdev, ¶ms.ttbr, ¶ms.tcr, ¶ms.pasid); ++ if (err) { ++ put_task_struct(task); ++ dev_err(sdev->dev, "failed to bind task %d\n", err); ++ return err; ++ } ++ put_task_struct(task); ++ if (copy_to_user((void __user *)arg, ¶ms, sizeof(params))) ++ err = -EFAULT; ++ break; ++ case SVM_IOCTL_PAGE_TABLE_SYNC: ++ err = svm_page_table_sync((unsigned long __user*)arg); ++ break; ++ default: ++ err = -EINVAL; ++ break; ++ } ++ ++ if (err) ++ dev_err(sdev->dev, "%s: %s failed err = %d\n", __func__, svm_cmd_to_string(cmd), err); ++ ++ return err; ++} ++ ++static int svm_release(struct inode *inode_ptr, struct file *file_ptr) ++{ ++ return 0; ++} ++ ++static const struct file_operations svm_fops = { ++ .owner = THIS_MODULE, ++ .open = svm_open, ++ .unlocked_ioctl = svm_ioctl, ++ .release = svm_release, ++}; ++ ++int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); ++static int svm_init_core(struct svm_device *sdev, struct device_node *np) ++{ ++ int err = 0; ++ struct device_node *child = NULL; ++ struct device *dev = sdev->dev; ++ ++ down_write(&svm_sem); ++ if (svm_bus_type.iommu_ops == NULL) { ++ err = bus_register(&svm_bus_type); ++ if (err) { ++ up_write(&svm_sem); ++ dev_err(dev, "failed to register svm_bus_type\n"); ++ return err; ++ } ++ ++ err = bus_set_iommu(&svm_bus_type, dev->bus->iommu_ops); ++ if (err) { ++ up_write(&svm_sem); ++ dev_err(dev, "failed to set iommu for svm_bus_type\n"); ++ goto err_unregister_bus; ++ } ++ } else if (svm_bus_type.iommu_ops != dev->bus->iommu_ops) { ++ err = -EBUSY; ++ up_write(&svm_sem); ++ dev_err(dev, "iommu_ops configured, but changed!\n"); ++ return err; ++ } ++ up_write(&svm_sem); ++ ++ for_each_available_child_of_node(np, child) { ++ err = svm_of_add_core(sdev, child); ++ if (err) ++ device_for_each_child(dev, NULL, svm_remove_core); ++ } ++ ++ return err; ++ ++err_unregister_bus: ++ bus_unregister(&svm_bus_type); ++ ++ return err; ++} ++ ++static int svm_device_wl_process(struct platform_device *pdev, struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ if (strnstr(pdev->name, svm_dev_white_list[i].svm_dev_name, strlen(pdev->name)) != NULL) { ++ svm_dev_white_list[i].dev = (void *)dev; ++ return 0; ++ } ++ } ++ return -1; ++} ++static int svm_get_sys_and_crg(const struct device *dev) ++{ ++ unsigned int crg_base; ++ unsigned int crg_size; ++ int i; ++ int svm_name_len; ++ const char *device_name = dev_name(dev); ++ struct device_node *np = dev->of_node; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ svm_name_len = 
strlen(svm_dev_white_list[i].svm_dev_name); ++ if (strncmp(device_name, svm_dev_white_list[i].svm_dev_name, svm_name_len) == 0) { ++ pr_debug("svm_get_sys_and_crg : strncmp will return i = %d, svm_dev_name = %s, smmu_dev_name = %s\n", ++ i, svm_dev_white_list[i].svm_dev_name, svm_dev_white_list[i].smmu_dev_name); ++ break; ++ } ++ } ++ if (i == SVM_DEV_MAX) { ++ dev_err(dev, "defer probe svm device, device name = %s not match\n", device_name); ++ return -EPROBE_DEFER; ++ } ++ if (of_property_read_u32(np, "crg-base", &crg_base) != 0 || ++ of_property_read_u32(np, "crg-size", &crg_size) != 0 || ++ of_property_read_u32(np, svm_dev_white_list[i].crg_name, &svm_dev_white_list[i].crg_offset) != 0) { ++ pr_warn("Warning: missing crg-base property in dts tree, we don't support smmu powner on check!!!\n"); ++ npu_dts_sys_peri = NULL; ++ } else { ++ npu_dts_sys_peri = ioremap(crg_base, crg_size); ++ } ++ dev_dbg(dev, " read crg_offset i = %d, crg_name = %s, crg_offset = 0x%x \n", ++ i, svm_dev_white_list[i].crg_name, svm_dev_white_list[i].crg_offset); ++ return 0; ++} ++static int svm_device_probe(struct platform_device *pdev) ++{ ++ int err; ++ struct device *dev = &pdev->dev; ++ struct svm_device *sdev = NULL; ++ struct device_node *np = dev->of_node; ++ ++ if (np == NULL) ++ return -ENODEV; ++ ++ if (!dev->bus || !dev->bus->iommu_ops) { ++ /* If SMMU is not probed, it should defer probe of this driver */ ++ dev_dbg(dev, "this dev bus is NULL or defer probe svm device\n"); ++ return -EPROBE_DEFER; ++ } ++ sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL); ++ if (sdev == NULL) ++ return -ENOMEM; ++ ++ sdev->id = probe_index; ++ sdev->dev = dev; ++ sdev->miscdev.minor = MISC_DYNAMIC_MINOR; ++ sdev->miscdev.fops = &svm_fops; ++ sdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, SVM_DEVICE_NAME"%llu", sdev->id); ++ if (sdev->miscdev.name == NULL) ++ err = -ENOMEM; ++ ++ dev_set_drvdata(dev, sdev); ++ err = misc_register(&sdev->miscdev); ++ if (err) { ++ dev_err(dev, "Unable to register misc device\n"); ++ return err; ++ } ++ ++ if (svm_device_wl_process(pdev, dev) != 0) { ++ err = svm_init_core(sdev, np); ++ if (err) { ++ dev_err(dev, "failed to init cores\n"); ++ goto err_unregister_misc; ++ } ++ } ++ probe_index++; ++ if (svm_get_sys_and_crg(dev) != 0) { ++ dev_err(dev, "failed to svm_get_sys_and_crg, device error.\n"); ++ goto err_unregister_misc; ++ } ++ ++ mutex_init(&svm_process_mutex); ++ mutex_init(&svm_dev_pm_mutex); ++ dev_info(dev, "svm probe ok.\n"); ++ ++ return err; ++err_unregister_misc: ++ misc_deregister(&sdev->miscdev); ++ return err; ++} ++ ++static int svm_device_post_probe(const char *device_name) ++{ ++ int err, i; ++ struct device *dev = NULL; ++ struct svm_device *sdev = NULL; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ if (strnstr(device_name, svm_dev_white_list[i].svm_dev_name, strlen(device_name)) != NULL) { ++ dev = (struct device *)svm_dev_white_list[i].dev; ++ break; ++ } ++ } ++ ++ if (dev == NULL || i >= SVM_DEV_MAX) { ++ dev_err(dev, "faile to find svm device in white list \n"); ++ return -1; ++ } ++ ++ sdev = dev_get_drvdata(dev); ++ if (sdev == NULL) { ++ dev_err(dev, "failed get drv data\n"); ++ return -1; ++ } ++ ++ err = svm_init_core(sdev, dev->of_node); ++ if (err) { ++ dev_err(dev, "failed to init cores\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int svm_smmu_device_suspend(const char *device_name) ++{ ++ int ret = 0; ++ struct svm_dev_wl_mng *dev_mng = NULL; ++ ++ dev_mng = svm_device_get_mng(device_name); ++ if (dev_mng == NULL) ++ return -1; ++ ++ 
mutex_lock(&svm_dev_pm_mutex); ++ if (dev_mng->is_suspend == false) { ++ dev_mng->is_suspend = true; ++ ret = arm_smmu_device_suspend(dev_mng->smmu_dev_name); ++ } ++ mutex_unlock(&svm_dev_pm_mutex); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(svm_smmu_device_suspend); ++ ++int svm_smmu_device_resume(const char *device_name) ++{ ++ int ret = 0; ++ struct svm_dev_wl_mng *dev_mng = NULL; ++ ++ dev_mng = svm_device_get_mng(device_name); ++ if (dev_mng == NULL) ++ return -1; ++ ++ mutex_lock(&svm_dev_pm_mutex); ++ if (dev_mng->is_suspend == true) { ++ ret = arm_smmu_device_resume(dev_mng->smmu_dev_name); ++ dev_mng->is_suspend = false; ++ } ++ mutex_unlock(&svm_dev_pm_mutex); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(svm_smmu_device_resume); ++ ++void svm_smmu_device_reset_lock(void) ++{ ++ mutex_lock(&svm_dev_pm_mutex); ++ return; ++} ++EXPORT_SYMBOL(svm_smmu_device_reset_lock); ++ ++int svm_smmu_device_reset(const char *device_name) ++{ ++ int ret = -1; ++ struct svm_dev_wl_mng *dev_mng = NULL; ++ ++ dev_mng = svm_device_get_mng(device_name); ++ if (dev_mng == NULL) ++ return -1; ++ ++ if (dev_mng->is_suspend == false) ++ ret = arm_smmu_device_reset_ex(dev_mng->smmu_dev_name); ++ return ret; ++} ++EXPORT_SYMBOL(svm_smmu_device_reset); ++ ++void svm_smmu_device_reset_unlock(void) ++{ ++ mutex_unlock(&svm_dev_pm_mutex); ++ return; ++} ++EXPORT_SYMBOL(svm_smmu_device_reset_unlock); ++ ++int svm_smmu_clk_live_process_register(smmu_clk_live_func enter, smmu_clk_live_func exit) ++{ ++ if (g_smmu_clk_live_enter == NULL) ++ g_smmu_clk_live_enter = enter; ++ ++ if (g_smmu_clk_live_exit == NULL) ++ g_smmu_clk_live_exit = exit; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(svm_smmu_clk_live_process_register); ++ ++ ++void svm_smmu_clk_live_enter(void) ++{ ++ if (g_smmu_clk_live_enter != NULL) ++ g_smmu_clk_live_enter(); ++} ++EXPORT_SYMBOL_GPL(svm_smmu_clk_live_enter); ++ ++void svm_smmu_clk_live_exit(void) ++{ ++ if (g_smmu_clk_live_exit != NULL) ++ g_smmu_clk_live_exit(); ++} ++EXPORT_SYMBOL_GPL(svm_smmu_clk_live_exit); ++ ++static bool svm_device_is_inited(const char *device_name) ++{ ++ int i; ++ ++ if (device_name == NULL) ++ return false; ++ ++ for (i = 0; i < SVM_DEV_MAX; i++) { ++ if (strnstr(device_name, svm_dev_white_list[i].svm_dev_name, strlen(device_name)) != NULL) { ++ if (svm_dev_white_list[i].is_inited == true) ++ return true; ++ } ++ } ++ return false; ++} ++ ++static int svm_device_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct svm_device *sdev = dev_get_drvdata(dev); ++ const char *device_name = dev_name(dev); ++ ++ mutex_lock(&svm_dev_pm_mutex); ++ if (npu_dts_sys_peri != NULL) { ++ iounmap(npu_dts_sys_peri); ++ npu_dts_sys_peri = NULL; ++ } ++ mutex_unlock(&svm_dev_pm_mutex); ++ ++ svm_smmu_clk_live_enter(); ++ if (svm_device_is_inited(device_name)) ++ device_for_each_child(sdev->dev, NULL, svm_remove_core); ++ svm_smmu_clk_live_exit(); ++ ++ misc_deregister(&sdev->miscdev); ++ return 0; ++} ++ ++static void svm_device_shutdown(struct platform_device *pdev) ++{ ++ svm_device_remove(pdev); ++ return; ++} ++ ++static const struct of_device_id svm_of_match[] = { ++ { .compatible = "vendor,svm" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, svm_of_match); ++ ++static struct platform_driver svm_driver = { ++ .probe = svm_device_probe, ++ .remove = svm_device_remove, ++ .shutdown = svm_device_shutdown, ++ .driver = { ++ .name = SVM_DEVICE_NAME, ++ .of_match_table = svm_of_match, ++ }, ++}; ++ ++module_platform_driver(svm_driver); ++ ++MODULE_LICENSE("GPL v2"); +diff 
--git a/drivers/vendor/npu/smmu_power_on.c b/drivers/vendor/npu/smmu_power_on.c
+new file mode 100644
+index 000000000..ce0579115
+--- /dev/null
++++ b/drivers/vendor/npu/smmu_power_on.c
+@@ -0,0 +1,91 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2021. All rights reserved.
++ * Description: smmu pm
++ * Version: Initial Draft
++ * Create: 2020-01-16
++ */
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#define VENDOR_TOP_CTL_BASE (0x30000)
++
++#define SMMU_LP_REQ (VENDOR_TOP_CTL_BASE + 0)
++#define TCU_QREQN_CG BIT(0)
++#define TCU_QREQN_PD BIT(1)
++
++#define SMMU_LP_ACK (VENDOR_TOP_CTL_BASE + 0x4)
++#define TCU_QACCEPTN_CG BIT(0)
++#define TCU_QACCEPTN_PD BIT(4)
++
++/* TBU reg */
++#define SMMU_TBU_CR (0)
++#define TBU_EN_REQ BIT(0)
++
++#define SMMU_TBU_CRACK (0x4)
++#define TBU_EN_ACK BIT(0)
++#define TBU_CONNECTED BIT(1)
++
++#define ARM_SMMU_POLL_TIMEOUT_US 100
++
++static int npu_reg_bit_set_with_ack(void __iomem *base, unsigned int req_off,
++	unsigned int ack_off, unsigned int req_bit, unsigned int ack_bit)
++{
++	u32 reg = 0;
++	u32 val = 0;
++
++	val = readl_relaxed(base + req_off);
++	val |= req_bit;
++	writel_relaxed(val, base + req_off);
++	return readl_relaxed_poll_timeout(base + ack_off, reg,
++		reg & ack_bit, 1, ARM_SMMU_POLL_TIMEOUT_US);
++}
++
++
++int svm_smmu_power_on(void *base, unsigned int tcu_offset, unsigned int tbu_offset)
++{
++	int ret;
++	void __iomem *tmp_base;
++	u32 reg;
++
++	/****************tcu configure***************/
++	tmp_base = (void __iomem *)base + tcu_offset;
++	/* Request to leave clock-gating mode */
++	ret = npu_reg_bit_set_with_ack(tmp_base, SMMU_LP_REQ, SMMU_LP_ACK, TCU_QREQN_CG, TCU_QACCEPTN_CG);
++	if (ret) {
++		/* TODO: the error log here is removed temporarily; restore it later */
++		return -EINVAL;
++	}
++	/* Request to leave power-down mode */
++	ret = npu_reg_bit_set_with_ack(tmp_base, SMMU_LP_REQ, SMMU_LP_ACK, TCU_QREQN_PD, TCU_QACCEPTN_PD);
++	if (ret) {
++		printk("%s: npu_reg_bit_set_with_ack failed\n", __func__);
++		return -EINVAL;
++	}
++
++	/****************tbu configure***************/
++	/* enable AICore tbu */
++	tmp_base = (void __iomem *)base + tbu_offset;
++	/* enable TBU request */
++	npu_reg_bit_set_with_ack(tmp_base, SMMU_TBU_CR, SMMU_TBU_CRACK, TBU_EN_REQ, TBU_EN_ACK);
++
++	/* check TBU enable acknowledge */
++	reg = readl_relaxed(tmp_base + SMMU_TBU_CRACK);
++	if ((reg & TBU_CONNECTED) == 0) {
++		printk("%s: failed to connect TBU\n", __func__);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(svm_smmu_power_on);
++
++
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/vendor/usb/Kconfig b/drivers/vendor/usb/Kconfig
+new file mode 100755
+index 000000000..a5dfa069f
+--- /dev/null
++++ b/drivers/vendor/usb/Kconfig
+@@ -0,0 +1,6 @@
++config USB_WING
++	tristate "Wing USB controller"
++	select EXTCON
++	select USB_DWC3_DUAL_ROLE
++	help
++	  Say Y or M here if you want to support the Wing USB controller.
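With EXTCON and USB_DWC3_DUAL_ROLE selected automatically, a build enables the whole driver through a single option; a minimal defconfig fragment (illustrative sketch, which fragment file the build picks up is configuration-specific):

CONFIG_USB_WING=m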
+diff --git a/drivers/vendor/usb/Makefile b/drivers/vendor/usb/Makefile +new file mode 100755 +index 000000000..8d6678afa +--- /dev/null ++++ b/drivers/vendor/usb/Makefile +@@ -0,0 +1,12 @@ ++include $(src)/version.mak ++ ++KBUILD_CFLAGS += -Werror ++ ++ccflags-y += -Idrivers/usb/dwc3/ -DUSB_KERNEL_VERSION=\"$(USB_KERNEL_VERSION)\" ++ ++ifeq ($(CONFIG_SOCT_DRV_BUILD_KO),y) ++CONFIG_USB_WING = m ++endif ++ ++obj-$(CONFIG_USB_WING) += wing-usb.o ++wing-usb-y := wing_usb.o proc.o +diff --git a/drivers/vendor/usb/defconfig b/drivers/vendor/usb/defconfig +new file mode 100755 +index 000000000..4a7c1e657 +--- /dev/null ++++ b/drivers/vendor/usb/defconfig +@@ -0,0 +1,5 @@ ++# CONFIG_USB_XHCI_PM_SET_CLK is not set ++CONFIG_CMA_MEM_SHARED=y ++CONFIG_CMA_SIZE_MBYTES=32 ++CONFIG_USB_UAS=y ++CONFIG_SCSI_SCAN_ASYNC=y +diff --git a/drivers/vendor/usb/driver_config.mk b/drivers/vendor/usb/driver_config.mk +new file mode 100755 +index 000000000..a0a59edae +--- /dev/null ++++ b/drivers/vendor/usb/driver_config.mk +@@ -0,0 +1,5 @@ ++DRIVER_CONFIG += CONFIG_USB_WING=y ++DRIVER_DEFCONFIG_FILE += $(TARGET_KERNEL_DIR)/drivers/huanglong/ups/usb/defconfig ++ ++RECOVERY_DRIVER_CONFIG += CONFIG_USB_WING=y ++RECOVERY_DRIVER_DEFCONFIG_FILE += $(TARGET_KERNEL_DIR)/drivers/huanglong/ups/usb/defconfig +diff --git a/drivers/vendor/usb/driver_obj.mk b/drivers/vendor/usb/driver_obj.mk +new file mode 100755 +index 000000000..60b4cf4b2 +--- /dev/null ++++ b/drivers/vendor/usb/driver_obj.mk +@@ -0,0 +1,2 @@ ++obj-$(CONFIG_USB_WING) += huanglong/ups/usb/ ++ +diff --git a/drivers/vendor/usb/proc.c b/drivers/vendor/usb/proc.c +new file mode 100644 +index 000000000..090c02c44 +--- /dev/null ++++ b/drivers/vendor/usb/proc.c +@@ -0,0 +1,152 @@ ++ /* ++ * Copyright (c) CompanyNameMagicTag 2022-2029. All rights reserved. 
++ * Description: For Wing USB Controller ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++ ++#include "../../usb/dwc3/core.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "proc.h" ++ ++#define MODE_BUF_LEN 32 ++ ++static ssize_t wing_usb_mode_write(struct file *file, ++ const char __user *ubuf, size_t count, loff_t *ppos) ++{ ++ struct seq_file *s = file->private_data; ++ struct wing_usb *wusb = s->private; ++ struct wing_usb_event usb_event = {0}; ++ ++ char buf[MODE_BUF_LEN] = {0}; ++ ++ if (ubuf == NULL) ++ return -EFAULT; ++ ++ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) ++ return -EFAULT; ++ ++ if (strncmp(buf, "device", strlen("device")) == 0) { ++ usb_event.type = SWITCH_TO_DEVICE; ++ } else if (strncmp(buf, "host", strlen("host")) == 0) { ++ usb_event.type = SWITCH_TO_HOST; ++ } else { ++ usb_event.type = NONE_EVENT; ++ wing_usb_err("input event type error\n"); ++ return -EINVAL; ++ } ++ ++ usb_event.ctrl_id = wusb->id; ++ wing_usb_queue_event(&usb_event, wusb); ++ ++ wing_usb_dbg("write %s\n", buf); ++ ++ return count; ++} ++ ++static int wing_usb_mode_show(struct seq_file *s, void *v) ++{ ++ struct wing_usb *wusb = s->private; ++ unsigned long flags; ++ u32 reg; ++ ++ spin_lock_irqsave(&wusb->event_lock, flags); ++ reg = readl(wusb->ctrl_base + DWC3_GCTL); ++ spin_unlock_irqrestore(&wusb->event_lock, flags); ++ switch (DWC3_GCTL_PRTCAP(reg)) { ++ case DWC3_GCTL_PRTCAP_HOST: ++ seq_printf(s, "host\n"); ++ wusb->state = WING_USB_STATE_HOST; ++ break; ++ case DWC3_GCTL_PRTCAP_DEVICE: ++ seq_printf(s, "device\n"); ++ wusb->state = WING_USB_STATE_DEVICE; ++ break; ++ case DWC3_GCTL_PRTCAP_OTG: ++ seq_printf(s, "otg\n"); ++ break; ++ default: ++ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); ++ } ++ ++ return 0; ++} ++ ++static int wing_usb_mode_open(struct inode *inode, struct file *file) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) ++ return single_open(file, wing_usb_mode_show, pde_data(inode)); ++#else ++ return single_open(file, wing_usb_mode_show, PDE_DATA(inode)); ++#endif ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++static const struct proc_ops g_wing_usb_proc_mode_ops = { ++ .proc_open = wing_usb_mode_open, ++ .proc_write = wing_usb_mode_write, ++ .proc_read = seq_read, ++ .proc_release = single_release, ++}; ++ ++#else ++static const struct file_operations g_wing_usb_proc_mode_ops = { ++ .open = wing_usb_mode_open, ++ .write = wing_usb_mode_write, ++ .read = seq_read, ++ .release = single_release, ++}; ++ ++#endif ++ ++int wing_usb_create_proc_entry(struct device *dev, struct wing_usb *wusb) ++{ ++ struct proc_dir_entry *proc_entry = NULL; ++ ++ wing_usb_dbg("+\n"); ++ ++ if (wusb == NULL) ++ return -EINVAL; ++ ++ proc_entry = proc_mkdir(dev_name(dev), NULL); ++ if (proc_entry == NULL) { ++ wing_usb_err("failed to create proc file\n"); ++ return -ENOMEM; ++ } ++ ++ wusb->proc_entry = proc_entry; ++ ++ if (proc_create_data("mode", S_IRUGO | S_IWUSR, proc_entry, ++ &g_wing_usb_proc_mode_ops, wusb) == NULL) { ++ wing_usb_err("Failed to create proc file mode \n"); ++ goto remove_entry; ++ } ++ ++ wing_usb_dbg("-\n"); ++ return 0; ++ ++remove_entry: ++ remove_proc_entry(dev_name(dev), NULL); ++ wusb->proc_entry = NULL; ++ ++ return -ENOMEM; ++} ++ ++void wing_usb_remove_proc_entry(struct device *dev, struct wing_usb *wusb) ++{ ++ if (wusb->proc_entry == NULL) ++ return; ++ ++ remove_proc_entry("mode", wusb->proc_entry); ++ remove_proc_entry(dev_name(dev), 
NULL); ++ ++ wusb->proc_entry = NULL; ++} ++ +diff --git a/drivers/vendor/usb/proc.h b/drivers/vendor/usb/proc.h +new file mode 100755 +index 000000000..b9e7d1cce +--- /dev/null ++++ b/drivers/vendor/usb/proc.h +@@ -0,0 +1,18 @@ ++ /* ++ * Copyright (c) CompanyNameMagicTag 2022-2029. All rights reserved. ++ * Description: For Wing USB Controller ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++ ++#ifndef _WING_USB_PROC_H_ ++#define _WING_USB_PROC_H_ ++ ++#include ++#include ++#include "wing_usb.h" ++ ++int wing_usb_create_proc_entry(struct device *dev, struct wing_usb *wusb); ++void wing_usb_remove_proc_entry(struct device *dev, struct wing_usb *wusb); ++ ++#endif /* _WING_USB_PROC_H_ */ +diff --git a/drivers/vendor/usb/version.mak b/drivers/vendor/usb/version.mak +new file mode 100755 +index 000000000..758a8ae5f +--- /dev/null ++++ b/drivers/vendor/usb/version.mak +@@ -0,0 +1 @@ ++USB_KERNEL_VERSION="USB_KERNEL 1.0.0" +diff --git a/drivers/vendor/usb/wing_usb.c b/drivers/vendor/usb/wing_usb.c +new file mode 100644 +index 000000000..b8987f49b +--- /dev/null ++++ b/drivers/vendor/usb/wing_usb.c +@@ -0,0 +1,1013 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022-2029. All rights reserved. ++ * Description: USB Controller driver ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)) ++#include ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++#include ++#endif ++#include "../../usb/dwc3/core.h" ++#include ++ ++#include "proc.h" ++#include "wing_usb.h" ++ ++static LIST_HEAD(wing_usb_drd_dev_list); ++ ++static const unsigned int wing_usb_extcon_cable[] = { ++ EXTCON_USB, ++ EXTCON_USB_HOST, ++ EXTCON_NONE ++}; ++ ++static const char *wing_usb_event_type_string( ++ enum wing_usb_event_type event) ++{ ++ static const char * const wing_usb_event_strings[] = { ++ [SWITCH_TO_HOST] = "SWITCH_TO_HOST", ++ [SWITCH_TO_DEVICE] = "SWITCH_TO_DEVICE", ++ [NONE_EVENT] = "NONE_EVENT", ++ }; ++ ++ if (event > NONE_EVENT) ++ return "illegal event"; ++ ++ return wing_usb_event_strings[event]; ++} ++ ++static int __maybe_unused wing_usb_event_enqueue(struct wing_usb *wusb, ++ const struct wing_usb_event *event) ++{ ++ if (event->ctrl_id != wusb->id) { ++ wing_usb_info("event doesn't belong to this controller, event->ctrl_id = %d\n", ++ event->ctrl_id); ++ } ++ ++ if (kfifo_in(&wusb->event_fifo, event, 1) == 0) { ++ wing_usb_err("drop event %s\n", ++ wing_usb_event_type_string(event->type)); ++ return -ENOSPC; ++ } ++ ++ return 0; ++} ++ ++/* ++ * get event frome event_queue ++ * return the numbers of event dequeued, currently it is 1 ++ */ ++static int wing_usb_event_dequeue(struct wing_usb *wusb, ++ struct wing_usb_event *event) ++{ ++ return kfifo_out_spinlocked(&wusb->event_fifo, event, 1, ++ &wusb->event_lock); ++} ++ ++static int wing_usb_remove_child(struct device *dev, void __maybe_unused *data) ++{ ++ int ret; ++ ++ ret = of_platform_device_destroy(dev, NULL); ++ if (ret != 0) { ++ wing_usb_err("device destroy error (ret %d)\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++static struct dwc3 *wing_usb_get_role_sw(const struct wing_usb *wusb) ++{ ++ struct dwc3 *dwc = NULL; ++ struct dwc3 *dwc_role_sw = NULL; ++ ++ if (wusb->dwc3_dev == NULL) { ++ wing_usb_err("no dwc platform device found\n"); ++ return NULL; ++ } ++ ++ dwc = 
platform_get_drvdata(wusb->dwc3_dev); ++ if (dwc == NULL) { ++ wing_usb_err("no dwc driver data found\n"); ++ return NULL; ++ } ++ ++ if (dwc->role_sw == NULL) { ++ wing_usb_err("no dwc_role_sw device found\n"); ++ return NULL; ++ } ++ ++ dwc_role_sw = (struct dwc3 *)usb_role_switch_get_drvdata(dwc->role_sw); ++ if (dwc_role_sw == NULL) { ++ wing_usb_err("no dwc_role_sw driver data found\n"); ++ return NULL; ++ } ++ ++ return dwc_role_sw; ++} ++#endif ++ ++static int wing_usb_start_device(const struct wing_usb *wusb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ struct dwc3 *dwc = NULL; ++#endif ++ int ret = extcon_set_state_sync(wusb->edev, EXTCON_USB, true); ++ if (ret) { ++ wing_usb_err("extcon start peripheral error\n"); ++ return ret; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ dwc = wing_usb_get_role_sw(wusb); ++ if (dwc != NULL) { ++ if (dwc->role_sw != NULL) { ++ ret = usb_role_switch_set_role(dwc->role_sw, USB_ROLE_DEVICE); ++ } ++ } ++#endif ++ ++ wing_usb_dbg("wing usb status: OFF -> DEVICE\n"); ++ ++ return ret; ++} ++ ++static int wing_usb_stop_device(const struct wing_usb *wusb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ struct dwc3 *dwc = NULL; ++#endif ++ ++ int ret = extcon_set_state_sync(wusb->edev, EXTCON_USB, false); ++ if (ret) { ++ wing_usb_err("extcon stop peripheral error\n"); ++ return ret; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ dwc = wing_usb_get_role_sw(wusb); ++ if (dwc != NULL) { ++ if (dwc->role_sw != NULL) { ++ ret = usb_role_switch_set_role(dwc->role_sw, USB_ROLE_NONE); ++ } ++ } ++#endif ++ ++ wing_usb_dbg("wing usb status: DEVICE -> OFF\n"); ++ ++ return ret; ++} ++ ++static int wing_usb_start_host(const struct wing_usb *wusb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ struct dwc3 *dwc = NULL; ++#endif ++ int ret = extcon_set_state_sync(wusb->edev, EXTCON_USB_HOST, true); ++ if (ret) { ++ wing_usb_err("extcon start host error\n"); ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ dwc = wing_usb_get_role_sw(wusb); ++ if (dwc != NULL) { ++ if (dwc->role_sw != NULL) { ++ ret = usb_role_switch_set_role(dwc->role_sw, USB_ROLE_HOST); ++ } ++ } ++#endif ++ ++ wing_usb_dbg("wing usb status: OFF -> HOST\n"); ++ ++ return ret; ++} ++ ++static int wing_usb_stop_host(const struct wing_usb *wusb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ struct dwc3 *dwc = NULL; ++#endif ++ int ret = extcon_set_state_sync(wusb->edev, EXTCON_USB_HOST, false); ++ if (ret) { ++ wing_usb_err("extcon stop host error\n"); ++ return ret; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ dwc = wing_usb_get_role_sw(wusb); ++ if (dwc != NULL) { ++ if (dwc->role_sw != NULL) { ++ ret = usb_role_switch_set_role(dwc->role_sw, USB_ROLE_HOST); ++ } ++ } ++#endif ++ ++ wing_usb_dbg("wing usb status: HOST -> OFF\n"); ++ ++ return ret; ++} ++ ++static int wing_usb_switch_to_host(struct wing_usb *wusb) ++{ ++ int ret; ++ ++ if (wusb == NULL) { ++ return -EINVAL; ++ } ++ ++ if (wusb->state == WING_USB_STATE_HOST) { ++ return 0; ++ } ++ ++ ret = wing_usb_stop_device(wusb); ++ if (ret != 0) { ++ wing_usb_err("stop device failed\n"); ++ return ret; ++ } ++ ++ ret = wing_usb_start_host(wusb); ++ if (ret != 0) { ++ wing_usb_err("start host failed\n"); ++ return ret; ++ } ++ ++ wusb->state = WING_USB_STATE_HOST; ++ ++ return 0; ++} ++ ++static int wing_usb_switch_to_device(struct wing_usb *wusb) ++{ ++ int ret; ++ ++ if (wusb == NULL) { ++ return -EINVAL; ++ } ++ ++ 
if (wusb->state == WING_USB_STATE_DEVICE) { ++ return 0; ++ } ++ ++ ret = wing_usb_stop_host(wusb); ++ if (ret != 0) { ++ wing_usb_err("stop host failed\n"); ++ return ret; ++ } ++ ++ ret = wing_usb_start_device(wusb); ++ if (ret != 0) { ++ wing_usb_err("start device failed\n"); ++ return ret; ++ } ++ ++ wusb->state = WING_USB_STATE_DEVICE; ++ ++ return 0; ++} ++ ++static void wing_usb_handle_event(struct wing_usb *wusb, ++ enum wing_usb_event_type event_type) ++{ ++ wing_usb_dbg("type: %s\n", wing_usb_event_type_string(event_type)); ++ ++ switch (event_type) { ++ case SWITCH_TO_HOST: ++ wing_usb_switch_to_host(wusb); ++ break; ++ ++ case SWITCH_TO_DEVICE: ++ wing_usb_switch_to_device(wusb); ++ break; ++ ++ default: ++ wing_usb_dbg("illegal event type!\n"); ++ break; ++ } ++} ++ ++static void wing_usb_event_work(struct work_struct *work) ++{ ++ struct wing_usb_event event = {0}; ++ ++ struct wing_usb *wusb = container_of(work, struct wing_usb, event_work); ++ ++ wing_usb_dbg("+\n"); ++ mutex_lock(&wusb->lock); ++ ++ while (wing_usb_event_dequeue(wusb, &event)) { ++ wing_usb_handle_event(wusb, event.type); ++ } ++ ++ mutex_unlock(&wusb->lock); ++ ++ wing_usb_dbg("-\n"); ++} ++ ++/* ++ * return 0 means event was accepted, others means event was rejected. ++ */ ++int wing_usb_queue_event(const struct wing_usb_event *usb_event, ++ struct wing_usb *wusb) ++{ ++#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ++ unsigned long flags; ++ enum wing_usb_event_type event_type; ++ int controller_id; ++ ++ if (usb_event == NULL) ++ return -EINVAL; ++ ++ if (wusb == NULL) ++ return -ENODEV; ++ ++ spin_lock_irqsave(&(wusb->event_lock), flags); ++ ++ event_type = usb_event->type; ++ controller_id = usb_event->ctrl_id; ++ wing_usb_dbg("event: %s, controller id: %d\n", ++ wing_usb_event_type_string(event_type), controller_id); ++ ++ if (wing_usb_event_enqueue(wusb, usb_event)) { ++ wing_usb_err("can't enqueue event:%d\n", event_type); ++ spin_unlock_irqrestore(&(wusb->event_lock), flags); ++ return -EBUSY; ++ } ++ ++ schedule_work(&wusb->event_work); ++ ++ spin_unlock_irqrestore(&(wusb->event_lock), flags); ++ ++ return 0; ++#else ++ return 0; ++#endif ++} ++ ++int wing_usb_otg_event(enum wing_usb_event_type type, int ctrl_id) ++{ ++ struct wing_usb_event event = {0}; ++ struct wing_usb *wusb = NULL; ++ struct wing_usb *cur_wusb, *next_wusb; ++ ++ event.type = type; ++ event.ctrl_id = ctrl_id; ++ ++ list_for_each_entry_safe(cur_wusb, next_wusb, ++ &wing_usb_drd_dev_list, list) { ++ wing_usb_info("cur_wusb->id = %d\n", cur_wusb->id); ++ if (cur_wusb->id == ctrl_id) { ++ wusb = cur_wusb; ++ break; ++ } ++ } ++ ++ if (wusb == NULL) { ++ wing_usb_err("find wing_usb for controler_id = %d failed\n", ++ ctrl_id); ++ return -1; ++ } ++ ++ return wing_usb_queue_event(&event, wusb); ++} ++EXPORT_SYMBOL(wing_usb_otg_event); ++ ++static int wing_usb_get_resource(struct device *dev, struct wing_usb *wusb) ++{ ++ int ret; ++ ++ wusb->support_drd = of_property_read_bool(dev->of_node, "support-drd"); ++ ++ wusb->is_cur_host = of_property_read_bool(dev->of_node, "host-mode"); ++ ++ wusb->deempth = of_property_read_bool(dev->of_node, "deempth"); ++ ++ /* es chip disable suspend, cs not need, but you can disable it when you want */ ++ wusb->disable_suspend = of_property_read_bool(dev->of_node, "disable-suspend"); ++ ++ /* USB31 need configure this */ ++ wusb->powerdown_scale = of_property_read_bool(dev->of_node, "powerdown-scale"); ++ ++ wusb->is_usb2 = of_property_read_bool(dev->of_node, "is-usb2"); ++ ++ wusb->ctrl_base = 
of_iomap(dev->of_node, 0); ++ if (IS_ERR(wusb->ctrl_base)) { ++ wing_usb_err("alloc ctrl_base failed\n"); ++ return -1; ++ } ++ ++ wusb->usb2_phy = devm_phy_get(dev, "usb2-phy"); ++ if (IS_ERR(wusb->usb2_phy)) { ++ wing_usb_err("get u2phy failed\n"); ++ return -1; ++ } ++ ++ wusb->usb3_phy = devm_phy_get(dev, "usb3-phy"); ++ /* at least one of usb2phy and usb3phy */ ++ if (!wusb->is_usb2 && IS_ERR(wusb->usb3_phy)) { ++ wing_usb_err("get u3phy failed\n"); ++ return -1; ++ } ++ ++ wusb->ctrl_clk = devm_clk_get(dev, "ctrl-clk"); ++ if (IS_ERR(wusb->ctrl_clk)) { ++ wing_usb_err("get ctrl clk failed\n"); ++ return -1; ++ } ++ ++ ret = of_property_read_u32(dev->of_node, "tx-thrcfg", &wusb->tx_thrcfg); ++ if (ret) ++ wusb->tx_thrcfg = 0; ++ ++ ret = of_property_read_u32(dev->of_node, "rx-thrcfg", &wusb->rx_thrcfg); ++ if (ret) ++ wusb->rx_thrcfg = 0; ++ ++ return 0; ++} ++ ++static void wing_usb_set_tx_deemph(const struct wing_usb *wusb) ++{ ++ /* Set X1 Gen2 TX de-emp value for normal use, CP13 & CP14 */ ++ writel(GEN2_TX_DEEMPH_VAL, wusb->ctrl_base + LCSR_TX_DEEMPH_ADDR); ++ writel(LCSR_TX_DEEMPH_CP13_VAL, wusb->ctrl_base + LCSR_TX_DEEMPH_CP13_ADDR); ++ writel(LCSR_TX_DEEMPH_CP14_VAL, wusb->ctrl_base + LCSR_TX_DEEMPH_CP14_ADDR); ++} ++ ++static void wing_usb_disable_suspend(const struct wing_usb *wusb) ++{ ++ u32 reg; ++ ++ reg = readl(wusb->ctrl_base + REG_GUSB3PIPECTL0); ++ reg &= ~(SUSPENDENABLE); ++ writel(reg, wusb->ctrl_base + REG_GUSB3PIPECTL0); ++} ++ ++static void wing_usb_set_powerdown_scale(const struct wing_usb *wusb) ++{ ++ u32 reg; ++ ++ reg = readl(wusb->ctrl_base + GCTL); ++ reg &= ~(PWRDNSCALE_MASK); ++ reg |= PWRDNSCALE_VAL; ++ writel(reg, wusb->ctrl_base + GCTL); ++} ++ ++static void wing_usb_set_mode(const struct wing_usb *wusb) ++{ ++ u32 val; ++ ++ val = readl(wusb->ctrl_base + GCTL); ++ val &= ~(PRTCAPDIR_MASK); ++ if (wusb->is_cur_host) { ++ val |= (PRTCAPDIR_HOST); ++ } else { ++ val |= (PRTCAPDIR_DEVICE); ++ } ++ writel(val, wusb->ctrl_base + GCTL); ++} ++ ++#if IS_ENABLED(CONFIG_PM_SLEEP) ++static void wing_usb_save_mode(struct wing_usb *wusb) ++{ ++ u32 val; ++ ++ val = readl(wusb->ctrl_base + GCTL); ++ val &= PRTCAPDIR_MASK; ++ if (val == PRTCAPDIR_HOST) { ++ wusb->is_cur_host = true; ++ } else { ++ wusb->is_cur_host = false; ++ } ++} ++#endif ++ ++static void config_tx_thrcfg(const struct wing_usb *wusb) ++{ ++ u32 val; ++ ++ if (wusb->tx_thrcfg != 0) { ++ val = readl(wusb->ctrl_base + GTXTHRCFG); ++ val &= ~(USB_TX_PKT_CNT_MASK); ++ val |= (wusb->tx_thrcfg & USB_TX_PKT_CNT_MASK); ++ val &= ~(USB_MAX_TX_BURST_SIZE_MASK); ++ val |= (wusb->tx_thrcfg & USB_MAX_TX_BURST_SIZE_MASK); ++ val |= USB_TX_PKT_CNT_SEL; ++ writel(val, wusb->ctrl_base + GTXTHRCFG); ++ } ++} ++ ++static void config_rx_thrcfg(const struct wing_usb *wusb) ++{ ++ u32 val; ++ ++ if (wusb->rx_thrcfg != 0) { ++ val = readl(wusb->ctrl_base + GRXTHRCFG); ++ val &= ~(USB_RX_PKT_CNT_MASK); ++ val |= (wusb->rx_thrcfg & USB_RX_PKT_CNT_MASK); ++ val &= ~(USB_MAX_RX_BURST_SIZE_MASK); ++ val |= (wusb->rx_thrcfg & USB_MAX_RX_BURST_SIZE_MASK); ++ val |= USB_RX_PKT_CNT_SEL; ++ writel(val, wusb->ctrl_base + GRXTHRCFG); ++ } ++} ++ ++static void wing_usb_usb20_config(const struct wing_usb *wusb) ++{ ++ u32 reg; ++ ++ reg = readl(wusb->ctrl_base + REG_GUSB3PIPECTL0); ++ reg |= PCS_SSP_SOFT_RESET; ++ writel(reg, wusb->ctrl_base + REG_GUSB3PIPECTL0); ++ ++ if (wusb->support_drd) { ++ /* For DRD mode, it is recommanded set SUSPENDUSB20 to 0 */ ++ reg = readl(wusb->ctrl_base + GUSB2PHYCFG0); ++ reg &= ~(SUSPENDUSB20); ++ 
writel(reg, wusb->ctrl_base + GUSB2PHYCFG0); ++ } ++ ++ reg = readl(wusb->ctrl_base + REG_GUSB3PIPECTL0); ++ reg &= ~(SUSPENDENABLE | PCS_SSP_SOFT_RESET); ++ writel(reg, wusb->ctrl_base + REG_GUSB3PIPECTL0); ++} ++ ++static void wing_usb_feature_config(struct wing_usb *wusb) ++{ ++ if (wusb->deempth) { ++ wing_usb_set_tx_deemph(wusb); ++ } ++ ++ if (wusb->disable_suspend) { ++ wing_usb_disable_suspend(wusb); ++ } ++ ++ if (wusb->powerdown_scale) { ++ wing_usb_set_powerdown_scale(wusb); ++ } ++ ++ if (wusb->is_usb2) { ++ wing_usb_usb20_config(wusb); ++ } ++ ++ config_tx_thrcfg(wusb); ++ config_rx_thrcfg(wusb); ++ ++ wing_usb_set_mode(wusb); ++} ++ ++static void wing_usb_drd_initialize(struct device *dev, struct wing_usb *wusb) ++{ ++ int ret; ++ ++ ret = of_property_read_s32(dev->of_node, "controller_id", &wusb->id); ++ if (ret) { ++ wing_usb_info("cannot read controller_id: %d\n", ret); ++ wusb->id = -1; ++ } ++ ++ INIT_KFIFO(wusb->event_fifo); ++ spin_lock_init(&wusb->event_lock); ++ INIT_WORK(&wusb->event_work, wing_usb_event_work); ++ mutex_init(&wusb->lock); ++} ++ ++static int wing_usb_drd_init_state(struct device *dev, struct wing_usb *wusb) ++{ ++ struct wing_usb_event usb_event = {0}; ++ const char *buf = NULL; ++ int ret; ++ ++ wing_usb_dbg("+\n"); ++ ++ wusb->state = WING_USB_STATE_UNKNOWN; ++ ++ ret = of_property_read_string(dev->of_node, "init_mode", &buf); ++ if (ret) { ++ wing_usb_info("cannot read init mode: %d, set device mode\n", ret); ++ usb_event.type = SWITCH_TO_DEVICE; ++ } else { ++ wing_usb_dbg("init state: %s\n", buf); ++ ++ if (!strncmp(buf, "host", 4)) { /* host len is 4 */ ++ usb_event.type = SWITCH_TO_HOST; ++ } else { ++ usb_event.type = SWITCH_TO_DEVICE; ++ } ++ } ++ ++ ret = wing_usb_queue_event(&usb_event, wusb); ++ if (ret) { ++ wing_usb_err("usb_queue_event err: %d\n", ret); ++ } ++ ++ wing_usb_dbg("-\n"); ++ ++ return 0; ++} ++ ++static int wing_usb_controller_probe(struct device *dev, struct wing_usb *wusb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ struct device_node *child_node = NULL; ++ int ret; ++ ++ ret = of_platform_populate(dev->of_node, NULL, NULL, dev); ++ if (ret == 0) { ++ child_node = of_get_next_available_child(dev->of_node, NULL); ++ if (child_node) { ++ wusb->dwc3_dev = of_find_device_by_node(child_node); ++ } ++ } ++ return ret; ++#else ++ return of_platform_populate(dev->of_node, NULL, NULL, dev); ++#endif ++} ++ ++static int wing_usb_pm_runtime_enable(const struct wing_usb *wusb) ++{ ++ struct platform_device *pdev = wusb->pdev; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ pm_runtime_set_active(dev); ++ pm_runtime_enable(dev); ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ wing_usb_err("pm_runtime_get_sync failed %d\n", ret); ++ return ret; ++ } ++ ++ pm_runtime_forbid(dev); ++ ++ return ret; ++} ++ ++static int wing_usb_extcon_init(struct device *dev, struct wing_usb *wusb) ++{ ++ int ret; ++ ++ wusb->edev = devm_extcon_dev_allocate(dev, wing_usb_extcon_cable); ++ if (IS_ERR(wusb->edev)) { ++ dev_err(dev, "failed to allocate extcon device\n"); ++ return PTR_ERR(wusb->edev); ++ } ++ ++ ret = devm_extcon_dev_register(dev, wusb->edev); ++ if (ret < 0) { ++ dev_err(dev, "failed to register extcon device\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int wing_usb_drd_init(struct device *dev, struct wing_usb *wusb) ++{ ++ int ret; ++ ++ ret = wing_usb_extcon_init(dev, wusb); ++ if (ret < 0) { ++ dev_err(dev, "failed to register extcon device\n"); ++ return -1; ++ } ++ ++ ret = 
wing_usb_create_proc_entry(dev, wusb);
++ if (ret) {
++ dev_err(dev, "create proc entry failed!\n");
++ goto err_extcon_free;
++ }
++
++ wing_usb_drd_initialize(dev, wusb);
++
++ ret = wing_usb_pm_runtime_enable(wusb);
++ if (ret < 0)
++ goto err_remove_attr;
++
++ ret = wing_usb_controller_probe(dev, wusb);
++ if (ret) {
++ wing_usb_err("register controller failed %d!\n", ret);
++ goto err_pm_put;
++ }
++
++ ret = wing_usb_drd_init_state(dev, wusb);
++ if (ret) {
++ wing_usb_err("wing_usb_drd_init_state failed!\n");
++ goto err_remove_child;
++ }
++
++ list_add_tail(&wusb->list, &wing_usb_drd_dev_list);
++
++ pm_runtime_allow(dev);
++ wing_usb_dbg("-\n");
++
++ return 0;
++
++err_remove_child:
++ device_for_each_child(dev, NULL, wing_usb_remove_child);
++
++err_pm_put:
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
++err_remove_attr:
++ wing_usb_remove_proc_entry(dev, wusb);
++
++err_extcon_free:
++ devm_extcon_dev_unregister(dev, wusb->edev);
++ extcon_dev_free(wusb->edev);
++ wusb->edev = NULL;
++
++ return ret;
++}
++
++static int wing_usb_host_init(struct device *dev, struct wing_usb *wusb)
++{
++ int ret;
++
++ /* enable runtime pm. */
++ ret = wing_usb_pm_runtime_enable(wusb);
++ if (ret < 0) {
++ wing_usb_err("enable pm runtime failed\n");
++ return ret;
++ }
++
++ ret = wing_usb_controller_probe(dev, wusb);
++ if (ret) {
++ wing_usb_err("register controller failed %d!\n", ret);
++ goto err_pm_put;
++ }
++
++ pm_runtime_allow(dev);
++
++ return 0;
++
++err_pm_put:
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
++ return ret;
++}
++
++static int wing_usb_clk_phy_init(const struct wing_usb *wusb)
++{
++ int ret = 0;
++
++ ret = clk_prepare(wusb->ctrl_clk);
++ if (ret != 0) {
++ wing_usb_err("ctrl clk prepare failed\n");
++ return -1;
++ }
++
++ if (!IS_ERR(wusb->usb2_phy)) {
++ ret = phy_power_on(wusb->usb2_phy);
++ if (ret != 0) {
++ wing_usb_err("usb2 phy init failed\n");
++ return -1;
++ }
++ }
++
++ if (!IS_ERR(wusb->usb3_phy)) {
++ ret = phy_power_on(wusb->usb3_phy);
++ if (ret != 0) {
++ wing_usb_err("usb3 phy init failed\n");
++ return -1;
++ }
++ }
++
++ ret = clk_enable(wusb->ctrl_clk);
++ if (ret != 0) {
++ wing_usb_err("ctrl clk enable failed\n");
++ return -1;
++ }
++
++ return 0;
++}
++
++static int wing_usb_clk_phy_deinit(const struct wing_usb *wusb)
++{
++ int ret = 0;
++
++ clk_disable(wusb->ctrl_clk);
++
++ if (!IS_ERR(wusb->usb3_phy)) {
++ ret = phy_power_off(wusb->usb3_phy);
++ if (ret != 0) {
++ wing_usb_err("usb3 phy deinit failed\n");
++ return -1;
++ }
++ }
++
++ if (!IS_ERR(wusb->usb2_phy)) {
++ ret = phy_power_off(wusb->usb2_phy);
++ if (ret != 0) {
++ wing_usb_err("usb2 phy deinit failed\n");
++ return -1;
++ }
++ }
++
++ clk_unprepare(wusb->ctrl_clk);
++
++ return 0;
++}
++
++static int wing_usb_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct wing_usb *wusb = NULL;
++ struct device *dev = &pdev->dev;
++
++ wing_usb_dbg("+++\n");
++
++ BUILD_BUG_ON(sizeof(struct wing_usb_event) != SIZE_WING_USB_EVENT);
++
++ wusb = devm_kzalloc(dev, sizeof(*wusb), GFP_KERNEL);
++ if (wusb == NULL) {
++ wing_usb_err("alloc wusb failed\n");
++ return -ENOMEM;
++ }
++
++ platform_set_drvdata(pdev, wusb);
++ wusb->pdev = pdev;
++
++ ret = wing_usb_get_resource(dev, wusb);
++ if (ret < 0) {
++ devm_kfree(dev, wusb);
++ wusb = NULL;
++ return -1;
++ }
++
++ ret = wing_usb_clk_phy_init(wusb);
++ if (ret != 0) {
++ wing_usb_err("init phy failed\n");
++ goto err_unmap;
++ }
++
++ wing_usb_feature_config(wusb);
++
++ if (wusb->support_drd) {
++ ret = 
wing_usb_drd_init(dev, wusb);
++ } else {
++ ret = wing_usb_host_init(dev, wusb);
++ }
++
++ if (ret < 0) {
++ wing_usb_err("controller init failed, ret = %d\n", ret);
++ goto err_unmap;
++ }
++
++ return 0;
++
++err_unmap:
++ iounmap(wusb->ctrl_base);
++
++ return ret;
++}
++
++static int wing_usb_remove(struct platform_device *pdev)
++{
++ struct wing_usb *wusb = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++
++ wing_usb_dbg("+\n");
++ if (wusb == NULL) {
++ wing_usb_err("wusb NULL\n");
++ return -ENODEV;
++ }
++
++ if (wusb->support_drd) {
++ wing_usb_remove_proc_entry(dev, wusb);
++
++ devm_extcon_dev_unregister(dev, wusb->edev);
++ extcon_dev_free(wusb->edev);
++ wusb->edev = NULL;
++
++ cancel_work_sync(&wusb->event_work);
++ }
++
++ device_for_each_child(dev, NULL, wing_usb_remove_child);
++
++ pm_runtime_put_sync(dev);
++ pm_runtime_disable(dev);
++
++ wing_usb_clk_phy_deinit(wusb);
++
++ iounmap(wusb->ctrl_base);
++
++ wing_usb_dbg("-\n");
++
++ return 0;
++}
++
++#if IS_ENABLED(CONFIG_PM_SLEEP)
++static int wing_usb_suspend(struct device *dev)
++{
++ int ret;
++ struct wing_usb *wusb = dev_get_drvdata(dev);
++
++ if (wusb == NULL) {
++ wing_usb_err("wusb is null\n");
++ return -1;
++ }
++
++ wing_usb_save_mode(wusb);
++
++ ret = wing_usb_clk_phy_deinit(wusb);
++ if (ret != 0) {
++ wing_usb_err("deinit clk and phy failed during suspend\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int wing_usb_resume(struct device *dev)
++{
++ int ret;
++ struct wing_usb *wusb = dev_get_drvdata(dev);
++
++ if (wusb == NULL) {
++ wing_usb_err("wusb is null\n");
++ return -1;
++ }
++
++ ret = wing_usb_clk_phy_init(wusb);
++ if (ret != 0) {
++ wing_usb_err("init clk and phy failed during resume\n");
++ return ret;
++ }
++
++ wing_usb_feature_config(wusb);
++
++ return 0;
++}
++#endif
++
++const struct dev_pm_ops g_wing_usb_dev_pm_ops = {
++#if IS_ENABLED(CONFIG_PM_SLEEP)
++ SET_SYSTEM_SLEEP_PM_OPS(wing_usb_suspend, wing_usb_resume)
++#endif
++};
++
++static const struct of_device_id g_wing_usb_match[] = {
++ { .compatible = "wing-usb,drd" },
++ { .compatible = "wing-usb,host" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, g_wing_usb_match);
++
++static struct platform_driver g_wing_usb_driver = {
++ .probe = wing_usb_probe,
++ .remove = wing_usb_remove,
++ .driver = {
++ .name = "wing-usb",
++ .of_match_table = of_match_ptr(g_wing_usb_match),
++ .pm = &g_wing_usb_dev_pm_ops,
++ },
++};
++
++static int __init wing_usb_module_init(void)
++{
++ int ret;
++
++ ret = platform_driver_register(&g_wing_usb_driver);
++ if (ret != 0) {
++ wing_usb_err("register wing usb driver failed, ret = %d\n", ret);
++ return ret;
++ }
++
++ wing_usb_info("register wing usb driver\n");
++
++ return ret;
++}
++module_init(wing_usb_module_init);
++
++static void __exit wing_usb_module_exit(void)
++{
++ platform_driver_unregister(&g_wing_usb_driver);
++
++ wing_usb_info("unregister wing usb driver\n");
++}
++module_exit(wing_usb_module_exit);
++
++MODULE_DESCRIPTION("Wing USB Controller Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(USB_KERNEL_VERSION);
+diff --git a/drivers/vendor/usb/wing_usb.h b/drivers/vendor/usb/wing_usb.h
+new file mode 100755
+index 000000000..cb09a595c
+--- /dev/null
++++ b/drivers/vendor/usb/wing_usb.h
+@@ -0,0 +1,144 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2022-2029. All rights reserved.
++ * Description: USB Controller driver ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++ ++#ifndef _WING_USB_H_ ++#define _WING_USB_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define WING_USB_DEBUG 0 ++ ++#define wing_usb_dbg(format, arg...) \ ++ do { \ ++ if (WING_USB_DEBUG) \ ++ printk(KERN_INFO "[WING-USB][%s]"format, __func__, ##arg); \ ++ } while (0) ++ ++#define wing_usb_info(format, arg...) \ ++ printk(KERN_INFO "[WING-USB][%s]"format, __func__, ##arg) ++ ++#define wing_usb_err(format, arg...) \ ++ printk(KERN_ERR "[WING-USB][%s]"format, __func__, ##arg) ++ ++#define MAX_WING_USB_EVENT_COUNT 16 ++ ++#define LCSR_TX_DEEMPH_ADDR 0xD060 ++#define LCSR_TX_DEEMPH_CP13_ADDR 0xD064 ++#define LCSR_TX_DEEMPH_CP14_ADDR 0xD068 ++#define GEN2_TX_DEEMPH_VAL 0x14FC0 ++#define LCSR_TX_DEEMPH_CP13_VAL 0xFC0 ++#define LCSR_TX_DEEMPH_CP14_VAL 0x4FC5 ++ ++#define REG_GUSB3PIPECTL0 0xc2c0 ++#define SUSPENDENABLE BIT(17) ++#define PCS_SSP_SOFT_RESET BIT(31) ++ ++#define GCTL 0xc110 ++#define PRTCAPDIR_HOST BIT(12) ++#define PRTCAPDIR_DEVICE BIT(13) ++#define PRTCAPDIR_MASK (3U << 12) ++#define PWRDNSCALE_MASK (0x1fffU << 19) ++#define PWRDNSCALE_VAL (0x3fU << 19) ++ ++#define GTXTHRCFG 0xc108 ++#define USB_TX_PKT_CNT_SEL BIT(29) ++#define USB_TX_PKT_CNT_MASK (0xfU << 24) ++#define USB_TX_PKT_CNT (0x3U << 24) ++#define USB_MAX_TX_BURST_SIZE_MASK (0xffU << 16) ++#define USB_MAX_TX_BURST_SIZE (0x10U << 16) ++ ++#define GRXTHRCFG 0xc10c ++#define USB_RX_PKT_CNT_SEL BIT(29) ++#define USB_RX_PKT_CNT_MASK (0xfU << 24) ++#define USB_RX_PKT_CNT (0x3U << 24) ++#define USB_MAX_RX_BURST_SIZE_MASK (0xffU << 16) ++#define USB_MAX_RX_BURST_SIZE (0x10U << 16) ++ ++#define GUSB2PHYCFG0 0xc200 ++#define ULPI_UTMI_SEL (1U << 4) ++#define SUSPENDUSB20 (1U << 6) ++ ++enum wing_usb_state { ++ WING_USB_STATE_UNKNOWN = 0, ++ WING_USB_STATE_OFF, ++ WING_USB_STATE_HOST, ++ WING_USB_STATE_DEVICE, ++}; ++ ++enum wing_usb_event_type { ++ SWITCH_TO_HOST = 0, ++ SWITCH_TO_DEVICE, ++ NONE_EVENT, ++}; ++ ++#define SIZE_WING_USB_EVENT 32 ++ ++/* size of struct wing_usb_event must be a power of 2 for kfifo */ ++struct wing_usb_event { ++ enum wing_usb_event_type type; ++ int ctrl_id; ++#ifdef CONFIG_64BIT ++ u32 reserved; /* to keep struct size is 32 bytes in 64bit system */ ++#else ++ u32 reserved[3]; /* to keep struct size is 32 bytes in 32bit system */ ++#endif ++ u32 flags; ++ void (*callback)(struct wing_usb_event *event); ++ void *content; ++}; ++ ++struct wing_usb { ++ struct platform_device *pdev; ++ struct extcon_dev *edev; ++ struct platform_device *dwc3_dev; ++ ++ int id; ++ bool support_drd; ++ bool is_cur_host; ++ bool deempth; ++ bool disable_suspend; ++ bool powerdown_scale; ++ bool is_usb2; ++ struct list_head list; ++ u32 tx_thrcfg; ++ u32 rx_thrcfg; ++ ++ enum wing_usb_state state; ++ DECLARE_KFIFO(event_fifo, struct wing_usb_event, ++ MAX_WING_USB_EVENT_COUNT); ++ ++ spinlock_t event_lock; ++ struct work_struct event_work; ++ struct phy *usb2_phy; ++ struct phy *usb3_phy; ++ struct clk *ctrl_clk; ++ ++ struct mutex lock; ++ ++ void __iomem *ctrl_base; ++ struct proc_dir_entry *proc_entry; ++}; ++ ++/* ++ * The event will be added to tail of a queue, and processed in a work. ++ * Return 0 means the event added sucessfully, others means event was rejected. 
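++ * The backing kfifo buffers at most MAX_WING_USB_EVENT_COUNT (16) pending events.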
++ */
++int wing_usb_queue_event(const struct wing_usb_event *usb_event,
++ struct wing_usb *wusb);
++
++int wing_usb_otg_event(enum wing_usb_event_type type, int ctrl_id);
++
++#endif /* _WING_USB_H_ */
+diff --git a/drivers/vendor/usb_phy/Kconfig b/drivers/vendor/usb_phy/Kconfig
+new file mode 100644
+index 000000000..b11d42fbf
+--- /dev/null
++++ b/drivers/vendor/usb_phy/Kconfig
+@@ -0,0 +1,27 @@
++menu "Wing UPS Phy"
++
++config WING_UPS_PHY
++ tristate "Support Wing UPS Phy"
++ select GENERIC_PHY
++ help
++ Say Y or M here if you want to support the Wing UPS phy.
++
++if WING_UPS_PHY
++config WING_UPS_XVP_PHY
++ bool "Support UPS Xvp Phy"
++ help
++ Say Y here if you want to support the xvp phy.
++
++config WING_UPS_NANO_PHY
++ bool "Support UPS Nano Phy"
++ help
++ Say Y here if you want to support the nano phy.
++
++config WING_UPS_MISSILE_PHY
++ bool "Support UPS Missile Phy"
++ help
++ Say Y here if you want to support the missile phy.
++
++endif #WING_UPS_PHY
++endmenu
++
+diff --git a/drivers/vendor/usb_phy/Makefile b/drivers/vendor/usb_phy/Makefile
+new file mode 100644
+index 000000000..3269ea485
+--- /dev/null
++++ b/drivers/vendor/usb_phy/Makefile
+@@ -0,0 +1,22 @@
++KBUILD_CFLAGS += -Werror
++
++ifneq ($(CONFIG_ARCH_SHAOLINSWORD),y)
++
++obj-$(CONFIG_WING_UPS_PHY) += wing-ups-phy.o
++wing-ups-phy-y := phy.o common.o proc.o
++wing-ups-phy-$(CONFIG_ARCH_SS626V100) += platform/ss626v100.o
++
++ifeq ($(CONFIG_WING_UPS_XVP_PHY), y)
++wing-ups-phy-y += xvp.o
++endif
++
++ifeq ($(CONFIG_WING_UPS_NANO_PHY), y)
++wing-ups-phy-y += nano.o
++endif
++
++ifeq ($(CONFIG_WING_UPS_MISSILE_PHY), y)
++wing-ups-phy-y += missile.o
++endif
++
++endif
++
+diff --git a/drivers/vendor/usb_phy/common.c b/drivers/vendor/usb_phy/common.c
+new file mode 100644
+index 000000000..fcdc53a49
+--- /dev/null
++++ b/drivers/vendor/usb_phy/common.c
+@@ -0,0 +1,93 @@
++/*
++ * Copyright (c) CompanyNameMagicTag 2022-2023. All rights reserved.
++ * Description: ups phy module
++ * Author: General IP Group
++ * Create: 2022-09-01
++ */
++
++#define DRVNAME "[ups-phy-common]"
++#define pr_fmt(fmt) DRVNAME ": " fmt
++
++#include "phy.h"
++#include "reg_common.h"
++
++#ifdef CONFIG_ARCH_HIWING
++#include "reg_hiwingv500.h"
++#else
++#include "reg_default.h"
++#endif
++
++unsigned int combphy_read(void __iomem *addr, u32 offset)
++{
++ u32 val;
++
++ val = phy_readl(addr + PERI_COMBOPHY0_CTRL1);
++ val |= PERI_COMBPHY0_TEST_RST_N;
++ val &= ~PERI_COMBPHY_TEST_ADDR;
++ val &= ~PERI_COMBPHY_TEST_I;
++ val |= (offset << 0);
++ phy_writel(val, addr + PERI_COMBOPHY0_CTRL1);
++
++ val = phy_readl(addr + PERI_COMBOPHY0_CTRL1);
++ val &= PERI_COMBPHY_TEST_O;
++ val = val >> 24; /* right shift 24 bits */
++
++ return val;
++}
++
++unsigned int combphy2_read(void __iomem *addr, u32 offset)
++{
++ u32 val;
++
++ val = phy_readl(addr + PERI_COMBOPHY2_CTRL);
++ val |= PERI_COMBPHY20_TEST_RST_N;
++ val |= PERI_COMBPHY21_TEST_RST_N;
++ val |= PERI_COMBPHY22_TEST_RST_N;
++ val |= PERI_COMBPHY23_TEST_RST_N;
++ val &= ~PERI_COMBPHY_TEST_ADDR;
++ val &= ~PERI_COMBPHY_TEST_I;
++ val |= (offset << 0);
++ phy_writel(val, addr + PERI_COMBOPHY2_CTRL);
++
++ val = phy_readl(addr + PERI_COMBOPHY2_CTRL);
++ val &= PERI_COMBPHY_TEST_O;
++ val = val >> 24; /* right shift 24 bits */
++
++ return val;
++}
++
++void combphy_write(void __iomem *reg, u32 addr, u32 value)
++{
++ u32 val;
++
++ val = phy_readl(reg);
++ val |= PERI_COMBPHY0_TEST_RST_N; /* Combophy0/1 have just 1 lane */
++ val &= ~PERI_COMBPHY_TEST_ADDR;
++ val &= ~PERI_COMBPHY_TEST_I;
++ val |= (addr << PERI_COMBPHY_ADDR_OFFSET);
++ val |= (value << PERI_COMBPHY_DATA_OFFSET);
++ phy_writel(val, reg);
++
++ val = phy_readl(reg);
++ val |= PERI_COMBPHY_TEST_WRITE;
++ phy_writel(val, reg);
++ ups_phy_dbg("ComboPHY write:addr(%#x),value(%#x)\n", addr, value);
++ val = phy_readl(reg);
++ val &= ~PERI_COMBPHY_TEST_WRITE;
++ phy_writel(val, reg);
++}
++
++void combphy2_write(void __iomem *reg, u32 addr, u32 value)
++{
++ u32 val;
++
++ /* Combphy2 has 4 lanes, so all four lane test resets need configuring */
++ val = phy_readl(reg);
++ val |= PERI_COMBPHY20_TEST_RST_N;
++ val |= PERI_COMBPHY21_TEST_RST_N;
++ val |= PERI_COMBPHY22_TEST_RST_N;
++ val |= PERI_COMBPHY23_TEST_RST_N;
++ phy_writel(val, reg);
++
++ combphy_write(reg, addr, value);
++}
+diff --git a/drivers/vendor/usb_phy/driver_config.mk b/drivers/vendor/usb_phy/driver_config.mk
+new file mode 100644
+index 000000000..7fcaf8e5b
+--- /dev/null
++++ b/drivers/vendor/usb_phy/driver_config.mk
+@@ -0,0 +1,7 @@
++DRIVER_CONFIG += CONFIG_WING_UPS_PHY=y
++DRIVER_CONFIG += CONFIG_WING_UPS_XVP_PHY=y CONFIG_WING_UPS_NANO_PHY=y
++DRIVER_CFLAGS += -DCONFIG_WING_UPS_XVP_PHY -DCONFIG_WING_UPS_NANO_PHY
++
++RECOVERY_DRIVER_CONFIG += CONFIG_WING_UPS_PHY=y
++RECOVERY_DRIVER_CONFIG += CONFIG_WING_UPS_XVP_PHY=y CONFIG_WING_UPS_NANO_PHY=y
++RECOVERY_DRIVER_CFLAGS += -DCONFIG_WING_UPS_XVP_PHY -DCONFIG_WING_UPS_NANO_PHY
+diff --git a/drivers/vendor/usb_phy/driver_obj.mk b/drivers/vendor/usb_phy/driver_obj.mk
+new file mode 100644
+index 000000000..e1256fb4c
+--- /dev/null
++++ b/drivers/vendor/usb_phy/driver_obj.mk
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_WING_UPS_PHY) += huanglong/ups/ups_phy/
++
+diff --git a/drivers/vendor/usb_phy/missile.c b/drivers/vendor/usb_phy/missile.c
+new file mode 100644
+index 000000000..b3a4062d5
+--- /dev/null
++++ b/drivers/vendor/usb_phy/missile.c
+@@ -0,0 +1,235 @@
++/*
++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved.
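++ * Description: missile combo-phy driver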
++ */ ++ ++#define DRVNAME "[usb-missile-phy]" ++#define pr_fmt(fmt) DRVNAME ": " fmt ++ ++#include "phy.h" ++#include "reg_common.h" ++ ++#ifdef CONFIG_ARCH_HIWING ++#include "reg_hiwingv500.h" ++#else ++#include "reg_default.h" ++#endif ++ ++/* TEST BIT */ ++#define MISSILE_PHY_TEST_REG_8 (0x20) ++#define MISSILE_PHY_CDR_CPF_TRIM_MASK (0xF) ++#define MISSILE_PHY_CDR_CPF_TRIM_VAL (0xF) ++ ++#define MISSILE_PHY_TEST_REG_15 (0x3C) ++#define MISSILE_PHY_TX_PLL_TRIM_USB3_SATA_MASK (0x1F) ++#define MISSILE_PHY_TX_PLL_TRIM_USB3_SATA_VAL (0x3) ++#define MISSILE_PHY_TX_PLL_TRIM_PCIE_MASK (0x1F << 5) ++#define MISSILE_PHY_TX_PLL_TRIM_PCIE_VAL ((0x3 << 5) & MISSILE_PHY_TX_PLL_TRIM_PCIE_MASK) ++ ++#define MISSILE_PHY_TEST_REG_16 (0x40) ++#define MISSILE_PHY_PI_CURRENT_TRIM_MASK (0x3) ++#define MISSILE_PHY_PI_CURRENT_TRIM_VAL (0x2) ++ ++#define MISSILE_PHY_TEST_REG_18 (0x48) ++#define MISSILE_PHY_RX_TERM_USB3_MASK (0xF) ++#define MISSILE_PHY_RX_TERM_USB3_VAL (0xA) ++#define MISSILE_PHY_RX_TERM_MASK (0xF << 4) ++#define MISSILE_PHY_RX_TERM_VAL ((0x7 << 4) & MISSILE_PHY_RX_TERM_MASK) ++ ++#define MISSILE_PHY_TEST_REG_22 (0x58) ++#define MISSILE_PHY_REDUCE_RX_DET_TH_MASK (0x1 << 1) ++#define MISSILE_PHY_REDUCE_RX_DET_TH_VAL (0x1 << 1) ++#define MISSILE_PHY_INTERPOLATOR_JUMP_LATER_MASK (0x1 << 2) ++#define MISSILE_PHY_INTERPOLATOR_JUMP_LATER_VAL (0x1 << 2) ++ ++#define MISSILE_PHY_ANALOG_REG0 (0x80) ++#define MISSILE_PHY_CDR_CTRL_MASK (0x7F << 16) ++#define MISSILE_PHY_CDR_CTRL_VAL ((0x48 << 16) & MISSILE_PHY_CDR_CTRL_MASK) ++ ++/* Performance optimization */ ++#define MISSILE_PHY_ANALOG_REG1 (0x84) ++#define MISSILE_PHY_CSEL_MASK (0x3 << 10) ++#define MISSILE_PHY_CSEL_VAL ((0x1 << 10) & MISSILE_PHY_CSEL_MASK) ++ ++#define MISSILE_PHY_RESEL11_MASK (0x3 << 12) ++#define MISSILE_PHY_RESEL11_VAL ((0x1 << 12) & MISSILE_PHY_RESEL11_MASK) ++ ++#define MISSILE_PHY_RESEL12_MASK (0x3 << 14) ++#define MISSILE_PHY_RESEL12_VAL ((0x0 << 14) & MISSILE_PHY_RESEL12_MASK) ++ ++#define MISSILE_PHY_AUTO_EQ_REG0 (0xB8) ++#define MISSILE_PHY_CFG_AUTO_DESKEW_EN_MASK (0x1 << 20) ++#define MISSILE_PHY_CFG_AUTO_DESKEW_EN_VAL ((0x0 << 20) & MISSILE_PHY_CFG_AUTO_DESKEW_EN_MASK) ++ ++#define MISSILE_PHY_CFG_AUTO_EQ_EN_MASK (0x1 << 21) ++#define MISSILE_PHY_CFG_AUTO_EQ_EN_VAL ((0x0 << 21) & MISSILE_PHY_CFG_AUTO_EQ_EN_MASK) ++ ++#define MISSILE_PHY_AUTO_EQ_REG5 (0xCC) ++#define MISSILE_PHY_CFG_EQ_OW_MASK (0x1F << 0) ++#define MISSILE_PHY_CFG_EQ_OW_VAL ((0xC << 0) & MISSILE_PHY_CFG_EQ_OW_MASK) ++ ++#define MISSILE_PHY_CFG_EQ_OW_EN_MASK (0x1 << 5) ++#define MISSILE_PHY_CFG_EQ_OW_EN_VAL ((0x1 << 5) & MISSILE_PHY_CFG_EQ_OW_EN_MASK) ++ ++#define MISSILE_PHY_CFG_DESKEW_OW_MASK (0x1F << 6) ++#define MISSILE_PHY_CFG_DESKEW_OW_VAL ((0x10 << 6) & MISSILE_PHY_CFG_DESKEW_OW_MASK) ++ ++#define MISSILE_PHY_CFG_DESKEW_OW_EN_MASK (0x1 << 11) ++#define MISSILE_PHY_CFG_DESKEW_OW_EN_VAL ((0x1 << 11) & MISSILE_PHY_CFG_DESKEW_OW_EN_MASK) ++ ++ ++static void missile_phy_eye(const struct ups_phy_priv *priv) ++{ ++ u32 reg; ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ reg &= ~(MISSILE_PHY_CSEL_MASK); ++ reg |= (MISSILE_PHY_CSEL_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ reg &= ~(MISSILE_PHY_RESEL11_MASK); ++ reg |= (MISSILE_PHY_RESEL11_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ reg &= ~(MISSILE_PHY_RESEL12_MASK); ++ reg |= (MISSILE_PHY_RESEL12_VAL); ++ 
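/* commit RESEL12, then freeze the auto-EQ engine: adaptation is disabled and fixed EQ/deskew codes are forced via the overwrite-enable bits */
++ 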
phy_writel(reg, priv->phy_base + MISSILE_PHY_ANALOG_REG1); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG0); ++ reg &= ~(MISSILE_PHY_CFG_AUTO_DESKEW_EN_MASK); ++ reg |= (MISSILE_PHY_CFG_AUTO_DESKEW_EN_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG0); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG0); ++ reg &= ~(MISSILE_PHY_CFG_AUTO_EQ_EN_MASK); ++ reg |= (MISSILE_PHY_CFG_AUTO_EQ_EN_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG0); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ reg &= ~(MISSILE_PHY_CFG_EQ_OW_MASK); ++ reg |= (MISSILE_PHY_CFG_EQ_OW_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ reg &= ~(MISSILE_PHY_CFG_EQ_OW_EN_MASK); ++ reg |= (MISSILE_PHY_CFG_EQ_OW_EN_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ reg &= ~(MISSILE_PHY_CFG_DESKEW_OW_MASK); ++ reg |= (MISSILE_PHY_CFG_DESKEW_OW_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ reg &= ~(MISSILE_PHY_CFG_DESKEW_OW_EN_MASK); ++ reg |= (MISSILE_PHY_CFG_DESKEW_OW_EN_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_AUTO_EQ_REG5); ++ ++ return; ++} ++ ++static int missile_phy_common_init(const struct ups_phy_priv *priv) ++{ ++ u32 reg; ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_TEST_REG_8); ++ reg &= ~(MISSILE_PHY_CDR_CPF_TRIM_MASK); ++ reg |= (MISSILE_PHY_CDR_CPF_TRIM_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_TEST_REG_8); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_TEST_REG_15); ++ reg &= ~(MISSILE_PHY_TX_PLL_TRIM_USB3_SATA_MASK); ++ reg |= (MISSILE_PHY_TX_PLL_TRIM_USB3_SATA_VAL); ++ reg &= ~(MISSILE_PHY_TX_PLL_TRIM_PCIE_MASK); ++ reg |= (MISSILE_PHY_TX_PLL_TRIM_PCIE_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_TEST_REG_15); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_TEST_REG_16); ++ reg &= ~(MISSILE_PHY_PI_CURRENT_TRIM_MASK); ++ reg |= (MISSILE_PHY_PI_CURRENT_TRIM_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_TEST_REG_16); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_TEST_REG_18); ++ reg &= ~(MISSILE_PHY_RX_TERM_USB3_MASK); ++ reg |= (MISSILE_PHY_RX_TERM_USB3_VAL); ++ reg &= ~(MISSILE_PHY_RX_TERM_MASK); ++ reg |= (MISSILE_PHY_RX_TERM_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_TEST_REG_18); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_TEST_REG_22); ++ reg &= ~(MISSILE_PHY_REDUCE_RX_DET_TH_MASK | ++ MISSILE_PHY_INTERPOLATOR_JUMP_LATER_MASK); ++ reg |= (MISSILE_PHY_REDUCE_RX_DET_TH_VAL | ++ MISSILE_PHY_INTERPOLATOR_JUMP_LATER_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_TEST_REG_22); ++ ++ reg = phy_readl(priv->phy_base + MISSILE_PHY_ANALOG_REG0); ++ reg &= ~(MISSILE_PHY_CDR_CTRL_MASK); ++ reg |= (MISSILE_PHY_CDR_CTRL_VAL); ++ phy_writel(reg, priv->phy_base + MISSILE_PHY_ANALOG_REG0); ++ ++ return 0; ++} ++ ++static int missile_phy_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ ret = clk_prepare(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("missile phy clk prepare failed\n"); ++ return -1; ++ } ++ ++ ret = clk_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("missile phy clk enable failed\n"); ++ return -1; ++ } ++ ++ ret = missile_phy_common_init(priv); ++ if (ret != 0) { ++ ups_phy_err("missile 
phy common init failed\n"); ++ return -1; ++ } ++ ++ missile_phy_eye(priv); ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int missile_phy_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ clk_disable_unprepare(priv->phy_clk); ++ ++ return 0; ++} ++ ++struct phy_ops g_missile_phy_common_ops = { ++ .power_on = missile_phy_power_on, ++ .power_off = missile_phy_power_off, ++}; ++ ++static struct of_device_id g_missile_phy_of_match[] = { ++ { ++ .compatible = "combophy,missile_phy", ++ .data = &g_missile_phy_common_ops ++ }, ++ {}, ++}; ++ ++struct of_device_id* ups_missile_phy_get_of_device_id(int *num) ++{ ++ *num = (sizeof(g_missile_phy_of_match) / sizeof(struct of_device_id)) - 1; ++ ups_phy_info("missile phy num = %d\n", *num); ++ return of_match_ptr(g_missile_phy_of_match); ++} ++ +diff --git a/drivers/vendor/usb_phy/nano.c b/drivers/vendor/usb_phy/nano.c +new file mode 100644 +index 000000000..2f2f70de5 +--- /dev/null ++++ b/drivers/vendor/usb_phy/nano.c +@@ -0,0 +1,957 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) CompanyNameMagicTag 2022-2023. All rights reserved. ++ * Description: nano phy ++ * Author: General IP Group ++ * Create: 2022-09-01 ++ */ ++ ++#define DRVNAME "[usb-nano-phy]" ++#define pr_fmt(fmt) DRVNAME ": " fmt ++ ++#include "phy.h" ++#include "reg_common.h" ++ ++#ifdef CONFIG_ARCH_HIWING ++#include "reg_hiwingv500.h" ++#else ++#include "reg_default.h" ++#endif ++ ++#ifndef CONFIG_ARCH_BSP ++#ifdef CONFIG_ARCH_HIWING ++#include ++#else ++#include ++#endif ++#endif ++ ++#ifndef WUDANGSTICK_D_MASK ++#define WUDANGSTICK_D_MASK (3272147200ULL) ++#endif ++ ++#ifndef CONFIG_ARCH_BSP ++static bool chip_type_d(void) ++{ ++ return (get_chipid(WUDANGSTICK_D_MASK) == WUDANGSTICK_D_MASK); ++} ++#else ++static inline bool chip_type_d(void) ++{ ++ return true; ++} ++#endif ++ ++static void nano_phy_get_mode(struct ups_phy_priv *priv, ++ void __iomem *addr, u32 mask, u32 off) ++{ ++ u32 val; ++ enum ups_phy_mode mode; ++ ++ val = phy_readl(addr); ++ val &= mask; ++ val = val >> off; ++ if (val == COMBOPHY_MODE_USB) { ++ mode = UPS_PHY_MODE_USB; ++ } else if (val == COMBOPHY_MODE_PCIE) { ++ mode = UPS_PHY_MODE_PCIE; ++ } else if (val == COMBOPHY_MODE_SATA) { ++ mode = UPS_PHY_MODE_SATA; ++ } else { ++ mode = UPS_PHY_MODE_UNKNOW; ++ } ++ ++ priv->mode = mode; ++} ++ ++static void nano_phy_x1phy_usb_eye(const struct ups_phy_priv *priv, u32 phy_offset) ++{ ++ /* Disable X1 slew assist and SSC offset down 200ppm */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_SLEW_ASSIST_DIS_AND_SSC_ADDR, ++ X1_SLEW_ASSIST_DIS_AND_SSC_VAL); ++ ++ /* Set X1 TX swing compensation to be level 10 */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_TX_SWING_COMP_ADDR, X1_TX_SWING_COMP_VAL); ++ ++ /* Set X1 RX CDR direct trim & EQ peaking bit[0] */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_CDR_DIRECT_TRIM_EQ_PEAKING_ADDR, X1_CDR_DIRECT_TRIM_EQ_PEAKING_VAL); ++ ++ /* Disable X1 all DFE for 8G & 10G */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_DFE_DIS_8G10G_ADDR, X1_DFE_DIS_8G10G_VAL); ++ ++ /* Set X1 RX EQ swing & EQ peaking bit[1] */ ++ combphy_write(priv->peri_base + phy_offset, ++ EQ_SWING_INC_PEAK_FREQ_ADDR, EQ_SWING_INC_PEAK_FREQ_VAL); ++ ++ /* Set X1 TX PLL charge pump current to be 1.33uA */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_TXPLL_TRIM_ADDR, X1_TXPLL_TRIM_VAL); ++ ++ /* Set X1 reference PLL to be 100MHz */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_REF_CLK_100N250_ADDR, 
X1_REF_CLK_100N250_VAL); ++ ++ /* Set X1 EQ initial value to be 0x2a */ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_EQ_INIT_MANUAL_SET1_ADDR, X1_EQ_INIT_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR, ++ X1_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_EQ_INIT_MANUAL_SET0_ADDR, X1_EQ_INIT_MANUAL_SET0_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy_write(priv->peri_base + phy_offset, ++ X1_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR, ++ X1_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL); ++} ++ ++static void nano_phy_x4phy_lanec_usb_eye(const struct ups_phy_priv *priv) ++{ ++ /* Disable X4 slew assist and SSC offset down 200ppm */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_SLEW_ASSIST_DIS_AND_SSC_ADDR, ++ X4_LANEC_SLEW_ASSIST_DIS_AND_SSC_VAL); ++ ++ /* Set X4 TX swing compensation to be level 10 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_TW_SWING_COMP_ADDR, X4_LANEC_TW_SWING_COMP_VAL); ++ ++ /* Set X4 RX CDR direct trim & EQ peaking bit[0] */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_CDR_DIRECT_TRIM_ADDR, X4_LANEC_CDR_DIRECT_TRIM_VAL); ++ ++ /* Disable X4 all DFE for 8G & 10G */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_DFE_DIS_8G10G_ADDR, X4_LANEC_DFE_DIS_8G10G_VAL); ++ ++ /* Set X4 RX EQ swing & EQ peaking bit[1] */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_EQ_SWING_ADDR, X4_LANEC_EQ_SWING_VAL); ++ ++ /* Set X4 TX PLL charge pump current to be 1.33uA */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_TXPLL_TRIM_ADDR, X4_LANEC_TXPLL_TRIM_VAL); ++ ++ /* Set X4 reference PLL to be 100MHz */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_REF_CLK_100N250_ADDR, X4_LANEC_REF_CLK_100N250_VAL); ++ ++ /* Set X4 EQ initial value to be 0x2a */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_EQ_INIT_MANUAL_SET1_ADDR, X4_LANEC_EQ_INIT_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR, ++ X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_EQ_INIT_MANUAL_SET0_ADDR, X4_LANEC_EQ_INIT_MANUAL_SET0_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR, ++ X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL); ++} ++ ++static void nano_phy_x4phy_laned_usb_eye(const struct ups_phy_priv *priv) ++{ ++ /* Disable X4 slew assist and SSC offset down 200ppm */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_SLEW_ASSIST_DIS_AND_SSC_ADDR, ++ X4_LANED_SLEW_ASSIST_DIS_AND_SSC_VAL); ++ ++ /* Set X4 TX swing compensation to be level 10 */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_TW_SWING_COMP_ADDR, X4_LANED_TW_SWING_COMP_VAL); ++ ++ /* Set X4 RX CDR direct trim & EQ peaking bit[0] */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_CDR_DIRECT_TRIM_ADDR, X4_LANED_CDR_DIRECT_TRIM_VAL); ++ ++ /* Disable X4 all DFE for 8G & 10G */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_DFE_DIS_8G10G_ADDR, X4_LANED_DFE_DIS_8G10G_VAL); ++ ++ /* Set X4 RX EQ swing & EQ peaking bit[1] */ ++ 
combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_EQ_SWING_ADDR, X4_LANED_EQ_SWING_VAL); ++ ++ /* Set X4 TX PLL charge pump current to be 1.33uA */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_TXPLL_TRIM_ADDR, X4_LANED_TXPLL_TRIM_VAL); ++ ++ /* Set X4 reference PLL to be 100MHz */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_REF_CLK_100N250_ADDR, X4_LANED_REF_CLK_100N250_VAL); ++ ++ /* Set inv_rxcdrclk to invert the polarity of CDR clock at input from ++ * analog to PCS, this is for X4 LaneD only */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_INV_RXCDRCLK_ADDR, X4_LANED_INV_RXCDRCLK_VAL); ++ ++ /* Set X4 EQ initial value to be 0x2a */ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_EQ_INIT_MANUAL_SET1_ADDR, X4_LANED_EQ_INIT_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR, ++ X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_EQ_INIT_MANUAL_SET0_ADDR, ++ X4_LANED_EQ_INIT_MANUAL_SET0_VAL); ++ udelay(1); /* delay 1 us */ ++ ++ combphy2_write(priv->phy_base + PERI_COMBOPHY2_CTRL1, ++ X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR, ++ X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL); ++} ++ ++static int nano_phy_usb_common_init(const struct ups_phy_priv *priv, ++ u32 reg, u32 u3_disable_bit) ++{ ++ int ret; ++ u32 val; ++ ++ val = phy_readl(priv->peri_base + reg); ++ val &= ~(u3_disable_bit); ++ if (priv->force_5g) ++ val |= PERI_USB3_PORT_FORCE_5G; ++ phy_writel(val, priv->peri_base + reg); ++ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("clk prepare and enable failed\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static int nano_phy_combophy0_usb_power_on(struct phy *phy) ++{ ++ int ret; ++ u32 val; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_COMBPHY0_CTRL0, ++ COMBPHY0_MODE_MASK, COMBPHY0_MODE_OFFSET); ++ ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ ret = nano_phy_usb_common_init(priv, PERI_USB31_CTRL_CFG0, ++ PERI_USB3_PORT_DISABLE); ++ if (ret < 0) { ++ ups_phy_err("nano combophy0 init failed.\n"); ++ return -1; ++ } ++ ++ val = phy_readl(priv->peri_base + PERI_USB31_CTRL_CFG0); ++ /* both cs and cs ec need close ovrcur */ ++ val &= ~PERI_PORT_OVRCUR_EN; ++ if (!chip_type_d()) { ++ /* because cs chip X1 combphy bug, disable u3 */ ++ val |= PERI_USB3_PORT_DISABLE; ++ } ++ phy_writel(val, priv->peri_base + PERI_USB31_CTRL_CFG0); ++ ++ nano_phy_x1phy_usb_eye(priv, PERI_COMBOPHY0_CTRL1); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy0_usb_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_COMBPHY0_CTRL0, ++ COMBPHY0_MODE_MASK, COMBPHY0_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ clk_disable_unprepare(priv->phy_clk); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy1_usb_power_on(struct phy *phy) ++{ ++ int ret; ++ u32 val; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ ret = nano_phy_usb_common_init(priv, PERI_USB31_CTRL_CFG1, ++ PERI_USB3_PORT_DISABLE); ++ if (ret < 0) { ++ ups_phy_err("nano combophy1 init failed.\n"); ++ return -1; 
++ } ++ ++ val = phy_readl(priv->peri_base + PERI_USB31_CTRL_CFG1); ++ /* both cs and cs ec need close ovrcur */ ++ val &= ~PERI_PORT_OVRCUR_EN; ++ if (!chip_type_d()) { ++ /* because cs chip X1 combphy bug, disable u3 */ ++ val |= PERI_USB3_PORT_DISABLE; ++ } ++ phy_writel(val, priv->peri_base + PERI_USB31_CTRL_CFG1); ++ ++ nano_phy_x1phy_usb_eye(priv, PERI_COMBOPHY1_CTRL1); ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy22_usb_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ ret = nano_phy_usb_common_init(priv, PERI_USB2_CTRL_CFG1, ++ PERI_USB3_PORT_DISABLE_CSU30); ++ if (ret < 0) { ++ ups_phy_err("nano combophy22 init failed.\n"); ++ return -1; ++ } ++ ++ nano_phy_x4phy_lanec_usb_eye(priv); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy22_usb_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ clk_disable_unprepare(priv->phy_clk); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy23_usb_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ ret = nano_phy_usb_common_init(priv, PERI_USB31_CTRL_CFG2, ++ PERI_USB3_PORT_DISABLE); ++ if (ret < 0) { ++ ups_phy_err("nano combophy23 init failed.\n"); ++ return -1; ++ } ++ ++ nano_phy_x4phy_laned_usb_eye(priv); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy23_usb_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_USB) { ++ clk_disable_unprepare(priv->phy_clk); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_x4phy_pcie_config(const struct ups_phy_priv *priv, u32 lane) ++{ ++ int ret; ++ u32 off = lane * 0x40; /* step is 0x40 for per lane */ ++ struct clk *clk = NULL; ++ ++ ups_phy_dbg("+++\n"); ++ ++ switch (lane) { ++ case COMBOPHY2_LANE0: ++ clk = priv->phy_clk; ++ break; ++ case COMBOPHY2_LANE1: ++ clk = priv->phy_clk1; ++ break; ++ case COMBOPHY2_LANE2: ++ clk = priv->phy_clk2; ++ break; ++ case COMBOPHY2_LANE3: ++ clk = priv->phy_clk3; ++ break; ++ default: ++ break; ++ } ++ ++ /* enable clk */ ++ ret = clk_prepare_enable(clk); ++ if (ret != 0) { ++ ups_phy_err("lane %d pcie clk enable failed\n", lane); ++ return -1; ++ } ++ ++ /* SLEW_ASSIST_DIS=1'b1 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x04, 0x10); ++ ++ /* CDR_DIRECT_TRIM=2'b11, EQ_PEAKING=2'b00 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x10, 0x39); ++ ++ /* DISABLE_TAP1_DFE */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x1b, 0x01); ++ ++ /* EQ_SWING=1'b1, EQ_PEAKING=2'b00 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x12, 0x46); ++ ++ /* ls/fs/hs default */ ++ 
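/* registers 0x14-0x16 hold the per-rate EQ presets; they are programmed twice around the pwon_cdr_manual pulse at offset 0x05 */
++ 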
combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x14, EQ_DEFAULT1); ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x15, EQ_DEFAULT1); ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x16, EQ_DEFAULT1); ++ ++ /* pwon_cdr_manual=1 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x05, 0x10); ++ ++ udelay(20); /* delay 20 us */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x14, EQ_DEFAULT2); ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x15, EQ_DEFAULT2); ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x16, EQ_DEFAULT2); ++ ++ /* pwon_cdr_manual=0 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x05, 0x00); ++ ++ /* open spread spectrum clocking */ ++ if (priv->ssc) ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, off + 0x04, 0x18); ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_lane0_pcie_init(struct ups_phy_priv *priv) ++{ ++ u32 val; ++ ++ /* phy lane0 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY2_CFG0); ++ val |= PERI_COMBPHY2_0_RX_TERMINATION; ++ val |= PERI_COMBPHY2_0_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY2_CFG0); ++ ++ return nano_phy_x4phy_pcie_config(priv, COMBOPHY2_LANE0); ++} ++ ++static int nano_phy_lane1_pcie_init(struct ups_phy_priv *priv) ++{ ++ u32 val; ++ ++ /* phy lane2 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY2_CFG1); ++ val |= PERI_COMBPHY2_1_RX_TERMINATION; ++ val |= PERI_COMBPHY2_1_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY2_CFG1); ++ ++ return nano_phy_x4phy_pcie_config(priv, COMBOPHY2_LANE1); ++} ++ ++static int nano_phy_lane2_pcie_init(struct ups_phy_priv *priv) ++{ ++ u32 val; ++ ++ /* phy lane2 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY2_CFG2); ++ val |= PERI_COMBPHY2_2_RX_TERMINATION; ++ val |= PERI_COMBPHY2_2_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY2_CFG2); ++ ++ return nano_phy_x4phy_pcie_config(priv, COMBOPHY2_LANE2); ++} ++ ++static int nano_phy_lane3_pcie_init(struct ups_phy_priv *priv) ++{ ++ u32 val; ++ ++ /* phy lane3 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY2_CFG3); ++ val |= PERI_COMBPHY2_3_RX_TERMINATION; ++ val |= PERI_COMBPHY2_3_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY2_CFG3); ++ ++ return nano_phy_x4phy_pcie_config(priv, COMBOPHY2_LANE3); ++} ++ ++static int nano_phy_pcie0_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, COMBPHY20_SEL_MASK, ++ COMBOPHY2_0_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ ret = nano_phy_lane0_pcie_init(priv); ++ if (ret != 0) ++ return ret; ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY21_SEL_MASK, COMBOPHY2_1_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ ret = nano_phy_lane1_pcie_init(priv); ++ if (ret != 0) ++ return ret; ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ ret = nano_phy_lane2_pcie_init(priv); ++ if (ret != 0) ++ return ret; ++ } else { ++ /* if lane2 is not pcie, then lane3 is not belonging to pcie0 */ ++ return 0; ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == 
UPS_PHY_MODE_PCIE) { ++ ret = nano_phy_lane3_pcie_init(priv); ++ if (ret != 0) ++ return ret; ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_pcie0_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY20_SEL_MASK, COMBOPHY2_0_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk); ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY21_SEL_MASK, COMBOPHY2_1_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk1); ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk2); ++ } else { ++ ups_phy_dbg("---\n"); ++ /* if lane2 is not pcie, then lane3 is not belonging to pcie0 */ ++ return 0; ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk3); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_combophy0_pcie_config(const struct ups_phy_priv *priv) ++{ ++ int ret; ++ ++ /* enable clk */ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("combophy0 pcie clk enable failed\n"); ++ return -1; ++ } ++ ++ /* SLEW_ASSIST_DIS=1'b1 */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x04, 0x10); ++ ++ /* CDR_DIRECT_TRIM=2'b11, EQ_PEAKING=2'b00 */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x10, 0x39); ++ ++ /* DISABLE_TAP1_DFE */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x1b, 0x01); ++ ++ /* EQ_SWING=1'b1, EQ_PEAKING=2'b00 */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x12, 0x46); ++ ++ /* ls/fs/hs default */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x14, EQ_DEFAULT1); ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x15, EQ_DEFAULT1); ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x16, EQ_DEFAULT1); ++ ++ /* pwon_cdr_manual=1 */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x05, 0x10); ++ ++ udelay(20); /* delay 20 us */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x14, EQ_DEFAULT2); ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x15, EQ_DEFAULT2); ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x16, EQ_DEFAULT2); ++ ++ /* pwon_cdr_manual=0 */ ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x05, 0x00); ++ ++ /* open spread spectrum clocking */ ++ if (priv->ssc) ++ combphy_write(priv->peri_base + PERI_COMBOPHY0_CTRL1, 0x04, 0x08); ++ ++ return 0; ++} ++ ++static int nano_phy_pcie1_power_on(struct phy *phy) ++{ ++ u32 val; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_COMBPHY0_CTRL0, ++ COMBPHY0_MODE_MASK, COMBPHY0_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ /* combphy0 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY0_CTRL0); ++ val |= PERI_COMBPHY0_RX_TERMINATION; ++ val |= PERI_COMBPHY0_WIDTH; ++ val |= PERI_COMBPHY0_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY0_CTRL0); ++ ++ return nano_phy_combophy0_pcie_config(priv); ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, 
COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ ups_phy_info("combophy2 is x4 mode, don't init lane3 alone\n"); ++ return 0; ++ } ++ ++ /* if combphy0 is not pcie mode, try combophy2_3 */ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ val = phy_readl(priv->peri_base + PERI_CTRL_1); ++ val |= PCIEX1CTRL_SEL; ++ phy_writel(val, priv->peri_base + PERI_CTRL_1); ++ ++ /* phy lane3 RX_TERMINATION=1'b1 */ ++ val = phy_readl(priv->peri_base + PERI_COMBPHY2_CFG3); ++ val |= PERI_COMBPHY2_3_RX_TERMINATION; ++ val |= PERI_COMBPHY2_3_CLK_REQ; ++ phy_writel(val, priv->peri_base + PERI_COMBPHY2_CFG3); ++ ++ return nano_phy_x4phy_pcie_config(priv, COMBOPHY2_LANE3); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_pcie1_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_COMBPHY0_CTRL0, ++ COMBPHY0_MODE_MASK, COMBPHY0_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk); ++ return 0; ++ } ++ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY22_SEL_MASK, COMBOPHY2_2_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ ups_phy_info("combophy2 is x4 mode, don't deinit lane3 alone\n"); ++ return 0; ++ } ++ ++ /* if combphy0 is not pcie mode, try combophy2_3 */ ++ nano_phy_get_mode(priv, priv->peri_base + PERI_CTRL_1, ++ COMBPHY23_SEL_MASK, COMBOPHY2_3_MODE_OFFSET); ++ if (priv->mode == UPS_PHY_MODE_PCIE) { ++ clk_disable_unprepare(priv->phy_clk3); ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("nano phy clk prepare and enable failed\n"); ++ return -1; ++ } ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ clk_disable_unprepare(priv->phy_clk); ++ ++ return 0; ++} ++ ++static unsigned int nano_phy_get_sata_port_map( ++ struct ups_phy_priv *priv) ++{ ++ u32 val; ++ u32 port_map = 0x0; ++ ++ val = phy_readl(priv->peri_base + PERI_CTRL_1); ++ if (((val & COMBPHY20_SEL_MASK) >> COMBOPHY2_0_MODE_OFFSET) == COMBOPHY_MODE_SATA) ++ port_map |= SATA_PORT0_MAP; ++ if (((val & COMBPHY21_SEL_MASK) >> COMBOPHY2_1_MODE_OFFSET) == COMBOPHY_MODE_SATA) ++ port_map |= SATA_PORT1_MAP; ++ if (((val & COMBPHY22_SEL_MASK) >> COMBOPHY2_2_MODE_OFFSET) == COMBOPHY_MODE_SATA) ++ port_map |= SATA_PORT2_MAP; ++ if (((val & COMBPHY23_SEL_MASK) >> COMBOPHY2_3_MODE_OFFSET) == COMBOPHY_MODE_SATA) ++ port_map |= SATA_PORT3_MAP; ++ ++ ups_phy_dbg("sata port map = 0x%x", port_map); ++ ++ return port_map; ++} ++ ++static void nano_phy_sata_ssc_config(struct ups_phy_priv *priv, ++ int port_num) ++{ ++ if (priv->ssc) { ++ /* set bit3 to 0 to enable sata ssc, ssc_reduce_swing set to 1, ++ * ans set ssc offset to 0x3 */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ port_num * NANO_PHY_PORT_LEN + SSC_ADDR_OFFSET, 0x7); ++ } else { ++ /* set bit3 to 1 to disable sata ssc */ ++ combphy2_write(priv->peri_base + PERI_COMBOPHY2_CTRL1, ++ port_num * NANO_PHY_PORT_LEN + SSC_ADDR_OFFSET, 0x8); ++ } ++} ++ ++static void nano_phy_sata_config(struct ups_phy_priv *priv) ++{ ++ 
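/* apply the SSC setting to every lane whose combophy mode select reads SATA */
++ 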
if (priv->port_map & SATA_PORT0_MAP) ++ nano_phy_sata_ssc_config(priv, 0x0); ++ ++ if (priv->port_map & SATA_PORT1_MAP) ++ nano_phy_sata_ssc_config(priv, 0x1); ++ ++ if (priv->port_map & SATA_PORT2_MAP) ++ nano_phy_sata_ssc_config(priv, 0x2); ++ ++ if (priv->port_map & SATA_PORT3_MAP) ++ nano_phy_sata_ssc_config(priv, 0x3); ++} ++ ++static int nano_phy_sata_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("nano phy clk prepare failed\n"); ++ return -1; ++ } ++ ++ nano_phy_sata_config(priv); ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_sata_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ clk_disable_unprepare(priv->phy_clk); ++ ++ return 0; ++} ++ ++static void nano_phy_x4phy_dump(ups_phy_dump_t dump, ++ struct ups_phy_priv *priv) ++{ ++ dump(priv, priv->peri_base, 0, 256); /* 4lane, read 256 byte */ ++} ++ ++static void nano_phy_combophy0_dump(ups_phy_dump_t dump, ++ struct ups_phy_priv *priv) ++{ ++ dump(priv, priv->peri_base, 0, 64); /* 1lane, read 64 byte */ ++} ++ ++static struct ups_phy_ops g_nano_phy_common_ops = { ++ .phy_ops.power_on = nano_phy_power_on, ++ .phy_ops.power_off = nano_phy_power_off, ++}; ++ ++static struct ups_phy_ops g_nano_phy_combophy0_ops = { ++ .phy_ops.power_on = nano_phy_combophy0_usb_power_on, ++ .phy_ops.power_off = nano_phy_combophy0_usb_power_off, ++}; ++ ++static struct ups_phy_ops g_nano_phy_combophy1_ops = { ++ .phy_ops.power_on = nano_phy_combophy1_usb_power_on, ++ .phy_ops.power_off = nano_phy_power_off, ++}; ++ ++static struct ups_phy_ops g_nano_phy_combophy22_ops = { ++ .phy_ops.power_on = nano_phy_combophy22_usb_power_on, ++ .phy_ops.power_off = nano_phy_combophy22_usb_power_off, ++}; ++ ++static struct ups_phy_ops g_nano_phy_combophy23_ops = { ++ .phy_ops.power_on = nano_phy_combophy23_usb_power_on, ++ .phy_ops.power_off = nano_phy_combophy23_usb_power_off, ++}; ++ ++static struct ups_phy_ops g_nano_phy_pcie0_ops = { ++ .phy_ops.power_on = nano_phy_pcie0_power_on, ++ .phy_ops.power_off = nano_phy_pcie0_power_off, ++ .ups_phy_read = combphy2_read, ++ .ups_phy_write = combphy2_write, ++ .ups_phy_dump = nano_phy_x4phy_dump, ++}; ++ ++static struct ups_phy_ops g_nano_phy_pcie1_ops = { ++ .phy_ops.power_on = nano_phy_pcie1_power_on, ++ .phy_ops.power_off = nano_phy_pcie1_power_off, ++ .ups_phy_read = combphy_read, ++ .ups_phy_write = combphy_write, ++ .ups_phy_dump = nano_phy_combophy0_dump, ++}; ++ ++static struct ups_phy_ops g_nano_phy_sata_ops = { ++ .phy_ops.power_on = nano_phy_sata_power_on, ++ .phy_ops.power_off = nano_phy_sata_power_off, ++ .ups_phy_get_port_map = nano_phy_get_sata_port_map, ++ .ups_phy_read = combphy2_read, ++ .ups_phy_write = combphy2_write, ++ .ups_phy_dump = nano_phy_x4phy_dump, ++}; ++ ++static struct of_device_id g_ups_nano_phy_of_match[] = { ++ { ++ .compatible = "combophy,common", ++ .data = &g_nano_phy_common_ops ++ }, ++ { ++ .compatible = "combophy,combophy0", ++ .data = &g_nano_phy_combophy0_ops ++ }, ++ { ++ .compatible = "combophy,combophy1", ++ .data = &g_nano_phy_combophy1_ops ++ }, ++ { ++ .compatible = "combophy,combophy2_2", ++ .data = &g_nano_phy_combophy22_ops ++ }, ++ { ++ .compatible = "combophy,combophy2_3", ++ .data = &g_nano_phy_combophy23_ops ++ }, ++ { ++ .compatible = "combophy,pcie0_phy", ++ .data = &g_nano_phy_pcie0_ops ++ }, ++ { ++ .compatible = "combophy,pcie1_phy", ++ .data 
= &g_nano_phy_pcie1_ops ++ }, ++ { ++ .compatible = "combophy,sata_phy", ++ .data = &g_nano_phy_sata_ops ++ }, ++ {}, ++}; ++ ++struct of_device_id* __attribute__((weak)) ups_nano_phy_get_of_device_id(int *num) ++{ ++ *num = (sizeof(g_ups_nano_phy_of_match) / sizeof(struct of_device_id)) - 1; ++ ups_phy_info("nano phy num = %d\n", *num); ++ return of_match_ptr(g_ups_nano_phy_of_match); ++} ++ +diff --git a/drivers/vendor/usb_phy/phy.c b/drivers/vendor/usb_phy/phy.c +new file mode 100644 +index 000000000..ee636c914 +--- /dev/null ++++ b/drivers/vendor/usb_phy/phy.c +@@ -0,0 +1,291 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) CompanyNameMagicTag 2022-2023. All rights reserved. ++ * Description: ups phy module ++ * Author: General IP Group ++ * Create: 2022-09-01 ++ */ ++ ++#define DRVNAME "ups-phy" ++#define pr_fmt(fmt) DRVNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_PROC_FS ++#include ++#include ++#endif ++#include ++#include ++ ++#include "phy.h" ++#include "proc.h" ++ ++static struct phy *ups_phy_xlate(struct device *dev, ++ struct of_phandle_args *args) ++{ ++ struct phy *phy = dev_get_drvdata(dev); ++ ++ if (IS_ERR_OR_NULL(phy)) { ++ ups_phy_err("phy address is error or null\n"); ++ return ERR_PTR(-ENODEV); ++ } ++ ++ return phy; ++} ++ ++static void set_dev_args(struct ups_phy_priv *priv, struct device *dev) ++{ ++ const struct of_device_id *match = NULL; ++ ++ match = of_match_device(dev->driver->of_match_table, dev); ++ if (match != NULL) { ++ priv->ups_phy_ops = (struct ups_phy_ops*)(match->data); ++ } ++} ++ ++static void ups_phy_get_clk(struct device *dev, struct ups_phy_priv *priv) ++{ ++ priv->phy_clk1 = devm_clk_get(dev, "phy-clk1"); ++ if (IS_ERR(priv->phy_clk1)) { ++ ups_phy_dbg("no phy_clk1.\n"); ++ } ++ ++ priv->phy_clk2 = devm_clk_get(dev, "phy-clk2"); ++ if (IS_ERR(priv->phy_clk2)) { ++ ups_phy_dbg("no phy_clk2.\n"); ++ } ++ ++ priv->phy_clk3 = devm_clk_get(dev, "phy-clk3"); ++ if (IS_ERR(priv->phy_clk3)) { ++ ups_phy_dbg("no phy_clk3.\n"); ++ } ++} ++ ++static int ups_phy_set_priv(struct platform_device *pdev, ++ struct ups_phy_priv *priv) ++{ ++ int ret; ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ ++ priv->phy_clk = devm_clk_get(dev, "phy-clk"); ++ if (IS_ERR(priv->phy_clk)) { ++ ups_phy_err("get phy_clk failed.\n"); ++ return PTR_ERR(priv->phy_clk); ++ } ++ ++ ups_phy_get_clk(dev, priv); ++ ++ priv->phy_base = of_iomap(np, PHY_BASE_NODE_IDX); ++ if (IS_ERR_OR_NULL(priv->phy_base)) { ++ ups_phy_err("phy_base ioremap failed.\n"); ++ goto fail; ++ } ++ ++ priv->peri_base = of_iomap(np, PERI_BASE_NODE_IDX); ++ if (IS_ERR_OR_NULL(priv->peri_base)) { ++ ups_phy_err("peri_base ioremap failed.\n"); ++ goto fail; ++ } ++ ++ priv->u2phy_trim_otp = of_iomap(np, TRIM_OTP_NODE_IDX); ++ if (IS_ERR_OR_NULL(priv->u2phy_trim_otp)) ++ ups_phy_dbg("don't get u32phy trim otp.\n"); ++ ++ ret = of_property_read_s32(np, "otp-phy-trim-bitshift", &priv->otp_phy_trim_bitshift); ++ if (ret != 0) { ++ priv->otp_phy_trim_bitshift = 0; ++ } ++ ++ ret = of_property_read_variable_u32_array(np, U2PHY_TRIM_NAME, ++ priv->u2phy_trim, 0, TRIM_NUM_MAX); ++ if (ret != 0) ++ ups_phy_dbg("don't get u2phy trim\n"); ++ ++ priv->name = of_get_property(dev->of_node, "phy-name", NULL); ++ if (priv->name == NULL) ++ priv->name = "none"; ++ ++ priv->force_5g = of_property_read_bool(np, "force-5G"); ++ ++ priv->ssc = of_property_read_bool(dev->of_node, "spread-spectrum-clocking"); ++ ++ set_dev_args(priv, dev); ++ ++ 
return 0; ++ ++fail: ++ if (priv->phy_base != NULL) { ++ iounmap(priv->phy_base); ++ priv->phy_base = NULL; ++ } ++ ++ return -1; ++} ++ ++static void ups_get_port_map(struct phy *phy, struct ups_phy_priv *priv) ++{ ++ /* save the port map to phy.attrs.bus_width, so that the ++ * controller can get it, such as wing ahci */ ++ if (priv->ups_phy_ops->ups_phy_get_port_map == NULL) ++ return; ++ ++ priv->port_map = priv->ups_phy_ops->ups_phy_get_port_map(priv); ++ phy_set_bus_width(phy, priv->port_map); ++} ++ ++static int ups_phy_probe(struct platform_device *pdev) ++{ ++ int ret = -1; ++ struct phy *phy; ++ struct phy_provider *phy_provider; ++ struct ups_phy_priv *priv; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (priv == NULL) { ++ ups_phy_err("failed dev_kzalloc\n"); ++ return -ENOMEM; ++ } ++ ++ ret = ups_phy_set_priv(pdev, priv); ++ if (ret != 0) ++ goto fail; ++ ++ phy = devm_phy_create(&pdev->dev, NULL, &priv->ups_phy_ops->phy_ops); ++ if (IS_ERR(phy)) { ++ ups_phy_err("failed to create PHY, %ld\n", PTR_ERR(phy)); ++ goto fail; ++ } ++ ++ ups_get_port_map(phy, priv); ++ ++ phy_set_drvdata(phy, priv); ++ dev_set_drvdata(&pdev->dev, phy); ++ ++ phy_provider = devm_of_phy_provider_register(&pdev->dev, ups_phy_xlate); ++ if (IS_ERR(phy_provider)) { ++ ups_phy_err("failed to register phy provider, %ld\n", ++ PTR_ERR(phy_provider)); ++ goto fail; ++ } ++ ++ ups_phy_proc_init(priv); ++ ++ return 0; ++fail: ++ if (priv->phy_base != NULL) { ++ iounmap(priv->phy_base); ++ priv->phy_base = NULL; ++ } ++ ++ if (priv->peri_base != NULL) { ++ iounmap(priv->peri_base); ++ priv->peri_base = NULL; ++ } ++ ++ if (priv->u2phy_trim_otp != NULL) { ++ iounmap(priv->u2phy_trim_otp); ++ priv->u2phy_trim_otp = NULL; ++ } ++ ++ return -1; ++} ++ ++static int __exit ups_phy_platform_remove(struct platform_device *pdev) ++{ ++ struct ups_phy_priv *priv = platform_get_drvdata(pdev); ++ ++ if (priv->phy_base != NULL) { ++ iounmap(priv->phy_base); ++ priv->phy_base = NULL; ++ } ++ ++ if (priv->peri_base != NULL) { ++ iounmap(priv->peri_base); ++ priv->peri_base = NULL; ++ } ++ ++ if (priv->u2phy_trim_otp != NULL) { ++ iounmap(priv->u2phy_trim_otp); ++ priv->u2phy_trim_otp = NULL; ++ } ++ ++ ups_phy_proc_deinit(); ++ ++ return 0; ++} ++ ++#define UPS_PHY_MAX_NUM 30 ++ ++static struct platform_driver g_ups_phy_driver = { ++ .probe = ups_phy_probe, ++ .remove = __exit_p(ups_phy_platform_remove), ++ .driver = { ++ .name = DRVNAME, ++ .owner = THIS_MODULE, ++ } ++}; ++ ++static struct of_device_id g_ups_phy_of_match[UPS_PHY_MAX_NUM] = { 0 }; ++ ++static void ups_phy_set_match_table(void) ++{ ++ int i; ++ int cur = 0; ++ int total_num = 0; ++ int phy_num; ++ struct of_device_id *id = NULL; ++ ++#ifdef CONFIG_WING_UPS_XVP_PHY ++ id = ups_xvp_get_of_device_id(&phy_num); ++ for (i = 0; i < phy_num && cur < UPS_PHY_MAX_NUM; i++) { ++ g_ups_phy_of_match[cur++] = id[i]; ++ } ++ total_num += phy_num; ++#endif ++ ++#ifdef CONFIG_WING_UPS_NANO_PHY ++ id = ups_nano_phy_get_of_device_id(&phy_num); ++ for (i = 0; i < phy_num && cur < UPS_PHY_MAX_NUM; i++) { ++ g_ups_phy_of_match[cur++] = id[i]; ++ } ++ total_num += phy_num; ++#endif ++ ++#ifdef CONFIG_WING_UPS_MISSILE_PHY ++ id = ups_missile_phy_get_of_device_id(&phy_num); ++ for (i = 0; i < phy_num && cur < UPS_PHY_MAX_NUM; i++) { ++ g_ups_phy_of_match[cur++] = id[i]; ++ } ++ total_num += phy_num; ++#endif ++ ++ if (total_num >= UPS_PHY_MAX_NUM) ++ ups_phy_err("total phy count too big, check it\n"); ++ ++ g_ups_phy_driver.driver.of_match_table = 
g_ups_phy_of_match; ++} ++ ++static int __init ups_phy_module_init(void) ++{ ++ ups_phy_info("registered new ups phy driver\n"); ++ ++ ups_phy_set_match_table(); ++ ++ return platform_driver_register(&g_ups_phy_driver); ++} ++subsys_initcall(ups_phy_module_init); ++ ++static void __exit ups_phy_module_exit(void) ++{ ++ platform_driver_unregister(&g_ups_phy_driver); ++} ++module_exit(ups_phy_module_exit); ++ ++MODULE_AUTHOR("Wing"); ++MODULE_DESCRIPTION("Wing UPS PHY driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/vendor/usb_phy/phy.h b/drivers/vendor/usb_phy/phy.h +new file mode 100644 +index 000000000..3b2a67c61 +--- /dev/null ++++ b/drivers/vendor/usb_phy/phy.h +@@ -0,0 +1,134 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) CompanyNameMagicTag 2022-2023. All rights reserved. ++ * Description: ups phy driver ++ * Author: General IP Group ++ * Create: 2022-09-01 ++ */ ++ ++#ifndef UPS_PHY_H ++#define UPS_PHY_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++enum { ++ U2PHY_ANA_CFG0 = 0x0, ++ U2PHY_ANA_CFG2 = 0x1, ++ U2PHY_ANA_CFG4 = 0x2, ++ TRIM_NUM_MAX ++}; ++ ++enum ups_phy_mode { ++ UPS_PHY_MODE_USB = 0x0, ++ UPS_PHY_MODE_PCIE = 0x1, ++ UPS_PHY_MODE_SATA = 0x2, ++ UPS_PHY_MODE_UNKNOW = 0x3 ++}; ++ ++#define PHY_BASE_NODE_IDX 0 ++#define PERI_BASE_NODE_IDX 1 ++#define TRIM_OTP_NODE_IDX 2 ++ ++#define U2PHY_TRIM_NAME "u2phy-trim" ++ ++#define SATA_PORT0_MAP BIT(0) ++#define SATA_PORT1_MAP BIT(1) ++#define SATA_PORT2_MAP BIT(2) ++#define SATA_PORT3_MAP BIT(3) ++ ++struct ups_phy_priv; ++ ++typedef void (*ups_phy_dump_t)(struct ups_phy_priv *priv, ++ void __iomem *addr, u32 off, u32 len); ++ ++struct ups_phy_ops { ++ struct phy_ops phy_ops; ++ ++ /* some phys have multiple phys ports, and ++ * the number of ports needs to be determined */ ++ unsigned int (*ups_phy_get_port_map)(struct ups_phy_priv *priv); ++ unsigned int (*ups_phy_read)(void __iomem *reg, u32 addr); ++ void (*ups_phy_write)(void __iomem *reg, u32 addr, u32 value); ++ void (*ups_phy_dump) (ups_phy_dump_t dump, struct ups_phy_priv *priv); ++}; ++ ++struct ups_phy_priv { ++ void __iomem *phy_base; ++ void __iomem *peri_base; ++ ++ struct clk *phy_clk; ++ /* clk1 to clk3 just for pcie */ ++ struct clk *phy_clk1; ++ struct clk *phy_clk2; ++ struct clk *phy_clk3; ++ ++ struct ups_phy_ops *ups_phy_ops; ++ ++ enum ups_phy_mode mode; ++ ++ const char *name; ++ ++ /* usb parameter */ ++ void __iomem *u2phy_trim_otp; ++ int otp_phy_trim_bitshift; ++ unsigned int u2phy_trim[TRIM_NUM_MAX]; ++ bool force_5g; ++ ++ /* pcie parameter */ ++ bool extern_clk; ++ bool ssc; ++ ++ /* sata parameter */ ++ u32 port_map; ++}; ++ ++#define UPS_PHY_DEBUG 0 ++ ++#define ups_phy_dbg(format, arg...) \ ++ do { \ ++ if (UPS_PHY_DEBUG != 0) \ ++ printk(KERN_INFO "[UPS-PHY][%s]"format, __func__, ##arg); \ ++ } while (0) ++ ++#define ups_phy_info(format, arg...) \ ++ printk(KERN_INFO "[UPS-PHY][%s]"format, __func__, ##arg) ++ ++#define ups_phy_err(format, arg...) 
\
++    printk(KERN_ERR "[UPS-PHY][%s]"format, __func__, ##arg)
++
++static inline unsigned int phy_readl(const void __iomem *addr)
++{
++    unsigned int reg = readl(addr);
++
++    ups_phy_dbg("readl(0x%lx) = %#08X\n", (uintptr_t)addr, reg);
++    return reg;
++}
++
++static inline void phy_writel(unsigned int v, void __iomem *addr)
++{
++    writel(v, addr);
++    ups_phy_dbg("writel(0x%lx) = %#08X\n", (uintptr_t)addr, v);
++}
++
++void combphy_write(void __iomem *reg, u32 addr, u32 value);
++
++unsigned int combphy_read(void __iomem *addr, u32 offset);
++
++void combphy2_write(void __iomem *reg, u32 addr, u32 value);
++
++unsigned int combphy2_read(void __iomem *addr, u32 offset);
++
++struct of_device_id* ups_xvp_get_of_device_id(int *num);
++
++struct of_device_id* ups_nano_phy_get_of_device_id(int *num);
++
++struct of_device_id* ups_missile_phy_get_of_device_id(int *num);
++
++#endif /* UPS_PHY_H */
+diff --git a/drivers/vendor/usb_phy/platform/ss626v100.c b/drivers/vendor/usb_phy/platform/ss626v100.c
+new file mode 100644
+index 000000000..0e7de2855
+--- /dev/null
++++ b/drivers/vendor/usb_phy/platform/ss626v100.c
+@@ -0,0 +1,201 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2024. All rights reserved.
++ * Description: nano phy driver
++ * Author: Common IP Group
++ * Create: 2024-06-08
++ */
++
++#define DRVNAME "[ss626-nano-phy]"
++#define pr_fmt(fmt) DRVNAME ": " fmt
++
++#include "../phy.h"
++
++#define HP_UPS_PHY_CTRL3 0x88
++#define HP_UPS_PHY_CTRL4 0x8C
++
++#define UPS_PHY_TEST_O_MASK (0xffU << 24)
++#define UPS_PHY_TEST_O_OFF 24
++#define UPS_PHY_TEST_I_MASK (0xffU << 16)
++#define UPS_PHY_TEST_I_OFF 16
++#define UPS_PHY_TEST_ADDR_MASK (0xffU << 8)
++#define UPS_PHY_TEST_ADDR_OFF 8
++#define UPS_PHY_TEST_WRITE BIT(0)
++
++#define UPS_MODE_CASE0 0
++#define UPS_MODE_CASE1 1
++#define UPS_MODE_CASE2 2
++#define UPS_MODE_CASE3 3
++#define UPS_MODE_CASE4 4
++#define UPS_MODE_CASE5 5
++
++#define SYS_STAT_REG 0x0018
++#define UPS_MODE_MASK (BIT(16) | BIT(17) | BIT(18))
++#define UPS_MODE_OFF 16
++
++static unsigned int upsphy_read(void __iomem *reg, u32 addr)
++{
++    u32 val;
++
++    val = phy_readl(reg);
++    val &= ~UPS_PHY_TEST_ADDR_MASK;
++    val &= ~UPS_PHY_TEST_I_MASK;
++    val |= (addr << UPS_PHY_TEST_ADDR_OFF);
++    phy_writel(val, reg);
++
++    val = phy_readl(reg);
++    val &= UPS_PHY_TEST_O_MASK;
++    val = val >> UPS_PHY_TEST_O_OFF;
++
++    return val;
++}
++
++static void upsphy_write(void __iomem *reg, u32 addr, u32 value)
++{
++    u32 val;
++
++    val = phy_readl(reg);
++    val &= ~UPS_PHY_TEST_ADDR_MASK;
++    val &= ~UPS_PHY_TEST_I_MASK;
++    val |= (addr << UPS_PHY_TEST_ADDR_OFF);
++    val |= (value << UPS_PHY_TEST_I_OFF);
++    phy_writel(val, reg);
++
++    val = phy_readl(reg);
++    val |= UPS_PHY_TEST_WRITE;
++    phy_writel(val, reg);
++    ups_phy_dbg("upsphy_write:addr(%#x),value(%#x)\n", addr, value);
++    val = phy_readl(reg);
++    val &= ~UPS_PHY_TEST_WRITE;
++    phy_writel(val, reg);
++}
++
++static void nano_phy_sata_port_config(const struct ups_phy_priv *priv,
++    u32 reg_off, u32 addr_off)
++{
++    /* SSC is enabled by default; if priv->ssc is false, disable it */
++    if (!priv->ssc)
++        upsphy_write(priv->phy_base + reg_off, addr_off + 0x02, 0x8);
++
++    /* 1.5G EQ=5'b00000c */
++    upsphy_write(priv->phy_base + reg_off, addr_off + 0x0c, 0xc0);
++
++    /* 3G EQ=5'b011000c */
++    upsphy_write(priv->phy_base + reg_off, addr_off + 0x0d, 0xd8);
++
++    /* headroom_inc TX_PLL and CDR = 1'b1c */
++    upsphy_write(priv->phy_base + reg_off, addr_off + 0x16, 0x3);
++
++    /* PM_TRIM EN_FREQUENCY_MONITOR = 1'b1c */
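++    /* Note: each write in this function is an indirect access:
++     * upsphy_write() latches the PHY-internal address into TEST_ADDR
++     * and the data into TEST_I, then pulses TEST_WRITE (see the
++     * helpers at the top of this file). */
++    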
upsphy_write(priv->phy_base + reg_off, addr_off + 0x14, 0x04); ++ ++ /* pwdn_6g_idle = 1'b1c */ ++ upsphy_write(priv->phy_base + reg_off, addr_off + 0x07, 0x04); ++ ++ /* TXPLL_TRIM_HBW = 3'b000 */ ++ upsphy_write(priv->phy_base + reg_off, addr_off + 0x10, 0x20); ++} ++ ++static unsigned int nano_phy_get_sata_port_map(struct ups_phy_priv *priv) ++{ ++ u32 val; ++ ++ val = phy_readl(priv->peri_base + SYS_STAT_REG); ++ val = (val & UPS_MODE_MASK) >> UPS_MODE_OFF; ++ switch (val) { ++ case UPS_MODE_CASE1: ++ return 0x1; /* 1 sata port */ ++ case UPS_MODE_CASE2: ++ case UPS_MODE_CASE3: ++ return 0x3; /* 2 sata ports */ ++ case UPS_MODE_CASE4: ++ return 0x7; /* 3 sata ports */ ++ case UPS_MODE_CASE5: ++ return 0xf; /* 4 sata ports */ ++ default: ++ break; ++ } ++ ++ return 0x0; ++} ++ ++static void nano_phy_sata_config(struct ups_phy_priv *priv) ++{ ++ if (priv->port_map & SATA_PORT0_MAP) /* phy1 and phy2 portb phy addr off is 0x80 */ ++ nano_phy_sata_port_config(priv, HP_UPS_PHY_CTRL3, 0x80); ++ ++ if (priv->port_map & SATA_PORT1_MAP) ++ nano_phy_sata_port_config(priv, HP_UPS_PHY_CTRL3, 0x0); ++ ++ if (priv->port_map & SATA_PORT2_MAP) ++ nano_phy_sata_port_config(priv, HP_UPS_PHY_CTRL4, 0x80); ++ ++ if (priv->port_map & SATA_PORT3_MAP) ++ nano_phy_sata_port_config(priv, HP_UPS_PHY_CTRL4, 0x0); ++} ++ ++static int nano_phy_sata_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++\n"); ++ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("nano phy clk prepare failed\n"); ++ return -1; ++ } ++ ++ nano_phy_sata_config(priv); ++ ++ ups_phy_dbg("---\n"); ++ ++ return 0; ++} ++ ++static int nano_phy_sata_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ clk_disable_unprepare(priv->phy_clk); ++ ++ return 0; ++} ++ ++static void nano_phy_x4phy_dump(ups_phy_dump_t dump, ++ struct ups_phy_priv *priv) ++{ ++ if (priv->port_map & SATA_PORT0_MAP) /* phy1 and phy2 portb phy addr off is 0x80 */ ++ dump(priv, priv->phy_base + HP_UPS_PHY_CTRL3, 0x80, 0x20); ++ if (priv->port_map & SATA_PORT1_MAP) ++ dump(priv, priv->phy_base + HP_UPS_PHY_CTRL3, 0x0, 0x20); ++ if (priv->port_map & SATA_PORT2_MAP) ++ dump(priv, priv->phy_base + HP_UPS_PHY_CTRL4, 0x80, 0x20); ++ if (priv->port_map & SATA_PORT3_MAP) ++ dump(priv, priv->phy_base + HP_UPS_PHY_CTRL4, 0x0, 0x20); ++} ++ ++static struct ups_phy_ops g_nano_phy_sata_ops = { ++ .phy_ops.power_on = nano_phy_sata_power_on, ++ .phy_ops.power_off = nano_phy_sata_power_off, ++ .ups_phy_get_port_map = nano_phy_get_sata_port_map, ++ .ups_phy_read = upsphy_read, ++ .ups_phy_write = upsphy_write, ++ .ups_phy_dump = nano_phy_x4phy_dump, ++}; ++ ++static struct of_device_id g_ups_nano_phy_of_match[] = { ++ { ++ .compatible = "combophy,sata_phy", ++ .data = &g_nano_phy_sata_ops ++ }, ++ {}, ++}; ++ ++struct of_device_id* ups_nano_phy_get_of_device_id(int *num) ++{ ++ *num = (sizeof(g_ups_nano_phy_of_match) / sizeof(struct of_device_id)) - 1; ++ ups_phy_info("nano phy num = %d\n", *num); ++ ++ return of_match_ptr(g_ups_nano_phy_of_match); ++} +diff --git a/drivers/vendor/usb_phy/proc.c b/drivers/vendor/usb_phy/proc.c +new file mode 100644 +index 000000000..3cb7aeb45 +--- /dev/null ++++ b/drivers/vendor/usb_phy/proc.c +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
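++ * Note: registers a read-only register-dump interface under procfs;
++ * each phy whose DT node sets "phy-name" gets its own entry. Usage
++ * sketch (the phy name here is hypothetical):
++ *   cat /proc/ups_phy/sata-phy/phydump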
++ * Description: ups phy proc ++ * Author: General IP Group ++ * Create: 2023-07-11 ++ */ ++ ++#define DRVNAME "[ups-phy-proc]" ++#define pr_fmt(fmt) DRVNAME ": " fmt ++ ++#include ++#include ++#include ++ ++#include "phy.h" ++ ++#ifdef CONFIG_PROC_FS ++ ++#define UPS_PHY_ENTRY_NAME "ups_phy" ++ ++static void ups_phy_proc_dump(struct ups_phy_priv *priv, ++ void __iomem *addr, u32 off, u32 len) ++{ ++ int i; ++ u32 val; ++ ++ if (priv->ups_phy_ops->ups_phy_read == NULL) { ++ pr_err("ups_phy_read is null\n"); ++ return; ++ } ++ ++ pr_cont("\n------------------combophy register start----------------\n\n"); ++ for (i = off; i < len + off; i++) { ++ val = priv->ups_phy_ops->ups_phy_read(addr, i); ++ if (i % 16 == 0) /* 16 bytes per line */ ++ pr_cont("%02x: ", i); ++ ++ pr_cont("%02x ", val); ++ ++ if (i % 16 == 15) /* 16 bytes per line, 15 is divisor */ ++ pr_cont("\n"); ++ } ++ pr_cont("\n------------------combophy register end------------------\n\n"); ++} ++ ++static int ups_phy_show(struct seq_file *m, void *v) ++{ ++ struct ups_phy_priv *priv = (struct ups_phy_priv *)(m->private); ++ struct ups_phy_ops *ops = priv->ups_phy_ops; ++ ++ if (ops != NULL && ops->ups_phy_dump != NULL) ++ ops->ups_phy_dump(ups_phy_proc_dump, priv); ++ ++ return 0; ++} ++ ++static int ups_phy_open(struct inode *inode, struct file *file) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) ++ struct ups_phy_priv *priv = pde_data(inode); ++#else ++ struct ups_phy_priv *priv = PDE_DATA(inode); ++#endif ++ ++ return single_open(file, ups_phy_show, priv); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++static const struct proc_ops g_ups_phy_proc_fops = { ++ .proc_open = ups_phy_open, ++ .proc_read = seq_read, ++ .proc_lseek = seq_lseek, ++ .proc_release = single_release, ++}; ++#else ++static const struct file_operations g_ups_phy_proc_fops = { ++ .owner = THIS_MODULE, ++ .open = ups_phy_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++#endif ++ ++static struct proc_dir_entry *g_ups_phy_entry_dir = NULL; ++ ++static int ups_phy_create_proc_root_dir(void) ++{ ++ if (g_ups_phy_entry_dir != NULL) { ++ return 0; ++ } ++ ++ g_ups_phy_entry_dir = proc_mkdir(UPS_PHY_ENTRY_NAME, NULL); ++ if (g_ups_phy_entry_dir == NULL) { ++ pr_err("Create Proc directory %s failed\n", UPS_PHY_ENTRY_NAME); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void ups_phy_create_proc(struct ups_phy_priv *priv) ++{ ++ struct proc_dir_entry *entry; ++ ++ entry = proc_mkdir_data(priv->name, S_IRUSR, g_ups_phy_entry_dir, priv); ++ if (entry == NULL) { ++ pr_err("Create proc directory %s failed\n", priv->name); ++ return; ++ } ++ ++ proc_create_data("phydump", S_IRUSR | S_IRGRP, entry, ++ &g_ups_phy_proc_fops, (void *)priv); ++} ++ ++void ups_phy_proc_init(struct ups_phy_priv *priv) ++{ ++ int ret; ++ ++ ret = ups_phy_create_proc_root_dir(); ++ if (ret < 0) ++ return; ++ ++ if (strcmp(priv->name, "none") != 0) { ++ ups_phy_create_proc(priv); ++ } ++} ++ ++void ups_phy_proc_deinit(void) ++{ ++ if (g_ups_phy_entry_dir != NULL) { ++ remove_proc_subtree(UPS_PHY_ENTRY_NAME, NULL); ++ g_ups_phy_entry_dir = NULL; ++ } ++} ++ ++#endif /* CONFIG_PROC_FS */ ++ +diff --git a/drivers/vendor/usb_phy/proc.h b/drivers/vendor/usb_phy/proc.h +new file mode 100644 +index 000000000..a6d757cc6 +--- /dev/null ++++ b/drivers/vendor/usb_phy/proc.h +@@ -0,0 +1,17 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2023. All rights reserved. 
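++ * Note: declares the procfs hooks implemented in proc.c; phy.c calls
++ * ups_phy_proc_init() from probe and ups_phy_proc_deinit() from remove.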
++ * Description: ups phy proc ++ * Author: General IP Group ++ * Create: 2023-07-11 ++ */ ++ ++#ifndef UPS_PHY_PROC_H ++#define UPS_PHY_PROC_H ++ ++#include "phy.h" ++ ++void ups_phy_proc_init(struct ups_phy_priv *priv); ++ ++void ups_phy_proc_deinit(void); ++ ++#endif /* UPS_PHY_PROC_H */ +diff --git a/drivers/vendor/usb_phy/reg_common.h b/drivers/vendor/usb_phy/reg_common.h +new file mode 100644 +index 000000000..2f8ad18c3 +--- /dev/null ++++ b/drivers/vendor/usb_phy/reg_common.h +@@ -0,0 +1,257 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: ups phy reg ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++ ++#ifndef UPS_PHY_REG_H ++#define UPS_PHY_REG_H ++ ++#define UDELAY_LEVEL1 (10) ++#define UDELAY_LEVEL2 (20) ++#define UDELAY_LEVEL3 (50) ++#define UDELAY_LEVEL4 (100) ++#define UDELAY_LEVEL5 (1000) ++#define UDELAY_LEVEL6 (3000) ++ ++#define COMBOPHY_MODE_MASK 0x3 ++#define COMBOPHY_MODE_USB 0x1 ++#define COMBOPHY_MODE_PCIE 0x0 ++#define COMBOPHY_MODE_SATA 0x2 ++ ++#define PERI_COMBPHY0_CTRL0 0x480 ++#define COMBPHY0_MODE_OFFSET 0x8 ++#define COMBPHY0_MODE_MASK (BIT(8) | BIT(9)) ++ ++#define NANO_PHY_PORT_LEN 0x40 ++#define SSC_ADDR_OFFSET 0x04 ++ ++#define PERI_COMBPHY0_BYPASS_CODE BIT(31) ++#define PERI_COMBPHY0_LLSS_STATE BIT(30) ++#define PERI_COMBPHY0_RXEQEVAL BIT(29) ++#define PERI_COMBPHY0_PD (0xf<<24) ++#define PERI_COMBPHY0_RXPOLARITY BIT(23) ++#define PERI_COMBPHY0_COMPLIANCE BIT(22) ++#define PERI_COMBPHY0_SRIS_ENABLE BIT(21) ++#define PERI_COMBPHY0_TX_PATTERN (0x3 << 19) ++#define PERI_COMBPHY0_TXELECIDLE BIT(18) ++#define PERI_COMBPHY0_TXDETECTRX_LB BIT(17) ++#define PERI_COMBPHY0_SERDES_ARCH BIT(16) ++#define PERI_COMBPHY0_WIDTH BIT(15) ++#define PERI_COMBPHY0_TX_ONESZEROS BIT(14) ++#define PERI_COMBPHY0_RX_EQ_TRAINING BIT(13) ++#define PERI_COMBPHY0_RX_TERMINATION BIT(12) ++#define PERI_COMBPHY0_RX_STANDBY BIT(11) ++#define PERI_COMBPHY0_BUF_MODE BIT(10) ++#define PERI_COMBPHY0_PHY_MODE (0x3 << 8) ++#define PERI_COMBPHY0_BLKALIGNCTRL BIT(7) ++#define PERI_COMBPHY0_CLK_REQ BIT(6) ++#define PERI_COMBPHY0_RATE (0x3 << 4) ++#define PERI_COMBPHY0_BIST_MODE BIT(0) ++ ++#define PERI_COMBOPHY1_CTRL 0x490 ++#define COMBOPHY1_MODE_OFFSET 0x8 ++ ++#define PCIEX1CTRL_SEL BIT(24) ++#define COMBPHY23_SEL_MASK (BIT(22) | BIT(23)) ++#define COMBPHY22_SEL_MASK (BIT(20) | BIT(21)) ++#define COMBPHY21_SEL_MASK (BIT(18) | BIT(19)) ++#define COMBPHY20_SEL_MASK (BIT(16) | BIT(17)) ++ ++#define COMBOPHY2_0_MODE_OFFSET 16 ++#define COMBOPHY2_1_MODE_OFFSET 18 ++#define COMBOPHY2_2_MODE_OFFSET 20 ++#define COMBOPHY2_3_MODE_OFFSET 22 ++ ++#define COMBOPHY2_LANE0 0x0 ++#define COMBOPHY2_LANE1 0x1 ++#define COMBOPHY2_LANE2 0x2 ++#define COMBOPHY2_LANE3 0x3 ++ ++#define PERI_COMBOPHY1_CTRL1 0x494 ++#define PERI_COMBOPHY1_TEST_O (0xff << 24) ++#define PERI_COMBOPHY1_TEST_RST (0x01 << 17) ++#define PERI_COMBOPHY1_TEST_WRITE (0x01 << 16) ++#define PERI_COMBOPHY1_TEST_I (0xff << 8) ++#define PERI_COMBOPHY1_TEST_ADD 0xff ++ ++#define PERI_USB2_CTRL_CFG0 0x600 ++#define PERI_USB2_CTRL_CFG1 0x610 ++#define PERI_USB31_CTRL_CFG0 0x620 ++#define PERI_USB31_CTRL_CFG1 0x640 ++#define PERI_USB31_CTRL_CFG2 0x660 ++#define PERI_CTRL_CFG_DEFAULT_VAL 0x020002ff ++#define PERI_PORT_PWR_CTL_EN (0x01 << 4) ++#define PERI_PORT_PWREN_POL (0x01 << 6) ++#define PERI_PORT_OVRCUR_EN (0x01 << 5) ++#define PERI_PORT_OVRCUR_POL (0x01 << 7) ++#define PERI_USB3_PORT_DISABLE (0x01 << 9) ++#define PERI_USB3_PORT_FORCE_5G (0x01 << 10) ++#define PERI_USB3_PORT_DISABLE_CSU30 
(0x01 << 12) ++ ++/* phy eye related */ ++#define PERI_COMBOPHY0_CTRL1 0x484 ++#define PERI_COMBOPHY1_CTRL1 0x494 ++ ++#define EQ_DEFAULT1 0x70 ++#define EQ_DEFAULT2 0x30 ++ ++#define PERI_COMBPHY2_0_BYPASS_CODEC BIT(31) ++#define PERI_COMBPHY2_0_BUF_MODE BIT(28) ++#define PERI_COMBPHY2_0_RXPOLARITY BIT(23) ++#define PERI_COMBPHY2_0_SRIS_ENABLE BIT(20) ++#define PERI_COMBPHY2_0_SERDES_ARCH BIT(16) ++#define PERI_COMBPHY2_0_WIDTH BIT(15) ++#define PERI_COMBPHY2_0_TX_ONESZEROS BIT(14) ++#define PERI_COMBPHY2_0_RX_EQ_TRAINING BIT(13) ++#define PERI_COMBPHY2_0_RX_TERMINATION BIT(12) ++#define PERI_COMBPHY2_0_RX_STANDBY BIT(11) ++#define PERI_COMBPHY2_0_CLK_REQ BIT(6) ++#define PERI_COMBPHY2_0_TX_PATTERN (0x3 << 2) ++ ++#define PERI_COMBPHY2_0_DATA_BUS_WIDTH (0x3 << 4) ++#define PERI_COMBPHY2_0_REF_CLK_NEEDED BIT(3) ++#define PERI_COMBPHY2_0_RX_DATA_VALID BIT(2) ++#define PERI_COMBPHY2_0_RX_STANDBY_STATUS BIT(1) ++#define PERI_COMBPHY2_0_ALIGN_DETECT BIT(0) ++ ++#define PERI_COMBPHY2_1_BYPASS_CODEC BIT(31) ++#define PERI_COMBPHY2_1_BUF_MODE BIT(28) ++#define PERI_COMBPHY2_1_SRIS_ENABLE BIT(20) ++#define PERI_COMBPHY2_1_SERDES_ARCH BIT(16) ++#define PERI_COMBPHY2_1_WIDTH BIT(15) ++#define PERI_COMBPHY2_1_TX_ONESZEROS BIT(14) ++#define PERI_COMBPHY2_1_RX_EQ_TRAINING BIT(13) ++#define PERI_COMBPHY2_1_RX_TERMINATION BIT(12) ++#define PERI_COMBPHY2_1_RX_STANDBY BIT(11) ++#define PERI_COMBPHY2_1_CLK_REQ BIT(6) ++#define PERI_COMBPHY2_1_TX_PATTERN (0x3 << 2) ++ ++#define PERI_COMBPHY2_1_DATA_BUS_WIDTH (0x3 << 4) ++#define PERI_COMBPHY2_1_REF_CLK_NEEDED BIT(3) ++#define PERI_COMBPHY2_1_RX_DATA_VALID BIT(2) ++#define PERI_COMBPHY2_1_RX_STANDBY_STATUS BIT(1) ++#define PERI_COMBPHY2_1_ALIGN_DETECT BIT(0) ++ ++#define PERI_COMBPHY2_2_BYPASS_CODEC BIT(31) ++#define PERI_COMBPHY2_2_BUF_MODE BIT(28) ++#define PERI_COMBPHY2_2_SRIS_ENABLE BIT(20) ++#define PERI_COMBPHY2_2_SERDES_ARCH BIT(16) ++#define PERI_COMBPHY2_2_WIDTH BIT(15) ++#define PERI_COMBPHY2_2_TX_ONESZEROS BIT(14) ++#define PERI_COMBPHY2_2_RX_EQ_TRAINING BIT(13) ++#define PERI_COMBPHY2_2_RX_TERMINATION BIT(12) ++#define PERI_COMBPHY2_2_RX_STANDBY BIT(11) ++#define PERI_COMBPHY2_2_CLK_REQ BIT(6) ++#define PERI_COMBPHY2_2_TX_PATTERN (0x3 << 2) ++ ++#define PERI_COMBPHY2_2_DATA_BUS_WIDTH (0x3 << 4) ++#define PERI_COMBPHY2_2_REF_CLK_NEEDED BIT(3) ++#define PERI_COMBPHY2_2_RX_DATA_VALID BIT(2) ++#define PERI_COMBPHY2_2_RX_STANDBY_STATUS BIT(1) ++#define PERI_COMBPHY2_2_ALIGN_DETECT BIT(0) ++ ++#define PERI_COMBPHY2_3_BYPASS_CODEC BIT(31) ++#define PERI_COMBPHY2_3_BUF_MODE BIT(28) ++#define PERI_COMBPHY2_3_SRIS_ENABLE BIT(20) ++#define PERI_COMBPHY2_3_SERDES_ARCH BIT(16) ++#define PERI_COMBPHY2_3_WIDTH BIT(15) ++#define PERI_COMBPHY2_3_TX_ONESZEROS BIT(14) ++#define PERI_COMBPHY2_3_RX_EQ_TRAINING BIT(13) ++#define PERI_COMBPHY2_3_RX_TERMINATION BIT(12) ++#define PERI_COMBPHY2_3_RX_STANDBY BIT(11) ++#define PERI_COMBPHY2_3_CLK_REQ BIT(6) ++#define PERI_COMBPHY2_3_TX_PATTERN (0x3 << 2) ++ ++#define PERI_COMBPHY2_3_DATA_BUS_WIDTH (0x3 << 4) ++#define PERI_COMBPHY2_3_REF_CLK_NEEDED BIT(3) ++#define PERI_COMBPHY2_3_RX_DATA_VALID BIT(2) ++#define PERI_COMBPHY2_3_RX_STANDBY_STATUS BIT(1) ++#define PERI_COMBPHY2_3_ALIGN_DETECT BIT(0) ++ ++#define PERI_COMBPHY_TEST_O (0xffU << 24) ++#define PERI_COMBPHY23_TEST_RST_N BIT(20) ++#define PERI_COMBPHY22_TEST_RST_N BIT(19) ++#define PERI_COMBPHY21_TEST_RST_N BIT(18) ++#define PERI_COMBPHY20_TEST_RST_N BIT(17) ++#define PERI_COMBPHY1_TEST_RST_N BIT(17) ++#define PERI_COMBPHY0_TEST_RST_N BIT(17) ++#define 
PERI_COMBPHY_TEST_WRITE BIT(16) ++#define PERI_COMBPHY_TEST_I (0xffU << 8) ++#define PERI_COMBPHY_TEST_ADDR 0xffU ++#define PERI_COMBPHY_DATA_OFFSET 0x8U ++#define PERI_COMBPHY_ADDR_OFFSET 0x0U ++ ++#define X1_SLEW_ASSIST_DIS_AND_SSC_ADDR 0x04 ++#define X1_SLEW_ASSIST_DIS_AND_SSC_VAL 0x12 ++#define X1_TX_SWING_COMP_ADDR 0x0B ++#define X1_TX_SWING_COMP_VAL 0xA0 ++#define X1_CDR_DIRECT_TRIM_EQ_PEAKING_ADDR 0x10 ++#define X1_CDR_DIRECT_TRIM_EQ_PEAKING_VAL 0x39 ++#define X1_DFE_DIS_8G10G_ADDR 0x1B ++#define X1_DFE_DIS_8G10G_VAL 0x1F ++#define EQ_SWING_INC_PEAK_FREQ_ADDR 0x12 ++#define EQ_SWING_INC_PEAK_FREQ_VAL 0x96 ++#define X1_TXPLL_TRIM_ADDR 0x00 ++#define X1_TXPLL_TRIM_VAL 0x30 ++#define X1_REF_CLK_100N250_ADDR 0x01 ++#define X1_REF_CLK_100N250_VAL 0x40 ++#define X1_EQ_INIT_MANUAL_SET1_ADDR 0x14 ++#define X1_EQ_INIT_MANUAL_SET1_VAL 0x6A ++#define X1_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR 0x05 ++#define X1_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL 0x10 ++#define X1_EQ_INIT_MANUAL_SET0_ADDR 0x14 ++#define X1_EQ_INIT_MANUAL_SET0_VAL 0x2A ++#define X1_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR 0x05 ++#define X1_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL 0x00 ++ ++#define X4_LANED_SLEW_ASSIST_DIS_AND_SSC_ADDR 0xC4 ++#define X4_LANED_SLEW_ASSIST_DIS_AND_SSC_VAL 0x12 ++#define X4_LANED_TW_SWING_COMP_ADDR 0xCB ++#define X4_LANED_TW_SWING_COMP_VAL 0xA0 ++#define X4_LANED_CDR_DIRECT_TRIM_ADDR 0xD0 ++#define X4_LANED_CDR_DIRECT_TRIM_VAL 0x39 ++#define X4_LANED_DFE_DIS_8G10G_ADDR 0xDB ++#define X4_LANED_DFE_DIS_8G10G_VAL 0x1F ++#define X4_LANED_EQ_SWING_ADDR 0xD2 ++#define X4_LANED_EQ_SWING_VAL 0x96 ++#define X4_LANED_TXPLL_TRIM_ADDR 0xC0 ++#define X4_LANED_TXPLL_TRIM_VAL 0x00 ++#define X4_LANED_REF_CLK_100N250_ADDR 0xC1 ++#define X4_LANED_REF_CLK_100N250_VAL 0x40 ++#define X4_LANED_INV_RXCDRCLK_ADDR 0xC8 ++#define X4_LANED_INV_RXCDRCLK_VAL 0x08 ++#define X4_LANED_EQ_INIT_MANUAL_SET1_ADDR 0xD4 ++#define X4_LANED_EQ_INIT_MANUAL_SET1_VAL 0x6A ++#define X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR 0xC5 ++#define X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL 0x10 ++#define X4_LANED_EQ_INIT_MANUAL_SET0_ADDR 0xD4 ++#define X4_LANED_EQ_INIT_MANUAL_SET0_VAL 0x2A ++#define X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR 0xC5 ++#define X4_LANED_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL 0x00 ++ ++#define X4_LANEC_SLEW_ASSIST_DIS_AND_SSC_ADDR 0x84 ++#define X4_LANEC_SLEW_ASSIST_DIS_AND_SSC_VAL 0x12 ++#define X4_LANEC_TW_SWING_COMP_ADDR 0x8B ++#define X4_LANEC_TW_SWING_COMP_VAL 0xA0 ++#define X4_LANEC_CDR_DIRECT_TRIM_ADDR 0x90 ++#define X4_LANEC_CDR_DIRECT_TRIM_VAL 0x39 ++#define X4_LANEC_DFE_DIS_8G10G_ADDR 0x9B ++#define X4_LANEC_DFE_DIS_8G10G_VAL 0x1F ++#define X4_LANEC_EQ_SWING_ADDR 0x92 ++#define X4_LANEC_EQ_SWING_VAL 0x96 ++#define X4_LANEC_TXPLL_TRIM_ADDR 0x80 ++#define X4_LANEC_TXPLL_TRIM_VAL 0x00 ++#define X4_LANEC_REF_CLK_100N250_ADDR 0x81 ++#define X4_LANEC_REF_CLK_100N250_VAL 0x40 ++#define X4_LANEC_EQ_INIT_MANUAL_SET1_ADDR 0x94 ++#define X4_LANEC_EQ_INIT_MANUAL_SET1_VAL 0x6A ++#define X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET1_ADDR 0x85 ++#define X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET1_VAL 0x10 ++#define X4_LANEC_EQ_INIT_MANUAL_SET0_ADDR 0x94 ++#define X4_LANEC_EQ_INIT_MANUAL_SET0_VAL 0x2A ++#define X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET0_ADDR 0x85 ++#define X4_LANEC_EQ_INIT_PWON_CDR_MANUAL_SET0_VAL 0x00 ++ ++#endif /* UPS_PHY_REG_H */ +diff --git a/drivers/vendor/usb_phy/reg_default.h b/drivers/vendor/usb_phy/reg_default.h +new file mode 100644 +index 000000000..0739cfc9b +--- /dev/null ++++ b/drivers/vendor/usb_phy/reg_default.h +@@ -0,0 +1,17 @@ ++/* ++ * 
Copyright (c) CompanyNameMagicTag 2022. All rights reserved. ++ * Description: ups phy reg ++ * Author: AuthorNameMagicTag ++ * Create: 2022.09.01 ++ */ ++#define PERI_CTRL_1 0x064 ++#define PERI_COMBOPHY2_CTRL 0x4a0 ++#define PERI_COMBOPHY2_CTRL1 0x4a0 ++#define PERI_COMBPHY2_CFG0 0x4b0 ++#define PERI_COMBPHY2_STATE0 0x4b4 ++#define PERI_COMBPHY2_CFG1 0x4c0 ++#define PERI_COMBPHY2_STATE1 0x4c4 ++#define PERI_COMBPHY2_CFG2 0x4d0 ++#define PERI_COMBPHY2_STATE2 0x4d4 ++#define PERI_COMBPHY2_CFG3 0x4e0 ++#define PERI_COMBPHY2_STATE3 0x4e4 +diff --git a/drivers/vendor/usb_phy/reg_hiwingv500.h b/drivers/vendor/usb_phy/reg_hiwingv500.h +new file mode 100644 +index 000000000..b9c361269 +--- /dev/null ++++ b/drivers/vendor/usb_phy/reg_hiwingv500.h +@@ -0,0 +1,17 @@ ++/* ++ * Copyright (c) CompanyNameMagicTag 2024. All rights reserved. ++ * Description: ups phy reg ++ * Author: AuthorNameMagicTag ++ * Create: 2024.04.01 ++ */ ++#define PERI_CTRL_1 0x0 ++#define PERI_COMBOPHY2_CTRL 0x0004 ++#define PERI_COMBOPHY2_CTRL1 0x0004 ++#define PERI_COMBPHY2_CFG0 0x0008 ++#define PERI_COMBPHY2_STATE0 0x000c ++#define PERI_COMBPHY2_CFG1 0x0010 ++#define PERI_COMBPHY2_STATE1 0x0014 ++#define PERI_COMBPHY2_CFG2 0x0018 ++#define PERI_COMBPHY2_STATE2 0x001c ++#define PERI_COMBPHY2_CFG3 0x0024 ++#define PERI_COMBPHY2_STATE3 0x0028 +diff --git a/drivers/vendor/usb_phy/xvp.c b/drivers/vendor/usb_phy/xvp.c +new file mode 100644 +index 000000000..cf76c1308 +--- /dev/null ++++ b/drivers/vendor/usb_phy/xvp.c +@@ -0,0 +1,137 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) CompanyNameMagicTag 2022-2023. All rights reserved. ++ * Description: xvp phy module ++ * Author: General IP Group ++ * Create: 2022-09-01 ++ */ ++ ++#define DRVNAME "[ups-xvp-phy]" ++#define pr_fmt(fmt) DRVNAME ": " fmt ++ ++#include "phy.h" ++ ++#define REG_OTPC_REGBASE 0x00b00000 ++#define PHY_PARA_OTP_OFFSET 0x032c ++#define PHY_RT_TRIM_MASK 0x001f ++#define RG_RT_TRIM_MASK 0x1f00 ++#define U2_PHY_TRIM_BIT 0x0008 ++#define TRIM_MAX_VALUE 0x001d ++#define TRIM_MIN_VALUE 0x0009 ++ ++#define U2PHY_ANA_CFG0_OFFSET 0x0000 ++#define U2PHY_ANA_CFG0_VALUE 0x0A33CC2B ++#define U2PHY_ANA_CFG2_OFFSET 0x0008 ++#define U2PHY_ANA_CFG2_VALUE 0x00260F0F ++#define U2PHY_ANA_CFG4_OFFSET 0x0010 ++#define U2PHY_ANA_CFG5_OFFSET 0x0014 ++#define PLL_EN_MASK (0x3U << 0) ++#define PLL_EN_VALUE (0x3U << 0) ++ ++#define TX_DEEMPHASIS_STRENGTH_MASK (0xFU << 8) ++#define MBIAS_MASK (0xFU << 0) ++#define DEEMPHASIS_HALF_BIT_MASK (0xFFU << 20) ++#define DISCONNECT_VREF_MASK (0x7U << 16) ++#define TX_DEEMPHASIS_ENABLE (0x1 << 5) ++ ++static void xvp_phy_config(const struct ups_phy_priv *priv) ++{ ++ u32 reg; ++ u32 usb2_phy_rt_trim; ++ ++ reg = phy_readl(priv->phy_base + U2PHY_ANA_CFG5_OFFSET); ++ reg &= ~(PLL_EN_MASK); ++ reg |= PLL_EN_VALUE; ++ phy_writel(reg, priv->phy_base + U2PHY_ANA_CFG5_OFFSET); ++ ++ /* BE CAREFUL ++ * ANA_CFG2 phy eye diagram config must set before trim config, ++ * because it will write total 32 bits when config ANA_CFG2. 
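++ * (The trim update below only read-modify-writes the RG_RT_TRIM field
++ * inside ANA_CFG2, so a full-word ANA_CFG2 write done afterwards would
++ * clobber the trim value.)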
++ * And just set several bits when config trim ++ */ ++ reg = priv->u2phy_trim[U2PHY_ANA_CFG0]; ++ if (reg != 0) ++ phy_writel(reg, priv->phy_base + U2PHY_ANA_CFG0_OFFSET); ++ ++ reg = priv->u2phy_trim[U2PHY_ANA_CFG2]; ++ if (reg != 0) ++ phy_writel(reg, priv->phy_base + U2PHY_ANA_CFG2_OFFSET); ++ ++ reg = priv->u2phy_trim[U2PHY_ANA_CFG4]; ++ if (reg != 0) ++ phy_writel(reg, priv->phy_base + U2PHY_ANA_CFG4_OFFSET); ++ ++ if (priv->u2phy_trim_otp == NULL) ++ return; ++ ++ /* configure trim from otp */ ++ usb2_phy_rt_trim = phy_readl(priv->u2phy_trim_otp); ++ usb2_phy_rt_trim = (usb2_phy_rt_trim >> priv->otp_phy_trim_bitshift) & PHY_RT_TRIM_MASK; ++ if ((usb2_phy_rt_trim >= TRIM_MIN_VALUE) && ++ (usb2_phy_rt_trim <= TRIM_MAX_VALUE)) { ++ reg = phy_readl(priv->phy_base + U2PHY_ANA_CFG2_OFFSET); ++ reg &= ~(RG_RT_TRIM_MASK); ++ reg |= (usb2_phy_rt_trim << U2_PHY_TRIM_BIT); ++ phy_writel(reg, priv->phy_base + U2PHY_ANA_CFG2_OFFSET); ++ } ++} ++ ++static int xvp_phy_power_on(struct phy *phy) ++{ ++ int ret; ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++"); ++ ++ ret = clk_prepare(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("xvp phy clk prepare failed\n"); ++ return ret; ++ } ++ ++ xvp_phy_config(priv); ++ ++ ret = clk_enable(priv->phy_clk); ++ if (ret != 0) { ++ ups_phy_err("xvp phy clk enable failed\n"); ++ return ret; ++ } ++ ++ ups_phy_dbg("---"); ++ ++ return 0; ++} ++ ++static int xvp_phy_power_off(struct phy *phy) ++{ ++ struct ups_phy_priv *priv = phy_get_drvdata(phy); ++ ++ ups_phy_dbg("+++"); ++ ++ clk_disable_unprepare(priv->phy_clk); ++ ++ ups_phy_dbg("---"); ++ ++ return 0; ++} ++ ++struct phy_ops g_xvp_phy_ops = { ++ .power_on = xvp_phy_power_on, ++ .power_off = xvp_phy_power_off, ++}; ++ ++static struct of_device_id g_ups_xvp_phy_of_match[] = { ++ { ++ .compatible = "usb2phy,xvpphy", ++ .data = &g_xvp_phy_ops ++ }, ++ {}, ++}; ++ ++struct of_device_id* ups_xvp_get_of_device_id(int *num) ++{ ++ *num = (sizeof(g_ups_xvp_phy_of_match) / sizeof(struct of_device_id)) - 1; ++ ups_phy_info("xvp phy num = %d\n", *num); ++ return of_match_ptr(g_ups_xvp_phy_of_match); ++} ++ +diff --git a/fs/Kconfig b/fs/Kconfig +index 73f263738..02a923780 100644 +--- a/fs/Kconfig ++++ b/fs/Kconfig +@@ -30,8 +30,6 @@ if BLOCK + + source "fs/ext2/Kconfig" + source "fs/ext4/Kconfig" +-source "fs/hmdfs/Kconfig" +-source "fs/sharefs/Kconfig" + source "fs/jbd2/Kconfig" + + config FS_MBCACHE +@@ -51,7 +49,6 @@ source "fs/btrfs/Kconfig" + source "fs/nilfs2/Kconfig" + source "fs/f2fs/Kconfig" + source "fs/zonefs/Kconfig" +-source "fs/proc/memory_security/Kconfig" + + endif # BLOCK + +@@ -129,8 +126,6 @@ config FILE_LOCKING + + source "fs/crypto/Kconfig" + +-source "fs/code_sign/Kconfig" +- + source "fs/verity/Kconfig" + + source "fs/notify/Kconfig" +@@ -404,7 +399,6 @@ endif # NETWORK_FILESYSTEMS + source "fs/nls/Kconfig" + source "fs/dlm/Kconfig" + source "fs/unicode/Kconfig" +-source "fs/epfs/Kconfig" + + config IO_WQ + bool +diff --git a/fs/Makefile b/fs/Makefile +index d04ef3afb..f9541f40b 100644 +--- a/fs/Makefile ++++ b/fs/Makefile +@@ -4,7 +4,7 @@ + # + # 14 Sep 2000, Christoph Hellwig + # Rewritten to use lists instead of if-statements. 
+-# ++# + + + obj-y := open.o read_write.o file_table.o super.o \ +@@ -30,7 +30,6 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o + obj-$(CONFIG_AIO) += aio.o + obj-$(CONFIG_FS_DAX) += dax.o + obj-$(CONFIG_FS_ENCRYPTION) += crypto/ +-obj-$(CONFIG_SECURITY_CODE_SIGN) += code_sign/ + obj-$(CONFIG_FS_VERITY) += verity/ + obj-$(CONFIG_FILE_LOCKING) += locks.o + obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o +@@ -64,8 +63,6 @@ obj-$(CONFIG_NETFS_SUPPORT) += netfs/ + obj-$(CONFIG_FSCACHE) += fscache/ + obj-$(CONFIG_REISERFS_FS) += reiserfs/ + obj-$(CONFIG_EXT4_FS) += ext4/ +-obj-$(CONFIG_HMDFS_FS) += hmdfs/ +-obj-$(CONFIG_SHARE_FS) += sharefs/ + # We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the + # ext2 driver, which doesn't know about journalling! Explicitly request ext2 + # by giving the rootfstype= parameter. +@@ -132,4 +129,3 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ + obj-$(CONFIG_EROFS_FS) += erofs/ + obj-$(CONFIG_VBOXSF_FS) += vboxsf/ + obj-$(CONFIG_ZONEFS_FS) += zonefs/ +-obj-$(CONFIG_EPFS) += epfs/ +diff --git a/fs/epfs/Kconfig b/fs/epfs/Kconfig +deleted file mode 100644 +index 6a631dba7..000000000 +--- a/fs/epfs/Kconfig ++++ /dev/null +@@ -1,12 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-config EPFS +- bool "Enhanced Proxy File System support" +- depends on TMPFS +- help +- Enhanced Proxy File System support. If unsure, say N. +- +-config EPFS_DEBUG +- bool "Debug message of Enhanced Proxy File System" +- depends on EPFS +- help +- Enhanced Proxy File System debug support. +diff --git a/fs/epfs/Makefile b/fs/epfs/Makefile +deleted file mode 100644 +index b7375e6f9..000000000 +--- a/fs/epfs/Makefile ++++ /dev/null +@@ -1,3 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0 +-obj-$(CONFIG_EPFS) += epfs.o +-epfs-y := main.o super.o dentry.o inode.o file.o dir.o +diff --git a/fs/epfs/dentry.c b/fs/epfs/dentry.c +deleted file mode 100644 +index 62299eccd..000000000 +--- a/fs/epfs/dentry.c ++++ /dev/null +@@ -1,23 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/main.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include "internal.h" +- +-static int epfs_d_revalidate(struct dentry *dentry, unsigned int flags) +-{ +- return 1; +-} +- +-static void epfs_d_release(struct dentry *dentry) +-{ +-} +- +-const struct dentry_operations epfs_dops = { +- .d_revalidate = epfs_d_revalidate, +- .d_release = epfs_d_release, +-}; +diff --git a/fs/epfs/dir.c b/fs/epfs/dir.c +deleted file mode 100644 +index 4079feb28..000000000 +--- a/fs/epfs/dir.c ++++ /dev/null +@@ -1,18 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/dir.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include +- +-#include "internal.h" +- +-static int epfs_iterate(struct file *file, struct dir_context *ctx) +-{ +- return 0; +-} +- +-const struct file_operations epfs_dir_fops = { .iterate_shared = epfs_iterate }; +diff --git a/fs/epfs/epfs.h b/fs/epfs/epfs.h +deleted file mode 100644 +index 19e66e145..000000000 +--- a/fs/epfs/epfs.h ++++ /dev/null +@@ -1,43 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/epfs/epfs.h +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. 
+- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#ifndef __FS_EPFS_H__ +-#define __FS_EPFS_H__ +- +-#include +-#include +-#include +- +-#define EPFS_MAX_RANGES 127 +- +-struct __attribute__((__packed__)) epfs_range { +- __u64 num; +- __u64 reserved; +- struct { +- __u64 begin; +- __u64 end; +- } range[0]; +-}; +- +-#define EPFS_IOCTL_MAGIC 0x71 +-#define IOC_SET_ORIGIN_FD _IOW(EPFS_IOCTL_MAGIC, 1, __s32) +-#define IOC_SET_EPFS_RANGE _IOW(EPFS_IOCTL_MAGIC, 2, struct epfs_range) +-#define EPFS_IOCTL_MAXNR 3 +- +-#define EPFS_TAG "Epfs" +- +-#define epfs_err(fmt, ...) \ +- pr_err("%s:%s:%d: " fmt, EPFS_TAG, __func__, __LINE__, ##__VA_ARGS__) +-#define epfs_info(fmt, ...) \ +- pr_info("%s:%s:%d: " fmt, EPFS_TAG, __func__, __LINE__, ##__VA_ARGS__) +-#define epfs_warn(fmt, ...) \ +- pr_warn("%s:%s:%d: " fmt, EPFS_TAG, __func__, __LINE__, ##__VA_ARGS__) +-#define epfs_debug(fmt, ...) \ +- pr_debug("%s:%s:%d: " fmt, EPFS_TAG, __func__, __LINE__, ##__VA_ARGS__) +- +-#endif // __FS_EPFS_H__ +diff --git a/fs/epfs/file.c b/fs/epfs/file.c +deleted file mode 100644 +index d2938ebb6..000000000 +--- a/fs/epfs/file.c ++++ /dev/null +@@ -1,299 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/file.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include +-#include +-#include +-#include +-#include +- +-#include "internal.h" +- +-long epfs_set_origin_fd(struct file *file, unsigned long arg) +-{ +- int fd = -1; +- struct file *origin_file; +- struct inode *inode = file->f_inode; +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- int ret = 0; +- +- if (copy_from_user(&fd, (int *)arg, sizeof(fd))) +- return -EFAULT; +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug("original fd: %d", fd); +- origin_file = fget(fd); +- if (!origin_file) { +- epfs_err("Original file not exist!"); +- return -EBADF; +- } +- +- mutex_lock(&info->lock); +- if (info->origin_file) { +- // origin_file had been set. 
+- ret = -EEXIST; +- fput(origin_file); +- } else if (file_inode(origin_file) == inode) { +- epfs_err("Could not set itself as origin_file!"); +- fput(origin_file); +- ret = -EINVAL; +- } else { +- info->origin_file = origin_file; +- fsstack_copy_attr_all(inode, file_inode(origin_file)); +- fsstack_copy_inode_size(inode, file_inode(origin_file)); +- } +- mutex_unlock(&info->lock); +- return ret; +-} +- +-int epfs_check_range(struct epfs_range *range) +-{ +- __u64 index; +- +- if (range->range[0].begin >= range->range[0].end) { +- epfs_err("Invalid range: [%llu, %llu)", range->range[0].begin, +- range->range[0].end); +- return -EINVAL; +- } +- +- for (index = 1; index < range->num; index++) { +- if ((range->range[index].begin >= range->range[index].end) || +- (range->range[index].begin < range->range[index - 1].end)) { +- epfs_err("Invalid range: [%llu, %llu), [%llu, %llu)", +- range->range[index - 1].begin, +- range->range[index - 1].end, +- range->range[index].begin, +- range->range[index].end); +- return -EINVAL; +- } +- } +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) { +- epfs_debug("epfs_range recv %llu ranges:", range->num); +- for (index = 0; index < range->num; index++) { +- epfs_debug("range:[%llu %llu)", +- range->range[index].begin, +- range->range[index].end); +- } +- epfs_debug("\n"); +- } +- return 0; +-} +- +-long epfs_set_range(struct file *file, unsigned long arg) +-{ +- struct inode *inode = file->f_inode; +- struct inode *origin_inode; +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- int ret = 0; +- __u64 num = 0; +- struct epfs_range *range; +- struct epfs_range header; +- +- mutex_lock(&info->lock); +- if (!info->origin_file) { +- epfs_err("origin file not exist!"); +- ret = -EBADF; +- goto out_set_range; +- } +- origin_inode = info->origin_file->f_inode; +- if (!in_group_p(origin_inode->i_gid)) { +- epfs_err("Only group member can set range: %u", +- i_gid_read(origin_inode)); +- ret = -EACCES; +- goto out_set_range; +- } +- +- if (copy_from_user(&header, (struct epfs_range *)arg, +- sizeof(header))) { +- ret = -EFAULT; +- epfs_err("get header failed!"); +- goto out_set_range; +- } +- num = header.num; +- +- if (num > EPFS_MAX_RANGES || num <= 0) { +- ret = -EINVAL; +- epfs_err("illegal num: %llu", num); +- goto out_set_range; +- } +- +- range = kzalloc(sizeof(header) + sizeof(header.range[0]) * num, +- GFP_KERNEL); +- if (!range) { +- ret = -ENOMEM; +- goto out_set_range; +- } +- +- if (copy_from_user(range, (struct epfs_range *)arg, +- sizeof(header) + sizeof(header.range[0]) * num)) { +- ret = -EFAULT; +- epfs_err("Failed to get range! 
num: %llu", num); +- kfree(range); +- goto out_set_range; +- } +- range->num = num; +- +- ret = epfs_check_range(range); +- if (ret) { +- kfree(range); +- goto out_set_range; +- } +- +- info->range = range; +-out_set_range: +- mutex_unlock(&info->lock); +- return ret; +-} +- +-static long __epfs_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- long rc = -ENOTTY; +- +- if (unlikely(_IOC_TYPE(cmd) != EPFS_IOCTL_MAGIC)) { +- epfs_err("Failed to check epfs magic: %u", _IOC_TYPE(cmd)); +- return -ENOTTY; +- } +- if (unlikely(_IOC_NR(cmd) >= EPFS_IOCTL_MAXNR)) { +- epfs_err("Failed to check ioctl number: %u", _IOC_NR(cmd)); +- return -ENOTTY; +- } +- if (unlikely(!access_ok((void __user *)arg, _IOC_SIZE(cmd)))) { +- epfs_err("Failed to check user address space range!"); +- return -EFAULT; +- } +- +- switch (cmd) { +- case IOC_SET_ORIGIN_FD: +- return epfs_set_origin_fd(file, arg); +- case IOC_SET_EPFS_RANGE: +- return epfs_set_range(file, arg); +- default: +- epfs_info("Exit epfs unsupported ioctl, ret: %ld", rc); +- return rc; +- } +-} +- +-static long epfs_compat_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- return __epfs_ioctl(file, cmd, arg); +-} +- +-static long epfs_unlocked_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- return __epfs_ioctl(file, cmd, arg); +-} +- +-static ssize_t epfs_read(struct file *file, char __user *buf, size_t count, +- loff_t *ppos) +-{ +- struct inode *inode = file_inode(file); +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- struct file *origin_file; +- struct epfs_range *range; +- ssize_t ret = 0; +- loff_t pos = *ppos; +- loff_t file_size; +- int current_range_index = 0; +- +- mutex_lock(&info->lock); +- range = info->range; +- if (!range) { +- ret = -EINVAL; +- epfs_err("Invalid inode range!"); +- goto out_read; +- } +- +- origin_file = info->origin_file; +- +- if (!origin_file) { +- ret = -ENOENT; +- epfs_err("origin file not exist!"); +- goto out_read; +- } +- +- // Reduce count when it will read over file size. +- file_size = i_size_read(file_inode(origin_file)); +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- if (count > (file_size - pos)) +- epfs_debug( +- "count will be truncated to %llu, as file_size=%llu, pos=%llu", +- file_size - pos, file_size, pos); +- count = count <= (file_size - pos) ? count : (file_size - pos); +- +- // Skip ranges before pos. +- while ((range->range[current_range_index].end <= pos) && +- (current_range_index < range->num)) +- current_range_index++; +- +- while (count > 0) { +- __u64 current_begin, current_end; +- +- if (current_range_index >= range->num) { +- // read directly when epfs range gone; +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug( +- "read from %llu with len %zu at the end.", +- pos, count); +- ret = vfs_read(origin_file, buf, count, &pos); +- break; +- } +- current_begin = range->range[current_range_index].begin; +- current_end = range->range[current_range_index].end; +- if (current_begin <= pos) { +- // Clear user memory +- unsigned long clear_len = current_end - pos; +- +- clear_len = clear_len < count ? 
clear_len : count; +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug( +- "clear user memory from %llu with len %lu", +- pos, clear_len); +- if (clear_user(buf, clear_len)) { +- ret = EFAULT; +- break; +- } +- buf += clear_len; +- pos += clear_len; +- count -= clear_len; +- current_range_index++; +- } else { +- // Read from pos to (next)current_begin +- unsigned long read_len = current_begin - pos; +- +- read_len = read_len < count ? read_len : count; +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug( +- "read from %llu with len %lu", +- pos, read_len); +- ret = vfs_read(origin_file, buf, read_len, &pos); +- if (ret < 0 || ret < read_len) { +- // Could not read enough bytes; +- break; +- } +- buf += ret; +- count -= ret; +- } +- } +- +- if (ret >= 0) { +- ret = pos - *ppos; +- *ppos = pos; +- } +-out_read: +- mutex_unlock(&info->lock); +- return ret; +-} +- +-const struct file_operations epfs_file_fops = { +- .unlocked_ioctl = epfs_unlocked_ioctl, +- .compat_ioctl = epfs_compat_ioctl, +- .read = epfs_read, +- .llseek = generic_file_llseek, +-}; +diff --git a/fs/epfs/inode.c b/fs/epfs/inode.c +deleted file mode 100644 +index 99b94108d..000000000 +--- a/fs/epfs/inode.c ++++ /dev/null +@@ -1,126 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/inode.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include +-#include +-#include +- +-#include "internal.h" +- +-#define USER_DATA_RW 1008 +-#define USER_DATA_RW_UID KUIDT_INIT(USER_DATA_RW) +-#define USER_DATA_RW_GID KGIDT_INIT(USER_DATA_RW) +- +-struct dentry *epfs_lookup(struct inode *dir, struct dentry *dentry, +- unsigned int flags) +-{ +- return ERR_PTR(-ENOENT); +-} +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +-static int epfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, +- struct file *file, umode_t mode) +-#else +-static int epfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, +- struct file *file, umode_t mode) +-#endif +-#else +-static int epfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +-#endif +-{ +- struct inode *inode = epfs_iget(dir->i_sb, false); +- +- if (!inode) +- return -ENOSPC; +- d_tmpfile(file, inode); +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug("epfs: tmpfile %p", inode); +- return finish_open_simple(file, 0);; +-} +- +-const struct inode_operations epfs_dir_iops = { +- .tmpfile = epfs_tmpfile, +- .lookup = epfs_lookup, +-}; +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +-static int epfs_getattr(struct mnt_idmap *idmap, +- const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-#else +-static int epfs_getattr(struct user_namespace *mnt_userns, +- const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-#endif +-#else +-static int epfs_getattr(const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-#endif +-{ +- struct dentry *dentry = path->dentry; +- struct inode *inode = d_inode(dentry); +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- struct file *origin_file; +- struct kstat origin_stat; +- int ret; +- +- mutex_lock(&info->lock); +- origin_file = info->origin_file; +- if (!origin_file) { +- ret = -ENOENT; +- goto out_getattr; +- } +- ret = vfs_getattr(&(origin_file->f_path), &origin_stat, request_mask, +- flags); +- if (ret) +- goto 
out_getattr; +- fsstack_copy_attr_all(inode, file_inode(origin_file)); +- fsstack_copy_inode_size(inode, file_inode(origin_file)); +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0) +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) +- generic_fillattr(idmap, request_mask, d_inode(dentry), stat); +-#else +- generic_fillattr(mnt_userns, d_inode(dentry), stat); +-#endif +-#else +- generic_fillattr(d_inode(dentry), stat); +-#endif +- stat->blocks = origin_stat.blocks; +- +-out_getattr: +- mutex_unlock(&info->lock); +- return ret; +-} +- +-const struct inode_operations epfs_file_iops = { +- .getattr = epfs_getattr, +-}; +- +-struct inode *epfs_iget(struct super_block *sb, bool is_dir) +-{ +- struct inode *inode = new_inode(sb); +- +- if (!inode) { +- epfs_err("Failed to allocate new inode"); +- return NULL; +- } +- if (is_dir) { +- inode->i_op = &epfs_dir_iops; +- inode->i_fop = &epfs_dir_fops; +- inode->i_mode = S_IFDIR | 0770; +- } else { +- inode->i_op = &epfs_file_iops; +- inode->i_fop = &epfs_file_fops; +- inode->i_mode = S_IFREG; +- } +- inode->i_uid = USER_DATA_RW_UID; +- inode->i_gid = USER_DATA_RW_GID; +- return inode; +-} +diff --git a/fs/epfs/internal.h b/fs/epfs/internal.h +deleted file mode 100644 +index c6634e705..000000000 +--- a/fs/epfs/internal.h ++++ /dev/null +@@ -1,38 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/epfs/internal.h +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#ifndef __FS_EPFS_INTERNAL_H__ +-#define __FS_EPFS_INTERNAL_H__ +- +-#include +-#include +- +-#include "epfs.h" +- +-#define EPFS_SUPER_MAGIC 0x20220607 +- +-struct epfs_inode_info { +- struct inode vfs_inode; +- struct file *origin_file; +- struct epfs_range *range; +- struct mutex lock; +-}; +- +-static inline struct epfs_inode_info *epfs_inode_to_private(struct inode *inode) +-{ +- return container_of(inode, struct epfs_inode_info, vfs_inode); +-} +- +-struct inode *epfs_iget(struct super_block *sb, bool is_dir); +-extern const struct dentry_operations epfs_dops; +-extern const struct file_operations epfs_dir_fops; +-extern const struct file_operations epfs_file_fops; +-extern struct file_system_type epfs_fs_type; +-extern struct kmem_cache *epfs_inode_cachep; +- +-#endif // __FS_EPFS_INTERNAL_H__ +diff --git a/fs/epfs/main.c b/fs/epfs/main.c +deleted file mode 100644 +index c91e94f8f..000000000 +--- a/fs/epfs/main.c ++++ /dev/null +@@ -1,44 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/main.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. 
+- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include +-#include +-#include +- +-#include "internal.h" +- +-struct kmem_cache *epfs_inode_cachep; +- +-static int __init epfs_init(void) +-{ +- int ret; +- +- epfs_inode_cachep = +- kmem_cache_create("epfs_inode_cache", +- sizeof(struct epfs_inode_info), 0, 0, +- NULL); +- if (!epfs_inode_cachep) +- return -ENOMEM; +- ret = register_filesystem(&epfs_fs_type); +- if (ret) +- kmem_cache_destroy(epfs_inode_cachep); +- return ret; +-} +- +-static void __exit epfs_exit(void) +-{ +- unregister_filesystem(&epfs_fs_type); +- kmem_cache_destroy(epfs_inode_cachep); +-} +- +-module_init(epfs_init) +-module_exit(epfs_exit) +-MODULE_DESCRIPTION("Enhanced Proxy File System for OpenHarmony"); +-MODULE_AUTHOR("LongPing Wei weilongping@huawei.com"); +-MODULE_LICENSE("GPL v2"); +-MODULE_ALIAS_FS("epfs"); +diff --git a/fs/epfs/super.c b/fs/epfs/super.c +deleted file mode 100644 +index 7368af775..000000000 +--- a/fs/epfs/super.c ++++ /dev/null +@@ -1,127 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/epfs/super.c +- * +- * Copyright (c) 2022 Huawei Technologies Co., Ltd. +- * Author: weilongping@huawei.com +- * Create: 2022-06-10 +- */ +-#include +-#include +-#include +-#include +-#include +- +-#include "internal.h" +- +-static struct inode *epfs_alloc_inode(struct super_block *sb) +-{ +- struct epfs_inode_info *info = +- kmem_cache_zalloc(epfs_inode_cachep, GFP_KERNEL); +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug("inode info: %p", info); +- inode_init_once(&info->vfs_inode); +- mutex_init(&info->lock); +- return &info->vfs_inode; +-} +- +-// Free epfs_inode_info +-static void epfs_free_inode(struct inode *inode) +-{ +- if (IS_ENABLED(CONFIG_EPFS_DEBUG)) +- epfs_debug("free_inode: %p", inode); +- kmem_cache_free(epfs_inode_cachep, +- epfs_inode_to_private(inode)); +-} +- +-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) +-static void i_callback(struct rcu_head *head) +-{ +- struct inode *inode = container_of(head, struct inode, i_rcu); +- +- epfs_free_inode(inode); +-} +-#endif +- +-// Destroy epfs_range +-static void epfs_destroy_inode(struct inode *inode) +-{ +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- +- mutex_lock(&info->lock); +- kfree(info->range); +- info->range = NULL; +- mutex_unlock(&info->lock); +-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) +- call_rcu(&inode->i_rcu, i_callback); +-#endif +-} +- +-// Clear vfs_inode +-static void epfs_evict_inode(struct inode *inode) +-{ +- struct epfs_inode_info *info = epfs_inode_to_private(inode); +- +- clear_inode(inode); +- mutex_lock(&info->lock); +- if (info->origin_file) { +- fput(info->origin_file); +- info->origin_file = NULL; +- } +- mutex_unlock(&info->lock); +-} +- +-static int epfs_statfs(struct dentry *dentry, struct kstatfs *buf) +-{ +- buf->f_type = EPFS_SUPER_MAGIC; +- return 0; +-} +-struct super_operations epfs_sops = { +- .alloc_inode = epfs_alloc_inode, +- .destroy_inode = epfs_destroy_inode, +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) +- .free_inode = epfs_free_inode, +-#endif +- .evict_inode = epfs_evict_inode, +- .statfs = epfs_statfs, +-}; +- +-static int epfs_fill_super(struct super_block *s, void *data, int silent) +-{ +- struct inode *inode; +- +- s->s_op = &epfs_sops; +- s->s_d_op = &epfs_dops; +- s->s_magic = EPFS_SUPER_MAGIC; +- inode = epfs_iget(s, true /* dir */); +- if (!inode) { +- epfs_err("Failed to get root inode!"); +- return -ENOMEM; +- } +- +- s->s_root = d_make_root(inode); +- if (!s->s_root) { +- 
epfs_err("Failed to make root inode"); +- return -ENOMEM; +- } +- +- return 0; +-} +- +-struct dentry *epfs_mount(struct file_system_type *fs_type, int flags, +- const char *dev_name, void *raw_data) +-{ +- return mount_nodev(fs_type, flags, raw_data, epfs_fill_super); +-} +- +-void epfs_kill_sb(struct super_block *sb) +-{ +- kill_anon_super(sb); +-} +- +-struct file_system_type epfs_fs_type = { +- .owner = THIS_MODULE, +- .name = "epfs", +- .mount = epfs_mount, +- .kill_sb = epfs_kill_sb, +-}; +diff --git a/fs/exec.c b/fs/exec.c +index d06936e3a..4a6255aa4 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -75,7 +75,6 @@ + #include "internal.h" + + #include +-#include + + static int bprm_creds_from_file(struct linux_binprm *bprm); + +@@ -1897,7 +1896,6 @@ static int bprm_execve(struct linux_binprm *bprm, + user_events_execve(current); + acct_update_integrals(current); + task_numa_free(current, false); +- CALL_HCK_LITE_HOOK(ced_detection_lhck, current); + return retval; + + out: +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index a6ba89d19..32218ac7f 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -3309,7 +3309,7 @@ static int ext4_split_extent_at(handle_t *handle, + } + + /* +- * ext4_split_extents() splits an extent and mark extent which is covered ++ * ext4_split_extent() splits an extent and mark extent which is covered + * by @map as split_flags indicates + * + * It may result in splitting the extent into multiple extents (up to three) +@@ -3385,7 +3385,7 @@ static int ext4_split_extent(handle_t *handle, + goto out; + } + +- ext4_ext_show_leaf(inode, path); ++ ext4_ext_show_leaf(inode, *ppath); + out: + return err ? err : allocated; + } +@@ -3850,14 +3850,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, + struct ext4_ext_path **ppath, int flags, + unsigned int allocated, ext4_fsblk_t newblock) + { +- struct ext4_ext_path __maybe_unused *path = *ppath; + int ret = 0; + int err = 0; + + ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", + (unsigned long long)map->m_lblk, map->m_len, flags, + allocated); +- ext4_ext_show_leaf(inode, path); ++ ext4_ext_show_leaf(inode, *ppath); + + /* + * When writing into unwritten space, we should not fail to +@@ -3954,7 +3953,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, + if (allocated > map->m_len) + allocated = map->m_len; + map->m_len = allocated; +- ext4_ext_show_leaf(inode, path); ++ ext4_ext_show_leaf(inode, *ppath); + out2: + return err ? 
err : allocated; + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 41d7b9fd6..f019ce64e 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3626,13 +3626,6 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly) + return 0; + } + #endif +- if (EXT4_SB(sb)->s_es->s_def_hash_version == DX_HASH_SIPHASH && +- !ext4_has_feature_casefold(sb)) { +- ext4_msg(sb, KERN_ERR, +- "Filesystem without casefold feature cannot be " +- "mounted with siphash"); +- return 0; +- } + + if (readonly) + return 1; +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 8da9d17b5..d28e3df61 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -4604,8 +4604,8 @@ static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, + + static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) + { +- return fsverity_active(inode) && (idx < +- DIV_ROUND_UP(fsverity_get_verified_data_size(inode), PAGE_SIZE)); ++ return fsverity_active(inode) && ++ idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); + } + + #ifdef CONFIG_F2FS_FAULT_INJECTION +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index 49a1ed732..ae129044c 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -3430,7 +3430,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) + return f2fs_resize_fs(filp, block_count); + } + +-static inline int f2fs_has_feature_verity(struct file *filp) ++static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) + { + struct inode *inode = file_inode(filp); + +@@ -3442,29 +3442,10 @@ static inline int f2fs_has_feature_verity(struct file *filp) + inode->i_ino); + return -EOPNOTSUPP; + } +- return 0; +-} +- +-static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) +-{ +- int err = f2fs_has_feature_verity(filp); +- +- if (err) +- return err; + + return fsverity_ioctl_enable(filp, (const void __user *)arg); + } + +-static int f2fs_ioc_enable_code_sign(struct file *filp, unsigned long arg) +-{ +- int err = f2fs_has_feature_verity(filp); +- +- if (err) +- return err; +- +- return fsverity_ioctl_enable_code_sign(filp, (const void __user *)arg); +-} +- + static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) + { + if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) +@@ -4455,8 +4436,6 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + return f2fs_ioc_resize_fs(filp, arg); + case FS_IOC_ENABLE_VERITY: + return f2fs_ioc_enable_verity(filp, arg); +- case FS_IOC_ENABLE_CODE_SIGN: +- return f2fs_ioc_enable_code_sign(filp, arg); + case FS_IOC_MEASURE_VERITY: + return f2fs_ioc_measure_verity(filp, arg); + case FS_IOC_READ_VERITY_METADATA: +@@ -5141,7 +5120,6 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + case F2FS_IOC_PRECACHE_EXTENTS: + case F2FS_IOC_RESIZE_FS: + case FS_IOC_ENABLE_VERITY: +- case FS_IOC_ENABLE_CODE_SIGN: + case FS_IOC_MEASURE_VERITY: + case FS_IOC_READ_VERITY_METADATA: + case FS_IOC_GETFSLABEL: +diff --git a/fs/hmdfs/Kconfig b/fs/hmdfs/Kconfig +deleted file mode 100644 +index 1bb5c2347..000000000 +--- a/fs/hmdfs/Kconfig ++++ /dev/null +@@ -1,40 +0,0 @@ +-config HMDFS_FS +- tristate "HMDFS filesystem support" +- help +- HMDFS is an overlay file system. Relying on the underlying file system, +- under the premise of networking, file exchanges across devices can be +- realized. Device view and merge view are provided. 
In the device view,
+- the shared directories of the corresponding devices are provided under
+- different device directories; in the merge view, a collection of shared
+- files of all devices is provided.
+-
+-config HMDFS_FS_PERMISSION
+- bool "HMDFS application permission management"
+- depends on HMDFS_FS
+- help
+- HMDFS provides cross-device file and directory sharing. Only the same
+- application can access the files and directories under the corresponding
+- package directory. It provides management and control of access
+- permissions.
+-
+- If unsure, say N.
+-
+-config HMDFS_FS_ENCRYPTION
+- bool "HMDFS message encryption"
+- depends on HMDFS_FS && TLS
+- help
+- HMDFS provides cross-device file and directory sharing by sending and
+- receiving network messages. To ensure data security, TLS encryption is
+- provided.
+-
+- If you want to improve performance, say N.
+-
+-config HMDFS_FS_DEBUG
+- bool "HMDFS debug log"
+- depends on HMDFS_FS
+- help
+- HMDFS prints a lot of logs, but many of them are debugging messages
+- that are unnecessary during normal operation and only useful when
+- diagnosing problems.
+-
+- If unsure, say N.
+diff --git a/fs/hmdfs/Makefile b/fs/hmdfs/Makefile
+deleted file mode 100644
+index 6ff465348..000000000
+--- a/fs/hmdfs/Makefile
++++ /dev/null
+@@ -1,17 +0,0 @@
+-obj-$(CONFIG_HMDFS_FS) += hmdfs.o
+-ccflags-y += -I$(src)
+-
+-hmdfs-y := main.o super.o inode.o dentry.o inode_root.o file_merge.o
+-hmdfs-y += hmdfs_client.o hmdfs_server.o inode_local.o inode_remote.o
+-hmdfs-y += inode_merge.o hmdfs_dentryfile.o file_root.o file_remote.o
+-hmdfs-y += file_local.o client_writeback.o server_writeback.o stash.o
+-hmdfs-y += hmdfs_share.o
+-
+-hmdfs-y += file_cloud.o inode_cloud.o hmdfs_dentryfile_cloud.o
+-hmdfs-y += inode_cloud_merge.o
+-hmdfs-y += comm/device_node.o comm/message_verify.o comm/node_cb.o
+-hmdfs-y += comm/connection.o comm/socket_adapter.o comm/transport.o
+-
+-hmdfs-$(CONFIG_HMDFS_FS_ENCRYPTION) += comm/crypto.o
+-hmdfs-$(CONFIG_HMDFS_FS_PERMISSION) += authority/authentication.o
+-hmdfs-$(CONFIG_HMDFS_FS_PERMISSION) += authority/config.o
+diff --git a/fs/hmdfs/authority/authentication.c b/fs/hmdfs/authority/authentication.c
+deleted file mode 100644
+index ff4ba1d04..000000000
+--- a/fs/hmdfs/authority/authentication.c
++++ /dev/null
+@@ -1,462 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/comm/authority/authentication.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include "authentication.h"
+-#include
+-#include
+-
+-#include "hmdfs.h"
+-
+-struct fs_struct *hmdfs_override_fsstruct(struct fs_struct *saved_fs)
+-{
+-#if (defined CONFIG_HMDFS_FS_PERMISSION) && (defined CONFIG_SDCARD_FS)
+- struct fs_struct *copied_fs = copy_fs_struct(saved_fs);
+-
+- if (!copied_fs)
+- return NULL;
+- copied_fs->umask = 0;
+- task_lock(current);
+- current->fs = copied_fs;
+- task_unlock(current);
+- return copied_fs;
+-#else
+- return saved_fs;
+-#endif
+-}
+-
+-void hmdfs_revert_fsstruct(struct fs_struct *saved_fs,
+- struct fs_struct *copied_fs)
+-{
+-#if (defined CONFIG_HMDFS_FS_PERMISSION) && (defined CONFIG_SDCARD_FS)
+- task_lock(current);
+- current->fs = saved_fs;
+- task_unlock(current);
+- free_fs_struct(copied_fs);
+-#endif
+-}
+-
+-const struct cred *hmdfs_override_fsids(bool is_recv_thread)
+-{
+- struct cred *cred = NULL;
+- const struct cred *old_cred = NULL;
+-
+- cred = prepare_creds();
+- if (!cred)
+- return NULL;
+-
+- cred->fsuid = is_recv_thread ?
SYSTEM_UID : USER_DATA_RW_UID; +- cred->fsgid = is_recv_thread ? SYSTEM_GID : USER_DATA_RW_GID; +- +- old_cred = override_creds(cred); +- +- return old_cred; +-} +- +-const struct cred *hmdfs_override_dir_fsids(struct inode *dir, +- struct dentry *dentry, __u16 *_perm) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(dir); +- struct cred *cred = NULL; +- const struct cred *old_cred = NULL; +- __u16 level = hmdfs_perm_get_next_level(hii->perm); +- __u16 perm = 0; +- +- cred = prepare_creds(); +- if (!cred) +- return NULL; +- +- switch (level) { +- case HMDFS_PERM_MNT: +- /* system : media_rw */ +- cred->fsuid = USER_DATA_RW_UID; +- cred->fsgid = USER_DATA_RW_GID; +- perm = (hii->perm & HMDFS_DIR_TYPE_MASK) | level; +- break; +- case HMDFS_PERM_DFS: +- /* +- * data : system : media_rw +- * system: system : media_rw, need authority +- * services: dfs_share : dfs_share +- * other : media_rw : media_rw +- **/ +- if (!strcmp(dentry->d_name.name, DFS_SHARE_NAME)) { +- perm = HMDFS_DIR_SERVICES | level; +- cred->fsuid = DFS_SHARE_UID; +- cred->fsgid = DFS_SHARE_GID; +- break; +- } +- if (!strcmp(dentry->d_name.name, PKG_ROOT_NAME)) { +- perm = HMDFS_DIR_DATA | level; +- } else { +- perm = HMDFS_DIR_PUBLIC | level; +- } +- cred->fsuid = USER_DATA_RW_UID; +- cred->fsgid = USER_DATA_RW_GID; +- break; +- case HMDFS_PERM_PKG: +- if (is_service_dir(hii->perm)) { +- cred->fsuid = DFS_SHARE_UID; +- cred->fsgid = DFS_SHARE_GID; +- perm = AUTH_SERVICES | HMDFS_DIR_PKG | level; +- break; +- } +- if (is_data_dir(hii->perm)) { +- /* +- * Mkdir for app pkg. +- * Get the appid by passing pkgname to configfs. +- * Set ROOT + media_rw for remote install, +- * local uninstall. +- * Set appid + media_rw for local install. +- */ +- int bid = get_bundle_uid(hmdfs_sb(dentry->d_sb), +- dentry->d_name.name); +- +- if (bid != 0) { +- cred->fsuid = KUIDT_INIT(bid); +- cred->fsgid = KGIDT_INIT(bid); +- } else { +- cred->fsuid = ROOT_UID; +- cred->fsgid = ROOT_GID; +- } +- perm = AUTH_PKG | HMDFS_DIR_PKG | level; +- } else { +- cred->fsuid = dir->i_uid; +- cred->fsgid = dir->i_gid; +- perm = (hii->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- } +- break; +- case HMDFS_PERM_OTHER: +- cred->fsuid = dir->i_uid; +- cred->fsgid = dir->i_gid; +- if (is_pkg_auth(hii->perm)) +- perm = AUTH_PKG | HMDFS_DIR_PKG_SUB | level; +- else +- perm = (hii->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- break; +- default: +- /* ! 
it should not get to here */ +- hmdfs_err("hmdfs perm incorrect got default case, level:%u", level); +- break; +- } +- +- *_perm = perm; +- old_cred = override_creds(cred); +- +- return old_cred; +-} +- +-int hmdfs_override_dir_id_fs(struct cache_fs_override *or, +- struct inode *dir, +- struct dentry *dentry, +- __u16 *perm) +-{ +- or->saved_cred = hmdfs_override_dir_fsids(dir, dentry, perm); +- if (!or->saved_cred) +- return -ENOMEM; +- +- or->saved_fs = current->fs; +- or->copied_fs = hmdfs_override_fsstruct(or->saved_fs); +- if (!or->copied_fs) { +- hmdfs_revert_fsids(or->saved_cred); +- return -ENOMEM; +- } +- +- return 0; +-} +- +-void hmdfs_revert_dir_id_fs(struct cache_fs_override *or) +-{ +- hmdfs_revert_fsstruct(or->saved_fs, or->copied_fs); +- hmdfs_revert_fsids(or->saved_cred); +-} +- +-const struct cred *hmdfs_override_file_fsids(struct inode *dir, __u16 *_perm) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(dir); +- struct cred *cred = NULL; +- const struct cred *old_cred = NULL; +- __u16 level = hmdfs_perm_get_next_level(hii->perm); +- uint16_t perm; +- +- perm = HMDFS_FILE_DEFAULT | level; +- +- cred = prepare_creds(); +- if (!cred) +- return NULL; +- +- cred->fsuid = dir->i_uid; +- cred->fsgid = dir->i_gid; +- if (is_pkg_auth(hii->perm)) +- perm = AUTH_PKG | HMDFS_FILE_PKG_SUB | level; +- else +- perm = (hii->perm & AUTH_MASK) | HMDFS_FILE_DEFAULT | level; +- +- *_perm = perm; +- old_cred = override_creds(cred); +- +- return old_cred; +-} +- +-void hmdfs_revert_fsids(const struct cred *old_cred) +-{ +- const struct cred *cur_cred; +- +- cur_cred = current->cred; +- revert_creds(old_cred); +- put_cred(cur_cred); +-} +- +-int hmdfs_persist_perm(struct dentry *dentry, __u16 *perm) +-{ +- int err; +- struct inode *minode = d_inode(dentry); +- +- if (!minode) +- return -EINVAL; +- +- inode_lock(minode); +- err = __vfs_setxattr(&nop_mnt_idmap, dentry, minode, HMDFS_PERM_XATTR, perm, +- sizeof(*perm), XATTR_CREATE); +- if (!err) +- fsnotify_xattr(dentry); +- else if (err && err != -EEXIST) +- hmdfs_err("failed to setxattr, err=%d", err); +- inode_unlock(minode); +- return err; +-} +- +-__u16 hmdfs_read_perm(struct inode *inode) +-{ +- __u16 ret = 0; +- int size = 0; +- struct dentry *dentry = d_find_alias(inode); +- +- if (!dentry) +- return ret; +- +- size = __vfs_getxattr(dentry, inode, HMDFS_PERM_XATTR, &ret, +- sizeof(ret)); +- /* +- * some file may not set setxattr with perm +- * eg. 
files created in sdcard dir by other user +- **/ +- if (size < 0 || size != sizeof(ret)) +- ret = HMDFS_ALL_MASK; +- +- dput(dentry); +- return ret; +-} +- +-static __u16 __inherit_perm_dir(struct inode *parent, struct inode *inode) +-{ +- __u16 perm = 0; +- struct hmdfs_inode_info *info = hmdfs_i(parent); +- __u16 level = hmdfs_perm_get_next_level(info->perm); +- struct dentry *dentry = d_find_alias(inode); +- +- if (!dentry) +- return perm; +- +- switch (level) { +- case HMDFS_PERM_MNT: +- /* system : media_rw */ +- perm = (info->perm & HMDFS_DIR_TYPE_MASK) | level; +- break; +- case HMDFS_PERM_DFS: +- /* +- * data : system : media_rw +- * system: system : media_rw, need authority +- * services: dfs_share : dfs_share +- * other : media_rw : media_rw +- **/ +- if (!strcmp(dentry->d_name.name, DFS_SHARE_NAME)) { +- // "services" +- perm = HMDFS_DIR_SERVICES | level; +- } else if (!strcmp(dentry->d_name.name, PKG_ROOT_NAME)) { +- // "data" +- perm = HMDFS_DIR_DATA | level; +- } else if (!strcmp(dentry->d_name.name, SYSTEM_NAME)) { +- // "system" +- perm = AUTH_SYSTEM | HMDFS_DIR_SYSTEM | level; +- } else { +- perm = HMDFS_DIR_PUBLIC | level; +- } +- break; +- case HMDFS_PERM_PKG: +- if (is_service_dir(info->perm)) { +- perm = AUTH_SERVICES | HMDFS_DIR_PKG | level; +- break; +- } +- if (is_data_dir(info->perm)) { +- /* +- * Mkdir for app pkg. +- * Get the appid by passing pkgname to configfs. +- * Set ROOT + media_rw for remote install, +- * local uninstall. +- * Set appid + media_rw for local install. +- */ +- perm = AUTH_PKG | HMDFS_DIR_PKG | level; +- } else { +- perm = (info->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- } +- break; +- case HMDFS_PERM_OTHER: +- if (is_pkg_auth(info->perm)) +- perm = AUTH_PKG | HMDFS_DIR_PKG_SUB | level; +- else +- perm = (info->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- break; +- default: +- /* ! 
it should not get to here */ +- hmdfs_err("hmdfs perm incorrect got default case, level:%u", level); +- break; +- } +- dput(dentry); +- return perm; +-} +- +-static __u16 __inherit_perm_file(struct inode *parent) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(parent); +- __u16 level = hmdfs_perm_get_next_level(hii->perm); +- uint16_t perm; +- +- perm = HMDFS_FILE_DEFAULT | level; +- +- if (is_pkg_auth(hii->perm)) +- perm = AUTH_PKG | HMDFS_FILE_PKG_SUB | level; +- else +- perm = (hii->perm & AUTH_MASK) | HMDFS_FILE_DEFAULT | level; +- +- return perm; +-} +- +-__u16 hmdfs_perm_inherit(struct inode *parent_inode, struct inode *child) +-{ +- __u16 perm; +- +- if (S_ISDIR(child->i_mode)) +- perm = __inherit_perm_dir(parent_inode, child); +- else +- perm = __inherit_perm_file(parent_inode); +- return perm; +-} +- +-void check_and_fixup_ownership(struct inode *parent_inode, struct inode *child) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(child); +- struct hmdfs_inode_info *dir = hmdfs_i(parent_inode); +- +- if (info->perm == HMDFS_ALL_MASK) +- info->perm = hmdfs_perm_inherit(parent_inode, child); +- if (is_service_dir(dir->perm)) +- child->i_mode = child->i_mode | S_IRWXG; +-} +- +-void check_and_fixup_ownership_remote(struct inode *dir, +- struct inode *dinode, +- struct dentry *dentry) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(dir); +- struct hmdfs_inode_info *dinfo = hmdfs_i(dinode); +- __u16 level = hmdfs_perm_get_next_level(hii->perm); +- __u16 perm = 0; +- +- if (IS_ERR_OR_NULL(dinode)) +- return; +- +- hmdfs_debug("level:0x%X", level); +- switch (level) { +- case HMDFS_PERM_MNT: +- /* system : media_rw */ +- dinode->i_uid = USER_DATA_RW_UID; +- dinode->i_gid = USER_DATA_RW_GID; +- perm = (hii->perm & HMDFS_DIR_TYPE_MASK) | level; +- break; +- case HMDFS_PERM_DFS: +- /* +- * data : system : media_rw +- * system: system : media_rw, need authority +- * other : media_rw : media_rw +- **/ +- if (!strcmp(dentry->d_name.name, DFS_SHARE_NAME)) { +- perm = HMDFS_DIR_SERVICES | level; +- dinode->i_uid = DFS_SHARE_UID; +- dinode->i_gid = DFS_SHARE_GID; +- dinode->i_mode = dinode->i_mode | S_IRWXG; +- break; +- } +- if (!strcmp(dentry->d_name.name, PKG_ROOT_NAME)) { +- perm = HMDFS_DIR_DATA | level; +- } else { +- perm = HMDFS_DIR_PUBLIC | level; +- } +- dinode->i_uid = USER_DATA_RW_UID; +- dinode->i_gid = USER_DATA_RW_GID; +- break; +- case HMDFS_PERM_PKG: +- if (is_service_dir(hii->perm)) { +- dinode->i_uid = DFS_SHARE_UID; +- dinode->i_gid = DFS_SHARE_GID; +- dinode->i_mode = dinode->i_mode | S_IRWXG; +- perm = AUTH_SERVICES | HMDFS_DIR_PKG | level; +- break; +- } +- if (is_data_dir(hii->perm)) { +- /* +- * Mkdir for app pkg. +- * Get the appid by passing pkgname to configfs. +- * Set ROOT + media_rw for remote install, +- * local uninstall. +- * Set appid + media_rw for local install. 
+- */ +- int bid = get_bundle_uid(hmdfs_sb(dentry->d_sb), +- dentry->d_name.name); +- if (bid != 0) { +- dinode->i_uid = KUIDT_INIT(bid); +- dinode->i_gid = KGIDT_INIT(bid); +- } else { +- dinode->i_uid = ROOT_UID; +- dinode->i_gid = ROOT_GID; +- } +- perm = AUTH_PKG | HMDFS_DIR_PKG | level; +- } else { +- dinode->i_uid = dir->i_uid; +- dinode->i_gid = dir->i_gid; +- perm = (hii->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- } +- break; +- case HMDFS_PERM_OTHER: +- dinode->i_uid = dir->i_uid; +- dinode->i_gid = dir->i_gid; +- if (is_service_auth(hii->perm)) { +- dinode->i_mode = dir->i_mode | S_IRWXG; +- perm = AUTH_PKG | HMDFS_DIR_PKG_SUB | level; +- break; +- } +- if (is_pkg_auth(hii->perm)) +- perm = AUTH_PKG | HMDFS_DIR_PKG_SUB | level; +- else +- perm = (hii->perm & AUTH_MASK) | HMDFS_DIR_DEFAULT | level; +- break; +- default: +- /* ! it should not get to here */ +- hmdfs_err("hmdfs perm incorrect got default case, level:%u", level); +- break; +- } +- +- dinfo->perm = perm; +-} +- +-void hmdfs_root_inode_perm_init(struct inode *root_inode) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(root_inode); +- +- hii->perm = HMDFS_DIR_ROOT | HMDFS_PERM_MNT; +- set_inode_uid(root_inode, USER_DATA_RW_UID); +- set_inode_gid(root_inode, USER_DATA_RW_GID); +-} +diff --git a/fs/hmdfs/authority/authentication.h b/fs/hmdfs/authority/authentication.h +deleted file mode 100644 +index d66c19898..000000000 +--- a/fs/hmdfs/authority/authentication.h ++++ /dev/null +@@ -1,352 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/authority/authentication.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef AUTHENTICATION_H +-#define AUTHENTICATION_H +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include "hmdfs.h" +- +-struct cache_fs_override { +- struct fs_struct *saved_fs; +- struct fs_struct *copied_fs; +- const struct cred *saved_cred; +-}; +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- +-#define OID_ROOT 0 +-#define OID_SYSTEM 1000 +-#define OID_USER_DATA_RW 1008 +-#define OID_DFS_SHARE 3822 +- +-/* copied from sdcardfs/multiuser.h */ +-#define BASE_USER_RANGE 200000 /* offset for uid ranges for each user */ +- +-#define HMDFS_PERM_XATTR "user.hmdfs.perm" +- +-#define ROOT_UID KUIDT_INIT(OID_ROOT) +-#define SYSTEM_UID KUIDT_INIT(OID_SYSTEM) +-#define USER_DATA_RW_UID KUIDT_INIT(OID_USER_DATA_RW) +-#define DFS_SHARE_UID KUIDT_INIT(OID_DFS_SHARE) +- +-#define ROOT_GID KGIDT_INIT(OID_ROOT) +-#define SYSTEM_GID KGIDT_INIT(OID_SYSTEM) +-#define USER_DATA_RW_GID KGIDT_INIT(OID_USER_DATA_RW) +-#define DFS_SHARE_GID KGIDT_INIT(OID_DFS_SHARE) +- +-#define PKG_ROOT_NAME "data" +-#define DFS_SHARE_NAME "services" +-#define SYSTEM_NAME "system" +- +-/* +- * | perm fix | permmnt | permdfs | permpkg | perm other +- * /mnt/mdfs/ accoundID / device view / local / DATA / packageName /... +- * / system /... +- * / documents /... +- * / devid /....... 
+- * / merge view / +- * / sdcard / +- **/ +-#define HMDFS_PERM_MASK 0x000F +- +-#define HMDFS_PERM_FIX 0 +-#define HMDFS_PERM_MNT 1 +-#define HMDFS_PERM_DFS 2 +-#define HMDFS_PERM_PKG 3 +-#define HMDFS_PERM_OTHER 4 +- +-static inline bool is_perm_fix(__u16 perm) +-{ +- return (perm & HMDFS_PERM_MASK) == HMDFS_PERM_FIX; +-} +- +-static inline bool is_perm_mnt(__u16 perm) +-{ +- return (perm & HMDFS_PERM_MASK) == HMDFS_PERM_MNT; +-} +- +-static inline bool is_perm_dfs(__u16 perm) +-{ +- return (perm & HMDFS_PERM_MASK) == HMDFS_PERM_DFS; +-} +- +-static inline bool is_perm_pkg(__u16 perm) +-{ +- return (perm & HMDFS_PERM_MASK) == HMDFS_PERM_PKG; +-} +- +-static inline bool is_perm_other(__u16 perm) +-{ +- return (perm & HMDFS_PERM_MASK) == HMDFS_PERM_OTHER; +-} +- +-static inline void hmdfs_check_cred(const struct cred *cred) +-{ +- if (cred->fsuid.val != OID_SYSTEM || cred->fsgid.val != OID_SYSTEM) +- hmdfs_warning("uid is %u, gid is %u", cred->fsuid.val, +- cred->fsgid.val); +-} +- +-/* dir and file type mask for hmdfs */ +-#define HMDFS_DIR_TYPE_MASK 0x00F0 +- +-/* LEVEL 0 perm fix - permmnt , only root dir */ +-#define HMDFS_DIR_ROOT 0x0010 +- +-/* LEVEL 1 perm dfs */ +-#define HMDFS_DIR_PUBLIC 0x0020 +-#define HMDFS_DIR_DATA 0x0030 +-#define HMDFS_DIR_SYSTEM 0x0040 +- +-/* LEVEL 2 HMDFS_PERM_PKG */ +-#define HMDFS_DIR_PKG 0x0050 +- +-/* LEVEL 2~n HMDFS_PERM_OTHER */ +-#define PUBLIC_FILE 0x0060 +-#define PUBLIC_SUB_DIR 0x0070 +-#define SYSTEM_SUB_DIR 0x0080 +-#define SYSTEM_SUB_FILE 0x0090 +- +-#define HMDFS_DIR_PKG_SUB 0x00A0 +-#define HMDFS_FILE_PKG_SUB 0x00B0 +- +-/* access right is derived +- * PUBLIC_SUB_DIR SYSTEM_SUB_DIR HMDFS_DIR_PKG_SUB +- * PUBLIC_FILE SYSTEM_SUB_FILE HMDFS_FILE_PKG_SUB +- */ +-#define HMDFS_DIR_DEFAULT 0x00C0 +-#define HMDFS_FILE_DEFAULT 0x00D0 +-#define HMDFS_DIR_SERVICES 0x00E0 +-#define HMDFS_TYPE_DEFAULT 0x0000 +- +-static inline bool is_data_dir(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_DIR_DATA; +-} +- +-static inline bool is_service_dir(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_DIR_SERVICES; +-} +- +-static inline bool is_pkg_dir(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_DIR_PKG; +-} +- +-static inline bool is_pkg_sub_dir(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_DIR_PKG_SUB; +-} +- +-static inline bool is_pkg_sub_file(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_FILE_PKG_SUB; +-} +- +-static inline bool is_default_dir(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_DIR_DEFAULT; +-} +- +-static inline bool is_default_file(__u16 perm) +-{ +- return (perm & HMDFS_DIR_TYPE_MASK) == HMDFS_FILE_DEFAULT; +-} +- +-#define AUTH_MASK 0x0F00 +-#define AUTH_PKG 0x0100 +-#define AUTH_SYSTEM 0x0200 +-#define AUTH_SERVICES 0x0400 +- +-static inline bool is_pkg_auth(__u16 perm) +-{ +- return (perm & AUTH_MASK) == AUTH_PKG; +-} +- +-static inline bool is_system_auth(__u16 perm) +-{ +- return (perm & AUTH_MASK) == AUTH_SYSTEM; +-} +- +-static inline bool is_service_auth(__u16 perm) +-{ +- return (perm & AUTH_MASK) == AUTH_SERVICES; +-} +-#define HMDFS_MOUNT_POINT_MASK 0xF000 +-#define HMDFS_MNT_COMMON 0x0000 // sdcard +-#define HMDFS_MNT_SDCARD 0x1000 // sdcard +-#define HMDFS_MNT_ACNTID 0x2000 // accound id +- +-#define HMDFS_ALL_MASK (HMDFS_MOUNT_POINT_MASK | AUTH_MASK | HMDFS_DIR_TYPE_MASK | HMDFS_PERM_MASK) +- +-static inline void set_inode_gid(struct inode *inode, kgid_t gid) +-{ +- inode->i_gid = gid; +-} +- +-static inline kuid_t 
get_inode_uid(struct inode *inode) +-{ +- kuid_t uid = inode->i_uid; +- return uid; +-} +- +-static inline void set_inode_uid(struct inode *inode, kuid_t uid) +-{ +- inode->i_uid = uid; +-} +- +-static inline kuid_t hmdfs_override_inode_uid(struct inode *inode) +-{ +- kuid_t uid = get_inode_uid(inode); +- +- set_inode_uid(inode, current_fsuid()); +- return uid; +-} +- +-static inline void hmdfs_revert_inode_uid(struct inode *inode, kuid_t uid) +-{ +- set_inode_uid(inode, uid); +-} +- +-static inline const struct cred *hmdfs_override_creds(const struct cred *new) +-{ +- if (!new) +- return NULL; +- +- return override_creds(new); +-} +- +-static inline void hmdfs_revert_creds(const struct cred *old) +-{ +- if (old) +- revert_creds(old); +-} +- +-static inline __u16 hmdfs_perm_get_next_level(__u16 perm) +-{ +- __u16 level = (perm & HMDFS_PERM_MASK) + 1; +- +- if (level <= HMDFS_PERM_OTHER) +- return level; +- else +- return HMDFS_PERM_OTHER; +-} +- +-struct fs_struct *hmdfs_override_fsstruct(struct fs_struct *saved_fs); +-void hmdfs_revert_fsstruct(struct fs_struct *saved_fs, +- struct fs_struct *copied_fs); +-const struct cred *hmdfs_override_fsids(bool is_recv_thread); +-const struct cred *hmdfs_override_dir_fsids(struct inode *dir, +- struct dentry *dentry, __u16 *perm); +-const struct cred *hmdfs_override_file_fsids(struct inode *dir, __u16 *perm); +-void hmdfs_revert_fsids(const struct cred *old_cred); +-int hmdfs_persist_perm(struct dentry *dentry, __u16 *perm); +-__u16 hmdfs_read_perm(struct inode *inode); +-void hmdfs_root_inode_perm_init(struct inode *root_inode); +-void check_and_fixup_ownership(struct inode *parent_inode, struct inode *child); +-int hmdfs_override_dir_id_fs(struct cache_fs_override *or, +- struct inode *dir, +- struct dentry *dentry, +- __u16 *perm); +-void hmdfs_revert_dir_id_fs(struct cache_fs_override *or); +-void check_and_fixup_ownership_remote(struct inode *dir, +- struct inode *dinode, +- struct dentry *dentry); +-extern int get_bid(const char *bname); +-extern int __init hmdfs_init_configfs(void); +-extern void hmdfs_exit_configfs(void); +- +-static inline int get_bundle_uid(struct hmdfs_sb_info *sbi, const char *bname) +-{ +- return sbi->user_id * BASE_USER_RANGE + get_bid(bname); +-} +- +-#else +- +-static inline +-void hmdfs_root_inode_perm_init(struct inode *root_inode) +-{ +-} +- +-static inline +-void hmdfs_revert_fsids(const struct cred *old_cred) +-{ +-} +- +-static inline +-int hmdfs_override_dir_id_fs(struct cache_fs_override *or, +- struct inode *dir, +- struct dentry *dentry, +- __u16 *perm) +-{ +- return 0; +-} +- +-static inline +-void hmdfs_revert_dir_id_fs(struct cache_fs_override *or) +-{ +-} +- +-static inline +-void check_and_fixup_ownership(struct inode *parent_inode, struct inode *child) +-{ +-} +- +-static inline +-const struct cred *hmdfs_override_fsids(bool is_recv_thread) +-{ +- return ERR_PTR(-ENOTTY); +-} +- +-static inline +-const struct cred *hmdfs_override_creds(const struct cred *new) +-{ +- return ERR_PTR(-ENOTTY); +-} +- +-static inline +-void hmdfs_revert_creds(const struct cred *old) +-{ +- +-} +- +-static inline +-void check_and_fixup_ownership_remote(struct inode *dir, +- struct inode *inode, +- struct dentry *dentry) +-{ +-} +- +-static inline +-kuid_t hmdfs_override_inode_uid(struct inode *inode) +-{ +- return KUIDT_INIT((uid_t)0); +-} +- +-static inline +-void hmdfs_revert_inode_uid(struct inode *inode, kuid_t uid) +-{ +-} +- +-static inline +-void hmdfs_check_cred(const struct cred *cred) +-{ +-} +- +-static inline 
int __init hmdfs_init_configfs(void) { return 0; } +-static inline void hmdfs_exit_configfs(void) {} +- +-#endif /* CONFIG_HMDFS_FS_PERMISSION */ +- +-#endif +diff --git a/fs/hmdfs/authority/config.c b/fs/hmdfs/authority/config.c +deleted file mode 100644 +index 1610ca902..000000000 +--- a/fs/hmdfs/authority/config.c ++++ /dev/null +@@ -1,377 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/authority/config.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include "hmdfs.h" +- +-#define UID_ATTR_TYPE 0 +-#define GID_ATTR_TYPE 1 +- +-static struct kmem_cache *hmdfs_bid_entry_cachep; +- +-struct hmdfs_bid_entry { +- struct hlist_node node; +- struct qstr str; +- int id; +-}; +- +-struct hmdfs_config_bitem { +- struct config_item item; +- struct qstr str; +-}; +- +-static unsigned int make_hash(const char *name, unsigned int len) +-{ +- unsigned long hash; +- +- hash = init_name_hash(0); +- while (len--) +- hash = partial_name_hash(tolower(*name++), hash); +- +- return end_name_hash(hash); +-} +- +-static struct qstr make_qstr(const char *name) +-{ +- struct qstr str; +- str.name = name; +- str.len = strlen(name); +- str.hash = make_hash(str.name, str.len); +- +- return str; +-} +- +-static struct hmdfs_bid_entry *alloc_bid_entry(const char *name, int id) +-{ +- struct hmdfs_bid_entry *bid_entry; +- char *bid_entry_name; +- +- bid_entry = kmem_cache_alloc(hmdfs_bid_entry_cachep, GFP_KERNEL); +- if (!bid_entry) { +- bid_entry = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bid_entry_name = kstrdup(name, GFP_KERNEL); +- if (!bid_entry_name) { +- kmem_cache_free(hmdfs_bid_entry_cachep, bid_entry); +- bid_entry = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- INIT_HLIST_NODE(&bid_entry->node); +- bid_entry->str = make_qstr(bid_entry_name); +- bid_entry->id = id; +-out: +- return bid_entry; +-} +- +-static void free_bid_entry(struct hmdfs_bid_entry *bid_entry) +-{ +- if (bid_entry == NULL) +- return; +- +- kfree(bid_entry->str.name); +- kmem_cache_free(hmdfs_bid_entry_cachep, bid_entry); +-} +- +-static struct hmdfs_config_bitem *alloc_bitem(const char *name) +-{ +- struct hmdfs_config_bitem *bitem; +- char *bitem_name; +- +- bitem = kzalloc(sizeof(*bitem), GFP_KERNEL); +- if (!bitem) { +- bitem = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bitem_name = kstrdup(name, GFP_KERNEL); +- if (!bitem_name) { +- kfree(bitem); +- bitem = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bitem->str = make_qstr(bitem_name); +-out: +- return bitem; +-} +- +-static void free_bitem(struct hmdfs_config_bitem *bitem) +-{ +- if (bitem == NULL) +- return; +- +- kfree(bitem->str.name); +- kfree(bitem); +-} +- +-#define HMDFS_BUNDLE_ATTRIBUTE(_attr_) \ +- \ +-static DEFINE_HASHTABLE(hmdfs_##_attr_##_hash_table, 4); \ +- \ +-static DEFINE_MUTEX(hmdfs_##_attr_##_hash_mutex); \ +- \ +-static int query_##_attr_##_hash_entry(struct qstr *str) \ +-{ \ +- int id = 0; \ +- struct hmdfs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- mutex_lock(&hmdfs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(hmdfs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- id = bid_entry->id; \ +- break; \ +- } \ +- } \ +- mutex_unlock(&hmdfs_##_attr_##_hash_mutex); \ +- \ +- return id; \ +-} \ +- \ +-static int insert_##_attr_##_hash_entry(struct qstr *str, int id) \ +-{ \ +- int err = 0; \ +- struct hmdfs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- 
\ +- hmdfs_info("insert name = %s", str->name); \ +- \ +- mutex_lock(&hmdfs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(hmdfs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- bid_entry->id = id; \ +- mutex_unlock(&hmdfs_##_attr_##_hash_mutex); \ +- goto out; \ +- } \ +- } \ +- mutex_unlock(&hmdfs_##_attr_##_hash_mutex); \ +- \ +- bid_entry = alloc_bid_entry(str->name, id); \ +- if (IS_ERR(bid_entry)) { \ +- err = PTR_ERR(bid_entry); \ +- goto out; \ +- } \ +- \ +- hash_add_rcu(hmdfs_##_attr_##_hash_table, &bid_entry->node, \ +- bid_entry->str.hash); \ +-out: \ +- return err; \ +-} \ +- \ +-static void remove_##_attr_##_hash_entry(struct qstr *str) \ +-{ \ +- struct hmdfs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- hmdfs_info("remove name = %s", str->name); \ +- \ +- mutex_lock(&hmdfs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(hmdfs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- hash_del_rcu(&bid_entry->node); \ +- free_bid_entry(bid_entry); \ +- break; \ +- } \ +- } \ +- mutex_unlock(&hmdfs_##_attr_##_hash_mutex); \ +-} \ +- \ +-static void clear_##_attr_##_hash_entry(void) \ +-{ \ +- int index; \ +- struct hmdfs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- hmdfs_info("clear bid entry"); \ +- \ +- mutex_lock(&hmdfs_##_attr_##_hash_mutex); \ +- hash_for_each_safe(hmdfs_##_attr_##_hash_table, index, \ +- hash_node, bid_entry, node) { \ +- hash_del_rcu(&bid_entry->node); \ +- kfree(bid_entry->str.name); \ +- kmem_cache_free(hmdfs_bid_entry_cachep, bid_entry); \ +- } \ +- mutex_unlock(&hmdfs_##_attr_##_hash_mutex); \ +-} \ +- \ +-static int hmdfs_##_attr_##_get(const char *bname) \ +-{ \ +- struct qstr str; \ +- \ +- str = make_qstr(bname); \ +- return query_##_attr_##_hash_entry(&str); \ +-} \ +- \ +-static ssize_t hmdfs_##_attr_##_show(struct config_item *item, \ +- char *page) \ +-{ \ +- int id; \ +- struct hmdfs_config_bitem *bitem; \ +- \ +- hmdfs_info("show bundle id"); \ +- \ +- bitem = container_of(item, struct hmdfs_config_bitem, item); \ +- id = query_##_attr_##_hash_entry(&bitem->str); \ +- \ +- return scnprintf(page, PAGE_SIZE, "%u\n", id); \ +-} \ +- \ +-static ssize_t hmdfs_##_attr_##_store(struct config_item *item, \ +- const char *page, size_t count) \ +-{ \ +- int id; \ +- int err; \ +- size_t size; \ +- struct hmdfs_config_bitem *bitem; \ +- \ +- hmdfs_info("store bundle id"); \ +- \ +- bitem = container_of(item, struct hmdfs_config_bitem, item); \ +- \ +- if (kstrtouint(page, 10, &id)) { \ +- size = -EINVAL; \ +- goto out; \ +- } \ +- \ +- err = insert_##_attr_##_hash_entry(&bitem->str, id); \ +- if (err) { \ +- size = err; \ +- goto out; \ +- } \ +- \ +- size = count; \ +-out: \ +- return size; \ +-} \ +- \ +-static struct configfs_attribute hmdfs_##_attr_##_attr = { \ +- .ca_name = __stringify(_attr_), \ +- .ca_mode = S_IRUGO | S_IWUGO, \ +- .ca_owner = THIS_MODULE, \ +- .show = hmdfs_##_attr_##_show, \ +- .store = hmdfs_##_attr_##_store, \ +-}; +- +-HMDFS_BUNDLE_ATTRIBUTE(appid) +- +-static struct configfs_attribute *hmdfs_battrs[] = { +- &hmdfs_appid_attr, +- NULL, +-}; +- +-static void hmdfs_config_bitem_release(struct config_item *item) +-{ +- struct hmdfs_config_bitem *bitem; +- +- hmdfs_info("release bundle item"); +- +- bitem = container_of(item, struct hmdfs_config_bitem, item); +- remove_appid_hash_entry(&bitem->str); +- 
remove_appid_hash_entry(&bitem->str); +- free_bitem(bitem); +-} +- +-static struct configfs_item_operations hmdfs_config_bitem_ops = { +- .release = hmdfs_config_bitem_release, +-}; +- +-static struct config_item_type hmdfs_config_bitem_type = { +- .ct_item_ops = &hmdfs_config_bitem_ops, +- .ct_attrs = hmdfs_battrs, +- .ct_owner = THIS_MODULE, +-}; +- +-static struct config_item *hmdfs_make_bitem(struct config_group *group, +- const char *name) +-{ +- struct config_item *item; +- struct hmdfs_config_bitem *bitem; +- +- hmdfs_info("make bundle item = %s", name); +- +- bitem = alloc_bitem(name); +- if (IS_ERR(bitem)) { +- item = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- config_item_init_type_name(&bitem->item, name, +- &hmdfs_config_bitem_type); +- item = &bitem->item; +-out: +- return item; +-} +- +-static struct configfs_group_operations hmdfs_group_ops = { +- .make_item = hmdfs_make_bitem, +-}; +- +-static struct config_item_type hmdfs_group_type = { +- .ct_group_ops = &hmdfs_group_ops, +- .ct_owner = THIS_MODULE, +-}; +- +-static struct configfs_subsystem hmdfs_subsystem = { +- .su_group = { +- .cg_item = { +- .ci_namebuf = "hmdfs", +- .ci_type = &hmdfs_group_type, +- }, +- }, +-}; +- +-int get_bid(const char *bname) +-{ +- return hmdfs_appid_get(bname); +-} +- +-int __init hmdfs_init_configfs(void) +-{ +- int err; +- struct configfs_subsystem *subsys; +- +- hmdfs_info("init configfs"); +- +- hmdfs_bid_entry_cachep = kmem_cache_create("hmdfs_bid_entry_cachep", +- sizeof(struct hmdfs_bid_entry), 0, 0, NULL); +- if (!hmdfs_bid_entry_cachep) { +- hmdfs_err("failed to create bid entry cachep"); +- err = -ENOMEM; +- goto out; +- } +- +- subsys = &hmdfs_subsystem; +- config_group_init(&subsys->su_group); +- mutex_init(&subsys->su_mutex); +- +- err = configfs_register_subsystem(subsys); +- if (err) +- hmdfs_err("failed to register subsystem"); +- +-out: +- return err; +-} +- +-void hmdfs_exit_configfs(void) +-{ +- hmdfs_info("hmdfs exit configfs"); +- +- configfs_unregister_subsystem(&hmdfs_subsystem); +- clear_appid_hash_entry(); +- +- kmem_cache_destroy(hmdfs_bid_entry_cachep); +-} +\ No newline at end of file +diff --git a/fs/hmdfs/client_writeback.c b/fs/hmdfs/client_writeback.c +deleted file mode 100644 +index 8f39025b2..000000000 +--- a/fs/hmdfs/client_writeback.c ++++ /dev/null +@@ -1,543 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/client_writeback.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "hmdfs.h" +-#include "hmdfs_trace.h" +- +-/* 200ms */ +-#define HMDFS_MAX_PAUSE max((HZ / 5), 1) +-#define HMDFS_BANDWIDTH_INTERVAL max((HZ / 5), 1) +-/* Dirty type */ +-#define HMDFS_DIRTY_FS 0 +-#define HMDFS_DIRTY_FILE 1 +-/* Exceed flags */ +-#define HMDFS_FS_EXCEED (1 << HMDFS_DIRTY_FS) +-#define HMDFS_FILE_EXCEED (1 << HMDFS_DIRTY_FILE) +-/* Ratelimit calculate shift */ +-#define HMDFS_LIMIT_SHIFT 10 +- +-void hmdfs_writeback_inodes_sb_handler(struct work_struct *work) +-{ +- struct hmdfs_writeback *hwb = container_of( +- work, struct hmdfs_writeback, dirty_sb_writeback_work.work); +- +- try_to_writeback_inodes_sb(hwb->sbi->sb, WB_REASON_FS_FREE_SPACE); +-} +- +-void hmdfs_writeback_inode_handler(struct work_struct *work) +-{ +- struct hmdfs_inode_info *info = NULL; +- struct inode *inode = NULL; +- struct hmdfs_writeback *hwb = container_of( +- work, struct hmdfs_writeback, dirty_inode_writeback_work.work); +- +- spin_lock(&hwb->inode_list_lock); +- while (likely(!list_empty(&hwb->inode_list_head))) { +- info = list_first_entry(&hwb->inode_list_head, +- struct hmdfs_inode_info, wb_list); +- list_del_init(&info->wb_list); +- spin_unlock(&hwb->inode_list_lock); +- +- inode = &info->vfs_inode; +- write_inode_now(inode, 0); +- iput(inode); +- spin_lock(&hwb->inode_list_lock); +- } +- spin_unlock(&hwb->inode_list_lock); +-} +- +-static void hmdfs_writeback_inodes_sb_delayed(struct super_block *sb, +- unsigned int delay) +-{ +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- unsigned long timeout; +- +- timeout = msecs_to_jiffies(delay); +- if (!timeout || !work_busy(&sbi->h_wb->dirty_sb_writeback_work.work)) +- mod_delayed_work(sbi->h_wb->dirty_sb_writeback_wq, +- &sbi->h_wb->dirty_sb_writeback_work, timeout); +-} +- +-static inline void hmdfs_writeback_inodes_sb(struct super_block *sb) +-{ +- hmdfs_writeback_inodes_sb_delayed(sb, 0); +-} +- +-static void hmdfs_writeback_inode(struct super_block *sb, struct inode *inode) +-{ +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- struct hmdfs_writeback *hwb = sbi->h_wb; +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- spin_lock(&hwb->inode_list_lock); +- if (list_empty(&info->wb_list)) { +- ihold(inode); +- list_add_tail(&info->wb_list, &hwb->inode_list_head); +- queue_delayed_work(hwb->dirty_inode_writeback_wq, +- &hwb->dirty_inode_writeback_work, 0); +- } +- spin_unlock(&hwb->inode_list_lock); +-} +- +-static unsigned long hmdfs_idirty_pages(struct inode *inode, int tag) +-{ +-#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE +- struct folio_batch fbatch; +-#else +- struct pagevec pvec; +-#endif +- unsigned long nr_dirty_pages = 0; +- pgoff_t index = 0; +- +-#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE +-#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE +- folio_batch_init(&fbatch); +-#else +- pagevec_init(&pvec); +-#endif +-#else +- pagevec_init(&pvec, 0); +-#endif +- +-#if KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE +- while (filemap_get_folios_tag(inode->i_mapping, &index, +- (pgoff_t)-1, tag, &fbatch)) { +- for (int i = 0; i < fbatch.nr; i++) { +- struct folio *folio = fbatch.folios[i]; +- if (folio_test_dirty(folio) || folio_test_writeback(folio)) { +- nr_dirty_pages++; +- } +- } +- folio_batch_release(&fbatch); +- cond_resched(); +- } +-#else +- while (pagevec_lookup_tag(&pvec, inode->i_mapping, &index, tag)) { +- nr_dirty_pages += pagevec_count(&pvec); +- pagevec_release(&pvec); +- cond_resched(); +- } +-#endif 
+-
+- return nr_dirty_pages;
+-}
+-
+-static inline unsigned long hmdfs_ratio_thresh(unsigned long ratio,
+- unsigned long thresh)
+-{
+- unsigned long ret = (ratio * thresh) >> HMDFS_LIMIT_SHIFT;
+-
+- return (ret == 0) ? 1 : ret;
+-}
+-
+-static inline unsigned long hmdfs_thresh_ratio(unsigned long base,
+- unsigned long thresh)
+-{
+- unsigned long ratio = (base << HMDFS_LIMIT_SHIFT) / thresh;
+-
+- return (ratio == 0) ? 1 : ratio;
+-}
+-
+-void hmdfs_calculate_dirty_thresh(struct hmdfs_writeback *hwb)
+-{
+- hwb->dirty_fs_thresh = DIV_ROUND_UP(hwb->dirty_fs_bytes, PAGE_SIZE);
+- hwb->dirty_file_thresh = DIV_ROUND_UP(hwb->dirty_file_bytes, PAGE_SIZE);
+- hwb->dirty_fs_bg_thresh =
+- DIV_ROUND_UP(hwb->dirty_fs_bg_bytes, PAGE_SIZE);
+- hwb->dirty_file_bg_thresh =
+- DIV_ROUND_UP(hwb->dirty_file_bg_bytes, PAGE_SIZE);
+-
+- hwb->fs_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_fs_bg_thresh,
+- hwb->dirty_fs_thresh);
+- hwb->file_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_file_bg_thresh,
+- hwb->dirty_file_thresh);
+- hwb->fs_file_ratio = hmdfs_thresh_ratio(hwb->dirty_file_thresh,
+- hwb->dirty_fs_thresh);
+-}
+-
+-static void hmdfs_init_dirty_limit(struct hmdfs_dirty_throttle_control *hdtc)
+-{
+- struct hmdfs_writeback *hwb = hdtc->hwb;
+-
+- hdtc->fs_thresh = hdtc->hwb->dirty_fs_thresh;
+- hdtc->file_thresh = hdtc->hwb->dirty_file_thresh;
+- hdtc->fs_bg_thresh = hdtc->hwb->dirty_fs_bg_thresh;
+- hdtc->file_bg_thresh = hdtc->hwb->dirty_file_bg_thresh;
+-
+- if (!hwb->dirty_auto_threshold)
+- return;
+-
+- /*
+- * Init thresh according to the previous bandwidth-adjusted thresh;
+- * the thresh should be no more than the configured thresh.
+- */
+- if (hwb->bw_fs_thresh < hdtc->fs_thresh) {
+- hdtc->fs_thresh = hwb->bw_fs_thresh;
+- hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio,
+- hdtc->fs_thresh);
+- }
+- if (hwb->bw_file_thresh < hdtc->file_thresh) {
+- hdtc->file_thresh = hwb->bw_file_thresh;
+- hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio,
+- hdtc->file_thresh);
+- }
+- /*
+- * The thresh should be updated the first time dirty pages
+- * exceed the freerun ceiling.
+- */
+- hdtc->thresh_time_stamp = jiffies - HMDFS_BANDWIDTH_INTERVAL - 1;
+-}
+-
+-static void hmdfs_update_dirty_limit(struct hmdfs_dirty_throttle_control *hdtc)
+-{
+- struct hmdfs_writeback *hwb = hdtc->hwb;
+- struct bdi_writeback *wb = hwb->wb;
+- unsigned int time_limit = hwb->writeback_timelimit;
+- unsigned long bw = wb->avg_write_bandwidth;
+- unsigned long thresh;
+-
+- if (!hwb->dirty_auto_threshold)
+- return;
+-
+- spin_lock(&hwb->write_bandwidth_lock);
+- if (bw > hwb->max_write_bandwidth)
+- hwb->max_write_bandwidth = bw;
+-
+- if (bw < hwb->min_write_bandwidth)
+- hwb->min_write_bandwidth = bw;
+- hwb->avg_write_bandwidth = bw;
+- spin_unlock(&hwb->write_bandwidth_lock);
+-
+- /*
+- * If the bandwidth is lower than the lower limit, the peer is
+- * probably offline, and it is meaningless to set such a low thresh.
+- */ +- bw = max(bw, hwb->bw_thresh_lowerlimit); +- thresh = bw * time_limit / roundup_pow_of_two(HZ); +- if (thresh >= hwb->dirty_fs_thresh) { +- hdtc->fs_thresh = hwb->dirty_fs_thresh; +- hdtc->file_thresh = hwb->dirty_file_thresh; +- hdtc->fs_bg_thresh = hwb->dirty_fs_bg_thresh; +- hdtc->file_bg_thresh = hwb->dirty_file_bg_thresh; +- } else { +- /* Adjust thresh according to current bandwidth */ +- hdtc->fs_thresh = thresh; +- hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio, +- hdtc->fs_thresh); +- hdtc->file_thresh = hmdfs_ratio_thresh(hwb->fs_file_ratio, +- hdtc->fs_thresh); +- hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio, +- hdtc->file_thresh); +- } +- /* Save bandwidth adjusted thresh */ +- hwb->bw_fs_thresh = hdtc->fs_thresh; +- hwb->bw_file_thresh = hdtc->file_thresh; +- /* Update time stamp */ +- hdtc->thresh_time_stamp = jiffies; +-} +- +-void hmdfs_update_ratelimit(struct hmdfs_writeback *hwb) +-{ +- struct hmdfs_dirty_throttle_control hdtc = {.hwb = hwb}; +- +- hmdfs_init_dirty_limit(&hdtc); +- +- /* hdtc.file_bg_thresh should be the lowest thresh */ +- hwb->ratelimit_pages = hdtc.file_bg_thresh / +- (num_online_cpus() * HMDFS_RATELIMIT_PAGES_GAP); +- if (hwb->ratelimit_pages < HMDFS_MIN_RATELIMIT_PAGES) +- hwb->ratelimit_pages = HMDFS_MIN_RATELIMIT_PAGES; +-} +- +-/* This is a copy of wb_max_pause() */ +-static unsigned long hmdfs_wb_pause(struct bdi_writeback *wb, +- unsigned long wb_dirty) +-{ +- unsigned long bw = wb->avg_write_bandwidth; +- unsigned long t; +- +- /* +- * Limit pause time for small memory systems. If sleeping for too long +- * time, a small pool of dirty/writeback pages may go empty and disk go +- * idle. +- * +- * 8 serves as the safety ratio. +- */ +- t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); +- t++; +- +- return min_t(unsigned long, t, HMDFS_MAX_PAUSE); +-} +- +-static unsigned long +-hmdfs_dirty_freerun_ceiling(struct hmdfs_dirty_throttle_control *hdtc, +- unsigned int type) +-{ +- if (type == HMDFS_DIRTY_FS) +- return (hdtc->fs_thresh + hdtc->fs_bg_thresh) / 2; +- else /* HMDFS_DIRTY_FILE_TYPE */ +- return (hdtc->file_thresh + hdtc->file_bg_thresh) / 2; +-} +- +-/* This is a copy of dirty_poll_interval() */ +-static inline unsigned long hmdfs_dirty_intv(unsigned long dirty, +- unsigned long thresh) +-{ +- if (thresh > dirty) +- return 1UL << (ilog2(thresh - dirty) >> 1); +- return 1; +-} +- +-static void hmdfs_balance_dirty_pages(struct address_space *mapping) +-{ +- struct inode *inode = mapping->host; +- struct super_block *sb = inode->i_sb; +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- struct hmdfs_writeback *hwb = sbi->h_wb; +- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; +- struct hmdfs_dirty_throttle_control hdtc = {.hwb = hwb}; +- unsigned int dirty_exceeded = 0; +- unsigned long start_time = jiffies; +- unsigned long pause = 0; +- +- /* Add delay work to trigger timeout writeback */ +- if (hwb->dirty_writeback_interval != 0) +- hmdfs_writeback_inodes_sb_delayed( +- sb, hwb->dirty_writeback_interval * 10); +- +- hmdfs_init_dirty_limit(&hdtc); +- +- while (1) { +- unsigned long exceed = 0; +- unsigned long diff; +- +- /* Per-filesystem overbalance writeback */ +- hdtc.fs_nr_dirty = wb_stat_sum(wb, WB_RECLAIMABLE); +- hdtc.fs_nr_reclaimable = +- hdtc.fs_nr_dirty + wb_stat_sum(wb, WB_WRITEBACK); +- if (hdtc.fs_nr_reclaimable < hdtc.file_bg_thresh) { +- diff = hmdfs_dirty_intv(hdtc.fs_nr_reclaimable, +- hdtc.file_thresh); +- goto free_running; +- } +- +- /* Per-file overbalance writeback */ +- 
hdtc.file_nr_dirty = +- hmdfs_idirty_pages(inode, PAGECACHE_TAG_DIRTY); +- hdtc.file_nr_reclaimable = +- hmdfs_idirty_pages(inode, PAGECACHE_TAG_WRITEBACK) + +- hdtc.file_nr_dirty; +- if ((hdtc.fs_nr_reclaimable < +- hmdfs_dirty_freerun_ceiling(&hdtc, HMDFS_DIRTY_FS)) && +- (hdtc.file_nr_reclaimable < +- hmdfs_dirty_freerun_ceiling(&hdtc, HMDFS_DIRTY_FILE))) { +- unsigned long fs_intv, file_intv; +- +- fs_intv = hmdfs_dirty_intv(hdtc.fs_nr_reclaimable, +- hdtc.fs_thresh); +- file_intv = hmdfs_dirty_intv(hdtc.file_nr_reclaimable, +- hdtc.file_thresh); +- diff = min(fs_intv, file_intv); +-free_running: +- current->nr_dirtied_pause = diff; +- current->nr_dirtied = 0; +- break; +- } +- +- if (hdtc.fs_nr_reclaimable >= +- hmdfs_dirty_freerun_ceiling(&hdtc, HMDFS_DIRTY_FS)) { +- if (unlikely(!writeback_in_progress(wb))) +- hmdfs_writeback_inodes_sb(sb); +- } else { +- hmdfs_writeback_inode(sb, inode); +- } +- +- /* +- * If dirty_auto_threshold is enabled, recalculate writeback +- * thresh according to current bandwidth. Update bandwidth +- * could be better if possible, but wb_update_bandwidth() is +- * not exported, so we cannot update bandwidth here, so the +- * bandwidth' update will be delayed if writing a lot to a +- * single file. +- */ +- if (hwb->dirty_auto_threshold && +- time_is_before_jiffies(hdtc.thresh_time_stamp + +- HMDFS_BANDWIDTH_INTERVAL)) +- hmdfs_update_dirty_limit(&hdtc); +- +- if (unlikely(hdtc.fs_nr_reclaimable >= hdtc.fs_thresh)) +- exceed |= HMDFS_FS_EXCEED; +- if (unlikely(hdtc.file_nr_reclaimable >= hdtc.file_thresh)) +- exceed |= HMDFS_FILE_EXCEED; +- +- if (!exceed) { +- trace_hmdfs_balance_dirty_pages(sbi, wb, &hdtc, +- 0UL, start_time); +- current->nr_dirtied = 0; +- break; +- } +- /* +- * Per-file or per-fs reclaimable pages exceed throttle limit, +- * sleep pause time and check again. +- */ +- dirty_exceeded |= exceed; +- if (dirty_exceeded && !hwb->dirty_exceeded) +- hwb->dirty_exceeded = true; +- +- /* Pause */ +- pause = hmdfs_wb_pause(wb, hdtc.fs_nr_reclaimable); +- +- trace_hmdfs_balance_dirty_pages(sbi, wb, &hdtc, pause, +- start_time); +- +- __set_current_state(TASK_KILLABLE); +- io_schedule_timeout(pause); +- +- if (fatal_signal_pending(current)) +- break; +- } +- +- if (!dirty_exceeded && hwb->dirty_exceeded) +- hwb->dirty_exceeded = false; +- +- if (hdtc.fs_nr_reclaimable >= hdtc.fs_bg_thresh) { +- if (unlikely(!writeback_in_progress(wb))) +- hmdfs_writeback_inodes_sb(sb); +- } else if (hdtc.file_nr_reclaimable >= hdtc.file_bg_thresh) { +- hmdfs_writeback_inode(sb, inode); +- } +-} +- +-void hmdfs_balance_dirty_pages_ratelimited(struct address_space *mapping) +-{ +- struct hmdfs_sb_info *sbi = mapping->host->i_sb->s_fs_info; +- struct hmdfs_writeback *hwb = sbi->h_wb; +- int *bdp_ratelimits = NULL; +- int ratelimit; +- +- if (!hwb->dirty_writeback_control) +- return; +- +- /* Add delay work to trigger timeout writeback */ +- if (hwb->dirty_writeback_interval != 0) +- hmdfs_writeback_inodes_sb_delayed( +- mapping->host->i_sb, +- hwb->dirty_writeback_interval * 10); +- +- ratelimit = current->nr_dirtied_pause; +- if (hwb->dirty_exceeded) +- ratelimit = min(ratelimit, HMDFS_DIRTY_EXCEED_RATELIMIT); +- +- /* +- * This prevents one CPU to accumulate too many dirtied pages +- * without calling into hmdfs_balance_dirty_pages(), which can +- * happen when there are 1000+ tasks, all of them start dirtying +- * pages at exactly the same time, hence all honoured too large +- * initial task->nr_dirtied_pause. 
+- */ +- preempt_disable(); +- bdp_ratelimits = this_cpu_ptr(hwb->bdp_ratelimits); +- +- trace_hmdfs_balance_dirty_pages_ratelimited(sbi, hwb, *bdp_ratelimits); +- +- if (unlikely(current->nr_dirtied >= ratelimit)) { +- *bdp_ratelimits = 0; +- } else if (unlikely(*bdp_ratelimits >= hwb->ratelimit_pages)) { +- *bdp_ratelimits = 0; +- ratelimit = 0; +- } +- preempt_enable(); +- +- if (unlikely(current->nr_dirtied >= ratelimit)) +- hmdfs_balance_dirty_pages(mapping); +-} +- +-void hmdfs_destroy_writeback(struct hmdfs_sb_info *sbi) +-{ +- if (!sbi->h_wb) +- return; +- +- flush_delayed_work(&sbi->h_wb->dirty_sb_writeback_work); +- flush_delayed_work(&sbi->h_wb->dirty_inode_writeback_work); +- destroy_workqueue(sbi->h_wb->dirty_sb_writeback_wq); +- destroy_workqueue(sbi->h_wb->dirty_inode_writeback_wq); +- free_percpu(sbi->h_wb->bdp_ratelimits); +- kfree(sbi->h_wb); +- sbi->h_wb = NULL; +-} +- +-int hmdfs_init_writeback(struct hmdfs_sb_info *sbi) +-{ +- struct hmdfs_writeback *hwb; +- char name[HMDFS_WQ_NAME_LEN]; +- int ret = -ENOMEM; +- +- hwb = kzalloc(sizeof(struct hmdfs_writeback), GFP_KERNEL); +- if (!hwb) +- return ret; +- +- hwb->sbi = sbi; +- hwb->wb = &sbi->sb->s_bdi->wb; +- hwb->dirty_writeback_control = true; +- hwb->dirty_writeback_interval = HM_DEFAULT_WRITEBACK_INTERVAL; +- hwb->dirty_file_bg_bytes = HMDFS_FILE_BG_WB_BYTES; +- hwb->dirty_fs_bg_bytes = HMDFS_FS_BG_WB_BYTES; +- hwb->dirty_file_bytes = HMDFS_FILE_WB_BYTES; +- hwb->dirty_fs_bytes = HMDFS_FS_WB_BYTES; +- hmdfs_calculate_dirty_thresh(hwb); +- hwb->bw_file_thresh = hwb->dirty_file_thresh; +- hwb->bw_fs_thresh = hwb->dirty_fs_thresh; +- spin_lock_init(&hwb->inode_list_lock); +- INIT_LIST_HEAD(&hwb->inode_list_head); +- hwb->dirty_exceeded = false; +- hwb->ratelimit_pages = HMDFS_DEF_RATELIMIT_PAGES; +- hwb->dirty_auto_threshold = true; +- hwb->writeback_timelimit = HMDFS_DEF_WB_TIMELIMIT; +- hwb->bw_thresh_lowerlimit = HMDFS_BW_THRESH_DEF_LIMIT; +- spin_lock_init(&hwb->write_bandwidth_lock); +- hwb->avg_write_bandwidth = 0; +- hwb->max_write_bandwidth = 0; +- hwb->min_write_bandwidth = ULONG_MAX; +- hwb->bdp_ratelimits = alloc_percpu(int); +- if (!hwb->bdp_ratelimits) +- goto free_hwb; +- +- snprintf(name, sizeof(name), "dfs_ino_wb%u", sbi->seq); +- hwb->dirty_inode_writeback_wq = create_singlethread_workqueue(name); +- if (!hwb->dirty_inode_writeback_wq) { +- hmdfs_err("Failed to create inode writeback workqueue!"); +- goto free_bdp; +- } +- snprintf(name, sizeof(name), "dfs_sb_wb%u", sbi->seq); +- hwb->dirty_sb_writeback_wq = create_singlethread_workqueue(name); +- if (!hwb->dirty_sb_writeback_wq) { +- hmdfs_err("Failed to create filesystem writeback workqueue!"); +- goto free_i_wq; +- } +- INIT_DELAYED_WORK(&hwb->dirty_sb_writeback_work, +- hmdfs_writeback_inodes_sb_handler); +- INIT_DELAYED_WORK(&hwb->dirty_inode_writeback_work, +- hmdfs_writeback_inode_handler); +- sbi->h_wb = hwb; +- return 0; +-free_i_wq: +- destroy_workqueue(hwb->dirty_inode_writeback_wq); +-free_bdp: +- free_percpu(hwb->bdp_ratelimits); +-free_hwb: +- kfree(hwb); +- return ret; +-} +diff --git a/fs/hmdfs/client_writeback.h b/fs/hmdfs/client_writeback.h +deleted file mode 100644 +index 689a5e733..000000000 +--- a/fs/hmdfs/client_writeback.h ++++ /dev/null +@@ -1,136 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/client_writeback.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */
+-
+-#ifndef CLIENT_WRITEBACK_H
+-#define CLIENT_WRITEBACK_H
+-
+-#include "hmdfs.h"
+-
+-/*
+- * HM_DEFAULT_WRITEBACK_INTERVAL - centiseconds
+- * HMDFS_FILE_BG_WB_BYTES - background per-file threshold 10M
+- * HMDFS_FS_BG_WB_BYTES - background per-fs threshold 50M
+- * HMDFS_FILE_WB_BYTES - per-file throttle threshold
+- * HMDFS_FS_WB_BYTES - per-fs throttle threshold
+- */
+-#define HM_DEFAULT_WRITEBACK_INTERVAL 500
+-#define HMDFS_FILE_BG_WB_BYTES (10 * 1024 * 1024)
+-#define HMDFS_FS_BG_WB_BYTES (50 * 1024 * 1024)
+-#define HMDFS_FILE_WB_BYTES (HMDFS_FILE_BG_WB_BYTES << 1)
+-#define HMDFS_FS_WB_BYTES (HMDFS_FS_BG_WB_BYTES << 1)
+-
+-/* writeback time limit (default 5s) */
+-#define HMDFS_DEF_WB_TIMELIMIT (5 * HZ)
+-#define HMDFS_MAX_WB_TIMELIMIT (30 * HZ)
+-
+-/* bandwidth adjusted lower limit (default 1MB/s) */
+-#define HMDFS_BW_THRESH_MIN_LIMIT (1 << (20 - PAGE_SHIFT))
+-#define HMDFS_BW_THRESH_MAX_LIMIT (100 << (20 - PAGE_SHIFT))
+-#define HMDFS_BW_THRESH_DEF_LIMIT HMDFS_BW_THRESH_MIN_LIMIT
+-
+-#define HMDFS_DIRTY_EXCEED_RATELIMIT (32 >> (PAGE_SHIFT - 10))
+-#define HMDFS_RATELIMIT_PAGES_GAP 16
+-#define HMDFS_DEF_RATELIMIT_PAGES 32
+-#define HMDFS_MIN_RATELIMIT_PAGES 1
+-
+-struct hmdfs_dirty_throttle_control {
+- struct hmdfs_writeback *hwb;
+- /* last time threshes are updated */
+- unsigned long thresh_time_stamp;
+-
+- unsigned long file_bg_thresh;
+- unsigned long fs_bg_thresh;
+- unsigned long file_thresh;
+- unsigned long fs_thresh;
+-
+- unsigned long file_nr_dirty;
+- unsigned long fs_nr_dirty;
+- unsigned long file_nr_reclaimable;
+- unsigned long fs_nr_reclaimable;
+-};
+-
+-struct hmdfs_writeback {
+- struct hmdfs_sb_info *sbi;
+- struct bdi_writeback *wb;
+- /* enable hmdfs dirty writeback control */
+- bool dirty_writeback_control;
+-
+- /* writeback per-file inode list */
+- struct list_head inode_list_head;
+- spinlock_t inode_list_lock;
+-
+- /* centiseconds */
+- unsigned int dirty_writeback_interval;
+- /* per-file background threshold */
+- unsigned long dirty_file_bg_bytes;
+- unsigned long dirty_file_bg_thresh;
+- /* per-fs background threshold */
+- unsigned long dirty_fs_bg_bytes;
+- unsigned long dirty_fs_bg_thresh;
+- /* per-file throttle threshold */
+- unsigned long dirty_file_bytes;
+- unsigned long dirty_file_thresh;
+- /* per-fs throttle threshold */
+- unsigned long dirty_fs_bytes;
+- unsigned long dirty_fs_thresh;
+- /* ratio between background thresh and throttle thresh */
+- unsigned long fs_bg_ratio;
+- unsigned long file_bg_ratio;
+- /* ratio between file and fs throttle thresh */
+- unsigned long fs_file_ratio;
+-
+- /*
+- * Enable auto-thresh. If enabled, the background and throttle
+- * thresh are no longer a fixed value stored in dirty_*_bytes;
+- * they are determined by the bandwidth of the network and the
+- * writeback timelimit.
+- */
+- bool dirty_auto_threshold;
+- unsigned int writeback_timelimit;
+- /* bandwidth adjusted filesystem throttle thresh */
+- unsigned long bw_fs_thresh;
+- /* bandwidth adjusted per-file throttle thresh */
+- unsigned long bw_file_thresh;
+- /* bandwidth adjusted thresh lower limit */
+- unsigned long bw_thresh_lowerlimit;
+-
+- /* reclaimable pages exceed throttle thresh */
+- bool dirty_exceeded;
+- /* percpu dirty pages ratelimit */
+- long ratelimit_pages;
+- /* count percpu dirty pages */
+- int __percpu *bdp_ratelimits;
+-
+- /* per-fs writeback work */
+- struct workqueue_struct *dirty_sb_writeback_wq;
+- struct delayed_work dirty_sb_writeback_work;
+- /* per-file writeback work */
+- struct workqueue_struct *dirty_inode_writeback_wq;
+- struct delayed_work dirty_inode_writeback_work;
+-
+- /* per-fs writeback bandwidth */
+- spinlock_t write_bandwidth_lock;
+- unsigned long max_write_bandwidth;
+- unsigned long min_write_bandwidth;
+- unsigned long avg_write_bandwidth;
+-};
+-
+-void hmdfs_writeback_inodes_sb_handler(struct work_struct *work);
+-
+-void hmdfs_writeback_inode_handler(struct work_struct *work);
+-
+-void hmdfs_calculate_dirty_thresh(struct hmdfs_writeback *hwb);
+-
+-void hmdfs_update_ratelimit(struct hmdfs_writeback *hwb);
+-
+-void hmdfs_balance_dirty_pages_ratelimited(struct address_space *mapping);
+-
+-void hmdfs_destroy_writeback(struct hmdfs_sb_info *sbi);
+-
+-int hmdfs_init_writeback(struct hmdfs_sb_info *sbi);
+-
+-#endif
+diff --git a/fs/hmdfs/comm/connection.c b/fs/hmdfs/comm/connection.c
+deleted file mode 100644
+index 44a4cb933..000000000
+--- a/fs/hmdfs/comm/connection.c
++++ /dev/null
+@@ -1,1279 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/comm/connection.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include "connection.h"
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-#include "device_node.h"
+-#include "hmdfs.h"
+-#include "message_verify.h"
+-#include "node_cb.h"
+-#include "protocol.h"
+-#include "socket_adapter.h"
+-
+-#ifdef CONFIG_HMDFS_FS_ENCRYPTION
+-#include "crypto.h"
+-#endif
+-
+-#define HMDFS_WAIT_REQUEST_END_MIN 20
+-#define HMDFS_WAIT_REQUEST_END_MAX 30
+-
+-#define HMDFS_WAIT_CONN_RELEASE (3 * HZ)
+-
+-#define HMDFS_RETRY_WB_WQ_MAX_ACTIVE 16
+-
+-static void hs_fill_crypto_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct crypto_body *body = NULL;
+-
+- if (len < sizeof(struct crypto_body)) {
+- hmdfs_info("crypto body len %u is err", len);
+- return;
+- }
+- body = (struct crypto_body *)data;
+-
+- /* this is only for test; the right algorithm needs to be filled in later. */
+- body->crypto |= HMDFS_HS_CRYPTO_KTLS_AES128;
+- body->crypto = cpu_to_le32(body->crypto);
+-
+- hmdfs_info("fill crypto.
crypto=0x%08x", body->crypto);
+-}
+-
+-static int hs_parse_crypto_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct crypto_body *hs_crypto = NULL;
+- uint32_t crypto;
+-
+- if (len < sizeof(struct crypto_body)) {
+- hmdfs_info("handshake msg len error, len=%u", len);
+- return -1;
+- }
+- hs_crypto = (struct crypto_body *)data;
+- crypto = le16_to_cpu(hs_crypto->crypto);
+- conn_impl->crypto = crypto;
+- hmdfs_info("ops=%u, len=%u, crypto=0x%08x", ops, len, crypto);
+- return 0;
+-}
+-
+-static void hs_fill_case_sense_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct case_sense_body *body = (struct case_sense_body *)data;
+-
+- if (len < sizeof(struct case_sense_body)) {
+- hmdfs_err("case sensitive len %u is err", len);
+- return;
+- }
+- body->case_sensitive = conn_impl->node->sbi->s_case_sensitive;
+-}
+-
+-static int hs_parse_case_sense_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct case_sense_body *body = (struct case_sense_body *)data;
+- __u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0;
+-
+- if (len < sizeof(struct case_sense_body)) {
+- hmdfs_info("case sensitive len %u is err", len);
+- return -1;
+- }
+- if (body->case_sensitive != sensitive) {
+- hmdfs_err("case sensitive inconsistent, server: %u, client: %u, ops: %u",
+- body->case_sensitive, sensitive, ops);
+- return -1;
+- }
+- return 0;
+-}
+-
+-static void hs_fill_feature_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct feature_body *body = (struct feature_body *)data;
+-
+- if (len < sizeof(struct feature_body)) {
+- hmdfs_err("feature len %u is err", len);
+- return;
+- }
+- body->features = cpu_to_le64(conn_impl->node->sbi->s_features);
+- body->reserved = cpu_to_le64(0);
+-}
+-
+-static int hs_parse_feature_data(struct connection *conn_impl, __u8 ops,
+- void *data, __u32 len)
+-{
+- struct feature_body *body = (struct feature_body *)data;
+-
+- if (len < sizeof(struct feature_body)) {
+- hmdfs_err("feature len %u is err", len);
+- return -1;
+- }
+-
+- conn_impl->node->features = le64_to_cpu(body->features);
+- return 0;
+-}
+-
+-/* should ensure len is smaller than 0xffff.
*/ +-static const struct conn_hs_extend_reg s_hs_extend_reg[HS_EXTEND_CODE_COUNT] = { +- [HS_EXTEND_CODE_CRYPTO] = { +- .len = sizeof(struct crypto_body), +- .resv = 0, +- .filler = hs_fill_crypto_data, +- .parser = hs_parse_crypto_data +- }, +- [HS_EXTEND_CODE_CASE_SENSE] = { +- .len = sizeof(struct case_sense_body), +- .resv = 0, +- .filler = hs_fill_case_sense_data, +- .parser = hs_parse_case_sense_data, +- }, +- [HS_EXTEND_CODE_FEATURE_SUPPORT] = { +- .len = sizeof(struct feature_body), +- .resv = 0, +- .filler = hs_fill_feature_data, +- .parser = hs_parse_feature_data, +- }, +- [HS_EXTEND_CODE_FEATURE_SUPPORT] = { +- .len = sizeof(struct feature_body), +- .resv = 0, +- .filler = hs_fill_feature_data, +- .parser = hs_parse_feature_data, +- }, +-}; +- +-static __u32 hs_get_extend_data_len(void) +-{ +- __u32 len; +- int i; +- +- len = sizeof(struct conn_hs_extend_head); +- +- for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) { +- len += sizeof(struct extend_field_head); +- len += s_hs_extend_reg[i].len; +- } +- +- hmdfs_info("extend data total len is %u", len); +- return len; +-} +- +-static void hs_fill_extend_data(struct connection *conn_impl, __u8 ops, +- void *extend_data, __u32 len) +-{ +- struct conn_hs_extend_head *extend_head = NULL; +- struct extend_field_head *field = NULL; +- uint8_t *body = NULL; +- __u32 offset; +- __u16 i; +- +- if (sizeof(struct conn_hs_extend_head) > len) { +- hmdfs_info("len error. len=%u", len); +- return; +- } +- extend_head = (struct conn_hs_extend_head *)extend_data; +- extend_head->field_cn = 0; +- offset = sizeof(struct conn_hs_extend_head); +- +- for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) { +- if (sizeof(struct extend_field_head) > (len - offset)) +- break; +- field = (struct extend_field_head *)((uint8_t *)extend_data + +- offset); +- offset += sizeof(struct extend_field_head); +- +- if (s_hs_extend_reg[i].len > (len - offset)) +- break; +- body = (uint8_t *)extend_data + offset; +- offset += s_hs_extend_reg[i].len; +- +- field->code = cpu_to_le16(i); +- field->len = cpu_to_le16(s_hs_extend_reg[i].len); +- +- if (s_hs_extend_reg[i].filler) +- s_hs_extend_reg[i].filler(conn_impl, ops, +- body, s_hs_extend_reg[i].len); +- +- extend_head->field_cn += 1; +- } +- +- extend_head->field_cn = cpu_to_le32(extend_head->field_cn); +-} +- +-static int hs_parse_extend_data(struct connection *conn_impl, __u8 ops, +- void *extend_data, __u32 extend_len) +-{ +- struct conn_hs_extend_head *extend_head = NULL; +- struct extend_field_head *field = NULL; +- uint8_t *body = NULL; +- __u32 offset; +- __u32 field_cnt; +- __u16 code; +- __u16 len; +- int i; +- int ret; +- +- if (sizeof(struct conn_hs_extend_head) > extend_len) { +- hmdfs_err("ops=%u,extend_len=%u", ops, extend_len); +- return -1; +- } +- extend_head = (struct conn_hs_extend_head *)extend_data; +- field_cnt = le32_to_cpu(extend_head->field_cn); +- hmdfs_info("extend_len=%u,field_cnt=%u", extend_len, field_cnt); +- +- offset = sizeof(struct conn_hs_extend_head); +- +- for (i = 0; i < field_cnt; i++) { +- if (sizeof(struct extend_field_head) > (extend_len - offset)) { +- hmdfs_err("cnt err, op=%u, extend_len=%u, cnt=%u, i=%u", +- ops, extend_len, field_cnt, i); +- return -1; +- } +- field = (struct extend_field_head *)((uint8_t *)extend_data + +- offset); +- offset += sizeof(struct extend_field_head); +- code = le16_to_cpu(field->code); +- len = le16_to_cpu(field->len); +- if (len > (extend_len - offset)) { +- hmdfs_err("len err, op=%u, extend_len=%u, cnt=%u, i=%u", +- ops, extend_len, field_cnt, i); +- 
hmdfs_err("len err, code=%u, len=%u, offset=%u", code, +- len, offset); +- return -1; +- } +- +- body = (uint8_t *)extend_data + offset; +- offset += len; +- if ((code < HS_EXTEND_CODE_COUNT) && +- (s_hs_extend_reg[code].parser)) { +- ret = s_hs_extend_reg[code].parser(conn_impl, ops, +- body, len); +- if (ret) +- return ret; +- } +- } +- return 0; +-} +- +-static int hs_proc_msg_data(struct connection *conn_impl, __u8 ops, void *data, +- __u32 data_len) +-{ +- struct connection_handshake_req *hs_req = NULL; +- uint8_t *extend_data = NULL; +- __u32 extend_len; +- __u32 req_len; +- int ret; +- +- if (!data) { +- hmdfs_err("err, msg data is null"); +- return -1; +- } +- +- if (data_len < sizeof(struct connection_handshake_req)) { +- hmdfs_err("ack msg data len error. data_len=%u, device_id=%llu", +- data_len, conn_impl->node->device_id); +- return -1; +- } +- +- hs_req = (struct connection_handshake_req *)data; +- req_len = le32_to_cpu(hs_req->len); +- if (req_len > (data_len - sizeof(struct connection_handshake_req))) { +- hmdfs_info( +- "ack msg hs_req len(%u) error. data_len=%u, device_id=%llu", +- req_len, data_len, conn_impl->node->device_id); +- return -1; +- } +- extend_len = +- data_len - sizeof(struct connection_handshake_req) - req_len; +- extend_data = (uint8_t *)data + +- sizeof(struct connection_handshake_req) + req_len; +- ret = hs_parse_extend_data(conn_impl, ops, extend_data, extend_len); +- if (!ret) +- hmdfs_info( +- "hs msg rcv, ops=%u, data_len=%u, device_id=%llu, req_len=%u", +- ops, data_len, conn_impl->node->device_id, hs_req->len); +- return ret; +-} +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +-static int connection_handshake_init_tls(struct connection *conn_impl, __u8 ops) +-{ +- // init ktls config, use key1/key2 as init write-key of each direction +- __u8 key1[HMDFS_KEY_SIZE]; +- __u8 key2[HMDFS_KEY_SIZE]; +- int ret; +- +- if ((ops != CONNECT_MESG_HANDSHAKE_RESPONSE) && +- (ops != CONNECT_MESG_HANDSHAKE_ACK)) { +- hmdfs_err("ops %u is err", ops); +- return -EINVAL; +- } +- +- update_key(conn_impl->master_key, key1, HKDF_TYPE_KEY_INITIATOR); +- update_key(conn_impl->master_key, key2, HKDF_TYPE_KEY_ACCEPTER); +- +- if (ops == CONNECT_MESG_HANDSHAKE_ACK) { +- memcpy(conn_impl->send_key, key1, HMDFS_KEY_SIZE); +- memcpy(conn_impl->recv_key, key2, HMDFS_KEY_SIZE); +- } else { +- memcpy(conn_impl->send_key, key2, HMDFS_KEY_SIZE); +- memcpy(conn_impl->recv_key, key1, HMDFS_KEY_SIZE); +- } +- +- memset(key1, 0, HMDFS_KEY_SIZE); +- memset(key2, 0, HMDFS_KEY_SIZE); +- +- hmdfs_info("hs: ops=%u start set crypto tls", ops); +- ret = tls_crypto_info_init(conn_impl); +- if (ret) +- hmdfs_err("setting tls fail. 
ops is %u", ops); +- +- return ret; +-} +-#endif +- +-static int do_send_handshake(struct connection *conn_impl, __u8 ops, +- __le16 request_id) +-{ +- int err; +- struct connection_msg_head *hs_head = NULL; +- struct connection_handshake_req *hs_data = NULL; +- uint8_t *hs_extend_data = NULL; +- struct hmdfs_send_data msg; +- __u32 send_len; +- __u32 len; +- __u32 extend_len; +- char buf[HMDFS_CID_SIZE] = { 0 }; +- +- len = scnprintf(buf, HMDFS_CID_SIZE, "%llu", 0ULL); +- send_len = sizeof(struct connection_msg_head) + +- sizeof(struct connection_handshake_req) + len; +- +- if (((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) || +- (ops == CONNECT_MESG_HANDSHAKE_ACK))) { +- extend_len = hs_get_extend_data_len(); +- send_len += extend_len; +- } +- +- hs_head = kzalloc(send_len, GFP_KERNEL); +- if (!hs_head) +- return -ENOMEM; +- +- hs_data = (struct connection_handshake_req +- *)((uint8_t *)hs_head + +- sizeof(struct connection_msg_head)); +- +- hs_data->len = cpu_to_le32(len); +- memcpy(hs_data->dev_id, buf, len); +- +- if (((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) || +- ops == CONNECT_MESG_HANDSHAKE_ACK)) { +- hs_extend_data = (uint8_t *)hs_data + +- sizeof(struct connection_handshake_req) + len; +- hs_fill_extend_data(conn_impl, ops, hs_extend_data, extend_len); +- } +- +- hs_head->magic = HMDFS_MSG_MAGIC; +- hs_head->version = HMDFS_VERSION; +- hs_head->flags |= 0x1; +- hmdfs_info("Send handshake message: ops = %d, fd = %d", ops, +- ((struct tcp_handle *)(conn_impl->connect_handle))->fd); +- hs_head->operations = ops; +- hs_head->request_id = request_id; +- hs_head->datasize = cpu_to_le32(send_len); +- hs_head->source = 0; +- hs_head->msg_id = 0; +- +- msg.head = hs_head; +- msg.head_len = sizeof(struct connection_msg_head); +- msg.data = hs_data; +- msg.len = send_len - msg.head_len; +- msg.sdesc = NULL; +- msg.sdesc_len = 0; +- err = conn_impl->send_message(conn_impl, &msg); +- kfree(hs_head); +- return err; +-} +- +-static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node) +-{ +- int sum = 0; +- int i; +- +- for (i = 0; i < RAW_NODE_EVT_NR; i++) +- sum += node->waiting_evt[i]; +- +- return sum; +-} +- +-static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt, +- unsigned int *seq) +-{ +- int last; +- int sum; +- unsigned int next; +- +- sum = hmdfs_node_waiting_evt_sum(node); +- if (sum % RAW_NODE_EVT_NR) +- last = !node->pending_evt; +- else +- last = node->pending_evt; +- +- /* duplicated event */ +- if (evt == last) { +- node->dup_evt[evt]++; +- return 0; +- } +- +- node->waiting_evt[evt]++; +- hmdfs_debug("add node->waiting_evt[%d]=%d", evt, +- node->waiting_evt[evt]); +- +- /* offline wait + online wait + offline wait = offline wait +- * online wait + offline wait + online wait != online wait +- * As the first online related resource (e.g. fd) must be invalidated +- */ +- if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 && +- node->waiting_evt[RAW_NODE_EVT_ON] >= 1) { +- node->waiting_evt[RAW_NODE_EVT_OFF] -= 1; +- node->waiting_evt[RAW_NODE_EVT_ON] -= 1; +- node->seq_wr_idx -= 2; +- node->merged_evt += 2; +- } +- +- next = hmdfs_node_inc_evt_seq(node); +- node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next; +- *seq = next; +- +- return 1; +-} +- +-static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt, +- bool sync, unsigned int seq) +-{ +- int evt = (raw_evt == RAW_NODE_EVT_OFF) ? NODE_EVT_OFFLINE : +- NODE_EVT_ONLINE; +- int cur_evt_idx = sync ? 
1 : 0; +- +- node->cur_evt[cur_evt_idx] = raw_evt; +- node->cur_evt_seq[cur_evt_idx] = seq; +- hmdfs_node_call_evt_cb(node, evt, sync, seq); +- node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR; +-} +- +-static void hmdfs_node_evt_work(struct work_struct *work) +-{ +- struct hmdfs_peer *node = +- container_of(work, struct hmdfs_peer, evt_dwork.work); +- unsigned int seq; +- +- /* +- * N-th sync cb completes before N-th async cb, +- * so use seq_lock as a barrier in read & write path +- * to ensure we can read the required seq. +- */ +- mutex_lock(&node->seq_lock); +- seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR]; +- hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq); +- mutex_unlock(&node->seq_lock); +- +- mutex_lock(&node->evt_lock); +- if (hmdfs_node_waiting_evt_sum(node)) { +- node->pending_evt = !node->pending_evt; +- node->pending_evt_seq = +- node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR]; +- node->waiting_evt[node->pending_evt]--; +- /* sync cb has been done */ +- schedule_delayed_work(&node->evt_dwork, +- node->sbi->async_cb_delay * HZ); +- } else { +- node->last_evt = node->pending_evt; +- node->pending_evt = RAW_NODE_EVT_NR; +- } +- mutex_unlock(&node->evt_lock); +-} +- +-/* +- * The running orders of cb are: +- * +- * (1) sync callbacks are invoked according to the queue order of raw events: +- * ensured by seq_lock. +- * (2) async callbacks are invoked according to the queue order of raw events: +- * ensured by evt_lock & evt_dwork +- * (3) async callback is invoked after sync callback of the same raw event: +- * ensured by seq_lock. +- * (4) async callback of N-th raw event and sync callback of (N+x)-th raw +- * event can run concurrently. +- */ +-static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt) +-{ +- unsigned int seq = 0; +- +- mutex_lock(&node->evt_lock); +- if (node->pending_evt == RAW_NODE_EVT_NR) { +- if (evt == node->last_evt) { +- node->dup_evt[evt]++; +- mutex_unlock(&node->evt_lock); +- return; +- } +- node->pending_evt = evt; +- seq = hmdfs_node_inc_evt_seq(node); +- node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq; +- node->pending_evt_seq = seq; +- mutex_lock(&node->seq_lock); +- mutex_unlock(&node->evt_lock); +- /* call sync cb, then async cb */ +- hmdfs_run_evt_cb_verbosely(node, evt, true, seq); +- mutex_unlock(&node->seq_lock); +- schedule_delayed_work(&node->evt_dwork, +- node->sbi->async_cb_delay * HZ); +- } else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) { +- /* +- * Take seq_lock firstly to ensure N-th sync cb +- * is called before N-th async cb. 
+- */ +- mutex_lock(&node->seq_lock); +- mutex_unlock(&node->evt_lock); +- hmdfs_run_evt_cb_verbosely(node, evt, true, seq); +- mutex_unlock(&node->seq_lock); +- } else { +- mutex_unlock(&node->evt_lock); +- } +-} +- +-void connection_send_handshake(struct connection *conn_impl, __u8 ops, +- __le16 request_id) +-{ +- struct tcp_handle *tcp = NULL; +- int err = do_send_handshake(conn_impl, ops, request_id); +- +- if (likely(err >= 0)) +- return; +- +- tcp = conn_impl->connect_handle; +- hmdfs_err("Failed to send handshake: err = %d, fd = %d", err, tcp->fd); +- hmdfs_reget_connection(conn_impl); +-} +- +-void connection_handshake_notify(struct hmdfs_peer *node, int notify_type) +-{ +- struct notify_param param; +- +- param.notify = notify_type; +- param.fd = INVALID_SOCKET_FD; +- memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE); +- notify(node, ¶m); +-} +- +- +-void peer_online(struct hmdfs_peer *peer) +-{ +- // To evaluate if someone else has made the peer online +- u8 prev_stat = xchg(&peer->status, NODE_STAT_ONLINE); +- unsigned long jif_tmp = jiffies; +- +- if (prev_stat == NODE_STAT_ONLINE) +- return; +- WRITE_ONCE(peer->conn_time, jif_tmp); +- WRITE_ONCE(peer->sbi->connections.recent_ol, jif_tmp); +- hmdfs_queue_raw_node_evt(peer, RAW_NODE_EVT_ON); +-} +- +-void connection_to_working(struct hmdfs_peer *node) +-{ +- struct connection *conn_impl = NULL; +- struct tcp_handle *tcp = NULL; +- +- if (!node) +- return; +- mutex_lock(&node->conn_impl_list_lock); +- list_for_each_entry(conn_impl, &node->conn_impl_list, list) { +- if (conn_impl->type == CONNECT_TYPE_TCP && +- conn_impl->status == CONNECT_STAT_WAIT_RESPONSE) { +- tcp = conn_impl->connect_handle; +- hmdfs_info("fd %d to working", tcp->fd); +- conn_impl->status = CONNECT_STAT_WORKING; +- } +- } +- mutex_unlock(&node->conn_impl_list_lock); +- peer_online(node); +-} +- +-void connection_handshake_recv_handler(struct connection *conn_impl, void *buf, +- void *data, __u32 data_len) +-{ +- __u8 ops; +- __u8 status; +- int fd = ((struct tcp_handle *)(conn_impl->connect_handle))->fd; +- struct connection_msg_head *head = (struct connection_msg_head *)buf; +- int ret; +- +- if (head->version != HMDFS_VERSION) +- goto out; +- +- conn_impl->node->version = head->version; +- ops = head->operations; +- status = conn_impl->status; +- switch (ops) { +- case CONNECT_MESG_HANDSHAKE_REQUEST: +- hmdfs_info( +- "Recved handshake request: device_id = %llu, head->len = %d, tcp->fd = %d", +- conn_impl->node->device_id, head->datasize, fd); +- connection_send_handshake(conn_impl, +- CONNECT_MESG_HANDSHAKE_RESPONSE, +- head->msg_id); +- conn_impl->status = CONNECT_STAT_WAIT_ACK; +- conn_impl->node->status = NODE_STAT_SHAKING; +- break; +- case CONNECT_MESG_HANDSHAKE_RESPONSE: +- hmdfs_info( +- "Recved handshake response: device_id = %llu, cmd->status = %hhu, tcp->fd = %d", +- conn_impl->node->device_id, status, fd); +- +- ret = hs_proc_msg_data(conn_impl, ops, data, data_len); +- if (ret) +- goto nego_err; +- connection_send_handshake(conn_impl, +- CONNECT_MESG_HANDSHAKE_ACK, +- head->msg_id); +- hmdfs_info("respon rcv handle,conn_impl->crypto=0x%0x", +- conn_impl->crypto); +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- ret = connection_handshake_init_tls(conn_impl, ops); +- if (ret) { +- hmdfs_err("init_tls_key fail, ops %u", ops); +- goto out; +- } +-#endif +- +- conn_impl->status = CONNECT_STAT_WORKING; +- peer_online(conn_impl->node); +- break; +- case CONNECT_MESG_HANDSHAKE_ACK: +- ret = hs_proc_msg_data(conn_impl, ops, data, data_len); +- if (ret) +- 
goto nego_err; +- hmdfs_info("ack rcv handle, conn_impl->crypto=0x%0x", +- conn_impl->crypto); +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- ret = connection_handshake_init_tls(conn_impl, ops); +- if (ret) { +- hmdfs_err("init_tls_key fail, ops %u", ops); +- goto out; +- } +-#endif +- conn_impl->status = CONNECT_STAT_WORKING; +- peer_online(conn_impl->node); +- break; +- fallthrough; +- default: +- break; +- } +-out: +- kfree(data); +- return; +-nego_err: +- conn_impl->status = CONNECT_STAT_NEGO_FAIL; +- connection_handshake_notify(conn_impl->node, NOTIFY_OFFLINE); +- hmdfs_err("protocol negotiation failed, remote device_id = %llu, tcp->fd = %d", +- conn_impl->node->device_id, fd); +- goto out; +-} +- +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +-static void update_tls_crypto_key(struct connection *conn, +- struct hmdfs_head_cmd *head, void *data, +- __u32 data_len) +-{ +- // rekey message handler +- struct connection_rekey_request *rekey_req = NULL; +- int ret = 0; +- +- if (hmdfs_message_verify(conn->node, head, data) < 0) { +- hmdfs_err("Rekey msg %d has been abandoned", head->msg_id); +- goto out_err; +- } +- +- hmdfs_info("recv REKEY request"); +- set_crypto_info(conn, SET_CRYPTO_RECV); +- // update send key if requested +- rekey_req = data; +- if (le32_to_cpu(rekey_req->update_request) == UPDATE_REQUESTED) { +- ret = tcp_send_rekey_request(conn); +- if (ret == 0) +- set_crypto_info(conn, SET_CRYPTO_SEND); +- } +-out_err: +- kfree(data); +-} +- +-static bool cmd_update_tls_crypto_key(struct connection *conn, +- struct hmdfs_head_cmd *head) +-{ +- struct tcp_handle *tcp = conn->connect_handle; +- +- if (conn->type != CONNECT_TYPE_TCP || !tcp) +- return false; +- return head->operations.command == F_CONNECT_REKEY; +-} +-#endif +- +-void connection_working_recv_handler(struct connection *conn_impl, void *buf, +- void *data, __u32 data_len) +-{ +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- if (cmd_update_tls_crypto_key(conn_impl, buf)) { +- update_tls_crypto_key(conn_impl, buf, data, data_len); +- return; +- } +-#endif +- hmdfs_recv_mesg_callback(conn_impl->node, buf, data); +-} +- +-static void connection_release(struct kref *ref) +-{ +- struct tcp_handle *tcp = NULL; +- struct connection *conn = container_of(ref, struct connection, ref_cnt); +- +- hmdfs_info("connection release"); +- memset(conn->master_key, 0, HMDFS_KEY_SIZE); +- memset(conn->send_key, 0, HMDFS_KEY_SIZE); +- memset(conn->recv_key, 0, HMDFS_KEY_SIZE); +- if (conn->close) +- conn->close(conn); +- tcp = conn->connect_handle; +- crypto_free_aead(conn->tfm); +- // need to check and test: fput(tcp->sock->file); +- if (tcp && tcp->sock) { +- hmdfs_info("connection release: fd = %d, refcount %ld", tcp->fd, +- file_count(tcp->sock->file)); +- sockfd_put(tcp->sock); +- } +- if (tcp && tcp->recv_cache) +- kmem_cache_destroy(tcp->recv_cache); +- +- if (!list_empty(&conn->list)) { +- mutex_lock(&conn->node->conn_impl_list_lock); +- list_del(&conn->list); +- mutex_unlock(&conn->node->conn_impl_list_lock); +- /* +- * wakup hmdfs_disconnect_node to check +- * conn_deleting_list if empty. 
+- */ +- wake_up_interruptible(&conn->node->deleting_list_wq); +- } +- +- kfree(tcp); +- kfree(conn); +-} +- +-static void hmdfs_peer_release(struct kref *ref) +-{ +- struct hmdfs_peer *peer = container_of(ref, struct hmdfs_peer, ref_cnt); +- struct mutex *lock = &peer->sbi->connections.node_lock; +- +- if (!list_empty(&peer->list)) +- hmdfs_info("releasing a on-sbi peer: device_id %llu ", +- peer->device_id); +- else +- hmdfs_info("releasing a redundant peer: device_id %llu ", +- peer->device_id); +- +- cancel_delayed_work_sync(&peer->evt_dwork); +- list_del(&peer->list); +- idr_destroy(&peer->msg_idr); +- idr_destroy(&peer->file_id_idr); +- flush_workqueue(peer->req_handle_wq); +- flush_workqueue(peer->async_wq); +- flush_workqueue(peer->retry_wb_wq); +- destroy_workqueue(peer->dentry_wq); +- destroy_workqueue(peer->req_handle_wq); +- destroy_workqueue(peer->async_wq); +- destroy_workqueue(peer->retry_wb_wq); +- destroy_workqueue(peer->reget_conn_wq); +- kfree(peer); +- mutex_unlock(lock); +-} +- +-void connection_put(struct connection *conn) +-{ +- struct mutex *lock = &conn->ref_lock; +- +- kref_put_mutex(&conn->ref_cnt, connection_release, lock); +-} +- +-void peer_put(struct hmdfs_peer *peer) +-{ +- struct mutex *lock = &peer->sbi->connections.node_lock; +- +- kref_put_mutex(&peer->ref_cnt, hmdfs_peer_release, lock); +-} +- +-static void hmdfs_dump_deleting_list(struct hmdfs_peer *node) +-{ +- struct connection *con = NULL; +- struct tcp_handle *tcp = NULL; +- int count = 0; +- +- mutex_lock(&node->conn_impl_list_lock); +- list_for_each_entry(con, &node->conn_deleting_list, list) { +- tcp = con->connect_handle; +- hmdfs_info("deleting list %d:device_id %llu tcp_fd %d refcnt %d", +- count, node->device_id, tcp ? tcp->fd : -1, +- kref_read(&con->ref_cnt)); +- count++; +- } +- mutex_unlock(&node->conn_impl_list_lock); +-} +- +-static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node) +-{ +- bool empty = false; +- +- mutex_lock(&node->conn_impl_list_lock); +- empty = list_empty(&node->conn_deleting_list); +- mutex_unlock(&node->conn_impl_list_lock); +- +- return empty; +-} +- +-void hmdfs_disconnect_node(struct hmdfs_peer *node) +-{ +- LIST_HEAD(local_conns); +- struct connection *conn_impl = NULL; +- struct connection *next = NULL; +- struct tcp_handle *tcp = NULL; +- +- if (unlikely(!node)) +- return; +- +- hmdfs_node_inc_evt_seq(node); +- /* Refer to comments in hmdfs_is_node_offlined() */ +- smp_mb__after_atomic(); +- node->status = NODE_STAT_OFFLINE; +- hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id); +- +- mutex_lock(&node->conn_impl_list_lock); +- if (!list_empty(&node->conn_impl_list)) +- list_replace_init(&node->conn_impl_list, &local_conns); +- mutex_unlock(&node->conn_impl_list_lock); +- +- list_for_each_entry_safe(conn_impl, next, &local_conns, list) { +- tcp = conn_impl->connect_handle; +- if (tcp && tcp->sock) { +- kernel_sock_shutdown(tcp->sock, SHUT_RDWR); +- hmdfs_info("shudown sock: fd = %d, refcount %ld", +- tcp->fd, file_count(tcp->sock->file)); +- } +- if (tcp) +- tcp->fd = INVALID_SOCKET_FD; +- +- tcp_close_socket(tcp); +- list_del_init(&conn_impl->list); +- +- connection_put(conn_impl); +- } +- +- if (wait_event_interruptible_timeout(node->deleting_list_wq, +- hmdfs_conn_deleting_list_empty(node), +- HMDFS_WAIT_CONN_RELEASE) <= 0) +- hmdfs_dump_deleting_list(node); +- +- /* wait all request process end */ +- spin_lock(&node->idr_lock); +- while (node->msg_idr_process) { +- spin_unlock(&node->idr_lock); +- 
usleep_range(HMDFS_WAIT_REQUEST_END_MIN, +- HMDFS_WAIT_REQUEST_END_MAX); +- spin_lock(&node->idr_lock); +- } +- spin_unlock(&node->idr_lock); +- +- hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF); +-} +- +-static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt) +-{ +- unsigned int seq = hmdfs_node_inc_evt_seq(node); +- +- mutex_lock(&node->seq_lock); +- hmdfs_node_call_evt_cb(node, evt, true, seq); +- mutex_unlock(&node->seq_lock); +-} +- +-static void hmdfs_del_peer(struct hmdfs_peer *node) +-{ +- /* +- * No need for offline evt cb, because all files must +- * have been flushed and closed, else the filesystem +- * will be un-mountable. +- */ +- cancel_delayed_work_sync(&node->evt_dwork); +- +- hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL); +- +- hmdfs_release_peer_sysfs(node); +- +- flush_workqueue(node->reget_conn_wq); +- peer_put(node); +-} +- +-void hmdfs_connections_stop(struct hmdfs_sb_info *sbi) +-{ +- struct hmdfs_peer *node = NULL; +- struct hmdfs_peer *con_tmp = NULL; +- +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list, +- list) { +- mutex_unlock(&sbi->connections.node_lock); +- hmdfs_disconnect_node(node); +- hmdfs_del_peer(node); +- mutex_lock(&sbi->connections.node_lock); +- } +- mutex_unlock(&sbi->connections.node_lock); +-} +- +-struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type) +-{ +- struct connection *conn_impl = NULL; +- +- if (!node) +- return NULL; +- mutex_lock(&node->conn_impl_list_lock); +- list_for_each_entry(conn_impl, &node->conn_impl_list, list) { +- if (conn_impl->type == connect_type && +- conn_impl->status == CONNECT_STAT_WORKING) { +- connection_get(conn_impl); +- mutex_unlock(&node->conn_impl_list_lock); +- return conn_impl; +- } +- } +- mutex_unlock(&node->conn_impl_list_lock); +- hmdfs_err_ratelimited("device %llu not find connection, type %d", +- node->device_id, connect_type); +- return NULL; +-} +- +-void set_conn_sock_quickack(struct hmdfs_peer *node) +-{ +- struct connection *conn_impl = NULL; +- struct tcp_handle *tcp = NULL; +- int option = 1; +- +- if (!node) +- return; +- mutex_lock(&node->conn_impl_list_lock); +- list_for_each_entry(conn_impl, &node->conn_impl_list, list) { +- if (conn_impl->type == CONNECT_TYPE_TCP && +- conn_impl->status == CONNECT_STAT_WORKING && +- conn_impl->connect_handle) { +- tcp = (struct tcp_handle *)(conn_impl->connect_handle); +- tcp_sock_set_quickack(tcp->sock->sk, option); +- } +- } +- mutex_unlock(&node->conn_impl_list_lock); +-} +- +-struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi, +- uint64_t device_id) +-{ +- struct hmdfs_peer *con = NULL; +- struct hmdfs_peer *lookup = NULL; +- +- if (!sbi) +- return NULL; +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(con, &sbi->connections.node_list, list) { +- if (con->status != NODE_STAT_ONLINE || +- con->device_id != device_id) +- continue; +- lookup = con; +- peer_get(lookup); +- break; +- } +- mutex_unlock(&sbi->connections.node_lock); +- return lookup; +-} +- +-struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi, +- uint8_t *cid) +-{ +- struct hmdfs_peer *con = NULL; +- struct hmdfs_peer *lookup = NULL; +- +- if (!sbi) +- return NULL; +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(con, &sbi->connections.node_list, list) { +- if (strncmp(con->cid, cid, HMDFS_CID_SIZE) != 0) +- continue; +- lookup = con; +- peer_get(lookup); +- break; +- } +- mutex_unlock(&sbi->connections.node_lock); +- return 
lookup; +-} +- +-static struct hmdfs_peer *lookup_peer_by_cid_unsafe(struct hmdfs_sb_info *sbi, +- uint8_t *cid) +-{ +- struct hmdfs_peer *node = NULL; +- +- list_for_each_entry(node, &sbi->connections.node_list, list) +- if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) { +- peer_get(node); +- return node; +- } +- return NULL; +-} +- +-static struct hmdfs_peer *add_peer_unsafe(struct hmdfs_sb_info *sbi, +- struct hmdfs_peer *peer2add) +-{ +- struct hmdfs_peer *peer; +- int err; +- +- peer = lookup_peer_by_cid_unsafe(sbi, peer2add->cid); +- if (peer) +- return peer; +- +- err = hmdfs_register_peer_sysfs(sbi, peer2add); +- if (err) { +- hmdfs_err("register peer %llu sysfs err %d", +- peer2add->device_id, err); +- return ERR_PTR(err); +- } +- list_add_tail(&peer2add->list, &sbi->connections.node_list); +- peer_get(peer2add); +- hmdfs_run_simple_evt_cb(peer2add, NODE_EVT_ADD); +- return peer2add; +-} +- +-static struct hmdfs_peer *alloc_peer(struct hmdfs_sb_info *sbi, uint8_t *cid, +- uint32_t devsl) +-{ +- struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL); +- +- if (!node) +- return NULL; +- +- node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq); +- +- node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0, +- sbi->seq, node->device_id); +- if (!node->async_wq) { +- hmdfs_err("Failed to alloc async wq"); +- goto out_err; +- } +- node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu", +- WQ_UNBOUND | WQ_MEM_RECLAIM, +- sbi->async_req_max_active, +- sbi->seq, node->device_id); +- if (!node->req_handle_wq) { +- hmdfs_err("Failed to alloc req wq"); +- goto out_err; +- } +- node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu", +- WQ_UNBOUND | WQ_MEM_RECLAIM, +- 0, sbi->seq, node->device_id); +- if (!node->dentry_wq) { +- hmdfs_err("Failed to alloc dentry wq"); +- goto out_err; +- } +- node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu", +- WQ_UNBOUND | WQ_MEM_RECLAIM, +- HMDFS_RETRY_WB_WQ_MAX_ACTIVE, +- sbi->seq, node->device_id); +- if (!node->retry_wb_wq) { +- hmdfs_err("Failed to alloc retry writeback wq"); +- goto out_err; +- } +- node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu", +- WQ_UNBOUND, 0, +- sbi->seq, node->device_id); +- if (!node->reget_conn_wq) { +- hmdfs_err("Failed to alloc reget conn wq"); +- goto out_err; +- } +- INIT_LIST_HEAD(&node->conn_impl_list); +- mutex_init(&node->conn_impl_list_lock); +- INIT_LIST_HEAD(&node->conn_deleting_list); +- init_waitqueue_head(&node->deleting_list_wq); +- idr_init(&node->msg_idr); +- spin_lock_init(&node->idr_lock); +- idr_init(&node->file_id_idr); +- spin_lock_init(&node->file_id_lock); +- INIT_LIST_HEAD(&node->list); +- kref_init(&node->ref_cnt); +- node->owner = sbi->seq; +- node->sbi = sbi; +- node->version = HMDFS_VERSION; +- node->status = NODE_STAT_SHAKING; +- node->conn_time = jiffies; +- memcpy(node->cid, cid, HMDFS_CID_SIZE); +- atomic64_set(&node->sb_dirty_count, 0); +- node->fid_cookie = 0; +- atomic_set(&node->evt_seq, 0); +- mutex_init(&node->seq_lock); +- mutex_init(&node->offline_cb_lock); +- mutex_init(&node->evt_lock); +- node->pending_evt = RAW_NODE_EVT_NR; +- node->last_evt = RAW_NODE_EVT_NR; +- node->cur_evt[0] = RAW_NODE_EVT_NR; +- node->cur_evt[1] = RAW_NODE_EVT_NR; +- node->seq_wr_idx = (unsigned char)UINT_MAX; +- node->seq_rd_idx = node->seq_wr_idx; +- INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work); +- node->msg_idr_process = 0; +- node->offline_start = false; +- spin_lock_init(&node->wr_opened_inode_lock); +- INIT_LIST_HEAD(&node->wr_opened_inode_list); 
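+-	/*
+-	 * Offline-stash bookkeeping: stashed_inode_lock protects
+-	 * stashed_inode_list and stashed_inode_nr (see struct hmdfs_peer).
+-	 */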
+-	spin_lock_init(&node->stashed_inode_lock);
+-	node->stashed_inode_nr = 0;
+-	atomic_set(&node->rebuild_inode_status_nr, 0);
+-	init_waitqueue_head(&node->rebuild_inode_status_wq);
+-	INIT_LIST_HEAD(&node->stashed_inode_list);
+-	node->need_rebuild_stash_list = false;
+-	node->devsl = devsl;
+-
+-	return node;
+-
+-out_err:
+-	if (node->async_wq) {
+-		destroy_workqueue(node->async_wq);
+-		node->async_wq = NULL;
+-	}
+-	if (node->req_handle_wq) {
+-		destroy_workqueue(node->req_handle_wq);
+-		node->req_handle_wq = NULL;
+-	}
+-	if (node->dentry_wq) {
+-		destroy_workqueue(node->dentry_wq);
+-		node->dentry_wq = NULL;
+-	}
+-	if (node->retry_wb_wq) {
+-		destroy_workqueue(node->retry_wb_wq);
+-		node->retry_wb_wq = NULL;
+-	}
+-	if (node->reget_conn_wq) {
+-		destroy_workqueue(node->reget_conn_wq);
+-		node->reget_conn_wq = NULL;
+-	}
+-	kfree(node);
+-	return NULL;
+-}
+-
+-struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
+-				  uint32_t devsl)
+-{
+-	struct hmdfs_peer *peer = NULL, *on_sbi_peer = NULL;
+-
+-	mutex_lock(&sbi->connections.node_lock);
+-	peer = lookup_peer_by_cid_unsafe(sbi, cid);
+-	mutex_unlock(&sbi->connections.node_lock);
+-	if (peer) {
+-		hmdfs_info("Got an existing peer: device_id = %llu",
+-			   peer->device_id);
+-		goto out;
+-	}
+-
+-	peer = alloc_peer(sbi, cid, devsl);
+-	if (unlikely(!peer)) {
+-		hmdfs_info("Failed to alloc a peer");
+-		goto out;
+-	}
+-
+-	mutex_lock(&sbi->connections.node_lock);
+-	on_sbi_peer = add_peer_unsafe(sbi, peer);
+-	mutex_unlock(&sbi->connections.node_lock);
+-	if (IS_ERR(on_sbi_peer)) {
+-		peer_put(peer);
+-		peer = NULL;
+-		goto out;
+-	} else if (unlikely(on_sbi_peer != peer)) {
+-		hmdfs_info("Got an existing peer: device_id = %llu",
+-			   on_sbi_peer->device_id);
+-		peer_put(peer);
+-		peer = on_sbi_peer;
+-	} else {
+-		hmdfs_info("Got a newly allocated peer: device_id = %llu",
+-			   peer->device_id);
+-	}
+-
+-out:
+-	return peer;
+-}
+-
+-static void head_release(struct kref *kref)
+-{
+-	struct hmdfs_msg_idr_head *head;
+-	struct hmdfs_peer *con;
+-
+-	head = (struct hmdfs_msg_idr_head *)container_of(kref,
+-			struct hmdfs_msg_idr_head, ref);
+-	con = head->peer;
+-	idr_remove(&con->msg_idr, head->msg_id);
+-	spin_unlock(&con->idr_lock);
+-
+-	kfree(head);
+-}
+-
+-void head_put(struct hmdfs_msg_idr_head *head)
+-{
+-	kref_put_lock(&head->ref, head_release, &head->peer->idr_lock);
+-}
+-
+-struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer,
+-		int id, struct hmdfs_cmd operations)
+-{
+-	struct hmdfs_msg_idr_head *head = NULL;
+-
+-	spin_lock(&peer->idr_lock);
+-	head = idr_find(&peer->msg_idr, id);
+-	if (head && head->send_cmd_operations.command == operations.command)
+-		kref_get(&head->ref);
+-	else
+-		head = NULL;
+-	spin_unlock(&peer->idr_lock);
+-
+-	return head;
+-}
+-
+-int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
+-			void *ptr, struct hmdfs_cmd operations)
+-{
+-	int ret = -EAGAIN;
+-	struct hmdfs_msg_idr_head *head = ptr;
+-
+-	idr_preload(GFP_KERNEL);
+-	spin_lock(&peer->idr_lock);
+-	if (!peer->offline_start)
+-		ret = idr_alloc_cyclic(&peer->msg_idr, ptr,
+-				       1, 0, GFP_NOWAIT);
+-	if (ret >= 0) {
+-		kref_init(&head->ref);
+-		head->msg_id = ret;
+-		head->type = type;
+-		head->peer = peer;
+-		head->send_cmd_operations = operations;
+-		peer->msg_idr_process++;
+-		ret = 0;
+-	}
+-	spin_unlock(&peer->idr_lock);
+-	idr_preload_end();
+-
+-	return ret;
+-}
+diff --git a/fs/hmdfs/comm/connection.h b/fs/hmdfs/comm/connection.h
+deleted file mode 100644
+index 1988e99f7..000000000
+--- a/fs/hmdfs/comm/connection.h
++++ /dev/null
+@@ -1,358 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * fs/hmdfs/comm/connection.h
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef HMDFS_CONNECTION_H
+-#define HMDFS_CONNECTION_H
+-
+-#ifdef CONFIG_HMDFS_FS_ENCRYPTION
+-#include
+-#endif
+-
+-#include
+-#include
+-#include "protocol.h"
+-#include "node_cb.h"
+-
+-#define HMDFS_KEY_SIZE 32
+-#define HMDFS_IV_SIZE 12
+-#define HMDFS_TAG_SIZE 16
+-#define HMDFS_CID_SIZE 64
+-
+-enum {
+-	CONNECT_MESG_HANDSHAKE_REQUEST = 1,
+-	CONNECT_MESG_HANDSHAKE_RESPONSE = 2,
+-	CONNECT_MESG_HANDSHAKE_ACK = 3,
+-};
+-
+-enum {
+-	CONNECT_STAT_WAIT_REQUEST = 0,
+-	CONNECT_STAT_WAIT_RESPONSE,
+-	CONNECT_STAT_WORKING,
+-	CONNECT_STAT_STOP,
+-	CONNECT_STAT_WAIT_ACK,
+-	CONNECT_STAT_NEGO_FAIL,
+-	CONNECT_STAT_COUNT
+-};
+-
+-enum {
+-	CONNECT_TYPE_TCP = 0,
+-	CONNECT_TYPE_UNSUPPORT,
+-};
+-
+-struct connection_stat {
+-	int64_t send_bytes;
+-	int64_t recv_bytes;
+-	int send_message_count;
+-	int recv_message_count;
+-	unsigned long rekey_time;
+-};
+-
+-struct connection {
+-	struct list_head list;
+-	struct kref ref_cnt;
+-	struct mutex ref_lock;
+-	struct hmdfs_peer *node;
+-	int type;
+-	int status;
+-	void *connect_handle;
+-	struct crypto_aead *tfm;
+-	u8 master_key[HMDFS_KEY_SIZE];
+-	u8 send_key[HMDFS_KEY_SIZE];
+-	u8 recv_key[HMDFS_KEY_SIZE];
+-	struct connection_stat stat;
+-	struct work_struct reget_work;
+-#ifdef CONFIG_HMDFS_FS_ENCRYPTION
+-	struct tls12_crypto_info_aes_gcm_128 send_crypto_info;
+-	struct tls12_crypto_info_aes_gcm_128 recv_crypto_info;
+-#endif
+-	void (*close)(struct connection *connect);
+-	int (*send_message)(struct connection *connect,
+-			    struct hmdfs_send_data *msg);
+-	uint32_t crypto;
+-};
+-
+-enum {
+-	NODE_STAT_SHAKING = 0,
+-	NODE_STAT_ONLINE,
+-	NODE_STAT_OFFLINE,
+-};
+-
+-struct hmdfs_async_work {
+-	struct hmdfs_msg_idr_head head;
+-	struct page *page;
+-	struct delayed_work d_work;
+-	unsigned long start;
+-};
+-
+-enum {
+-	RAW_NODE_EVT_OFF = 0,
+-	RAW_NODE_EVT_ON,
+-	RAW_NODE_EVT_NR,
+-};
+-
+-#define RAW_NODE_EVT_MAX_NR 4
+-
+-struct hmdfs_stash_statistics {
+-	unsigned int cur_ok;
+-	unsigned int cur_nothing;
+-	unsigned int cur_fail;
+-	unsigned int total_ok;
+-	unsigned int total_nothing;
+-	unsigned int total_fail;
+-	unsigned long long ok_pages;
+-	unsigned long long fail_pages;
+-};
+-
+-struct hmdfs_restore_statistics {
+-	unsigned int cur_ok;
+-	unsigned int cur_fail;
+-	unsigned int cur_keep;
+-	unsigned int total_ok;
+-	unsigned int total_fail;
+-	unsigned int total_keep;
+-	unsigned long long ok_pages;
+-	unsigned long long fail_pages;
+-};
+-
+-struct hmdfs_rebuild_statistics {
+-	unsigned int cur_ok;
+-	unsigned int cur_fail;
+-	unsigned int cur_invalid;
+-	unsigned int total_ok;
+-	unsigned int total_fail;
+-	unsigned int total_invalid;
+-	unsigned int time;
+-};
+-
+-struct hmdfs_peer_statistics {
+-	/* stash statistics */
+-	struct hmdfs_stash_statistics stash;
+-	/* restore statistics */
+-	struct hmdfs_restore_statistics restore;
+-	/* rebuild statistics */
+-	struct hmdfs_rebuild_statistics rebuild;
+-};
+-
+-struct hmdfs_peer {
+-	struct list_head list;
+-	struct kref ref_cnt;
+-	unsigned int owner;
+-	uint64_t device_id;
+-	unsigned long conn_time;
+-	uint8_t version;
+-	int status;
+-	u64 features;
+-	long long old_sb_dirty_count;
+-	atomic64_t sb_dirty_count;
+-	/*
+-	 * cookie for opened file id.
+-	 * It will be increased after the peer has gone offline.
+-	 */
+-	uint16_t fid_cookie;
+-	struct mutex conn_impl_list_lock;
+-	struct list_head conn_impl_list;
+-	/*
+-	 * when the async message processing context calls
+-	 * hmdfs_reget_connection, the conn node is added to
+-	 * conn_deleting_list, so hmdfs_disconnect_node can
+-	 * wait for all receive threads to exit
+-	 */
+-	struct list_head conn_deleting_list;
+-	wait_queue_head_t deleting_list_wq;
+-	struct idr msg_idr;
+-	spinlock_t idr_lock;
+-	struct idr file_id_idr;
+-	spinlock_t file_id_lock;
+-	int recvbuf_maxsize;
+-	struct crypto_aead *tfm;
+-	char cid[HMDFS_CID_SIZE + 1];
+-	struct hmdfs_sb_info *sbi;
+-	struct workqueue_struct *async_wq;
+-	struct workqueue_struct *req_handle_wq;
+-	struct workqueue_struct *dentry_wq;
+-	struct workqueue_struct *retry_wb_wq;
+-	struct workqueue_struct *reget_conn_wq;
+-	atomic_t evt_seq;
+-	/* sync cb may be blocking */
+-	struct mutex seq_lock;
+-	struct mutex offline_cb_lock;
+-	struct mutex evt_lock;
+-	unsigned char pending_evt;
+-	unsigned char last_evt;
+-	unsigned char waiting_evt[RAW_NODE_EVT_NR];
+-	unsigned char seq_rd_idx;
+-	unsigned char seq_wr_idx;
+-	unsigned int seq_tbl[RAW_NODE_EVT_MAX_NR];
+-	unsigned int pending_evt_seq;
+-	unsigned char cur_evt[NODE_EVT_TYPE_NR];
+-	unsigned int cur_evt_seq[NODE_EVT_TYPE_NR];
+-	unsigned int merged_evt;
+-	unsigned int dup_evt[RAW_NODE_EVT_NR];
+-	struct delayed_work evt_dwork;
+-	/* protected by idr_lock */
+-	uint64_t msg_idr_process;
+-	bool offline_start;
+-	spinlock_t wr_opened_inode_lock;
+-	struct list_head wr_opened_inode_list;
+-	/*
+-	 * protect @stashed_inode_list and @stashed_inode_nr in the stash
+-	 * process and in the
+-	 * fill_inode_remote->hmdfs_remote_init_stash_status process
+-	 */
+-	spinlock_t stashed_inode_lock;
+-	unsigned int stashed_inode_nr;
+-	struct list_head stashed_inode_list;
+-	bool need_rebuild_stash_list;
+-	/* how many inodes are rebuilding stash status */
+-	atomic_t rebuild_inode_status_nr;
+-	wait_queue_head_t rebuild_inode_status_wq;
+-	struct hmdfs_peer_statistics stats;
+-	/* sysfs */
+-	struct kobject kobj;
+-	struct completion kobj_unregister;
+-	uint32_t devsl;
+-};
+-
+-#define HMDFS_DEVID_LOCAL 0
+-
+-/* Be compatible with DFS 1.0; don't add the packed attribute so far */
+-struct connection_msg_head {
+-	__u8 magic;
+-	__u8 version;
+-	__u8 operations;
+-	__u8 flags;
+-	__le32 datasize;
+-	__le64 source;
+-	__le16 msg_id;
+-	__le16 request_id;
+-	__le32 reserved1;
+-} __packed;
+-
+-struct connection_handshake_req {
+-	__le32 len;
+-	char dev_id[0];
+-} __packed;
+-
+-enum {
+-	HS_EXTEND_CODE_CRYPTO = 0,
+-	HS_EXTEND_CODE_CASE_SENSE,
+-	HS_EXTEND_CODE_FEATURE_SUPPORT,
+-	HS_EXTEND_CODE_COUNT
+-};
+-
+-struct conn_hs_extend_reg {
+-	__u16 len;
+-	__u16 resv;
+-	void (*filler)(struct connection *conn_impl, __u8 ops,
+-		       void *data, __u32 len);
+-	int (*parser)(struct connection *conn_impl, __u8 ops,
+-		      void *data, __u32 len);
+-};
+-
+-struct conn_hs_extend_head {
+-	__le32 field_cn;
+-	char data[0];
+-};
+-
+-struct extend_field_head {
+-	__le16 code;
+-	__le16 len;
+-} __packed;
+-
+-struct crypto_body {
+-	__le32 crypto;
+-} __packed;
+-
+-struct case_sense_body {
+-	__u8 case_sensitive;
+-} __packed;
+-
+-struct feature_body {
+-	__u64 features;
+-	__u64 reserved;
+-} __packed;
+-
+-#define HMDFS_HS_CRYPTO_KTLS_AES128 0x00000001
+-#define HMDFS_HS_CRYPTO_KTLS_AES256 0x00000002
+-
+-static inline bool hmdfs_is_node_online(const struct hmdfs_peer *node)
+-{
+-	return READ_ONCE(node->status) == NODE_STAT_ONLINE;
+-}
+-
+-static inline unsigned int hmdfs_node_inc_evt_seq(struct hmdfs_peer *node)
+-{
+-	/* Use the atomic as an unsigned integer */
+-	return atomic_inc_return(&node->evt_seq);
+-}
+-
+-static inline unsigned int hmdfs_node_evt_seq(const struct hmdfs_peer *node)
+-{
+-	return atomic_read(&node->evt_seq);
+-}
+-
+-struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type);
+-
+-void set_conn_sock_quickack(struct hmdfs_peer *node);
+-
+-struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
+-				  uint32_t devsl);
+-
+-struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi,
+-					   uint64_t device_id);
+-struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi,
+-					 uint8_t *cid);
+-void connection_send_handshake(struct connection *conn_impl, __u8 operations,
+-			       __le16 request_id);
+-void connection_handshake_recv_handler(struct connection *conn_impl, void *buf,
+-				       void *data, __u32 data_len);
+-void connection_working_recv_handler(struct connection *conn_impl, void *head,
+-				     void *data, __u32 data_len);
+-static inline void connection_get(struct connection *conn)
+-{
+-	kref_get(&conn->ref_cnt);
+-}
+-
+-void connection_put(struct connection *conn);
+-static inline void peer_get(struct hmdfs_peer *peer)
+-{
+-	kref_get(&peer->ref_cnt);
+-}
+-
+-void peer_put(struct hmdfs_peer *peer);
+-
+-int hmdfs_sendmessage(struct hmdfs_peer *node, struct hmdfs_send_data *msg);
+-void hmdfs_connections_stop(struct hmdfs_sb_info *sbi);
+-
+-void hmdfs_disconnect_node(struct hmdfs_peer *node);
+-
+-void connection_to_working(struct hmdfs_peer *node);
+-
+-int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
+-			void *ptr, struct hmdfs_cmd operations);
+-struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer, int id,
+-					       struct hmdfs_cmd operations);
+-
+-static inline void hmdfs_start_process_offline(struct hmdfs_peer *peer)
+-{
+-	spin_lock(&peer->idr_lock);
+-	peer->offline_start = true;
+-	spin_unlock(&peer->idr_lock);
+-}
+-
+-static inline void hmdfs_stop_process_offline(struct hmdfs_peer *peer)
+-{
+-	spin_lock(&peer->idr_lock);
+-	peer->offline_start = false;
+-	spin_unlock(&peer->idr_lock);
+-}
+-
+-static inline void hmdfs_dec_msg_idr_process(struct hmdfs_peer *peer)
+-{
+-	spin_lock(&peer->idr_lock);
+-	peer->msg_idr_process--;
+-	spin_unlock(&peer->idr_lock);
+-}
+-#endif
+diff --git a/fs/hmdfs/comm/crypto.c b/fs/hmdfs/comm/crypto.c
+deleted file mode 100644
+index 01d5d3feb..000000000
+--- a/fs/hmdfs/comm/crypto.c
++++ /dev/null
+@@ -1,262 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/comm/crypto.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include "crypto.h"
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-#include "hmdfs.h"
+-
+-static void tls_crypto_set_key(struct connection *conn_impl, int tx)
+-{
+-	int rc = 0;
+-	struct tcp_handle *tcp = conn_impl->connect_handle;
+-	struct tls_context *ctx = NULL;
+-	struct cipher_context *cctx = NULL;
+-	struct tls_sw_context_tx *sw_ctx_tx = NULL;
+-	struct tls_sw_context_rx *sw_ctx_rx = NULL;
+-	struct crypto_aead **aead = NULL;
+-	struct tls12_crypto_info_aes_gcm_128 *crypto_info = NULL;
+-
+-	lock_sock(tcp->sock->sk);
+-	ctx = tls_get_ctx(tcp->sock->sk);
+-	if (tx) {
+-		crypto_info = &conn_impl->send_crypto_info;
+-		cctx = &ctx->tx;
+-		sw_ctx_tx = tls_sw_ctx_tx(ctx);
+-		aead = &sw_ctx_tx->aead_send;
+-	} else {
+-		crypto_info = &conn_impl->recv_crypto_info;
+-		cctx = &ctx->rx;
+-		sw_ctx_rx = tls_sw_ctx_rx(ctx);
+-		aead = &sw_ctx_rx->aead_recv;
+-	}
+-
+-	memcpy(cctx->iv, crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+-	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, crypto_info->iv,
+-	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+-	memcpy(cctx->rec_seq, crypto_info->rec_seq,
+-	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-	rc = crypto_aead_setkey(*aead, crypto_info->key,
+-				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-	if (rc)
+-		hmdfs_err("crypto set key error");
+-	release_sock(tcp->sock->sk);
+-}
+-
+-int tls_crypto_info_init(struct connection *conn_impl)
+-{
+-	int ret = 0;
+-	u8 key_meterial[HMDFS_KEY_SIZE];
+-	struct tcp_handle *tcp =
+-		(struct tcp_handle *)(conn_impl->connect_handle);
+-	if (!tcp)
+-		return -EINVAL;
+-	// send
+-	update_key(conn_impl->send_key, key_meterial, HKDF_TYPE_IV);
+-	ret = tcp->sock->ops->setsockopt(tcp->sock, SOL_TCP, TCP_ULP,
+-					 KERNEL_SOCKPTR("tls"), sizeof("tls"));
+-	if (ret)
+-		hmdfs_err("set tls error %d", ret);
+-	tcp->connect->send_crypto_info.info.version = TLS_1_2_VERSION;
+-	tcp->connect->send_crypto_info.info.cipher_type =
+-		TLS_CIPHER_AES_GCM_128;
+-
+-	memcpy(tcp->connect->send_crypto_info.key, tcp->connect->send_key,
+-	       TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.iv,
+-	       key_meterial + CRYPTO_IV_OFFSET, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.salt,
+-	       key_meterial + CRYPTO_SALT_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.rec_seq,
+-	       key_meterial + CRYPTO_SEQ_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-
+-	ret = tcp->sock->ops->setsockopt(tcp->sock, SOL_TLS, TLS_TX,
+-			KERNEL_SOCKPTR(&(tcp->connect->send_crypto_info)),
+-			sizeof(tcp->connect->send_crypto_info));
+-	if (ret)
+-		hmdfs_err("set tls send_crypto_info error %d", ret);
+-
+-	// recv
+-	update_key(tcp->connect->recv_key, key_meterial, HKDF_TYPE_IV);
+-	tcp->connect->recv_crypto_info.info.version = TLS_1_2_VERSION;
+-	tcp->connect->recv_crypto_info.info.cipher_type =
+-		TLS_CIPHER_AES_GCM_128;
+-
+-	memcpy(tcp->connect->recv_crypto_info.key, tcp->connect->recv_key,
+-	       TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.iv,
+-	       key_meterial + CRYPTO_IV_OFFSET, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.salt,
+-	       key_meterial + CRYPTO_SALT_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.rec_seq,
+-	       key_meterial + CRYPTO_SEQ_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-	memset(key_meterial, 0, HMDFS_KEY_SIZE);
+-
+-	ret = tcp->sock->ops->setsockopt(tcp->sock, SOL_TLS, TLS_RX,
+-			KERNEL_SOCKPTR(&(tcp->connect->recv_crypto_info)),
+-			sizeof(tcp->connect->recv_crypto_info));
+-	if (ret)
+-		hmdfs_err("set tls recv_crypto_info error %d", ret);
+-	return ret;
+-}
+-
+-static int tls_set_tx(struct tcp_handle *tcp)
+-{
+-	int ret = 0;
+-	u8 new_key[HMDFS_KEY_SIZE];
+-	u8 key_meterial[HMDFS_KEY_SIZE];
+-
+-	ret = update_key(tcp->connect->send_key, new_key, HKDF_TYPE_REKEY);
+-	if (ret < 0)
+-		return ret;
+-	memcpy(tcp->connect->send_key, new_key, HMDFS_KEY_SIZE);
+-	ret = update_key(tcp->connect->send_key, key_meterial, HKDF_TYPE_IV);
+-	if (ret < 0)
+-		return ret;
+-
+-	memcpy(tcp->connect->send_crypto_info.key, tcp->connect->send_key,
+-	       TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.iv,
+-	       key_meterial + CRYPTO_IV_OFFSET, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.salt,
+-	       key_meterial + CRYPTO_SALT_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+-	memcpy(tcp->connect->send_crypto_info.rec_seq,
+-	       key_meterial + CRYPTO_SEQ_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-	memset(new_key, 0, HMDFS_KEY_SIZE);
+-	memset(key_meterial, 0, HMDFS_KEY_SIZE);
+-
+-	tls_crypto_set_key(tcp->connect, 1);
+-	return 0;
+-}
+-
+-static int tls_set_rx(struct tcp_handle *tcp)
+-{
+-	int ret = 0;
+-	u8 new_key[HMDFS_KEY_SIZE];
+-	u8 key_meterial[HMDFS_KEY_SIZE];
+-
+-	ret = update_key(tcp->connect->recv_key, new_key, HKDF_TYPE_REKEY);
+-	if (ret < 0)
+-		return ret;
+-	memcpy(tcp->connect->recv_key, new_key, HMDFS_KEY_SIZE);
+-	ret = update_key(tcp->connect->recv_key, key_meterial, HKDF_TYPE_IV);
+-	if (ret < 0)
+-		return ret;
+-
+-	memcpy(tcp->connect->recv_crypto_info.key, tcp->connect->recv_key,
+-	       TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.iv,
+-	       key_meterial + CRYPTO_IV_OFFSET, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.salt,
+-	       key_meterial + CRYPTO_SALT_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+-	memcpy(tcp->connect->recv_crypto_info.rec_seq,
+-	       key_meterial + CRYPTO_SEQ_OFFSET,
+-	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-	memset(new_key, 0, HMDFS_KEY_SIZE);
+-	memset(key_meterial, 0, HMDFS_KEY_SIZE);
+-	tls_crypto_set_key(tcp->connect, 0);
+-	return 0;
+-}
+-
+-int set_crypto_info(struct connection *conn_impl, int set_type)
+-{
+-	int ret = 0;
+-	struct tcp_handle *tcp =
+-		(struct tcp_handle *)(conn_impl->connect_handle);
+-	if (!tcp)
+-		return -EINVAL;
+-
+-	if (set_type == SET_CRYPTO_SEND) {
+-		ret = tls_set_tx(tcp);
+-		if (ret) {
+-			hmdfs_err("tls set tx fail");
+-			return ret;
+-		}
+-	}
+-	if (set_type == SET_CRYPTO_RECV) {
+-		ret = tls_set_rx(tcp);
+-		if (ret) {
+-			hmdfs_err("tls set rx fail");
+-			return ret;
+-		}
+-	}
+-	hmdfs_info("KTLS setting success");
+-	return ret;
+-}
+-
+-static int hmac_sha256(u8 *key, u8 key_len, char *info, u8 info_len, u8 *output)
+-{
+-	struct crypto_shash *tfm = NULL;
+-	struct shash_desc *shash = NULL;
+-	int ret = 0;
+-
+-	if (!key)
+-		return -EINVAL;
+-
+-	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+-	if (IS_ERR(tfm)) {
+-		hmdfs_err("crypto_alloc_shash failed: err %ld", PTR_ERR(tfm));
+-		return PTR_ERR(tfm);
+-	}
+-
+-	ret = crypto_shash_setkey(tfm, key, key_len);
+-	if (ret) {
+-		hmdfs_err("crypto_shash_setkey failed: err %d", ret);
+-		goto failed;
+-	}
+-
+-	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+-			GFP_KERNEL);
+-	if (!shash) {
+-		ret = -ENOMEM;
+-		goto failed;
+-	}
+-
+-	shash->tfm = tfm;
+-
+-	ret = crypto_shash_digest(shash, info, info_len, output);
+-
+-	kfree(shash);
+-
+-failed:
+-	crypto_free_shash(tfm);
+-	return ret;
+-}
+-
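+-/*
+- * update_key() below resembles a single HKDF-Expand block:
+- * new_key = HMAC-SHA256(old_key, label), with the label buffer laid out as
+- *
+- *   [u16 output length = HMDFS_KEY_SIZE][label string][0x01 block counter]
+- *
+- * e.g. HKDF_TYPE_IV uses the 12-byte label "ktls iv&salt", giving
+- * lable_size = 12 + sizeof(u16) + sizeof(char) = 15; the u16 length prefix
+- * is written in native byte order.
+- */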
+-static const char *const g_key_lable[] = { "ktls key initiator",
+-					   "ktls key accepter",
+-					   "ktls key update", "ktls iv&salt" };
+-static const int g_key_lable_len[] = { 18, 17, 15, 12 };
+-
+-int update_key(__u8 *old_key, __u8 *new_key, int type)
+-{
+-	int ret = 0;
+-	char lable[MAX_LABLE_SIZE];
+-	u8 lable_size;
+-
+-	lable_size = g_key_lable_len[type] + sizeof(u16) + sizeof(char);
+-	*((u16 *)lable) = HMDFS_KEY_SIZE;
+-	memcpy(lable + sizeof(u16), g_key_lable[type], g_key_lable_len[type]);
+-	*(lable + sizeof(u16) + g_key_lable_len[type]) = 0x01;
+-	ret = hmac_sha256(old_key, HMDFS_KEY_SIZE, lable, lable_size, new_key);
+-	if (ret < 0)
+-		hmdfs_err("hmac sha256 error");
+-	return ret;
+-}
+diff --git a/fs/hmdfs/comm/crypto.h b/fs/hmdfs/comm/crypto.h
+deleted file mode 100644
+index 7549f3897..000000000
+--- a/fs/hmdfs/comm/crypto.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * fs/hmdfs/comm/crypto.h
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef HMDFS_FS_ENCRYPTION_H
+-#define HMDFS_FS_ENCRYPTION_H
+-
+-#include "transport.h"
+-
+-#define MAX_LABLE_SIZE 30
+-#define CRYPTO_IV_OFFSET 0
+-#define CRYPTO_SALT_OFFSET (CRYPTO_IV_OFFSET + TLS_CIPHER_AES_GCM_128_IV_SIZE)
+-#define CRYPTO_SEQ_OFFSET \
+-	(CRYPTO_SALT_OFFSET + TLS_CIPHER_AES_GCM_128_SALT_SIZE)
+-#define REKEY_LIFETIME (60 * 60 * HZ)
+-
+-enum HKDF_TYPE {
+-	HKDF_TYPE_KEY_INITIATOR = 0,
+-	HKDF_TYPE_KEY_ACCEPTER = 1,
+-	HKDF_TYPE_REKEY = 2,
+-	HKDF_TYPE_IV = 3,
+-};
+-
+-enum SET_CRYPTO_TYPE {
+-	SET_CRYPTO_SEND = 0,
+-	SET_CRYPTO_RECV = 1,
+-};
+-
+-int tls_crypto_info_init(struct connection *conn_impl);
+-int set_crypto_info(struct connection *conn_impl, int set_type);
+-int update_key(__u8 *old_key, __u8 *new_key, int type);
+-
+-#endif
+diff --git a/fs/hmdfs/comm/device_node.c b/fs/hmdfs/comm/device_node.c
+deleted file mode 100644
+index ed568e0c1..000000000
+--- a/fs/hmdfs/comm/device_node.c
++++ /dev/null
+@@ -1,1647 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/comm/device_node.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include "device_node.h"
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-#include "client_writeback.h"
+-#include "server_writeback.h"
+-#include "connection.h"
+-#include "hmdfs_client.h"
+-#include "socket_adapter.h"
+-#include "authority/authentication.h"
+-
+-DEFINE_MUTEX(hmdfs_sysfs_mutex);
+-static struct kset *hmdfs_kset;
+-
+-static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
+-					   struct hmdfs_sb_info *sbi)
+-{
+-	struct update_socket_param cmd;
+-	struct hmdfs_peer *node = NULL;
+-	struct connection *conn = NULL;
+-
+-	if (unlikely(!buf || len != sizeof(cmd))) {
+-		hmdfs_err("len/buf error");
+-		goto out;
+-	}
+-	memcpy(&cmd, buf, sizeof(cmd));
+-	if (cmd.status != CONNECT_STAT_WAIT_REQUEST &&
+-	    cmd.status != CONNECT_STAT_WAIT_RESPONSE) {
+-		hmdfs_err("invalid status");
+-		goto out;
+-	}
+-
+-	node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
+-	if (unlikely(!node)) {
+-		hmdfs_err("failed to update ctrl node: cannot get peer");
+-		goto out;
+-	}
+-
+-	conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
+-	if (unlikely(!conn)) {
+-		hmdfs_err("failed to update ctrl node: cannot get conn");
+-	} else if (!sbi->system_cred) {
+-		const struct cred *system_cred = get_cred(current_cred());
+-
+-		if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
+-			put_cred(system_cred);
+-		else
+-			hmdfs_check_cred(system_cred);
+-	}
+-
+-	if (conn)
+-		connection_put(conn);
+-out:
+-	if (node)
+-		peer_put(node);
+-}
+-
+-static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
+-					  struct hmdfs_sb_info *sbi)
+-{
+-	struct update_devsl_param cmd;
+-	struct hmdfs_peer *node = NULL;
+-
+-	if (unlikely(!buf || len != sizeof(cmd))) {
+-		hmdfs_err("Received an invalid userbuf");
+-		return;
+-	}
+-	memcpy(&cmd, buf, sizeof(cmd));
+-
+-	node = hmdfs_lookup_from_cid(sbi, cmd.cid);
+-	if (unlikely(!node)) {
+-		hmdfs_err("failed to update devsl: cannot get peer");
+-		return;
+-	}
+-	hmdfs_info("Found peer: device_id = %llu", node->device_id);
+-	node->devsl = cmd.devsl;
+-	peer_put(node);
+-}
+-
+-static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
+-{
+-	hmdfs_start_process_offline(conn);
+-	hmdfs_disconnect_node(conn);
+-	hmdfs_stop_process_offline(conn);
+-}
+-
+-static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
+-				      struct hmdfs_sb_info *sbi)
+-{
+-	struct offline_param cmd;
+-	struct hmdfs_peer *node = NULL;
+-
+-	if (unlikely(!buf || len != sizeof(cmd))) {
+-		hmdfs_err("Received an invalid userbuf");
+-		return;
+-	}
+-	memcpy(&cmd, buf, sizeof(cmd));
+-	node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
+-	if (unlikely(!node)) {
+-		hmdfs_err("Cannot find node by device");
+-		return;
+-	}
+-	hmdfs_info("Found peer: device_id = %llu", node->device_id);
+-	hmdfs_disconnect_node_marked(node);
+-	peer_put(node);
+-}
+-
+-typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
+-				 struct hmdfs_sb_info *sbi);
+-
+-static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
+-	[CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
+-	[CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
+-	[CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
+-};
+-
+-static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
+-			    char *buf)
+-{
+-	struct notify_param param;
+-	int out_len;
+-	struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-
+-	memset(&param, 0, sizeof(param));
+-	spin_lock(&sbi->notify_fifo_lock);
+-	out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
+-	spin_unlock(&sbi->notify_fifo_lock);
+-	if (out_len != sizeof(param))
+-		param.notify = NOTIFY_NONE;
+-	memcpy(buf, &param, sizeof(param));
+-	return sizeof(param);
+-}
+-
+-static const char *cmd2str(int cmd)
+-{
+-	switch (cmd) {
+-	case 0:
+-		return "CMD_UPDATE_SOCKET";
+-	case 1:
+-		return "CMD_UPDATE_DEVSL";
+-	case 2:
+-		return "CMD_OFF_LINE";
+-	default:
+-		return "illegal cmd";
+-	}
+-}
+-
+-static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
+-			     const char *buf, size_t len)
+-{
+-	int cmd;
+-	struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-
+-	if (!sbi) {
+-		hmdfs_info("Fatal! Empty sbi. Mount fs first");
+-		return len;
+-	}
+-	if (len < sizeof(int)) {
+-		hmdfs_err("Illegal cmd: cmd len = %zu", len);
+-		return len;
+-	}
+-	cmd = *(int *)buf;
+-	if (cmd < 0 || cmd >= CMD_CNT) {
+-		hmdfs_err("Illegal cmd : cmd = %d", cmd);
+-		return len;
+-	}
+-	mutex_lock(&sbi->cmd_handler_mutex);
+-	hmdfs_info("Received cmd: %s", cmd2str(cmd));
+-	if (cmd_handler[cmd])
+-		cmd_handler[cmd](buf, len, sbi);
+-	mutex_unlock(&sbi->cmd_handler_mutex);
+-	return len;
+-}
+-
+-static struct sbi_attribute sbi_cmd_attr =
+-	__ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);
+-
+-static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
+-			       char *buf)
+-{
+-	ssize_t size = 0;
+-	struct hmdfs_sb_info *sbi = NULL;
+-	struct hmdfs_peer *peer = NULL;
+-	struct connection *conn_impl = NULL;
+-	struct tcp_handle *tcp = NULL;
+-
+-	sbi = to_sbi(kobj);
+-	size += snprintf(buf + size, PAGE_SIZE - size, "peers status\n");
+-
+-	mutex_lock(&sbi->connections.node_lock);
+-	list_for_each_entry(peer, &sbi->connections.node_list, list) {
+-		size += snprintf(buf + size, PAGE_SIZE - size, "%s %d\n",
+-				 peer->cid, peer->status);
+-		// connection information
+-		size += snprintf(
+-			buf + size, PAGE_SIZE - size,
+-			"\t socket_fd connection_status tcp_status ... refcnt\n");
+-		mutex_lock(&peer->conn_impl_list_lock);
+-		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
+-			tcp = conn_impl->connect_handle;
+-			size += snprintf(buf + size, PAGE_SIZE - size,
+-					 "\t %d \t%d \t%d \t%p \t%ld\n",
+-					 tcp->fd, conn_impl->status,
+-					 tcp->sock->state, tcp->sock, file_count(tcp->sock->file));
+-		}
+-		mutex_unlock(&peer->conn_impl_list_lock);
+-	}
+-	mutex_unlock(&sbi->connections.node_lock);
+-	return size;
+-}
+-
+-static ssize_t sbi_status_store(struct kobject *kobj,
+-				struct sbi_attribute *attr, const char *buf,
+-				size_t len)
+-{
+-	return len;
+-}
+-
+-static struct sbi_attribute sbi_status_attr =
+-	__ATTR(status, 0664, sbi_status_show, sbi_status_store);
+-
+-static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
+-			     char *buf)
+-{
+-	ssize_t size = 0;
+-	struct hmdfs_sb_info *sbi = NULL;
+-	struct hmdfs_peer *peer = NULL;
+-	struct connection *conn_impl = NULL;
+-	struct tcp_handle *tcp = NULL;
+-
+-	sbi = to_sbi(kobj);
+-	mutex_lock(&sbi->connections.node_lock);
+-	list_for_each_entry(peer, &sbi->connections.node_list, list) {
+-		// connection information
+-		mutex_lock(&peer->conn_impl_list_lock);
+-		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
+-			tcp = conn_impl->connect_handle;
+-			size += snprintf(buf + size, PAGE_SIZE - size,
+-					 "socket_fd: %d\n", tcp->fd);
+-			size += snprintf(buf + size, PAGE_SIZE - size,
+-					 "\tsend_msg %d \tsend_bytes %lld\n",
+-					 conn_impl->stat.send_message_count,
+-					 conn_impl->stat.send_bytes);
+-			size += snprintf(buf + size, PAGE_SIZE - size,
+-					 "\trecv_msg %d \trecv_bytes %lld\n",
+-					 conn_impl->stat.recv_message_count,
+-					 conn_impl->stat.recv_bytes);
+-		}
+-		mutex_unlock(&peer->conn_impl_list_lock);
+-	}
+-	mutex_unlock(&sbi->connections.node_lock);
+-	return size;
+-}
+-
+-static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
+-			      const char *buf, size_t len)
+-{
+-	struct hmdfs_sb_info *sbi = NULL;
+-	struct hmdfs_peer *peer = NULL;
+-	struct connection *conn_impl = NULL;
+-
+-	sbi = to_sbi(kobj);
+-	mutex_lock(&sbi->connections.node_lock);
+-	list_for_each_entry(peer, &sbi->connections.node_list, list) {
+-		// connection information
+-		mutex_lock(&peer->conn_impl_list_lock);
+-		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
+-			conn_impl->stat.send_message_count = 0;
+-			conn_impl->stat.send_bytes = 0;
+-			conn_impl->stat.recv_message_count = 0;
+-			conn_impl->stat.recv_bytes = 0;
+-		}
+-		mutex_unlock(&peer->conn_impl_list_lock);
+-	}
+-	mutex_unlock(&sbi->connections.node_lock);
+-	return len;
+-}
+-
+-static struct sbi_attribute sbi_statistic_attr =
+-	__ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);
+-
+-static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
+-					 struct sbi_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
+-}
+-
+-#define PRECISION_MAX 3600000
+-
+-static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
+-					  struct sbi_attribute *attr,
+-					  const char *buf, size_t len)
+-{
+-	int ret;
+-	unsigned int precision;
+-	struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-
+-	ret = kstrtouint(skip_spaces(buf), 0, &precision);
+-	if (!ret) {
+-		if (precision <= PRECISION_MAX)
+-			sbi->dcache_precision = precision;
+-		else
+-			ret = -EINVAL;
+-	}
+-
+-	return ret ? ret : len;
+-}
+-
+-static struct sbi_attribute sbi_dcache_precision_attr =
+-	__ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
+-	       sbi_dcache_precision_store);
+-
+-static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
+-					 struct sbi_attribute *attr, char *buf)
+-{
+-	return snprintf(buf, PAGE_SIZE, "%lu\n",
+-			to_sbi(kobj)->dcache_threshold);
+-}
+-
+-static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
+-					  struct sbi_attribute *attr,
+-					  const char *buf, size_t len)
+-{
+-	int ret;
+-	unsigned long threshold;
+-	struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-
+-	ret = kstrtoul(skip_spaces(buf), 0, &threshold);
+-	if (!ret)
+-		sbi->dcache_threshold = threshold;
+-
+-	return ret ? ret : len;
+-}
+-
+-static struct sbi_attribute sbi_dcache_threshold_attr =
+-	__ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
+-	       sbi_dcache_threshold_store);
+-
+-static ssize_t server_statistic_show(struct kobject *kobj,
+-				     struct sbi_attribute *attr, char *buf)
+-{
+-	int i, ret;
+-	const size_t size = PAGE_SIZE - 1;
+-	ssize_t pos = 0;
+-	struct server_statistic *stat = to_sbi(kobj)->s_server_statis;
+-
+-	for (i = 0; i < F_SIZE; i++) {
+-
+-		ret = snprintf(buf + pos, size - pos,
+-			       "%llu %u %llu %llu\n",
+-			       stat[i].cnt,
+-			       jiffies_to_msecs(stat[i].max),
+-			       stat[i].snd_cnt, stat[i].snd_fail_cnt);
+-		if (ret > size - pos)
+-			break;
+-		pos += ret;
+-	}
+-
+-	/* If we broke out early, add a trailing newline */
+-	if (i < F_SIZE) {
+-		ret = snprintf(buf + pos, size + 1 - pos, "\n");
+-		pos += ret;
+-	}
+-	return pos;
+-}
+-
+-static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);
+-
+-static ssize_t client_statistic_show(struct kobject *kobj,
+-				     struct sbi_attribute *attr, char *buf)
+-{
+-	int i, ret;
+-	const size_t size = PAGE_SIZE - 1;
+-	ssize_t pos = 0;
+-	struct client_statistic *stat = to_sbi(kobj)->s_client_statis;
+-
+-	for (i = 0; i < F_SIZE; i++) {
+-
+-		ret = snprintf(buf + pos, size - pos,
+-			       "%llu %llu %llu %llu %llu %u\n",
+-			       stat[i].snd_cnt,
+-			       stat[i].snd_fail_cnt,
+-			       stat[i].resp_cnt,
+-			       stat[i].timeout_cnt,
+-			       stat[i].delay_resp_cnt,
+-			       jiffies_to_msecs(stat[i].max));
+-		if (ret > size - pos)
+-			break;
+-		pos += ret;
+-	}
+-
+-	/* If we broke out early, add a trailing newline */
+-	if (i < F_SIZE) {
+-		ret = snprintf(buf + pos, size + 1 - pos, "\n");
+-		pos += ret;
+-	}
+-
+-	return pos;
+-}
+-
+-static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);
+-
+-static inline unsigned long pages_to_kbytes(unsigned long page)
+-{
+-	return page << (PAGE_SHIFT - 10);
+-}
+-
+-static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
+-					  struct sbi_attribute *attr,
+-					  char *buf)
+-{
+-	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-	struct hmdfs_writeback *hwb = sbi->h_wb;
+-	unsigned long avg;
+-	unsigned long max;
+-	unsigned long min;
+-
+-	spin_lock(&hwb->write_bandwidth_lock);
+-	avg = hwb->avg_write_bandwidth;
+-	max = hwb->max_write_bandwidth;
+-	min = hwb->min_write_bandwidth;
+-	spin_unlock(&hwb->write_bandwidth_lock);
+-
+-	if (min == ULONG_MAX)
+-		min = 0;
+-
+-	return snprintf(buf, PAGE_SIZE,
+-			"%10lu\n"
+-			"%10lu\n"
+-			"%10lu\n",
+-			pages_to_kbytes(avg),
+-			pages_to_kbytes(max),
+-			pages_to_kbytes(min));
+-}
+-
+-static struct sbi_attribute sbi_dirty_writeback_stats_attr =
+-	__ATTR_RO(dirty_writeback_stats);
+-
+-static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
+-				      struct sbi_attribute *attr,
+-				      char *buf)
+-{
+-	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
+-
+-	return snprintf(buf, PAGE_SIZE, "%u\n", 
sbi->wb_timeout_ms); +-} +- +-static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned int val; +- int err; +- +- err = kstrtouint(buf, 10, &val); +- if (err) +- return err; +- +- if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS) +- return -EINVAL; +- +- sbi->wb_timeout_ms = val; +- +- return len; +-} +- +-static struct sbi_attribute sbi_wb_timeout_ms_attr = +- __ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show, +- sbi_wb_timeout_ms_store); +- +-static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%u\n", +- sbi->h_wb->dirty_writeback_interval); +-} +- +-static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- int err; +- +- err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval); +- if (err) +- return err; +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_writeback_centisecs_attr = +- __ATTR(dirty_writeback_centisecs, 0664, +- sbi_dirty_writeback_centisecs_show, +- sbi_dirty_writeback_centisecs_store); +- +-static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%lu\n", +- sbi->h_wb->dirty_file_bg_bytes); +-} +- +-static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned long file_background_bytes = 0; +- int err; +- +- err = kstrtoul(buf, 10, &file_background_bytes); +- if (err) +- return err; +- if (file_background_bytes == 0) +- return -EINVAL; +- +- sbi->h_wb->dirty_fs_bytes = +- max(sbi->h_wb->dirty_fs_bytes, file_background_bytes); +- sbi->h_wb->dirty_fs_bg_bytes = +- max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes); +- sbi->h_wb->dirty_file_bytes = +- max(sbi->h_wb->dirty_file_bytes, file_background_bytes); +- +- sbi->h_wb->dirty_file_bg_bytes = file_background_bytes; +- hmdfs_calculate_dirty_thresh(sbi->h_wb); +- hmdfs_update_ratelimit(sbi->h_wb); +- return len; +-} +- +-static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes); +-} +- +-static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned long fs_background_bytes = 0; +- int err; +- +- err = kstrtoul(buf, 10, &fs_background_bytes); +- if (err) +- return err; +- if (fs_background_bytes == 0) +- return -EINVAL; +- +- sbi->h_wb->dirty_file_bg_bytes = +- min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes); +- sbi->h_wb->dirty_fs_bytes = +- max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes); +- +- sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes; +- hmdfs_calculate_dirty_thresh(sbi->h_wb); +- hmdfs_update_ratelimit(sbi->h_wb); +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_file_background_bytes_attr = +- __ATTR(dirty_file_background_bytes, 
0644, +- sbi_dirty_file_background_bytes_show, +- sbi_dirty_file_background_bytes_store); +-static struct sbi_attribute sbi_dirty_fs_background_bytes_attr = +- __ATTR(dirty_fs_background_bytes, 0644, +- sbi_dirty_fs_background_bytes_show, +- sbi_dirty_fs_background_bytes_store); +- +-static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes); +-} +- +-static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned long file_bytes = 0; +- int err; +- +- err = kstrtoul(buf, 10, &file_bytes); +- if (err) +- return err; +- if (file_bytes == 0) +- return -EINVAL; +- +- sbi->h_wb->dirty_file_bg_bytes = +- min(sbi->h_wb->dirty_file_bg_bytes, file_bytes); +- sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes); +- +- sbi->h_wb->dirty_file_bytes = file_bytes; +- hmdfs_calculate_dirty_thresh(sbi->h_wb); +- hmdfs_update_ratelimit(sbi->h_wb); +- return len; +-} +- +-static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes); +-} +- +-static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned long fs_bytes = 0; +- int err; +- +- err = kstrtoul(buf, 10, &fs_bytes); +- if (err) +- return err; +- if (fs_bytes == 0) +- return -EINVAL; +- +- sbi->h_wb->dirty_file_bg_bytes = +- min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes); +- sbi->h_wb->dirty_file_bytes = +- min(sbi->h_wb->dirty_file_bytes, fs_bytes); +- sbi->h_wb->dirty_fs_bg_bytes = +- min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes); +- +- sbi->h_wb->dirty_fs_bytes = fs_bytes; +- hmdfs_calculate_dirty_thresh(sbi->h_wb); +- hmdfs_update_ratelimit(sbi->h_wb); +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_file_bytes_attr = +- __ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show, +- sbi_dirty_file_bytes_store); +-static struct sbi_attribute sbi_dirty_fs_bytes_attr = +- __ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show, +- sbi_dirty_fs_bytes_store); +- +-static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%u\n", +- sbi->h_wb->writeback_timelimit / HZ); +-} +- +-static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned int time_limit = 0; +- int err; +- +- err = kstrtouint(buf, 10, &time_limit); +- if (err) +- return err; +- if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ)) +- return -EINVAL; +- +- sbi->h_wb->writeback_timelimit = time_limit * HZ; +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_writeback_timelimit_attr = +-__ATTR(dirty_writeback_timelimit, 0644, sbi_dirty_writeback_timelimit_show, +- sbi_dirty_writeback_timelimit_store); +- +-static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info 
*sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%lu\n", +- sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT); +-} +- +-static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned long bw_thresh_lowerbytes = 0; +- unsigned long bw_thresh_lowerlimit; +- int err; +- +- err = kstrtoul(buf, 10, &bw_thresh_lowerbytes); +- if (err) +- return err; +- +- bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE); +- if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT || +- bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT) +- return -EINVAL; +- +- sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit; +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr = +-__ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show, +- sbi_dirty_thresh_lowerlimit_store); +- +-static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%d\n", +- sbi->h_wb->dirty_auto_threshold); +-} +- +-static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- bool dirty_auto_threshold = false; +- int err; +- +- err = kstrtobool(buf, &dirty_auto_threshold); +- if (err) +- return err; +- +- sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold; +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_writeback_autothresh_attr = +-__ATTR(dirty_writeback_autothresh, 0644, sbi_dirty_writeback_autothresh_show, +- sbi_dirty_writeback_autothresh_store); +- +-static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%d\n", +- sbi->h_wb->dirty_writeback_control); +-} +- +-static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned int dirty_writeback_control = 0; +- int err; +- +- err = kstrtouint(buf, 10, &dirty_writeback_control); +- if (err) +- return err; +- +- sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control; +- return len; +-} +- +-static struct sbi_attribute sbi_dirty_writeback_control_attr = +- __ATTR(dirty_writeback_control, 0644, sbi_dirty_writeback_control_show, +- sbi_dirty_writeback_control_store); +- +-static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%d\n", +- sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT); +-} +- +-static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb; +- int dirty_thresh_mb; +- unsigned long long pages; +- int err; +- +- err = kstrtoint(buf, 10, &dirty_thresh_mb); +- if (err) +- return err; +- +- if (dirty_thresh_mb <= 0) +- return -EINVAL; +- +- pages = dirty_thresh_mb; +- pages <<= HMDFS_MB_TO_PAGE_SHIFT; +- if (pages > INT_MAX) { +- hmdfs_err("Illegal dirty_thresh_mb %d, its page count beyonds max int", +- 
dirty_thresh_mb); +- return -EINVAL; +- } +- +- hswb->dirty_thresh_pg = (unsigned int)pages; +- return len; +-} +- +-static struct sbi_attribute sbi_srv_dirty_thresh_attr = +-__ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show, +- sbi_srv_dirty_thresh_store); +- +- +-static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%d\n", +- sbi->h_swb->dirty_writeback_control); +-} +- +-static ssize_t sbi_srv_dirty_wb_conctrol_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb; +- bool dirty_writeback_control = true; +- int err; +- +- err = kstrtobool(buf, &dirty_writeback_control); +- if (err) +- return err; +- +- hswb->dirty_writeback_control = dirty_writeback_control; +- +- return len; +-} +- +-static struct sbi_attribute sbi_srv_dirty_wb_control_attr = +-__ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show, +- sbi_srv_dirty_wb_conctrol_store); +- +-static ssize_t sbi_dcache_timeout_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout); +-} +- +-static ssize_t sbi_dcache_timeout_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, size_t len) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned int timeout; +- int err; +- +- err = kstrtouint(buf, 0, &timeout); +- if (err) +- return err; +- +- /* zero is invalid, and it doesn't mean no cache */ +- if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT) +- return -EINVAL; +- +- sbi->dcache_timeout = timeout; +- +- return len; +-} +- +-static struct sbi_attribute sbi_dcache_timeout_attr = +- __ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show, +- sbi_dcache_timeout_store); +- +-static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- return snprintf(buf, PAGE_SIZE, "%u\n", +- to_sbi(kobj)->write_cache_timeout); +-} +- +-static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj, +- struct sbi_attribute *attr, const char *buf, size_t len) +-{ +- int ret; +- unsigned int timeout; +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- ret = kstrtouint(buf, 0, &timeout); +- if (ret) +- return ret; +- +- /* set write_cache_timeout to 0 means this functionality is disabled */ +- sbi->write_cache_timeout = timeout; +- +- return len; +-} +- +-static struct sbi_attribute sbi_write_cache_timeout_sec_attr = +- __ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show, +- sbi_write_cache_timeout_sec_store); +- +-static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay); +-} +- +-static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj, +- struct sbi_attribute *attr, +- const char *buf, +- size_t len) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- unsigned int delay = 0; +- int err; +- +- err = kstrtouint(buf, 10, &delay); +- if (err) +- return err; +- +- sbi->async_cb_delay = delay; +- +- return len; +-} +- +-static struct sbi_attribute sbi_node_evt_cb_delay_attr = +-__ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show, +- sbi_node_evt_cb_delay_store); 
+- +-static int calc_idr_number(struct idr *idr) +-{ +- void *entry = NULL; +- int id; +- int number = 0; +- +- idr_for_each_entry(idr, entry, id) { +- number++; +- if (number % HMDFS_IDR_RESCHED_COUNT == 0) +- cond_resched(); +- } +- +- return number; +-} +- +-static ssize_t sbi_show_idr_stats(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf, bool showmsg) +-{ +- ssize_t size = 0; +- int count; +- struct hmdfs_sb_info *sbi = NULL; +- struct hmdfs_peer *peer = NULL; +- struct idr *idr = NULL; +- +- sbi = to_sbi(kobj); +- +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(peer, &sbi->connections.node_list, list) { +- idr = showmsg ? &peer->msg_idr : &peer->file_id_idr; +- count = calc_idr_number(idr); +- size += snprintf(buf + size, PAGE_SIZE - size, +- "device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n", +- peer->device_id, count, idr_get_cursor(idr)); +- if (size >= PAGE_SIZE) { +- size = PAGE_SIZE; +- break; +- } +- } +- mutex_unlock(&sbi->connections.node_lock); +- +- return size; +-} +- +-static ssize_t pending_message_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- return sbi_show_idr_stats(kobj, attr, buf, true); +-} +- +-static struct sbi_attribute sbi_pending_message_attr = +- __ATTR_RO(pending_message); +- +-static ssize_t peer_opened_fd_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- return sbi_show_idr_stats(kobj, attr, buf, false); +-} +- +-static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd); +- +-static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj, +- struct sbi_attribute *attr, +- char *buf) +-{ +- const struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active); +-} +- +-static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj, +- struct sbi_attribute *attr, const char *buf, size_t len) +-{ +- int ret; +- unsigned int max_active; +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- ret = kstrtouint(buf, 0, &max_active); +- if (ret) +- return ret; +- +- sbi->async_req_max_active = max_active; +- +- return len; +-} +- +-static struct sbi_attribute sbi_srv_req_max_active_attr = +-__ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show, +- sbi_srv_req_max_active_attr_store); +- +- +-static ssize_t cache_file_show(struct hmdfs_sb_info *sbi, +- struct list_head *head, char *buf) +-{ +- struct cache_file_node *cfn = NULL; +- ssize_t pos = 0; +- +- mutex_lock(&sbi->cache_list_lock); +- list_for_each_entry(cfn, head, list) { +- pos += snprintf(buf + pos, PAGE_SIZE - pos, +- "dev_id: %s relative_path: %s\n", +- cfn->cid, cfn->relative_path); +- if (pos >= PAGE_SIZE) { +- pos = PAGE_SIZE; +- break; +- } +- } +- mutex_unlock(&sbi->cache_list_lock); +- +- return pos; +-} +- +-static ssize_t client_cache_file_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf); +-} +-static ssize_t server_cache_file_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf); +-} +- +-static struct sbi_attribute sbi_server_cache_file_attr = +- __ATTR_RO(server_cache_file); +-static struct sbi_attribute sbi_client_cache_file_attr = +- __ATTR_RO(client_cache_file); +- +-static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr, +- char *buf) +-{ +- return snprintf(buf, PAGE_SIZE, "%u\n", 
to_sbi(kobj)->seq); +-} +- +-static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq); +- +-static ssize_t peers_sum_attr_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- struct hmdfs_peer *node = NULL; +- unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0, +- restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0, rebuild_invalid = 0, +- rebuild_time = 0; +- unsigned long long stash_ok_pages = 0, stash_fail_pages = 0, +- restore_ok_pages = 0, restore_fail_pages = 0; +- +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(node, &sbi->connections.node_list, list) { +- peer_get(node); +- mutex_unlock(&sbi->connections.node_lock); +- stash_ok += node->stats.stash.total_ok; +- stash_fail += node->stats.stash.total_fail; +- stash_ok_pages += node->stats.stash.ok_pages; +- stash_fail_pages += node->stats.stash.fail_pages; +- restore_ok += node->stats.restore.total_ok; +- restore_fail += node->stats.restore.total_fail; +- restore_ok_pages += node->stats.restore.ok_pages; +- restore_fail_pages += node->stats.restore.fail_pages; +- rebuild_ok += node->stats.rebuild.total_ok; +- rebuild_fail += node->stats.rebuild.total_fail; +- rebuild_invalid += node->stats.rebuild.total_invalid; +- rebuild_time += node->stats.rebuild.time; +- peer_put(node); +- mutex_lock(&sbi->connections.node_lock); +- } +- mutex_unlock(&sbi->connections.node_lock); +- +- return snprintf(buf, PAGE_SIZE, +- "%u %u %llu %llu\n" +- "%u %u %llu %llu\n" +- "%u %u %u %u\n", +- stash_ok, stash_fail, stash_ok_pages, stash_fail_pages, +- restore_ok, restore_fail, restore_ok_pages, +- restore_fail_pages, rebuild_ok, rebuild_fail, +- rebuild_invalid, rebuild_time); +-} +- +-static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr); +- +-const char * const flag_name[] = { +- "READPAGES", +- "READPAGES_OPEN", +- "ATOMIC_OPEN", +-}; +- +-static ssize_t fill_features(char *buf, unsigned long long flag) +-{ +- int i; +- ssize_t pos = 0; +- bool sep = false; +- int flag_name_count = ARRAY_SIZE(flag_name) / sizeof(flag_name[0]); +- +- for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) { +- if (!(flag & BIT(i))) +- continue; +- +- if (sep) +- pos += snprintf(buf + pos, PAGE_SIZE - pos, "|"); +- sep = true; +- +- if (pos >= PAGE_SIZE) { +- pos = PAGE_SIZE; +- break; +- } +- +- if (i < flag_name_count && flag_name[i]) +- pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s", +- flag_name[i]); +- else +- pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i); +- +- if (pos >= PAGE_SIZE) { +- pos = PAGE_SIZE; +- break; +- } +- } +- pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n"); +- if (pos >= PAGE_SIZE) +- pos = PAGE_SIZE; +- +- return pos; +-} +- +-static ssize_t sbi_features_show(struct kobject *kobj, +- struct sbi_attribute *attr, char *buf) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- return fill_features(buf, sbi->s_features); +-} +- +-static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444, +- sbi_features_show, NULL); +- +-static struct attribute *sbi_attrs[] = { +- &sbi_cmd_attr.attr, +- &sbi_status_attr.attr, +- &sbi_statistic_attr.attr, +- &sbi_dcache_precision_attr.attr, +- &sbi_dcache_threshold_attr.attr, +- &sbi_dcache_timeout_attr.attr, +- &sbi_write_cache_timeout_sec_attr.attr, +- &sbi_local_op_attr.attr, +- &sbi_delay_resp_attr.attr, +- &sbi_wb_timeout_ms_attr.attr, +- &sbi_dirty_writeback_centisecs_attr.attr, +- &sbi_dirty_file_background_bytes_attr.attr, +- &sbi_dirty_fs_background_bytes_attr.attr, +- 
&sbi_dirty_file_bytes_attr.attr, +- &sbi_dirty_fs_bytes_attr.attr, +- &sbi_dirty_writeback_autothresh_attr.attr, +- &sbi_dirty_writeback_timelimit_attr.attr, +- &sbi_dirty_thresh_lowerlimit_attr.attr, +- &sbi_dirty_writeback_control_attr.attr, +- &sbi_dirty_writeback_stats_attr.attr, +- &sbi_srv_dirty_thresh_attr.attr, +- &sbi_srv_dirty_wb_control_attr.attr, +- &sbi_node_evt_cb_delay_attr.attr, +- &sbi_srv_req_max_active_attr.attr, +- &sbi_pending_message_attr.attr, +- &sbi_peer_opened_fd_attr.attr, +- &sbi_server_cache_file_attr.attr, +- &sbi_client_cache_file_attr.attr, +- &sbi_seq_attr.attr, +- &sbi_peers_attr.attr, +- &sbi_features_attr.attr, +- NULL, +-}; +-ATTRIBUTE_GROUPS(sbi); +- +-static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr, +- char *buf) +-{ +- struct sbi_attribute *sbi_attr = to_sbi_attr(attr); +- +- if (!sbi_attr->show) +- return -EIO; +- return sbi_attr->show(kobj, sbi_attr, buf); +-} +- +-static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr, +- const char *buf, size_t len) +-{ +- struct sbi_attribute *sbi_attr = to_sbi_attr(attr); +- +- if (!sbi_attr->store) +- return -EIO; +- return sbi_attr->store(kobj, sbi_attr, buf, len); +-} +- +-static const struct sysfs_ops sbi_sysfs_ops = { +- .show = sbi_attr_show, +- .store = sbi_attr_store, +-}; +- +-static void sbi_release(struct kobject *kobj) +-{ +- struct hmdfs_sb_info *sbi = to_sbi(kobj); +- +- complete(&sbi->s_kobj_unregister); +-} +- +-static struct kobj_type sbi_ktype = { +- .sysfs_ops = &sbi_sysfs_ops, +- .default_groups = sbi_groups, +- .release = sbi_release, +-}; +- +-static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x) +-{ +- return container_of(x, struct sbi_cmd_attribute, attr); +-} +- +-static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x) +-{ +- return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj); +-} +- +-static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr, +- char *buf) +-{ +- int cmd = to_sbi_cmd_attr(attr)->command; +- struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj); +- +- if (cmd < 0 || cmd >= F_SIZE) +- return 0; +- +- return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd)); +-} +- +-static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr, +- const char *buf, size_t len) +-{ +- unsigned int value; +- int cmd = to_sbi_cmd_attr(attr)->command; +- int ret = kstrtouint(skip_spaces(buf), 0, &value); +- struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj); +- +- if (cmd < 0 || cmd >= F_SIZE) +- return -EINVAL; +- +- if (!ret) +- set_cmd_timeout(sbi, cmd, value); +- +- return ret ? 
ret : len; +-} +- +-#define HMDFS_CMD_ATTR(_name, _cmd) \ +- static struct sbi_cmd_attribute hmdfs_attr_##_name = { \ +- .attr = { .name = __stringify(_name), .mode = 0664 }, \ +- .command = (_cmd), \ +- } +- +-HMDFS_CMD_ATTR(open, F_OPEN); +-HMDFS_CMD_ATTR(release, F_RELEASE); +-HMDFS_CMD_ATTR(readpage, F_READPAGE); +-HMDFS_CMD_ATTR(writepage, F_WRITEPAGE); +-HMDFS_CMD_ATTR(iterate, F_ITERATE); +-HMDFS_CMD_ATTR(rmdir, F_RMDIR); +-HMDFS_CMD_ATTR(unlink, F_UNLINK); +-HMDFS_CMD_ATTR(rename, F_RENAME); +-HMDFS_CMD_ATTR(setattr, F_SETATTR); +-HMDFS_CMD_ATTR(statfs, F_STATFS); +-HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH); +-HMDFS_CMD_ATTR(getattr, F_GETATTR); +-HMDFS_CMD_ATTR(fsync, F_FSYNC); +-HMDFS_CMD_ATTR(syncfs, F_SYNCFS); +-HMDFS_CMD_ATTR(getxattr, F_GETXATTR); +-HMDFS_CMD_ATTR(setxattr, F_SETXATTR); +-HMDFS_CMD_ATTR(listxattr, F_LISTXATTR); +- +-#define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr) +- +-static struct attribute *sbi_timeout_attrs[] = { +- ATTR_LIST(open), ATTR_LIST(release), +- ATTR_LIST(readpage), ATTR_LIST(writepage), +- ATTR_LIST(iterate), ATTR_LIST(rmdir), +- ATTR_LIST(unlink), ATTR_LIST(rename), +- ATTR_LIST(setattr), +- ATTR_LIST(statfs), ATTR_LIST(drop_push), +- ATTR_LIST(getattr), ATTR_LIST(fsync), +- ATTR_LIST(syncfs), ATTR_LIST(getxattr), +- ATTR_LIST(setxattr), ATTR_LIST(listxattr), +- NULL +-}; +-ATTRIBUTE_GROUPS(sbi_timeout); +- +-static const struct sysfs_ops sbi_cmd_sysfs_ops = { +- .show = cmd_timeout_show, +- .store = cmd_timeout_store, +-}; +- +-static void sbi_timeout_release(struct kobject *kobj) +-{ +- struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info, +- s_cmd_timeout_kobj); +- +- complete(&sbi->s_timeout_kobj_unregister); +-} +- +-static struct kobj_type sbi_timeout_ktype = { +- .sysfs_ops = &sbi_cmd_sysfs_ops, +- .default_groups = sbi_timeout_groups, +- .release = sbi_timeout_release, +-}; +- +-void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi) +-{ +- kobject_put(&sbi->s_cmd_timeout_kobj); +- wait_for_completion(&sbi->s_timeout_kobj_unregister); +- kobject_put(&sbi->kobj); +- wait_for_completion(&sbi->s_kobj_unregister); +-} +- +-int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi) +-{ +- int ret; +- struct kobject *kobj = NULL; +- +- mutex_lock(&hmdfs_sysfs_mutex); +- kobj = kset_find_obj(hmdfs_kset, name); +- if (kobj) { +- hmdfs_err("mount failed, already exist"); +- kobject_put(kobj); +- mutex_unlock(&hmdfs_sysfs_mutex); +- return -EEXIST; +- } +- +- sbi->kobj.kset = hmdfs_kset; +- init_completion(&sbi->s_kobj_unregister); +- ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype, +- &hmdfs_kset->kobj, "%s", name); +- sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000), KGIDT_INIT(1000)); +- mutex_unlock(&hmdfs_sysfs_mutex); +- +- if (ret) { +- kobject_put(&sbi->kobj); +- wait_for_completion(&sbi->s_kobj_unregister); +- return ret; +- } +- +- init_completion(&sbi->s_timeout_kobj_unregister); +- ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype, +- &sbi->kobj, "cmd_timeout"); +- if (ret) { +- hmdfs_release_sysfs(sbi); +- return ret; +- } +- +- kobject_uevent(&sbi->kobj, KOBJ_ADD); +- return 0; +-} +- +-void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi) +-{ +- kobject_del(&sbi->s_cmd_timeout_kobj); +- kobject_del(&sbi->kobj); +-} +- +-static inline int to_sysfs_fmt_evt(unsigned int evt) +-{ +- return evt == RAW_NODE_EVT_NR ? 
-1 : evt; +-} +- +-static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr, +- char *buf) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- return fill_features(buf, peer->features); +-} +- +-static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr, +- char *buf) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- return snprintf(buf, PAGE_SIZE, +- "cur_async evt %d seq %u\n" +- "cur_sync evt %d seq %u\n" +- "pending evt %d seq %u\n" +- "merged evt %u\n" +- "dup_drop evt %u %u\n" +- "waiting evt %u %u\n" +- "seq_tbl %u %u %u %u\n" +- "seq_rd_idx %u\n" +- "seq_wr_idx %u\n", +- to_sysfs_fmt_evt(peer->cur_evt[0]), +- peer->cur_evt_seq[0], +- to_sysfs_fmt_evt(peer->cur_evt[1]), +- peer->cur_evt_seq[1], +- to_sysfs_fmt_evt(peer->pending_evt), +- peer->pending_evt_seq, +- peer->merged_evt, +- peer->dup_evt[RAW_NODE_EVT_OFF], +- peer->dup_evt[RAW_NODE_EVT_ON], +- peer->waiting_evt[RAW_NODE_EVT_OFF], +- peer->waiting_evt[RAW_NODE_EVT_ON], +- peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2], +- peer->seq_tbl[3], +- peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR, +- peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR); +-} +- +-static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr, +- char *buf) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- return snprintf(buf, PAGE_SIZE, +- "cur_ok %u\n" +- "cur_nothing %u\n" +- "cur_fail %u\n" +- "total_ok %u\n" +- "total_nothing %u\n" +- "total_fail %u\n" +- "ok_pages %llu\n" +- "fail_pages %llu\n", +- peer->stats.stash.cur_ok, +- peer->stats.stash.cur_nothing, +- peer->stats.stash.cur_fail, +- peer->stats.stash.total_ok, +- peer->stats.stash.total_nothing, +- peer->stats.stash.total_fail, +- peer->stats.stash.ok_pages, +- peer->stats.stash.fail_pages); +-} +- +-static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr, +- char *buf) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- return snprintf(buf, PAGE_SIZE, +- "cur_ok %u\n" +- "cur_fail %u\n" +- "cur_keep %u\n" +- "total_ok %u\n" +- "total_fail %u\n" +- "total_keep %u\n" +- "ok_pages %llu\n" +- "fail_pages %llu\n", +- peer->stats.restore.cur_ok, +- peer->stats.restore.cur_fail, +- peer->stats.restore.cur_keep, +- peer->stats.restore.total_ok, +- peer->stats.restore.total_fail, +- peer->stats.restore.total_keep, +- peer->stats.restore.ok_pages, +- peer->stats.restore.fail_pages); +-} +- +-static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr, +- char *buf) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- return snprintf(buf, PAGE_SIZE, +- "cur_ok %u\n" +- "cur_fail %u\n" +- "cur_invalid %u\n" +- "total_ok %u\n" +- "total_fail %u\n" +- "total_invalid %u\n" +- "time %u\n", +- peer->stats.rebuild.cur_ok, +- peer->stats.rebuild.cur_fail, +- peer->stats.rebuild.cur_invalid, +- peer->stats.rebuild.total_ok, +- peer->stats.rebuild.total_fail, +- peer->stats.rebuild.total_invalid, +- peer->stats.rebuild.time); +-} +- +-static struct peer_attribute peer_features_attr = __ATTR_RO(features); +-static struct peer_attribute peer_event_attr = __ATTR_RO(event); +-static struct peer_attribute peer_stash_attr = __ATTR_RO(stash); +-static struct peer_attribute peer_restore_attr = __ATTR_RO(restore); +-static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild); +- +-static struct attribute *peer_attrs[] = { +- &peer_features_attr.attr, +- &peer_event_attr.attr, +- &peer_stash_attr.attr, +- &peer_restore_attr.attr, +- &peer_rebuild_attr.attr, +- NULL, +-}; +-ATTRIBUTE_GROUPS(peer); +- +-static ssize_t 
peer_attr_show(struct kobject *kobj, struct attribute *attr, +- char *buf) +-{ +- struct peer_attribute *peer_attr = to_peer_attr(attr); +- +- if (!peer_attr->show) +- return -EIO; +- return peer_attr->show(kobj, peer_attr, buf); +-} +- +-static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr, +- const char *buf, size_t len) +-{ +- struct peer_attribute *peer_attr = to_peer_attr(attr); +- +- if (!peer_attr->store) +- return -EIO; +- return peer_attr->store(kobj, peer_attr, buf, len); +-} +- +-static const struct sysfs_ops peer_sysfs_ops = { +- .show = peer_attr_show, +- .store = peer_attr_store, +-}; +- +-static void peer_sysfs_release(struct kobject *kobj) +-{ +- struct hmdfs_peer *peer = to_peer(kobj); +- +- complete(&peer->kobj_unregister); +-} +- +-static struct kobj_type peer_ktype = { +- .sysfs_ops = &peer_sysfs_ops, +- .default_groups = peer_groups, +- .release = peer_sysfs_release, +-}; +- +-int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi, +- struct hmdfs_peer *peer) +-{ +- int err = 0; +- +- init_completion(&peer->kobj_unregister); +- err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj, +- "peer_%llu", peer->device_id); +- return err; +-} +- +-void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer) +-{ +- kobject_del(&peer->kobj); +- kobject_put(&peer->kobj); +- wait_for_completion(&peer->kobj_unregister); +-} +- +-void notify(struct hmdfs_peer *node, struct notify_param *param) +-{ +- struct hmdfs_sb_info *sbi = node->sbi; +- int in_len; +- +- if (!param) +- return; +- spin_lock(&sbi->notify_fifo_lock); +- in_len = +- kfifo_in(&sbi->notify_fifo, param, sizeof(struct notify_param)); +- spin_unlock(&sbi->notify_fifo_lock); +- if (in_len != sizeof(struct notify_param)) +- return; +- sysfs_notify(&sbi->kobj, NULL, "cmd"); +-} +- +-int hmdfs_sysfs_init(void) +-{ +- hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj); +- if (!hmdfs_kset) +- return -ENOMEM; +- +- return 0; +-} +- +-void hmdfs_sysfs_exit(void) +-{ +- kset_unregister(hmdfs_kset); +- hmdfs_kset = NULL; +-} +diff --git a/fs/hmdfs/comm/device_node.h b/fs/hmdfs/comm/device_node.h +deleted file mode 100644 +index a0c596991..000000000 +--- a/fs/hmdfs/comm/device_node.h ++++ /dev/null +@@ -1,108 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/device_node.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#ifndef HMDFS_DEVICE_NODE_H +-#define HMDFS_DEVICE_NODE_H +- +-#include "hmdfs.h" +-#include "transport.h" +- +-enum CTRL_NODE_CMD { +- CMD_UPDATE_SOCKET = 0, +- CMD_UPDATE_DEVSL, +- CMD_OFF_LINE, +- CMD_CNT, +-}; +- +-struct update_socket_param { +- int32_t cmd; +- int32_t newfd; +- uint32_t devsl; +- uint8_t status; +- uint8_t masterkey[HMDFS_KEY_SIZE]; +- uint8_t cid[HMDFS_CID_SIZE]; +-} __packed; +- +-struct update_devsl_param { +- int32_t cmd; +- uint32_t devsl; +- uint8_t cid[HMDFS_CID_SIZE]; +-} __attribute__((packed)); +- +-struct offline_param { +- int32_t cmd; +- uint8_t remote_cid[HMDFS_CID_SIZE]; +-} __packed; +- +-struct offline_all_param { +- int32_t cmd; +-} __packed; +- +-enum NOTIFY { +- NOTIFY_GET_SESSION, +- NOTIFY_OFFLINE, +- NOTIFY_NONE, +- NOTIFY_CNT, +-}; +- +-struct notify_param { +- int32_t notify; +- int32_t fd; +- uint8_t remote_cid[HMDFS_CID_SIZE]; +-} __packed; +- +-struct sbi_attribute { +- struct attribute attr; +- ssize_t (*show)(struct kobject *kobj, struct sbi_attribute *attr, +- char *buf); +- ssize_t (*store)(struct kobject *kobj, struct sbi_attribute *attr, +- const char *buf, size_t len); +-}; +- +-struct peer_attribute { +- struct attribute attr; +- ssize_t (*show)(struct kobject *kobj, struct peer_attribute *attr, +- char *buf); +- ssize_t (*store)(struct kobject *kobj, struct peer_attribute *attr, +- const char *buf, size_t len); +-}; +- +-struct sbi_cmd_attribute { +- struct attribute attr; +- int command; +-}; +- +-void notify(struct hmdfs_peer *node, struct notify_param *param); +-int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi); +-void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi); +-void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi); +-int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi, +- struct hmdfs_peer *peer); +-void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer); +-int hmdfs_sysfs_init(void); +-void hmdfs_sysfs_exit(void); +- +-static inline struct sbi_attribute *to_sbi_attr(struct attribute *x) +-{ +- return container_of(x, struct sbi_attribute, attr); +-} +- +-static inline struct hmdfs_sb_info *to_sbi(struct kobject *x) +-{ +- return container_of(x, struct hmdfs_sb_info, kobj); +-} +- +-static inline struct peer_attribute *to_peer_attr(struct attribute *x) +-{ +- return container_of(x, struct peer_attribute, attr); +-} +- +-static inline struct hmdfs_peer *to_peer(struct kobject *x) +-{ +- return container_of(x, struct hmdfs_peer, kobj); +-} +-#endif +diff --git a/fs/hmdfs/comm/message_verify.c b/fs/hmdfs/comm/message_verify.c +deleted file mode 100644 +index 4c5933907..000000000 +--- a/fs/hmdfs/comm/message_verify.c ++++ /dev/null +@@ -1,980 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/comm/message_verify.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include "message_verify.h" +- +-#include +-#include +-#include +- +-#include "connection.h" +-#include "hmdfs.h" +-#include "hmdfs_server.h" +- +-size_t message_length[C_FLAG_SIZE][F_SIZE][HMDFS_MESSAGE_MIN_MAX]; +-bool need_response[F_SIZE]; +- +-void hmdfs_message_verify_init(void) +-{ +- int flag, cmd; +- +- for (cmd = 0; cmd < F_SIZE; cmd++) +- need_response[cmd] = true; +- need_response[F_RELEASE] = false; +- need_response[F_CONNECT_REKEY] = false; +- need_response[F_DROP_PUSH] = false; +- +- for (flag = 0; flag < C_FLAG_SIZE; flag++) { +- for (cmd = 0; cmd < F_SIZE; cmd++) { +- message_length[flag][cmd][HMDFS_MESSAGE_MIN_INDEX] = 1; +- message_length[flag][cmd][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[flag][cmd][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- } +- } +- +- message_length[C_REQUEST][F_OPEN][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct open_request); +- message_length[C_REQUEST][F_OPEN][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct open_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_OPEN][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_OPEN][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_OPEN][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct open_response); +- message_length[C_RESPONSE][F_OPEN][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_ATOMIC_OPEN][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct atomic_open_request); +- message_length[C_REQUEST][F_ATOMIC_OPEN][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct atomic_open_request) + PATH_MAX + NAME_MAX + 1; +- message_length[C_REQUEST][F_ATOMIC_OPEN][HMDFS_MESSAGE_LEN_JUDGE_INDEX] +- = MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_ATOMIC_OPEN][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_ATOMIC_OPEN][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct atomic_open_response); +- message_length[C_RESPONSE][F_ATOMIC_OPEN][HMDFS_MESSAGE_LEN_JUDGE_INDEX] +- = MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_RELEASE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct release_request); +- message_length[C_REQUEST][F_RELEASE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct release_request); +- message_length[C_REQUEST][F_RELEASE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_FSYNC][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct fsync_request); +- message_length[C_REQUEST][F_FSYNC][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct fsync_request); +- message_length[C_REQUEST][F_FSYNC][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- message_length[C_RESPONSE][F_FSYNC][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_FSYNC][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_FSYNC][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_READPAGE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct readpage_request); +- message_length[C_REQUEST][F_READPAGE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct readpage_request); +- message_length[C_REQUEST][F_READPAGE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- message_length[C_RESPONSE][F_READPAGE][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_READPAGE][HMDFS_MESSAGE_MAX_INDEX] = +- HMDFS_PAGE_SIZE; +- message_length[C_RESPONSE][F_READPAGE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- +- message_length[C_REQUEST][F_WRITEPAGE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct writepage_request) 
+ HMDFS_PAGE_SIZE; +- message_length[C_REQUEST][F_WRITEPAGE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct writepage_request) + HMDFS_PAGE_SIZE; +- message_length[C_REQUEST][F_WRITEPAGE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- message_length[C_RESPONSE][F_WRITEPAGE][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_WRITEPAGE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct writepage_response); +- message_length[C_RESPONSE][F_WRITEPAGE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_ITERATE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct readdir_request); +- message_length[C_REQUEST][F_ITERATE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct readdir_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_ITERATE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_ITERATE][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_ITERATE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(__le64) + HMDFS_MAX_MESSAGE_LEN; +- message_length[C_RESPONSE][F_ITERATE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- +- message_length[C_REQUEST][F_MKDIR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct mkdir_request); +- message_length[C_REQUEST][F_MKDIR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct mkdir_request) + PATH_MAX + NAME_MAX + 2; +- message_length[C_REQUEST][F_MKDIR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_MKDIR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct hmdfs_inodeinfo_response); +- message_length[C_RESPONSE][F_MKDIR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct hmdfs_inodeinfo_response); +- message_length[C_RESPONSE][F_MKDIR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_CREATE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct create_request); +- message_length[C_REQUEST][F_CREATE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct create_request) + PATH_MAX + NAME_MAX + 2; +- message_length[C_REQUEST][F_CREATE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_CREATE][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct hmdfs_inodeinfo_response); +- message_length[C_RESPONSE][F_CREATE][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct hmdfs_inodeinfo_response); +- message_length[C_RESPONSE][F_CREATE][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_RMDIR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct rmdir_request); +- message_length[C_REQUEST][F_RMDIR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct rmdir_request) + PATH_MAX + NAME_MAX + 2; +- message_length[C_REQUEST][F_RMDIR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_RMDIR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_RMDIR][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_RMDIR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_UNLINK][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct unlink_request); +- message_length[C_REQUEST][F_UNLINK][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct unlink_request) + PATH_MAX + NAME_MAX + 2; +- message_length[C_REQUEST][F_UNLINK][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_UNLINK][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_UNLINK][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_UNLINK][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; 
+- +- message_length[C_REQUEST][F_RENAME][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct rename_request); +- message_length[C_REQUEST][F_RENAME][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct rename_request) + 4 + 4 * PATH_MAX; +- message_length[C_REQUEST][F_RENAME][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_RENAME][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_RENAME][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_RENAME][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_SETATTR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct setattr_request); +- message_length[C_REQUEST][F_SETATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct setattr_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_SETATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_SETATTR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_SETATTR][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_SETATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_GETATTR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct getattr_request); +- message_length[C_REQUEST][F_GETATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct getattr_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_GETATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_GETATTR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_GETATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct getattr_response); +- message_length[C_RESPONSE][F_GETATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_STATFS][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct statfs_request); +- message_length[C_REQUEST][F_STATFS][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct statfs_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_STATFS][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_STATFS][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_STATFS][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct statfs_response); +- message_length[C_RESPONSE][F_STATFS][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_SYNCFS][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct syncfs_request); +- message_length[C_REQUEST][F_SYNCFS][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct syncfs_request); +- message_length[C_REQUEST][F_SYNCFS][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- message_length[C_RESPONSE][F_SYNCFS][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_SYNCFS][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_SYNCFS][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_GETXATTR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct getxattr_request); +- message_length[C_REQUEST][F_GETXATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct getxattr_request) + PATH_MAX + XATTR_NAME_MAX + 2; +- message_length[C_REQUEST][F_GETXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_GETXATTR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_GETXATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct getxattr_response) + HMDFS_XATTR_SIZE_MAX; +- message_length[C_RESPONSE][F_GETXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- +- 
message_length[C_REQUEST][F_SETXATTR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct setxattr_request); +- message_length[C_REQUEST][F_SETXATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct setxattr_request) + PATH_MAX + XATTR_NAME_MAX + +- HMDFS_XATTR_SIZE_MAX + 2; +- message_length[C_REQUEST][F_SETXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_SETXATTR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_SETXATTR][HMDFS_MESSAGE_MAX_INDEX] = 0; +- message_length[C_RESPONSE][F_SETXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_LISTXATTR][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct listxattr_request); +- message_length[C_REQUEST][F_LISTXATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct listxattr_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_LISTXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- message_length[C_RESPONSE][F_LISTXATTR][HMDFS_MESSAGE_MIN_INDEX] = 0; +- message_length[C_RESPONSE][F_LISTXATTR][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct listxattr_response) + HMDFS_LISTXATTR_SIZE_MAX; +- message_length[C_RESPONSE][F_LISTXATTR][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +- +- message_length[C_REQUEST][F_CONNECT_REKEY][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct connection_rekey_request); +- message_length[C_REQUEST][F_CONNECT_REKEY][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct connection_rekey_request); +- message_length[C_REQUEST][F_CONNECT_REKEY] +- [HMDFS_MESSAGE_LEN_JUDGE_INDEX] = MESSAGE_LEN_JUDGE_BIN; +- +- message_length[C_REQUEST][F_DROP_PUSH][HMDFS_MESSAGE_MIN_INDEX] = +- sizeof(struct drop_push_request); +- message_length[C_REQUEST][F_DROP_PUSH][HMDFS_MESSAGE_MAX_INDEX] = +- sizeof(struct drop_push_request) + PATH_MAX + 1; +- message_length[C_REQUEST][F_DROP_PUSH][HMDFS_MESSAGE_LEN_JUDGE_INDEX] = +- MESSAGE_LEN_JUDGE_RANGE; +-} +- +-static int is_str_msg_valid(char *msg, int str_len[], size_t str_num) +-{ +- int i = 0; +- int pos = 0; +- +- for (i = 0; i < str_num; i++) { +- if (msg[pos + str_len[i]] != '\0' || +- strnlen(msg + pos, PATH_MAX) != str_len[i]) +- return -EINVAL; +- pos += str_len[i] + 1; +- } +- +- return 0; +-} +- +-static int verify_open_req(size_t msg_len, void *msg) +-{ +- struct open_request *req = msg; +- int str_len[] = { req->path_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1) +- return -EINVAL; +- +- str_len[0] = req->path_len; +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_open_resp(size_t msg_len, void *msg) +-{ +- struct open_response *resp = msg; +- +- if (msg_len != sizeof(*resp)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_open_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_open_req(msg_len, msg); +- else +- return verify_open_resp(msg_len, msg); +-} +- +-static int verify_atomic_open_req(size_t msg_len, void *msg) +-{ +- struct atomic_open_request *req = msg; +- int str_len[] = { req->path_len, req->file_len}; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->file_len < 0 || req->file_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->file_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) 
+- return -EINVAL; +- +- return 0; +-} +- +-static int verify_atomic_open_resp(size_t msg_len, void *msg) +-{ +- struct atomic_open_response *resp = msg; +- +- if (msg_len != sizeof(*resp)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_atomic_open_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_atomic_open_req(msg_len, msg); +- else +- return verify_atomic_open_resp(msg_len, msg); +-} +- +-static int verify_iterate_req(size_t msg_len, void *msg) +-{ +- struct readdir_request *req = msg; +- int str_len[] = { req->path_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_iterate_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_iterate_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_mkdir_req(size_t msg_len, void *msg) +-{ +- struct mkdir_request *req = msg; +- int str_len[] = { req->path_len, req->name_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->name_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_mkdir_resp(size_t msg_len, void *msg) +-{ +- struct hmdfs_inodeinfo_response *resp = msg; +- +- if (msg_len != sizeof(*resp)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_mkdir_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_mkdir_req(msg_len, msg); +- else +- return verify_mkdir_resp(msg_len, msg); +-} +- +-static int verify_create_req(size_t msg_len, void *msg) +-{ +- struct create_request *req = msg; +- int str_len[] = { req->path_len, req->name_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->name_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_create_resp(size_t msg_len, void *msg) +-{ +- struct hmdfs_inodeinfo_response *resp = msg; +- +- if (msg_len != sizeof(*resp)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_create_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_create_req(msg_len, msg); +- else +- return verify_create_resp(msg_len, msg); +-} +- +-static int verify_rmdir_req(size_t msg_len, void *msg) +-{ +- struct rmdir_request *req = msg; +- int str_len[] = { req->path_len, req->name_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->name_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_rmdir_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) 
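/*
 * (Illustrative aside, not part of the original file.) Commands whose
 * responses carry no payload (for example, the F_RENAME and F_SETATTR
 * response rows above are min = max = 0) only need request-side checks,
 * which is why the wrappers for rmdir, unlink, setattr and drop_push all
 * share one shape. As a generic sketch with hypothetical names:
 *
 *	static int verify_req_only(int flag, size_t msg_len, void *msg,
 *				   int (*verify_req)(size_t, void *))
 *	{
 *		if (!msg || !msg_len)	// header-only message, nothing to check
 *			return 0;
 *		return flag == C_REQUEST ? verify_req(msg_len, msg) : 0;
 *	}
 */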
+- return 0; +- +- if (flag == C_REQUEST) +- return verify_rmdir_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_unlink_req(size_t msg_len, void *msg) +-{ +- struct unlink_request *req = msg; +- int str_len[] = { req->path_len, req->name_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->name_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_unlink_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_unlink_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_rename_req(size_t msg_len, void *msg) +-{ +- struct rename_request *req = msg; +- int str_len[] = { req->old_path_len, req->new_path_len, +- req->old_name_len, req->new_name_len }; +- +- if (req->old_path_len < 0 || req->old_path_len >= PATH_MAX || +- req->new_path_len < 0 || req->new_path_len >= PATH_MAX || +- req->old_name_len < 0 || req->old_name_len >= PATH_MAX || +- req->new_name_len < 0 || req->new_name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->old_path_len + 1 + +- req->new_path_len + 1 + req->old_name_len + 1 + +- req->new_name_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_rename_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_rename_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_setattr_req(size_t msg_len, void *msg) +-{ +- struct setattr_request *req = msg; +- int str_len[] = { req->path_len }; +- +- req = msg; +- if (req->path_len < 0 || req->path_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_setattr_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_setattr_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_getattr_req(size_t msg_len, void *msg) +-{ +- struct getattr_request *req = msg; +- int str_len[] = { req->path_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_getattr_resp(size_t msg_len, void *msg) +-{ +- struct getattr_response *resp = msg; +- +- if (msg_len != sizeof(*resp)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_getattr_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_getattr_req(msg_len, msg); +- else +- return verify_getattr_resp(msg_len, msg); +-} +- +-static int verify_getxattr_req(size_t msg_len, void *msg) +-{ +- struct getxattr_request *req = msg; +- int str_len[] = { req->path_len, req->name_len}; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) 
+ req->path_len + 1 + req->name_len + 1) +- return -EINVAL; +- +- if (req->name_len > XATTR_NAME_MAX || req->size < 0 || +- req->size > XATTR_SIZE_MAX) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_getxattr_resp(size_t msg_len, void *msg) +-{ +- struct getxattr_response *resp = msg; +- +- if (resp->size != sizeof(*resp->value)) +- return -EINVAL; +- +- if (msg_len < sizeof(*resp)) +- return -EINVAL; +- +- if (resp->size > XATTR_SIZE_MAX) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_getxattr_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_getxattr_req(msg_len, msg); +- else +- return verify_getxattr_resp(msg_len, msg); +-} +- +-static int verify_setxattr_req(size_t msg_len, void *msg) +-{ +- struct setxattr_request *req = msg; +- int str_len[] = { req->path_len, req->name_len}; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX || +- req->name_len < 0 || req->name_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1 + req->name_len + 1 + +- req->size) +- return -EINVAL; +- +- if (req->name_len > XATTR_NAME_MAX || req->size < 0 || +- req->size > XATTR_SIZE_MAX) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_setxattr_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_setxattr_req(msg_len, msg); +- +- return 0; +-} +- +-static int verify_listxattr_req(size_t msg_len, void *msg) +-{ +- struct listxattr_request *req = msg; +- int str_len[] = { req->path_len }; +- +- if (req->path_len < 0 || req->path_len >= PATH_MAX) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + req->path_len + 1) +- return -EINVAL; +- +- if (req->size < 0 || req->size > XATTR_LIST_MAX) +- return -EINVAL; +- +- if (is_str_msg_valid(req->buf, str_len, sizeof(str_len) / sizeof(int))) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_listxattr_resp(size_t msg_len, void *msg) +-{ +- struct listxattr_response *resp = msg; +- +- if (resp->size != sizeof(*resp->list)) +- return -EINVAL; +- +- if (msg_len < sizeof(*resp)) +- return -EINVAL; +- +- if (resp->size > XATTR_LIST_MAX) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_listxattr_verify(int flag, size_t msg_len, void *msg) +-{ +- if (!msg || !msg_len) +- return 0; +- +- if (flag == C_REQUEST) +- return verify_listxattr_req(msg_len, msg); +- else +- return verify_listxattr_resp(msg_len, msg); +-} +- +-static int hmdfs_readpage_verify(int flag, size_t msg_len, void *msg) +-{ +- struct readpage_request *req = NULL; +- +- if (flag != C_REQUEST || !msg || !msg_len) +- return 0; +- +- req = msg; +- if (msg_len != sizeof(*req)) +- return -EINVAL; +- +- return 0; +-} +- +-static int hmdfs_writepage_verify(int flag, size_t msg_len, void *msg) +-{ +- struct writepage_request *req = NULL; +- +- if (flag != C_REQUEST || !msg || !msg_len) +- return 0; +- +- req = msg; +- if (req->count <= 0 || req->count > HMDFS_PAGE_SIZE) +- return -EINVAL; +- +- if (msg_len != sizeof(*req) + HMDFS_PAGE_SIZE) +- return -EINVAL; +- +- return 0; +-} +- +-static int verify_statfs_req(size_t msg_len, void *msg) +-{ +- struct statfs_request *req = msg; +- int str_len[] = { req->path_len }; +- +- if (req->path_len < 0 || req->path_len >= 
PATH_MAX)
+-		return -EINVAL;
+-
+-	if (msg_len != sizeof(*req) + req->path_len + 1)
+-		return -EINVAL;
+-
+-	if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int)))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int verify_statfs_resp(size_t msg_len, void *msg)
+-{
+-	struct statfs_response *resp = msg;
+-
+-	if (msg_len != sizeof(*resp))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int hmdfs_statfs_verify(int flag, size_t msg_len, void *msg)
+-{
+-	if (!msg || !msg_len)
+-		return 0;
+-
+-	if (flag == C_REQUEST)
+-		return verify_statfs_req(msg_len, msg);
+-	else
+-		return verify_statfs_resp(msg_len, msg);
+-}
+-
+-static int verify_drop_push_req(size_t msg_len, void *msg)
+-{
+-	struct drop_push_request *req = msg;
+-	int str_len[] = { req->path_len };
+-
+-	if (req->path_len < 0 || req->path_len >= PATH_MAX)
+-		return -EINVAL;
+-
+-	if (msg_len != sizeof(*req) + req->path_len + 1)
+-		return -EINVAL;
+-
+-	if (is_str_msg_valid(req->path, str_len, sizeof(str_len) / sizeof(int)))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int hmdfs_drop_push_verify(int flag, size_t msg_len, void *msg)
+-{
+-	if (!msg || !msg_len)
+-		return 0;
+-
+-	if (flag == C_REQUEST)
+-		return verify_drop_push_req(msg_len, msg);
+-
+-	return 0;
+-}
+-
+-typedef int (*hmdfs_message_verify_func)(int, size_t, void *);
+-
+-static const hmdfs_message_verify_func message_verify[F_SIZE] = {
+-	[F_OPEN] = hmdfs_open_verify,
+-	[F_READPAGE] = hmdfs_readpage_verify,
+-	[F_WRITEPAGE] = hmdfs_writepage_verify,
+-	[F_ITERATE] = hmdfs_iterate_verify,
+-	[F_MKDIR] = hmdfs_mkdir_verify,
+-	[F_RMDIR] = hmdfs_rmdir_verify,
+-	[F_CREATE] = hmdfs_create_verify,
+-	[F_UNLINK] = hmdfs_unlink_verify,
+-	[F_RENAME] = hmdfs_rename_verify,
+-	[F_SETATTR] = hmdfs_setattr_verify,
+-	[F_STATFS] = hmdfs_statfs_verify,
+-	[F_DROP_PUSH] = hmdfs_drop_push_verify,
+-	[F_GETATTR] = hmdfs_getattr_verify,
+-	[F_GETXATTR] = hmdfs_getxattr_verify,
+-	[F_SETXATTR] = hmdfs_setxattr_verify,
+-	[F_LISTXATTR] = hmdfs_listxattr_verify,
+-	[F_ATOMIC_OPEN] = hmdfs_atomic_open_verify,
+-};
+-
+-static void handle_bad_message(struct hmdfs_peer *con,
+-			       struct hmdfs_head_cmd *head, int *err)
+-{
+-	/*
+-	 * A bad message won't be noticed by the upper layer, so ETIME is
+-	 * always given to the upper layer. It is preferable to pass
+-	 * EOPNOTSUPP to the upper layer when a bad message (e.g. caused
+-	 * by a wrong len) is received.
+-	 */
+-	if (head->operations.cmd_flag == C_RESPONSE) {
+-		/*
+-		 * Change the msg ret code. To let the upper layer handle
+-		 * EOPNOTSUPP, hmdfs_message_verify() should return
+-		 * 0, so the err code is modified as well.
+-		 */
+-		head->ret_code = cpu_to_le32(-EOPNOTSUPP);
+-		*err = 0;
+-	} else {
+-		if (head->operations.command >= F_SIZE)
+-			return;
+-		/*
+-		 * Some request messages do not need to be responded to.
+-		 * Even if a response is returned, the response msg
+-		 * is automatically ignored in hmdfs_response_recv().
+-		 * Therefore, it is fine to directly return a response.
+- */ +- if (need_response[head->operations.command]) +- hmdfs_send_err_response(con, head, -EOPNOTSUPP); +- } +-} +- +-bool is_reserved_command(int command) +-{ +- if ((command >= F_RESERVED_1 && command <= F_RESERVED_4) || +- command == F_RESERVED_5 || command == F_RESERVED_6 || +- command == F_RESERVED_7 || command == F_RESERVED_8) +- return true; +- return false; +-} +- +-int hmdfs_message_verify(struct hmdfs_peer *con, struct hmdfs_head_cmd *head, +- void *data) +-{ +- int err = 0; +- int flag, cmd, len_type; +- size_t len, min, max; +- +- if (!head) +- return -EINVAL; +- +- flag = head->operations.cmd_flag; +- if (flag != C_REQUEST && flag != C_RESPONSE) +- return -EINVAL; +- +- cmd = head->operations.command; +- if (cmd >= F_SIZE || cmd < F_OPEN || is_reserved_command(cmd)) { +- err = -EINVAL; +- goto handle_bad_msg; +- } +- +- len = le32_to_cpu(head->data_len) - +- sizeof(struct hmdfs_head_cmd); +- min = message_length[flag][cmd][HMDFS_MESSAGE_MIN_INDEX]; +- if (head->operations.command == F_ITERATE && flag == C_RESPONSE) +- max = sizeof(struct slice_descriptor) + PAGE_SIZE; +- else +- max = message_length[flag][cmd][HMDFS_MESSAGE_MAX_INDEX]; +- len_type = +- message_length[flag][cmd][HMDFS_MESSAGE_LEN_JUDGE_INDEX]; +- +- if (len_type == MESSAGE_LEN_JUDGE_RANGE) { +- if (len < min || len > max) { +- hmdfs_err( +- "cmd %d -> %d message verify fail, len = %zu", +- cmd, flag, len); +- err = -EINVAL; +- goto handle_bad_msg; +- } +- } else { +- if (len != min && len != max) { +- hmdfs_err( +- "cmd %d -> %d message verify fail, len = %zu", +- cmd, flag, len); +- err = -EINVAL; +- goto handle_bad_msg; +- } +- } +- +- if (message_verify[cmd]) +- err = message_verify[cmd](flag, len, data); +- +- if (err) +- goto handle_bad_msg; +- +- return err; +- +-handle_bad_msg: +- handle_bad_message(con, head, &err); +- return err; +-} +diff --git a/fs/hmdfs/comm/message_verify.h b/fs/hmdfs/comm/message_verify.h +deleted file mode 100644 +index 99e696a44..000000000 +--- a/fs/hmdfs/comm/message_verify.h ++++ /dev/null +@@ -1,27 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/message_verify.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef HMDFS_MESSAGE_VERIFY_H +-#define HMDFS_MESSAGE_VERIFY_H +- +-#include "protocol.h" +- +-enum MESSAGE_LEN_JUDGE_TYPE { +- MESSAGE_LEN_JUDGE_RANGE = 0, +- MESSAGE_LEN_JUDGE_BIN = 1, +-}; +- +-#define HMDFS_MESSAGE_MIN_INDEX 0 +-#define HMDFS_MESSAGE_MAX_INDEX 1 +-#define HMDFS_MESSAGE_LEN_JUDGE_INDEX 2 +-#define HMDFS_MESSAGE_MIN_MAX 3 +- +-void hmdfs_message_verify_init(void); +-int hmdfs_message_verify(struct hmdfs_peer *con, struct hmdfs_head_cmd *head, +- void *data); +- +-#endif +diff --git a/fs/hmdfs/comm/node_cb.c b/fs/hmdfs/comm/node_cb.c +deleted file mode 100644 +index 991ebde1d..000000000 +--- a/fs/hmdfs/comm/node_cb.c ++++ /dev/null +@@ -1,73 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/comm/node_cb.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include +- +-#include "node_cb.h" +-#include "connection.h" +- +-static struct list_head cb_head[NODE_EVT_NR][NODE_EVT_TYPE_NR]; +- +-static const char *evt_str_tbl[NODE_EVT_NR] = { +- "add", "online", "offline", "del", +-}; +- +-static inline bool hmdfs_is_valid_node_evt(int evt) +-{ +- return (evt >= 0 && evt < NODE_EVT_NR); +-} +- +-static const char *hmdfs_evt_str(int evt) +-{ +- if (!hmdfs_is_valid_node_evt(evt)) +- return "unknown"; +- return evt_str_tbl[evt]; +-} +- +-void hmdfs_node_evt_cb_init(void) +-{ +- int i; +- +- for (i = 0; i < ARRAY_SIZE(cb_head); i++) { +- int j; +- +- for (j = 0; j < ARRAY_SIZE(cb_head[0]); j++) +- INIT_LIST_HEAD(&cb_head[i][j]); +- } +-} +- +-void hmdfs_node_add_evt_cb(struct hmdfs_node_cb_desc *desc, int nr) +-{ +- int i; +- +- for (i = 0; i < nr; i++) { +- int evt = desc[i].evt; +- bool sync = desc[i].sync; +- +- if (!hmdfs_is_valid_node_evt(evt)) +- continue; +- +- list_add_tail(&desc[i].list, &cb_head[evt][sync]); +- } +-} +- +-void hmdfs_node_call_evt_cb(struct hmdfs_peer *conn, int evt, bool sync, +- unsigned int seq) +-{ +- struct hmdfs_node_cb_desc *desc = NULL; +- +- hmdfs_info("node 0x%x:0x%llx call %s %s cb seq %u", +- conn->owner, conn->device_id, hmdfs_evt_str(evt), +- sync ? "sync" : "async", seq); +- +- if (!hmdfs_is_valid_node_evt(evt)) +- return; +- +- list_for_each_entry(desc, &cb_head[evt][sync], list) { +- desc->fn(conn, evt, seq); +- } +-} +diff --git a/fs/hmdfs/comm/node_cb.h b/fs/hmdfs/comm/node_cb.h +deleted file mode 100644 +index 8bd6a4bbc..000000000 +--- a/fs/hmdfs/comm/node_cb.h ++++ /dev/null +@@ -1,43 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/node_cb.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef HMDFS_NODE_CB_H +-#define HMDFS_NODE_CB_H +- +-#include "hmdfs.h" +- +-/* async & sync */ +-#define NODE_EVT_TYPE_NR 2 +- +-enum { +- NODE_EVT_ADD = 0, +- NODE_EVT_ONLINE, +- NODE_EVT_OFFLINE, +- NODE_EVT_DEL, +- NODE_EVT_NR, +-}; +- +-struct hmdfs_peer; +- +-typedef void (*hmdfs_node_evt_cb)(struct hmdfs_peer *conn, +- int evt, unsigned int seq); +- +-struct hmdfs_node_cb_desc { +- int evt; +- bool sync; +- hmdfs_node_evt_cb fn; +- struct list_head list; +-}; +- +-extern void hmdfs_node_evt_cb_init(void); +- +-/* Only initialize during module init */ +-extern void hmdfs_node_add_evt_cb(struct hmdfs_node_cb_desc *desc, int nr); +-extern void hmdfs_node_call_evt_cb(struct hmdfs_peer *node, int evt, bool sync, +- unsigned int seq); +- +-#endif /* HMDFS_NODE_CB_H */ +diff --git a/fs/hmdfs/comm/protocol.h b/fs/hmdfs/comm/protocol.h +deleted file mode 100644 +index beaa5adf4..000000000 +--- a/fs/hmdfs/comm/protocol.h ++++ /dev/null +@@ -1,454 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/protocol.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */
+-
+-#ifndef HMDFS_PROTOCOL_H
+-#define HMDFS_PROTOCOL_H
+-
+-#include
+-#include
+-#include
+-#include
+-
+-struct hmdfs_cmd {
+-	__u8 reserved;
+-	__u8 cmd_flag;
+-	__u8 command;
+-	__u8 reserved2;
+-} __packed;
+-
+-#define HMDFS_MSG_MAGIC 0xF7
+-#define HMDFS_MAX_MESSAGE_LEN (8 * 1024 * 1024)
+-
+-struct hmdfs_head_cmd {
+-	__u8 magic;
+-	__u8 version;
+-	__le16 reserved;
+-	__le32 data_len;
+-	struct hmdfs_cmd operations;
+-	__le32 ret_code;
+-	__le32 msg_id;
+-	__le32 reserved1;
+-} __packed;
+-
+-enum FILE_RECV_STATE {
+-	FILE_RECV_PROCESS = 0,
+-	FILE_RECV_SUCC,
+-	FILE_RECV_ERR_NET,
+-	FILE_RECV_ERR_SPC,
+-};
+-
+-struct file_recv_info {
+-	void *local_filp;
+-	atomic_t local_fslices;
+-	atomic_t state;
+-};
+-
+-enum MSG_IDR_TYPE {
+-	MSG_IDR_1_0_NONE = 0,
+-	MSG_IDR_1_0_MESSAGE_SYNC,
+-	MSG_IDR_1_0_PAGE,
+-	MSG_IDR_MESSAGE_SYNC,
+-	MSG_IDR_MESSAGE_ASYNC,
+-	MSG_IDR_PAGE,
+-	MSG_IDR_MAX,
+-};
+-
+-struct hmdfs_msg_idr_head {
+-	__u32 type;
+-	__u32 msg_id;
+-	struct hmdfs_cmd send_cmd_operations;
+-	struct kref ref;
+-	struct hmdfs_peer *peer;
+-};
+-
+-struct sendmsg_wait_queue {
+-	struct hmdfs_msg_idr_head head;
+-	wait_queue_head_t response_q;
+-	struct list_head async_msg;
+-	atomic_t valid;
+-	__u32 size;
+-	void *buf;
+-	__u32 ret;
+-	unsigned long start;
+-	struct file_recv_info recv_info;
+-};
+-
+-struct hmdfs_send_command {
+-	struct hmdfs_cmd operations;
+-	void *data;
+-	size_t len;
+-	void *local_filp;
+-	void *out_buf;
+-	size_t out_len;
+-	__u32 ret_code;
+-};
+-
+-struct hmdfs_req {
+-	struct hmdfs_cmd operations;
+-	/*
+-	 * Normally, the caller ought to set timeout to TIMEOUT_CONFIG, so that
+-	 * hmdfs_send_async_request() will search s_cmd_timeout for the user-
+-	 * configured timeout values.
+-	 *
+-	 * However, consider the following scenario:
+-	 * The caller may want to issue multiple requests sharing the same
+-	 * timeout value, but the user may update the value in the meantime.
+-	 * To ensure the "atomicity" of the timeout used by these requests, we
+-	 * provide the timeout field as an escape hatch.
+-	 */
+- */ +- unsigned int timeout; +- void *data; +- size_t data_len; +- +- void *private; // optional +- size_t private_len; // optional +-}; +- +-struct hmdfs_resp { +- void *out_buf; +- size_t out_len; +- __u32 ret_code; +-}; +- +-struct hmdfs_msg_parasite { +- struct hmdfs_msg_idr_head head; +- struct delayed_work d_work; +- bool wfired; +- struct hmdfs_req req; +- struct hmdfs_resp resp; +- unsigned long start; +-}; +- +-struct hmdfs_send_data { +- // sect1: head +- void *head; +- size_t head_len; +- +- // sect2: slice descriptor +- void *sdesc; +- size_t sdesc_len; +- +- // sect3: request / response / file slice +- void *data; +- size_t len; +-}; +- +-struct slice_descriptor { +- __le32 num_slices; +- __le32 slice_size; +- __le32 slice_sn; +- __le32 content_size; +-} __packed; +- +-enum DFS_VERSION { +- HMDFS_VERSION = 0x40, +- MAX_VERSION = 0xFF +-}; +- +-enum CMD_FLAG { C_REQUEST = 0, C_RESPONSE = 1, C_FLAG_SIZE }; +- +-enum FILE_CMD { +- F_OPEN = 0, +- F_RELEASE = 1, +- F_READPAGE = 2, +- F_WRITEPAGE = 3, +- F_ITERATE = 4, +- F_RESERVED_1 = 5, +- F_RESERVED_2 = 6, +- F_RESERVED_3 = 7, +- F_RESERVED_4 = 8, +- F_MKDIR = 9, +- F_RMDIR = 10, +- F_CREATE = 11, +- F_UNLINK = 12, +- F_RENAME = 13, +- F_SETATTR = 14, +- F_RESERVED_5 = 15, +- F_STATFS = 16, +- F_CONNECT_REKEY = 17, +- F_DROP_PUSH = 18, +- F_RESERVED_6 = 19, +- F_GETATTR = 20, +- F_FSYNC = 21, +- F_SYNCFS = 22, +- F_GETXATTR = 23, +- F_SETXATTR = 24, +- F_LISTXATTR = 25, +- F_RESERVED_7 = 26, +- F_RESERVED_8 = 27, +- F_ATOMIC_OPEN = 28, +- F_SIZE, +-}; +- +-struct open_request { +- __u8 file_type; +- __le32 flags; +- __le32 path_len; +- char buf[0]; +-} __packed; +- +-struct open_response { +- __le32 change_detect_cap; +- __le64 file_ver; +- __le32 file_id; +- __le64 file_size; +- __le64 ino; +- __le64 ctime; +- __le32 ctime_nsec; +- __le64 mtime; +- __le32 mtime_nsec; +- __le64 stable_ctime; +- __le32 stable_ctime_nsec; +- __le64 ichange_count; +-} __packed; +- +-enum hmdfs_open_flags { +- HMDFS_O_TRUNC = O_TRUNC, +- HMDFS_O_EXCL = O_EXCL, +-}; +- +-struct atomic_open_request { +- __le32 open_flags; +- __le16 mode; +- __le16 reserved1; +- __le32 path_len; +- __le32 file_len; +- __le64 reserved2[4]; +- char buf[0]; +-} __packed; +- +-struct atomic_open_response { +- __le32 fno; +- __le16 i_mode; +- __le16 reserved1; +- __le32 i_flags; +- __le32 reserved2; +- __le64 reserved3[4]; +- struct open_response open_resp; +-} __packed; +- +-struct release_request { +- __le64 file_ver; +- __le32 file_id; +-} __packed; +- +-struct fsync_request { +- __le64 file_ver; +- __le32 file_id; +- __le32 datasync; +- __le64 start; +- __le64 end; +-} __packed; +- +-struct readpage_request { +- __le64 file_ver; +- __le32 file_id; +- __le32 size; +- __le64 index; +-} __packed; +- +-struct readpage_response { +- char buf[0]; +-} __packed; +- +-struct writepage_request { +- __le64 file_ver; +- __le32 file_id; +- __le64 index; +- __le32 count; +- char buf[0]; +-} __packed; +- +-struct writepage_response { +- __le64 ichange_count; +- __le64 ctime; +- __le32 ctime_nsec; +-} __packed; +- +-struct readdir_request { +- __le64 dcache_crtime; +- __le64 dcache_crtime_nsec; +- __le64 dentry_ctime; +- __le64 dentry_ctime_nsec; +- __le64 num; +- __le32 verify_cache; +- __le32 path_len; +- char path[0]; +-} __packed; +- +-struct hmdfs_inodeinfo_response { +- __le64 i_size; +- __le64 i_mtime; +- __le32 i_mtime_nsec; +- __le32 fno; +- __le16 i_mode; +- __le64 i_ino; +- __le32 i_flags; +- __le32 i_reserved; +-} __packed; +- +-struct mkdir_request { +- __le32 path_len; 
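/*
 * (Illustrative aside, not part of the original file.) All integers in
 * these on-wire structs use fixed-endian types (__le16/__le32/__le64)
 * and the structs are __packed, so both peers must convert explicitly
 * at the boundary rather than reading fields raw, e.g.:
 *
 *	u32 path_len = le32_to_cpu(req->path_len);	// wire -> host
 *	req->mode = cpu_to_le16(mode);			// host -> wire
 */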
+- __le32 name_len; +- __le16 mode; +- char path[0]; +-} __packed; +- +-struct create_request { +- __le32 path_len; +- __le32 name_len; +- __le16 mode; +- __u8 want_excl; +- char path[0]; +-} __packed; +- +-struct rmdir_request { +- __le32 path_len; +- __le32 name_len; +- char path[0]; +-} __packed; +- +-struct unlink_request { +- __le32 path_len; +- __le32 name_len; +- char path[0]; +-} __packed; +- +-struct rename_request { +- __le32 old_path_len; +- __le32 new_path_len; +- __le32 old_name_len; +- __le32 new_name_len; +- __le32 flags; +- char path[0]; +-} __packed; +- +-struct drop_push_request { +- __le32 path_len; +- char path[0]; +-} __packed; +- +-struct setattr_request { +- __le64 size; +- __le32 valid; +- __le16 mode; +- __le32 uid; +- __le32 gid; +- __le64 atime; +- __le32 atime_nsec; +- __le64 mtime; +- __le32 mtime_nsec; +- __le32 path_len; +- char buf[0]; +-} __packed; +- +-struct getattr_request { +- __le32 lookup_flags; +- __le32 path_len; +- char buf[0]; +-} __packed; +- +-struct getattr_response { +- __le32 change_detect_cap; +- __le32 result_mask; +- __le32 flags; +- __le64 fsid; +- __le16 mode; +- __le32 nlink; +- __le32 uid; +- __le32 gid; +- __le32 rdev; +- __le64 ino; +- __le64 size; +- __le64 blocks; +- __le32 blksize; +- __le64 atime; +- __le32 atime_nsec; +- __le64 mtime; +- __le32 mtime_nsec; +- __le64 ctime; +- __le32 ctime_nsec; +- __le64 crtime; +- __le32 crtime_nsec; +- __le64 ichange_count; +-} __packed; +- +-struct statfs_request { +- __le32 path_len; +- char path[0]; +-} __packed; +- +-struct statfs_response { +- __le64 f_type; +- __le64 f_bsize; +- __le64 f_blocks; +- __le64 f_bfree; +- __le64 f_bavail; +- __le64 f_files; +- __le64 f_ffree; +- __le32 f_fsid_0; +- __le32 f_fsid_1; +- __le64 f_namelen; +- __le64 f_frsize; +- __le64 f_flags; +- __le64 f_spare_0; +- __le64 f_spare_1; +- __le64 f_spare_2; +- __le64 f_spare_3; +-} __packed; +- +-struct syncfs_request { +- __le64 version; +- __le32 flags; +-} __packed; +- +-struct getxattr_request { +- __le32 path_len; +- __le32 name_len; +- __le32 size; +- char buf[0]; +-} __packed; +- +-struct getxattr_response { +- __le32 size; +- char value[0]; /* xattr value may non-printable */ +-} __packed; +- +-struct setxattr_request { +- __le32 path_len; +- __le32 name_len; +- __le32 size; +- __le32 flags; +- __u8 del; /* remove xattr */ +- char buf[0]; +-} __packed; +- +-struct listxattr_request { +- __le32 path_len; +- __le32 size; +- char buf[0]; +-} __packed; +- +-struct listxattr_response { +- __le32 size; +- char list[0]; +-} __packed; +- +-struct connection_rekey_request { +- __le32 update_request; +-} __packed; +- +-enum CONNECTION_KEY_UPDATE_REQUEST { +- UPDATE_NOT_REQUESTED = 0, +- UPDATE_REQUESTED = 1 +-}; +- +-enum MSG_QUEUE_STATUS { +- MSG_Q_SEND = 0, +- MSG_Q_END_RECV, +-}; +-#endif +diff --git a/fs/hmdfs/comm/socket_adapter.c b/fs/hmdfs/comm/socket_adapter.c +deleted file mode 100644 +index b9f35b9e1..000000000 +--- a/fs/hmdfs/comm/socket_adapter.c ++++ /dev/null +@@ -1,1121 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/comm/socket_adapter.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include "socket_adapter.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "authority/authentication.h" +-#include "comm/device_node.h" +-#include "hmdfs_client.h" +-#include "hmdfs_server.h" +-#include "hmdfs_trace.h" +-#include "message_verify.h" +- +-#define ACQUIRE_WFIRED_INTVAL_USEC_MIN 10 +-#define ACQUIRE_WFIRED_INTVAL_USEC_MAX 30 +- +-typedef void (*request_callback)(struct hmdfs_peer *, struct hmdfs_head_cmd *, +- void *); +-typedef void (*response_callback)(struct hmdfs_peer *, +- struct sendmsg_wait_queue *, void *, size_t); +- +-static const request_callback s_recv_callbacks[F_SIZE] = { +- [F_OPEN] = hmdfs_server_open, +- [F_READPAGE] = hmdfs_server_readpage, +- [F_RELEASE] = hmdfs_server_release, +- [F_WRITEPAGE] = hmdfs_server_writepage, +- [F_ITERATE] = hmdfs_server_readdir, +- [F_MKDIR] = hmdfs_server_mkdir, +- [F_CREATE] = hmdfs_server_create, +- [F_RMDIR] = hmdfs_server_rmdir, +- [F_UNLINK] = hmdfs_server_unlink, +- [F_RENAME] = hmdfs_server_rename, +- [F_SETATTR] = hmdfs_server_setattr, +- [F_STATFS] = hmdfs_server_statfs, +- [F_DROP_PUSH] = hmdfs_server_get_drop_push, +- [F_GETATTR] = hmdfs_server_getattr, +- [F_FSYNC] = hmdfs_server_fsync, +- [F_SYNCFS] = hmdfs_server_syncfs, +- [F_GETXATTR] = hmdfs_server_getxattr, +- [F_SETXATTR] = hmdfs_server_setxattr, +- [F_LISTXATTR] = hmdfs_server_listxattr, +- [F_ATOMIC_OPEN] = hmdfs_server_atomic_open, +-}; +- +-typedef void (*file_request_callback)(struct hmdfs_peer *, +- struct hmdfs_send_command *); +- +-struct async_req_callbacks { +- void (*on_wakeup)(struct hmdfs_peer *peer, const struct hmdfs_req *req, +- const struct hmdfs_resp *resp); +-}; +- +-static const struct async_req_callbacks g_async_req_callbacks[F_SIZE] = { +- [F_SYNCFS] = { .on_wakeup = hmdfs_recv_syncfs_cb }, +- [F_WRITEPAGE] = { .on_wakeup = hmdfs_writepage_cb }, +-}; +- +-static void msg_release(struct kref *kref) +-{ +- struct sendmsg_wait_queue *msg_wq; +- struct hmdfs_peer *con; +- +- msg_wq = (struct sendmsg_wait_queue *)container_of(kref, +- struct hmdfs_msg_idr_head, ref); +- con = msg_wq->head.peer; +- idr_remove(&con->msg_idr, msg_wq->head.msg_id); +- spin_unlock(&con->idr_lock); +- +- kfree(msg_wq->buf); +- if (msg_wq->recv_info.local_filp) +- fput(msg_wq->recv_info.local_filp); +- kfree(msg_wq); +-} +- +-// Always remember to find before put, and make sure con is avilable +-void msg_put(struct sendmsg_wait_queue *msg_wq) +-{ +- kref_put_lock(&msg_wq->head.ref, msg_release, +- &msg_wq->head.peer->idr_lock); +-} +- +-static void recv_info_init(struct file_recv_info *recv_info) +-{ +- memset(recv_info, 0, sizeof(struct file_recv_info)); +- atomic_set(&recv_info->local_fslices, 0); +- atomic_set(&recv_info->state, FILE_RECV_PROCESS); +-} +- +-static int msg_init(struct hmdfs_peer *con, struct sendmsg_wait_queue *msg_wq, +- struct hmdfs_cmd operations) +-{ +- int ret = 0; +- struct file_recv_info *recv_info = &msg_wq->recv_info; +- +- ret = hmdfs_alloc_msg_idr(con, MSG_IDR_MESSAGE_SYNC, msg_wq, operations); +- if (unlikely(ret)) +- return ret; +- +- atomic_set(&msg_wq->valid, MSG_Q_SEND); +- init_waitqueue_head(&msg_wq->response_q); +- recv_info_init(recv_info); +- msg_wq->start = jiffies; +- return 0; +-} +- +-static inline void statistic_con_sb_dirty(struct hmdfs_peer *con, +- const struct hmdfs_cmd *op) +-{ +- if (op->command == F_WRITEPAGE && op->cmd_flag == C_REQUEST) +- atomic64_inc(&con->sb_dirty_count); +-} +- +-int hmdfs_sendmessage(struct hmdfs_peer *node, struct hmdfs_send_data *msg) +-{ +- 
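/*
 * (Illustrative aside, not part of the original file.) msg_release()/
 * msg_put() above follow the kref_put_lock() idiom: the final put takes
 * idr_lock, and the release callback removes the id and must drop the
 * lock itself before freeing. The same pattern recurs for mp_put() and
 * asw_put() later in this file. A generic sketch with hypothetical names:
 *
 *	static void obj_release(struct kref *kref)
 *	{
 *		struct obj *o = container_of(kref, struct obj, ref);
 *
 *		idr_remove(&o->owner->idr, o->id);	// idr_lock still held
 *		spin_unlock(&o->owner->idr_lock);	// release drops the lock
 *		kfree(o);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		kref_put_lock(&o->ref, obj_release, &o->owner->idr_lock);
 *	}
 */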
int ret = 0; +- struct connection *connect = NULL; +- struct tcp_handle *tcp = NULL; +- struct hmdfs_head_cmd *head = msg->head; +- const struct cred *old_cred; +- +- if (!node) { +- hmdfs_err("node NULL when send cmd %d", +- head->operations.command); +- ret = -EAGAIN; +- goto out_err; +- } else if (node->status != NODE_STAT_ONLINE) { +- hmdfs_err("device %llu OFFLINE %d when send cmd %d", +- node->device_id, node->status, +- head->operations.command); +- ret = -EAGAIN; +- goto out; +- } +- +- old_cred = hmdfs_override_creds(node->sbi->system_cred); +- +- do { +- connect = get_conn_impl(node, CONNECT_TYPE_TCP); +- if (!connect) { +- hmdfs_info_ratelimited( +- "device %llu no connection available when send cmd %d, get new session", +- node->device_id, head->operations.command); +- if (node->status != NODE_STAT_OFFLINE) { +- struct notify_param param; +- +- memcpy(param.remote_cid, node->cid, +- HMDFS_CID_SIZE); +- param.notify = NOTIFY_OFFLINE; +- param.fd = INVALID_SOCKET_FD; +- notify(node, ¶m); +- } +- ret = -EAGAIN; +- goto revert_cred; +- } +- +- ret = connect->send_message(connect, msg); +- if (ret == -ESHUTDOWN) { +- hmdfs_info("device %llu send cmd %d message fail, connection stop", +- node->device_id, head->operations.command); +- connect->status = CONNECT_STAT_STOP; +- tcp = connect->connect_handle; +- if (node->status != NODE_STAT_OFFLINE) { +- connection_get(connect); +- if (!queue_work(node->reget_conn_wq, +- &connect->reget_work)) +- connection_put(connect); +- } +- connection_put(connect); +- /* +- * node->status is OFFLINE can not ensure +- * node_seq will be increased before +- * hmdfs_sendmessage() returns. +- */ +- hmdfs_node_inc_evt_seq(node); +- } else { +- connection_put(connect); +- goto revert_cred; +- } +- } while (node->status != NODE_STAT_OFFLINE); +-revert_cred: +- hmdfs_revert_creds(old_cred); +- +- if (!ret) +- statistic_con_sb_dirty(node, &head->operations); +-out: +- if (head->operations.cmd_flag == C_REQUEST) +- hmdfs_client_snd_statis(node->sbi, +- head->operations.command, ret); +- else if (head->operations.cmd_flag == C_RESPONSE) +- hmdfs_server_snd_statis(node->sbi, +- head->operations.command, ret); +-out_err: +- return ret; +-} +- +-int hmdfs_sendmessage_response(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, __u32 data_len, +- void *buf, __u32 ret_code) +-{ +- int ret; +- struct hmdfs_send_data msg; +- struct hmdfs_head_cmd head; +- +- head.magic = HMDFS_MSG_MAGIC; +- head.version = HMDFS_VERSION; +- head.operations = cmd->operations; +- head.operations.cmd_flag = C_RESPONSE; +- head.data_len = cpu_to_le32(data_len + sizeof(struct hmdfs_head_cmd)); +- head.ret_code = cpu_to_le32(ret_code); +- head.msg_id = cmd->msg_id; +- head.reserved = cmd->reserved; +- head.reserved1 = cmd->reserved1; +- msg.head = &head; +- msg.head_len = sizeof(struct hmdfs_head_cmd); +- msg.data = buf; +- msg.len = data_len; +- msg.sdesc = NULL; +- msg.sdesc_len = 0; +- +- ret = hmdfs_sendmessage(con, &msg); +- return ret; +-} +- +-static void mp_release(struct kref *kref) +-{ +- struct hmdfs_msg_parasite *mp = NULL; +- struct hmdfs_peer *peer = NULL; +- +- mp = (struct hmdfs_msg_parasite *)container_of(kref, +- struct hmdfs_msg_idr_head, ref); +- peer = mp->head.peer; +- idr_remove(&peer->msg_idr, mp->head.msg_id); +- spin_unlock(&peer->idr_lock); +- +- peer_put(peer); +- kfree(mp->resp.out_buf); +- kfree(mp); +-} +- +-void mp_put(struct hmdfs_msg_parasite *mp) +-{ +- kref_put_lock(&mp->head.ref, mp_release, &mp->head.peer->idr_lock); +-} +- +-static void 
async_request_cb_on_wakeup_fn(struct work_struct *w)
+-{
+-	struct hmdfs_msg_parasite *mp =
+-		container_of(w, struct hmdfs_msg_parasite, d_work.work);
+-	struct async_req_callbacks cbs;
+-	const struct cred *old_cred =
+-		hmdfs_override_creds(mp->head.peer->sbi->cred);
+-
+-	if (mp->resp.ret_code == -ETIME)
+-		hmdfs_client_resp_statis(mp->head.peer->sbi,
+-					 mp->req.operations.command,
+-					 HMDFS_RESP_TIMEOUT, 0, 0);
+-
+-	cbs = g_async_req_callbacks[mp->req.operations.command];
+-	if (cbs.on_wakeup)
+-		(*cbs.on_wakeup)(mp->head.peer, &mp->req, &mp->resp);
+-	mp_put(mp);
+-	hmdfs_revert_creds(old_cred);
+-}
+-
+-static struct hmdfs_msg_parasite *mp_alloc(struct hmdfs_peer *peer,
+-					   const struct hmdfs_req *req)
+-{
+-	struct hmdfs_msg_parasite *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
+-	int ret;
+-
+-	if (unlikely(!mp))
+-		return ERR_PTR(-ENOMEM);
+-
+-	ret = hmdfs_alloc_msg_idr(peer, MSG_IDR_MESSAGE_ASYNC, mp,
+-				  req->operations);
+-	if (unlikely(ret)) {
+-		kfree(mp);
+-		return ERR_PTR(ret);
+-	}
+-
+-	mp->start = jiffies;
+-	peer_get(mp->head.peer);
+-	mp->resp.ret_code = -ETIME;
+-	INIT_DELAYED_WORK(&mp->d_work, async_request_cb_on_wakeup_fn);
+-	mp->wfired = false;
+-	mp->req = *req;
+-	return mp;
+-}
+-
+-/**
+- * hmdfs_send_async_request - send out an async request
+- * @peer: target device node
+- * @req: request descriptor + necessary contexts
+- *
+- * Send out a request synchronously and wait for its response asynchronously
+- * Return -ESHUTDOWN when the device node is unreachable
+- * Return -EAGAIN if the network is recovering
+- * Return -ENOMEM if out of memory
+- *
+- * Register g_async_req_callbacks to receive the response
+- */
+-int hmdfs_send_async_request(struct hmdfs_peer *peer,
+-			     const struct hmdfs_req *req)
+-{
+-	int ret = 0;
+-	struct hmdfs_send_data msg;
+-	struct hmdfs_head_cmd head;
+-	struct hmdfs_msg_parasite *mp = NULL;
+-	size_t msg_len = req->data_len + sizeof(struct hmdfs_head_cmd);
+-	unsigned int timeout;
+-
+-	if (req->timeout == TIMEOUT_CONFIG)
+-		timeout = get_cmd_timeout(peer->sbi, req->operations.command);
+-	else
+-		timeout = req->timeout;
+-	if (timeout == TIMEOUT_UNINIT || timeout == TIMEOUT_NONE) {
+-		hmdfs_err("send msg %d with uninitialized/invalid timeout",
+-			  req->operations.command);
+-		return -EINVAL;
+-	}
+-
+-	if (!hmdfs_is_node_online(peer))
+-		return -EAGAIN;
+-
+-	mp = mp_alloc(peer, req);
+-	if (IS_ERR(mp))
+-		return PTR_ERR(mp);
+-	head.magic = HMDFS_MSG_MAGIC;
+-	head.version = HMDFS_VERSION;
+-	head.data_len = cpu_to_le32(msg_len);
+-	head.operations = mp->req.operations;
+-	head.msg_id = cpu_to_le32(mp->head.msg_id);
+-	head.reserved = 0;
+-	head.reserved1 = 0;
+-
+-	msg.head = &head;
+-	msg.head_len = sizeof(head);
+-	msg.data = mp->req.data;
+-	msg.len = mp->req.data_len;
+-	msg.sdesc_len = 0;
+-	msg.sdesc = NULL;
+-
+-	ret = hmdfs_sendmessage(peer, &msg);
+-	if (unlikely(ret)) {
+-		mp_put(mp);
+-		goto out;
+-	}
+-
+-	queue_delayed_work(peer->async_wq, &mp->d_work, timeout * HZ);
+-	/*
+-	 * The work may not have been queued upon the arrival of its response,
+-	 * resulting in meaningless waiting.
So we use the membar to tell the +- * recv thread if the work has been queued +- */ +- smp_store_release(&mp->wfired, true); +-out: +- hmdfs_dec_msg_idr_process(peer); +- return ret; +-} +- +-static int hmdfs_record_async_readdir(struct hmdfs_peer *con, +- struct sendmsg_wait_queue *msg_wq) +-{ +- struct hmdfs_sb_info *sbi = con->sbi; +- +- spin_lock(&sbi->async_readdir_msg_lock); +- if (sbi->async_readdir_prohibit) { +- spin_unlock(&sbi->async_readdir_msg_lock); +- return -EINTR; +- } +- +- list_add(&msg_wq->async_msg, &sbi->async_readdir_msg_list); +- spin_unlock(&sbi->async_readdir_msg_lock); +- +- return 0; +-} +- +-static void hmdfs_untrack_async_readdir(struct hmdfs_peer *con, +- struct sendmsg_wait_queue *msg_wq) +-{ +- struct hmdfs_sb_info *sbi = con->sbi; +- +- spin_lock(&sbi->async_readdir_msg_lock); +- list_del(&msg_wq->async_msg); +- spin_unlock(&sbi->async_readdir_msg_lock); +-} +- +-int hmdfs_sendmessage_request(struct hmdfs_peer *con, +- struct hmdfs_send_command *sm) +-{ +- int time_left; +- int ret = 0; +- struct sendmsg_wait_queue *msg_wq = NULL; +- struct hmdfs_send_data msg; +- size_t outlen = sm->len + sizeof(struct hmdfs_head_cmd); +- unsigned int timeout = +- get_cmd_timeout(con->sbi, sm->operations.command); +- struct hmdfs_head_cmd *head = NULL; +- bool dec = false; +- +- if (!hmdfs_is_node_online(con)) { +- ret = -EAGAIN; +- goto free_filp; +- } +- +- if (timeout == TIMEOUT_UNINIT) { +- hmdfs_err_ratelimited("send msg %d with uninitialized timeout", +- sm->operations.command); +- ret = -EINVAL; +- goto free_filp; +- } +- +- head = kzalloc(sizeof(struct hmdfs_head_cmd), GFP_KERNEL); +- if (!head) { +- ret = -ENOMEM; +- goto free_filp; +- } +- +- sm->out_buf = NULL; +- head->magic = HMDFS_MSG_MAGIC; +- head->version = HMDFS_VERSION; +- head->operations = sm->operations; +- head->data_len = cpu_to_le32(outlen); +- head->ret_code = cpu_to_le32(sm->ret_code); +- head->reserved = 0; +- head->reserved1 = 0; +- if (timeout != TIMEOUT_NONE) { +- msg_wq = kzalloc(sizeof(*msg_wq), GFP_KERNEL); +- if (!msg_wq) { +- ret = -ENOMEM; +- goto free_filp; +- } +- ret = msg_init(con, msg_wq, sm->operations); +- if (ret) { +- kfree(msg_wq); +- msg_wq = NULL; +- goto free_filp; +- } +- dec = true; +- head->msg_id = cpu_to_le32(msg_wq->head.msg_id); +- if (sm->operations.command == F_ITERATE) +- msg_wq->recv_info.local_filp = sm->local_filp; +- } +- msg.head = head; +- msg.head_len = sizeof(struct hmdfs_head_cmd); +- msg.data = sm->data; +- msg.len = sm->len; +- msg.sdesc_len = 0; +- msg.sdesc = NULL; +- ret = hmdfs_sendmessage(con, &msg); +- if (ret) { +- hmdfs_err_ratelimited("send err sm->device_id, %lld, msg_id %u", +- con->device_id, head->msg_id); +- goto free; +- } +- +- if (timeout == TIMEOUT_NONE) +- goto free; +- +- hmdfs_dec_msg_idr_process(con); +- dec = false; +- +- if (sm->operations.command == F_ITERATE) { +- ret = hmdfs_record_async_readdir(con, msg_wq); +- if (ret) { +- atomic_set(&msg_wq->recv_info.state, FILE_RECV_ERR_SPC); +- goto free; +- } +- } +- +- time_left = wait_event_interruptible_timeout( +- msg_wq->response_q, +- (atomic_read(&msg_wq->valid) == MSG_Q_END_RECV), timeout * HZ); +- +- if (sm->operations.command == F_ITERATE) +- hmdfs_untrack_async_readdir(con, msg_wq); +- +- if (time_left == -ERESTARTSYS || time_left == 0) { +- hmdfs_err("timeout err sm->device_id %lld, msg_id %d cmd %d", +- con->device_id, head->msg_id, +- head->operations.command); +- if (sm->operations.command == F_ITERATE) +- atomic_set(&msg_wq->recv_info.state, FILE_RECV_ERR_NET); +- ret 
= -ETIME; +- hmdfs_client_resp_statis(con->sbi, sm->operations.command, +- HMDFS_RESP_TIMEOUT, 0, 0); +- goto free; +- } +- sm->out_buf = msg_wq->buf; +- msg_wq->buf = NULL; +- sm->out_len = msg_wq->size - sizeof(struct hmdfs_head_cmd); +- ret = msg_wq->ret; +- +-free: +- if (msg_wq) +- msg_put(msg_wq); +- if (dec) +- hmdfs_dec_msg_idr_process(con); +- kfree(head); +- return ret; +- +-free_filp: +- if (sm->local_filp) +- fput(sm->local_filp); +- kfree(head); +- return ret; +-} +- +-static int hmdfs_send_slice(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- struct slice_descriptor *sdesc, void *slice_buf) +-{ +- int ret; +- struct hmdfs_send_data msg; +- struct hmdfs_head_cmd head; +- int content_size = le32_to_cpu(sdesc->content_size); +- int msg_len = sizeof(struct hmdfs_head_cmd) + content_size + +- sizeof(struct slice_descriptor); +- +- head.magic = HMDFS_MSG_MAGIC; +- head.version = HMDFS_VERSION; +- head.operations = cmd->operations; +- head.operations.cmd_flag = C_RESPONSE; +- head.data_len = cpu_to_le32(msg_len); +- head.ret_code = cpu_to_le32(0); +- head.msg_id = cmd->msg_id; +- head.reserved = cmd->reserved; +- head.reserved1 = cmd->reserved1; +- +- msg.head = &head; +- msg.head_len = sizeof(struct hmdfs_head_cmd); +- msg.sdesc = sdesc; +- msg.sdesc_len = le32_to_cpu(sizeof(struct slice_descriptor)); +- msg.data = slice_buf; +- msg.len = content_size; +- +- ret = hmdfs_sendmessage(con, &msg); +- +- return ret; +-} +- +-int hmdfs_readfile_response(struct hmdfs_peer *con, struct hmdfs_head_cmd *head, +- struct file *filp) +-{ +- int ret; +- const unsigned int slice_size = PAGE_SIZE; +- char *slice_buf = NULL; +- loff_t file_offset = 0, file_size; +- ssize_t size; +- struct slice_descriptor sdesc; +- unsigned int slice_sn = 0; +- +- if (!filp) +- return hmdfs_sendmessage_response(con, head, 0, NULL, 0); +- +- sdesc.slice_size = cpu_to_le32(slice_size); +- file_size = i_size_read(file_inode(filp)); +- file_size = round_up(file_size, slice_size); +- sdesc.num_slices = cpu_to_le32(file_size / slice_size); +- +- slice_buf = kmalloc(slice_size, GFP_KERNEL); +- if (!slice_buf) { +- ret = -ENOMEM; +- goto out; +- } +- +- while (1) { +- sdesc.slice_sn = cpu_to_le32(slice_sn++); +- size = kernel_read(filp, slice_buf, (size_t)slice_size, +- &file_offset); +- if (IS_ERR_VALUE(size)) { +- ret = (int)size; +- goto out; +- } +- sdesc.content_size = cpu_to_le32(size); +- ret = hmdfs_send_slice(con, head, &sdesc, slice_buf); +- if (ret) { +- hmdfs_info("Cannot send file slice %d ", +- le32_to_cpu(sdesc.slice_sn)); +- break; +- } +- if (file_offset >= i_size_read(file_inode(filp))) +- break; +- } +- +-out: +- kfree(slice_buf); +- if (ret) +- hmdfs_sendmessage_response(con, head, 0, NULL, ret); +- return ret; +-} +- +-static void asw_release(struct kref *kref) +-{ +- struct hmdfs_async_work *asw = NULL; +- struct hmdfs_peer *peer = NULL; +- +- asw = (struct hmdfs_async_work *)container_of(kref, +- struct hmdfs_msg_idr_head, ref); +- peer = asw->head.peer; +- idr_remove(&peer->msg_idr, asw->head.msg_id); +- spin_unlock(&peer->idr_lock); +- kfree(asw); +-} +- +-void asw_put(struct hmdfs_async_work *asw) +-{ +- kref_put_lock(&asw->head.ref, asw_release, &asw->head.peer->idr_lock); +-} +- +-void hmdfs_recv_page_work_fn(struct work_struct *ptr) +-{ +- struct hmdfs_async_work *async_work = +- container_of(ptr, struct hmdfs_async_work, d_work.work); +- +- hmdfs_client_resp_statis(async_work->head.peer->sbi, +- F_READPAGE, HMDFS_RESP_TIMEOUT, 0, 0); +- hmdfs_err_ratelimited("timeout and release page, 
msg_id:%u", +- async_work->head.msg_id); +- asw_done(async_work); +-} +- +-int hmdfs_sendpage_request(struct hmdfs_peer *con, +- struct hmdfs_send_command *sm) +-{ +- int ret = 0; +- struct hmdfs_send_data msg; +- struct hmdfs_async_work *async_work = NULL; +- size_t outlen = sm->len + sizeof(struct hmdfs_head_cmd); +- struct hmdfs_head_cmd head; +- unsigned int timeout; +- unsigned long start = jiffies; +- +- WARN_ON(!sm->out_buf); +- +- timeout = get_cmd_timeout(con->sbi, sm->operations.command); +- if (timeout == TIMEOUT_UNINIT) { +- hmdfs_err("send msg %d with uninitialized timeout", +- sm->operations.command); +- ret = -EINVAL; +- goto unlock; +- } +- +- if (!hmdfs_is_node_online(con)) { +- ret = -EAGAIN; +- goto unlock; +- } +- +- memset(&head, 0, sizeof(head)); +- head.magic = HMDFS_MSG_MAGIC; +- head.version = HMDFS_VERSION; +- head.operations = sm->operations; +- head.data_len = cpu_to_le32(outlen); +- head.ret_code = cpu_to_le32(sm->ret_code); +- head.reserved = 0; +- head.reserved1 = 0; +- +- msg.head = &head; +- msg.head_len = sizeof(struct hmdfs_head_cmd); +- msg.data = sm->data; +- msg.len = sm->len; +- msg.sdesc_len = 0; +- msg.sdesc = NULL; +- +- async_work = kzalloc(sizeof(*async_work), GFP_KERNEL); +- if (!async_work) { +- ret = -ENOMEM; +- goto unlock; +- } +- async_work->start = start; +- ret = hmdfs_alloc_msg_idr(con, MSG_IDR_PAGE, async_work, sm->operations); +- if (ret) { +- hmdfs_err("alloc msg_id failed, err %d", ret); +- goto unlock; +- } +- head.msg_id = cpu_to_le32(async_work->head.msg_id); +- async_work->page = sm->out_buf; +- asw_get(async_work); +- INIT_DELAYED_WORK(&async_work->d_work, hmdfs_recv_page_work_fn); +- ret = queue_delayed_work(con->async_wq, &async_work->d_work, +- timeout * HZ); +- if (!ret) { +- hmdfs_err("queue_delayed_work failed, msg_id %u", head.msg_id); +- goto fail_and_unlock_page; +- } +- ret = hmdfs_sendmessage(con, &msg); +- if (ret) { +- hmdfs_err("send err sm->device_id, %lld, msg_id %u", +- con->device_id, head.msg_id); +- if (!cancel_delayed_work(&async_work->d_work)) { +- hmdfs_err("cancel async work err"); +- asw_put(async_work); +- hmdfs_dec_msg_idr_process(con); +- goto out; +- } +- goto fail_and_unlock_page; +- } +- +- asw_put(async_work); +- hmdfs_dec_msg_idr_process(con); +- return 0; +- +-fail_and_unlock_page: +- asw_put(async_work); +- asw_done(async_work); +- hmdfs_dec_msg_idr_process(con); +- return ret; +-unlock: +- kfree(async_work); +- unlock_page(sm->out_buf); +-out: +- return ret; +-} +- +-static void hmdfs_request_handle_sync(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf) +-{ +- unsigned long start = jiffies; +- const struct cred *saved_cred = hmdfs_override_fsids(true); +- +- if (!saved_cred) { +- hmdfs_err("prepare cred failed!"); +- kfree(buf); +- return; +- } +- +- s_recv_callbacks[head->operations.command](con, head, buf); +- hmdfs_statistic(con->sbi, head->operations.command, jiffies - start); +- +- kfree(buf); +- +- hmdfs_revert_fsids(saved_cred); +-} +- +-static void hmdfs_msg_handle_sync(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf) +-{ +- const struct cred *old_cred = hmdfs_override_creds(con->sbi->cred); +- +- /* +- * Reuse PF_NPROC_EXCEEDED as an indication of hmdfs server context: +- * 1. PF_NPROC_EXCEEDED will set by setreuid()/setuid()/setresuid(), +- * we assume kwork will not call theses syscalls. +- * 2. PF_NPROC_EXCEEDED will be cleared by execv(), and kworker +- * will not call it. 
+- */ +- current->flags |= PF_NPROC_EXCEEDED; +- hmdfs_request_handle_sync(con, head, buf); +- current->flags &= ~PF_NPROC_EXCEEDED; +- +- hmdfs_revert_creds(old_cred); +-} +- +- +-static void hmdfs_request_work_fn(struct work_struct *ptr) +-{ +- struct work_handler_desp *desp = +- container_of(ptr, struct work_handler_desp, work); +- +- hmdfs_msg_handle_sync(desp->peer, desp->head, desp->buf); +- peer_put(desp->peer); +- kfree(desp->head); +- kfree(desp); +-} +- +-static int hmdfs_msg_handle_async(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf, +- struct workqueue_struct *wq, +- void (*work_fn)(struct work_struct *ptr)) +-{ +- struct work_handler_desp *desp = NULL; +- struct hmdfs_head_cmd *dup_head = NULL; +- int ret; +- +- desp = kzalloc(sizeof(*desp), GFP_KERNEL); +- if (!desp) { +- ret = -ENOMEM; +- goto exit_desp; +- } +- +- dup_head = kzalloc(sizeof(*dup_head), GFP_KERNEL); +- if (!dup_head) { +- ret = -ENOMEM; +- goto exit_desp; +- } +- +- *dup_head = *head; +- desp->peer = con; +- desp->head = dup_head; +- desp->buf = buf; +- INIT_WORK(&desp->work, work_fn); +- +- peer_get(con); +- queue_work(wq, &desp->work); +- +- ret = 0; +- return ret; +- +-exit_desp: +- kfree(desp); +- return ret; +-} +- +-static int hmdfs_request_recv(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf) +-{ +- int ret; +- +- if (head->operations.command >= F_SIZE || +- !s_recv_callbacks[head->operations.command]) { +- ret = -EINVAL; +- hmdfs_err("NULL callback, command %d", +- head->operations.command); +- goto out; +- } +- +- switch (head->operations.command) { +- case F_OPEN: +- case F_RELEASE: +- case F_ITERATE: +- case F_MKDIR: +- case F_RMDIR: +- case F_CREATE: +- case F_UNLINK: +- case F_RENAME: +- case F_SETATTR: +- case F_STATFS: +- case F_CONNECT_REKEY: +- case F_DROP_PUSH: +- case F_GETATTR: +- case F_FSYNC: +- case F_SYNCFS: +- case F_GETXATTR: +- case F_SETXATTR: +- case F_LISTXATTR: +- case F_ATOMIC_OPEN: +- ret = hmdfs_msg_handle_async(con, head, buf, con->req_handle_wq, +- hmdfs_request_work_fn); +- break; +- case F_WRITEPAGE: +- case F_READPAGE: +- hmdfs_msg_handle_sync(con, head, buf); +- ret = 0; +- break; +- default: +- hmdfs_err("Fatal! Unexpected request command %d", +- head->operations.command); +- ret = -EINVAL; +- } +- +-out: +- return ret; +-} +- +-void hmdfs_response_wakeup(struct sendmsg_wait_queue *msg_info, +- __u32 ret_code, __u32 data_len, void *buf) +-{ +- msg_info->ret = ret_code; +- msg_info->size = data_len; +- msg_info->buf = buf; +- atomic_set(&msg_info->valid, MSG_Q_END_RECV); +- wake_up_interruptible(&msg_info->response_q); +-} +- +-static int hmdfs_readfile_slice(struct sendmsg_wait_queue *msg_info, +- struct work_handler_desp *desp) +-{ +- struct slice_descriptor *sdesc = desp->buf; +- void *slice_buf = sdesc + 1; +- struct file_recv_info *recv_info = &msg_info->recv_info; +- struct file *filp = recv_info->local_filp; +- loff_t offset; +- ssize_t written_size; +- +- if (filp == NULL) { +- hmdfs_warning("recv_info filp is NULL \n"); +- return -EINVAL; +- } +- +- if (atomic_read(&recv_info->state) != FILE_RECV_PROCESS) +- return -EBUSY; +- +- offset = le32_to_cpu(sdesc->slice_size) * le32_to_cpu(sdesc->slice_sn); +- +- written_size = kernel_write(filp, slice_buf, +- le32_to_cpu(sdesc->content_size), &offset); +- if (IS_ERR_VALUE(written_size)) { +- atomic_set(&recv_info->state, FILE_RECV_ERR_SPC); +- hmdfs_info("Fatal! 
Cannot store a file slice %d/%d, ret = %d", +- le32_to_cpu(sdesc->slice_sn), +- le32_to_cpu(sdesc->num_slices), (int)written_size); +- return (int)written_size; +- } +- +- if (atomic_inc_return(&recv_info->local_fslices) >= +- le32_to_cpu(sdesc->num_slices)) +- atomic_set(&recv_info->state, FILE_RECV_SUCC); +- return 0; +-} +- +-static void hmdfs_file_response_work_fn(struct work_struct *ptr) +-{ +- struct work_handler_desp *desp = +- container_of(ptr, struct work_handler_desp, work); +- struct sendmsg_wait_queue *msg_info = NULL; +- int ret; +- atomic_t *pstate = NULL; +- u8 cmd = desp->head->operations.command; +- const struct cred *old_cred = +- hmdfs_override_creds(desp->peer->sbi->cred); +- +- msg_info = (struct sendmsg_wait_queue *)hmdfs_find_msg_head(desp->peer, +- le32_to_cpu(desp->head->msg_id), desp->head->operations); +- if (!msg_info || atomic_read(&msg_info->valid) != MSG_Q_SEND) { +- hmdfs_client_resp_statis(desp->peer->sbi, cmd, HMDFS_RESP_DELAY, +- 0, 0); +- hmdfs_info("cannot find msg(id %d)", +- le32_to_cpu(desp->head->msg_id)); +- goto free; +- } +- +- ret = le32_to_cpu(desp->head->ret_code); +- if (ret || le32_to_cpu(desp->head->data_len) == sizeof(*desp->head)) +- goto wakeup; +- ret = hmdfs_readfile_slice(msg_info, desp); +- pstate = &msg_info->recv_info.state; +- if (ret || atomic_read(pstate) != FILE_RECV_PROCESS) +- goto wakeup; +- goto free; +- +-wakeup: +- hmdfs_response_wakeup(msg_info, ret, sizeof(struct hmdfs_head_cmd), +- NULL); +- hmdfs_client_resp_statis(desp->peer->sbi, cmd, HMDFS_RESP_NORMAL, +- msg_info->start, jiffies); +-free: +- if (msg_info) +- msg_put(msg_info); +- peer_put(desp->peer); +- hmdfs_revert_creds(old_cred); +- +- kfree(desp->buf); +- kfree(desp->head); +- kfree(desp); +-} +- +-static void hmdfs_wait_mp_wfired(struct hmdfs_msg_parasite *mp) +-{ +- /* We just cancel queued works */ +- while (unlikely(!smp_load_acquire(&mp->wfired))) +- usleep_range(ACQUIRE_WFIRED_INTVAL_USEC_MIN, +- ACQUIRE_WFIRED_INTVAL_USEC_MAX); +-} +- +-int hmdfs_response_handle_sync(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf) +-{ +- struct sendmsg_wait_queue *msg_info = NULL; +- struct hmdfs_msg_parasite *mp = NULL; +- struct hmdfs_msg_idr_head *msg_head = NULL; +- u32 msg_id = le32_to_cpu(head->msg_id); +- bool woke = false; +- u8 cmd = head->operations.command; +- +- msg_head = hmdfs_find_msg_head(con, msg_id, head->operations); +- if (!msg_head) +- goto out; +- +- switch (msg_head->type) { +- case MSG_IDR_MESSAGE_SYNC: +- msg_info = (struct sendmsg_wait_queue *)msg_head; +- if (atomic_read(&msg_info->valid) == MSG_Q_SEND) { +- hmdfs_response_wakeup(msg_info, +- le32_to_cpu(head->ret_code), +- le32_to_cpu(head->data_len), buf); +- hmdfs_client_resp_statis(con->sbi, cmd, +- HMDFS_RESP_NORMAL, +- msg_info->start, jiffies); +- woke = true; +- } +- +- msg_put(msg_info); +- break; +- case MSG_IDR_MESSAGE_ASYNC: +- mp = (struct hmdfs_msg_parasite *)msg_head; +- +- hmdfs_wait_mp_wfired(mp); +- if (cancel_delayed_work(&mp->d_work)) { +- mp->resp.out_buf = buf; +- mp->resp.out_len = +- le32_to_cpu(head->data_len) - sizeof(*head); +- mp->resp.ret_code = le32_to_cpu(head->ret_code); +- queue_delayed_work(con->async_wq, &mp->d_work, 0); +- hmdfs_client_resp_statis(con->sbi, cmd, +- HMDFS_RESP_NORMAL, mp->start, +- jiffies); +- woke = true; +- } +- mp_put(mp); +- break; +- default: +- hmdfs_err("receive incorrect msg type %d msg_id %d cmd %d", +- msg_head->type, msg_id, cmd); +- break; +- } +- +- if (likely(woke)) +- return 0; +-out: +- 
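/*
 * (Illustrative aside, not part of the original file.) The
 * MSG_IDR_MESSAGE_ASYNC branch above resolves the race between the
 * delayed timeout work and the arriving response via
 * cancel_delayed_work(): whichever side wins the cancel completes the
 * message, so the payload is handed over and the callback re-queued
 * with zero delay only on a successful cancel. In outline:
 *
 *	hmdfs_wait_mp_wfired(mp);		// ensure the work was queued
 *	if (cancel_delayed_work(&mp->d_work)) {
 *		mp->resp.ret_code = le32_to_cpu(head->ret_code);
 *		queue_delayed_work(con->async_wq, &mp->d_work, 0);
 *	}
 *	// else the timeout handler already owns mp and reports -ETIME
 */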
hmdfs_client_resp_statis(con->sbi, cmd, HMDFS_RESP_DELAY, 0, 0); +- hmdfs_info("cannot find msg_id %d cmd %d", msg_id, cmd); +- return -EINVAL; +-} +- +-static int hmdfs_response_recv(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *head, void *buf) +-{ +- __u16 command = head->operations.command; +- int ret; +- +- if (command >= F_SIZE) { +- ret = -EINVAL; +- return ret; +- } +- +- switch (head->operations.command) { +- case F_OPEN: +- case F_RELEASE: +- case F_READPAGE: +- case F_WRITEPAGE: +- case F_MKDIR: +- case F_RMDIR: +- case F_CREATE: +- case F_UNLINK: +- case F_RENAME: +- case F_SETATTR: +- case F_STATFS: +- case F_CONNECT_REKEY: +- case F_DROP_PUSH: +- case F_GETATTR: +- case F_FSYNC: +- case F_SYNCFS: +- case F_GETXATTR: +- case F_SETXATTR: +- case F_LISTXATTR: +- ret = hmdfs_response_handle_sync(con, head, buf); +- return ret; +- +- case F_ITERATE: +- ret = hmdfs_msg_handle_async(con, head, buf, con->async_wq, +- hmdfs_file_response_work_fn); +- return ret; +- +- default: +- hmdfs_err("Fatal! Unexpected response command %d", +- head->operations.command); +- ret = -EINVAL; +- return ret; +- } +-} +- +-void hmdfs_recv_mesg_callback(struct hmdfs_peer *con, void *head, +- void *buf) +-{ +- struct hmdfs_head_cmd *hmdfs_head = (struct hmdfs_head_cmd *)head; +- +- trace_hmdfs_recv_mesg_callback(hmdfs_head); +- +- if (hmdfs_message_verify(con, hmdfs_head, buf) < 0) { +- hmdfs_info("Message %d has been abandoned", hmdfs_head->msg_id); +- goto out_err; +- } +- +- switch (hmdfs_head->operations.cmd_flag) { +- case C_REQUEST: +- if (hmdfs_request_recv(con, hmdfs_head, buf) < 0) +- goto out_err; +- break; +- +- case C_RESPONSE: +- if (hmdfs_response_recv(con, hmdfs_head, buf) < 0) +- goto out_err; +- break; +- +- default: +- hmdfs_err("Fatal! Unexpected msg cmd %d", +- hmdfs_head->operations.cmd_flag); +- goto out_err; +- } +- return; +- +-out_err: +- kfree(buf); +-} +- +-void hmdfs_wakeup_parasite(struct hmdfs_msg_parasite *mp) +-{ +- hmdfs_wait_mp_wfired(mp); +- if (!cancel_delayed_work(&mp->d_work)) +- hmdfs_err("cancel parasite work err msg_id=%d cmd=%d", +- mp->head.msg_id, mp->req.operations.command); +- else +- async_request_cb_on_wakeup_fn(&mp->d_work.work); +-} +- +-void hmdfs_wakeup_async_work(struct hmdfs_async_work *async_work) +-{ +- if (!cancel_delayed_work(&async_work->d_work)) +- hmdfs_err("cancel async work err msg_id=%d", +- async_work->head.msg_id); +- else +- hmdfs_recv_page_work_fn(&async_work->d_work.work); +-} +diff --git a/fs/hmdfs/comm/socket_adapter.h b/fs/hmdfs/comm/socket_adapter.h +deleted file mode 100644 +index 35275ccac..000000000 +--- a/fs/hmdfs/comm/socket_adapter.h ++++ /dev/null +@@ -1,179 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/comm/socket_adapter.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */
+-
+-#ifndef SOCKET_ADAPTER_H
+-#define SOCKET_ADAPTER_H
+-
+-#include
+-#include
+-
+-#include "connection.h"
+-#include "hmdfs.h"
+-#include "protocol.h"
+-
+-#define HMDFS_KEY_SIZE 32
+-#define HMDFS_IV_SIZE 12
+-#define HMDFS_TAG_SIZE 16
+-#define HMDFS_CID_SIZE 64
+-#define INVALID_SOCKET_FD (-1)
+-
+-#define HMDFS_IDR_RESCHED_COUNT 512
+-
+-/*****************************************************************************
+- * connections (TCP, UDP, etc.) adapter for RPC
+- *****************************************************************************/
+-
+-struct work_handler_desp {
+- struct work_struct work;
+- struct hmdfs_peer *peer;
+- struct hmdfs_head_cmd *head;
+- void *buf;
+-};
+-
+-struct work_readfile_request_async {
+- struct work_struct work;
+- struct hmdfs_peer *con;
+- struct hmdfs_send_command sm;
+-};
+-
+-static inline void hmdfs_init_cmd(struct hmdfs_cmd *op, u8 cmd)
+-{
+- op->reserved = 0;
+- op->cmd_flag = C_REQUEST;
+- op->command = cmd;
+- op->reserved2 = 0;
+-}
+-
+-int hmdfs_send_async_request(struct hmdfs_peer *peer,
+- const struct hmdfs_req *req);
+-int hmdfs_sendmessage_request(struct hmdfs_peer *con,
+- struct hmdfs_send_command *msg);
+-int hmdfs_sendpage_request(struct hmdfs_peer *con,
+- struct hmdfs_send_command *msg);
+-
+-int hmdfs_sendmessage_response(struct hmdfs_peer *con,
+- struct hmdfs_head_cmd *cmd, __u32 data_len,
+- void *buf, __u32 ret_code);
+-int hmdfs_readfile_response(struct hmdfs_peer *con, struct hmdfs_head_cmd *head,
+- struct file *filp);
+-
+-void hmdfs_recv_page_work_fn(struct work_struct *ptr);
+-
+-/*****************************************************************************
+- * statistics info for RPC
+- *****************************************************************************/
+-
+-enum hmdfs_resp_type {
+- HMDFS_RESP_NORMAL,
+- HMDFS_RESP_DELAY,
+- HMDFS_RESP_TIMEOUT
+-};
+-
+-struct server_statistic {
+- unsigned long long cnt; /* request received */
+- unsigned long long max; /* max processing time */
+- unsigned long long total; /* total processing time */
+- unsigned long long snd_cnt; /* resp send to client */
+- unsigned long long snd_fail_cnt; /* send resp to client failed cnt */
+-};
+-
+-struct client_statistic {
+- unsigned long long snd_cnt; /* request send to server */
+- unsigned long long resp_cnt; /* response receive from server */
+- unsigned long long timeout_cnt; /* no response from server */
+- unsigned long long delay_resp_cnt; /* delay response from server */
+- unsigned long long max; /* max waiting time */
+- unsigned long long total; /* total waiting time */
+- unsigned long long snd_fail_cnt; /* request send failed to server */
+-};
+-
+-
+-static inline void hmdfs_statistic(struct hmdfs_sb_info *sbi, u8 cmd,
+- unsigned long jiff)
+-{
+- if (cmd >= F_SIZE)
+- return;
+-
+- sbi->s_server_statis[cmd].cnt++;
+- sbi->s_server_statis[cmd].total += jiff;
+- if (jiff > sbi->s_server_statis[cmd].max)
+- sbi->s_server_statis[cmd].max = jiff;
+-}
+-
+-static inline void hmdfs_server_snd_statis(struct hmdfs_sb_info *sbi,
+- u8 cmd, int ret)
+-{
+- if (cmd >= F_SIZE)
+- return;
+- ret ? sbi->s_server_statis[cmd].snd_fail_cnt++ :
+- sbi->s_server_statis[cmd].snd_cnt++;
+-}
+-
+-static inline void hmdfs_client_snd_statis(struct hmdfs_sb_info *sbi,
+- u8 cmd, int ret)
+-{
+- if (cmd >= F_SIZE)
+- return;
+- ret ?
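+- /*
+-  * Illustrative sketch (not from the original source): the counters
+-  * above are raw accumulators, so derived figures are left to the
+-  * reader, e.g. an average client wait for one command:
+-  *
+-  *   struct client_statistic *s = &sbi->s_client_statis[F_READPAGE];
+-  *   u64 avg_jiffies = s->resp_cnt ? s->total / s->resp_cnt : 0;
+-  *   unsigned int avg_ms = jiffies_to_msecs(avg_jiffies);
+-  */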
sbi->s_client_statis[cmd].snd_fail_cnt++ : +- sbi->s_client_statis[cmd].snd_cnt++; +-} +- +-extern void hmdfs_client_resp_statis(struct hmdfs_sb_info *sbi, u8 cmd, +- enum hmdfs_resp_type type, +- unsigned long start, unsigned long end); +- +-/***************************************************************************** +- * timeout configuration for RPC +- *****************************************************************************/ +- +-enum HMDFS_TIME_OUT { +- TIMEOUT_NONE = 0, +- TIMEOUT_COMMON = 4, +- TIMEOUT_6S = 6, +- TIMEOUT_30S = 30, +- TIMEOUT_1M = 60, +- TIMEOUT_90S = 90, +- TIMEOUT_CONFIG = UINT_MAX - 1, // for hmdfs_req to read from config +- TIMEOUT_UNINIT = UINT_MAX, +-}; +- +-static inline int get_cmd_timeout(struct hmdfs_sb_info *sbi, enum FILE_CMD cmd) +-{ +- return sbi->s_cmd_timeout[cmd]; +-} +- +-static inline void set_cmd_timeout(struct hmdfs_sb_info *sbi, enum FILE_CMD cmd, +- unsigned int value) +-{ +- sbi->s_cmd_timeout[cmd] = value; +-} +- +-void hmdfs_recv_mesg_callback(struct hmdfs_peer *con, void *head, void *buf); +- +-void hmdfs_response_wakeup(struct sendmsg_wait_queue *msg_info, +- __u32 ret_code, __u32 data_len, void *buf); +- +-void hmdfs_wakeup_parasite(struct hmdfs_msg_parasite *mp); +- +-void hmdfs_wakeup_async_work(struct hmdfs_async_work *async_work); +- +-void msg_put(struct sendmsg_wait_queue *msg_wq); +-void head_put(struct hmdfs_msg_idr_head *head); +-void mp_put(struct hmdfs_msg_parasite *mp); +-void asw_put(struct hmdfs_async_work *asw); +-static inline void asw_done(struct hmdfs_async_work *asw) +-{ +- if (asw->page) +- unlock_page(asw->page); +- asw_put(asw); +-} +- +-static inline void asw_get(struct hmdfs_async_work *asw) +-{ +- kref_get(&asw->head.ref); +-} +-#endif +diff --git a/fs/hmdfs/comm/transport.c b/fs/hmdfs/comm/transport.c +deleted file mode 100644 +index fdd7fd98f..000000000 +--- a/fs/hmdfs/comm/transport.c ++++ /dev/null +@@ -1,1253 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/comm/transport.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include "transport.h" +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "device_node.h" +-#include "hmdfs_trace.h" +-#include "socket_adapter.h" +-#include "authority/authentication.h" +- +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +-#include +-#include "crypto.h" +-#endif +- +-typedef void (*connect_recv_handler)(struct connection *, void *, void *, +- __u32); +- +-static connect_recv_handler connect_recv_callback[CONNECT_STAT_COUNT] = { +- [CONNECT_STAT_WAIT_REQUEST] = connection_handshake_recv_handler, +- [CONNECT_STAT_WAIT_RESPONSE] = connection_handshake_recv_handler, +- [CONNECT_STAT_WORKING] = connection_working_recv_handler, +- [CONNECT_STAT_STOP] = NULL, +- [CONNECT_STAT_WAIT_ACK] = connection_handshake_recv_handler, +- [CONNECT_STAT_NEGO_FAIL] = NULL, +-}; +- +-static int recvmsg_nofs(struct socket *sock, struct msghdr *msg, +- struct kvec *vec, size_t num, size_t size, int flags) +-{ +- unsigned int nofs_flags; +- int ret; +- +- /* enable NOFS for memory allocation */ +- nofs_flags = memalloc_nofs_save(); +- ret = kernel_recvmsg(sock, msg, vec, num, size, flags); +- memalloc_nofs_restore(nofs_flags); +- +- return ret; +-} +- +-static int sendmsg_nofs(struct socket *sock, struct msghdr *msg, +- struct kvec *vec, size_t num, size_t size) +-{ +- unsigned int nofs_flags; +- int ret; +- +- /* enable NOFS for memory allocation */ +- nofs_flags = memalloc_nofs_save(); +- ret = kernel_sendmsg(sock, msg, vec, num, size); +- memalloc_nofs_restore(nofs_flags); +- +- return ret; +-} +- +-static int tcp_set_recvtimeo(struct socket *sock, int timeout) +-{ +- long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); +- +- tcp_sock_set_nodelay(sock->sk); +- tcp_sock_set_user_timeout(sock->sk, jiffies_left); +- return 0; +-} +- +-uint32_t hmdfs_tcpi_rtt(struct hmdfs_peer *con) +-{ +- uint32_t rtt_us = 0; +- struct connection *conn_impl = NULL; +- struct tcp_handle *tcp = NULL; +- +- conn_impl = get_conn_impl(con, CONNECT_TYPE_TCP); +- if (!conn_impl) +- return rtt_us; +- tcp = (struct tcp_handle *)(conn_impl->connect_handle); +- if (tcp->sock) +- rtt_us = tcp_sk(tcp->sock->sk)->srtt_us >> 3; +- connection_put(conn_impl); +- return rtt_us; +-} +- +-static int tcp_read_head_from_socket(struct socket *sock, void *buf, +- unsigned int to_read) +-{ +- int rc = 0; +- struct msghdr hmdfs_msg; +- struct kvec iov; +- +- iov.iov_base = buf; +- iov.iov_len = to_read; +- memset(&hmdfs_msg, 0, sizeof(hmdfs_msg)); +- hmdfs_msg.msg_flags = MSG_WAITALL; +- hmdfs_msg.msg_control = NULL; +- hmdfs_msg.msg_controllen = 0; +- rc = recvmsg_nofs(sock, &hmdfs_msg, &iov, 1, to_read, +- hmdfs_msg.msg_flags); +- if (rc == -EAGAIN || rc == -ETIMEDOUT || rc == -EINTR || +- rc == -EBADMSG) { +- usleep_range(1000, 2000); +- return -EAGAIN; +- } +- // error occurred +- if (rc != to_read) { +- hmdfs_err("tcp recv error %d", rc); +- return -ESHUTDOWN; +- } +- return 0; +-} +- +-static int tcp_read_buffer_from_socket(struct socket *sock, void *buf, +- unsigned int to_read) +-{ +- int read_cnt = 0; +- int retry_time = 0; +- int rc = 0; +- struct msghdr hmdfs_msg; +- struct kvec iov; +- +- do { +- iov.iov_base = (char *)buf + read_cnt; +- iov.iov_len = to_read - read_cnt; +- memset(&hmdfs_msg, 0, sizeof(hmdfs_msg)); +- hmdfs_msg.msg_flags = MSG_WAITALL; +- hmdfs_msg.msg_control = NULL; +- hmdfs_msg.msg_controllen = 0; +- rc = recvmsg_nofs(sock, &hmdfs_msg, &iov, 1, +- to_read - read_cnt, hmdfs_msg.msg_flags); +- if (rc == -EBADMSG) { +- usleep_range(1000, 
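+- /*
+-  * Illustrative sketch (not from the original source): the
+-  * recvmsg_nofs()/sendmsg_nofs() wrappers above use the scoped NOFS
+-  * API, under which any allocation performed inside the bracketed
+-  * region is implicitly degraded to GFP_NOFS, so the socket layer
+-  * cannot recurse into filesystem reclaim while hmdfs is itself
+-  * servicing a filesystem request:
+-  *
+-  *   unsigned int flags = memalloc_nofs_save();
+-  *   buf = kmalloc(len, GFP_KERNEL);   // behaves like GFP_NOFS here
+-  *   memalloc_nofs_restore(flags);
+-  */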
2000);
+- continue;
+- }
+- if (rc == -EAGAIN || rc == -ETIMEDOUT || rc == -EINTR) {
+- retry_time++;
+- hmdfs_info("read again %d", rc);
+- usleep_range(1000, 2000);
+- continue;
+- }
+- // error occurred
+- if (rc <= 0) {
+- hmdfs_err("tcp recv error %d", rc);
+- return -ESHUTDOWN;
+- }
+- read_cnt += rc;
+- if (read_cnt != to_read)
+- hmdfs_info("read again %d/%d", read_cnt, to_read);
+- } while (read_cnt < to_read && retry_time < MAX_RECV_RETRY_TIMES);
+- if (read_cnt == to_read)
+- return 0;
+- return -ESHUTDOWN;
+-}
+-
+-static int hmdfs_drop_readpage_buffer(struct socket *sock,
+- struct hmdfs_head_cmd *recv)
+-{
+- unsigned int len;
+- void *buf = NULL;
+- int err;
+-
+- len = le32_to_cpu(recv->data_len) - sizeof(struct hmdfs_head_cmd);
+- if (len > HMDFS_PAGE_SIZE || !len) {
+- hmdfs_err("recv invalid readpage length %u", len);
+- return -EINVAL;
+- }
+-
+- /* Abort the connection if no memory */
+- buf = kmalloc(len, GFP_KERNEL);
+- if (!buf)
+- return -ESHUTDOWN;
+-
+- err = tcp_read_buffer_from_socket(sock, buf, len);
+- kfree(buf);
+-
+- return err;
+-}
+-
+-static int hmdfs_get_readpage_buffer(struct socket *sock,
+- struct hmdfs_head_cmd *recv,
+- struct page *page)
+-{
+- char *page_buf = NULL;
+- unsigned int out_len;
+- int err;
+-
+- out_len = le32_to_cpu(recv->data_len) - sizeof(struct hmdfs_head_cmd);
+- if (out_len > HMDFS_PAGE_SIZE || !out_len) {
+- hmdfs_err("recv invalid readpage length %u", out_len);
+- return -EINVAL;
+- }
+-
+- page_buf = kmap(page);
+- err = tcp_read_buffer_from_socket(sock, page_buf, out_len);
+- if (err)
+- goto out_unmap;
+- if (out_len != HMDFS_PAGE_SIZE)
+- memset(page_buf + out_len, 0, HMDFS_PAGE_SIZE - out_len);
+-
+-out_unmap:
+- kunmap(page);
+- return err;
+-}
+-
+-static int tcp_recvpage_tls(struct connection *connect,
+- struct hmdfs_head_cmd *recv)
+-{
+- int ret = 0;
+- struct tcp_handle *tcp = NULL;
+- struct hmdfs_peer *node = NULL;
+- struct page *page = NULL;
+- struct hmdfs_async_work *async_work = NULL;
+- int rd_err;
+-
+- if (!connect) {
+- hmdfs_err("tcp connect == NULL");
+- return -ESHUTDOWN;
+- }
+- node = connect->node;
+- tcp = (struct tcp_handle *)(connect->connect_handle);
+-
+- rd_err = le32_to_cpu(recv->ret_code);
+- if (rd_err)
+- hmdfs_warning("tcp: readpage from peer %llu ret err %d",
+- node->device_id, rd_err);
+-
+- async_work = (struct hmdfs_async_work *)hmdfs_find_msg_head(node,
+- le32_to_cpu(recv->msg_id), recv->operations);
+- if (!async_work || !cancel_delayed_work(&async_work->d_work))
+- goto out;
+-
+- page = async_work->page;
+- if (!page) {
+- hmdfs_err("page not found");
+- goto out;
+- }
+-
+- if (!rd_err) {
+- ret = hmdfs_get_readpage_buffer(tcp->sock, recv, page);
+- if (ret)
+- rd_err = ret;
+- }
+- hmdfs_client_recv_readpage(recv, rd_err, async_work);
+- asw_put(async_work);
+- return ret;
+-
+-out:
+- /* async_work will be released by recvpage in normal procedure */
+- if (async_work)
+- asw_put(async_work);
+- hmdfs_err_ratelimited("timeout and droppage");
+- hmdfs_client_resp_statis(node->sbi, F_READPAGE, HMDFS_RESP_DELAY, 0, 0);
+- if (!rd_err)
+- ret = hmdfs_drop_readpage_buffer(tcp->sock, recv);
+- return ret;
+-}
+-
+-static void aeadcipher_cb(void *req, int error)
+-{
+- struct aeadcrypt_result *result = ((struct crypto_async_request *)req)->data;
+-
+- if (error == -EINPROGRESS)
+- return;
+- result->err = error;
+- complete(&result->completion);
+-}
+-
+-static int aeadcipher_en_de(struct aead_request *req,
+- struct aeadcrypt_result result, int flag)
+-{
+- int rc = 0;
+-
+- if
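+- /*
+-  * Illustrative note (a sketch, not from the original source):
+-  * crypto_aead_encrypt()/crypto_aead_decrypt() may complete
+-  * asynchronously, in which case they return -EINPROGRESS or -EBUSY
+-  * and aeadcipher_cb() signals result.completion.  The canonical
+-  * wait pattern, which the switch below implements:
+-  *
+-  *   rc = crypto_aead_encrypt(req);
+-  *   if (rc == -EINPROGRESS || rc == -EBUSY)
+-  *       rc = wait_for_completion_interruptible(&result.completion);
+-  */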
(flag) +- rc = crypto_aead_encrypt(req); +- else +- rc = crypto_aead_decrypt(req); +- switch (rc) { +- case 0: +- break; +- case -EINPROGRESS: +- case -EBUSY: +- rc = wait_for_completion_interruptible(&result.completion); +- if (!rc && !result.err) +- reinit_completion(&result.completion); +- break; +- default: +- hmdfs_err("returned rc %d result %d", rc, result.err); +- break; +- } +- return rc; +-} +- +-static int set_aeadcipher(struct crypto_aead *tfm, struct aead_request *req, +- struct aeadcrypt_result *result) +-{ +- init_completion(&result->completion); +- aead_request_set_callback( +- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, +- aeadcipher_cb, result); +- return 0; +-} +- +-int aeadcipher_encrypt_buffer(struct connection *con, __u8 *src_buf, +- size_t src_len, __u8 *dst_buf, size_t dst_len) +-{ +- int ret = 0; +- struct scatterlist src, dst; +- struct aead_request *req = NULL; +- struct aeadcrypt_result result; +- __u8 cipher_iv[HMDFS_IV_SIZE]; +- +- if (src_len <= 0) +- return -EINVAL; +- if (!virt_addr_valid(src_buf) || !virt_addr_valid(dst_buf)) { +- WARN_ON(1); +- hmdfs_err("encrypt address is invalid"); +- return -EPERM; +- } +- +- get_random_bytes(cipher_iv, HMDFS_IV_SIZE); +- memcpy(dst_buf, cipher_iv, HMDFS_IV_SIZE); +- req = aead_request_alloc(con->tfm, GFP_KERNEL); +- if (!req) { +- hmdfs_err("aead_request_alloc() failed"); +- return -ENOMEM; +- } +- ret = set_aeadcipher(con->tfm, req, &result); +- if (ret) { +- hmdfs_err("set_enaeadcipher exit fault"); +- goto out; +- } +- +- sg_init_one(&src, src_buf, src_len); +- sg_init_one(&dst, dst_buf + HMDFS_IV_SIZE, dst_len - HMDFS_IV_SIZE); +- aead_request_set_crypt(req, &src, &dst, src_len, cipher_iv); +- aead_request_set_ad(req, 0); +- ret = aeadcipher_en_de(req, result, ENCRYPT_FLAG); +-out: +- aead_request_free(req); +- return ret; +-} +- +-int aeadcipher_decrypt_buffer(struct connection *con, __u8 *src_buf, +- size_t src_len, __u8 *dst_buf, size_t dst_len) +-{ +- int ret = 0; +- struct scatterlist src, dst; +- struct aead_request *req = NULL; +- struct aeadcrypt_result result; +- __u8 cipher_iv[HMDFS_IV_SIZE]; +- +- if (src_len <= HMDFS_IV_SIZE + HMDFS_TAG_SIZE) +- return -EINVAL; +- if (!virt_addr_valid(src_buf) || !virt_addr_valid(dst_buf)) { +- WARN_ON(1); +- hmdfs_err("decrypt address is invalid"); +- return -EPERM; +- } +- +- memcpy(cipher_iv, src_buf, HMDFS_IV_SIZE); +- req = aead_request_alloc(con->tfm, GFP_KERNEL); +- if (!req) { +- hmdfs_err("aead_request_alloc() failed"); +- return -ENOMEM; +- } +- ret = set_aeadcipher(con->tfm, req, &result); +- if (ret) { +- hmdfs_err("set_deaeadcipher exit fault"); +- goto out; +- } +- +- sg_init_one(&src, src_buf + HMDFS_IV_SIZE, src_len - HMDFS_IV_SIZE); +- sg_init_one(&dst, dst_buf, dst_len); +- aead_request_set_crypt(req, &src, &dst, src_len - HMDFS_IV_SIZE, +- cipher_iv); +- aead_request_set_ad(req, 0); +- ret = aeadcipher_en_de(req, result, DECRYPT_FLAG); +-out: +- aead_request_free(req); +- return ret; +-} +- +-static int tcp_recvbuffer_cipher(struct connection *connect, +- struct hmdfs_head_cmd *recv) +-{ +- int ret = 0; +- struct tcp_handle *tcp = NULL; +- size_t cipherbuffer_len; +- __u8 *cipherbuffer = NULL; +- size_t outlen = 0; +- __u8 *outdata = NULL; +- __u32 recv_len = le32_to_cpu(recv->data_len); +- +- tcp = (struct tcp_handle *)(connect->connect_handle); +- if (recv_len == sizeof(struct hmdfs_head_cmd)) +- goto out_recv_head; +- else if (recv_len > sizeof(struct hmdfs_head_cmd) && +- recv_len <= ADAPTER_MESSAGE_LENGTH) +- cipherbuffer_len = 
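+- /*
+-  * Illustrative worked example (not from the original source): an
+-  * encrypted payload travels as IV || ciphertext || tag, so src_len
+-  * plaintext bytes occupy
+-  *
+-  *   src_len + HMDFS_IV_SIZE + HMDFS_TAG_SIZE = src_len + 12 + 16
+-  *
+-  * bytes on the wire, e.g. 4096 + 28 = 4124 bytes for one page.
+-  * The arithmetic below reverses this to size the cipher and
+-  * plaintext buffers.
+-  */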
recv_len - sizeof(struct hmdfs_head_cmd) +
+- HMDFS_IV_SIZE + HMDFS_TAG_SIZE;
+- else
+- return -ENOMSG;
+- cipherbuffer = kzalloc(cipherbuffer_len, GFP_KERNEL);
+- if (!cipherbuffer) {
+- hmdfs_err("zalloc cipherbuffer error");
+- return -ESHUTDOWN;
+- }
+- outlen = cipherbuffer_len - HMDFS_IV_SIZE - HMDFS_TAG_SIZE;
+- outdata = kzalloc(outlen, GFP_KERNEL);
+- if (!outdata) {
+- hmdfs_err("encrypt zalloc outdata error");
+- kfree(cipherbuffer);
+- return -ESHUTDOWN;
+- }
+-
+- ret = tcp_read_buffer_from_socket(tcp->sock, cipherbuffer,
+- cipherbuffer_len);
+- if (ret)
+- goto out_recv;
+- ret = aeadcipher_decrypt_buffer(connect, cipherbuffer, cipherbuffer_len,
+- outdata, outlen);
+- if (ret) {
+- hmdfs_err("decrypt_buf fail");
+- goto out_recv;
+- }
+-out_recv_head:
+- if (connect_recv_callback[connect->status]) {
+- connect_recv_callback[connect->status](connect, recv, outdata,
+- outlen);
+- } else {
+- kfree(outdata);
+- hmdfs_err("encrypt callback NULL status %d", connect->status);
+- }
+- kfree(cipherbuffer);
+- return ret;
+-out_recv:
+- kfree(cipherbuffer);
+- kfree(outdata);
+- return ret;
+-}
+-
+-static int tcp_recvbuffer_tls(struct connection *connect,
+- struct hmdfs_head_cmd *recv)
+-{
+- int ret = 0;
+- struct tcp_handle *tcp = NULL;
+- size_t outlen;
+- __u8 *outdata = NULL;
+- __u32 recv_len = le32_to_cpu(recv->data_len);
+-
+- tcp = (struct tcp_handle *)(connect->connect_handle);
+- outlen = recv_len - sizeof(struct hmdfs_head_cmd);
+- if (outlen == 0)
+- goto out_recv_head;
+-
+- /*
+- * NOTE: Up to half of the allocated memory may be wasted due to
+- * internal fragmentation; however, this reduces the number of
+- * memory allocations and we don't have to adjust the existing
+- * message transporting mechanism.
+- */
+- outdata = kmalloc(outlen, GFP_KERNEL);
+- if (!outdata)
+- return -ESHUTDOWN;
+-
+- ret = tcp_read_buffer_from_socket(tcp->sock, outdata, outlen);
+- if (ret) {
+- kfree(outdata);
+- return ret;
+- }
+- tcp->connect->stat.recv_bytes += outlen;
+-out_recv_head:
+- if (connect_recv_callback[connect->status]) {
+- connect_recv_callback[connect->status](connect, recv, outdata,
+- outlen);
+- } else {
+- kfree(outdata);
+- hmdfs_err("callback NULL status %d", connect->status);
+- }
+- return 0;
+-}
+-
+-static int tcp_receive_from_sock(struct tcp_handle *tcp)
+-{
+- struct hmdfs_head_cmd *recv = NULL;
+- int ret = 0;
+-
+- if (!tcp) {
+- hmdfs_info("tcp recv thread !tcp");
+- return -ESHUTDOWN;
+- }
+-
+- if (!tcp->sock) {
+- hmdfs_info("tcp recv thread !sock");
+- return -ESHUTDOWN;
+- }
+-
+- recv = kmem_cache_alloc(tcp->recv_cache, GFP_KERNEL);
+- if (!recv) {
+- hmdfs_info("tcp recv thread !cache");
+- return -ESHUTDOWN;
+- }
+-
+- ret = tcp_read_head_from_socket(tcp->sock, recv,
+- sizeof(struct hmdfs_head_cmd));
+- if (ret)
+- goto out;
+-
+- tcp->connect->stat.recv_bytes += sizeof(struct hmdfs_head_cmd);
+- tcp->connect->stat.recv_message_count++;
+-
+- if (recv->magic != HMDFS_MSG_MAGIC || recv->version != HMDFS_VERSION) {
+- hmdfs_info_ratelimited("tcp recv fd %d wrong magic. drop message",
+- tcp->fd);
+- goto out;
+- }
+-
+- if ((le32_to_cpu(recv->data_len) >
+- HMDFS_MAX_MESSAGE_LEN + sizeof(struct hmdfs_head_cmd)) ||
+- (le32_to_cpu(recv->data_len) < sizeof(struct hmdfs_head_cmd))) {
+- hmdfs_info("tcp recv fd %d length error.
drop message", +- tcp->fd); +- goto out; +- } +- +- if (tcp->connect->status == CONNECT_STAT_WORKING && +- recv->operations.command == F_READPAGE && +- recv->operations.cmd_flag == C_RESPONSE) { +- ret = tcp_recvpage_tls(tcp->connect, recv); +- goto out; +- } +- +- if (tcp->connect->status == CONNECT_STAT_WORKING) +- ret = tcp_recvbuffer_tls(tcp->connect, recv); +- else +- ret = tcp_recvbuffer_cipher(tcp->connect, recv); +- +-out: +- kmem_cache_free(tcp->recv_cache, recv); +- return ret; +-} +- +-static bool tcp_handle_is_available(struct tcp_handle *tcp) +-{ +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- struct tls_context *tls_ctx = NULL; +- struct tls_sw_context_rx *ctx = NULL; +- +-#endif +- if (!tcp || !tcp->sock || !tcp->sock->sk) { +- hmdfs_err("Invalid tcp connection"); +- return false; +- } +- +- if (tcp->sock->sk->sk_state != TCP_ESTABLISHED) { +- hmdfs_err("TCP conn %d is broken, current sk_state is %d", +- tcp->fd, tcp->sock->sk->sk_state); +- return false; +- } +- +- if (tcp->sock->state != SS_CONNECTING && +- tcp->sock->state != SS_CONNECTED) { +- hmdfs_err("TCP conn %d is broken, current sock state is %d", +- tcp->fd, tcp->sock->state); +- return false; +- } +- +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- tls_ctx = tls_get_ctx(tcp->sock->sk); +- if (tls_ctx) { +- ctx = tls_sw_ctx_rx(tls_ctx); +- if (ctx && ctx->strp.stopped) { +- hmdfs_err( +- "TCP conn %d is broken, the strparser has stopped", +- tcp->fd); +- return false; +- } +- } +-#endif +- return true; +-} +- +-static int tcp_recv_thread(void *arg) +-{ +- int ret = 0; +- struct tcp_handle *tcp = (struct tcp_handle *)arg; +- const struct cred *old_cred; +- +- WARN_ON(!tcp); +- WARN_ON(!tcp->sock); +- set_freezable(); +- +- old_cred = hmdfs_override_creds(tcp->connect->node->sbi->system_cred); +- +- while (!kthread_should_stop()) { +- /* +- * 1. In case the redundant connection has not been mounted on +- * a peer +- * 2. Lock is unnecessary since a transient state is acceptable +- */ +- if (tcp_handle_is_available(tcp) && +- list_empty(&tcp->connect->list)) +- goto freeze; +- if (!mutex_trylock(&tcp->close_mutex)) +- continue; +- if (tcp_handle_is_available(tcp)) +- ret = tcp_receive_from_sock(tcp); +- else +- ret = -ESHUTDOWN; +- /* +- * This kthread will exit if ret is -ESHUTDOWN, thus we need to +- * set recv_task to NULL to avoid calling kthread_stop() from +- * tcp_close_socket(). +- */ +- if (ret == -ESHUTDOWN) +- tcp->recv_task = NULL; +- mutex_unlock(&tcp->close_mutex); +- if (ret == -ESHUTDOWN) { +- hmdfs_node_inc_evt_seq(tcp->connect->node); +- tcp->connect->status = CONNECT_STAT_STOP; +- if (tcp->connect->node->status != NODE_STAT_OFFLINE) +- hmdfs_reget_connection(tcp->connect); +- break; +- } +-freeze: +- schedule(); +- try_to_freeze(); +- } +- +- hmdfs_info("Exiting. 
Now, sock state = %d", tcp->sock->state); +- hmdfs_revert_creds(old_cred); +- connection_put(tcp->connect); +- return 0; +-} +- +-static int tcp_send_message_sock_cipher(struct tcp_handle *tcp, +- struct hmdfs_send_data *msg) +-{ +- int ret = 0; +- __u8 *outdata = NULL; +- size_t outlen = 0; +- int send_len = 0; +- int send_vec_cnt = 0; +- struct msghdr tcp_msg; +- struct kvec iov[TCP_KVEC_ELE_DOUBLE]; +- +- memset(&tcp_msg, 0, sizeof(tcp_msg)); +- if (!tcp || !tcp->sock) { +- hmdfs_err("encrypt tcp socket = NULL"); +- return -ESHUTDOWN; +- } +- iov[0].iov_base = msg->head; +- iov[0].iov_len = msg->head_len; +- send_vec_cnt = TCP_KVEC_HEAD; +- if (msg->len == 0) +- goto send; +- +- outlen = msg->len + HMDFS_IV_SIZE + HMDFS_TAG_SIZE; +- outdata = kzalloc(outlen, GFP_KERNEL); +- if (!outdata) { +- hmdfs_err("tcp send message encrypt fail to alloc outdata"); +- return -ENOMEM; +- } +- ret = aeadcipher_encrypt_buffer(tcp->connect, msg->data, msg->len, +- outdata, outlen); +- if (ret) { +- hmdfs_err("encrypt_buf fail"); +- goto out; +- } +- iov[1].iov_base = outdata; +- iov[1].iov_len = outlen; +- send_vec_cnt = TCP_KVEC_ELE_DOUBLE; +-send: +- mutex_lock(&tcp->send_mutex); +- send_len = sendmsg_nofs(tcp->sock, &tcp_msg, iov, send_vec_cnt, +- msg->head_len + outlen); +- mutex_unlock(&tcp->send_mutex); +- if (send_len <= 0) { +- hmdfs_err("error %d", send_len); +- ret = -ESHUTDOWN; +- } else if (send_len != msg->head_len + outlen) { +- hmdfs_err("send part of message. %d/%zu", send_len, +- msg->head_len + outlen); +- ret = -EAGAIN; +- } else { +- ret = 0; +- } +-out: +- kfree(outdata); +- return ret; +-} +- +-static int tcp_send_message_sock_tls(struct tcp_handle *tcp, +- struct hmdfs_send_data *msg) +-{ +- int send_len = 0; +- int send_vec_cnt = 0; +- struct msghdr tcp_msg; +- struct kvec iov[TCP_KVEC_ELE_TRIPLE]; +- +- memset(&tcp_msg, 0, sizeof(tcp_msg)); +- if (!tcp || !tcp->sock) { +- hmdfs_err("tcp socket = NULL"); +- return -ESHUTDOWN; +- } +- iov[TCP_KVEC_HEAD].iov_base = msg->head; +- iov[TCP_KVEC_HEAD].iov_len = msg->head_len; +- if (msg->len == 0 && msg->sdesc_len == 0) { +- send_vec_cnt = TCP_KVEC_ELE_SINGLE; +- } else if (msg->sdesc_len == 0) { +- iov[TCP_KVEC_DATA].iov_base = msg->data; +- iov[TCP_KVEC_DATA].iov_len = msg->len; +- send_vec_cnt = TCP_KVEC_ELE_DOUBLE; +- } else { +- iov[TCP_KVEC_FILE_PARA].iov_base = msg->sdesc; +- iov[TCP_KVEC_FILE_PARA].iov_len = msg->sdesc_len; +- iov[TCP_KVEC_FILE_CONTENT].iov_base = msg->data; +- iov[TCP_KVEC_FILE_CONTENT].iov_len = msg->len; +- send_vec_cnt = TCP_KVEC_ELE_TRIPLE; +- } +- mutex_lock(&tcp->send_mutex); +- send_len = sendmsg_nofs(tcp->sock, &tcp_msg, iov, send_vec_cnt, +- msg->head_len + msg->len + msg->sdesc_len); +- mutex_unlock(&tcp->send_mutex); +- if (send_len == -EBADMSG) { +- return -EBADMSG; +- } else if (send_len <= 0) { +- hmdfs_err("error %d", send_len); +- return -ESHUTDOWN; +- } else if (send_len != msg->head_len + msg->len + msg->sdesc_len) { +- hmdfs_err("send part of message. 
%d/%zu", send_len, +- msg->head_len + msg->len); +- tcp->connect->stat.send_bytes += send_len; +- return -EAGAIN; +- } +- tcp->connect->stat.send_bytes += send_len; +- tcp->connect->stat.send_message_count++; +- return 0; +-} +- +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +-int tcp_send_rekey_request(struct connection *connect) +-{ +- int ret = 0; +- struct hmdfs_send_data msg; +- struct tcp_handle *tcp = connect->connect_handle; +- struct hmdfs_head_cmd *head = NULL; +- struct connection_rekey_request *rekey_request_param = NULL; +- struct hmdfs_cmd operations; +- +- hmdfs_init_cmd(&operations, F_CONNECT_REKEY); +- head = kzalloc(sizeof(struct hmdfs_head_cmd) + +- sizeof(struct connection_rekey_request), +- GFP_KERNEL); +- if (!head) +- return -ENOMEM; +- rekey_request_param = +- (struct connection_rekey_request +- *)((uint8_t *)head + sizeof(struct hmdfs_head_cmd)); +- +- rekey_request_param->update_request = cpu_to_le32(UPDATE_NOT_REQUESTED); +- +- head->magic = HMDFS_MSG_MAGIC; +- head->version = HMDFS_VERSION; +- head->operations = operations; +- head->data_len = +- cpu_to_le32(sizeof(*head) + sizeof(*rekey_request_param)); +- head->reserved = 0; +- head->reserved1 = 0; +- head->ret_code = 0; +- +- msg.head = head; +- msg.head_len = sizeof(*head); +- msg.data = rekey_request_param; +- msg.len = sizeof(*rekey_request_param); +- msg.sdesc = NULL; +- msg.sdesc_len = 0; +- ret = tcp_send_message_sock_tls(tcp, &msg); +- if (ret != 0) +- hmdfs_err("return error %d", ret); +- kfree(head); +- return ret; +-} +-#endif +- +-static int tcp_send_message(struct connection *connect, +- struct hmdfs_send_data *msg) +-{ +- int ret = 0; +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- unsigned long nowtime = jiffies; +-#endif +- struct tcp_handle *tcp = NULL; +- +- if (!connect) { +- hmdfs_err("tcp connection = NULL "); +- return -ESHUTDOWN; +- } +- if (!msg) { +- hmdfs_err("msg = NULL"); +- return -EINVAL; +- } +- if (msg->len > HMDFS_MAX_MESSAGE_LEN) { +- hmdfs_err("message->len error: %zu", msg->len); +- return -EINVAL; +- } +- tcp = (struct tcp_handle *)(connect->connect_handle); +- if (connect->status == CONNECT_STAT_STOP) +- return -EAGAIN; +- +- trace_hmdfs_tcp_send_message(msg->head); +- +- if (connect->status == CONNECT_STAT_WORKING) +- ret = tcp_send_message_sock_tls(tcp, msg); +- else +- ret = tcp_send_message_sock_cipher(tcp, msg); +- +- if (ret != 0) { +- hmdfs_err("return error %d", ret); +- return ret; +- } +-#ifdef CONFIG_HMDFS_FS_ENCRYPTION +- if (nowtime - connect->stat.rekey_time >= REKEY_LIFETIME && +- connect->status == CONNECT_STAT_WORKING) { +- hmdfs_info("send rekey message to devid %llu", +- connect->node->device_id); +- ret = tcp_send_rekey_request(connect); +- if (ret == 0) +- set_crypto_info(connect, SET_CRYPTO_SEND); +- connect->stat.rekey_time = nowtime; +- } +-#endif +- return ret; +-} +- +-void tcp_close_socket(struct tcp_handle *tcp) +-{ +- int ret; +- if (!tcp) +- return; +- mutex_lock(&tcp->close_mutex); +- if (tcp->recv_task) { +- ret = kthread_stop(tcp->recv_task); +- /* recv_task killed before sched, we need to put the connect */ +- if (ret == -EINTR) +- connection_put(tcp->connect); +- tcp->recv_task = NULL; +- } +- mutex_unlock(&tcp->close_mutex); +-} +- +-static int set_tfm(__u8 *master_key, struct crypto_aead *tfm) +-{ +- int ret = 0; +- int iv_len; +- __u8 *sec_key = NULL; +- +- sec_key = master_key; +- crypto_aead_clear_flags(tfm, ~0); +- ret = crypto_aead_setkey(tfm, sec_key, HMDFS_KEY_SIZE); +- if (ret) { +- hmdfs_err("failed to set the key"); +- goto out; +- } +- ret = 
crypto_aead_setauthsize(tfm, HMDFS_TAG_SIZE);
+- if (ret) {
+- hmdfs_err("failed to set authsize");
+- goto out;
+- }
+-
+- iv_len = crypto_aead_ivsize(tfm);
+- if (iv_len != HMDFS_IV_SIZE) {
+- hmdfs_err("unexpected IV size %d", iv_len);
+- ret = -ENODATA;
+- }
+-out:
+- return ret;
+-}
+-
+-static bool is_tcp_socket(struct tcp_handle *tcp)
+-{
+- struct inet_connection_sock *icsk;
+-
+- if (!tcp || !tcp->sock || !tcp->sock->sk) {
+- hmdfs_err("invalid tcp handle");
+- return false;
+- }
+-
+- lock_sock(tcp->sock->sk);
+- if (tcp->sock->sk->sk_protocol != IPPROTO_TCP ||
+- tcp->sock->type != SOCK_STREAM ||
+- tcp->sock->sk->sk_family != AF_INET) {
+- hmdfs_err("invalid socket protocol");
+- release_sock(tcp->sock->sk);
+- return false;
+- }
+-
+- icsk = inet_csk(tcp->sock->sk);
+- if (icsk->icsk_ulp_ops) {
+- hmdfs_err("ulp not NULL");
+- release_sock(tcp->sock->sk);
+- return false;
+- }
+-
+- release_sock(tcp->sock->sk);
+- return true;
+-}
+-
+-static int tcp_update_socket(struct tcp_handle *tcp, int fd,
+- uint8_t *master_key, struct socket *socket)
+-{
+- int err = 0;
+- struct hmdfs_peer *node = NULL;
+-
+- if (!master_key || fd == 0)
+- return -EAGAIN;
+-
+- tcp->sock = socket;
+- tcp->fd = fd;
+-
+- if (!is_tcp_socket(tcp)) {
+- err = -EINVAL;
+- goto put_sock;
+- }
+-
+- if (!tcp_handle_is_available(tcp)) {
+- err = -EPIPE;
+- goto put_sock;
+- }
+-
+- hmdfs_info("socket fd %d, state %d, refcount %ld protocol %d", fd,
+- socket->state, file_count(socket->file),
+- socket->sk->sk_protocol);
+-
+- tcp->recv_cache = kmem_cache_create("hmdfs_socket",
+- tcp->recvbuf_maxsize,
+- 0, SLAB_HWCACHE_ALIGN, NULL);
+- if (!tcp->recv_cache) {
+- err = -ENOMEM;
+- goto put_sock;
+- }
+-
+- err = tcp_set_recvtimeo(socket, TCP_RECV_TIMEOUT);
+- if (err) {
+- hmdfs_err("tcp set timeout error");
+- goto free_mem_cache;
+- }
+-
+- /* send key and recv key, default MASTER KEY */
+- memcpy(tcp->connect->master_key, master_key, HMDFS_KEY_SIZE);
+- memcpy(tcp->connect->send_key, master_key, HMDFS_KEY_SIZE);
+- memcpy(tcp->connect->recv_key, master_key, HMDFS_KEY_SIZE);
+- tcp->connect->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+- if (IS_ERR(tcp->connect->tfm)) {
+- err = PTR_ERR(tcp->connect->tfm);
+- tcp->connect->tfm = NULL;
+- hmdfs_err("failed to load transform for gcm(aes):%d", err);
+- goto free_mem_cache;
+- }
+-
+- err = set_tfm(master_key, tcp->connect->tfm);
+- if (err) {
+- hmdfs_err("tfm setting exit fault");
+- goto free_crypto;
+- }
+-
+- connection_get(tcp->connect);
+-
+- node = tcp->connect->node;
+- tcp->recv_task = kthread_create(tcp_recv_thread, (void *)tcp,
+- "dfs_rcv%u_%llu_%d",
+- node->owner, node->device_id, fd);
+- if (IS_ERR(tcp->recv_task)) {
+- err = PTR_ERR(tcp->recv_task);
+- hmdfs_err("tcp->recv_task %d", err);
+- goto put_conn;
+- }
+-
+- return 0;
+-
+-put_conn:
+- tcp->recv_task = NULL;
+- connection_put(tcp->connect);
+-free_crypto:
+- crypto_free_aead(tcp->connect->tfm);
+- tcp->connect->tfm = NULL;
+-free_mem_cache:
+- kmem_cache_destroy(tcp->recv_cache);
+- tcp->recv_cache = NULL;
+-put_sock:
+- tcp->sock = NULL;
+- tcp->fd = 0;
+-
+- return err;
+-}
+-
+-static struct tcp_handle *tcp_alloc_handle(struct connection *connect,
+- int socket_fd, uint8_t *master_key, struct socket *socket)
+-{
+- int ret = 0;
+- struct tcp_handle *tcp = kzalloc(sizeof(*tcp), GFP_KERNEL);
+-
+- if (!tcp)
+- return NULL;
+- tcp->connect = connect;
+- tcp->connect->connect_handle = (void *)tcp;
+- tcp->recvbuf_maxsize = MAX_RECV_SIZE;
+- tcp->recv_task = NULL;
+- tcp->recv_cache = NULL;
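+- /*
+-  * Illustrative note (not from the original source): recvbuf_maxsize
+-  * is MAX_RECV_SIZE, defined in transport.h as
+-  * sizeof(struct hmdfs_head_cmd), so the slab cache created in
+-  * tcp_update_socket() only ever holds fixed-size message heads;
+-  * variable-length payloads are allocated per message, e.g.:
+-  *
+-  *   recv = kmem_cache_alloc(tcp->recv_cache, GFP_KERNEL); // head
+-  *   buf = kmalloc(le32_to_cpu(recv->data_len) - sizeof(*recv),
+-  *                 GFP_KERNEL);                            // body
+-  */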
+- tcp->sock = NULL;
+- mutex_init(&tcp->close_mutex);
+- mutex_init(&tcp->send_mutex);
+- ret = tcp_update_socket(tcp, socket_fd, master_key, socket);
+- if (ret) {
+- kfree(tcp);
+- return NULL;
+- }
+- return tcp;
+-}
+-
+-void hmdfs_get_connection(struct hmdfs_peer *peer)
+-{
+- struct notify_param param;
+-
+- if (!peer)
+- return;
+- param.notify = NOTIFY_GET_SESSION;
+- param.fd = INVALID_SOCKET_FD;
+- memcpy(param.remote_cid, peer->cid, HMDFS_CID_SIZE);
+- notify(peer, &param);
+-}
+-
+-static void connection_notify_to_close(struct connection *conn)
+-{
+- struct notify_param param;
+- struct hmdfs_peer *peer = NULL;
+- struct tcp_handle *tcp = NULL;
+-
+- tcp = conn->connect_handle;
+- peer = conn->node;
+-
+- // libdistbus/src/TcpSession.cpp will close the socket
+- param.notify = NOTIFY_GET_SESSION;
+- param.fd = tcp->fd;
+- memcpy(param.remote_cid, peer->cid, HMDFS_CID_SIZE);
+- notify(peer, &param);
+-}
+-
+-void hmdfs_reget_connection(struct connection *conn)
+-{
+- struct tcp_handle *tcp = NULL;
+- struct connection *conn_impl = NULL;
+- struct connection *next = NULL;
+- struct task_struct *recv_task = NULL;
+- bool should_put = false;
+- bool stop_thread = true;
+-
+- if (!conn)
+- return;
+-
+- // One may put a connection if and only if it was taken out of the list
+- mutex_lock(&conn->node->conn_impl_list_lock);
+- list_for_each_entry_safe(conn_impl, next, &conn->node->conn_impl_list,
+- list) {
+- if (conn_impl == conn) {
+- should_put = true;
+- list_move(&conn->list, &conn->node->conn_deleting_list);
+- break;
+- }
+- }
+- if (!should_put) {
+- mutex_unlock(&conn->node->conn_impl_list_lock);
+- return;
+- }
+-
+- tcp = conn->connect_handle;
+- if (tcp) {
+- recv_task = tcp->recv_task;
+- /*
+- * To prevent the receive thread from stopping itself, ensure the
+- * receive thread is stopped before processing the offline event.
+- */
+- if (!recv_task || recv_task->pid == current->pid)
+- stop_thread = false;
+- }
+- mutex_unlock(&conn->node->conn_impl_list_lock);
+-
+- if (tcp) {
+- if (tcp->sock) {
+- hmdfs_info("shutdown sock: fd = %d, sockref = %ld, connref = %u stop_thread = %d",
+- tcp->fd, file_count(tcp->sock->file),
+- kref_read(&conn->ref_cnt), stop_thread);
+- kernel_sock_shutdown(tcp->sock, SHUT_RDWR);
+- }
+-
+- if (stop_thread)
+- tcp_close_socket(tcp);
+-
+- if (tcp->fd != INVALID_SOCKET_FD)
+- connection_notify_to_close(conn);
+- }
+- connection_put(conn);
+-}
+-
+-static struct connection *
+-lookup_conn_by_socketfd_unsafe(struct hmdfs_peer *node, struct socket *socket)
+-{
+- struct connection *tcp_conn = NULL;
+- struct tcp_handle *tcp = NULL;
+-
+- list_for_each_entry(tcp_conn, &node->conn_impl_list, list) {
+- if (tcp_conn->connect_handle) {
+- tcp = (struct tcp_handle *)(tcp_conn->connect_handle);
+- if (tcp->sock == socket) {
+- connection_get(tcp_conn);
+- return tcp_conn;
+- }
+- }
+- }
+- return NULL;
+-}
+-
+-static void hmdfs_reget_connection_work_fn(struct work_struct *work)
+-{
+- struct connection *conn =
+- container_of(work, struct connection, reget_work);
+-
+- hmdfs_reget_connection(conn);
+- connection_put(conn);
+-}
+-
+-struct connection *alloc_conn_tcp(struct hmdfs_peer *node, int socket_fd,
+- uint8_t *master_key, uint8_t status, struct socket *socket)
+-{
+- struct connection *tcp_conn = NULL;
+- unsigned long nowtime = jiffies;
+-
+- tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
+- if (!tcp_conn)
+- goto out_err;
+-
+- kref_init(&tcp_conn->ref_cnt);
+- mutex_init(&tcp_conn->ref_lock);
+- INIT_LIST_HEAD(&tcp_conn->list);
+- tcp_conn->node = node;
+- tcp_conn->close = tcp_stop_connect;
+- tcp_conn->send_message = tcp_send_message;
+- tcp_conn->type = CONNECT_TYPE_TCP;
+- tcp_conn->status = status;
+- tcp_conn->stat.rekey_time = nowtime;
+- tcp_conn->connect_handle =
+- (void *)tcp_alloc_handle(tcp_conn, socket_fd, master_key, socket);
+- INIT_WORK(&tcp_conn->reget_work, hmdfs_reget_connection_work_fn);
+- if (!tcp_conn->connect_handle) {
+- hmdfs_err("Failed to alloc tcp_handle for struct conn");
+- goto out_err;
+- }
+- return tcp_conn;
+-
+-out_err:
+- kfree(tcp_conn);
+- return NULL;
+-}
+-
+-static struct connection *add_conn_tcp_unsafe(struct hmdfs_peer *node,
+- struct socket *socket,
+- struct connection *conn2add)
+-{
+- struct connection *conn;
+-
+- conn = lookup_conn_by_socketfd_unsafe(node, socket);
+- if (conn) {
+- hmdfs_info("socket already in list");
+- return conn;
+- }
+-
+- /* Prefer to use socket opened by local device */
+- if (conn2add->status == CONNECT_STAT_WAIT_REQUEST)
+- list_add(&conn2add->list, &node->conn_impl_list);
+- else
+- list_add_tail(&conn2add->list, &node->conn_impl_list);
+- connection_get(conn2add);
+- return conn2add;
+-}
+-
+-struct connection *hmdfs_get_conn_tcp(struct hmdfs_peer *node, int fd,
+- uint8_t *master_key, uint8_t status)
+-{
+- struct connection *tcp_conn = NULL, *on_peer_conn = NULL;
+- struct tcp_handle *tcp = NULL;
+- struct socket *socket = NULL;
+- int err = 0;
+-
+- socket = sockfd_lookup(fd, &err);
+- if (!socket) {
+- hmdfs_err("lookup socket fail, socket_fd %d, err %d", fd, err);
+- return NULL;
+- }
+- mutex_lock(&node->conn_impl_list_lock);
+- tcp_conn = lookup_conn_by_socketfd_unsafe(node, socket);
+- mutex_unlock(&node->conn_impl_list_lock);
+- if (tcp_conn) {
+- hmdfs_info("Got an existing tcp conn: socket_fd = %d",
+- fd);
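+- /*
+-  * Illustrative note (a sketch, not from the original source): two
+-  * callers may race to register the same socket.  The loser detects
+-  * the duplicate under conn_impl_list_lock in add_conn_tcp_unsafe(),
+-  * then drops its own handle with tcp->fd set to INVALID_SOCKET_FD
+-  * (so no close notification is sent for the shared fd) and adopts
+-  * the winner's connection, as the on_peer_conn != tcp_conn branch
+-  * below shows.
+-  */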
+- sockfd_put(socket);
+- goto out;
+- }
+-
+- tcp_conn = alloc_conn_tcp(node, fd, master_key, status, socket);
+- if (!tcp_conn) {
+- hmdfs_info("Failed to alloc a tcp conn, socket_fd %d", fd);
+- sockfd_put(socket);
+- goto out;
+- }
+-
+- mutex_lock(&node->conn_impl_list_lock);
+- on_peer_conn = add_conn_tcp_unsafe(node, socket, tcp_conn);
+- mutex_unlock(&node->conn_impl_list_lock);
+- tcp = tcp_conn->connect_handle;
+- if (on_peer_conn == tcp_conn) {
+- hmdfs_info("Got a newly allocated tcp conn: socket_fd = %d", fd);
+- wake_up_process(tcp->recv_task);
+- if (status == CONNECT_STAT_WAIT_RESPONSE)
+- connection_send_handshake(
+- on_peer_conn, CONNECT_MESG_HANDSHAKE_REQUEST,
+- 0);
+- } else {
+- hmdfs_info("Got an existing tcp conn: socket_fd = %d", fd);
+- tcp->fd = INVALID_SOCKET_FD;
+- tcp_close_socket(tcp);
+- connection_put(tcp_conn);
+-
+- tcp_conn = on_peer_conn;
+- }
+-
+-out:
+- return tcp_conn;
+-}
+-
+-void tcp_stop_connect(struct connection *connect)
+-{
+- hmdfs_info("now nothing to do");
+-}
+diff --git a/fs/hmdfs/comm/transport.h b/fs/hmdfs/comm/transport.h
+deleted file mode 100644
+index bce882cb6..000000000
+--- a/fs/hmdfs/comm/transport.h
++++ /dev/null
+@@ -1,76 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * fs/hmdfs/comm/transport.h
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef HMDFS_TRANSPORT_H
+-#define HMDFS_TRANSPORT_H
+-
+-#include "connection.h"
+-
+-#define ENCRYPT_FLAG 1
+-#define DECRYPT_FLAG 0
+-
+-struct aeadcrypt_result {
+- struct completion completion;
+- int err;
+-};
+-
+-#define ADAPTER_MESSAGE_LENGTH (1024 * 1024 + 1024) // 1M + 1K
+-#define MAX_RECV_SIZE sizeof(struct hmdfs_head_cmd)
+-
+-#define TCP_KVEC_HEAD 0
+-#define TCP_KVEC_DATA 1
+-
+-enum TCP_KVEC_FILE_ELE_INDEX {
+- TCP_KVEC_FILE_PARA = 1,
+- TCP_KVEC_FILE_CONTENT = 2,
+-};
+-
+-enum TCP_KVEC_TYPE {
+- TCP_KVEC_ELE_SINGLE = 1,
+- TCP_KVEC_ELE_DOUBLE = 2,
+- TCP_KVEC_ELE_TRIPLE = 3,
+-};
+-
+-#define TCP_RECV_TIMEOUT 2
+-#define MAX_RECV_RETRY_TIMES 2
+-
+-#ifndef SO_RCVTIMEO
+-#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+-#endif
+-
+-struct tcp_handle {
+- struct connection *connect;
+- int recvbuf_maxsize;
+- struct mutex close_mutex;
+- /*
+- * To achieve atomicity.
+- *
+- * The sock lock held at the tcp layer may be temporarily released at
+- * `sk_wait_event()` when waiting for sock buffer. From this point on,
+- * threads serialized at the initial call to `lock_sock()` contained
+- * in `tcp_sendmsg()` can proceed, resulting in intermixed messages.
+- */
+- struct mutex send_mutex;
+- struct socket *sock;
+- int fd;
+- struct kmem_cache *recv_cache;
+- struct task_struct *recv_task;
+-};
+-
+-void hmdfs_get_connection(struct hmdfs_peer *peer);
+-void hmdfs_reget_connection(struct connection *conn);
+-struct connection *hmdfs_get_conn_tcp(struct hmdfs_peer *node, int socket_fd,
+- uint8_t *master_key, uint8_t status);
+-void tcp_stop_connect(struct connection *connect);
+-uint32_t hmdfs_tcpi_rtt(struct hmdfs_peer *node);
+-void tcp_close_socket(struct tcp_handle *tcp);
+-
+-#ifdef CONFIG_HMDFS_FS_ENCRYPTION
+-int tcp_send_rekey_request(struct connection *connect);
+-#endif
+-
+-#endif
+diff --git a/fs/hmdfs/dentry.c b/fs/hmdfs/dentry.c
+deleted file mode 100644
+index 040d698e1..000000000
+--- a/fs/hmdfs/dentry.c
++++ /dev/null
+@@ -1,357 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/dentry.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */ +- +-#include +-#include +- +-#include "comm/connection.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +-#include "hmdfs_merge_view.h" +- +-extern struct kmem_cache *hmdfs_dentry_cachep; +- +-void hmdfs_set_time(struct dentry *dentry, unsigned long time) +-{ +- struct hmdfs_dentry_info *d_info = dentry->d_fsdata; +- +- if (d_info) +- d_info->time = time; +-} +- +-unsigned long hmdfs_get_time(struct dentry *dentry) +-{ +- struct hmdfs_dentry_info *d_info = dentry->d_fsdata; +- +- if (d_info) +- return (unsigned long)d_info->time; +- return 0; +-} +- +-static int hmdfs_d_remote_revalidate(struct hmdfs_peer *conn, +- struct dentry *target, +- struct dentry *parent) +-{ +- unsigned int timeout = hmdfs_sb(target->d_sb)->dcache_timeout; +- unsigned long dentry_time = hmdfs_get_time(target); +- struct clearcache_item *item; +- +- item = hmdfs_find_cache_item(conn->device_id, parent); +- if (!item) +- return 0; +- kref_put(&item->ref, release_cache_item); +- +- if (cache_item_revalidate(READ_ONCE(conn->conn_time), +- dentry_time, timeout)) +- return 1; +- +- return 0; +-} +- +-static inline void lock_for_dname_cmp(struct dentry *dentry, +- struct dentry *lower_dentry) +-{ +- if (dentry < lower_dentry) { +- spin_lock(&dentry->d_lock); +- spin_lock_nested(&lower_dentry->d_lock, DENTRY_D_LOCK_NESTED); +- } else { +- spin_lock(&lower_dentry->d_lock); +- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +- } +-} +- +-static inline void unlock_for_dname_cmp(struct dentry *dentry, +- struct dentry *lower_dentry) +-{ +- spin_unlock(&dentry->d_lock); +- spin_unlock(&lower_dentry->d_lock); +-} +- +-static int hmdfs_dev_d_revalidate(struct dentry *direntry, unsigned int flags) +-{ +- struct inode *dinode = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- spin_lock(&direntry->d_lock); +- if (IS_ROOT(direntry)) { +- spin_unlock(&direntry->d_lock); +- return 1; +- } +- spin_unlock(&direntry->d_lock); +- +- dinode = d_inode(direntry); +- if (!dinode) +- return 0; +- +- info = hmdfs_i(dinode); +- if (info->inode_type == HMDFS_LAYER_SECOND_LOCAL || +- info->inode_type == HMDFS_LAYER_FIRST_DEVICE) { +- return 1; +- } +- if (info->conn && info->conn->status == NODE_STAT_ONLINE) +- return 1; +- +- return 0; +-} +- +-static int hmdfs_d_revalidate(struct dentry *direntry, unsigned int flags) +-{ +- struct inode *dinode = NULL; +- struct hmdfs_inode_info *info = NULL; +- struct path lower_path, parent_lower_path; +- struct dentry *parent_dentry = NULL; +- struct dentry *parent_lower_dentry = NULL; +- struct dentry *lower_cur_parent_dentry = NULL; +- struct dentry *lower_dentry = NULL; +- int ret; +- +- if (flags & LOOKUP_RCU) +- return -ECHILD; +- +- if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET | LOOKUP_REVAL)) +- return 0; +- +- dinode = d_inode(direntry); +- if (!dinode) +- return 0; +- +- /* remote dentry timeout */ +- info = hmdfs_i(dinode); +- parent_dentry = dget_parent(direntry); +- if (info->conn) { +- ret = hmdfs_d_remote_revalidate(info->conn, direntry, +- parent_dentry); +- dput(parent_dentry); +- return ret; +- } +- +- hmdfs_get_lower_path(direntry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_cur_parent_dentry = dget_parent(lower_dentry); +- hmdfs_get_lower_path(parent_dentry, &parent_lower_path); +- parent_lower_dentry = parent_lower_path.dentry; +- if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) { +- ret = lower_dentry->d_op->d_revalidate(lower_dentry, flags); +- if (ret == 0) +- goto out; +- } +- +- spin_lock(&lower_dentry->d_lock); +- if 
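+- /*
+-  * Illustrative note (not from the original source):
+-  * lock_for_dname_cmp() above orders the two d_locks by dentry
+-  * address to avoid an ABBA deadlock when two threads compare the
+-  * same pair in opposite order:
+-  *
+-  *   if (dentry < lower_dentry) { lock(dentry); lock(lower); }
+-  *   else                       { lock(lower); lock(dentry); }
+-  *
+-  * Every thread thus takes the lower-addressed lock first.
+-  */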
(d_unhashed(lower_dentry)) { +- spin_unlock(&lower_dentry->d_lock); +- ret = 0; +- goto out; +- } +- spin_unlock(&lower_dentry->d_lock); +- +- if (parent_lower_dentry != lower_cur_parent_dentry) { +- ret = 0; +- goto out; +- } +- +- ret = 1; +- lock_for_dname_cmp(direntry, lower_dentry); +- if (!qstr_case_eq(&direntry->d_name, &lower_dentry->d_name)) +- ret = 0; +- unlock_for_dname_cmp(direntry, lower_dentry); +- +-out: +- hmdfs_put_lower_path(&parent_lower_path); +- dput(lower_cur_parent_dentry); +- hmdfs_put_lower_path(&lower_path); +- dput(parent_dentry); +- return ret; +-} +- +-static void hmdfs_dev_d_release(struct dentry *dentry) +-{ +- struct clearcache_item *item; +- if (!dentry || !dentry->d_fsdata) +- return; +- +- switch (hmdfs_d(dentry)->dentry_type) { +- case HMDFS_LAYER_SECOND_LOCAL: +- hmdfs_clear_cache_dents(dentry, false); +- hmdfs_drop_remote_cache_dents(dentry); +- path_put(&(hmdfs_d(dentry)->lower_path)); +- break; +- case HMDFS_LAYER_ZERO: +- hmdfs_put_reset_lower_path(dentry); +- break; +- case HMDFS_LAYER_FIRST_DEVICE: +- break; +- case HMDFS_LAYER_SECOND_REMOTE: +- hmdfs_clear_cache_dents(dentry, false); +- break; +- case HMDFS_LAYER_SECOND_CLOUD: +- item = hmdfs_find_cache_item(CLOUD_DEVICE, dentry); +- if (item) { +- /* cloud dentryfile didn't link to +- 'struct cache_file_node', so close file here. +- */ +- filp_close(item->filp, NULL); +- kref_put(&item->ref, release_cache_item); +- } +- hmdfs_clear_cache_dents(dentry, false); +- break; +- default: +- hmdfs_err("Unexpected dentry type %d", +- hmdfs_d(dentry)->dentry_type); +- return; +- } +- +- kmem_cache_free(hmdfs_dentry_cachep, dentry->d_fsdata); +- dentry->d_fsdata = NULL; +-} +- +-static void hmdfs_d_release(struct dentry *dentry) +-{ +- if (!dentry || !dentry->d_fsdata) +- return; +- +- hmdfs_clear_cache_dents(dentry, false); +- hmdfs_drop_remote_cache_dents(dentry); +- hmdfs_put_reset_lower_path(dentry); +- kmem_cache_free(hmdfs_dentry_cachep, dentry->d_fsdata); +- dentry->d_fsdata = NULL; +-} +- +-static int hmdfs_cmp_ci(const struct dentry *dentry, unsigned int len, +- const char *str, const struct qstr *name) +-{ +- struct hmdfs_sb_info *sbi = hmdfs_sb(dentry->d_sb); +- +- if (name->len != len) +- return 1; +- +- if (!sbi->s_case_sensitive) { +- if (str_n_case_eq(name->name, str, len)) +- return 0; +- } else { +- if (!strncmp(name->name, str, len)) +- return 0; +- } +- return 1; +-} +- +-static int hmdfs_hash_ci(const struct dentry *dentry, struct qstr *qstr) +-{ +- const unsigned char *name = qstr->name; +- unsigned int len = qstr->len; +- unsigned long hash; +- struct hmdfs_sb_info *sbi = hmdfs_sb(dentry->d_sb); +- +- if (sbi->s_case_sensitive) +- return 0; +- +- hash = init_name_hash(dentry); +- while (len--) +- hash = partial_name_hash(tolower(*name++), hash); +- qstr->hash = end_name_hash(hash); +- return 0; +-} +- +-void clear_comrades_locked(struct list_head *comrade_list) +-{ +- struct hmdfs_dentry_comrade *cc, *nc; +- +- WARN_ON(!comrade_list); +- list_for_each_entry_safe(cc, nc, comrade_list, list) { +- dput(cc->lo_d); +- kfree(cc); +- } +- INIT_LIST_HEAD(comrade_list); +-} +- +-void clear_comrades(struct dentry *dentry) +-{ +- struct hmdfs_dentry_info_merge *cdi = hmdfs_dm(dentry); +- +- wait_event(cdi->wait_queue, !has_merge_lookup_work(cdi)); +- mutex_lock(&cdi->comrade_list_lock); +- clear_comrades_locked(&cdi->comrade_list); +- mutex_unlock(&cdi->comrade_list_lock); +-} +- +-/** +- * d_revalidate_merge - revalidate a merge dentry +- * +- * Always return 0 to invalidate a dentry for 
fault-tolerance.
+- * The cost is acceptable for an overlay filesystem.
+- */
+-static int d_revalidate_merge(struct dentry *direntry, unsigned int flags)
+-{
+- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(direntry);
+- struct hmdfs_dentry_comrade *comrade = NULL;
+- struct dentry *parent_dentry = NULL;
+- struct dentry *lower_cur_parent_dentry = NULL;
+- struct inode *dinode = NULL;
+- struct hmdfs_inode_info *info = NULL;
+- int ret = 1;
+-
+- if (flags & LOOKUP_RCU) {
+- return -ECHILD;
+- }
+-
+- if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET | LOOKUP_REVAL)) {
+- return 0;
+- }
+-
+- dinode = d_inode(direntry);
+- if (!dinode)
+- return 0;
+-
+- info = hmdfs_i(dinode);
+- if (info->inode_type == HMDFS_LAYER_FIRST_MERGE_CLOUD)
+- return 1;
+-
+- parent_dentry = dget_parent(direntry);
+- mutex_lock(&dim->comrade_list_lock);
+- list_for_each_entry(comrade, &(dim->comrade_list), list) {
+- lower_cur_parent_dentry = dget_parent(comrade->lo_d);
+- if ((comrade->lo_d->d_flags & DCACHE_OP_REVALIDATE)) {
+- ret = comrade->lo_d->d_op->d_revalidate(
+- comrade->lo_d, flags);
+- if (ret == 0) {
+- dput(lower_cur_parent_dentry);
+- goto out;
+- }
+- }
+- dput(lower_cur_parent_dentry);
+- }
+-out:
+- mutex_unlock(&dim->comrade_list_lock);
+- dput(parent_dentry);
+- return ret;
+-}
+-
+-static void d_release_merge(struct dentry *dentry)
+-{
+- if (!dentry || !dentry->d_fsdata)
+- return;
+-
+- clear_comrades(dentry);
+- kmem_cache_free(hmdfs_dentry_merge_cachep, dentry->d_fsdata);
+- dentry->d_fsdata = NULL;
+-}
+-
+-const struct dentry_operations hmdfs_dops_merge = {
+- .d_revalidate = d_revalidate_merge,
+- .d_release = d_release_merge,
+-};
+-
+-const struct dentry_operations hmdfs_dev_dops = {
+- .d_revalidate = hmdfs_dev_d_revalidate,
+- .d_release = hmdfs_dev_d_release,
+-};
+-
+-const struct dentry_operations hmdfs_dops = {
+- .d_revalidate = hmdfs_d_revalidate,
+- .d_release = hmdfs_d_release,
+- .d_compare = hmdfs_cmp_ci,
+- .d_hash = hmdfs_hash_ci,
+-};
+diff --git a/fs/hmdfs/file_cloud.c b/fs/hmdfs/file_cloud.c
+deleted file mode 100644
+index 088d89929..000000000
+--- a/fs/hmdfs/file_cloud.c
++++ /dev/null
+@@ -1,425 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/file_cloud.c
+- *
+- * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "file_remote.h" +- +-#include "comm/socket_adapter.h" +-#include "hmdfs.h" +-#include "hmdfs_client.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_dentryfile_cloud.h" +-#include "hmdfs_trace.h" +- +-static const struct vm_operations_struct hmdfs_cloud_vm_ops = { +- .fault = filemap_fault, +- .map_pages = filemap_map_pages, +- .page_mkwrite = NULL, +-}; +- +-int hmdfs_file_open_cloud(struct inode *inode, struct file *file) +-{ +- const char *dir_path; +- struct hmdfs_sb_info *sbi = inode->i_sb->s_fs_info; +- struct path root_path; +- struct file *lower_file; +- int err = 0; +- +- struct hmdfs_file_info *gfi = kzalloc(sizeof(*gfi), GFP_KERNEL); +- if (!gfi) +- return -ENOMEM; +- +- if (!sbi->cloud_dir) { +- hmdfs_info("no cloud_dir"); +- kfree(gfi); +- return -EPERM; +- } +- +- err = kern_path(sbi->cloud_dir, 0, &root_path); +- if (err) { +- hmdfs_info("kern_path failed: %d", err); +- kfree(gfi); +- return err; +- } +- +- dir_path = hmdfs_get_dentry_relative_path(file->f_path.dentry); +- if(!dir_path) { +- hmdfs_err("get cloud path failed"); +- kfree(gfi); +- return -ENOENT; +- } +- +- lower_file = file_open_root(&root_path, dir_path, +- file->f_flags | O_DIRECT, file->f_mode); +- path_put(&root_path); +- if (IS_ERR(lower_file)) { +- hmdfs_info("file_open_root failed: %ld", PTR_ERR(lower_file)); +- err = PTR_ERR(lower_file); +- kfree(gfi); +- } else { +- gfi->lower_file = lower_file; +- file->private_data = gfi; +- } +- kfree(dir_path); +- return err; +-} +- +-int hmdfs_file_release_cloud(struct inode *inode, struct file *file) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(file); +- +- file->private_data = NULL; +- fput(gfi->lower_file); +- kfree(gfi); +- return 0; +-} +- +-static int hmdfs_file_flush_cloud(struct file *file, fl_owner_t id) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(file); +- +- if(!gfi || !gfi->lower_file) +- return 0; +- +- if (gfi->lower_file->f_op->flush) +- return gfi->lower_file->f_op->flush(gfi->lower_file, id); +- return 0; +-} +- +-int hmdfs_file_mmap_cloud(struct file *file, struct vm_area_struct *vma) +-{ +- struct hmdfs_file_info *private_data = file->private_data; +- struct file *realfile = NULL; +- int ret; +- +- if (!private_data) +- return -EINVAL; +- +- realfile = private_data->lower_file; +- if (!realfile) +- return -EINVAL; +- +- if (!realfile->f_op->mmap) +- return -ENODEV; +- +- if (WARN_ON(file != vma->vm_file)) +- return -EIO; +- +- vma->vm_file = get_file(realfile); +- ret = call_mmap(vma->vm_file, vma); +- if (ret) +- fput(realfile); +- else +- fput(file); +- +- file_accessed(file); +- +- return ret; +-} +- +-static int hmdfs_do_readpages_cloud(struct file *filp, int cnt, +- struct page **vec) +-{ +- struct hmdfs_file_info *gfi = filp->private_data; +- struct file *lower_filp; +- loff_t pos = (loff_t)(vec[0]->index) << HMDFS_PAGE_OFFSET; +- void *pages_buf = NULL; +- int idx, ret; +- +- if (gfi) { +- lower_filp = gfi->lower_file; +- } +- else { +- ret = -EINVAL; +- goto out_err; +- } +- +- pages_buf = vmap(vec, cnt, VM_MAP, PAGE_KERNEL); +- if (!pages_buf) { +- ret = -ENOMEM; +- goto out_err; +- } +- +- trace_hmdfs_do_readpages_cloud_begin(cnt, pos); +- ret = kernel_read(lower_filp, pages_buf, cnt * HMDFS_PAGE_SIZE, &pos); +- trace_hmdfs_do_readpages_cloud_end(cnt, pos, ret); +- +- if (ret >= 0) +- memset(pages_buf + ret, 0, cnt * HMDFS_PAGE_SIZE - ret); +- else +- goto out_err; +- +- vunmap(pages_buf); +- for 
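+- /*
+-  * Illustrative worked example (not from the original source):
+-  * vmap() above turns the page vector into one contiguous kernel
+-  * mapping, so a single kernel_read() fills all cnt pages at once.
+-  * Assuming 4 KiB pages and cnt = 32, one 128 KiB read replaces 32
+-  * page-sized reads; any tail beyond the returned length has been
+-  * zero-filled, and the loop below marks each page uptodate.
+-  */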
(idx = 0; idx < cnt; ++idx) {
+- SetPageUptodate(vec[idx]);
+- unlock_page(vec[idx]);
+- }
+- goto out;
+-
+-out_err:
+- if (pages_buf)
+- vunmap(pages_buf);
+- for (idx = 0; idx < cnt; ++idx) {
+- folio_clear_uptodate((struct folio *)vec[idx]);
+- filemap_remove_folio((struct folio *)vec[idx]);
+- unlock_page(vec[idx]);
+- put_page(vec[idx]);
+- }
+-out:
+- return ret;
+-}
+-
+-static void hmdfs_readahead(struct readahead_control *ractl)
+-{
+- struct file *filp = ractl->file;
+- struct address_space *mapping = ractl->mapping;
+- unsigned int nr_pages = readahead_count(ractl);
+- struct hmdfs_sb_info *sbi = hmdfs_sb(file_inode(filp)->i_sb);
+- unsigned int ret = 0, idx, cnt, limit;
+- unsigned long next_index;
+- gfp_t gfp = readahead_gfp_mask(mapping);
+- struct page **vec = NULL;
+-
+- limit = sbi->s_readpages_nr;
+- vec = kmalloc(limit * sizeof(*vec), GFP_KERNEL);
+- if (!vec) {
+- hmdfs_warning("cannot alloc vec (%u pages)", limit);
+- return;
+- }
+-
+- cnt = 0;
+- next_index = 0;
+- for (idx = 0; idx < nr_pages; ++idx) {
+- struct page *page = readahead_page(ractl);
+-
+- if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+- unlock_page(page);
+- put_page(page);
+- continue;
+- }
+-
+- if (cnt && (cnt >= limit || page->index != next_index)) {
+- ret = hmdfs_do_readpages_cloud(filp, cnt, vec);
+- cnt = 0;
+- if (ret)
+- break;
+- }
+- next_index = page->index + 1;
+- vec[cnt++] = page;
+- }
+-
+- if (cnt)
+- ret = hmdfs_do_readpages_cloud(filp, cnt, vec);
+-
+- kfree(vec);
+- trace_hmdfs_readpages_cloud(nr_pages, ret);
+- return;
+-}
+-
+-static int hmdfs_readpage(struct file *file, struct page *page)
+-{
+- loff_t offset = page_file_offset(page);
+- int ret = -EACCES;
+- char *page_buf;
+- struct hmdfs_file_info *gfi = file->private_data;
+- struct file *lower_file;
+-
+- if (gfi)
+- lower_file = gfi->lower_file;
+- else
+- goto out;
+-
+- page_buf = kmap(page);
+- if (!page_buf)
+- goto out;
+- ret = kernel_read(lower_file, page_buf, PAGE_SIZE, &offset);
+-
+- if (ret >= 0 && ret <= PAGE_SIZE) {
+- memset(page_buf + ret, 0, PAGE_SIZE - ret);
+- ret = 0;
+- }
+-
+- kunmap(page);
+- if (ret == 0)
+- SetPageUptodate(page);
+-out:
+- unlock_page(page);
+- return ret;
+-}
+-
+-static int hmdfs_read_folio(struct file *file, struct folio *folio)
+-{
+- struct page *page = &folio->page;
+- return hmdfs_readpage(file, page);
+-}
+-
+-const struct file_operations hmdfs_dev_file_fops_cloud = {
+- .owner = THIS_MODULE,
+- .llseek = generic_file_llseek,
+- .read_iter = generic_file_read_iter,
+- .write_iter = NULL,
+- .mmap = hmdfs_file_mmap_cloud,
+- .open = hmdfs_file_open_cloud,
+- .release = hmdfs_file_release_cloud,
+- .flush = hmdfs_file_flush_cloud,
+- .fsync = NULL,
+- .splice_read = NULL,
+- .splice_write = NULL,
+-};
+-
+-
+-const struct address_space_operations hmdfs_dev_file_aops_cloud = {
+- .read_folio = hmdfs_read_folio,
+- .readahead = hmdfs_readahead,
+- .write_begin = NULL,
+- .write_end = NULL,
+- .writepage = NULL,
+- .dirty_folio = NULL,
+-};
+-
+-const struct address_space_operations hmdfs_aops_cloud = {
+- .read_folio = hmdfs_read_folio,
+- .readahead = hmdfs_readahead,
+-};
+-
+-int analysis_dentry_file_from_cloud(struct hmdfs_sb_info *sbi,
+- struct file *file, struct file *handler,
+- struct dir_context *ctx)
+-{
+- struct hmdfs_dentry_group_cloud *dentry_group = NULL;
+- loff_t pos = ctx->pos;
+- unsigned long dev_id = (unsigned long)((pos << 1) >> (POS_BIT_NUM - DEV_ID_BIT_NUM));
+- unsigned long group_id = (unsigned long)((pos << (1 + DEV_ID_BIT_NUM)) >>
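+- /*
+-  * Illustrative note (a sketch; POS_BIT_NUM, DEV_ID_BIT_NUM,
+-  * GROUP_ID_BIT_NUM and OFFSET_BIT_MASK are defined elsewhere):
+-  * ctx->pos packs three cursor fields into one loff_t, with the top
+-  * bit skipped (hence the << 1):
+-  *
+-  *   | 1 bit | dev_id | group_id | offset |
+-  *
+-  * Each shift-left discards the fields above the one of interest
+-  * and the shift-right drops the fields below it; hmdfs_set_pos()
+-  * performs the inverse packing when the cursor is handed back.
+-  */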
+- (POS_BIT_NUM - GROUP_ID_BIT_NUM)); +- loff_t offset = pos & OFFSET_BIT_MASK; +- int group_num = 0; +- char *dentry_name = NULL; +- int iterate_result = 0; +- int i, j; +- +- dentry_group = kzalloc(sizeof(*dentry_group), GFP_KERNEL); +- +- if (!dentry_group) +- return -ENOMEM; +- +- if (IS_ERR_OR_NULL(handler)) { +- kfree(dentry_group); +- return -ENOENT; +- } +- +- group_num = get_dentry_group_cnt(file_inode(handler)); +- dentry_name = kzalloc(DENTRY_NAME_MAX_LEN, GFP_KERNEL); +- if (!dentry_name) { +- kfree(dentry_group); +- return -ENOMEM; +- } +- +- for (i = group_id; i < group_num; i++) { +- int ret = hmdfs_metainfo_read_nocred(handler, dentry_group, +- sizeof(struct hmdfs_dentry_group_cloud), +- i); +- if (ret != sizeof(struct hmdfs_dentry_group_cloud)) { +- hmdfs_err("read dentry group failed ret:%d", ret); +- goto done; +- } +- +- for (j = offset; j < DENTRY_PER_GROUP_CLOUD; j++) { +- int len; +- int file_type = DT_UNKNOWN; +- bool is_continue; +- +- len = le16_to_cpu(dentry_group->nsl[j].namelen); +- if (!test_bit_le(j, dentry_group->bitmap) || len == 0) +- continue; +- +- memset(dentry_name, 0, DENTRY_NAME_MAX_LEN); +- if (S_ISDIR(le16_to_cpu(dentry_group->nsl[j].i_mode))) +- file_type = DT_DIR; +- else if (S_ISREG(le16_to_cpu( +- dentry_group->nsl[j].i_mode))) +- file_type = DT_REG; +- +- strncat(dentry_name, dentry_group->filename[j], len); +- pos = hmdfs_set_pos(dev_id, i, j); +- is_continue = +- dir_emit(ctx, dentry_name, len, +- pos + INUNUMBER_START, file_type); +- if (!is_continue) { +- ctx->pos = pos; +- iterate_result = 1; +- goto done; +- } +- } +- offset = 0; +- } +- +-done: +- kfree(dentry_name); +- kfree(dentry_group); +- return iterate_result; +-} +- +-static int hmdfs_iterate_cloud(struct file *file, struct dir_context *ctx) +-{ +- int err = 0; +- loff_t start_pos = ctx->pos; +- +- if (ctx->pos == -1) +- return 0; +- err = analysis_dentry_file_from_cloud( +- file->f_inode->i_sb->s_fs_info, file, file->private_data, ctx); +- +- if (err <= 0) +- ctx->pos = -1; +- +- trace_hmdfs_iterate_remote(file->f_path.dentry, start_pos, ctx->pos, +- err); +- return err; +-} +- +-int hmdfs_dir_open_cloud(struct inode *inode, struct file *file) +-{ +- struct clearcache_item *cache_item = NULL; +- +- get_cloud_cache_file(file->f_path.dentry, file->f_inode->i_sb->s_fs_info); +- cache_item = hmdfs_find_cache_item(CLOUD_DEVICE, +- file->f_path.dentry); +- if (cache_item) { +- file->private_data = cache_item->filp; +- get_file(file->private_data); +- kref_put(&cache_item->ref, release_cache_item); +- return 0; +- } +- +- return -ENOENT; +-} +- +-static int hmdfs_dir_release_cloud(struct inode *inode, struct file *file) +-{ +- if (file->private_data) +- fput(file->private_data); +- file->private_data = NULL; +- return 0; +-} +- +-const struct file_operations hmdfs_dev_dir_ops_cloud = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_iterate_cloud, +- .open = hmdfs_dir_open_cloud, +- .release = hmdfs_dir_release_cloud, +- .fsync = __generic_file_fsync, +-}; +diff --git a/fs/hmdfs/file_local.c b/fs/hmdfs/file_local.c +deleted file mode 100644 +index ceb54be8a..000000000 +--- a/fs/hmdfs/file_local.c ++++ /dev/null +@@ -1,405 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/file_local.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "hmdfs_client.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +-#include "hmdfs_merge_view.h" +-#include "hmdfs_share.h" +-#include "hmdfs_trace.h" +- +-int hmdfs_file_open_local(struct inode *inode, struct file *file) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- struct path lower_path; +- struct super_block *sb = inode->i_sb; +- const struct cred *cred = hmdfs_sb(sb)->cred; +- struct hmdfs_file_info *gfi = kzalloc(sizeof(*gfi), GFP_KERNEL); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- if (!gfi) { +- err = -ENOMEM; +- goto out_err; +- } +- +- hmdfs_get_lower_path(file->f_path.dentry, &lower_path); +- if (inode->i_mapping != NULL && +- inode->i_mapping->a_ops == &hmdfs_aops_cloud) +- lower_file = dentry_open(&lower_path, file->f_flags | O_DIRECT, +- cred); +- else +- lower_file = dentry_open(&lower_path, file->f_flags, cred); +- hmdfs_put_lower_path(&lower_path); +- if (IS_ERR(lower_file)) { +- err = PTR_ERR(lower_file); +- kfree(gfi); +- } else { +- gfi->lower_file = lower_file; +- file->private_data = gfi; +- hmdfs_update_upper_file(file, lower_file); +- if (file->f_flags & (O_RDWR | O_WRONLY)) +- atomic_inc(&info->write_opened); +- } +-out_err: +- return err; +-} +- +-int hmdfs_file_release_local(struct inode *inode, struct file *file) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(file); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- if (file->f_flags & (O_RDWR | O_WRONLY)) +- atomic_dec(&info->write_opened); +- file->private_data = NULL; +- fput(gfi->lower_file); +- kfree(gfi); +- return 0; +-} +- +-static void hmdfs_file_accessed(struct file *file) +-{ +- struct file *lower_file = hmdfs_f(file)->lower_file; +- struct inode *inode = file_inode(file); +- struct inode *lower_inode = file_inode(lower_file); +- +- if (file->f_flags & O_NOATIME) +- return; +- +- inode->i_atime = lower_inode->i_atime; +-} +- +-ssize_t hmdfs_do_read_iter(struct file *file, struct iov_iter *iter, +- loff_t *ppos) +-{ +- ssize_t ret; +- struct file *lower_file = hmdfs_f(file)->lower_file; +- struct kiocb *iocb; +- +- if (!iov_iter_count(iter)) +- return 0; +- +- if (file->f_inode->i_mapping != NULL && +- file->f_inode->i_mapping->a_ops == &hmdfs_aops_cloud) { +- iocb = container_of(ppos, struct kiocb, ki_pos); +- ret = generic_file_read_iter(iocb, iter); +- } else { +- ret = vfs_iter_read(lower_file, iter, ppos, 0); +- } +- hmdfs_file_accessed(file); +- +- return ret; +-} +- +-static ssize_t hmdfs_local_read_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- return hmdfs_do_read_iter(iocb->ki_filp, iter, &iocb->ki_pos); +-} +- +-static void hmdfs_file_modified(struct file *file) +-{ +- struct inode *inode = file_inode(file); +- struct dentry *dentry = file_dentry(file); +- struct file *lower_file = hmdfs_f(file)->lower_file; +- struct inode *lower_inode = file_inode(lower_file); +- +- inode->i_atime = lower_inode->i_atime; +- inode->__i_ctime = lower_inode->__i_ctime; +- inode->i_mtime = lower_inode->i_mtime; +- i_size_write(inode, i_size_read(lower_inode)); +- +- if (!hmdfs_i_merge(hmdfs_i(inode))) +- update_inode_to_dentry(dentry, inode); +-} +- +-ssize_t hmdfs_do_write_iter(struct file *file, struct iov_iter *iter, +- loff_t *ppos) +-{ +- ssize_t ret; +- struct file *lower_file = hmdfs_f(file)->lower_file; +- struct inode *inode = file_inode(file); +- +- if (!iov_iter_count(iter)) +- return 0; +- +- inode_lock(inode); +- +- ret = file_remove_privs(file); 
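(hmdfs_do_write_iter continues below.) The hmdfs_do_read_iter path above leans on a container_of() trick: when the inode uses the cloud address-space ops, the loff_t pointer it received is assumed to be the ki_pos field embedded in a struct kiocb, so the enclosing kiocb can be recovered and handed to generic_file_read_iter. A minimal user-space sketch of that recovery, using a stand-in struct rather than the kernel's kiocb:

    /* Illustration only: fake_kiocb is a stand-in, not the kernel type. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_kiocb {
        void *ki_filp;
        long long ki_pos;   /* plays the role of kiocb->ki_pos */
    };

    int main(void)
    {
        struct fake_kiocb iocb = { .ki_filp = NULL, .ki_pos = 4096 };
        long long *ppos = &iocb.ki_pos;

        /* Recover the enclosing struct from the member pointer. */
        struct fake_kiocb *recovered =
            container_of(ppos, struct fake_kiocb, ki_pos);

        printf("recovered == &iocb: %d, pos = %lld\n",
               recovered == &iocb, recovered->ki_pos);
        return 0;
    }

This only holds because every caller on that path passes &iocb->ki_pos; handing any other loff_t pointer through it would reconstruct a bogus kiocb.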
+- if (ret) +- goto out_unlock; +- +- file_start_write(lower_file); +- ret = vfs_iter_write(lower_file, iter, ppos, 0); +- file_end_write(lower_file); +- +- hmdfs_file_modified(file); +- +-out_unlock: +- inode_unlock(inode); +- return ret; +-} +- +-ssize_t hmdfs_local_write_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- return hmdfs_do_write_iter(iocb->ki_filp, iter, &iocb->ki_pos); +-} +- +-int hmdfs_fsync_local(struct file *file, loff_t start, loff_t end, int datasync) +-{ +- int err; +- struct file *lower_file = hmdfs_f(file)->lower_file; +- +- err = __generic_file_fsync(file, start, end, datasync); +- if (err) +- goto out; +- +- err = vfs_fsync_range(lower_file, start, end, datasync); +-out: +- return err; +-} +- +-loff_t hmdfs_file_llseek_local(struct file *file, loff_t offset, int whence) +-{ +- loff_t ret; +- struct file *lower_file; +- +- lower_file = hmdfs_f(file)->lower_file; +- lower_file->f_pos = file->f_pos; +- ret = vfs_llseek(lower_file, offset, whence); +- file->f_pos = lower_file->f_pos; +- +- return ret; +-} +- +-int hmdfs_file_mmap_local(struct file *file, struct vm_area_struct *vma) +-{ +- struct hmdfs_file_info *private_data = file->private_data; +- struct file *realfile = NULL; +- int ret; +- +- if (!private_data) +- return -EINVAL; +- +- realfile = private_data->lower_file; +- if (!realfile) +- return -EINVAL; +- +- if (!realfile->f_op->mmap) +- return -ENODEV; +- +- if (WARN_ON(file != vma->vm_file)) +- return -EIO; +- +- vma->vm_file = get_file(realfile); +- ret = call_mmap(vma->vm_file, vma); +- if (ret) +- fput(realfile); +- else +- fput(file); +- +- file_accessed(file); +- +- return ret; +-} +- +-const struct file_operations hmdfs_file_fops_local = { +- .owner = THIS_MODULE, +- .llseek = hmdfs_file_llseek_local, +- .read_iter = hmdfs_local_read_iter, +- .write_iter = hmdfs_local_write_iter, +- .mmap = hmdfs_file_mmap_local, +- .open = hmdfs_file_open_local, +- .release = hmdfs_file_release_local, +- .fsync = hmdfs_fsync_local, +- .splice_read = copy_splice_read, +- .splice_write = iter_file_splice_write, +-}; +- +-static int hmdfs_iterate_local(struct file *file, struct dir_context *ctx) +-{ +- int err = 0; +- loff_t start_pos = ctx->pos; +- struct file *lower_file = hmdfs_f(file)->lower_file; +- +- if (ctx->pos == -1) +- return 0; +- +- lower_file->f_pos = file->f_pos; +- err = iterate_dir(lower_file, ctx); +- file->f_pos = lower_file->f_pos; +- +- if (err < 0) +- ctx->pos = -1; +- +- trace_hmdfs_iterate_local(file->f_path.dentry, start_pos, ctx->pos, +- err); +- return err; +-} +- +-int hmdfs_dir_open_local(struct inode *inode, struct file *file) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- struct dentry *dentry = file->f_path.dentry; +- struct path lower_path; +- struct super_block *sb = inode->i_sb; +- const struct cred *cred = hmdfs_sb(sb)->cred; +- struct hmdfs_file_info *gfi = kzalloc(sizeof(*gfi), GFP_KERNEL); +- +- if (!gfi) +- return -ENOMEM; +- +- if (IS_ERR_OR_NULL(cred)) { +- err = -EPERM; +- goto out_err; +- } +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_file = dentry_open(&lower_path, file->f_flags, cred); +- hmdfs_put_lower_path(&lower_path); +- if (IS_ERR(lower_file)) { +- err = PTR_ERR(lower_file); +- goto out_err; +- } else { +- gfi->lower_file = lower_file; +- file->private_data = gfi; +- } +- return err; +- +-out_err: +- kfree(gfi); +- return err; +-} +- +-static int hmdfs_dir_release_local(struct inode *inode, struct file *file) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(file); +- +- file->private_data = 
NULL; +- fput(gfi->lower_file); +- kfree(gfi); +- return 0; +-} +- +-const struct file_operations hmdfs_dir_ops_local = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_iterate_local, +- .open = hmdfs_dir_open_local, +- .release = hmdfs_dir_release_local, +- .fsync = hmdfs_fsync_local, +-}; +- +-static int __hmdfs_ioc_set_share_path(struct file *file, +- struct hmdfs_share_control *sc) +-{ +- struct super_block *sb = file->f_inode->i_sb; +- struct hmdfs_sb_info *sbi = hmdfs_sb(sb); +- struct hmdfs_share_table *st = &sbi->share_table; +- struct hmdfs_share_item *item; +- struct dentry *dentry; +- const char *dir_path, *full_path; +- struct qstr relative_path; +- struct fd src; +- int err = 0; +- +- src = fdget(sc->src_fd); +- if (!src.file) +- return -EBADF; +- +- /* only reg file can be shared */ +- if (!S_ISREG(src.file->f_inode->i_mode)) { +- err = -EPERM; +- goto err_out; +- } +- +- /* share file is not allowed to be shared */ +- if (hmdfs_is_share_file(src.file)) { +- err = -EPERM; +- goto err_out; +- } +- +- dentry = src.file->f_path.dentry; +- if (dentry->d_name.len > NAME_MAX) { +- err = -ENAMETOOLONG; +- goto err_out; +- } +- +- dir_path = hmdfs_get_dentry_relative_path(file->f_path.dentry); +- if (unlikely(!dir_path)) { +- err = -ENOMEM; +- goto err_out; +- } +- +- full_path = hmdfs_connect_path(dir_path, dentry->d_name.name); +- if (unlikely(!full_path)) { +- err = -ENOMEM; +- goto free_dir; +- } +- relative_path.name = full_path; +- relative_path.len = strlen(full_path); +- +- spin_lock(&sbi->share_table.item_list_lock); +- item = hmdfs_lookup_share_item(st, &relative_path); +- if (!item) { +- err = insert_share_item(st, &relative_path, src.file, sc->cid); +- goto unlock; +- } +- +- if (item->opened) +- err = -EEXIST; +- else +- update_share_item(item, src.file, sc->cid); +- +-unlock: +- spin_unlock(&sbi->share_table.item_list_lock); +- kfree(full_path); +-free_dir: +- kfree(dir_path); +-err_out: +- fdput(src); +- return err; +-} +- +-static int hmdfs_ioc_set_share_path(struct file *file, unsigned long arg) +-{ +- struct hmdfs_share_control sc; +- +- if (copy_from_user(&sc, (struct hmdfs_share_control __user *)arg, +- sizeof(sc))) +- return -EFAULT; +- +- return __hmdfs_ioc_set_share_path(file, &sc); +-} +- +-static long hmdfs_dir_ioctl_local(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- switch (cmd) { +- case HMDFS_IOC_SET_SHARE_PATH: +- return hmdfs_ioc_set_share_path(file, arg); +- default: +- return -ENOTTY; +- } +-} +- +-const struct file_operations hmdfs_dir_ops_share = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_iterate_local, +- .open = hmdfs_dir_open_local, +- .release = hmdfs_dir_release_local, +- .fsync = hmdfs_fsync_local, +- .unlocked_ioctl = hmdfs_dir_ioctl_local, +- .compat_ioctl = hmdfs_dir_ioctl_local, +-}; +diff --git a/fs/hmdfs/file_merge.c b/fs/hmdfs/file_merge.c +deleted file mode 100644 +index 2a2998e32..000000000 +--- a/fs/hmdfs/file_merge.c ++++ /dev/null +@@ -1,841 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/file_merge.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include "hmdfs_merge_view.h" +- +-#include +- +-#include "hmdfs.h" +-#include "hmdfs_trace.h" +-#include "authority/authentication.h" +- +-struct hmdfs_iterate_callback_merge { +- struct dir_context ctx; +- struct dir_context *caller; +- /* +- * Record the return value of 'caller->actor': +- * +- * false, buffer is exhausted +- * false, current task is pending +- * false, something is wrong +- * true, success and can do more +- */ +- bool result ; +- struct rb_root *root; +- uint64_t dev_id; +-}; +- +-struct hmdfs_cache_entry { +- struct rb_node rb_node; +- int name_len; +- char *name; +- int file_type; +-}; +- +-struct hmdfs_user_info { +- char *local_path; +- char *distributed_path; +- char *bundle_name; +-}; +- +-struct hmdfs_cache_entry *allocate_entry(const char *name, int namelen, +- int d_type) +-{ +- struct hmdfs_cache_entry *data; +- +- data = kmalloc(sizeof(*data), GFP_KERNEL); +- if (!data) +- return ERR_PTR(-ENOMEM); +- +- data->name = kstrndup(name, namelen, GFP_KERNEL); +- if (!data->name) { +- kfree(data); +- return ERR_PTR(-ENOMEM); +- } +- +- data->name_len = namelen; +- data->file_type = d_type; +- +- return data; +-} +- +-int insert_filename(struct rb_root *root, struct hmdfs_cache_entry **new_entry) +-{ +- struct rb_node *parent = NULL; +- struct rb_node **new_node = &(root->rb_node); +- int cmp_res = 0; +- struct hmdfs_cache_entry *data = *new_entry; +- +- while (*new_node) { +- struct hmdfs_cache_entry *entry = container_of( +- *new_node, struct hmdfs_cache_entry, rb_node); +- parent = *new_node; +- +- if (data->name_len < entry->name_len) +- cmp_res = -1; +- else if (data->name_len > entry->name_len) +- cmp_res = 1; +- else +- cmp_res = strncmp(data->name, entry->name, +- data->name_len); +- +- if (!cmp_res) { +- kfree(data->name); +- kfree(data); +- *new_entry = entry; +- return entry->file_type; +- } +- +- if (cmp_res < 0) +- new_node = &((*new_node)->rb_left); +- else if (cmp_res > 0) +- new_node = &((*new_node)->rb_right); +- } +- +- rb_link_node(&data->rb_node, parent, new_node); +- rb_insert_color(&data->rb_node, root); +- +- return 0; +-} +- +-static void recursive_delete(struct rb_node *node) +-{ +- struct hmdfs_cache_entry *entry = NULL; +- +- if (!node) +- return; +- +- recursive_delete(node->rb_left); +- recursive_delete(node->rb_right); +- +- entry = container_of(node, struct hmdfs_cache_entry, rb_node); +- kfree(entry->name); +- kfree(entry); +-} +- +-static void destroy_tree(struct rb_root *root) +-{ +- if (!root) +- return; +- recursive_delete(root->rb_node); +- root->rb_node = NULL; +-} +- +-static void delete_filename(struct rb_root *root, +- struct hmdfs_cache_entry *data) +-{ +- struct rb_node **node = &(root->rb_node); +- struct hmdfs_cache_entry *entry = NULL; +- int cmp_res = 0; +- +- while (*node) { +- entry = container_of(*node, struct hmdfs_cache_entry, rb_node); +- if (data->name_len < entry->name_len) +- cmp_res = -1; +- else if (data->name_len > entry->name_len) +- cmp_res = 1; +- else +- cmp_res = strncmp(data->name, entry->name, +- data->name_len); +- +- if (!cmp_res) +- goto found; +- +- if (cmp_res < 0) +- node = &((*node)->rb_left); +- else if (cmp_res > 0) +- node = &((*node)->rb_right); +- } +- return; +- +-found: +- rb_erase(*node, root); +- kfree(entry->name); +- kfree(entry); +-} +- +-static void rename_conflicting_file(char *dentry_name, int *len, +- unsigned int dev_id) +-{ +- int i = *len - 1; +- int dot_pos = -1; +- char *buffer; +- +- buffer = kzalloc(DENTRY_NAME_MAX_LEN, GFP_KERNEL); +- if (!buffer) +- return; 
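(rename_conflicting_file continues below.) Before it runs, insert_filename above has already placed every name seen so far into a red-black tree, comparing name_len first and falling back to strncmp only for equal lengths; a hit means the same name was already emitted by another device. A user-space sketch of that ordering (merge_name_cmp is an illustrative name, not part of hmdfs):

    #include <stdio.h>
    #include <string.h>

    /* Shorter names sort first; equal lengths fall back to strncmp. */
    static int merge_name_cmp(const char *a, int a_len,
                              const char *b, int b_len)
    {
        if (a_len != b_len)
            return a_len < b_len ? -1 : 1;
        return strncmp(a, b, a_len);
    }

    int main(void)
    {
        printf("%d\n", merge_name_cmp("a.txt", 5, "abc.txt", 7)); /* -1: shorter first */
        printf("%d\n", merge_name_cmp("a.txt", 5, "b.txt", 5));   /* <0: same len, strncmp */
        printf("%d\n", merge_name_cmp("a.txt", 5, "a.txt", 5));   /*  0: duplicate */
        return 0;
    }

Length-first comparison is an arbitrary but cheap total order, not a lexicographic one; that is fine here because the tree is only used for duplicate detection, never for sorted output.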
+-
+-	while (i >= 0) {
+-		if (dentry_name[i] == '/')
+-			break;
+-		if (dentry_name[i] == '.') {
+-			// TODO: sync this change to CT01
+-			dot_pos = i;
+-			break;
+-		}
+-		i--;
+-	}
+-
+-	if (dot_pos == -1) {
+-		snprintf(dentry_name + *len, DENTRY_NAME_MAX_LEN - *len,
+-			 CONFLICTING_FILE_SUFFIX, dev_id);
+-		goto done;
+-	}
+-
+-	for (i = 0; i < *len - dot_pos; i++)
+-		buffer[i] = dentry_name[i + dot_pos];
+-
+-	buffer[i] = '\0';
+-	snprintf(dentry_name + dot_pos, DENTRY_NAME_MAX_LEN - dot_pos,
+-		 CONFLICTING_FILE_SUFFIX, dev_id);
+-	strcat(dentry_name, buffer);
+-
+-done:
+-	*len = strlen(dentry_name);
+-	kfree(buffer);
+-}
+-
+-static void rename_conflicting_directory(char *dentry_name, int *len)
+-{
+-	snprintf(dentry_name + *len, DENTRY_NAME_MAX_LEN - *len,
+-		 CONFLICTING_DIR_SUFFIX);
+-	*len += strlen(CONFLICTING_DIR_SUFFIX);
+-}
+-
+-static bool hmdfs_actor_merge(struct dir_context *ctx, const char *name,
+-			      int namelen, long long offset, unsigned long long ino,
+-			      unsigned int d_type)
+-{
+-	bool ret = true;
+-	int insert_res = 0;
+-	int max_devid_len = 2;
+-	char *dentry_name = NULL;
+-	int dentry_len = namelen;
+-	struct hmdfs_cache_entry *cache_entry = NULL;
+-	struct hmdfs_iterate_callback_merge *iterate_callback_merge = NULL;
+-	struct dir_context *org_ctx = NULL;
+-
+-	if (hmdfs_file_type(name) != HMDFS_TYPE_COMMON) {
+-		/*
+-		 * return true here, so that the caller can continue to the
+-		 * next dentry even if this dentry failed somehow.
+-		 */
+-		return true;
+-	}
+-
+-	if (namelen > NAME_MAX)
+-		return false;
+-	dentry_name = kzalloc(NAME_MAX + 1, GFP_KERNEL);
+-	if (!dentry_name)
+-		return false;
+-
+-	strncpy(dentry_name, name, dentry_len);
+-
+-	cache_entry = allocate_entry(dentry_name, dentry_len, d_type);
+-	if (IS_ERR(cache_entry)) {
+-		ret = PTR_ERR(cache_entry);
+-		goto done;
+-	}
+-
+-	iterate_callback_merge =
+-		container_of(ctx, struct hmdfs_iterate_callback_merge, ctx);
+-	insert_res =
+-		insert_filename(iterate_callback_merge->root, &cache_entry);
+-	if (d_type == DT_DIR && insert_res == DT_DIR) {
+-		goto done;
+-	} else if (d_type == DT_DIR &&
+-		   (insert_res == DT_REG || insert_res == DT_LNK)) {
+-		if (strlen(CONFLICTING_DIR_SUFFIX) > NAME_MAX - dentry_len) {
+-			ret = false;
+-			goto delete;
+-		}
+-		rename_conflicting_directory(dentry_name, &dentry_len);
+-		cache_entry->file_type = DT_DIR;
+-	} else if ((d_type == DT_REG || d_type == DT_LNK) && insert_res > 0) {
+-		if (strlen(CONFLICTING_FILE_SUFFIX) + max_devid_len >
+-		    NAME_MAX - dentry_len) {
+-			ret = false;
+-			goto delete;
+-		}
+-		rename_conflicting_file(dentry_name, &dentry_len,
+-					iterate_callback_merge->dev_id);
+-	}
+-
+-	org_ctx = iterate_callback_merge->caller;
+-	ret = org_ctx->actor(org_ctx, dentry_name, dentry_len, org_ctx->pos,
+-			     ino, d_type);
+-	/*
+-	 * Record the original return value, so that the caller can be aware
+-	 * of the different situations.
+- */ +- iterate_callback_merge->result = ret; +- if (!ret && d_type == DT_DIR && cache_entry->file_type == DT_DIR && +- (insert_res == DT_REG || insert_res == DT_LNK)) +- cache_entry->file_type = DT_REG; +- +-delete: +- if (!ret && !insert_res) +- delete_filename(iterate_callback_merge->root, cache_entry); +-done: +- kfree(dentry_name); +- return ret; +-} +- +-struct hmdfs_file_info * +-get_next_hmdfs_file_info(struct hmdfs_file_info *fi_head, int device_id) +-{ +- struct hmdfs_file_info *fi_iter = NULL; +- struct hmdfs_file_info *fi_result = NULL; +- +- mutex_lock(&fi_head->comrade_list_lock); +- list_for_each_entry_safe(fi_iter, fi_result, &(fi_head->comrade_list), +- comrade_list) { +- if (fi_iter->device_id == device_id) +- break; +- } +- mutex_unlock(&fi_head->comrade_list_lock); +- +- return fi_result != fi_head ? fi_result : NULL; +-} +- +-struct hmdfs_file_info *get_hmdfs_file_info(struct hmdfs_file_info *fi_head, +- int device_id) +-{ +- struct hmdfs_file_info *fi_iter = NULL; +- +- mutex_lock(&fi_head->comrade_list_lock); +- list_for_each_entry(fi_iter, &(fi_head->comrade_list), comrade_list) { +- if (fi_iter->device_id == device_id) { +- mutex_unlock(&fi_head->comrade_list_lock); +- return fi_iter; +- } +- } +- mutex_unlock(&fi_head->comrade_list_lock); +- +- return NULL; +-} +- +-int hmdfs_iterate_merge(struct file *file, struct dir_context *ctx) +-{ +- int err = 0; +- struct hmdfs_file_info *fi_head = hmdfs_f(file); +- struct hmdfs_file_info *fi_iter = NULL; +- struct file *lower_file_iter = NULL; +- loff_t start_pos = ctx->pos; +- unsigned long device_id = (unsigned long)((ctx->pos) << 1 >> +- (POS_BIT_NUM - DEV_ID_BIT_NUM)); +- struct hmdfs_iterate_callback_merge ctx_merge = { +- .ctx.actor = hmdfs_actor_merge, +- .caller = ctx, +- .root = &fi_head->root, +- .dev_id = device_id +- }; +- +- /* pos = -1 indicates that all devices have been traversed +- * or an error has occurred. +- */ +- if (ctx->pos == -1) +- return 0; +- +- fi_iter = get_hmdfs_file_info(fi_head, device_id); +- if (!fi_iter) { +- fi_iter = get_next_hmdfs_file_info(fi_head, device_id); +- // dev_id is changed, parameter is set 0 to get next file info +- if (fi_iter) +- ctx_merge.ctx.pos = +- hmdfs_set_pos(fi_iter->device_id, 0, 0); +- } +- while (fi_iter) { +- ctx_merge.dev_id = fi_iter->device_id; +- device_id = ctx_merge.dev_id; +- lower_file_iter = fi_iter->lower_file; +- lower_file_iter->f_pos = file->f_pos; +- err = iterate_dir(lower_file_iter, &ctx_merge.ctx); +- file->f_pos = lower_file_iter->f_pos; +- ctx->pos = file->f_pos; +- +- if (err) +- goto done; +- /* +- * ctx->actor return nonzero means buffer is exhausted or +- * something is wrong, thus we should not continue. 
+- */ +- if (ctx_merge.result) +- goto done; +- fi_iter = get_next_hmdfs_file_info(fi_head, device_id); +- if (fi_iter) { +- file->f_pos = hmdfs_set_pos(fi_iter->device_id, 0, 0); +- ctx->pos = file->f_pos; +- } +- } +-done: +- trace_hmdfs_iterate_merge(file->f_path.dentry, start_pos, ctx->pos, +- err); +- return err; +-} +- +-int do_dir_open_merge(struct file *file, const struct cred *cred, +- struct hmdfs_file_info *fi_head) +-{ +- int ret = -EINVAL; +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(file->f_path.dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct hmdfs_file_info *fi = NULL; +- struct path lo_p = { .mnt = file->f_path.mnt }; +- struct file *lower_file = NULL; +- +- if (IS_ERR_OR_NULL(cred)) +- return ret; +- +- wait_event(dim->wait_queue, !has_merge_lookup_work(dim)); +- +- mutex_lock(&dim->comrade_list_lock); +- list_for_each_entry(comrade, &(dim->comrade_list), list) { +- fi = kzalloc(sizeof(*fi), GFP_KERNEL); +- if (!fi) { +- ret = ret ? -ENOMEM : 0; +- continue; // allow some dir to fail to open +- } +- lo_p.dentry = comrade->lo_d; +- // make sure that dentry will not be dentry_kill before open +- dget(lo_p.dentry); +- if (unlikely(d_is_negative(lo_p.dentry))) { +- hmdfs_info("dentry is negative, try again"); +- kfree(fi); +- dput(lo_p.dentry); +- continue; // skip this device +- } +- lower_file = dentry_open(&lo_p, file->f_flags, cred); +- dput(lo_p.dentry); +- if (IS_ERR(lower_file)) { +- kfree(fi); +- continue; +- } +- ret = 0; +- fi->device_id = comrade->dev_id; +- fi->lower_file = lower_file; +- mutex_lock(&fi_head->comrade_list_lock); +- list_add_tail(&fi->comrade_list, &fi_head->comrade_list); +- mutex_unlock(&fi_head->comrade_list_lock); +- } +- mutex_unlock(&dim->comrade_list_lock); +- return ret; +-} +- +-int hmdfs_dir_open_merge(struct inode *inode, struct file *file) +-{ +- int ret = 0; +- struct hmdfs_file_info *fi = NULL; +- +- fi = kzalloc(sizeof(*fi), GFP_KERNEL); +- if (!fi) +- return -ENOMEM; +- +- file->private_data = fi; +- fi->root = RB_ROOT; +- mutex_init(&fi->comrade_list_lock); +- INIT_LIST_HEAD(&fi->comrade_list); +- +- ret = do_dir_open_merge(file, hmdfs_sb(inode->i_sb)->cred, fi); +- if (ret) +- kfree(fi); +- +- return ret; +-} +- +-int hmdfs_dir_release_merge(struct inode *inode, struct file *file) +-{ +- struct hmdfs_file_info *fi_head = hmdfs_f(file); +- struct hmdfs_file_info *fi_iter = NULL; +- struct hmdfs_file_info *fi_temp = NULL; +- +- mutex_lock(&fi_head->comrade_list_lock); +- list_for_each_entry_safe(fi_iter, fi_temp, &(fi_head->comrade_list), +- comrade_list) { +- list_del_init(&(fi_iter->comrade_list)); +- fput(fi_iter->lower_file); +- kfree(fi_iter); +- } +- mutex_unlock(&fi_head->comrade_list_lock); +- destroy_tree(&fi_head->root); +- file->private_data = NULL; +- kfree(fi_head); +- +- return 0; +-} +- +-static long hmdfs_ioc_get_dst_path(struct file *filp, unsigned long arg); +- +-long hmdfs_dir_unlocked_ioctl_merge(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- struct hmdfs_file_info *fi_head = hmdfs_f(file); +- struct hmdfs_file_info *fi_iter = NULL; +- struct hmdfs_file_info *fi_temp = NULL; +- struct file *lower_file = NULL; +- int error = -ENOTTY; +- +- if (cmd == HMDFS_IOC_GET_DST_PATH) +- return hmdfs_ioc_get_dst_path(file, arg); +- mutex_lock(&fi_head->comrade_list_lock); +- list_for_each_entry_safe(fi_iter, fi_temp, &(fi_head->comrade_list), +- comrade_list) { +- if (fi_iter->device_id == 0) { +- lower_file = fi_iter->lower_file; +- if (lower_file->f_op->unlocked_ioctl) +- error = 
lower_file->f_op->unlocked_ioctl( +- lower_file, cmd, arg); +- break; +- } +- } +- mutex_unlock(&fi_head->comrade_list_lock); +- return error; +-} +- +-long hmdfs_dir_compat_ioctl_merge(struct file *file, unsigned int cmd, +- unsigned long arg) +-{ +- struct hmdfs_file_info *fi_head = hmdfs_f(file); +- struct hmdfs_file_info *fi_iter = NULL; +- struct hmdfs_file_info *fi_temp = NULL; +- struct file *lower_file = NULL; +- int error = -ENOTTY; +- +- if (cmd == HMDFS_IOC_GET_DST_PATH) +- return hmdfs_ioc_get_dst_path(file, arg); +- mutex_lock(&fi_head->comrade_list_lock); +- list_for_each_entry_safe(fi_iter, fi_temp, &(fi_head->comrade_list), +- comrade_list) { +- if (fi_iter->device_id == 0) { +- lower_file = fi_iter->lower_file; +- if (lower_file->f_op->compat_ioctl) +- error = lower_file->f_op->compat_ioctl( +- lower_file, cmd, arg); +- break; +- } +- } +- mutex_unlock(&fi_head->comrade_list_lock); +- return error; +-} +- +-const struct file_operations hmdfs_dir_fops_merge = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_iterate_merge, +- .open = hmdfs_dir_open_merge, +- .release = hmdfs_dir_release_merge, +- .unlocked_ioctl = hmdfs_dir_unlocked_ioctl_merge, +- .compat_ioctl = hmdfs_dir_compat_ioctl_merge, +-}; +- +-static ssize_t hmdfs_merge_read_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- return hmdfs_do_read_iter(iocb->ki_filp, iter, &iocb->ki_pos); +-} +- +-ssize_t hmdfs_merge_write_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- return hmdfs_do_write_iter(iocb->ki_filp, iter, &iocb->ki_pos); +-} +- +-int hmdfs_file_open_merge(struct inode *inode, struct file *file) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- struct path lo_p = { .mnt = file->f_path.mnt }; +- struct super_block *sb = inode->i_sb; +- const struct cred *cred = hmdfs_sb(sb)->cred; +- struct hmdfs_file_info *gfi = NULL; +- struct dentry *parent = NULL; +- +- lo_p.dentry = hmdfs_get_fst_lo_d(file->f_path.dentry); +- if (!lo_p.dentry) { +- err = -EINVAL; +- goto out_err; +- } +- +- gfi = kzalloc(sizeof(*gfi), GFP_KERNEL); +- if (!gfi) { +- err = -ENOMEM; +- goto out_err; +- } +- +- parent = dget_parent(file->f_path.dentry); +- lower_file = dentry_open(&lo_p, file->f_flags, cred); +- if (IS_ERR(lower_file)) { +- err = PTR_ERR(lower_file); +- kfree(gfi); +- } else { +- gfi->lower_file = lower_file; +- file->private_data = gfi; +- hmdfs_update_upper_file(file, lower_file); +- } +- dput(parent); +-out_err: +- dput(lo_p.dentry); +- return err; +-} +- +-int hmdfs_file_flush_merge(struct file *file, fl_owner_t id) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(file); +- struct file *lower_file = gfi->lower_file; +- +- if (lower_file->f_op->flush) +- return lower_file->f_op->flush(lower_file, id); +- +- return 0; +-} +- +-static long hmdfs_ioc_get_writeopen_cnt(struct file *filp, unsigned long arg) +-{ +- struct hmdfs_file_info *gfi = hmdfs_f(filp); +- struct file *lower_file = gfi->lower_file; +- struct inode *lower_inode = file_inode(lower_file); +- +- u32 wo_cnt = atomic_read(&(hmdfs_i(lower_inode))->write_opened); +- +- return put_user(wo_cnt, (int __user *)arg); +-} +- +-static int copy_string_from_user(unsigned long pos, unsigned long len, +- char **data) +-{ +- char *tmp_data; +- +- if (len >= PATH_MAX) +- return -EINVAL; +- if (!access_ok((char __user *)pos, len)) +- return -EFAULT; +- +- tmp_data = kzalloc(len + 1, GFP_KERNEL); +- if (!tmp_data) +- return -ENOMEM; +- *data = tmp_data; +- +- if (copy_from_user(tmp_data, (char __user *)pos, len)) +- return -EFAULT; +- +- return 0; +-} +- 
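copy_string_from_user above follows a common bounded-copy pattern: reject len >= PATH_MAX up front, allocate len + 1 zeroed bytes so the result is always NUL-terminated, then copy from user space. A user-space sketch of the same shape, with memcpy standing in for copy_from_user and FAKE_PATH_MAX as an assumed stand-in bound:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define FAKE_PATH_MAX 4096  /* stand-in for the kernel's PATH_MAX */

    /* Illustrative helper, not part of hmdfs. */
    static int copy_bounded_string(const char *src, size_t len, char **out)
    {
        char *buf;

        if (len >= FAKE_PATH_MAX)
            return -EINVAL;

        buf = calloc(len + 1, 1);  /* len + 1 zeroed bytes: always NUL-terminated */
        if (!buf)
            return -ENOMEM;
        *out = buf;                /* published before the copy, as the kernel code does */

        memcpy(buf, src, len);     /* copy_from_user() in the kernel version */
        return 0;
    }

    int main(void)
    {
        char *dst = NULL;

        return copy_bounded_string("/data/service/el2", 17, &dst);
    }

Note the design choice mirrored here: the buffer is published through the out-pointer before the copy, so the caller's single error path (err_free_all in hmdfs_ioc_get_dst_path below) can free every partially-filled field without tracking which copy failed.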
+-static int hmdfs_get_info_from_user(unsigned long pos, +- struct hmdfs_dst_info *hdi, struct hmdfs_user_info *data) +-{ +- int ret = 0; +- +- if (!access_ok((struct hmdfs_dst_info __user *)pos, +- sizeof(struct hmdfs_dst_info))) +- return -ENOMEM; +- if (copy_from_user(hdi, (struct hmdfs_dst_info __user *)pos, +- sizeof(struct hmdfs_dst_info))) +- return -EFAULT; +- +- ret = copy_string_from_user(hdi->local_path_pos, hdi->local_path_len, +- &data->local_path); +- if (ret != 0) +- return ret; +- +- ret = copy_string_from_user(hdi->distributed_path_pos, +- hdi->distributed_path_len, +- &data->distributed_path); +- if (ret != 0) +- return ret; +- +- ret = copy_string_from_user(hdi->bundle_name_pos, hdi->bundle_name_len, +- &data->bundle_name); +- if (ret != 0) +- return ret; +- +- return 0; +-} +- +-static const struct cred *change_cred(struct dentry *dentry, +- const char *bundle_name) +-{ +- int bid; +- struct cred *cred = NULL; +- const struct cred *old_cred = NULL; +- +- cred = prepare_creds(); +- if (!cred) { +- return NULL; +- } +- bid = get_bundle_uid(hmdfs_sb(dentry->d_sb), bundle_name); +- if (bid != 0) { +- cred->fsuid = KUIDT_INIT(bid); +- cred->fsgid = KGIDT_INIT(bid); +- old_cred = override_creds(cred); +- } +- +- return old_cred; +-} +- +-static int get_file_size(const char *path_value, uint64_t pos) +-{ +- int ret; +- uint64_t size; +- struct path path; +- struct kstat buf; +- +- ret = kern_path(path_value, 0, &path); +- if (ret) +- return ret; +- ret = vfs_getattr(&path, &buf, STATX_BASIC_STATS | STATX_BTIME, 0); +- path_put(&path); +- if (ret) { +- hmdfs_err("call vfs_getattr failed, err %d", ret); +- return ret; +- } +- +- size = buf.size; +- ret = copy_to_user((uint64_t __user *)pos, &size, sizeof(uint64_t)); +- return ret; +-} +- +-static int create_link_file(struct hmdfs_user_info *data) +-{ +- int ret; +- struct dentry *dentry; +- struct path path; +- +- ret = kern_path(data->distributed_path, 0, &path); +- if (ret == 0){ +- path_put(&path); +- return ret; +- } +- +- dentry = kern_path_create(AT_FDCWD, data->distributed_path, &path, 0); +- if (IS_ERR(dentry)) +- return PTR_ERR(dentry); +- ret = vfs_symlink(&nop_mnt_idmap, path.dentry->d_inode, dentry, data->local_path); +- done_path_create(&path, dentry); +- +- return ret; +-} +- +-static int create_dir(const char *path_value, mode_t mode) +-{ +- int err = 0; +- struct path path; +- struct dentry *dentry; +- +- dentry = kern_path_create(AT_FDCWD, path_value, &path, LOOKUP_DIRECTORY); +- if(PTR_ERR(dentry) == -EEXIST) +- return 0; +- if (IS_ERR(dentry)) +- return PTR_ERR(dentry); +- +- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode); +- if (err && err != -EEXIST) +- hmdfs_err("vfs_mkdir failed, err = %d", err); +- done_path_create(&path, dentry); +- +- return err; +-} +- +-static int create_dir_recursive(const char *path_value, mode_t mode) +-{ +- int err = 0; +- char *tmp_path = kstrdup(path_value, GFP_KERNEL); +- char *p = tmp_path; +- +- if (!tmp_path) +- return -ENOMEM; +- +- if (*p == '/') +- p++; +- +- while (*p) { +- if (*p == '/') { +- *p = '\0'; +- err = create_dir(tmp_path, mode); +- if (err != 0) +- break; +- *p = '/'; +- } +- p++; +- } +- +- kfree(tmp_path); +- return err; +-} +- +-static long hmdfs_ioc_get_dst_path(struct file *filp, unsigned long arg) +-{ +- int ret = 0; +- const struct cred *old_cred; +- struct hmdfs_dst_info hdi; +- struct hmdfs_user_info *data; +- +- data = kzalloc(sizeof(*data), GFP_KERNEL); +- if (!data) { +- ret = -ENOMEM; +- goto err_free_data; +- } +- +- ret = 
hmdfs_get_info_from_user(arg, &hdi, data);
+-	if (ret != 0)
+-		goto err_free_all;
+-
+-	old_cred = change_cred(filp->f_path.dentry, data->bundle_name);
+-	if (!old_cred) {
+-		ret = -EACCES;
+-		goto err_free_all;
+-	}
+-
+-	ret = create_dir_recursive(data->distributed_path, DIR_MODE);
+-	if (ret != 0)
+-		goto err_revert;
+-
+-	ret = create_link_file(data);
+-	if (ret != 0 && ret != -EEXIST)
+-		goto err_revert;
+-
+-	ret = get_file_size(data->local_path, hdi.size);
+-
+-err_revert:
+-	revert_creds(old_cred);
+-err_free_all:
+-	kfree(data->local_path);
+-	kfree(data->distributed_path);
+-	kfree(data->bundle_name);
+-err_free_data:
+-	kfree(data);
+-	return ret;
+-}
+-
+-static long hmdfs_file_ioctl_merge(struct file *filp, unsigned int cmd, unsigned long arg)
+-{
+-	switch (cmd) {
+-	case HMDFS_IOC_GET_WRITEOPEN_CNT:
+-		return hmdfs_ioc_get_writeopen_cnt(filp, arg);
+-	case HMDFS_IOC_GET_DST_PATH:
+-		return hmdfs_ioc_get_dst_path(filp, arg);
+-	default:
+-		return -ENOTTY;
+-	}
+-}
+-
+-/* Parameters are passed through to the device_view level,
+- * so these file operations are the same as the device_view local ones.
+- */
+-const struct file_operations hmdfs_file_fops_merge = {
+-	.owner = THIS_MODULE,
+-	.llseek = hmdfs_file_llseek_local,
+-	.read_iter = hmdfs_merge_read_iter,
+-	.write_iter = hmdfs_merge_write_iter,
+-	.mmap = hmdfs_file_mmap_local,
+-	.open = hmdfs_file_open_merge,
+-	.flush = hmdfs_file_flush_merge,
+-	.release = hmdfs_file_release_local,
+-	.fsync = hmdfs_fsync_local,
+-	.unlocked_ioctl = hmdfs_file_ioctl_merge,
+-	.compat_ioctl = hmdfs_file_ioctl_merge,
+-	.splice_read = copy_splice_read,
+-	.splice_write = iter_file_splice_write,
+-};
+diff --git a/fs/hmdfs/file_remote.c b/fs/hmdfs/file_remote.c
+deleted file mode 100644
+index d2f086ed9..000000000
+--- a/fs/hmdfs/file_remote.c
++++ /dev/null
+@@ -1,1063 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/file_remote.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-#include "file_remote.h"
+-
+-#include "comm/socket_adapter.h"
+-#include "hmdfs.h"
+-#include "hmdfs_client.h"
+-#include "hmdfs_dentryfile.h"
+-#include "hmdfs_trace.h"
+-
+-static inline bool hmdfs_remote_write_cache_expired(
+-	struct hmdfs_inode_info *info)
+-{
+-	return time_after(jiffies, info->writecache_expire);
+-}
+-
+-enum expire_reason {
+-	ALL_GOOD = 0,
+-	INO_DISMATCH = 1,
+-	SIZE_OR_CTIME_DISMATCH = 2,
+-	TIMER_EXPIRE = 3,
+-	TIMER_WORKING = 4,
+-	STABLE_CTIME_DISMATCH = 5,
+-	KEEP_CACHE = 6,
+-};
+-
+-/*
+- * hmdfs_open_final_remote - Do final steps of opening a remote file, update
+- * local inode cache and decide whether or not to truncate inode pages.
+- *
+- * @info: hmdfs inode info
+- * @open_ret: values returned from remote when opening a remote file
+- * @keep_cache: keep local cache & i_size
+- */
+-static int hmdfs_open_final_remote(struct hmdfs_inode_info *info,
+-				   struct hmdfs_open_ret *open_ret,
+-				   struct file *file, bool keep_cache)
+-{
+-	struct inode *inode = &info->vfs_inode;
+-	bool truncate = false;
+-	enum expire_reason reason = ALL_GOOD;
+-	int ret = 0;
+-
+-	/*
+-	 * if the remote inode number changed and the lookup saw stale data,
+-	 * we'll return -ESTALE and reopen the file with metadata from remote
+-	 * getattr.
+- */
+-	if (info->remote_ino != open_ret->ino) {
+-		hmdfs_debug(
+-			"got stale local inode, ino in local %llu, ino from open %llu",
+-			info->remote_ino, open_ret->ino);
+-		hmdfs_send_close(info->conn, &open_ret->fid);
+-		reason = INO_DISMATCH;
+-		ret = -ESTALE;
+-		goto out;
+-	}
+-
+-	if (keep_cache) {
+-		reason = KEEP_CACHE;
+-		trace_hmdfs_open_final_remote(info, open_ret, file, reason);
+-		goto set_fid_out;
+-	}
+-
+-	/*
+-	 * if the remote size does not match the local inode, or the remote
+-	 * ctime does not match the one recorded the last time the same file
+-	 * was opened.
+-	 */
+-	if (inode->i_size != open_ret->file_size ||
+-	    hmdfs_time_compare(&info->remote_ctime, &open_ret->remote_ctime)) {
+-		truncate = true;
+-		reason = SIZE_OR_CTIME_DISMATCH;
+-		goto out;
+-	}
+-
+-	/*
+-	 * If 'writecache_expire' is set, check whether it has expired, and
+-	 * skip the checking of stable_ctime.
+-	 */
+-	if (info->writecache_expire) {
+-		truncate = hmdfs_remote_write_cache_expired(info);
+-		if (truncate)
+-			reason = TIMER_EXPIRE;
+-		else
+-			reason = TIMER_WORKING;
+-		goto out;
+-	}
+-
+-	/* the first time, or remote ctime is ahead of remote time */
+-	if (info->stable_ctime.tv_sec == 0 && info->stable_ctime.tv_nsec == 0) {
+-		truncate = true;
+-		reason = STABLE_CTIME_DISMATCH;
+-		goto out;
+-	}
+-
+-	/*
+-	 * - if last stable_ctime == stable_ctime, we do nothing.
+-	 *   a. if ctime < stable_ctime, data is ensured to be uptodate,
+-	 *   b. if ctime == stable_ctime, stale data might be accessed. This is
+-	 *      acceptable since pagecache will be dropped later.
+-	 *   c. ctime > stable_ctime is impossible.
+-	 * - if last stable_ctime < stable_ctime, we clear the cache.
+-	 *   d. ctime != last stable_ctime is impossible.
+-	 *   e. ctime == last stable_ctime, it is possible to read again from
+-	 *      b, thus we need to drop the cache.
+-	 * - if last stable_ctime > stable_ctime, we clear the cache.
+-	 *   stable_ctime must be zero in this case; this is possible because
+-	 *   the system time might have been changed.
+- */ +- if (hmdfs_time_compare(&info->stable_ctime, &open_ret->stable_ctime)) { +- truncate = true; +- reason = STABLE_CTIME_DISMATCH; +- goto out; +- } +- +-out: +- trace_hmdfs_open_final_remote(info, open_ret, file, reason); +- if (ret) +- return ret; +- +- if (reason == SIZE_OR_CTIME_DISMATCH) { +- inode->__i_ctime = open_ret->remote_ctime; +- info->remote_ctime = open_ret->remote_ctime; +- } +- +- if (truncate) { +- info->writecache_expire = 0; +- truncate_inode_pages(inode->i_mapping, 0); +- } +- +- atomic64_set(&info->write_counter, 0); +- info->stable_ctime = open_ret->stable_ctime; +- i_size_write(inode, open_ret->file_size); +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +-set_fid_out: +- spin_lock(&info->fid_lock); +- info->fid = open_ret->fid; +- spin_unlock(&info->fid_lock); +- return 0; +-} +- +-int hmdfs_do_open_remote(struct file *file, bool keep_cache) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(file_inode(file)); +- struct hmdfs_peer *conn = info->conn; +- struct hmdfs_open_ret open_ret; +- __u8 file_type = hmdfs_d(file->f_path.dentry)->file_type; +- char *send_buf; +- int err = 0; +- +- send_buf = hmdfs_get_dentry_relative_path(file->f_path.dentry); +- if (!send_buf) { +- err = -ENOMEM; +- goto out_free; +- } +- err = hmdfs_send_open(conn, send_buf, file_type, &open_ret); +- if (err) { +- hmdfs_err("hmdfs_send_open return failed with %d", err); +- goto out_free; +- } +- +- err = hmdfs_open_final_remote(info, &open_ret, file, keep_cache); +- +-out_free: +- kfree(send_buf); +- return err; +-} +- +-static inline bool hmdfs_remote_need_reopen(struct hmdfs_inode_info *info) +-{ +- return test_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags); +-} +- +-static inline bool hmdfs_remote_is_opening_file(struct hmdfs_inode_info *info) +-{ +- return test_bit(HMDFS_FID_OPENING, &info->fid_flags); +-} +- +-static int hmdfs_remote_wait_opening_file(struct hmdfs_inode_info *info) +-{ +- int err; +- +- if (!hmdfs_remote_is_opening_file(info)) +- return 0; +- +- err = ___wait_event(info->fid_wq, hmdfs_remote_is_opening_file(info), +- TASK_INTERRUPTIBLE, 0, 0, +- spin_unlock(&info->fid_lock); +- schedule(); +- spin_lock(&info->fid_lock)); +- if (err) +- err = -EINTR; +- +- return err; +-} +- +-static int hmdfs_remote_file_reopen(struct hmdfs_inode_info *info, +- struct file *filp) +-{ +- int err = 0; +- struct hmdfs_peer *conn = info->conn; +- struct inode *inode = NULL; +- struct hmdfs_fid fid; +- +- if (conn->status == NODE_STAT_OFFLINE) +- return -EAGAIN; +- +- spin_lock(&info->fid_lock); +- err = hmdfs_remote_wait_opening_file(info); +- if (err || !hmdfs_remote_need_reopen(info)) { +- spin_unlock(&info->fid_lock); +- goto out; +- } +- +- set_bit(HMDFS_FID_OPENING, &info->fid_flags); +- fid = info->fid; +- spin_unlock(&info->fid_lock); +- +- inode = &info->vfs_inode; +- inode_lock(inode); +- /* +- * Most closing cases are meaningless, except for one: +- * read process A read process B +- * err = -EBADF err = -EBADF (caused by re-online) +- * set_need_reopen +- * do reopen +- * fid = new fid_1 [server hold fid_1] +- * set need_reopen +- * do reopen +- * send close (fid_1) // In case of leak +- * fid = new fid_2 +- */ +- if (fid.id != HMDFS_INODE_INVALID_FILE_ID) +- hmdfs_send_close(conn, &fid); +- err = hmdfs_do_open_remote(filp, true); +- inode_unlock(inode); +- +- spin_lock(&info->fid_lock); +- /* +- * May make the bit set in offline handler lost, but server +- * will tell us whether or not the newly-opened file id is +- * generated before offline, if it is opened before offline, +- * 
the operation on the file id will return -EBADF and +- * HMDFS_FID_NEED_OPEN bit will be set again. +- */ +- if (!err) +- clear_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags); +- clear_bit(HMDFS_FID_OPENING, &info->fid_flags); +- spin_unlock(&info->fid_lock); +- +- wake_up_interruptible_all(&info->fid_wq); +-out: +- return err; +-} +- +-static int hmdfs_remote_check_and_reopen(struct hmdfs_inode_info *info, +- struct file *filp) +-{ +- if (!hmdfs_remote_need_reopen(info)) +- return 0; +- +- return hmdfs_remote_file_reopen(info, filp); +-} +- +-void hmdfs_do_close_remote(struct kref *kref) +-{ +- struct hmdfs_inode_info *info = +- container_of(kref, struct hmdfs_inode_info, ref); +- struct hmdfs_fid fid; +- +- hmdfs_remote_fetch_fid(info, &fid); +- /* This function can return asynchronously */ +- hmdfs_send_close(info->conn, &fid); +-} +- +-static inline bool hmdfs_remote_need_track_file(const struct hmdfs_sb_info *sbi, +- fmode_t mode) +-{ +- return (hmdfs_is_stash_enabled(sbi) && (mode & FMODE_WRITE)); +-} +- +-static void +-hmdfs_remote_del_wr_opened_inode_nolock(struct hmdfs_inode_info *info) +-{ +- WARN_ON(list_empty(&info->wr_opened_node)); +- if (atomic_dec_and_test(&info->wr_opened_cnt)) +- list_del_init(&info->wr_opened_node); +-} +- +-void hmdfs_remote_del_wr_opened_inode(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- spin_lock(&conn->wr_opened_inode_lock); +- hmdfs_remote_del_wr_opened_inode_nolock(info); +- spin_unlock(&conn->wr_opened_inode_lock); +-} +- +-void hmdfs_remote_add_wr_opened_inode_nolock(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- if (list_empty(&info->wr_opened_node)) { +- atomic_set(&info->wr_opened_cnt, 1); +- list_add_tail(&info->wr_opened_node, +- &conn->wr_opened_inode_list); +- } else { +- atomic_inc(&info->wr_opened_cnt); +- } +-} +- +-static void hmdfs_remote_add_wr_opened_inode(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- spin_lock(&conn->wr_opened_inode_lock); +- hmdfs_remote_add_wr_opened_inode_nolock(conn, info); +- spin_unlock(&conn->wr_opened_inode_lock); +-} +- +-int hmdfs_file_open_remote(struct inode *inode, struct file *file) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- struct kref *ref = &(info->ref); +- int err = 0; +- +- inode_lock(inode); +- if (kref_read(ref) == 0) { +- err = hmdfs_do_open_remote(file, false); +- if (err == 0) +- kref_init(ref); +- } else { +- kref_get(ref); +- } +- inode_unlock(inode); +- +- if (!err && hmdfs_remote_need_track_file(hmdfs_sb(inode->i_sb), +- file->f_mode)) +- hmdfs_remote_add_wr_opened_inode(info->conn, info); +- +- return err; +-} +- +-static void hmdfs_set_writecache_expire(struct hmdfs_inode_info *info, +- unsigned int seconds) +-{ +- unsigned long new_expire = jiffies + (unsigned long)seconds * HZ; +- +- /* +- * When file has been written before closing, set pagecache expire +- * if it has not been set yet. This is necessary because ctime might +- * stay the same after overwrite. 
+- */
+-	if (info->writecache_expire &&
+-	    time_after(new_expire, info->writecache_expire))
+-		return;
+-
+-	info->writecache_expire = new_expire;
+-}
+-
+-static void hmdfs_remote_keep_writecache(struct inode *inode, struct file *file)
+-{
+-	struct hmdfs_inode_info *info = NULL;
+-	struct kref *ref = NULL;
+-	struct hmdfs_getattr_ret *getattr_ret = NULL;
+-	unsigned int write_cache_timeout =
+-		hmdfs_sb(inode->i_sb)->write_cache_timeout;
+-	int err;
+-
+-	if (!write_cache_timeout)
+-		return;
+-
+-	info = hmdfs_i(inode);
+-	ref = &(info->ref);
+-	/*
+-	 * don't do anything if the file is still opening or hasn't been
+-	 * written.
+-	 */
+-	if (kref_read(ref) > 0 || !atomic64_read(&info->write_counter))
+-		return;
+-
+-	/*
+-	 * If remote getattr failed, and we don't update ctime,
+-	 * pagecache will be truncated the next time the file is opened.
+-	 */
+-	err = hmdfs_remote_getattr(info->conn, file_dentry(file), 0,
+-				   &getattr_ret);
+-	if (err) {
+-		hmdfs_err("remote getattr failed with err %d", err);
+-		return;
+-	}
+-
+-	if (!(getattr_ret->stat.result_mask & STATX_CTIME)) {
+-		hmdfs_err("get remote ctime failed with mask 0x%x",
+-			  getattr_ret->stat.result_mask);
+-		kfree(getattr_ret);
+-		return;
+-	}
+-	/*
+-	 * update ctime from remote, in case the pagecache gets
+-	 * truncated on the next open.
+-	 */
+-	inode->__i_ctime = getattr_ret->stat.ctime;
+-	info->remote_ctime = getattr_ret->stat.ctime;
+-	hmdfs_set_writecache_expire(info, write_cache_timeout);
+-	kfree(getattr_ret);
+-}
+-
+-int hmdfs_file_release_remote(struct inode *inode, struct file *file)
+-{
+-	struct hmdfs_inode_info *info = hmdfs_i(inode);
+-
+-	if (hmdfs_remote_need_track_file(hmdfs_sb(inode->i_sb), file->f_mode))
+-		hmdfs_remote_del_wr_opened_inode(info->conn, info);
+-
+-	inode_lock(inode);
+-	kref_put(&info->ref, hmdfs_do_close_remote);
+-	hmdfs_remote_keep_writecache(inode, file);
+-	inode_unlock(inode);
+-
+-	return 0;
+-}
+-
+-static int hmdfs_file_flush(struct file *file, fl_owner_t id)
+-{
+-	int err = 0;
+-	struct inode *inode = file_inode(file);
+-
+-	if (!(file->f_mode & FMODE_WRITE))
+-		return 0;
+-
+-	/*
+-	 * Continue regardless of whether file reopen fails or not,
+-	 * because there may be no dirty page.
+-	 */
+-	hmdfs_remote_check_and_reopen(hmdfs_i(inode), file);
+-
+-	/*
+-	 * Waiting for wsem here would impact performance greatly, so we
+-	 * overlap the time to issue as many wbs as we can, expecting async
+-	 * wbs are eliminated afterwards.
+-	 */
+-	filemap_fdatawrite(inode->i_mapping);
+-	down_write(&hmdfs_i(inode)->wpage_sem);
+-	err = filemap_write_and_wait(inode->i_mapping);
+-	up_write(&hmdfs_i(inode)->wpage_sem);
+-	return err;
+-}
+-
+-static ssize_t hmdfs_file_read_iter_remote(struct kiocb *iocb,
+-					   struct iov_iter *iter)
+-{
+-	struct file *filp = iocb->ki_filp;
+-	struct hmdfs_inode_info *info = hmdfs_i(file_inode(filp));
+-	struct file_ra_state *ra = NULL;
+-	unsigned int rtt;
+-	int err;
+-	bool tried = false;
+-
+-retry:
+-	err = hmdfs_remote_check_and_reopen(info, filp);
+-	if (err)
+-		return err;
+-
+-	ra = &filp->f_ra;
+-	/* rtt is measured in 10 msecs */
+-	rtt = hmdfs_tcpi_rtt(info->conn) / 10000;
+-	switch (rtt) {
+-	case 0:
+-		break;
+-	case 1:
+-		ra->ra_pages = 256;
+-		break;
+-	case 2:
+-		ra->ra_pages = 512;
+-		break;
+-	default:
+-		ra->ra_pages = 1024;
+-		break;
+-	}
+-
+-	err = generic_file_read_iter(iocb, iter);
+-	if (err < 0 && !tried && hmdfs_remote_need_reopen(info)) {
+-		/* Read from a stale fid, try read again once.
*/ +- tried = true; +- goto retry; +- } +- +- return err; +-} +- +-static inline bool hmdfs_is_file_unwritable(const struct hmdfs_inode_info *info, +- bool check_stash) +-{ +- return (check_stash && hmdfs_inode_is_stashing(info)) || +- !hmdfs_is_node_online(info->conn); +-} +- +-static ssize_t __hmdfs_file_write_iter_remote(struct kiocb *iocb, +- struct iov_iter *iter, +- bool check_stash) +-{ +- struct file *filp = iocb->ki_filp; +- struct inode *inode = file_inode(filp); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- ssize_t ret; +- +- if (hmdfs_is_file_unwritable(info, check_stash)) +- return -EAGAIN; +- +- ret = hmdfs_remote_check_and_reopen(info, filp); +- if (ret) +- return ret; +- +- inode_lock(inode); +- if (hmdfs_is_file_unwritable(info, check_stash)) { +- ret = -EAGAIN; +- goto out; +- } +- ret = generic_write_checks(iocb, iter); +- if (ret > 0) +- ret = __generic_file_write_iter(iocb, iter); +-out: +- inode_unlock(inode); +- +- if (ret > 0) +- ret = generic_write_sync(iocb, ret); +- return ret; +-} +- +-ssize_t hmdfs_file_write_iter_remote_nocheck(struct kiocb *iocb, +- struct iov_iter *iter) +-{ +- return __hmdfs_file_write_iter_remote(iocb, iter, false); +-} +- +-static ssize_t hmdfs_file_write_iter_remote(struct kiocb *iocb, +- struct iov_iter *iter) +-{ +- return __hmdfs_file_write_iter_remote(iocb, iter, true); +-} +- +-/* hmdfs not support mmap write remote file */ +-static vm_fault_t hmdfs_page_mkwrite(struct vm_fault *vmf) +-{ +- return VM_FAULT_SIGBUS; +-} +- +-static const struct vm_operations_struct hmdfs_file_vm_ops = { +- .fault = filemap_fault, +- .map_pages = filemap_map_pages, +- .page_mkwrite = hmdfs_page_mkwrite, +-}; +- +-static int hmdfs_file_mmap_remote(struct file *file, struct vm_area_struct *vma) +-{ +- vma->vm_ops = &hmdfs_file_vm_ops; +- file_accessed(file); +- +- return 0; +-} +- +-static int hmdfs_file_fsync_remote(struct file *file, loff_t start, loff_t end, +- int datasync) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(file_inode(file)); +- struct hmdfs_peer *conn = info->conn; +- struct hmdfs_fid fid; +- int err; +- +- trace_hmdfs_fsync_enter_remote(conn->sbi, conn->device_id, +- info->remote_ino, datasync); +- /* +- * Continue regardless of whether file reopen fails or not, +- * because there may be no dirty page. 
+- */
+-	hmdfs_remote_check_and_reopen(info, file);
+-
+-	filemap_fdatawrite(file->f_mapping);
+-	down_write(&info->wpage_sem);
+-	err = file_write_and_wait_range(file, start, end);
+-	up_write(&info->wpage_sem);
+-	if (err) {
+-		hmdfs_err("local fsync fail with %d", err);
+-		goto out;
+-	}
+-
+-	hmdfs_remote_fetch_fid(info, &fid);
+-	err = hmdfs_send_fsync(conn, &fid, start, end, datasync);
+-	if (err)
+-		hmdfs_err("send fsync fail with %d", err);
+-
+-out:
+-	trace_hmdfs_fsync_exit_remote(conn->sbi, conn->device_id,
+-				      info->remote_ino,
+-				      get_cmd_timeout(conn->sbi, F_FSYNC), err);
+-
+-	/* Compatible with POSIX retcode */
+-	if (err == -ETIME)
+-		err = -EIO;
+-
+-	return err;
+-}
+-
+-const struct file_operations hmdfs_dev_file_fops_remote = {
+-	.owner = THIS_MODULE,
+-	.llseek = generic_file_llseek,
+-	.read_iter = hmdfs_file_read_iter_remote,
+-	.write_iter = hmdfs_file_write_iter_remote,
+-	.mmap = hmdfs_file_mmap_remote,
+-	.open = hmdfs_file_open_remote,
+-	.release = hmdfs_file_release_remote,
+-	.flush = hmdfs_file_flush,
+-	.fsync = hmdfs_file_fsync_remote,
+-	.splice_read = copy_splice_read,
+-	.splice_write = iter_file_splice_write,
+-};
+-
+-static void hmdfs_fill_page_zero(struct page *page)
+-{
+-	void *addr = NULL;
+-
+-	addr = kmap(page);
+-	memset(addr, 0, PAGE_SIZE);
+-	kunmap(page);
+-	SetPageUptodate(page);
+-	unlock_page(page);
+-}
+-
+-static int hmdfs_readpage_remote(struct file *file, struct page *page)
+-{
+-	struct inode *inode = file_inode(file);
+-	struct hmdfs_inode_info *info = hmdfs_i(inode);
+-	loff_t isize = i_size_read(inode);
+-	pgoff_t end_index = (isize - 1) >> PAGE_SHIFT;
+-	struct hmdfs_fid fid;
+-
+-	if (!isize || page->index > end_index) {
+-		hmdfs_fill_page_zero(page);
+-		return 0;
+-	}
+-
+-	hmdfs_remote_fetch_fid(info, &fid);
+-	return hmdfs_client_readpage(info->conn, &fid, page);
+-}
+-
+-static int hmdfs_read_folio(struct file *file, struct folio *folio)
+-{
+-	struct page *page = &folio->page;
+-	return hmdfs_readpage_remote(file, page);
+-}
+-
+-uint32_t hmdfs_get_writecount(struct page *page)
+-{
+-	uint32_t count = 0;
+-	loff_t pos = (loff_t)page->index << HMDFS_PAGE_OFFSET;
+-	struct inode *inode = page->mapping->host;
+-	loff_t size = i_size_read(inode);
+-	/*
+-	 * If the page offset is greater than i_size, which is possible when
+-	 * writepage runs concurrently with truncate, we don't need to do a
+-	 * remote writepage since the page will be truncated after it is
+-	 * unlocked.
+-	 */
+-	if (pos >= size)
+-		count = 0;
+-	/*
+-	 * If the page about to be written is beyond i_size, we can't write
+-	 * beyond i_size because the remote file size would be wrong.
+-	 */
+-	else if (size < pos + HMDFS_PAGE_SIZE)
+-		count = size - pos;
+-	/* It's safe to write the whole page */
+-	else
+-		count = HMDFS_PAGE_SIZE;
+-
+-	return count;
+-}
+-
+-static bool allow_cur_thread_wpage(struct hmdfs_inode_info *info,
+-				   bool *rsem_held, bool sync_all)
+-{
+-	WARN_ON(!rsem_held);
+-
+-	if (sync_all) {
+-		*rsem_held = false;
+-		return true;
+-	}
+-	*rsem_held = down_read_trylock(&info->wpage_sem);
+-	return *rsem_held;
+-}
+-
+-/**
+- * hmdfs_writepage_remote - writeback a dirty page to remote
+- *
+- * INFO:
+- * When asked to WB_SYNC_ALL, this function should leave with both the page and
+- * the radix tree node clean to achieve close-to-open consistency. Moreover,
+- * this shall never return -EIO to help filemap to iterate all dirty pages.
+- *
+- * INFO:
+- * When asked to WB_SYNC_NONE, this function should be merciful if faults
+- * (OOM or a bad pipe) happened, to enable subsequent r/w & wb.
+- */
+-static int hmdfs_writepage_remote(struct page *page,
+-				  struct writeback_control *wbc)
+-{
+-	struct inode *inode = page->mapping->host;
+-	struct hmdfs_inode_info *info = hmdfs_i(inode);
+-	struct hmdfs_sb_info *sbi = hmdfs_sb(inode->i_sb);
+-	int ret = 0;
+-	bool rsem_held = false;
+-	bool sync = wbc->sync_mode == WB_SYNC_ALL;
+-	struct hmdfs_writepage_context *param = NULL;
+-
+-	if (!allow_cur_thread_wpage(info, &rsem_held, sync))
+-		goto out_unlock;
+-
+-	set_page_writeback(page);
+-
+-	param = kzalloc(sizeof(*param), GFP_NOFS);
+-	if (!param) {
+-		ret = -ENOMEM;
+-		goto out_endwb;
+-	}
+-
+-	if (sync && hmdfs_usr_sig_pending(current)) {
+-		ClearPageUptodate(page);
+-		goto out_free;
+-	}
+-	param->count = hmdfs_get_writecount(page);
+-	if (!param->count)
+-		goto out_free;
+-	param->rsem_held = rsem_held;
+-	hmdfs_remote_fetch_fid(info, &param->fid);
+-	param->sync_all = sync;
+-	param->caller = current;
+-	get_task_struct(current);
+-	param->page = page;
+-	param->timeout = jiffies + msecs_to_jiffies(sbi->wb_timeout_ms);
+-	INIT_DELAYED_WORK(&param->retry_dwork, hmdfs_remote_writepage_retry);
+-	ret = hmdfs_remote_do_writepage(info->conn, param);
+-	if (likely(!ret))
+-		return 0;
+-
+-	put_task_struct(current);
+-out_free:
+-	kfree(param);
+-out_endwb:
+-	end_page_writeback(page);
+-	if (rsem_held)
+-		up_read(&info->wpage_sem);
+-out_unlock:
+-	if (sync || !hmdfs_need_redirty_page(info, ret)) {
+-		SetPageError(page);
+-		mapping_set_error(page->mapping, ret);
+-	} else {
+-		redirty_page_for_writepage(wbc, page);
+-	}
+-	unlock_page(page);
+-	return ret;
+-}
+-
+-static void hmdfs_account_dirty_pages(struct address_space *mapping)
+-{
+-	struct hmdfs_sb_info *sbi = mapping->host->i_sb->s_fs_info;
+-
+-	if (!sbi->h_wb->dirty_writeback_control)
+-		return;
+-
+-	this_cpu_inc(*sbi->h_wb->bdp_ratelimits);
+-}
+-
+-static int hmdfs_write_begin_remote(struct file *file,
+-				    struct address_space *mapping, loff_t pos,
+-				    unsigned int len,
+-				    struct page **pagep, void **fsdata)
+-{
+-	pgoff_t index = ((unsigned long long)pos) >> PAGE_SHIFT;
+-	struct inode *inode = file_inode(file);
+-	struct page *page = NULL;
+-	int ret = 0;
+-
+-start:
+-	page = grab_cache_page_write_begin(mapping, index);
+-	if (!page)
+-		return -ENOMEM;
+-	*pagep = page;
+-	wait_on_page_writeback(page);
+-
+-	// If this page will be covered completely.
+-	if (len == HMDFS_PAGE_SIZE || PageUptodate(page))
+-		return 0;
+-
+-	/*
+-	 * If the data existing in this page will be completely covered,
+-	 * we just need to clear this page.
+-	 */
+-	if (!((unsigned long long)pos & (HMDFS_PAGE_SIZE - 1)) &&
+-	    (pos + len) >= i_size_read(inode)) {
+-		zero_user_segment(page, len, HMDFS_PAGE_SIZE);
+-		return 0;
+-	}
+-	/*
+-	 * We need to read the page in before writing data to it.
+- */ +- ret = hmdfs_readpage_remote(file, page); +- if (!ret) { +- if (PageLocked(page)) { +- ret = folio_lock_killable(page_folio(page)); +- if (!ret) +- unlock_page(page); +- } +- +- if (!ret && PageUptodate(page)) { +- put_page(page); +- goto start; +- } +- if (!ret) +- ret = -EIO; +- } +- put_page(page); +- return ret; +-} +- +-static int hmdfs_write_end_remote(struct file *file, +- struct address_space *mapping, loff_t pos, +- unsigned int len, unsigned int copied, +- struct page *page, void *fsdata) +-{ +- struct inode *inode = page->mapping->host; +- +- if (!PageUptodate(page)) { +- if (unlikely(copied != len)) +- copied = 0; +- else +- SetPageUptodate(page); +- } +- if (!copied) +- goto unlock_out; +- +- if (!PageDirty(page)) { +- hmdfs_account_dirty_pages(mapping); +- set_page_dirty(page); +- } +- +- if (pos + copied > i_size_read(inode)) { +- i_size_write(inode, pos + copied); +- hmdfs_i(inode)->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- } +-unlock_out: +- unlock_page(page); +- put_page(page); +- +- /* hmdfs private writeback control */ +- hmdfs_balance_dirty_pages_ratelimited(mapping); +- return copied; +-} +- +-const struct address_space_operations hmdfs_dev_file_aops_remote = { +- .read_folio = hmdfs_read_folio, +- .write_begin = hmdfs_write_begin_remote, +- .write_end = hmdfs_write_end_remote, +- .writepage = hmdfs_writepage_remote, +- .dirty_folio = filemap_dirty_folio, +-}; +- +-loff_t hmdfs_set_pos(unsigned long dev_id, unsigned long group_id, +- unsigned long offset) +-{ +- loff_t pos; +- +- pos = ((loff_t)dev_id << (POS_BIT_NUM - 1 - DEV_ID_BIT_NUM)) + +- ((loff_t)group_id << OFFSET_BIT_NUM) + offset; +- if (dev_id) +- pos |= ((loff_t)1 << (POS_BIT_NUM - 1)); +- return pos; +-} +- +-int analysis_dentry_file_from_con(struct hmdfs_sb_info *sbi, +- struct file *file, +- struct file *handler, +- struct dir_context *ctx) +-{ +- struct hmdfs_dentry_group *dentry_group = NULL; +- loff_t pos = ctx->pos; +- unsigned long dev_id = (unsigned long)((pos << 1) >> (POS_BIT_NUM - DEV_ID_BIT_NUM)); +- unsigned long group_id = (unsigned long)((pos << (1 + DEV_ID_BIT_NUM)) >> +- (POS_BIT_NUM - GROUP_ID_BIT_NUM)); +- loff_t offset = pos & OFFSET_BIT_MASK; +- int group_num = 0; +- char *dentry_name = NULL; +- int iterate_result = 0; +- int i, j; +- +- dentry_group = kzalloc(sizeof(*dentry_group), GFP_KERNEL); +- +- if (!dentry_group) +- return -ENOMEM; +- +- if (IS_ERR_OR_NULL(handler)) { +- kfree(dentry_group); +- return -ENOENT; +- } +- +- group_num = get_dentry_group_cnt(file_inode(handler)); +- dentry_name = kzalloc(DENTRY_NAME_MAX_LEN, GFP_KERNEL); +- if (!dentry_name) { +- kfree(dentry_group); +- return -ENOMEM; +- } +- +- for (i = group_id; i < group_num; i++) { +- int ret = hmdfs_metainfo_read(sbi, handler, dentry_group, +- sizeof(struct hmdfs_dentry_group), +- i); +- if (ret != sizeof(struct hmdfs_dentry_group)) { +- hmdfs_err("read dentry group failed ret:%d", ret); +- goto done; +- } +- +- for (j = offset; j < DENTRY_PER_GROUP; j++) { +- int len; +- int file_type = DT_UNKNOWN; +- bool is_continue; +- +- len = le16_to_cpu(dentry_group->nsl[j].namelen); +- if (!test_bit_le(j, dentry_group->bitmap) || len == 0) +- continue; +- +- memset(dentry_name, 0, DENTRY_NAME_MAX_LEN); +- // TODO: Support more file_type +- if (S_ISDIR(le16_to_cpu(dentry_group->nsl[j].i_mode))) +- file_type = DT_DIR; +- else if (S_ISREG(le16_to_cpu( +- dentry_group->nsl[j].i_mode))) +- file_type = DT_REG; +- else if (S_ISLNK(le16_to_cpu( +- dentry_group->nsl[j].i_mode))) +- file_type = DT_LNK; +- +- 
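+-			/*
+-			 * dentry_name was zeroed above and `len` comes from
+-			 * the slot's namelen, so copying at most `len` bytes
+-			 * below yields a NUL-terminated name; the encoded pos
+-			 * lets a later readdir call resume from this slot.
+-			 */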
+-			strncat(dentry_name, dentry_group->filename[j], len);
+-			pos = hmdfs_set_pos(dev_id, i, j);
+-			is_continue =
+-				dir_emit(ctx, dentry_name, len,
+-					 pos + INUNUMBER_START, file_type);
+-			if (!is_continue) {
+-				ctx->pos = pos;
+-				iterate_result = 1;
+-				goto done;
+-			}
+-		}
+-		offset = 0;
+-	}
+-
+-done:
+-	kfree(dentry_name);
+-	kfree(dentry_group);
+-	return iterate_result;
+-}
+-
+-int hmdfs_dev_readdir_from_con(struct hmdfs_peer *con, struct file *file,
+-			       struct dir_context *ctx)
+-{
+-	int iterate_result = 0;
+-
+-	iterate_result = analysis_dentry_file_from_con(
+-		con->sbi, file, file->private_data, ctx);
+-	return iterate_result;
+-}
+-
+-static int hmdfs_iterate_remote(struct file *file, struct dir_context *ctx)
+-{
+-	int err = 0;
+-	loff_t start_pos = ctx->pos;
+-	struct hmdfs_peer *con = NULL;
+-	struct hmdfs_dentry_info *di = hmdfs_d(file->f_path.dentry);
+-	bool is_local = !((ctx->pos) >> (POS_BIT_NUM - 1));
+-	uint64_t dev_id = di->device_id;
+-
+-	if (ctx->pos == -1)
+-		return 0;
+-	if (is_local)
+-		ctx->pos = hmdfs_set_pos(dev_id, 0, 0);
+-
+-	con = hmdfs_lookup_from_devid(file->f_inode->i_sb->s_fs_info, dev_id);
+-	if (con) {
+-		// ctx->pos = 0;
+-		err = hmdfs_dev_readdir_from_con(con, file, ctx);
+-		peer_put(con);
+-		if (err)
+-			goto done;
+-	}
+-
+-done:
+-	if (err <= 0)
+-		ctx->pos = -1;
+-
+-	trace_hmdfs_iterate_remote(file->f_path.dentry, start_pos, ctx->pos,
+-				   err);
+-	return err;
+-}
+-
+-int hmdfs_dir_open_remote(struct inode *inode, struct file *file)
+-{
+-	struct hmdfs_inode_info *info = hmdfs_i(inode);
+-	struct clearcache_item *cache_item = NULL;
+-
+-	if (info->conn) {
+-		if (!hmdfs_cache_revalidate(READ_ONCE(info->conn->conn_time),
+-					    info->conn->device_id,
+-					    file->f_path.dentry))
+-			get_remote_dentry_file_sync(file->f_path.dentry,
+-						    info->conn);
+-		cache_item = hmdfs_find_cache_item(info->conn->device_id,
+-						   file->f_path.dentry);
+-		if (cache_item) {
+-			file->private_data = cache_item->filp;
+-			get_file(file->private_data);
+-			kref_put(&cache_item->ref, release_cache_item);
+-			return 0;
+-		}
+-		return -ENOENT;
+-	}
+-	return -ENOENT;
+-}
+-
+-static int hmdfs_dir_release_remote(struct inode *inode, struct file *file)
+-{
+-	if (file->private_data)
+-		fput(file->private_data);
+-	file->private_data = NULL;
+-	return 0;
+-}
+-
+-const struct file_operations hmdfs_dev_dir_ops_remote = {
+-	.owner = THIS_MODULE,
+-	.iterate_shared = hmdfs_iterate_remote,
+-	.open = hmdfs_dir_open_remote,
+-	.release = hmdfs_dir_release_remote,
+-	.fsync = __generic_file_fsync,
+-};
+diff --git a/fs/hmdfs/file_remote.h b/fs/hmdfs/file_remote.h
+deleted file mode 100644
+index 3ed9f0f20..000000000
+--- a/fs/hmdfs/file_remote.h
++++ /dev/null
+@@ -1,30 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * fs/hmdfs/file_remote.h
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */ +- +-#ifndef HMDFS_FILE_REMOTE_H +-#define HMDFS_FILE_REMOTE_H +- +-#include +-#include +- +-#include "hmdfs.h" +-#include "comm/connection.h" +- +-void hmdfs_remote_del_wr_opened_inode(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info); +- +-void hmdfs_remote_add_wr_opened_inode_nolock(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info); +- +-ssize_t hmdfs_file_write_iter_remote_nocheck(struct kiocb *iocb, +- struct iov_iter *iter); +- +-int analysis_dentry_file_from_con(struct hmdfs_sb_info *sbi, +- struct file *file, +- struct file *handler, +- struct dir_context *ctx); +-#endif +diff --git a/fs/hmdfs/file_root.c b/fs/hmdfs/file_root.c +deleted file mode 100644 +index 60d04f921..000000000 +--- a/fs/hmdfs/file_root.c ++++ /dev/null +@@ -1,174 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/file_root.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include +-#include +-#include +- +-#include "authority/authentication.h" +-#include "comm/socket_adapter.h" +-#include "comm/transport.h" +-#include "hmdfs.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +- +-#define DEVICE_VIEW_CTX_POS 2 +-#define MERGE_VIEW_CTX_POS 3 +-#define CLOUD_MERGE_VIEW_CTX_POS 4 +-#define ROOT_DIR_INO_START 20000000 +- +-// used by hmdfs_device_iterate functions +-#define DEVICE_VIEW_INO_START 20000002 +-#define LOCAL_DEVICE_CTX_POS 2 +-#define CLOUD_DEVICE_CTX_POS 3 +- +-struct hmdfs_peer *get_next_con(struct hmdfs_sb_info *sbi, +- unsigned long current_dev_id) +-{ +- struct hmdfs_peer *con = NULL; +- struct hmdfs_peer *next_con = NULL; +- struct list_head *head, *node; +- +- mutex_lock(&sbi->connections.node_lock); +- head = &sbi->connections.node_list; +- if (current_dev_id == 0) { +- node = head->next; +- if (node == head) +- goto done; +- next_con = container_of(node, struct hmdfs_peer, list); +- if (next_con->status == NODE_STAT_ONLINE) +- goto done; +- current_dev_id = next_con->device_id; +- next_con = NULL; +- } +- +- list_for_each_entry(con, &sbi->connections.node_list, list) { +- if ((con->device_id & 0xFFFF) == (current_dev_id & 0xFFFF)) { +- node = con->list.next; +- if (node == head) +- goto done; +- next_con = container_of(node, struct hmdfs_peer, list); +- if (next_con->status == NODE_STAT_ONLINE) +- goto done; +- current_dev_id = next_con->device_id; +- next_con = NULL; +- } +- } +-done: +- if (next_con) +- peer_get(next_con); +- mutex_unlock(&sbi->connections.node_lock); +- return next_con; +-} +- +-int hmdfs_device_iterate(struct file *file, struct dir_context *ctx) +-{ +- int err = 0; +- uint64_t ino_start = DEVICE_VIEW_INO_START; +- struct hmdfs_peer *next_con = NULL; +- unsigned long dev_id = 0; +- struct hmdfs_peer *con = NULL; +- char *remote_device_name = NULL; +- +- if (ctx->pos != 0) +- goto out; +- dir_emit_dots(file, ctx); +- +- if (ctx->pos == LOCAL_DEVICE_CTX_POS) { +- err = dir_emit(ctx, DEVICE_VIEW_LOCAL, +- sizeof(DEVICE_VIEW_LOCAL) - 1, ino_start++, +- DT_DIR); +- if (!err) +- goto out; +- (ctx->pos)++; +- } +- +- if (ctx->pos == CLOUD_DEVICE_CTX_POS) { +- err = dir_emit(ctx, DEVICE_VIEW_CLOUD, +- sizeof(DEVICE_VIEW_CLOUD) - 1, ino_start++, +- DT_DIR); +- if (!err) +- goto out; +- (ctx->pos)++; +- } +- +- next_con = get_next_con(file->f_inode->i_sb->s_fs_info, 0); +- if (!next_con) +- goto out; +- +- dev_id = next_con->device_id; +- peer_put(next_con); +- con = hmdfs_lookup_from_devid(file->f_inode->i_sb->s_fs_info, dev_id); +- remote_device_name = kmalloc(HMDFS_CID_SIZE + 1, GFP_KERNEL); +- if 
(!remote_device_name) { +- err = -ENOMEM; +- goto out; +- } +- while (con) { +- peer_put(con); +- snprintf(remote_device_name, HMDFS_CID_SIZE + 1, "%s", +- con->cid); +- if (!dir_emit(ctx, remote_device_name, +- strlen(remote_device_name), ino_start++, DT_DIR)) +- goto done; +- +- (ctx->pos)++; +- con = get_next_con(file->f_inode->i_sb->s_fs_info, dev_id); +- if (!con) +- goto done; +- +- dev_id = con->device_id; +- } +-done: +- kfree(remote_device_name); +-out: +- if (err <= 0) +- ctx->pos = -1; +- +- return err; +-} +- +-int hmdfs_root_iterate(struct file *file, struct dir_context *ctx) +-{ +- uint64_t ino_start = ROOT_DIR_INO_START; +- struct hmdfs_sb_info *sbi = file_inode(file)->i_sb->s_fs_info; +- +- if (!dir_emit_dots(file, ctx)) +- return 0; +- if (ctx->pos == DEVICE_VIEW_CTX_POS) { +- if (!dir_emit(ctx, DEVICE_VIEW_ROOT, +- sizeof(DEVICE_VIEW_ROOT) - 1, ino_start, DT_DIR)) +- return 0; +- ino_start++; +- ctx->pos = MERGE_VIEW_CTX_POS; +- } +- if (sbi->s_merge_switch && ctx->pos == MERGE_VIEW_CTX_POS) { +- if (!dir_emit(ctx, MERGE_VIEW_ROOT, sizeof(MERGE_VIEW_ROOT) - 1, +- ino_start, DT_DIR)) +- return 0; +- ino_start++; +- (ctx->pos)++; +- } +- if (sbi->s_merge_switch && ctx->pos == CLOUD_MERGE_VIEW_CTX_POS) { +- if (!dir_emit(ctx, CLOUD_MERGE_VIEW_ROOT, sizeof(CLOUD_MERGE_VIEW_ROOT) - 1, +- ino_start, DT_DIR)) +- return 0; +- ino_start++; +- (ctx->pos)++; +- } +- return 0; +-} +- +-const struct file_operations hmdfs_root_fops = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_root_iterate, +-}; +- +-const struct file_operations hmdfs_device_fops = { +- .owner = THIS_MODULE, +- .iterate_shared = hmdfs_device_iterate, +-}; +diff --git a/fs/hmdfs/hmdfs.h b/fs/hmdfs/hmdfs.h +deleted file mode 100644 +index 5d1eec787..000000000 +--- a/fs/hmdfs/hmdfs.h ++++ /dev/null +@@ -1,370 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */
+-
+-#ifndef HMDFS_H
+-#define HMDFS_H
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-#include "comm/protocol.h"
+-
+-#if KERNEL_VERSION(4, 15, 0) < LINUX_VERSION_CODE
+-#define hmdfs_time_t timespec64
+-#define hmdfs_time_compare timespec64_compare
+-#define hmdfs_time_add timespec64_add
+-#else
+-#define hmdfs_time_t timespec
+-#define hmdfs_time_compare timespec_compare
+-#define hmdfs_time_add timespec_add
+-#endif
+-
+-#define HMDFS_IOC 0xf2
+-#define HMDFS_IOC_SET_SHARE_PATH _IOW(HMDFS_IOC, 1, struct hmdfs_share_control)
+-#define HMDFS_IOC_GET_WRITEOPEN_CNT _IOR(HMDFS_IOC, 2, __u32)
+-#define HMDFS_IOC_GET_DST_PATH _IOR(HMDFS_IOC, 3, __u32)
+-
+-
+-#define HMDFS_PAGE_SIZE 4096
+-#define HMDFS_PAGE_OFFSET 12
+-
+-/* max xattr value size, does not include '\0' */
+-#define HMDFS_XATTR_SIZE_MAX 4096
+-/* max listxattr response size, includes '\0' */
+-#define HMDFS_LISTXATTR_SIZE_MAX 4096
+-
+-// 20 digits + '\0', converted from a u64 integer
+-#define HMDFS_ACCOUNT_HASH_MAX_LEN 21
+-#define CTRL_PATH_MAX_LEN 11
+-
+-#define HMDFS_SUPER_MAGIC 0x20200302
+-
+-#define DEFAULT_WRITE_CACHE_TIMEOUT 30
+-#define DEFAULT_SRV_REQ_MAX_ACTIVE 16
+-
+-#define HMDFS_INODE_INVALID_FILE_ID (1U << 31)
+-#define HMDFS_FID_VER_BOOT_COOKIE_SHIFT 15
+-
+-/* According to task_struct instead of workqueue_struct */
+-#define HMDFS_WQ_NAME_LEN 16
+-
+-#define HMDFS_DEF_WB_TIMEOUT_MS 60000
+-#define HMDFS_MAX_WB_TIMEOUT_MS 900000
+-
+-#define HMDFS_READPAGES_NR_MAX 32
+-#define HMDFS_READPAGES_NR_DEF 1024
+-
+-#define HMDFS_CID_SIZE 64
+-
+-#define DIR_MODE 0771
+-
+-enum {
+-	HMDFS_FEATURE_READPAGES = 1ULL << 0,
+-	HMDFS_FEATURE_READPAGES_OPEN = 1ULL << 1,
+-	HMDFS_ATOMIC_OPEN = 1ULL << 2,
+-};
+-
+-struct client_statistic;
+-struct server_statistic;
+-struct hmdfs_writeback;
+-struct hmdfs_server_writeback;
+-struct hmdfs_syncfs_info {
+-	wait_queue_head_t wq;
+-	atomic_t wait_count;
+-	int remote_ret;
+-	unsigned long long version;
+-
+-	/* Protect version in concurrent operations */
+-	spinlock_t v_lock;
+-	/*
+-	 * Serialize hmdfs_sync_fs() process:
+-	 * |<- pending_list ->| executing |<- wait_list ->|
+-	 *  syncfs_1 syncfs_2  (syncfs_3)  syncfs_4 syncfs_5
+-	 *
+-	 * Abandon syncfs processes in pending_list after syncfs_3 finishes;
+-	 * Pick the last syncfs process in wait_list after syncfs_3 finishes;
+-	 */
+-	bool is_executing;
+-	/* syncfs process arriving after the currently executing syncfs */
+-	struct list_head wait_list;
+-	/* syncfs process arriving before the currently executing syncfs */
+-	struct list_head pending_list;
+-	spinlock_t list_lock;
+-};
+-
+-struct hmdfs_share_table {
+-	struct list_head item_list_head;
+-	spinlock_t item_list_lock;
+-	struct workqueue_struct *share_item_timeout_wq;
+-	int item_cnt;
+-	int max_cnt;
+-};
+-
+-struct hmdfs_sb_info {
+-	/* list for all registered superblocks */
+-	struct list_head list;
+-	struct mutex umount_mutex;
+-
+-	struct kobject kobj;
+-	struct completion s_kobj_unregister;
+-	struct super_block *sb;
+-	struct super_block *lower_sb;
+-	/* from mount, which is root */
+-	const struct cred *cred;
+-	/* from update cmd, expected to be system */
+-	const struct cred *system_cred;
+-	struct {
+-		struct mutex node_lock;
+-		struct list_head node_list;
+-		atomic_t conn_seq;
+-		unsigned long recent_ol;
+-	} connections;
+-	char *local_dst;
+-	char *real_dst;
+-	char *local_src;
+-	char *cache_dir;
+-	char *cloud_dir;
+-	/* seq number for hmdfs super block */
+-	unsigned int seq;
+-
+-	/*
+-	 * This value indicates how long (in seconds) the pagecache stays
+-	 * valid in the client if the metadata (except iversion) is equal to
+-	 * the server's. This functionality is disabled if this value is 0.
+-	 */
+-	unsigned int write_cache_timeout;
+-	unsigned int dcache_timeout;
+-	unsigned int dcache_precision;
+-	unsigned long dcache_threshold;
+-	struct list_head client_cache;
+-	struct list_head server_cache;
+-	struct list_head to_delete;
+-	struct mutex cache_list_lock;
+-
+-	/* local operation time statistic */
+-	struct server_statistic *s_server_statis;
+-
+-	/* client statistic */
+-	struct client_statistic *s_client_statis;
+-
+-	/* TIMEOUT of each command */
+-	struct kobject s_cmd_timeout_kobj;
+-	struct completion s_timeout_kobj_unregister;
+-	unsigned int s_cmd_timeout[F_SIZE];
+-
+-	/* For case sensitivity */
+-	bool s_case_sensitive;
+-
+-	/* For feature support */
+-	u64 s_features;
+-
+-	/* number of pages to read */
+-	unsigned int s_readpages_nr;
+-
+-	/* For merge & device view */
+-	unsigned int s_merge_switch;
+-	/* For cloud disk */
+-	unsigned int s_cloud_disk_switch;
+-	/* For writeback */
+-	struct hmdfs_writeback *h_wb;
+-	/* For server writeback */
+-	struct hmdfs_server_writeback *h_swb;
+-
+-	/* syncfs info */
+-	struct hmdfs_syncfs_info hsi;
+-
+-	/* To bridge the userspace utils */
+-	struct kfifo notify_fifo;
+-	spinlock_t notify_fifo_lock;
+-	struct mutex cmd_handler_mutex;
+-
+-	/* For reboot detection */
+-	uint64_t boot_cookie;
+-	/* offline process */
+-	unsigned int async_cb_delay;
+-	/* For server handle requests */
+-	unsigned int async_req_max_active;
+-	/* stash dirty pages during offline */
+-	bool s_offline_stash;
+-
+-	/* Timeout (ms) to retry writing remote pages */
+-	unsigned int wb_timeout_ms;
+-
+-	struct path stash_work_dir;
+-	/* dentry cache */
+-	bool s_dentry_cache;
+-
+-	/* share table */
+-	struct hmdfs_share_table share_table;
+-
+-	/* msgs that are waiting for remote */
+-	struct list_head async_readdir_msg_list;
+-	/* protect async_readdir_msg_list */
+-	spinlock_t async_readdir_msg_lock;
+-	/* async readdir work items that are queued but not finished */
+-	struct list_head async_readdir_work_list;
+-	/* protect async_readdir_work_list */
+-	spinlock_t async_readdir_work_lock;
+-	/* wait for async_readdir_work_list to be empty in umount */
+-	wait_queue_head_t async_readdir_wq;
+-	/* don't allow async readdir */
+-	bool async_readdir_prohibit;
+-
+-	/* multi user */
+-	unsigned int user_id;
+-};
+-
+-static inline struct hmdfs_sb_info *hmdfs_sb(struct super_block *sb)
+-{
+-	return sb->s_fs_info;
+-}
+-
+-static inline bool hmdfs_is_stash_enabled(const struct hmdfs_sb_info *sbi)
+-{
+-	return sbi->s_offline_stash;
+-}
+-
+-struct hmdfs_dst_info {
+-	uint64_t local_path_len;
+-	uint64_t local_path_pos;
+-	uint64_t distributed_path_len;
+-	uint64_t distributed_path_pos;
+-	uint64_t bundle_name_len;
+-	uint64_t bundle_name_pos;
+-	uint64_t size;
+-};
+-
+-struct setattr_info {
+-	loff_t size;
+-	unsigned int valid;
+-	umode_t mode;
+-	kuid_t uid;
+-	kgid_t gid;
+-	long long atime;
+-	long atime_nsec;
+-	long long mtime;
+-	long mtime_nsec;
+-	long long ctime;
+-	long ctime_nsec;
+-};
+-
+-struct hmdfs_file_info {
+-	union {
+-		struct {
+-			struct rb_root root;
+-			struct mutex comrade_list_lock;
+-		};
+-		struct {
+-			struct file *lower_file;
+-			int device_id;
+-		};
+-	};
+-	struct list_head comrade_list;
+-};
+-
+-static inline struct hmdfs_file_info *hmdfs_f(struct file *file)
+-{
+-	return file->private_data;
+-}
+-
+-// Almost all the source files want this, so...
+-#include "inode.h" +- +-/* locking helpers */ +-static inline struct dentry *lock_parent(struct dentry *dentry) +-{ +- struct dentry *dir = dget_parent(dentry); +- +- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); +- return dir; +-} +- +-static inline void unlock_dir(struct dentry *dir) +-{ +- inode_unlock(d_inode(dir)); +- dput(dir); +-} +- +-extern uint64_t path_hash(const char *path, int len, bool case_sense); +-extern int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, +- const char *name, unsigned int flags, +- struct path *path); +-extern ssize_t hmdfs_remote_listxattr(struct dentry *dentry, char *buffer, +- size_t size); +- +-int check_filename(const char *name, int len); +- +-int hmdfs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask); +- +-int hmdfs_parse_options(struct hmdfs_sb_info *sbi, const char *data); +- +-/* Refer to comments in hmdfs_request_work_fn() */ +-#define HMDFS_SERVER_CTX_FLAGS (PF_KTHREAD | PF_WQ_WORKER | PF_NPROC_EXCEEDED) +- +-static inline bool is_current_hmdfs_server_ctx(void) +-{ +- return ((current->flags & HMDFS_SERVER_CTX_FLAGS) == +- HMDFS_SERVER_CTX_FLAGS); +-} +- +-extern uint64_t hmdfs_gen_boot_cookie(void); +- +-static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len) +-{ +- return !strncasecmp(s1, s2, len); +-} +- +-static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2) +-{ +- return q1->len == q2->len && str_n_case_eq(q1->name, q2->name, q2->len); +-} +- +-static inline bool qstr_eq(const struct qstr *q1, const struct qstr *q2) +-{ +- return q1->len == q2->len && !strncmp(q1->name, q2->name, q2->len); +-} +- +-/***************************************************************************** +- * log print helpers +- *****************************************************************************/ +-__printf(4, 5) void __hmdfs_log(const char *level, const bool ratelimited, +- const char *function, const char *fmt, ...); +-#define hmdfs_err(fmt, ...) \ +- __hmdfs_log(KERN_ERR, false, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_warning(fmt, ...) \ +- __hmdfs_log(KERN_WARNING, false, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_info(fmt, ...) \ +- __hmdfs_log(KERN_INFO, false, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_err_ratelimited(fmt, ...) \ +- __hmdfs_log(KERN_ERR, true, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_warning_ratelimited(fmt, ...) \ +- __hmdfs_log(KERN_WARNING, true, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_info_ratelimited(fmt, ...) \ +- __hmdfs_log(KERN_INFO, true, __func__, fmt, ##__VA_ARGS__) +-#ifdef CONFIG_HMDFS_FS_DEBUG +-#define hmdfs_debug(fmt, ...) \ +- __hmdfs_log(KERN_DEBUG, false, __func__, fmt, ##__VA_ARGS__) +-#define hmdfs_debug_ratelimited(fmt, ...) \ +- __hmdfs_log(KERN_DEBUG, true, __func__, fmt, ##__VA_ARGS__) +-#else +-#define hmdfs_debug(fmt, ...) ((void)0) +-#define hmdfs_debug_ratelimited(fmt, ...) 
((void)0)
+-#endif
+-
+-/*****************************************************************************
+- * inode/file operations declaration
+- *****************************************************************************/
+-extern const struct inode_operations hmdfs_device_ops;
+-extern const struct inode_operations hmdfs_root_ops;
+-extern const struct file_operations hmdfs_root_fops;
+-extern const struct file_operations hmdfs_device_fops;
+-
+-#endif // HMDFS_H
+diff --git a/fs/hmdfs/hmdfs_client.c b/fs/hmdfs/hmdfs_client.c
+deleted file mode 100644
+index 36ca34de9..000000000
+--- a/fs/hmdfs/hmdfs_client.c
++++ /dev/null
+@@ -1,1123 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/hmdfs_client.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include "hmdfs_client.h"
+-#include "hmdfs_server.h"
+-
+-#include
+-#include
+-#include
+-
+-#include "comm/socket_adapter.h"
+-#include "hmdfs_dentryfile.h"
+-#include "hmdfs_trace.h"
+-#include "comm/node_cb.h"
+-#include "stash.h"
+-#include "authority/authentication.h"
+-
+-#define HMDFS_SYNC_WPAGE_RETRY_MS 2000
+-
+-static inline void free_sm_outbuf(struct hmdfs_send_command *sm)
+-{
+-	if (sm->out_buf && sm->out_len != 0)
+-		kfree(sm->out_buf);
+-	sm->out_len = 0;
+-	sm->out_buf = NULL;
+-}
+-
+-int hmdfs_send_open(struct hmdfs_peer *con, const char *send_buf,
+-		    __u8 file_type, struct hmdfs_open_ret *open_ret)
+-{
+-	int ret;
+-	int path_len = strlen(send_buf);
+-	size_t send_len = sizeof(struct open_request) + path_len + 1;
+-	struct open_request *open_req = kzalloc(send_len, GFP_KERNEL);
+-	struct open_response *resp;
+-	struct hmdfs_send_command sm = {
+-		.data = open_req,
+-		.len = send_len,
+-		.out_buf = NULL,
+-		.local_filp = NULL,
+-	};
+-	hmdfs_init_cmd(&sm.operations, F_OPEN);
+-
+-	if (!open_req) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-	open_req->file_type = file_type;
+-	open_req->path_len = cpu_to_le32(path_len);
+-	strcpy(open_req->buf, send_buf);
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	kfree(open_req);
+-
+-	if (!ret && (sm.out_len == 0 || !sm.out_buf))
+-		ret = -ENOENT;
+-	if (ret)
+-		goto out;
+-	resp = sm.out_buf;
+-
+-	open_ret->ino = le64_to_cpu(resp->ino);
+-	open_ret->fid.ver = le64_to_cpu(resp->file_ver);
+-	open_ret->fid.id = le32_to_cpu(resp->file_id);
+-	open_ret->file_size = le64_to_cpu(resp->file_size);
+-	open_ret->remote_ctime.tv_sec = le64_to_cpu(resp->ctime);
+-	open_ret->remote_ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
+-	open_ret->stable_ctime.tv_sec = le64_to_cpu(resp->stable_ctime);
+-	open_ret->stable_ctime.tv_nsec = le32_to_cpu(resp->stable_ctime_nsec);
+-
+-out:
+-	free_sm_outbuf(&sm);
+-	return ret;
+-}
+-
+-void hmdfs_send_close(struct hmdfs_peer *con, const struct hmdfs_fid *fid)
+-{
+-	size_t send_len = sizeof(struct release_request);
+-	struct release_request *release_req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_send_command sm = {
+-		.data = release_req,
+-		.len = send_len,
+-		.local_filp = NULL,
+-	};
+-	hmdfs_init_cmd(&sm.operations, F_RELEASE);
+-
+-	if (!release_req)
+-		return;
+-
+-	release_req->file_ver = cpu_to_le64(fid->ver);
+-	release_req->file_id = cpu_to_le32(fid->id);
+-
+-	hmdfs_sendmessage_request(con, &sm);
+-	kfree(release_req);
+-}
+-
+-int hmdfs_send_fsync(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
+-		     __s64 start, __s64 end, __s32 datasync)
+-{
+-	int ret;
+-	struct fsync_request *fsync_req =
+-		kzalloc(sizeof(struct fsync_request), GFP_KERNEL);
+-	struct hmdfs_send_command sm = {
+-		.data = fsync_req,
+-		
.len = sizeof(struct fsync_request), +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_FSYNC); +- if (!fsync_req) +- return -ENOMEM; +- +- fsync_req->file_ver = cpu_to_le64(fid->ver); +- fsync_req->file_id = cpu_to_le32(fid->id); +- fsync_req->datasync = cpu_to_le32(datasync); +- fsync_req->start = cpu_to_le64(start); +- fsync_req->end = cpu_to_le64(end); +- +- ret = hmdfs_sendmessage_request(con, &sm); +- +- free_sm_outbuf(&sm); +- kfree(fsync_req); +- return ret; +-} +- +-int hmdfs_client_readpage(struct hmdfs_peer *con, const struct hmdfs_fid *fid, +- struct page *page) +-{ +- int ret; +- size_t send_len = sizeof(struct readpage_request); +- struct readpage_request *read_data = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = read_data, +- .len = send_len, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_READPAGE); +- if (!read_data) { +- unlock_page(page); +- return -ENOMEM; +- } +- +- sm.out_buf = page; +- read_data->file_ver = cpu_to_le64(fid->ver); +- read_data->file_id = cpu_to_le32(fid->id); +- read_data->size = cpu_to_le32(HMDFS_PAGE_SIZE); +- read_data->index = cpu_to_le64(page->index); +- ret = hmdfs_sendpage_request(con, &sm); +- kfree(read_data); +- return ret; +-} +- +-bool hmdfs_usr_sig_pending(struct task_struct *p) +-{ +- sigset_t *sig = &p->pending.signal; +- +- if (likely(!signal_pending(p))) +- return false; +- return sigismember(sig, SIGINT) || sigismember(sig, SIGTERM) || +- sigismember(sig, SIGKILL); +-} +- +-void hmdfs_client_writepage_done(struct hmdfs_inode_info *info, +- struct hmdfs_writepage_context *ctx) +-{ +- struct page *page = ctx->page; +- bool unlock = ctx->rsem_held; +- +- SetPageUptodate(page); +- end_page_writeback(page); +- if (unlock) +- up_read(&info->wpage_sem); +- unlock_page(page); +-} +- +-static void hmdfs_client_writepage_err(struct hmdfs_peer *peer, +- struct hmdfs_inode_info *info, +- struct hmdfs_writepage_context *ctx, +- int err) +-{ +- struct page *page = ctx->page; +- bool unlock = ctx->rsem_held; +- +- if (err == -ENOMEM || err == -EAGAIN || err == -ESHUTDOWN || +- err == -ETIME) +- SetPageUptodate(page); +- else +- hmdfs_info("Page %ld of file %u writeback err %d devid %llu", +- page->index, ctx->fid.id, err, peer->device_id); +- +- /* +- * Current and subsequent writebacks have been canceled by the +- * user, leaving these pages' states in chaos. Read pages in +- * the future to update these pages. 
+- */
+-	if (ctx->sync_all && hmdfs_usr_sig_pending(ctx->caller))
+-		ClearPageUptodate(page);
+-
+-	if (ctx->sync_all || !time_is_after_eq_jiffies(ctx->timeout) ||
+-	    !(err == -ETIME || hmdfs_need_redirty_page(info, err))) {
+-		SetPageError(page);
+-		mapping_set_error(page->mapping, -EIO);
+-	} else {
+-		__set_page_dirty_nobuffers(page);
+-	}
+-
+-	end_page_writeback(page);
+-	if (unlock)
+-		up_read(&info->wpage_sem);
+-	unlock_page(page);
+-}
+-
+-static inline bool
+-hmdfs_no_timedout_sync_write(struct hmdfs_writepage_context *ctx)
+-{
+-	return ctx->sync_all && time_is_after_eq_jiffies(ctx->timeout);
+-}
+-
+-static inline bool
+-hmdfs_client_rewrite_for_timeout(struct hmdfs_writepage_context *ctx, int err)
+-{
+-	return (err == -ETIME && hmdfs_no_timedout_sync_write(ctx) &&
+-		!hmdfs_usr_sig_pending(ctx->caller));
+-}
+-
+-static inline bool
+-hmdfs_client_rewrite_for_offline(struct hmdfs_sb_info *sbi,
+-				 struct hmdfs_writepage_context *ctx, int err)
+-{
+-	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
+-	unsigned int status = READ_ONCE(info->stash_status);
+-
+-	/*
+-	 * No retry if offline occurs during inode restoration.
+-	 *
+-	 * Do retry if the local file cache is ready, even if it is not
+-	 * a WB_SYNC_ALL write; otherwise no-sync_all writeback will
+-	 * return -EIO, mapping_set_error(mapping, -EIO) will be
+-	 * called, and it will make the concurrent calling of
+-	 * filemap_write_and_wait() in hmdfs_flush_stash_file_data()
+-	 * return -EIO.
+-	 */
+-	return (hmdfs_is_stash_enabled(sbi) &&
+-		status != HMDFS_REMOTE_INODE_RESTORING &&
+-		(hmdfs_no_timedout_sync_write(ctx) ||
+-		 status == HMDFS_REMOTE_INODE_STASHING) &&
+-		hmdfs_is_offline_or_timeout_err(err));
+-}
+-
+-static inline bool
+-hmdfs_client_redo_writepage(struct hmdfs_sb_info *sbi,
+-			    struct hmdfs_writepage_context *ctx, int err)
+-{
+-	return hmdfs_client_rewrite_for_timeout(ctx, err) ||
+-	       hmdfs_client_rewrite_for_offline(sbi, ctx, err);
+-}
+-
+-static bool hmdfs_remote_write_to_remote(struct hmdfs_inode_info *info)
+-{
+-	unsigned int status = READ_ONCE(info->stash_status);
+-	bool stashing;
+-
+-	if (status != HMDFS_REMOTE_INODE_STASHING)
+-		return true;
+-
+-	/* Ensure it's OK to use info->cache afterwards */
+-	spin_lock(&info->stash_lock);
+-	stashing = (info->stash_status == HMDFS_REMOTE_INODE_STASHING);
+-	spin_unlock(&info->stash_lock);
+-
+-	return !stashing;
+-}
+-
+-int hmdfs_remote_do_writepage(struct hmdfs_peer *con,
+-			      struct hmdfs_writepage_context *ctx)
+-{
+-	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
+-	bool to_remote = false;
+-	int err = 0;
+-
+-	to_remote = hmdfs_remote_write_to_remote(info);
+-	if (to_remote)
+-		err = hmdfs_client_writepage(info->conn, ctx);
+-	else
+-		err = hmdfs_stash_writepage(info->conn, ctx);
+-	if (!err)
+-		return 0;
+-
+-	if (!(to_remote &&
+-	      hmdfs_client_rewrite_for_offline(con->sbi, ctx, err)))
+-		return err;
+-
+-	queue_delayed_work(con->retry_wb_wq, &ctx->retry_dwork,
+-			   msecs_to_jiffies(HMDFS_SYNC_WPAGE_RETRY_MS));
+-
+-	return 0;
+-}
+-
+-void hmdfs_remote_writepage_retry(struct work_struct *work)
+-{
+-	struct hmdfs_writepage_context *ctx =
+-		container_of(work, struct hmdfs_writepage_context,
+-			     retry_dwork.work);
+-	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
+-	struct hmdfs_peer *peer = info->conn;
+-	const struct cred *old_cred = NULL;
+-	int err;
+-
+-	old_cred = hmdfs_override_creds(peer->sbi->cred);
+-	err = hmdfs_remote_do_writepage(peer, ctx);
+-	hmdfs_revert_creds(old_cred);
+-	if (err) {
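+-		/*
+-		 * The delayed retry failed as well: report the error on the
+-		 * page, then drop the caller reference and free the context.
+-		 */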
+-		hmdfs_client_writepage_err(peer, info, ctx, err);
+-		put_task_struct(ctx->caller);
+-		kfree(ctx);
+-	}
+-}
+-
+-void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
+-			const struct hmdfs_resp *resp)
+-{
+-	struct hmdfs_writepage_context *ctx = req->private;
+-	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
+-	int ret = resp->ret_code;
+-	unsigned long page_index = ctx->page->index;
+-
+-	trace_hmdfs_writepage_cb_enter(peer, info->remote_ino, page_index, ret);
+-
+-	if (!ret) {
+-		hmdfs_client_writepage_done(info, ctx);
+-		atomic64_inc(&info->write_counter);
+-		goto cleanup_all;
+-	}
+-
+-	if (hmdfs_client_redo_writepage(peer->sbi, ctx, ret)) {
+-		ret = hmdfs_remote_do_writepage(peer, ctx);
+-		if (!ret)
+-			goto cleanup_req;
+-		WARN_ON(ret == -ETIME);
+-	}
+-
+-	hmdfs_client_writepage_err(peer, info, ctx, ret);
+-
+-cleanup_all:
+-	put_task_struct(ctx->caller);
+-	kfree(ctx);
+-cleanup_req:
+-	kfree(req->data);
+-
+-	trace_hmdfs_writepage_cb_exit(peer, info->remote_ino, page_index, ret);
+-}
+-
+-int hmdfs_client_writepage(struct hmdfs_peer *con,
+-			   struct hmdfs_writepage_context *param)
+-{
+-	int ret = 0;
+-	size_t send_len = sizeof(struct writepage_request) + HMDFS_PAGE_SIZE;
+-	struct writepage_request *write_data = kzalloc(send_len, GFP_NOFS);
+-	struct hmdfs_req req;
+-	char *data = NULL;
+-
+-	if (unlikely(!write_data))
+-		return -ENOMEM;
+-
+-	WARN_ON(!PageLocked(param->page)); // VFS
+-	WARN_ON(PageDirty(param->page)); // VFS
+-	WARN_ON(!PageWriteback(param->page)); // hmdfs
+-
+-	write_data->file_ver = cpu_to_le64(param->fid.ver);
+-	write_data->file_id = cpu_to_le32(param->fid.id);
+-	write_data->index = cpu_to_le64(param->page->index);
+-	write_data->count = cpu_to_le32(param->count);
+-	data = kmap(param->page);
+-	memcpy((char *)write_data->buf, data, HMDFS_PAGE_SIZE);
+-	kunmap(param->page);
+-	req.data = write_data;
+-	req.data_len = send_len;
+-
+-	req.private = param;
+-	req.private_len = sizeof(*param);
+-
+-	req.timeout = TIMEOUT_CONFIG;
+-	hmdfs_init_cmd(&req.operations, F_WRITEPAGE);
+-	ret = hmdfs_send_async_request(con, &req);
+-	if (unlikely(ret))
+-		kfree(write_data);
+-	return ret;
+-}
+-
+-void hmdfs_client_recv_readpage(struct hmdfs_head_cmd *head, int err,
+-				struct hmdfs_async_work *async_work)
+-{
+-	struct page *page = async_work->page;
+-	int ret = le32_to_cpu(head->ret_code);
+-	struct hmdfs_inode_info *info = hmdfs_i(page->mapping->host);
+-	unsigned long page_index = page->index;
+-
+-	if (!err)
+-		SetPageUptodate(page);
+-	else if (err == -EBADF)
+-		/* There may be a stale fd caused by fid version, need reopen */
+-		set_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags);
+-
+-	hmdfs_client_resp_statis(async_work->head.peer->sbi, F_READPAGE,
+-				 HMDFS_RESP_NORMAL, async_work->start, jiffies);
+-
+-	trace_hmdfs_client_recv_readpage(async_work->head.peer,
+-					 info->remote_ino, page_index, ret);
+-
+-	asw_done(async_work);
+-}
+-
+-/* read the cache dentry file at path and write it into filp */
+-int hmdfs_client_start_readdir(struct hmdfs_peer *con, struct file *filp,
+-			       const char *path, int path_len,
+-			       struct hmdfs_dcache_header *header)
+-{
+-	int ret;
+-	size_t send_len = sizeof(struct readdir_request) + path_len + 1;
+-	struct readdir_request *req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_send_command sm = {
+-		.data = req,
+-		.len = send_len,
+-		.local_filp = filp,
+-	};
+-
+-	hmdfs_init_cmd(&sm.operations, F_ITERATE);
+-	if (!req)
+-		return -ENOMEM;
+-
+-	/* add a ref or it will be released at msg put */
+-	get_file(sm.local_filp);
+-	req->path_len = cpu_to_le32(path_len);
+-	strncpy(req->path, path, path_len);
+-
+-	/*
+-	 * If we already have a cache file, verify it. If it is
+-	 * up to date, then we don't have to transfer a new one.
+-	 */
+-	if (header) {
+-		req->dcache_crtime = header->dcache_crtime;
+-		req->dcache_crtime_nsec = header->dcache_crtime_nsec;
+-		req->dentry_ctime = header->dentry_ctime;
+-		req->dentry_ctime_nsec = header->dentry_ctime_nsec;
+-		req->num = header->num;
+-		req->verify_cache = cpu_to_le32(1);
+-	}
+-
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	kfree(req);
+-	return ret;
+-}
+-
+-int hmdfs_client_start_mkdir(struct hmdfs_peer *con,
+-			     const char *path, const char *name,
+-			     umode_t mode, struct hmdfs_lookup_ret *mkdir_ret)
+-{
+-	int ret = 0;
+-	int path_len = strlen(path);
+-	int name_len = strlen(name);
+-	size_t send_len = sizeof(struct mkdir_request) + path_len + 1 +
+-			  name_len + 1;
+-	struct mkdir_request *mkdir_req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_inodeinfo_response *resp = NULL;
+-	struct hmdfs_send_command sm = {
+-		.data = mkdir_req,
+-		.len = send_len,
+-		.out_buf = NULL,
+-		.local_filp = NULL,
+-	};
+-
+-	hmdfs_init_cmd(&sm.operations, F_MKDIR);
+-	if (!mkdir_req)
+-		return -ENOMEM;
+-
+-	mkdir_req->path_len = cpu_to_le32(path_len);
+-	mkdir_req->name_len = cpu_to_le32(name_len);
+-	mkdir_req->mode = cpu_to_le16(mode);
+-	strncpy(mkdir_req->path, path, path_len);
+-	strncpy(mkdir_req->path + path_len + 1, name, name_len);
+-
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
+-		goto out;
+-	if (!sm.out_buf) {
+-		ret = -ENOENT;
+-		goto out;
+-	}
+-	resp = sm.out_buf;
+-	mkdir_ret->i_mode = le16_to_cpu(resp->i_mode);
+-	mkdir_ret->i_size = le64_to_cpu(resp->i_size);
+-	mkdir_ret->i_mtime = le64_to_cpu(resp->i_mtime);
+-	mkdir_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
+-	mkdir_ret->i_ino = le64_to_cpu(resp->i_ino);
+-
+-out:
+-	free_sm_outbuf(&sm);
+-	kfree(mkdir_req);
+-	return ret;
+-}
+-
+-int hmdfs_client_start_create(struct hmdfs_peer *con,
+-			      const char *path, const char *name,
+-			      umode_t mode, bool want_excl,
+-			      struct hmdfs_lookup_ret *create_ret)
+-{
+-	int ret = 0;
+-	int path_len = strlen(path);
+-	int name_len = strlen(name);
+-	size_t send_len = sizeof(struct create_request) + path_len + 1 +
+-			  name_len + 1;
+-	struct create_request *create_req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_inodeinfo_response *resp = NULL;
+-	struct hmdfs_send_command sm = {
+-		.data = create_req,
+-		.len = send_len,
+-		.out_buf = NULL,
+-		.local_filp = NULL,
+-	};
+-
+-	hmdfs_init_cmd(&sm.operations, F_CREATE);
+-	if (!create_req)
+-		return -ENOMEM;
+-
+-	create_req->path_len = cpu_to_le32(path_len);
+-	create_req->name_len = cpu_to_le32(name_len);
+-	create_req->mode = cpu_to_le16(mode);
+-	create_req->want_excl = want_excl;
+-	strncpy(create_req->path, path, path_len);
+-	strncpy(create_req->path + path_len + 1, name, name_len);
+-
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
+-		goto out;
+-	if (!sm.out_buf) {
+-		ret = -ENOENT;
+-		goto out;
+-	}
+-	resp = sm.out_buf;
+-	create_ret->i_mode = le16_to_cpu(resp->i_mode);
+-	create_ret->i_size = le64_to_cpu(resp->i_size);
+-	create_ret->i_mtime = le64_to_cpu(resp->i_mtime);
+-	create_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
+-	create_ret->i_ino = le64_to_cpu(resp->i_ino);
+-
+-out:
+-	free_sm_outbuf(&sm);
+-	kfree(create_req);
+-	return ret;
+-}
+-
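+-/*
+- * The requests below use the same payload layout as mkdir/create above:
+- * variable-length strings are packed back-to-back after the fixed header,
+- * each NUL-terminated, with their lengths carried in little-endian fields.
+- */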
+-int hmdfs_client_start_rmdir(struct hmdfs_peer *con, const char *path, +- const char *name) +-{ +- int ret; +- int path_len = strlen(path); +- int name_len = strlen(name); +- size_t send_len = sizeof(struct rmdir_request) + path_len + 1 + +- name_len + 1; +- struct rmdir_request *rmdir_req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = rmdir_req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_RMDIR); +- if (!rmdir_req) +- return -ENOMEM; +- +- rmdir_req->path_len = cpu_to_le32(path_len); +- rmdir_req->name_len = cpu_to_le32(name_len); +- strncpy(rmdir_req->path, path, path_len); +- strncpy(rmdir_req->path + path_len + 1, name, name_len); +- +- ret = hmdfs_sendmessage_request(con, &sm); +- free_sm_outbuf(&sm); +- kfree(rmdir_req); +- return ret; +-} +- +-int hmdfs_client_start_unlink(struct hmdfs_peer *con, const char *path, +- const char *name) +-{ +- int ret; +- int path_len = strlen(path); +- int name_len = strlen(name); +- size_t send_len = sizeof(struct unlink_request) + path_len + 1 + +- name_len + 1; +- struct unlink_request *unlink_req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = unlink_req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_UNLINK); +- if (!unlink_req) +- return -ENOMEM; +- +- unlink_req->path_len = cpu_to_le32(path_len); +- unlink_req->name_len = cpu_to_le32(name_len); +- strncpy(unlink_req->path, path, path_len); +- strncpy(unlink_req->path + path_len + 1, name, name_len); +- +- ret = hmdfs_sendmessage_request(con, &sm); +- kfree(unlink_req); +- free_sm_outbuf(&sm); +- return ret; +-} +- +-int hmdfs_client_start_rename(struct hmdfs_peer *con, const char *old_path, +- const char *old_name, const char *new_path, +- const char *new_name, unsigned int flags) +-{ +- int ret; +- int old_path_len = strlen(old_path); +- int new_path_len = strlen(new_path); +- int old_name_len = strlen(old_name); +- int new_name_len = strlen(new_name); +- +- size_t send_len = sizeof(struct rename_request) + old_path_len + 1 + +- new_path_len + 1 + old_name_len + 1 + new_name_len + +- 1; +- struct rename_request *rename_req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = rename_req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_RENAME); +- if (!rename_req) +- return -ENOMEM; +- +- rename_req->old_path_len = cpu_to_le32(old_path_len); +- rename_req->new_path_len = cpu_to_le32(new_path_len); +- rename_req->old_name_len = cpu_to_le32(old_name_len); +- rename_req->new_name_len = cpu_to_le32(new_name_len); +- rename_req->flags = cpu_to_le32(flags); +- +- strncpy(rename_req->path, old_path, old_path_len); +- strncpy(rename_req->path + old_path_len + 1, new_path, new_path_len); +- +- strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1, +- old_name, old_name_len); +- strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1 + +- old_name_len + 1, +- new_name, new_name_len); +- +- ret = hmdfs_sendmessage_request(con, &sm); +- free_sm_outbuf(&sm); +- kfree(rename_req); +- return ret; +-} +- +-int hmdfs_send_setattr(struct hmdfs_peer *con, const char *send_buf, +- struct setattr_info *attr_info) +-{ +- int ret; +- int path_len = strlen(send_buf); +- size_t send_len = path_len + 1 + sizeof(struct setattr_request); +- struct setattr_request *setattr_req = kzalloc(send_len, GFP_KERNEL); +- struct 
hmdfs_send_command sm = { +- .data = setattr_req, +- .len = send_len, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_SETATTR); +- if (!setattr_req) +- return -ENOMEM; +- +- strcpy(setattr_req->buf, send_buf); +- setattr_req->path_len = cpu_to_le32(path_len); +- setattr_req->valid = cpu_to_le32(attr_info->valid); +- setattr_req->size = cpu_to_le64(attr_info->size); +- setattr_req->mtime = cpu_to_le64(attr_info->mtime); +- setattr_req->mtime_nsec = cpu_to_le32(attr_info->mtime_nsec); +- ret = hmdfs_sendmessage_request(con, &sm); +- kfree(setattr_req); +- return ret; +-} +- +-static void hmdfs_update_getattr_ret(struct getattr_response *resp, +- struct hmdfs_getattr_ret *result) +-{ +- struct kstat *stat = &result->stat; +- +- stat->result_mask = le32_to_cpu(resp->result_mask); +- if (stat->result_mask == 0) +- return; +- +- stat->ino = le64_to_cpu(resp->ino); +- stat->mode = le16_to_cpu(resp->mode); +- stat->nlink = le32_to_cpu(resp->nlink); +- stat->uid.val = le32_to_cpu(resp->uid); +- stat->gid.val = le32_to_cpu(resp->gid); +- stat->size = le64_to_cpu(resp->size); +- stat->blocks = le64_to_cpu(resp->blocks); +- stat->blksize = le32_to_cpu(resp->blksize); +- stat->atime.tv_sec = le64_to_cpu(resp->atime); +- stat->atime.tv_nsec = le32_to_cpu(resp->atime_nsec); +- stat->mtime.tv_sec = le64_to_cpu(resp->mtime); +- stat->mtime.tv_nsec = le32_to_cpu(resp->mtime_nsec); +- stat->ctime.tv_sec = le64_to_cpu(resp->ctime); +- stat->ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec); +- stat->btime.tv_sec = le64_to_cpu(resp->crtime); +- stat->btime.tv_nsec = le32_to_cpu(resp->crtime_nsec); +- result->fsid = le64_to_cpu(resp->fsid); +- /* currently not used */ +- result->i_flags = 0; +-} +- +-int hmdfs_send_getattr(struct hmdfs_peer *con, const char *send_buf, +- unsigned int lookup_flags, +- struct hmdfs_getattr_ret *result) +-{ +- int path_len = strlen(send_buf); +- size_t send_len = path_len + 1 + sizeof(struct getattr_request); +- int ret = 0; +- struct getattr_request *req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_GETATTR); +- if (!req) +- return -ENOMEM; +- +- req->path_len = cpu_to_le32(path_len); +- req->lookup_flags = cpu_to_le32(lookup_flags); +- strncpy(req->buf, send_buf, path_len); +- ret = hmdfs_sendmessage_request(con, &sm); +- if (!ret && (sm.out_len == 0 || !sm.out_buf)) +- ret = -ENOENT; +- if (ret) +- goto out; +- +- hmdfs_update_getattr_ret(sm.out_buf, result); +- +-out: +- kfree(req); +- free_sm_outbuf(&sm); +- return ret; +-} +- +-static void hmdfs_update_statfs_ret(struct statfs_response *resp, +- struct kstatfs *buf) +-{ +- buf->f_type = le64_to_cpu(resp->f_type); +- buf->f_bsize = le64_to_cpu(resp->f_bsize); +- buf->f_blocks = le64_to_cpu(resp->f_blocks); +- buf->f_bfree = le64_to_cpu(resp->f_bfree); +- buf->f_bavail = le64_to_cpu(resp->f_bavail); +- buf->f_files = le64_to_cpu(resp->f_files); +- buf->f_ffree = le64_to_cpu(resp->f_ffree); +- buf->f_fsid.val[0] = le32_to_cpu(resp->f_fsid_0); +- buf->f_fsid.val[1] = le32_to_cpu(resp->f_fsid_1); +- buf->f_namelen = le64_to_cpu(resp->f_namelen); +- buf->f_frsize = le64_to_cpu(resp->f_frsize); +- buf->f_flags = le64_to_cpu(resp->f_flags); +- buf->f_spare[0] = le64_to_cpu(resp->f_spare_0); +- buf->f_spare[1] = le64_to_cpu(resp->f_spare_1); +- buf->f_spare[2] = le64_to_cpu(resp->f_spare_2); +- buf->f_spare[3] = le64_to_cpu(resp->f_spare_3); +-} +- +-int 
hmdfs_send_statfs(struct hmdfs_peer *con, const char *path, +- struct kstatfs *buf) +-{ +- int ret; +- int path_len = strlen(path); +- size_t send_len = sizeof(struct statfs_request) + path_len + 1; +- struct statfs_request *req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_STATFS); +- if (!req) +- return -ENOMEM; +- +- req->path_len = cpu_to_le32(path_len); +- strncpy(req->path, path, path_len); +- +- ret = hmdfs_sendmessage_request(con, &sm); +- +- if (ret == -ETIME) +- ret = -EIO; +- if (!ret && (sm.out_len == 0 || !sm.out_buf)) +- ret = -ENOENT; +- if (ret) +- goto out; +- +- hmdfs_update_statfs_ret(sm.out_buf, buf); +-out: +- kfree(req); +- free_sm_outbuf(&sm); +- return ret; +-} +- +-int hmdfs_send_syncfs(struct hmdfs_peer *con, int syncfs_timeout) +-{ +- int ret; +- struct hmdfs_req req; +- struct hmdfs_sb_info *sbi = con->sbi; +- struct syncfs_request *syncfs_req = +- kzalloc(sizeof(struct syncfs_request), GFP_KERNEL); +- +- if (!syncfs_req) { +- hmdfs_err("cannot allocate syncfs_request"); +- return -ENOMEM; +- } +- +- hmdfs_init_cmd(&req.operations, F_SYNCFS); +- req.timeout = syncfs_timeout; +- +- syncfs_req->version = cpu_to_le64(sbi->hsi.version); +- req.data = syncfs_req; +- req.data_len = sizeof(*syncfs_req); +- +- ret = hmdfs_send_async_request(con, &req); +- if (ret) { +- kfree(syncfs_req); +- hmdfs_err("ret fail with %d", ret); +- } +- +- return ret; +-} +- +-static void hmdfs_update_getxattr_ret(struct getxattr_response *resp, +- void *value, size_t o_size, int *ret) +-{ +- ssize_t size = le32_to_cpu(resp->size); +- +- if (o_size && o_size < size) { +- *ret = -ERANGE; +- return; +- } +- +- if (o_size) +- memcpy(value, resp->value, size); +- +- *ret = size; +-} +- +-int hmdfs_send_getxattr(struct hmdfs_peer *con, const char *send_buf, +- const char *name, void *value, size_t size) +-{ +- size_t path_len = strlen(send_buf); +- size_t name_len = strlen(name); +- size_t send_len = path_len + name_len + +- sizeof(struct getxattr_request) + 2; +- int ret = 0; +- struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = req, +- .len = send_len, +- .out_buf = NULL, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_GETXATTR); +- if (!req) +- return -ENOMEM; +- +- req->path_len = cpu_to_le32(path_len); +- req->name_len = cpu_to_le32(name_len); +- req->size = cpu_to_le32(size); +- strncpy(req->buf, send_buf, path_len); +- strncpy(req->buf + path_len + 1, name, name_len); +- ret = hmdfs_sendmessage_request(con, &sm); +- if (!ret && (sm.out_len == 0 || !sm.out_buf)) +- ret = -ENOENT; +- if (ret) +- goto out; +- +- hmdfs_update_getxattr_ret(sm.out_buf, value, size, &ret); +- +-out: +- kfree(req); +- free_sm_outbuf(&sm); +- return ret; +-} +- +-int hmdfs_send_setxattr(struct hmdfs_peer *con, const char *send_buf, +- const char *name, const void *value, +- size_t size, int flags) +-{ +- size_t path_len = strlen(send_buf); +- size_t name_len = strlen(name); +- size_t send_len = path_len + name_len + size + 2 + +- sizeof(struct setxattr_request); +- int ret = 0; +- struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL); +- struct hmdfs_send_command sm = { +- .data = req, +- .len = send_len, +- .local_filp = NULL, +- }; +- +- hmdfs_init_cmd(&sm.operations, F_SETXATTR); +- if (!req) +- return -ENOMEM; +- +- req->path_len = cpu_to_le32(path_len); +- req->name_len = 
cpu_to_le32(name_len);
+-	req->size = cpu_to_le32(size);
+-	req->flags = cpu_to_le32(flags);
+-	strncpy(req->buf, send_buf, path_len);
+-	strncpy(req->buf + path_len + 1, name, name_len);
+-	if (!value)
+-		req->del = true;
+-	else
+-		memcpy(req->buf + path_len + name_len + 2, value, size);
+-
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	kfree(req);
+-	return ret;
+-}
+-
+-static void hmdfs_update_listxattr_ret(struct listxattr_response *resp,
+-				       char *list, size_t o_size, ssize_t *ret)
+-{
+-	ssize_t size = le32_to_cpu(resp->size);
+-
+-	if (o_size && o_size < size) {
+-		*ret = -ERANGE;
+-		return;
+-	}
+-
+-	/* multiple names are split with '\0', so use memcpy */
+-	if (o_size)
+-		memcpy(list, resp->list, size);
+-
+-	*ret = size;
+-}
+-
+-ssize_t hmdfs_send_listxattr(struct hmdfs_peer *con, const char *send_buf,
+-			     char *list, size_t size)
+-{
+-	size_t path_len = strlen(send_buf);
+-	size_t send_len = path_len + 1 + sizeof(struct listxattr_request);
+-	ssize_t ret = 0;
+-	struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_send_command sm = {
+-		.data = req,
+-		.len = send_len,
+-		.out_buf = NULL,
+-		.local_filp = NULL,
+-	};
+-
+-	hmdfs_init_cmd(&sm.operations, F_LISTXATTR);
+-	if (!req)
+-		return -ENOMEM;
+-
+-	req->path_len = cpu_to_le32(path_len);
+-	req->size = cpu_to_le32(size);
+-	strncpy(req->buf, send_buf, path_len);
+-	ret = hmdfs_sendmessage_request(con, &sm);
+-	if (!ret && (sm.out_len == 0 || !sm.out_buf))
+-		ret = -ENOENT;
+-	if (ret)
+-		goto out;
+-
+-	hmdfs_update_listxattr_ret(sm.out_buf, list, size, &ret);
+-
+-out:
+-	kfree(req);
+-	free_sm_outbuf(&sm);
+-	return ret;
+-}
+-
+-void hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
+-			  const struct hmdfs_resp *resp)
+-{
+-	struct hmdfs_sb_info *sbi = peer->sbi;
+-	struct syncfs_request *syncfs_req = (struct syncfs_request *)req->data;
+-
+-	WARN_ON(!syncfs_req);
+-	spin_lock(&sbi->hsi.v_lock);
+-	if (le64_to_cpu(syncfs_req->version) != sbi->hsi.version) {
+-		hmdfs_info(
+-			"Recv stale syncfs resp[ver: %llu] from device %llu, current ver %llu",
+-			le64_to_cpu(syncfs_req->version), peer->device_id,
+-			sbi->hsi.version);
+-		spin_unlock(&sbi->hsi.v_lock);
+-		goto out;
+-	}
+-
+-	if (!sbi->hsi.remote_ret)
+-		sbi->hsi.remote_ret = resp->ret_code;
+-
+-	if (resp->ret_code) {
+-		hmdfs_err("Recv syncfs error code %d from device %llu",
+-			  resp->ret_code, peer->device_id);
+-	} else {
+-		/*
+-		 * Set @sb_dirty_count to zero if no one else produces
+-		 * dirty data on the remote server during remote sync.
+-		 */
+-		atomic64_cmpxchg(&peer->sb_dirty_count,
+-				 peer->old_sb_dirty_count, 0);
+-	}
+-
+-	atomic_dec(&sbi->hsi.wait_count);
+-	spin_unlock(&sbi->hsi.v_lock);
+-	wake_up_interruptible(&sbi->hsi.wq);
+-
+-out:
+-	kfree(syncfs_req);
+-}
+-
+-void hmdfs_send_drop_push(struct hmdfs_peer *con, const char *path)
+-{
+-	int path_len = strlen(path);
+-	size_t send_len = sizeof(struct drop_push_request) + path_len + 1;
+-	struct drop_push_request *dp_req = kzalloc(send_len, GFP_KERNEL);
+-	struct hmdfs_send_command sm = {
+-		.data = dp_req,
+-		.len = send_len,
+-		.local_filp = NULL,
+-	};
+-
+-	hmdfs_init_cmd(&sm.operations, F_DROP_PUSH);
+-	if (!dp_req)
+-		return;
+-
+-	dp_req->path_len = cpu_to_le32(path_len);
+-	strncpy(dp_req->path, path, path_len);
+-
+-	hmdfs_sendmessage_request(con, &sm);
+-	kfree(dp_req);
+-}
+-
+-static void *hmdfs_get_msg_next(struct hmdfs_peer *peer, int *id)
+-{
+-	struct hmdfs_msg_idr_head *head = NULL;
+-
+-	spin_lock(&peer->idr_lock);
+-	head = idr_get_next(&peer->msg_idr, id);
+-	if (head && head->type < MSG_IDR_MAX && head->type >= 0)
+-		kref_get(&head->ref);
+-
+-	spin_unlock(&peer->idr_lock);
+-
+-	return head;
+-}
+-
+-void hmdfs_client_offline_notify(struct hmdfs_peer *conn, int evt,
+-				 unsigned int seq)
+-{
+-	int id;
+-	int count = 0;
+-	struct hmdfs_msg_idr_head *head = NULL;
+-
+-	for (id = 0; (head = hmdfs_get_msg_next(conn, &id)) != NULL; ++id) {
+-		switch (head->type) {
+-		case MSG_IDR_1_0_NONE:
+-			head_put(head);
+-			head_put(head);
+-			break;
+-		case MSG_IDR_MESSAGE_SYNC:
+-		case MSG_IDR_1_0_MESSAGE_SYNC:
+-			hmdfs_response_wakeup((struct sendmsg_wait_queue *)head,
+-					      -ETIME, 0, NULL);
+-			hmdfs_debug("wakeup id=%d", head->msg_id);
+-			msg_put((struct sendmsg_wait_queue *)head);
+-			break;
+-		case MSG_IDR_MESSAGE_ASYNC:
+-			hmdfs_wakeup_parasite(
+-				(struct hmdfs_msg_parasite *)head);
+-			hmdfs_debug("wakeup parasite id=%d", head->msg_id);
+-			mp_put((struct hmdfs_msg_parasite *)head);
+-			break;
+-		case MSG_IDR_PAGE:
+-		case MSG_IDR_1_0_PAGE:
+-			hmdfs_wakeup_async_work(
+-				(struct hmdfs_async_work *)head);
+-			hmdfs_debug("wakeup async work id=%d", head->msg_id);
+-			asw_put((struct hmdfs_async_work *)head);
+-			break;
+-		default:
+-			hmdfs_err("Bad type=%d id=%d", head->type,
+-				  head->msg_id);
+-			break;
+-		}
+-
+-		count++;
+-		/* If there are too many IDR entries to process, resched
+-		 * every 512 messages to avoid a soft lockup
+-		 */
+-		if (count % HMDFS_IDR_RESCHED_COUNT == 0)
+-			cond_resched();
+-	}
+-}
+-
+-static struct hmdfs_node_cb_desc client_cb[] = {
+-	{
+-		.evt = NODE_EVT_OFFLINE,
+-		.sync = true,
+-		.fn = hmdfs_client_offline_notify,
+-	},
+-};
+-
+-void __init hmdfs_client_add_node_evt_cb(void)
+-{
+-	hmdfs_node_add_evt_cb(client_cb, ARRAY_SIZE(client_cb));
+-}
+diff --git a/fs/hmdfs/hmdfs_client.h b/fs/hmdfs/hmdfs_client.h
+deleted file mode 100644
+index ab2867dca..000000000
+--- a/fs/hmdfs/hmdfs_client.h
++++ /dev/null
+@@ -1,121 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * fs/hmdfs/hmdfs_client.h
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */ +- +-#ifndef HMDFS_CLIENT_H +-#define HMDFS_CLIENT_H +- +-#include "comm/transport.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +- +-struct hmdfs_open_ret { +- struct hmdfs_fid fid; +- __u64 file_size; +- __u64 ino; +- struct hmdfs_time_t remote_ctime; +- struct hmdfs_time_t stable_ctime; +-}; +- +-struct hmdfs_writepage_context { +- struct hmdfs_fid fid; +- uint32_t count; +- bool sync_all; +- bool rsem_held; +- unsigned long timeout; +- struct task_struct *caller; +- struct page *page; +- struct delayed_work retry_dwork; +-}; +- +-int hmdfs_client_start_readdir(struct hmdfs_peer *con, struct file *filp, +- const char *path, int path_len, +- struct hmdfs_dcache_header *header); +-int hmdfs_client_start_mkdir(struct hmdfs_peer *con, +- const char *path, const char *name, +- umode_t mode, struct hmdfs_lookup_ret *mkdir_ret); +-int hmdfs_client_start_create(struct hmdfs_peer *con, +- const char *path, const char *name, +- umode_t mode, bool want_excl, +- struct hmdfs_lookup_ret *create_ret); +-int hmdfs_client_start_rmdir(struct hmdfs_peer *con, const char *path, +- const char *name); +-int hmdfs_client_start_unlink(struct hmdfs_peer *con, const char *path, +- const char *name); +-int hmdfs_client_start_rename(struct hmdfs_peer *con, const char *old_path, +- const char *old_name, const char *new_path, +- const char *new_name, unsigned int flags); +- +-static inline bool hmdfs_is_offline_err(int err) +-{ +- /* +- * writepage() will get -EBADF if peer is online +- * again during offline stash, and -EBADF also +- * needs redo. +- */ +- return (err == -EAGAIN || err == -ESHUTDOWN || err == -EBADF); +-} +- +-static inline bool hmdfs_is_offline_or_timeout_err(int err) +-{ +- return hmdfs_is_offline_err(err) || err == -ETIME; +-} +- +-static inline bool hmdfs_need_redirty_page(const struct hmdfs_inode_info *info, +- int err) +-{ +- /* +- * 1. stash is enabled +- * 2. offline related error +- * 3. 
no restore +- */ +- return hmdfs_is_stash_enabled(info->conn->sbi) && +- hmdfs_is_offline_err(err) && +- READ_ONCE(info->stash_status) != HMDFS_REMOTE_INODE_RESTORING; +-} +- +-bool hmdfs_usr_sig_pending(struct task_struct *p); +-void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, +- const struct hmdfs_resp *resp); +-int hmdfs_client_writepage(struct hmdfs_peer *con, +- struct hmdfs_writepage_context *param); +-int hmdfs_remote_do_writepage(struct hmdfs_peer *con, +- struct hmdfs_writepage_context *ctx); +-void hmdfs_remote_writepage_retry(struct work_struct *work); +- +-void hmdfs_client_writepage_done(struct hmdfs_inode_info *info, +- struct hmdfs_writepage_context *ctx); +- +-int hmdfs_send_open(struct hmdfs_peer *con, const char *send_buf, +- __u8 file_type, struct hmdfs_open_ret *open_ret); +-void hmdfs_send_close(struct hmdfs_peer *con, const struct hmdfs_fid *fid); +-int hmdfs_send_fsync(struct hmdfs_peer *con, const struct hmdfs_fid *fid, +- __s64 start, __s64 end, __s32 datasync); +-int hmdfs_client_readpage(struct hmdfs_peer *con, const struct hmdfs_fid *fid, +- struct page *page); +- +-int hmdfs_send_setattr(struct hmdfs_peer *con, const char *send_buf, +- struct setattr_info *attr_info); +-int hmdfs_send_getattr(struct hmdfs_peer *con, const char *send_buf, +- unsigned int lookup_flags, +- struct hmdfs_getattr_ret *getattr_result); +-int hmdfs_send_statfs(struct hmdfs_peer *con, const char *path, +- struct kstatfs *buf); +-void hmdfs_client_recv_readpage(struct hmdfs_head_cmd *head, int err, +- struct hmdfs_async_work *async_work); +-int hmdfs_send_syncfs(struct hmdfs_peer *con, int syncfs_timeout); +-int hmdfs_send_getxattr(struct hmdfs_peer *con, const char *send_buf, +- const char *name, void *value, size_t size); +-int hmdfs_send_setxattr(struct hmdfs_peer *con, const char *send_buf, +- const char *name, const void *val, +- size_t size, int flags); +-ssize_t hmdfs_send_listxattr(struct hmdfs_peer *con, const char *send_buf, +- char *list, size_t size); +-void hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req, +- const struct hmdfs_resp *resp); +- +-void __init hmdfs_client_add_node_evt_cb(void); +-#endif +diff --git a/fs/hmdfs/hmdfs_dentryfile.c b/fs/hmdfs/hmdfs_dentryfile.c +deleted file mode 100644 +index fda2f034b..000000000 +--- a/fs/hmdfs/hmdfs_dentryfile.c ++++ /dev/null +@@ -1,2890 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/hmdfs_dentryfile.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */
+-
+-#include "hmdfs_dentryfile.h"
+-
+-#include <linux/ctype.h>
+-#include <linux/err.h>
+-#include <linux/file.h>
+-#include <linux/fs.h>
+-#include <linux/mount.h>
+-#include <linux/namei.h>
+-#include <linux/slab.h>
+-#include <linux/xattr.h>
+-
+-#include "authority/authentication.h"
+-#include "comm/transport.h"
+-#include "hmdfs_client.h"
+-#include "hmdfs_device_view.h"
+-#include "hmdfs_merge_view.h"
+-
+-/* Hashing code copied from f2fs */
+-#define HMDFS_HASH_COL_BIT ((0x1ULL) << 63)
+-#define DELTA 0x9E3779B9
+-
+-static void str2hashbuf(const unsigned char *msg, size_t len, unsigned int *buf,
+-                        int num, bool case_sense)
+-{
+-        unsigned int pad, val;
+-        int i;
+-        unsigned char c;
+-
+-        pad = (__u32)len | ((__u32)len << 8);
+-        pad |= pad << 16;
+-
+-        val = pad;
+-        if (len > (size_t)num * 4)
+-                len = (size_t)num * 4;
+-        for (i = 0; i < len; i++) {
+-                if ((i % 4) == 0)
+-                        val = pad;
+-                c = msg[i];
+-                if (!case_sense)
+-                        c = tolower(c);
+-                val = c + (val << 8);
+-                if ((i % 4) == 3) {
+-                        *buf++ = val;
+-                        val = pad;
+-                        num--;
+-                }
+-        }
+-        if (--num >= 0)
+-                *buf++ = val;
+-        while (--num >= 0)
+-                *buf++ = pad;
+-}
+-
+-static void tea_transform(unsigned int buf[4], unsigned int const in[])
+-{
+-        __u32 sum = 0;
+-        __u32 b0 = buf[0], b1 = buf[1];
+-        __u32 a = in[0], b = in[1], c = in[2], d = in[3];
+-        int n = 16;
+-
+-        do {
+-                sum += DELTA;
+-                b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
+-                b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
+-        } while (--n);
+-
+-        buf[0] += b0;
+-        buf[1] += b1;
+-}
+-
+-__u32 hmdfs_dentry_hash(const struct qstr *qstr, bool case_sense)
+-{
+-        __u32 hash;
+-        __u32 hmdfs_hash;
+-        const unsigned char *p = qstr->name;
+-        __u32 len = qstr->len;
+-        __u32 in[8], buf[4];
+-
+-        if (is_dot_dotdot(p, len))
+-                return 0;
+-
+-        /* Initialize the default seed for the hash checksum functions */
+-        buf[0] = 0x67452301;
+-        buf[1] = 0xefcdab89;
+-        buf[2] = 0x98badcfe;
+-        buf[3] = 0x10325476;
+-
+-        while (1) {
+-                str2hashbuf(p, len, in, 4, case_sense);
+-                tea_transform(buf, in);
+-                p += 16;
+-                if (len <= 16)
+-                        break;
+-                len -= 16;
+-        }
+-        hash = buf[0];
+-        hmdfs_hash = hash & ~HMDFS_HASH_COL_BIT;
+-        return hmdfs_hash;
+-}
+-
+-static atomic_t curr_ino = ATOMIC_INIT(INUNUMBER_START);
+-int get_inonumber(void)
+-{
+-        return atomic_inc_return(&curr_ino);
+-}
+-
+-static int hmdfs_get_root_dentry_type(struct dentry *dentry, int *is_root)
+-{
+-        struct hmdfs_dentry_info *d_info = hmdfs_d(dentry);
+-
+-        *is_root = 1;
+-        switch (d_info->dentry_type) {
+-        case HMDFS_LAYER_OTHER_LOCAL:
+-                *is_root = 0;
+-                fallthrough;
+-        case HMDFS_LAYER_SECOND_LOCAL:
+-                return HMDFS_LAYER_SECOND_LOCAL;
+-        case HMDFS_LAYER_OTHER_CLOUD:
+-                *is_root = 0;
+-                fallthrough;
+-        case HMDFS_LAYER_SECOND_CLOUD:
+-                return HMDFS_LAYER_SECOND_CLOUD;
+-        case HMDFS_LAYER_OTHER_REMOTE:
+-                *is_root = 0;
+-                fallthrough;
+-        case HMDFS_LAYER_SECOND_REMOTE:
+-                return HMDFS_LAYER_SECOND_REMOTE;
+-        default:
+-                hmdfs_info("Unexpected dentry type %d", d_info->dentry_type);
+-                return -EINVAL;
+-        }
+-}
+-
+-static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+-{
+-        *buflen -= namelen;
+-        if (*buflen < 0)
+-                return -ENAMETOOLONG;
+-        *buffer -= namelen;
+-        memcpy(*buffer, str, namelen);
+-        return 0;
+-}
+-
+-static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
+-{
+-        const char *dname = name->name;
+-        u32 dlen = name->len;
+-        char *p = NULL;
+-
+-        *buflen -= dlen + 1;
+-        if (*buflen < 0)
+-                return -ENAMETOOLONG;
+-        p = *buffer -= dlen + 1;
+-        *p++ = '/';
+-        while (dlen--) {
+-                char c = *dname++;
+-
+-                if (!c)
+-                        break;
+-                *p++ = c;
+-        }
+-        return 0;
+-}
+-
+-static char
*hmdfs_dentry_path_raw(struct dentry *d, char *buf, int buflen) +-{ +- struct dentry *dentry = NULL; +- char *end = NULL; +- char *retval = NULL; +- unsigned int len; +- unsigned int seq = 0; +- int root_flag = 0; +- int error = 0; +- struct hmdfs_dentry_info *di = hmdfs_d(d); +- int hmdfs_root_dentry_type = 0; +- +- di->time = jiffies; +- hmdfs_root_dentry_type = hmdfs_get_root_dentry_type(d, &root_flag); +- if (hmdfs_root_dentry_type < 0) +- return NULL; +- if (root_flag) { +- strcpy(buf, "/"); +- return buf; +- } +- rcu_read_lock(); +-restart: +- dentry = d; +- di = hmdfs_d(dentry); +- di->time = jiffies; +- end = buf + buflen; +- len = buflen; +- prepend(&end, &len, "\0", 1); +- retval = end - 1; +- *retval = '/'; +- read_seqbegin_or_lock(&rename_lock, &seq); +- while (di->dentry_type != hmdfs_root_dentry_type) { +- struct dentry *parent = dentry->d_parent; +- +- prefetch(parent); +- error = prepend_name(&end, &len, &dentry->d_name); +- if (error) +- break; +- retval = end; +- dentry = parent; +- di = hmdfs_d(dentry); +- di->time = jiffies; +- } +- if (!(seq & 1)) +- rcu_read_unlock(); +- if (need_seqretry(&rename_lock, seq)) { +- seq = 1; +- goto restart; +- } +- done_seqretry(&rename_lock, seq); +- if (error) +- goto Elong; +- return retval; +-Elong: +- return ERR_PTR(-ENAMETOOLONG); +-} +- +-char *hmdfs_get_dentry_relative_path(struct dentry *dentry) +-{ +- char *final_buf = NULL; +- char *buf = NULL; +- char *p = NULL; +- +- buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!buf) +- return NULL; +- +- final_buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!final_buf) { +- kfree(buf); +- return NULL; +- } +- +- /* NULL dentry return root dir */ +- if (!dentry) { +- strcpy(final_buf, "/"); +- kfree(buf); +- return final_buf; +- } +- p = hmdfs_dentry_path_raw(dentry, buf, PATH_MAX); +- if (IS_ERR_OR_NULL(p)) { +- kfree(buf); +- kfree(final_buf); +- return NULL; +- } +- +- if (strlen(p) >= PATH_MAX) { +- kfree(buf); +- kfree(final_buf); +- return NULL; +- } +- strcpy(final_buf, p); +- kfree(buf); +- return final_buf; +-} +- +-static char *hmdfs_merge_dentry_path_raw(struct dentry *d, char *buf, int buflen) +-{ +- struct dentry *dentry = NULL; +- char *end = NULL; +- char *retval = NULL; +- unsigned int len; +- unsigned int seq = 0; +- int error = 0; +- struct hmdfs_dentry_info_merge *mdi = NULL; +- +- rcu_read_lock(); +-restart: +- mdi = hmdfs_dm(d); +- dentry = d; +- end = buf + buflen; +- len = buflen; +- prepend(&end, &len, "\0", 1); +- retval = end - 1; +- *retval = '/'; +- read_seqbegin_or_lock(&rename_lock, &seq); +- while (mdi->dentry_type != HMDFS_LAYER_FIRST_MERGE && +- mdi->dentry_type != HMDFS_LAYER_FIRST_MERGE_CLOUD) { +- struct dentry *parent = dentry->d_parent; +- +- prefetch(parent); +- error = prepend_name(&end, &len, &dentry->d_name); +- if (error) +- break; +- retval = end; +- dentry = parent; +- mdi = hmdfs_dm(dentry); +- } +- if (!(seq & 1)) +- rcu_read_unlock(); +- if (need_seqretry(&rename_lock, seq)) { +- seq = 1; +- goto restart; +- } +- done_seqretry(&rename_lock, seq); +- if (error) +- goto Elong; +- return retval; +-Elong: +- return ERR_PTR(-ENAMETOOLONG); +-} +- +-char *hmdfs_merge_get_dentry_relative_path(struct dentry *dentry) +-{ +- char *final_buf = NULL; +- char *buf = NULL; +- char *p = NULL; +- +- buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!buf) +- return NULL; +- +- final_buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!final_buf) { +- kfree(buf); +- return NULL; +- } +- +- /* NULL dentry return root dir */ +- if (!dentry) { +- strcpy(final_buf, "/"); +- 
kfree(buf); +- return final_buf; +- } +- p = hmdfs_merge_dentry_path_raw(dentry, buf, PATH_MAX); +- if (IS_ERR_OR_NULL(p)) { +- kfree(buf); +- kfree(final_buf); +- return NULL; +- } +- +- if (strlen(p) >= PATH_MAX) { +- kfree(buf); +- kfree(final_buf); +- return NULL; +- } +- strcpy(final_buf, p); +- kfree(buf); +- return final_buf; +-} +- +-char *hmdfs_get_dentry_absolute_path(const char *rootdir, +- const char *relative_path) +-{ +- char *buf = 0; +- +- if (!rootdir || !relative_path) +- return NULL; +- if (strlen(rootdir) + strlen(relative_path) >= PATH_MAX) +- return NULL; +- +- buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!buf) +- return NULL; +- +- strcpy(buf, rootdir); +- strcat(buf, relative_path); +- return buf; +-} +- +-char *hmdfs_connect_path(const char *path, const char *name) +-{ +- char *buf = 0; +- size_t path_len, name_len; +- +- if (!path || !name) +- return NULL; +- +- path_len = strnlen(path, PATH_MAX); +- name_len = strnlen(name, PATH_MAX); +- if (path_len + name_len + 1 >= PATH_MAX) +- return NULL; +- +- buf = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!buf) +- return NULL; +- +- strncpy(buf, path, path_len); +- strcat(buf, "/"); +- strncat(buf, name, name_len); +- return buf; +-} +- +-int hmdfs_metainfo_read_nocred(struct file *filp, +- void *buffer, int size, int bidx) +-{ +- loff_t pos = get_dentry_group_pos(bidx); +- +- return kernel_read(filp, buffer, (size_t)size, &pos); +-} +- +-int hmdfs_metainfo_read(struct hmdfs_sb_info *sbi, struct file *filp, +- void *buffer, int size, int bidx) +-{ +- loff_t pos = get_dentry_group_pos(bidx); +- +- return cache_file_read(sbi, filp, buffer, (size_t)size, &pos); +-} +- +-int hmdfs_metainfo_write(struct hmdfs_sb_info *sbi, struct file *filp, +- const void *buffer, int size, int bidx) +-{ +- loff_t pos = get_dentry_group_pos(bidx); +- +- return cache_file_write(sbi, filp, buffer, (size_t)size, &pos); +-} +- +-/* for each level */ +-/* bucketseq start offset by 0,for example +- * level0 bucket0(0) +- * level1 bucket0(1) bucket1(2) +- * level2 bucket0(3) bucket1(4) bucket2(5) bucket3(6) +- * return bucket number. 
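+- * i.e. linear bucket index = (2^level - 1) + offset, so level2/bucket1
+- * lands at (4 - 1) + 1 = 4, matching the table above; each bucket then
+- * spans BUCKET_BLOCKS consecutive dentry groups, so its first group is
+- * simply linear index * BUCKET_BLOCKS.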
+- */ +-__u64 get_bucketaddr(unsigned int level, __u64 buckoffset) +-{ +- __u64 all_level_bucketaddr = 0; +- __u64 curlevelmaxbucks; +- +- if (level >= MAX_BUCKET_LEVEL) { +- hmdfs_err("level = %d overflow", level); +- return all_level_bucketaddr; +- } +- curlevelmaxbucks = ((__u64)1 << level); +- if (buckoffset >= curlevelmaxbucks) { +- hmdfs_err("buckoffset %llu overflow, level %d has %llu buckets max", +- buckoffset, level, curlevelmaxbucks); +- return all_level_bucketaddr; +- } +- all_level_bucketaddr = curlevelmaxbucks + buckoffset - 1; +- +- return all_level_bucketaddr; +-} +- +-__u64 get_bucket_by_level(unsigned int level) +-{ +- __u64 buckets = 0; +- +- if (level >= MAX_BUCKET_LEVEL) { +- hmdfs_err("level = %d overflow", level); +- return buckets; +- } +- +- buckets = ((__u64)1 << level); +- return buckets; +-} +- +-static __u64 get_overall_bucket(unsigned int level) +-{ +- __u64 buckets = 0; +- +- if (level >= MAX_BUCKET_LEVEL) { +- hmdfs_err("level = %d overflow", level); +- return buckets; +- } +- buckets = ((__u64)1 << (level + 1)) - 1; +- return buckets; +-} +- +-static inline loff_t get_dcache_file_size(unsigned int level) +-{ +- loff_t buckets = get_overall_bucket(level); +- +- return buckets * DENTRYGROUP_SIZE * BUCKET_BLOCKS + DENTRYGROUP_HEADER; +-} +- +-static char *get_relative_path(struct hmdfs_sb_info *sbi, char *from) +-{ +- char *relative; +- +- if (strncmp(from, sbi->local_src, strlen(sbi->local_src))) { +- hmdfs_warning("orig path do not start with local_src"); +- return NULL; +- } +- relative = from + strlen(sbi->local_src); +- if (*relative == '/') +- relative++; +- return relative; +-} +- +-struct file *hmdfs_get_or_create_dents(struct hmdfs_sb_info *sbi, char *name) +-{ +- struct path root_path, path; +- struct file *filp = NULL; +- char *relative; +- int err; +- +- err = kern_path(sbi->local_src, 0, &root_path); +- if (err) { +- hmdfs_err("kern_path failed err = %d", err); +- return NULL; +- } +- relative = get_relative_path(sbi, name); +- if (!relative) { +- hmdfs_err("get relative path failed"); +- goto err_root_path; +- } +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, relative, 0, +- &path); +- if (err) { +- hmdfs_err("lookup failed err = %d", err); +- goto err_root_path; +- } +- +- filp = hmdfs_server_cache_revalidate(sbi, relative, &path); +- if (IS_ERR_OR_NULL(filp)) { +- filp = hmdfs_server_rebuild_dents(sbi, &path, NULL, relative); +- if (IS_ERR_OR_NULL(filp)) +- goto err_lookup_path; +- } +- +-err_lookup_path: +- path_put(&path); +-err_root_path: +- path_put(&root_path); +- return filp; +-} +- +-/* read all dentry in target path directory */ +-int read_dentry(struct hmdfs_sb_info *sbi, char *file_name, +- struct dir_context *ctx) +-{ +- unsigned long pos = (unsigned long)(ctx->pos); +- unsigned long group_id = (pos << (1 + DEV_ID_BIT_NUM)) >> +- (POS_BIT_NUM - GROUP_ID_BIT_NUM); +- unsigned long offset = pos & OFFSET_BIT_MASK; +- struct hmdfs_dentry_group *dentry_group = NULL; +- struct file *handler = NULL; +- int group_num = 0; +- int iterate_result = 0; +- int i, j; +- const struct cred *saved_cred; +- +- saved_cred = hmdfs_override_fsids(false); +- if (!saved_cred) { +- hmdfs_err("prepare cred failed!"); +- return -ENOMEM; +- } +- +- +- if (!file_name) +- return -EINVAL; +- +- dentry_group = kzalloc(sizeof(*dentry_group), GFP_KERNEL); +- if (!dentry_group) +- return -ENOMEM; +- +- handler = hmdfs_get_or_create_dents(sbi, file_name); +- if (IS_ERR_OR_NULL(handler)) { +- kfree(dentry_group); +- return -ENOENT; +- } +- +- group_num = 
get_dentry_group_cnt(file_inode(handler)); +- +- for (i = group_id; i < group_num; i++) { +- hmdfs_metainfo_read(sbi, handler, dentry_group, +- sizeof(struct hmdfs_dentry_group), i); +- for (j = offset; j < DENTRY_PER_GROUP; j++) { +- int len; +- int file_type = 0; +- bool is_continue; +- +- len = le16_to_cpu(dentry_group->nsl[j].namelen); +- if (!test_bit_le(j, dentry_group->bitmap) || len == 0) +- continue; +- +- if (S_ISDIR(le16_to_cpu(dentry_group->nsl[j].i_mode))) +- file_type = DT_DIR; +- else if (S_ISREG(le16_to_cpu( +- dentry_group->nsl[j].i_mode))) +- file_type = DT_REG; +- else if (S_ISLNK(le16_to_cpu( +- dentry_group->nsl[j].i_mode))) +- file_type = DT_LNK; +- else +- continue; +- +- pos = hmdfs_set_pos(0, i, j); +- is_continue = dir_emit( +- ctx, dentry_group->filename[j], len, +- le64_to_cpu(dentry_group->nsl[j].i_ino), +- file_type); +- if (!is_continue) { +- ctx->pos = pos; +- iterate_result = 1; +- goto done; +- } +- } +- offset = 0; +- } +- +-done: +- hmdfs_revert_fsids(saved_cred); +- kfree(dentry_group); +- fput(handler); +- return iterate_result; +-} +- +-unsigned int get_max_depth(struct file *filp) +-{ +- size_t isize; +- +- isize = get_dentry_group_cnt(file_inode(filp)) / BUCKET_BLOCKS; +- +- return get_count_order(isize + 1); +-} +- +-struct hmdfs_dentry_group *find_dentry_page(struct hmdfs_sb_info *sbi, +- pgoff_t index, struct file *filp) +-{ +- int size; +- struct hmdfs_dentry_group *dentry_blk = NULL; +- loff_t pos = get_dentry_group_pos(index); +- int err; +- +- dentry_blk = kmalloc(sizeof(*dentry_blk), GFP_KERNEL); +- if (!dentry_blk) +- return NULL; +- +- err = hmdfs_wlock_file(filp, pos, DENTRYGROUP_SIZE); +- if (err) { +- hmdfs_err("lock file pos %lld failed", pos); +- kfree(dentry_blk); +- return NULL; +- } +- +- size = cache_file_read(sbi, filp, dentry_blk, (size_t)DENTRYGROUP_SIZE, +- &pos); +- if (size != DENTRYGROUP_SIZE) { +- hmdfs_unlock_file(filp, pos, DENTRYGROUP_SIZE); +- kfree(dentry_blk); +- dentry_blk = NULL; +- } +- +- return dentry_blk; +-} +- +-static ssize_t write_dentry_page(struct file *filp, const void *buffer, +- int buffersize, loff_t position) +-{ +- ssize_t size; +- +- size = kernel_write(filp, buffer, (size_t)buffersize, &position); +- if (size != buffersize) +- hmdfs_err("write failed, ret = %zd", size); +- +- return size; +-} +- +-static struct hmdfs_dentry *find_in_block(struct hmdfs_dentry_group *dentry_blk, +- __u32 namehash, +- const struct qstr *qstr, +- struct hmdfs_dentry **insense_de, +- bool case_sense) +-{ +- struct hmdfs_dentry *de; +- unsigned long bit_pos = 0; +- int max_len = 0; +- +- while (bit_pos < DENTRY_PER_GROUP) { +- if (!test_bit_le(bit_pos, dentry_blk->bitmap)) { +- bit_pos++; +- max_len++; +- continue; +- } +- de = &dentry_blk->nsl[bit_pos]; +- if (unlikely(!de->namelen)) { +- bit_pos++; +- continue; +- } +- +- if (le32_to_cpu(de->hash) == namehash && +- le16_to_cpu(de->namelen) == qstr->len && +- !memcmp(qstr->name, dentry_blk->filename[bit_pos], +- le16_to_cpu(de->namelen))) +- goto found; +- if (!(*insense_de) && !case_sense && +- le32_to_cpu(de->hash) == namehash && +- le16_to_cpu(de->namelen) == qstr->len && +- str_n_case_eq(qstr->name, dentry_blk->filename[bit_pos], +- le16_to_cpu(de->namelen))) +- *insense_de = de; +- max_len = 0; +- bit_pos += get_dentry_slots(le16_to_cpu(de->namelen)); +- } +- de = NULL; +-found: +- return de; +-} +- +-static struct hmdfs_dentry *hmdfs_in_level(struct dentry *child_dentry, +- unsigned int level, +- struct hmdfs_dcache_lookup_ctx *ctx) +-{ +- unsigned long nbucket; 
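+-        /*
+-         * Scan the BUCKET_BLOCKS dentry groups of the bucket this name
+-         * hashes into at the given level. An exact match returns at once;
+-         * on case-insensitive mounts the first case-insensitive hit is
+-         * stashed in ctx (its group left locked) as a fallback for
+-         * hmdfs_find_dentry().
+-         */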
+- unsigned long bidx, end_block; +- struct hmdfs_dentry *de = NULL; +- struct hmdfs_dentry *tmp_insense_de = NULL; +- struct hmdfs_dentry_group *dentry_blk; +- +- nbucket = get_bucket_by_level(level); +- if (!nbucket) +- return de; +- +- bidx = get_bucketaddr(level, ctx->hash % nbucket) * BUCKET_BLOCKS; +- end_block = bidx + BUCKET_BLOCKS; +- +- for (; bidx < end_block; bidx++) { +- dentry_blk = find_dentry_page(ctx->sbi, bidx, ctx->filp); +- if (!dentry_blk) +- break; +- +- de = find_in_block(dentry_blk, ctx->hash, ctx->name, +- &tmp_insense_de, ctx->sbi->s_case_sensitive); +- if (!de && !(ctx->insense_de) && tmp_insense_de) { +- ctx->insense_de = tmp_insense_de; +- ctx->insense_page = dentry_blk; +- ctx->insense_bidx = bidx; +- } else if (!de) { +- hmdfs_unlock_file(ctx->filp, get_dentry_group_pos(bidx), +- DENTRYGROUP_SIZE); +- kfree(dentry_blk); +- } else { +- ctx->page = dentry_blk; +- break; +- } +- } +- ctx->bidx = bidx; +- return de; +-} +- +-struct hmdfs_dentry *hmdfs_find_dentry(struct dentry *child_dentry, +- struct hmdfs_dcache_lookup_ctx *ctx) +-{ +- struct hmdfs_dentry *de = NULL; +- unsigned int max_depth; +- unsigned int level; +- +- if (!ctx->filp) +- return NULL; +- +- ctx->hash = hmdfs_dentry_hash(ctx->name, ctx->sbi->s_case_sensitive); +- +- max_depth = get_max_depth(ctx->filp); +- for (level = 0; level < max_depth; level++) { +- de = hmdfs_in_level(child_dentry, level, ctx); +- if (de) { +- if (ctx->insense_page) { +- hmdfs_unlock_file(ctx->filp, +- get_dentry_group_pos(ctx->insense_bidx), +- DENTRYGROUP_SIZE); +- kfree(ctx->insense_page); +- ctx->insense_page = NULL; +- } +- return de; +- } +- } +- if (ctx->insense_de) { +- ctx->bidx = ctx->insense_bidx; +- ctx->page = ctx->insense_page; +- ctx->insense_bidx = 0; +- ctx->insense_page = NULL; +- } +- return ctx->insense_de; +-} +- +-void update_dentry(struct hmdfs_dentry_group *d, struct dentry *child_dentry, +- struct inode *inode, struct super_block *hmdfs_sb, +- __u32 name_hash, unsigned int bit_pos) +-{ +- struct hmdfs_dentry *de; +- struct hmdfs_dentry_info *gdi; +- const struct qstr name = child_dentry->d_name; +- int slots = get_dentry_slots(name.len); +- int i; +- unsigned long ino; +- __u32 igen; +- +- gdi = hmdfs_sb == child_dentry->d_sb ? 
hmdfs_d(child_dentry) : NULL; +- if (!gdi && S_ISLNK(d_inode(child_dentry)->i_mode)) { +- ino = d_inode(child_dentry)->i_ino; +- igen = d_inode(child_dentry)->i_generation; +- } else { +- ino = inode->i_ino; +- igen = inode->i_generation; +- } +- +- de = &d->nsl[bit_pos]; +- de->hash = cpu_to_le32(name_hash); +- de->namelen = cpu_to_le16(name.len); +- memcpy(d->filename[bit_pos], name.name, name.len); +- de->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); +- de->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); +- de->i_size = cpu_to_le64(inode->i_size); +- de->i_ino = cpu_to_le64(generate_u64_ino(ino, igen)); +- de->i_flag = 0; +- if (gdi && hm_islnk(gdi->file_type)) +- de->i_mode = cpu_to_le16(S_IFLNK); +- else if (!gdi && S_ISLNK(d_inode(child_dentry)->i_mode)) +- de->i_mode = d_inode(child_dentry)->i_mode; +- else +- de->i_mode = cpu_to_le16(inode->i_mode); +- +- for (i = 0; i < slots; i++) { +- __set_bit_le(bit_pos + i, d->bitmap); +- /* avoid wrong garbage data for readdir */ +- if (i) +- (de + i)->namelen = 0; +- } +-} +- +-int room_for_filename(const void *bitmap, int slots, int max_slots) +-{ +- int bit_start = 0; +- int zero_start, zero_end; +-next: +- zero_start = find_next_zero_bit_le(bitmap, max_slots, bit_start); +- if (zero_start >= max_slots) +- return max_slots; +- +- zero_end = find_next_bit_le(bitmap, max_slots, zero_start); +- if (zero_end - zero_start >= slots) +- return zero_start; +- +- bit_start = zero_end + 1; +- +- if (zero_end + 1 >= max_slots) +- return max_slots; +- goto next; +-} +- +-void create_in_cache_file(uint64_t dev_id, struct dentry *dentry) +-{ +- struct clearcache_item *item = NULL; +- +- item = hmdfs_find_cache_item(dev_id, dentry->d_parent); +- if (item) { +- if (d_inode(dentry)) +- create_dentry(dentry, d_inode(dentry), item->filp, +- hmdfs_sb(dentry->d_sb)); +- else +- hmdfs_err("inode is null!"); +- kref_put(&item->ref, release_cache_item); +- } else { +- hmdfs_info("find cache item failed, device_id:%llu", dev_id); +- } +-} +- +-int create_dentry(struct dentry *child_dentry, struct inode *inode, +- struct file *file, struct hmdfs_sb_info *sbi) +-{ +- unsigned int bit_pos, level; +- unsigned long bidx, end_block; +- const struct qstr qstr = child_dentry->d_name; +- __u32 namehash; +- loff_t pos; +- ssize_t size; +- int ret = 0; +- struct hmdfs_dentry_group *dentry_blk = NULL; +- +- level = 0; +- +- namehash = hmdfs_dentry_hash(&qstr, sbi->s_case_sensitive); +- +- dentry_blk = kmalloc(sizeof(*dentry_blk), GFP_KERNEL); +- if (!dentry_blk) { +- ret = -ENOMEM; +- goto out_err; +- } +-find: +- if (level == MAX_BUCKET_LEVEL) { +- ret = -ENOSPC; +- goto out; +- } +- bidx = BUCKET_BLOCKS * +- get_bucketaddr(level, namehash % get_bucket_by_level(level)); +- end_block = bidx + BUCKET_BLOCKS; +- if (end_block > get_dentry_group_cnt(file_inode(file))) { +- if (cache_file_truncate(sbi, &(file->f_path), +- get_dcache_file_size(level))) { +- ret = -ENOSPC; +- goto out; +- } +- } +- +- for (; bidx < end_block; bidx++) { +- int size; +- +- pos = get_dentry_group_pos(bidx); +- ret = hmdfs_wlock_file(file, pos, DENTRYGROUP_SIZE); +- if (ret) +- goto out; +- +- size = cache_file_read(sbi, file, dentry_blk, +- (size_t)DENTRYGROUP_SIZE, &pos); +- if (size != DENTRYGROUP_SIZE) { +- ret = -ENOSPC; +- hmdfs_unlock_file(file, pos, DENTRYGROUP_SIZE); +- goto out; +- } +- +- bit_pos = room_for_filename(&dentry_blk->bitmap, +- get_dentry_slots(qstr.len), +- DENTRY_PER_GROUP); +- if (bit_pos < DENTRY_PER_GROUP) +- goto add; +- hmdfs_unlock_file(file, pos, DENTRYGROUP_SIZE); 
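+-                /*
+-                 * This group is full; try the bucket's remaining blocks.
+-                 * Once every block at this level is exhausted, fall through
+-                 * to the "goto find" below, which grows the cache file and
+-                 * retries one level deeper.
+-                 */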
+- } +- ++level; +- goto find; +-add: +- pos = get_dentry_group_pos(bidx); +- update_dentry(dentry_blk, child_dentry, inode, sbi->sb, namehash, +- bit_pos); +- size = cache_file_write(sbi, file, dentry_blk, +- sizeof(struct hmdfs_dentry_group), &pos); +- if (size != sizeof(struct hmdfs_dentry_group)) +- hmdfs_err("cache file write failed!, ret = %zd", size); +- hmdfs_unlock_file(file, pos, DENTRYGROUP_SIZE); +-out: +- kfree(dentry_blk); +-out_err: +- return ret; +-} +- +-void hmdfs_init_dcache_lookup_ctx(struct hmdfs_dcache_lookup_ctx *ctx, +- struct hmdfs_sb_info *sbi, +- const struct qstr *qstr, struct file *filp) +-{ +- ctx->sbi = sbi; +- ctx->name = qstr; +- ctx->filp = filp; +- ctx->bidx = 0; +- ctx->page = NULL; +- ctx->insense_de = NULL; +- ctx->insense_bidx = 0; +- ctx->insense_page = NULL; +-} +- +-int update_inode_to_dentry(struct dentry *child_dentry, struct inode *inode) +-{ +- struct hmdfs_sb_info *sbi = d_inode(child_dentry)->i_sb->s_fs_info; +- struct hmdfs_dentry *de = NULL; +- loff_t ipos; +- struct dentry *parent_dentry; +- struct cache_file_node *cfn = NULL; +- char *relative_path = NULL; +- struct hmdfs_dcache_lookup_ctx ctx; +- +- parent_dentry = child_dentry->d_parent; +- if (hmdfs_d(parent_dentry)->dentry_type == HMDFS_LAYER_FIRST_DEVICE) +- return 0; +- +- relative_path = hmdfs_get_dentry_relative_path(parent_dentry); +- if (!relative_path) +- return -ENOMEM; +- +- cfn = find_cfn(sbi, HMDFS_SERVER_CID, relative_path, true); +- if (!cfn) +- goto out; +- +- hmdfs_init_dcache_lookup_ctx(&ctx, sbi, &child_dentry->d_name, +- cfn->filp); +- de = hmdfs_find_dentry(child_dentry, &ctx); +- if (!de) +- goto out_cfn; +- +- de->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); +- de->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); +- de->i_size = cpu_to_le64(inode->i_size); +- de->i_ino = cpu_to_le64( +- generate_u64_ino(inode->i_ino, inode->i_generation)); +- de->i_flag = 0; +- +- ipos = get_dentry_group_pos(ctx.bidx); +- write_dentry_page(cfn->filp, ctx.page, +- sizeof(struct hmdfs_dentry_group), ipos); +- hmdfs_unlock_file(cfn->filp, ipos, DENTRYGROUP_SIZE); +- kfree(ctx.page); +-out_cfn: +- release_cfn(cfn); +-out: +- kfree(relative_path); +- return 0; +-} +- +-void hmdfs_delete_dentry(struct dentry *d, struct file *filp) +-{ +- struct hmdfs_dentry *de = NULL; +- unsigned int bit_pos; +- int slots, i; +- loff_t ipos; +- ssize_t size; +- struct hmdfs_dcache_lookup_ctx ctx; +- +- hmdfs_init_dcache_lookup_ctx(&ctx, hmdfs_sb(d->d_sb), &d->d_name, filp); +- +- de = hmdfs_find_dentry(d, &ctx); +- if (IS_ERR_OR_NULL(de)) { +- hmdfs_info("find dentry failed!, err=%ld", PTR_ERR(de)); +- return; +- } +- slots = get_dentry_slots(le16_to_cpu(de->namelen)); +- +- bit_pos = de - ctx.page->nsl; +- for (i = 0; i < slots; i++) +- __clear_bit_le(bit_pos + i, &ctx.page->bitmap); +- +- ipos = get_dentry_group_pos(ctx.bidx); +- size = cache_file_write(hmdfs_sb(d->d_sb), filp, ctx.page, +- sizeof(struct hmdfs_dentry_group), &ipos); +- if (size != sizeof(struct hmdfs_dentry_group)) +- hmdfs_err("cache file write failed!, ret = %zd", size); +- hmdfs_unlock_file(filp, ipos, DENTRYGROUP_SIZE); +- kfree(ctx.page); +-} +- +-static int hmdfs_get_cache_path(struct hmdfs_sb_info *sbi, struct path *dir) +-{ +- struct hmdfs_dentry_info *di = hmdfs_d(sbi->sb->s_root); +- int err; +- +- if (!sbi->s_dentry_cache) { +- *dir = di->lower_path; +- return 0; +- } +- +- err = kern_path(sbi->cache_dir, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, dir); +- if (err) +- hmdfs_err("open failed, errno = %d", err); +- +- return err; 
+-} +- +-static void hmdfs_put_cache_path(struct hmdfs_sb_info *sbi, struct path *dir) +-{ +- if (!sbi->s_dentry_cache) +- return; +- path_put(dir); +-} +- +-struct file *create_local_dentry_file_cache(struct hmdfs_sb_info *sbi) +-{ +- struct file *filp = NULL; +- const struct cred *old_cred = hmdfs_override_creds(sbi->system_cred); +- struct path cache_dir; +- int err; +- +- err = hmdfs_get_cache_path(sbi, &cache_dir); +- if (err) { +- filp = ERR_PTR(err); +- goto out; +- } +- +- filp = file_open_root(&cache_dir, ".", +- O_RDWR | O_LARGEFILE | O_TMPFILE, +- DENTRY_FILE_PERM); +- if (IS_ERR(filp)) +- hmdfs_err("dentryfile open failed and exit err=%ld", +- PTR_ERR(filp)); +- +- hmdfs_put_cache_path(sbi, &cache_dir); +-out: +- hmdfs_revert_creds(old_cred); +- return filp; +-} +- +-static int hmdfs_linkat(struct path *old_path, const char *newname) +-{ +- struct dentry *new_dentry = NULL; +- struct path new_path; +- int error; +- +- new_dentry = kern_path_create(AT_FDCWD, newname, &new_path, 0); +- if (IS_ERR(new_dentry)) { +- hmdfs_err("create kernel path failed, error: %ld", +- PTR_ERR(new_dentry)); +- return PTR_ERR(new_dentry); +- } +- +- error = -EXDEV; +- if (old_path->mnt != new_path.mnt) +- goto out_dput; +- +- error = vfs_link(old_path->dentry, &nop_mnt_idmap, new_path.dentry->d_inode, new_dentry, +- NULL); +- +-out_dput: +- done_path_create(&new_path, new_dentry); +- return error; +-} +- +-static int cache_file_mkdir(const char *name, umode_t mode) +-{ +- struct dentry *dentry; +- struct path path; +- int err; +- +- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); +- if (IS_ERR(dentry)) +- return PTR_ERR(dentry); +- +- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode); +- if (err && err != -EEXIST) +- hmdfs_err("vfs_mkdir failed, err = %d", err); +- +- done_path_create(&path, dentry); +- return err; +-} +- +-static int cache_file_create_path(const char *fullpath) +-{ +- char *path; +- char *s; +- int err = 0; +- +- path = kstrdup(fullpath, GFP_KERNEL); +- if (!path) +- return -ENOMEM; +- +- s = path + 1; +- while (true) { +- s = strchr(s, '/'); +- if (!s) +- break; +- s[0] = '\0'; +- err = cache_file_mkdir(path, 0755); +- if (err && err != -EEXIST) +- break; +- s[0] = '/'; +- s++; +- } +- kfree(path); +- return err; +-} +- +-static void hmdfs_cache_path_create(char *s, const char *dir, bool server) +-{ +- if (server) +- snprintf(s, PATH_MAX, "%s/dentry_cache/server/", dir); +- else +- snprintf(s, PATH_MAX, "%s/dentry_cache/client/", dir); +-} +- +-static void concat_cachefile_name(char *s, uint64_t hash, const char *id, +- bool server) +-{ +- int offset = strlen(s); +- +- if (server) +- snprintf(s + offset, PATH_MAX - offset, "%016llx", hash); +- else +- snprintf(s + offset, PATH_MAX - offset, "%s_%016llx", id, hash); +-} +- +-int cache_file_name_generate(char *fullname, struct hmdfs_peer *con, +- const char *relative_path, bool server) +-{ +- struct hmdfs_sb_info *sbi = con->sbi; +- uint64_t hash; +- char cid[HMDFS_CFN_CID_SIZE]; +- int err; +- +- hmdfs_cache_path_create(fullname, sbi->cache_dir, server); +- +- err = cache_file_create_path(fullname); +- if (err && err != -EEXIST) { +- hmdfs_err("making dir failed %d", err); +- return err; +- } +- +- strncpy(cid, con->cid, HMDFS_CFN_CID_SIZE - 1); +- cid[HMDFS_CFN_CID_SIZE - 1] = '\0'; +- +- hash = path_hash(relative_path, strlen(relative_path), +- sbi->s_case_sensitive); +- concat_cachefile_name(fullname, hash, cid, server); +- +- return 0; +-} +- +-static void free_cfn(struct cache_file_node 
*cfn) +-{ +- if (!IS_ERR_OR_NULL(cfn->filp)) +- filp_close(cfn->filp, NULL); +- +- kfree(cfn->relative_path); +- kfree(cfn); +-} +- +-static bool path_cmp(const char *path1, const char *path2, bool case_sensitive) +-{ +- int ret; +- +- if (case_sensitive) +- ret = strcmp(path1, path2); +- else +- ret = strcasecmp(path1, path2); +- +- return !ret; +-} +- +-static bool dentry_file_match(struct cache_file_node *cfn, const char *id, +- const char *path) +-{ +- return (path_cmp(cfn->relative_path, path, cfn->sbi->s_case_sensitive) && +- !strncmp((cfn)->cid, id, HMDFS_CFN_CID_SIZE - 1)); +-} +- +-struct cache_file_node *__find_cfn(struct hmdfs_sb_info *sbi, const char *cid, +- const char *path, bool server) +-{ +- struct cache_file_node *cfn = NULL; +- struct list_head *head = NULL; +- +- head = get_list_head(sbi, server); +- +- list_for_each_entry(cfn, head, list) { +- if (dentry_file_match(cfn, cid, path)) { +- refcount_inc(&cfn->ref); +- return cfn; +- } +- } +- return NULL; +-} +- +-struct cache_file_node *create_cfn(struct hmdfs_sb_info *sbi, const char *path, +- const char *cid, bool server) +-{ +- struct cache_file_node *cfn = kzalloc(sizeof(*cfn), GFP_KERNEL); +- +- if (!cfn) +- return NULL; +- +- cfn->relative_path = kstrdup(path, GFP_KERNEL); +- if (!cfn->relative_path) +- goto out; +- +- refcount_set(&cfn->ref, 1); +- strncpy(cfn->cid, cid, HMDFS_CFN_CID_SIZE - 1); +- cfn->cid[HMDFS_CFN_CID_SIZE - 1] = '\0'; +- cfn->sbi = sbi; +- cfn->server = server; +- return cfn; +-out: +- free_cfn(cfn); +- return NULL; +-} +- +-static struct file *insert_cfn(struct hmdfs_sb_info *sbi, const char *filename, +- const char *path, const char *cid, bool server) +-{ +- const struct cred *old_cred = NULL; +- struct cache_file_node *cfn = NULL; +- struct cache_file_node *exist = NULL; +- struct list_head *head = NULL; +- struct file *filp = NULL; +- +- cfn = create_cfn(sbi, path, cid, server); +- if (!cfn) +- return ERR_PTR(-ENOMEM); +- +- old_cred = hmdfs_override_creds(sbi->system_cred); +- filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); +- hmdfs_revert_creds(old_cred); +- if (IS_ERR(filp)) { +- hmdfs_err("open file failed, err=%ld", PTR_ERR(filp)); +- goto out; +- } +- +- head = get_list_head(sbi, server); +- +- mutex_lock(&sbi->cache_list_lock); +- exist = __find_cfn(sbi, cid, path, server); +- if (!exist) { +- cfn->filp = filp; +- list_add_tail(&cfn->list, head); +- } else { +- mutex_unlock(&sbi->cache_list_lock); +- release_cfn(exist); +- filp_close(filp, NULL); +- filp = ERR_PTR(-EEXIST); +- goto out; +- } +- mutex_unlock(&sbi->cache_list_lock); +- return filp; +-out: +- free_cfn(cfn); +- return filp; +-} +- +-int hmdfs_rename_dentry(struct dentry *old_dentry, struct dentry *new_dentry, +- struct file *old_filp, struct file *new_filp) +-{ +- int ret; +- struct hmdfs_sb_info *sbi = hmdfs_sb(new_dentry->d_sb); +- +- /* +- * Try to delete first, because stale dentry might exist after +- * coverwrite. 
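+-         * ("coverwrite" here: a rename that overwrites an existing name,
+-         * which can leave the destination's old entry in the cache file).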
+- */ +- hmdfs_delete_dentry(new_dentry, new_filp); +- +- ret = create_dentry(new_dentry, d_inode(old_dentry), new_filp, sbi); +- if (ret) { +- hmdfs_err("create dentry failed!, err=%d", ret); +- return ret; +- } +- +- hmdfs_delete_dentry(old_dentry, old_filp); +- return 0; +-} +- +-/** +- * cache_file_persistent - link the tmpfile to the cache dir +- * @con: the connection peer +- * @filp: the file handler of the tmpfile +- * @relative_path: the relative path which the tmpfile belongs +- * @server: server or client +- * +- * Return value: the new file handler of the persistent file if the +- * persistent operation succeed. Otherwise will return the original handler +- * of the tmpfile passed in, so that the caller does not have to check +- * the returned handler. +- * +- */ +-struct file *cache_file_persistent(struct hmdfs_peer *con, struct file *filp, +- const char *relative_path, bool server) +-{ +- struct cache_file_node *cfn = NULL; +- char *fullname = NULL; +- char *cid = server ? HMDFS_SERVER_CID : (char *)con->cid; +- struct file *newf = NULL; +- int i = 0; +- int len; +- int err; +- +- if (!con->sbi->s_dentry_cache) +- return filp; +- +- cfn = find_cfn(con->sbi, cid, relative_path, server); +- if (cfn) { +- release_cfn(cfn); +- return filp; +- } +- fullname = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!fullname) +- return filp; +- +- err = cache_file_name_generate(fullname, con, relative_path, server); +- if (err) +- goto out; +- +- err = __vfs_setxattr(&nop_mnt_idmap, file_dentry(filp), file_inode(filp), +- DENTRY_FILE_XATTR_NAME, relative_path, +- strlen(relative_path), 0); +- if (err) { +- hmdfs_err("setxattr for file failed, err=%d", err); +- goto out; +- } +- +- len = strlen(fullname); +- +- do { +- err = hmdfs_linkat(&filp->f_path, fullname); +- if (!err) +- break; +- +- snprintf(fullname + len, PATH_MAX - len, "_%d", i); +- } while (i++ < DENTRY_FILE_NAME_RETRY); +- +- if (err) { +- hmdfs_err("link for file failed, err=%d", err); +- goto out; +- } +- +- newf = insert_cfn(con->sbi, fullname, relative_path, cid, server); +- if (!IS_ERR(newf)) +- filp = newf; +-out: +- kfree(fullname); +- return filp; +-} +- +-int get_cloud_cache_file(struct dentry *dentry, struct hmdfs_sb_info *sbi) +-{ +- int ret; +- ssize_t res; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- struct clearcache_item *item; +- struct file *filp = NULL; +- uint64_t hash; +- char *relative_path = NULL; +- char *dirname = NULL; +- char *fullname = NULL; +- char *cache_file_name = NULL; +- char *kvalue = NULL; +- +- item = hmdfs_find_cache_item(CLOUD_DEVICE, dentry); +- if (item) { +- kref_put(&item->ref, release_cache_item); +- return 0; +- } +- +- relative_path = hmdfs_get_dentry_relative_path(dentry); +- if (unlikely(!relative_path)) { +- hmdfs_err("get relative path failed %d", -ENOMEM); +- ret = -ENOMEM; +- goto out; +- } +- +- dirname = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!dirname) { +- ret = -ENOMEM; +- goto out; +- } +- +- cache_file_name = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!cache_file_name) { +- ret = -ENOMEM; +- goto out; +- } +- +- fullname = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!fullname) { +- ret = -ENOMEM; +- goto out; +- } +- +- kvalue = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!kvalue) { +- ret = -ENOMEM; +- goto out; +- } +- +- hash = path_hash(relative_path, strlen(relative_path), +- sbi->s_case_sensitive); +- concat_cachefile_name(cache_file_name, hash, CLOUD_CID, false); +- snprintf(dirname, PATH_MAX, "%s/dentry_cache/cloud/", +- sbi->cache_dir); +- snprintf(fullname, PATH_MAX, 
"%s%s", dirname, cache_file_name); +- +- filp = filp_open(fullname, O_RDWR | O_LARGEFILE, 0); +- if (IS_ERR(filp)) { +- hmdfs_debug("open fail %ld", PTR_ERR(filp)); +- ret = PTR_ERR(filp); +- goto out; +- } +- +- res = __vfs_getxattr(file_dentry(filp), file_inode(filp), +- DENTRY_FILE_XATTR_NAME, kvalue, PATH_MAX); +- if (res <= 0 || res >= PATH_MAX) { +- hmdfs_err("getxattr return: %zd", res); +- filp_close(filp, NULL); +- ret = -ENOENT; +- goto out; +- } +- kvalue[res] = '\0'; +- +- if (!path_cmp(relative_path, kvalue, sbi->s_case_sensitive)) { +- hmdfs_err("relative path from xattr do not match"); +- filp_close(filp, NULL); +- ret = -ENOENT; +- goto out; +- } +- +- mutex_lock(&d_info->cache_pull_lock); +- hmdfs_add_cache_list(CLOUD_DEVICE, dentry, filp); +- mutex_unlock(&d_info->cache_pull_lock); +- +- ret = 0; +-out: +- kfree(relative_path); +- kfree(dirname); +- kfree(fullname); +- kfree(cache_file_name); +- kfree(kvalue); +- +- return ret; +-} +- +-void __destroy_cfn(struct list_head *head) +-{ +- struct cache_file_node *cfn = NULL; +- struct cache_file_node *n = NULL; +- +- list_for_each_entry_safe(cfn, n, head, list) { +- list_del_init(&cfn->list); +- release_cfn(cfn); +- } +-} +- +-void hmdfs_cfn_destroy(struct hmdfs_sb_info *sbi) +-{ +- mutex_lock(&sbi->cache_list_lock); +- __destroy_cfn(&sbi->client_cache); +- __destroy_cfn(&sbi->server_cache); +- mutex_unlock(&sbi->cache_list_lock); +-} +- +-struct cache_file_node *find_cfn(struct hmdfs_sb_info *sbi, const char *cid, +- const char *path, bool server) +-{ +- struct cache_file_node *cfn = NULL; +- +- mutex_lock(&sbi->cache_list_lock); +- cfn = __find_cfn(sbi, cid, path, server); +- mutex_unlock(&sbi->cache_list_lock); +- return cfn; +-} +- +-void release_cfn(struct cache_file_node *cfn) +-{ +- if (refcount_dec_and_test(&cfn->ref)) +- free_cfn(cfn); +-} +- +-void remove_cfn(struct cache_file_node *cfn) +-{ +- struct hmdfs_sb_info *sbi = cfn->sbi; +- bool deleted; +- +- mutex_lock(&sbi->cache_list_lock); +- deleted = list_empty(&cfn->list); +- if (!deleted) +- list_del_init(&cfn->list); +- mutex_unlock(&sbi->cache_list_lock); +- if (!deleted) { +- delete_dentry_file(cfn->filp); +- release_cfn(cfn); +- } +-} +- +-int hmdfs_do_lock_file(struct file *filp, unsigned char fl_type, loff_t start, +- loff_t len) +-{ +- struct file_lock fl; +- int err; +- +- locks_init_lock(&fl); +- +- fl.fl_type = fl_type; +- fl.fl_flags = FL_POSIX | FL_CLOSE | FL_SLEEP; +- fl.fl_start = start; +- fl.fl_end = start + len - 1; +- fl.fl_owner = filp; +- fl.fl_pid = current->tgid; +- fl.fl_file = filp; +- fl.fl_ops = NULL; +- fl.fl_lmops = NULL; +- +- err = locks_lock_file_wait(filp, &fl); +- if (err) +- hmdfs_err("lock file wait failed: %d", err); +- +- return err; +-} +- +-int hmdfs_wlock_file(struct file *filp, loff_t start, loff_t len) +-{ +- return hmdfs_do_lock_file(filp, F_WRLCK, start, len); +-} +- +-int hmdfs_rlock_file(struct file *filp, loff_t start, loff_t len) +-{ +- return hmdfs_do_lock_file(filp, F_RDLCK, start, len); +-} +- +-int hmdfs_unlock_file(struct file *filp, loff_t start, loff_t len) +-{ +- return hmdfs_do_lock_file(filp, F_UNLCK, start, len); +-} +- +-long cache_file_truncate(struct hmdfs_sb_info *sbi, const struct path *path, +- loff_t length) +-{ +- const struct cred *old_cred = hmdfs_override_creds(sbi->system_cred); +- long ret = vfs_truncate(path, length); +- +- hmdfs_revert_creds(old_cred); +- +- return ret; +-} +- +-ssize_t cache_file_read(struct hmdfs_sb_info *sbi, struct file *filp, void *buf, +- size_t count, loff_t *pos) 
+-{ +- const struct cred *old_cred = hmdfs_override_creds(sbi->system_cred); +- ssize_t ret = kernel_read(filp, buf, count, pos); +- +- hmdfs_revert_creds(old_cred); +- +- return ret; +-} +- +-ssize_t cache_file_write(struct hmdfs_sb_info *sbi, struct file *filp, +- const void *buf, size_t count, loff_t *pos) +-{ +- const struct cred *old_cred = hmdfs_override_creds(sbi->system_cred); +- ssize_t ret = kernel_write(filp, buf, count, pos); +- +- hmdfs_revert_creds(old_cred); +- +- return ret; +-} +- +- +-int read_header(struct hmdfs_sb_info *sbi, struct file *filp, +- struct hmdfs_dcache_header *header) +-{ +- ssize_t bytes; +- loff_t pos = 0; +- +- bytes = cache_file_read(sbi, filp, header, sizeof(*header), &pos); +- if (bytes != sizeof(*header)) { +- hmdfs_err("read file failed, err:%zd", bytes); +- return -EIO; +- } +- +- return 0; +-} +- +-static unsigned long long cache_get_dentry_count(struct hmdfs_sb_info *sbi, +- struct file *filp) +-{ +- struct hmdfs_dcache_header header; +- int overallpage; +- +- overallpage = get_dentry_group_cnt(file_inode(filp)); +- if (overallpage == 0) +- return 0; +- +- if (read_header(sbi, filp, &header)) +- return 0; +- +- return le64_to_cpu(header.num); +-} +- +-static int cache_check_case_sensitive(struct hmdfs_sb_info *sbi, +- struct file *filp) +-{ +- struct hmdfs_dcache_header header; +- +- if (read_header(sbi, filp, &header)) +- return 0; +- +- if (sbi->s_case_sensitive != (bool)header.case_sensitive) { +- hmdfs_info("Case sensitive inconsistent, current fs is: %d, cache is %d, will drop cache", +- sbi->s_case_sensitive, header.case_sensitive); +- return 0; +- } +- return 1; +-} +- +-int write_header(struct file *filp, struct hmdfs_dcache_header *header) +-{ +- loff_t pos = 0; +- ssize_t size; +- +- size = kernel_write(filp, header, sizeof(*header), &pos); +- if (size != sizeof(*header)) { +- hmdfs_err("update dcache header failed %zd", size); +- return -EIO; +- } +- +- return 0; +-} +- +-void add_to_delete_list(struct hmdfs_sb_info *sbi, struct cache_file_node *cfn) +-{ +- mutex_lock(&sbi->cache_list_lock); +- list_add_tail(&cfn->list, &sbi->to_delete); +- mutex_unlock(&sbi->cache_list_lock); +-} +- +-void load_cfn(struct hmdfs_sb_info *sbi, const char *fullname, const char *path, +- const char *cid, bool server) +-{ +- struct cache_file_node *cfn = NULL; +- struct cache_file_node *cfn1 = NULL; +- struct list_head *head = NULL; +- +- cfn = create_cfn(sbi, path, cid, server); +- if (!cfn) +- return; +- +- cfn->filp = filp_open(fullname, O_RDWR | O_LARGEFILE, 0); +- if (IS_ERR(cfn->filp)) { +- hmdfs_err("open fail %ld", PTR_ERR(cfn->filp)); +- goto out; +- } +- +- if (cache_get_dentry_count(sbi, cfn->filp) < sbi->dcache_threshold && strcmp(cid, CLOUD_CID)) { +- add_to_delete_list(sbi, cfn); +- return; +- } +- +- if (!cache_check_case_sensitive(sbi, cfn->filp) && strcmp(cid, CLOUD_CID)) { +- add_to_delete_list(sbi, cfn); +- return; +- } +- +- head = get_list_head(sbi, server); +- +- mutex_lock(&sbi->cache_list_lock); +- cfn1 = __find_cfn(sbi, cid, path, server); +- if (!cfn1) { +- list_add_tail(&cfn->list, head); +- } else { +- release_cfn(cfn1); +- mutex_unlock(&sbi->cache_list_lock); +- add_to_delete_list(sbi, cfn); +- return; +- } +- mutex_unlock(&sbi->cache_list_lock); +- +- return; +-out: +- free_cfn(cfn); +-} +- +-static int get_cid_and_hash(const char *name, uint64_t *hash, char *cid) +-{ +- int len; +- char *p = strstr(name, "_"); +- +- if (!p) +- return -EINVAL; +- +- len = p - name; +- if (len >= HMDFS_CFN_CID_SIZE) +- return -EINVAL; +- +- 
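+-        /*
+-         * Client cache files are named "<cid>_<16-hex-hash>" (see
+-         * concat_cachefile_name()); split the name back into its cid and
+-         * path-hash halves.
+-         */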
memcpy(cid, name, len); +- cid[len] = '\0'; +- +- if (sscanf(++p, "%llx", hash) != 1) +- return -EINVAL; +- return 0; +-} +- +-static void store_one(const char *name, struct cache_file_callback *cb) +-{ +- struct file *file = NULL; +- char *fullname = NULL; +- char *kvalue = NULL; +- char cid[HMDFS_CFN_CID_SIZE]; +- uint64_t hash; +- ssize_t error; +- +- if (strlen(name) + strlen(cb->dirname) >= PATH_MAX) +- return; +- +- fullname = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!fullname) +- return; +- +- snprintf(fullname, PATH_MAX, "%s%s", cb->dirname, name); +- +- file = filp_open(fullname, O_RDWR | O_LARGEFILE, 0); +- if (IS_ERR(file)) { +- hmdfs_err("open fail %ld", PTR_ERR(file)); +- goto out; +- } +- +- kvalue = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!kvalue) +- goto out_file; +- +- error = __vfs_getxattr(file_dentry(file), file_inode(file), +- DENTRY_FILE_XATTR_NAME, kvalue, PATH_MAX); +- if (error <= 0 || error >= PATH_MAX) { +- hmdfs_err("getxattr return: %zd", error); +- goto out_kvalue; +- } +- +- kvalue[error] = '\0'; +- cid[0] = '\0'; +- +- if (!cb->server) { +- if (get_cid_and_hash(name, &hash, cid)) { +- hmdfs_err("get cid and hash fail"); +- goto out_kvalue; +- } +- } +- +- load_cfn(cb->sbi, fullname, kvalue, cid, cb->server); +- +-out_kvalue: +- kfree(kvalue); +-out_file: +- filp_close(file, NULL); +-out: +- kfree(fullname); +-} +- +-static bool cache_file_iterate(struct dir_context *ctx, const char *name, +- int name_len, loff_t offset, u64 ino, +- unsigned int d_type) +-{ +- struct cache_file_item *cfi = NULL; +- struct cache_file_callback *cb = +- container_of(ctx, struct cache_file_callback, ctx); +- +- if (name_len > NAME_MAX) { +- hmdfs_err("name_len:%d NAME_MAX:%u", name_len, NAME_MAX); +- return true; +- } +- +- if (d_type != DT_REG) +- return true; +- +- cfi = kmalloc(sizeof(*cfi), GFP_KERNEL); +- if (!cfi) +- return false; +- +- cfi->name = kstrndup(name, name_len, GFP_KERNEL); +- if (!cfi->name) { +- kfree(cfi); +- return false; +- } +- +- list_add_tail(&cfi->list, &cb->list); +- +- return true; +-} +- +-void hmdfs_do_load(struct hmdfs_sb_info *sbi, const char *fullname, bool server) +-{ +- struct file *file = NULL; +- struct path dirpath; +- int err; +- struct cache_file_item *cfi = NULL; +- struct cache_file_item *n = NULL; +- struct cache_file_callback cb = { +- .ctx.actor = cache_file_iterate, +- .ctx.pos = 0, +- .dirname = fullname, +- .sbi = sbi, +- .server = server, +- }; +- INIT_LIST_HEAD(&cb.list); +- +- +- err = kern_path(fullname, LOOKUP_DIRECTORY, &dirpath); +- if (err) { +- hmdfs_info("No file path"); +- return; +- } +- +- file = dentry_open(&dirpath, O_RDONLY, current_cred()); +- if (IS_ERR_OR_NULL(file)) { +- hmdfs_err("dentry_open failed, error: %ld", PTR_ERR(file)); +- path_put(&dirpath); +- return; +- } +- +- err = iterate_dir(file, &cb.ctx); +- if (err) +- hmdfs_err("iterate_dir failed, err: %d", err); +- +- list_for_each_entry_safe(cfi, n, &cb.list, list) { +- store_one(cfi->name, &cb); +- list_del_init(&cfi->list); +- kfree(cfi->name); +- kfree(cfi); +- } +- +- fput(file); +- path_put(&dirpath); +-} +- +-/** +- * This function just used for delete dentryfile.dat +- */ +-int delete_dentry_file(struct file *filp) +-{ +- int err = 0; +- struct dentry *dentry = file_dentry(filp); +- struct dentry *parent = lock_parent(dentry); +- +- if (dentry->d_parent == parent) { +- dget(dentry); +- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent), dentry, NULL); +- dput(dentry); +- } +- unlock_dir(parent); +- +- return err; +-} +- +-void 
hmdfs_delete_useless_cfn(struct hmdfs_sb_info *sbi) +-{ +- struct cache_file_node *cfn = NULL; +- struct cache_file_node *n = NULL; +- +- mutex_lock(&sbi->cache_list_lock); +- +- list_for_each_entry_safe(cfn, n, &sbi->to_delete, list) { +- delete_dentry_file(cfn->filp); +- list_del_init(&cfn->list); +- release_cfn(cfn); +- } +- mutex_unlock(&sbi->cache_list_lock); +-} +- +-void hmdfs_cfn_load(struct hmdfs_sb_info *sbi) +-{ +- char *fullname = NULL; +- +- if (!sbi->s_dentry_cache) +- return; +- +- fullname = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!fullname) +- return; +- +- snprintf(fullname, PATH_MAX, "%s/dentry_cache/client/", +- sbi->cache_dir); +- hmdfs_do_load(sbi, fullname, false); +- +- snprintf(fullname, PATH_MAX, "%s/dentry_cache/server/", +- sbi->cache_dir); +- hmdfs_do_load(sbi, fullname, true); +- +- kfree(fullname); +- +- hmdfs_delete_useless_cfn(sbi); +-} +- +-static void __cache_file_destroy_by_path(struct list_head *head, +- const char *path) +-{ +- struct cache_file_node *cfn = NULL; +- struct cache_file_node *n = NULL; +- +- list_for_each_entry_safe(cfn, n, head, list) { +- if (strcmp(path, cfn->relative_path) != 0) +- continue; +- list_del_init(&cfn->list); +- delete_dentry_file(cfn->filp); +- release_cfn(cfn); +- } +-} +- +-static void cache_file_destroy_by_path(struct hmdfs_sb_info *sbi, +- const char *path) +-{ +- mutex_lock(&sbi->cache_list_lock); +- +- __cache_file_destroy_by_path(&sbi->server_cache, path); +- __cache_file_destroy_by_path(&sbi->client_cache, path); +- +- mutex_unlock(&sbi->cache_list_lock); +-} +- +-static void cache_file_find_and_delete(struct hmdfs_peer *con, +- const char *relative_path) +-{ +- struct cache_file_node *cfn; +- +- cfn = find_cfn(con->sbi, con->cid, relative_path, false); +- if (!cfn) +- return; +- +- remove_cfn(cfn); +- release_cfn(cfn); +-} +- +-void cache_file_delete_by_dentry(struct hmdfs_peer *con, struct dentry *dentry) +-{ +- char *relative_path = NULL; +- +- relative_path = hmdfs_get_dentry_relative_path(dentry); +- if (unlikely(!relative_path)) { +- hmdfs_err("get relative path failed %d", -ENOMEM); +- return; +- } +- cache_file_find_and_delete(con, relative_path); +- kfree(relative_path); +-} +- +-struct file *hmdfs_get_new_dentry_file(struct hmdfs_peer *con, +- const char *relative_path, +- struct hmdfs_dcache_header *header) +-{ +- struct hmdfs_sb_info *sbi = con->sbi; +- int len = strlen(relative_path); +- struct file *filp = NULL; +- int err; +- +- filp = create_local_dentry_file_cache(sbi); +- if (IS_ERR(filp)) +- return filp; +- +- err = hmdfs_client_start_readdir(con, filp, relative_path, len, header); +- if (err) { +- if (err != -ENOENT) +- hmdfs_err("readdir failed dev: %llu err: %d", +- con->device_id, err); +- fput(filp); +- filp = ERR_PTR(err); +- } +- +- return filp; +-} +- +-void add_cfn_to_item(struct dentry *dentry, struct hmdfs_peer *con, +- struct cache_file_node *cfn) +-{ +- struct file *file = cfn->filp; +- int err; +- +- err = hmdfs_add_cache_list(con->device_id, dentry, file); +- if (unlikely(err)) { +- hmdfs_err("add cache list failed devid:%llu err:%d", +- con->device_id, err); +- return; +- } +-} +- +-int hmdfs_add_file_to_cache(struct dentry *dentry, struct hmdfs_peer *con, +- struct file *file, const char *relative_path) +-{ +- struct hmdfs_sb_info *sbi = con->sbi; +- struct file *newf = file; +- +- if (cache_get_dentry_count(sbi, file) >= sbi->dcache_threshold) +- newf = cache_file_persistent(con, file, relative_path, false); +- else +- cache_file_find_and_delete(con, relative_path); +- +- return 
hmdfs_add_cache_list(con->device_id, dentry, newf); +-} +- +-static struct file *read_header_and_revalidate(struct hmdfs_peer *con, +- struct file *filp, +- const char *relative_path) +-{ +- struct hmdfs_dcache_header header; +- struct hmdfs_dcache_header *p = NULL; +- +- if (read_header(con->sbi, filp, &header) == 0) +- p = &header; +- +- return hmdfs_get_new_dentry_file(con, relative_path, p); +-} +- +-void remote_file_revalidate_cfn(struct dentry *dentry, struct hmdfs_peer *con, +- struct cache_file_node *cfn, +- const char *relative_path) +-{ +- struct file *file = NULL; +- int err; +- +- file = read_header_and_revalidate(con, cfn->filp, relative_path); +- if (IS_ERR(file)) +- return; +- +- /* +- * If the request returned ok but file length is 0, we assume +- * that the server verified the client cache file is uptodate. +- */ +- if (i_size_read(file->f_inode) == 0) { +- hmdfs_info("The cfn cache for dev:%llu is uptodate", +- con->device_id); +- fput(file); +- add_cfn_to_item(dentry, con, cfn); +- return; +- } +- +- /* OK, cfn is not uptodate, let's remove it and add the new file */ +- remove_cfn(cfn); +- +- err = hmdfs_add_file_to_cache(dentry, con, file, relative_path); +- if (unlikely(err)) +- hmdfs_err("add cache list failed devid:%llu err:%d", +- con->device_id, err); +- fput(file); +-} +- +-void remote_file_revalidate_item(struct dentry *dentry, struct hmdfs_peer *con, +- struct clearcache_item *item, +- const char *relative_path) +-{ +- struct file *file = NULL; +- int err; +- +- file = read_header_and_revalidate(con, item->filp, relative_path); +- if (IS_ERR(file)) +- return; +- +- /* +- * If the request returned ok but file length is 0, we assume +- * that the server verified the client cache file is uptodate. +- */ +- if (i_size_read(file->f_inode) == 0) { +- hmdfs_info("The item cache for dev:%llu is uptodate", +- con->device_id); +- item->time = jiffies; +- fput(file); +- return; +- } +- +- /* We need to replace the old item */ +- remove_cache_item(item); +- cache_file_find_and_delete(con, relative_path); +- +- err = hmdfs_add_file_to_cache(dentry, con, file, relative_path); +- if (unlikely(err)) +- hmdfs_err("add cache list failed devid:%llu err:%d", +- con->device_id, err); +- fput(file); +-} +- +-bool get_remote_dentry_file(struct dentry *dentry, struct hmdfs_peer *con) +-{ +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- struct cache_file_node *cfn = NULL; +- struct hmdfs_sb_info *sbi = con->sbi; +- char *relative_path = NULL; +- int err = 0; +- struct file *filp = NULL; +- struct clearcache_item *item; +- +- if (hmdfs_cache_revalidate(READ_ONCE(con->conn_time), con->device_id, +- dentry)) +- return false; +- +- relative_path = hmdfs_get_dentry_relative_path(dentry); +- if (unlikely(!relative_path)) { +- hmdfs_err("get relative path failed %d", -ENOMEM); +- return false; +- } +- mutex_lock(&d_info->cache_pull_lock); +- if (hmdfs_cache_revalidate(READ_ONCE(con->conn_time), con->device_id, +- dentry)) +- goto out_unlock; +- +- item = hmdfs_find_cache_item(con->device_id, dentry); +- if (item) { +- remote_file_revalidate_item(dentry, con, item, relative_path); +- kref_put(&item->ref, release_cache_item); +- goto out_unlock; +- } +- +- cfn = find_cfn(sbi, con->cid, relative_path, false); +- if (cfn) { +- remote_file_revalidate_cfn(dentry, con, cfn, relative_path); +- release_cfn(cfn); +- goto out_unlock; +- } +- +- filp = hmdfs_get_new_dentry_file(con, relative_path, NULL); +- if (IS_ERR(filp)) { +- err = PTR_ERR(filp); +- goto out_unlock; +- } +- +- err = 
hmdfs_add_file_to_cache(dentry, con, filp, relative_path); +- if (unlikely(err)) +- hmdfs_err("add cache list failed devid:%lu err:%d", +- (unsigned long)con->device_id, err); +- fput(filp); +- +-out_unlock: +- mutex_unlock(&d_info->cache_pull_lock); +- if (err && err != -ENOENT) +- hmdfs_err("readdir failed dev:%lu err:%d", +- (unsigned long)con->device_id, err); +- kfree(relative_path); +- return true; +-} +- +-int hmdfs_file_type(const char *name) +-{ +- if (!name) +- return -EINVAL; +- +- if (!strcmp(name, CURRENT_DIR) || !strcmp(name, PARENT_DIR)) +- return HMDFS_TYPE_DOT; +- +- return HMDFS_TYPE_COMMON; +-} +- +-struct clearcache_item *hmdfs_find_cache_item(uint64_t dev_id, +- struct dentry *dentry) +-{ +- struct clearcache_item *item = NULL; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- +- if (!d_info) +- return NULL; +- +- spin_lock(&d_info->cache_list_lock); +- list_for_each_entry(item, &(d_info->cache_list_head), list) { +- if (dev_id == item->dev_id) { +- kref_get(&item->ref); +- spin_unlock(&d_info->cache_list_lock); +- return item; +- } +- } +- spin_unlock(&d_info->cache_list_lock); +- return NULL; +-} +- +-bool hmdfs_cache_revalidate(unsigned long conn_time, uint64_t dev_id, +- struct dentry *dentry) +-{ +- bool ret = false; +- struct clearcache_item *item = NULL; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- unsigned int timeout; +- +- if (!d_info) +- return ret; +- +- timeout = hmdfs_sb(dentry->d_sb)->dcache_timeout; +- spin_lock(&d_info->cache_list_lock); +- list_for_each_entry(item, &(d_info->cache_list_head), list) { +- if (dev_id == item->dev_id) { +- ret = cache_item_revalidate(conn_time, item->time, +- timeout); +- break; +- } +- } +- spin_unlock(&d_info->cache_list_lock); +- return ret; +-} +- +-void remove_cache_item(struct clearcache_item *item) +-{ +- bool deleted; +- +- spin_lock(&item->d_info->cache_list_lock); +- deleted = list_empty(&item->list); +- if (!deleted) +- list_del_init(&item->list); +- spin_unlock(&item->d_info->cache_list_lock); +- if (!deleted) +- kref_put(&item->ref, release_cache_item); +-} +- +-void release_cache_item(struct kref *ref) +-{ +- struct clearcache_item *item = +- container_of(ref, struct clearcache_item, ref); +- +- if (item->filp) +- fput(item->filp); +- kfree(item); +-} +- +-void hmdfs_remove_cache_filp(struct hmdfs_peer *con, struct dentry *dentry) +-{ +- struct clearcache_item *item = NULL; +- struct clearcache_item *item_temp = NULL; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- // struct path *lower_path = NULL; +- +- if (!d_info) +- return; +- +- spin_lock(&d_info->cache_list_lock); +- list_for_each_entry_safe(item, item_temp, &(d_info->cache_list_head), +- list) { +- if (con->device_id == item->dev_id) { +- list_del_init(&item->list); +- spin_unlock(&d_info->cache_list_lock); +- cache_file_delete_by_dentry(con, dentry); +- kref_put(&item->ref, release_cache_item); +- return; +- } +- } +- spin_unlock(&d_info->cache_list_lock); +-} +- +-int hmdfs_add_cache_list(uint64_t dev_id, struct dentry *dentry, +- struct file *filp) +-{ +- struct clearcache_item *item = NULL; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- +- if (!d_info) +- return -ENOMEM; +- +- item = kzalloc(sizeof(*item), GFP_KERNEL); +- if (!item) +- return -ENOMEM; +- +- item->dev_id = dev_id; +- item->filp = get_file(filp); +- item->time = jiffies; +- item->d_info = d_info; +- kref_init(&item->ref); +- spin_lock(&d_info->cache_list_lock); +- list_add_tail(&(item->list), &(d_info->cache_list_head)); +- 
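+-        /*
+-         * The list now holds the initial kref plus the get_file() reference
+-         * taken above; both are released via release_cache_item() when the
+-         * item is dropped (remove_cache_item()/hmdfs_clear_cache_dents()).
+-         */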
spin_unlock(&d_info->cache_list_lock); +- return 0; +-} +- +-void hmdfs_add_remote_cache_list(struct hmdfs_peer *con, const char *dir_path) +-{ +- int err = 0; +- struct remotecache_item *item = NULL; +- struct remotecache_item *item_temp = NULL; +- struct path path, root_path; +- struct hmdfs_dentry_info *d_info = NULL; +- +- err = kern_path(con->sbi->local_dst, 0, &root_path); +- if (err) { +- hmdfs_err("kern_path failed err = %d", err); +- return; +- } +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, dir_path, 0, +- &path); +- if (err) +- goto out_put_root; +- +- d_info = hmdfs_d(path.dentry); +- if (!d_info) { +- err = -EINVAL; +- goto out; +- } +- +- /* find duplicate con */ +- mutex_lock(&d_info->remote_cache_list_lock); +- list_for_each_entry_safe(item, item_temp, +- &(d_info->remote_cache_list_head), list) { +- if (item->con->device_id == con->device_id) { +- mutex_unlock(&d_info->remote_cache_list_lock); +- goto out; +- } +- } +- +- item = kzalloc(sizeof(*item), GFP_KERNEL); +- if (!item) { +- err = -ENOMEM; +- mutex_unlock(&d_info->remote_cache_list_lock); +- goto out; +- } +- +- item->con = con; +- item->drop_flag = 0; +- list_add(&(item->list), &(d_info->remote_cache_list_head)); +- mutex_unlock(&d_info->remote_cache_list_lock); +- +-out: +- path_put(&path); +-out_put_root: +- path_put(&root_path); +-} +- +-int hmdfs_drop_remote_cache_dents(struct dentry *dentry) +-{ +- struct path lower_path; +- struct inode *lower_inode = NULL; +- struct remotecache_item *item = NULL; +- struct remotecache_item *item_temp = NULL; +- struct hmdfs_dentry_info *d_info = NULL; +- char *relative_path = NULL; +- +- if (!dentry) { +- hmdfs_err("dentry null and return"); +- return 0; +- } +- +- d_info = hmdfs_d(dentry); +- if (!d_info) { +- hmdfs_err("d_info null and return"); +- return 0; +- } +- hmdfs_get_lower_path(dentry, &lower_path); +- if (IS_ERR_OR_NULL(lower_path.dentry)) { +- hmdfs_put_lower_path(&lower_path); +- return 0; +- } +- lower_inode = d_inode(lower_path.dentry); +- hmdfs_put_lower_path(&lower_path); +- if (IS_ERR_OR_NULL(lower_inode)) +- return 0; +- /* only for directory */ +- if (!S_ISDIR(lower_inode->i_mode)) +- return 0; +- +- relative_path = hmdfs_get_dentry_relative_path(dentry); +- if (!relative_path) { +- hmdfs_err("get dentry relative path failed"); +- return 0; +- } +- mutex_lock(&d_info->remote_cache_list_lock); +- list_for_each_entry_safe(item, item_temp, +- &(d_info->remote_cache_list_head), list) { +- if (item->drop_flag) { +- item->drop_flag = 0; +- continue; +- } +- mutex_unlock(&d_info->remote_cache_list_lock); +- hmdfs_send_drop_push(item->con, relative_path); +- mutex_lock(&d_info->remote_cache_list_lock); +- list_del(&item->list); +- kfree(item); +- } +- mutex_unlock(&d_info->remote_cache_list_lock); +- +- kfree(relative_path); +- return 0; +-} +- +-/* Clear the dentry cache files of the target directory */ +-int hmdfs_clear_cache_dents(struct dentry *dentry, bool remove_cache) +-{ +- struct clearcache_item *item = NULL; +- struct clearcache_item *item_temp = NULL; +- struct hmdfs_dentry_info *d_info = hmdfs_d(dentry); +- char *path = NULL; +- +- if (!d_info) +- return 0; +- +- spin_lock(&d_info->cache_list_lock); +- list_for_each_entry_safe(item, item_temp, &(d_info->cache_list_head), +- list) { +- list_del_init(&item->list); +- kref_put(&item->ref, release_cache_item); +- } +- spin_unlock(&d_info->cache_list_lock); +- +- if (!remove_cache) +- return 0; +- +- /* we also need to confirm that there are no dentryfile_dev* +- * files under this dentry +- */ +- path 
= hmdfs_get_dentry_relative_path(dentry); +- +- if (unlikely(!path)) { +- hmdfs_err("get relative path failed"); +- return 0; +- } +- +- cache_file_destroy_by_path(hmdfs_sb(dentry->d_sb), path); +- +- kfree(path); +- return 0; +-} +- +-void hmdfs_mark_drop_flag(uint64_t device_id, struct dentry *dentry) +-{ +- struct remotecache_item *item = NULL; +- struct hmdfs_dentry_info *d_info = NULL; +- +- d_info = hmdfs_d(dentry); +- if (!d_info) { +- hmdfs_err("d_info null and return"); +- return; +- } +- +- mutex_lock(&d_info->remote_cache_list_lock); +- list_for_each_entry(item, &(d_info->remote_cache_list_head), list) { +- if (item->con->device_id == device_id) { +- item->drop_flag = 1; +- break; +- } +- } +- mutex_unlock(&d_info->remote_cache_list_lock); +-} +- +-void hmdfs_clear_drop_flag(struct dentry *dentry) +-{ +- struct remotecache_item *item = NULL; +- struct hmdfs_dentry_info *d_info = NULL; +- +- if (!dentry) { +- hmdfs_err("dentry null and return"); +- return; +- } +- +- d_info = hmdfs_d(dentry); +- if (!d_info) { +- hmdfs_err("d_info null and return"); +- return; +- } +- +- mutex_lock(&d_info->remote_cache_list_lock); +- list_for_each_entry(item, &(d_info->remote_cache_list_head), list) { +- if (item->drop_flag) +- item->drop_flag = 0; +- } +- mutex_unlock(&d_info->remote_cache_list_lock); +-} +- +-#define DUSTBIN_SUFFIX ".hwbk" +-static void hmdfs_rename_bak(struct dentry *dentry) +-{ +- struct path lower_path; +- struct dentry *lower_parent = NULL; +- struct dentry *lower_dentry = NULL; +- struct dentry *new_dentry = NULL; +- struct renamedata rename_data; +- char *name = NULL; +- int len = 0; +- int err = 0; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- len = strlen(lower_dentry->d_name.name) + strlen(DUSTBIN_SUFFIX) + 2; +- if (len >= NAME_MAX) { +- err = -ENAMETOOLONG; +- goto put_lower_path; +- } +- +- name = kmalloc(len, GFP_KERNEL); +- if (!name) { +- err = -ENOMEM; +- goto put_lower_path; +- } +- +- snprintf(name, len, ".%s%s", lower_dentry->d_name.name, DUSTBIN_SUFFIX); +- err = mnt_want_write(lower_path.mnt); +- if (err) { +- hmdfs_info("get write access failed, err %d", err); +- goto free_name; +- } +- +- lower_parent = lock_parent(lower_dentry); +- new_dentry = lookup_one_len(name, lower_parent, strlen(name)); +- if (IS_ERR(new_dentry)) { +- err = PTR_ERR(new_dentry); +- hmdfs_info("lookup new dentry failed, err %d", err); +- goto unlock_parent; +- } +- +- rename_data.old_mnt_idmap = &nop_mnt_idmap; +- rename_data.old_dir = d_inode(lower_parent); +- rename_data.old_dentry = lower_dentry; +- rename_data.new_mnt_idmap = &nop_mnt_idmap; +- rename_data.new_dir = d_inode(lower_parent); +- rename_data.new_dentry = new_dentry; +- rename_data.flags = 0; +- err = vfs_rename(&rename_data); +- +- dput(new_dentry); +-unlock_parent: +- unlock_dir(lower_parent); +- mnt_drop_write(lower_path.mnt); +-free_name: +- kfree(name); +-put_lower_path: +- hmdfs_put_lower_path(&lower_path); +- +- if (err) +- hmdfs_err("failed to rename file, err %d", err); +-} +- +-int hmdfs_root_unlink(uint64_t device_id, struct path *root_path, +- const char *unlink_dir, const char *unlink_name) +-{ +- int err = 0; +- struct path path; +- struct dentry *child_dentry = NULL; +- struct inode *dir = NULL; +- struct inode *child_inode = NULL; +- kuid_t tmp_uid; +- +- err = vfs_path_lookup(root_path->dentry, root_path->mnt, +- unlink_dir, LOOKUP_DIRECTORY, &path); +- if (err) { +- hmdfs_err("found path failed err = %d", err); +- return err; +- } +- dir = 
d_inode(path.dentry); +- inode_lock_nested(dir, I_MUTEX_PARENT); +- +- child_dentry = lookup_one_len(unlink_name, path.dentry, +- strlen(unlink_name)); +- if (IS_ERR(child_dentry)) { +- err = PTR_ERR(child_dentry); +- hmdfs_err("lookup_one_len failed, err = %d", err); +- goto unlock_out; +- } +- if (d_is_negative(child_dentry)) { +- err = -ENOENT; +- dput(child_dentry); +- goto unlock_out; +- } +- child_inode = d_inode(child_dentry); +- if (!child_inode) +- goto unlock_out; +- +- tmp_uid = hmdfs_override_inode_uid(dir); +- +- hmdfs_mark_drop_flag(device_id, path.dentry); +- ihold(child_inode); +- err = vfs_unlink(&nop_mnt_idmap, dir, child_dentry, NULL); +- /* +- * -EOWNERDEAD means we want to put the file in a special dir instead of +- * deleting it, specifically the dustbin on the phone, so that the user can +- * recover the deleted images and videos. +- */ +- if (err == -EOWNERDEAD) { +- hmdfs_rename_bak(child_dentry); +- err = 0; +- } +- if (err) +- hmdfs_err("unlink path failed err = %d", err); +- hmdfs_revert_inode_uid(dir, tmp_uid); +- dput(child_dentry); +- +-unlock_out: +- inode_unlock(dir); +- if (child_inode) +- iput(child_inode); +- path_put(&path); +- return err; +-} +- +-struct dentry *hmdfs_root_mkdir(uint64_t device_id, const char *local_dst_path, +- const char *mkdir_dir, const char *mkdir_name, +- umode_t mode) +-{ +- int err; +- struct path path; +- struct dentry *child_dentry = NULL; +- struct dentry *ret = NULL; +- char *mkdir_path = NULL; +- char *mkdir_abs_path = NULL; +- +- mkdir_path = hmdfs_connect_path(mkdir_dir, mkdir_name); +- if (!mkdir_path) +- return ERR_PTR(-EACCES); +- +- mkdir_abs_path = +- hmdfs_get_dentry_absolute_path(local_dst_path, mkdir_path); +- if (!mkdir_abs_path) { +- ret = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- child_dentry = kern_path_create(AT_FDCWD, mkdir_abs_path, +- &path, LOOKUP_DIRECTORY); +- if (IS_ERR(child_dentry)) { +- ret = child_dentry; +- goto out; +- } +- +- hmdfs_mark_drop_flag(device_id, child_dentry->d_parent); +- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), child_dentry, mode); +- if (err) { +- hmdfs_err("mkdir failed! err=%d", err); +- ret = ERR_PTR(err); +- goto out_put; +- } +- ret = dget(child_dentry); +-out_put: +- done_path_create(&path, child_dentry); +-out: +- kfree(mkdir_path); +- kfree(mkdir_abs_path); +- return ret; +-} +- +-struct dentry *hmdfs_root_create(uint64_t device_id, const char *local_dst_path, +- const char *create_dir, +- const char *create_name, +- umode_t mode, bool want_excl) +-{ +- int err; +- struct path path; +- struct dentry *child_dentry = NULL; +- struct dentry *ret = NULL; +- char *create_path = NULL; +- char *create_abs_path = NULL; +- +- create_path = hmdfs_connect_path(create_dir, create_name); +- if (!create_path) +- return ERR_PTR(-EACCES); +- +- create_abs_path = +- hmdfs_get_dentry_absolute_path(local_dst_path, create_path); +- if (!create_abs_path) { +- ret = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- child_dentry = kern_path_create(AT_FDCWD, create_abs_path, &path, 0); +- +- if (IS_ERR(child_dentry)) { +- ret = child_dentry; +- goto out; +- } +- hmdfs_mark_drop_flag(device_id, child_dentry->d_parent); +- err = vfs_create(&nop_mnt_idmap, d_inode(path.dentry), child_dentry, mode, want_excl); +- if (err) { +- hmdfs_err("path create failed! 
err=%d", err); +- ret = ERR_PTR(err); +- goto out_put; +- } +- ret = dget(child_dentry); +-out_put: +- done_path_create(&path, child_dentry); +-out: +- kfree(create_path); +- kfree(create_abs_path); +- return ret; +-} +- +-int hmdfs_root_rmdir(uint64_t device_id, struct path *root_path, +- const char *rmdir_dir, const char *rmdir_name) +-{ +- int err = 0; +- struct path path; +- struct dentry *child_dentry = NULL; +- struct inode *dir = NULL; +- +- err = vfs_path_lookup(root_path->dentry, root_path->mnt, +- rmdir_dir, LOOKUP_DIRECTORY, &path); +- if (err) { +- hmdfs_err("found path failed err = %d", err); +- return err; +- } +- dir = d_inode(path.dentry); +- inode_lock_nested(dir, I_MUTEX_PARENT); +- +- child_dentry = lookup_one_len(rmdir_name, path.dentry, +- strlen(rmdir_name)); +- if (IS_ERR(child_dentry)) { +- err = PTR_ERR(child_dentry); +- hmdfs_err("lookup_one_len failed, err = %d", err); +- goto unlock_out; +- } +- if (d_is_negative(child_dentry)) { +- err = -ENOENT; +- dput(child_dentry); +- goto unlock_out; +- } +- +- hmdfs_mark_drop_flag(device_id, path.dentry); +- err = vfs_rmdir(&nop_mnt_idmap, dir, child_dentry); +- if (err) +- hmdfs_err("rmdir failed err = %d", err); +- dput(child_dentry); +- +-unlock_out: +- inode_unlock(dir); +- path_put(&path); +- return err; +-} +- +-int hmdfs_root_rename(struct hmdfs_sb_info *sbi, uint64_t device_id, +- const char *oldpath, const char *oldname, +- const char *newpath, const char *newname, +- unsigned int flags) +-{ +- int err = 0; +- struct path path_dst; +- struct path path_old; +- struct path path_new; +- struct dentry *trap = NULL; +- struct dentry *old_dentry = NULL; +- struct dentry *new_dentry = NULL; +- struct renamedata rename_data; +- +- err = kern_path(sbi->local_dst, 0, &path_dst); +- if (err) { +- hmdfs_err("kern_path for local dst failed %d", err); +- return err; +- } +- +- err = vfs_path_lookup(path_dst.dentry, path_dst.mnt, oldpath, 0, +- &path_old); +- if (err) { +- hmdfs_info("lookup oldpath from local_dst failed, err %d", err); +- goto put_path_dst; +- } +- +- err = vfs_path_lookup(path_dst.dentry, path_dst.mnt, newpath, 0, +- &path_new); +- if (err) { +- hmdfs_info("lookup newpath from local_dst failed, err %d", err); +- goto put_path_old; +- } +- +- err = mnt_want_write(path_dst.mnt); +- if (err) { +- hmdfs_info("get write access failed for local_dst, err %d", +- err); +- goto put_path_new; +- } +- +- trap = lock_rename(path_new.dentry, path_old.dentry); +- +- old_dentry = lookup_one_len(oldname, path_old.dentry, strlen(oldname)); +- if (IS_ERR(old_dentry)) { +- err = PTR_ERR(old_dentry); +- hmdfs_info("lookup old dentry failed, err %d", err); +- goto unlock; +- } +- +- /* source should not be ancestor of target */ +- if (old_dentry == trap) { +- err = -EINVAL; +- goto put_old_dentry; +- } +- +- new_dentry = lookup_one_len(newname, path_new.dentry, strlen(newname)); +- if (IS_ERR(new_dentry)) { +- err = PTR_ERR(new_dentry); +- hmdfs_info("lookup new dentry failed, err %d", err); +- goto put_old_dentry; +- } +- +- /* +- * Exchange rename is not supported, thus target should not be an +- * ancestor of source. 
+- */ +- if (trap == new_dentry) { +- err = -ENOTEMPTY; +- goto put_new_dentry; +- } +- +- if (d_is_positive(new_dentry) && (flags & RENAME_NOREPLACE)) { +- err = -EEXIST; +- goto put_new_dentry; +- } +- +- hmdfs_mark_drop_flag(device_id, path_old.dentry); +- if (path_old.dentry != path_new.dentry) +- hmdfs_mark_drop_flag(device_id, path_new.dentry); +- +- rename_data.old_mnt_idmap = &nop_mnt_idmap; +- rename_data.old_dir = d_inode(path_old.dentry); +- rename_data.old_dentry = old_dentry; +- rename_data.new_mnt_idmap = &nop_mnt_idmap; +- rename_data.new_dir = d_inode(path_new.dentry); +- rename_data.new_dentry = new_dentry; +- rename_data.flags = flags; +- err = vfs_rename(&rename_data); +- +-put_new_dentry: +- dput(new_dentry); +-put_old_dentry: +- dput(old_dentry); +-unlock: +- unlock_rename(path_new.dentry, path_old.dentry); +- mnt_drop_write(path_dst.mnt); +-put_path_new: +- path_put(&path_new); +-put_path_old: +- path_put(&path_old); +-put_path_dst: +- path_put(&path_dst); +- +- return err; +-} +- +-int hmdfs_get_path_in_sb(struct super_block *sb, const char *name, +- unsigned int flags, struct path *path) +-{ +- int err; +- +- err = kern_path(name, flags, path); +- if (err) { +- hmdfs_err("can't get %s %d\n", name, err); +- return err; +- } +- +- /* should ensure the path belongs to this sb */ +- if (path->dentry->d_sb != sb) { +- err = -EINVAL; +- hmdfs_err("Wrong sb: %s on %s", name, +- path->dentry->d_sb->s_type->name); +- path_put(path); +- } +- +- return err; +-} +diff --git a/fs/hmdfs/hmdfs_dentryfile.h b/fs/hmdfs/hmdfs_dentryfile.h +deleted file mode 100644 +index b3907ce1b..000000000 +--- a/fs/hmdfs/hmdfs_dentryfile.h ++++ /dev/null +@@ -1,349 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_dentryfile.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef HMDFS_DENTRYFILE_H +-#define HMDFS_DENTRYFILE_H +- +-#include "hmdfs.h" +-#include +- +-/* used to escape from the hmdfs file system; hmdfs hides the following names */ +-#define CURRENT_DIR "." +-#define PARENT_DIR ".."
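For reference, hmdfs_file_type() earlier in hmdfs_dentryfile.c maps exactly these two names to HMDFS_TYPE_DOT and everything else to HMDFS_TYPE_COMMON. A minimal userspace sketch of that dot-entry filter, reusing the constants from this header (the standalone file_type() wrapper and the test driver are illustrative additions, not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define CURRENT_DIR "."
#define PARENT_DIR ".."
#define HMDFS_TYPE_COMMON 0
#define HMDFS_TYPE_DOT 1

/* Same check as hmdfs_file_type(): "." and ".." are hidden dot entries. */
static int file_type(const char *name)
{
	if (!name)
		return -EINVAL;
	if (!strcmp(name, CURRENT_DIR) || !strcmp(name, PARENT_DIR))
		return HMDFS_TYPE_DOT;
	return HMDFS_TYPE_COMMON;
}

int main(void)
{
	printf("%d %d %d\n", file_type("."), file_type(".."),
	       file_type("1.jpg")); /* prints: 1 1 0 */
	return 0;
}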
+- +-/* local dentry cache data */ +-#define DENTRY_FILE_XATTR_NAME "user.hmdfs_cache" +- +-#define DENTRY_FILE_NAME_RETRY 10 +- +-#define MAX_BUCKET_LEVEL 63 +-#define BUCKET_BLOCKS 2 +-#define MAX_DIR_BUCKETS (1 << ((MAX_BUCKET_LEVEL / 2) - 1)) +- +-#define CONFLICTING_FILE_CONST_SUFFIX "_conflict_dev" +-#define CONFLICTING_FILE_SUFFIX "_conflict_dev%u" +-#define CONFLICTING_DIR_SUFFIX "_remote_directory" +- +-#define POS_BIT_NUM 64 +-#define DEV_ID_BIT_NUM 16 +-#define GROUP_ID_BIT_NUM 39 +-#define OFFSET_BIT_NUM 8 +-#define OFFSET_BIT_MASK 0xFF +- +-#define DEFAULT_DCACHE_TIMEOUT 30 +-#define DEFAULT_DCACHE_PRECISION 10 +-#define DEFAULT_DCACHE_THRESHOLD 1000 +-#define HMDFS_STALE_REMOTE_ISIZE ULLONG_MAX +- +-/* Seconds per week */ +-#define MAX_DCACHE_TIMEOUT 604800 +- +-struct hmdfs_iterate_callback { +- struct dir_context ctx; +- struct dir_context *caller; +- int result; +- struct rb_root *root; +-}; +- +-/* +- * 4096 = version(1) + bitmap(10) + reserved(5) +- * + nsl(80 * 43) + filename(80 * 8) +- */ +-#define DENTRYGROUP_SIZE 4096 +-#define DENTRY_NAME_LEN 8 +-#define DENTRY_RESERVED_LENGTH 3 +-#define DENTRY_PER_GROUP 80 +-#define DENTRY_BITMAP_LENGTH 10 +-#define DENTRY_GROUP_RESERVED 5 +-#define DENTRYGROUP_HEADER 4096 +- +-struct hmdfs_dentry { +- __le32 hash; +- __le16 i_mode; +- __le16 namelen; +- __le64 i_size; +- /* modification time */ +- __le64 i_mtime; +- /* modification time in nanoseconds */ +- __le32 i_mtime_nsec; +- /* combination of inode number and generation */ +- __le64 i_ino; +- __le32 i_flag; +- /* reserved bytes for long-term extension, total 43 bytes */ +- __u8 reserved[DENTRY_RESERVED_LENGTH]; +-} __packed; +- +-/* 4K/51 bytes = 80 dentries per dentrygroup */ +-struct hmdfs_dentry_group { +- __u8 dentry_version; /* dentry version, starts from 1 */ +- __u8 bitmap[DENTRY_BITMAP_LENGTH]; +- struct hmdfs_dentry nsl[DENTRY_PER_GROUP]; +- __u8 filename[DENTRY_PER_GROUP][DENTRY_NAME_LEN]; +- __u8 reserved[DENTRY_GROUP_RESERVED]; +-} __packed; +- +-/** +- * The content of the 1st 4k block in dentryfile.dat. +- * Used to check whether the dcache can be used directly or +- * needs to be rebuilt. +- * +- * Since ctime has a precision of 10ms or less, a dcache +- * rebuilt at the same time as the dentry inode ctime may be +- * inconsistent. +- * e.g. after creating 1.jpg 2.jpg 3.jpg, +- * the rebuilt dcache may only contain 1.jpg 2.jpg. +- * So we need these times to verify the dcache. +- */ +-struct hmdfs_dcache_header { +- /* The time of the dcache rebuild */ +- __le64 dcache_crtime; +- __le64 dcache_crtime_nsec; +- +- /* The directory inode ctime when the dcache was rebuilt */ +- __le64 dentry_ctime; +- __le64 dentry_ctime_nsec; +- +- /* The dentry count */ +- __le64 num; +- +- /* Case sensitivity */ +- __u8 case_sensitive; +-} __packed; +- +-static inline loff_t get_dentry_group_pos(unsigned int bidx) +-{ +- return ((loff_t)bidx) * DENTRYGROUP_SIZE + DENTRYGROUP_HEADER; +-} +- +-static inline unsigned int get_dentry_group_cnt(struct inode *inode) +-{ +- loff_t size = i_size_read(inode); +- +- return size >= DENTRYGROUP_HEADER ? 
+- (size - DENTRYGROUP_HEADER) / DENTRYGROUP_SIZE : +- 0; +-} +- +-#define DENTRY_NAME_MAX_LEN (DENTRY_PER_GROUP * DENTRY_NAME_LEN) +-#define BITS_PER_BYTE 8 +-#define HMDFS_SLOT_LEN_BITS 3 +-#define get_dentry_slots(x) (((x) + BITS_PER_BYTE - 1) >> HMDFS_SLOT_LEN_BITS) +- +-#define INUNUMBER_START 10000000 +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +-#define DENTRY_FILE_PERM 0660 +-#else +-#define DENTRY_FILE_PERM 0666 +-#endif +- +-struct hmdfs_dcache_lookup_ctx { +- struct hmdfs_sb_info *sbi; +- const struct qstr *name; +- struct file *filp; +- __u32 hash; +- +- /* for case sensitive */ +- unsigned int bidx; +- struct hmdfs_dentry_group *page; +- +- /* for case insensitive */ +- struct hmdfs_dentry *insense_de; +- unsigned int insense_bidx; +- struct hmdfs_dentry_group *insense_page; +-}; +- +-extern void hmdfs_init_dcache_lookup_ctx(struct hmdfs_dcache_lookup_ctx *ctx, +- struct hmdfs_sb_info *sbi, +- const struct qstr *qstr, +- struct file *filp); +- +-int create_dentry(struct dentry *child_dentry, struct inode *inode, +- struct file *file, struct hmdfs_sb_info *sbi); +-int read_dentry(struct hmdfs_sb_info *sbi, char *file_name, +- struct dir_context *ctx); +-struct hmdfs_dentry *hmdfs_find_dentry(struct dentry *child_dentry, +- struct hmdfs_dcache_lookup_ctx *ctx); +-void hmdfs_delete_dentry(struct dentry *d, struct file *filp); +-int hmdfs_rename_dentry(struct dentry *old_dentry, struct dentry *new_dentry, +- struct file *old_filp, struct file *new_filp); +-int get_inonumber(void); +-struct file *create_local_dentry_file_cache(struct hmdfs_sb_info *sbi); +-int update_inode_to_dentry(struct dentry *child_dentry, struct inode *inode); +-struct file *cache_file_persistent(struct hmdfs_peer *con, struct file *filp, +- const char *relative_path, bool server); +- +-#define HMDFS_TYPE_COMMON 0 +-#define HMDFS_TYPE_DOT 1 +-#define HMDFS_TYPE_DENTRY 2 +-#define HMDFS_TYPE_DENTRY_CACHE 3 +-int hmdfs_file_type(const char *name); +- +-loff_t hmdfs_set_pos(unsigned long dev_id, unsigned long group_id, +- unsigned long offset); +- +-struct getdents_callback_real { +- struct dir_context ctx; +- struct path *parent_path; +- loff_t num; +- struct file *file; +- struct hmdfs_sb_info *sbi; +- const char *dir; +-}; +- +-struct file *hmdfs_server_rebuild_dents(struct hmdfs_sb_info *sbi, +- struct path *path, loff_t *num, +- const char *dir); +- +-#define DCACHE_LIFETIME 30 +- +-struct clearcache_item { +- uint64_t dev_id; +- struct file *filp; +- unsigned long time; +- struct list_head list; +- struct kref ref; +- struct hmdfs_dentry_info *d_info; +-}; +- +-void hmdfs_add_remote_cache_list(struct hmdfs_peer *con, const char *dir_path); +- +-struct remotecache_item { +- struct hmdfs_peer *con; +- struct list_head list; +- __u8 drop_flag; +-}; +- +-#define HMDFS_CFN_CID_SIZE 65 +-#define HMDFS_SERVER_CID "" +- +-struct cache_file_node { +- struct list_head list; +- struct hmdfs_sb_info *sbi; +- char *relative_path; +- u8 cid[HMDFS_CFN_CID_SIZE]; +- refcount_t ref; +- bool server; +- struct file *filp; +-}; +- +-struct cache_file_item { +- struct list_head list; +- const char *name; +-}; +- +-struct cache_file_callback { +- struct dir_context ctx; +- const char *dirname; +- struct hmdfs_sb_info *sbi; +- bool server; +- struct list_head list; +-}; +- +-int hmdfs_drop_remote_cache_dents(struct dentry *dentry); +-void hmdfs_send_drop_push(struct hmdfs_peer *con, const char *path); +-void hmdfs_mark_drop_flag(uint64_t device_id, struct dentry *dentry); +-void hmdfs_clear_drop_flag(struct dentry *dentry); +-void 
delete_in_cache_file(uint64_t dev_id, struct dentry *dentry); +-void create_in_cache_file(uint64_t dev_id, struct dentry *dentry); +-struct clearcache_item *hmdfs_find_cache_item(uint64_t dev_id, +- struct dentry *dentry); +-bool hmdfs_cache_revalidate(unsigned long conn_time, uint64_t dev_id, +- struct dentry *dentry); +-void hmdfs_remove_cache_filp(struct hmdfs_peer *con, struct dentry *dentry); +-int hmdfs_add_cache_list(uint64_t dev_id, struct dentry *dentry, +- struct file *filp); +-int hmdfs_clear_cache_dents(struct dentry *dentry, bool remove_cache); +- +-int hmdfs_root_unlink(uint64_t device_id, struct path *root_path, +- const char *unlink_dir, const char *unlink_name); +-struct dentry *hmdfs_root_mkdir(uint64_t device_id, const char *local_dst_path, +- const char *mkdir_dir, const char *mkdir_name, +- umode_t mode); +-struct dentry *hmdfs_root_create(uint64_t device_id, const char *local_dst_path, +- const char *create_dir, +- const char *create_name, +- umode_t mode, bool want_excl); +-int hmdfs_root_rmdir(uint64_t device_id, struct path *root_path, +- const char *rmdir_dir, const char *rmdir_name); +-int hmdfs_root_rename(struct hmdfs_sb_info *sbi, uint64_t device_id, +- const char *oldpath, const char *oldname, +- const char *newpath, const char *newname, +- unsigned int flags); +- +-int hmdfs_get_path_in_sb(struct super_block *sb, const char *name, +- unsigned int flags, struct path *path); +- +-int hmdfs_wlock_file(struct file *filp, loff_t start, loff_t len); +-int hmdfs_rlock_file(struct file *filp, loff_t start, loff_t len); +-int hmdfs_unlock_file(struct file *filp, loff_t start, loff_t len); +-long cache_file_truncate(struct hmdfs_sb_info *sbi, const struct path *path, +- loff_t length); +-ssize_t cache_file_read(struct hmdfs_sb_info *sbi, struct file *filp, void *buf, +- size_t count, loff_t *pos); +-ssize_t cache_file_write(struct hmdfs_sb_info *sbi, struct file *filp, +- const void *buf, size_t count, loff_t *pos); +-int hmdfs_metainfo_read_nocred(struct file *filp, +- void *buffer, int size, int bidx); +-int hmdfs_metainfo_read(struct hmdfs_sb_info *sbi, struct file *filp, +- void *buffer, int buffersize, int bidx); +- +-bool get_remote_dentry_file(struct dentry *dentry, struct hmdfs_peer *con); +-void get_remote_dentry_file_sync(struct dentry *dentry, struct hmdfs_peer *con); +-int get_cloud_cache_file(struct dentry *dentry, struct hmdfs_sb_info *sbi); +- +-void release_cache_item(struct kref *ref); +-void remove_cache_item(struct clearcache_item *item); +- +-void hmdfs_cfn_load(struct hmdfs_sb_info *sbi); +-void hmdfs_cfn_destroy(struct hmdfs_sb_info *sbi); +-struct cache_file_node *find_cfn(struct hmdfs_sb_info *sbi, const char *cid, +- const char *path, bool server); +-void release_cfn(struct cache_file_node *cfn); +-void destroy_cfn(struct hmdfs_sb_info *sbi); +-void remove_cfn(struct cache_file_node *cfn); +-int delete_dentry_file(struct file *filp); +-struct file *hmdfs_server_cache_revalidate(struct hmdfs_sb_info *sbi, +- const char *recvpath, +- struct path *path); +-int write_header(struct file *filp, struct hmdfs_dcache_header *header); +- +-static inline struct list_head *get_list_head(struct hmdfs_sb_info *sbi, +- bool server) +-{ +- return ((server) ? 
&(sbi)->server_cache : &(sbi)->client_cache); +-} +- +-/* +- * generate_u64_ino - generate a new 64 bit inode number +- * +- * @ino: origin 32 bit inode number +- * @generation: origin 32 bit inode generation +- * +- * We need both remote inode number and generation to ensure the uniqueness of +- * the local inode, thus we store inode->i_ino in lower 32 bits, and +- * inode->i_generation in higher 32 bits. +- */ +-static inline uint64_t generate_u64_ino(unsigned long ino, +- unsigned int generation) +-{ +- return (uint64_t)ino | ((uint64_t)generation << 32); +-} +- +-static inline bool cache_item_revalidate(unsigned long conn_time, +- unsigned long item_time, +- unsigned int timeout) +-{ +- return time_before_eq(jiffies, item_time + timeout * HZ) && +- time_before_eq(conn_time, item_time); +-} +- +-__u32 hmdfs_dentry_hash(const struct qstr *qstr, bool case_sense); +-__u64 get_bucketaddr(unsigned int level, __u64 buckoffset); +-__u64 get_bucket_by_level(unsigned int level); +-unsigned int get_max_depth(struct file *filp); +-#endif +diff --git a/fs/hmdfs/hmdfs_dentryfile_cloud.c b/fs/hmdfs/hmdfs_dentryfile_cloud.c +deleted file mode 100644 +index 4e7dd0099..000000000 +--- a/fs/hmdfs/hmdfs_dentryfile_cloud.c ++++ /dev/null +@@ -1,171 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/hmdfs_dentryfile_cloud.c +- * +- * Copyright (c) 2023-2023 Huawei Device Co., Ltd. +- */ +- +-#include "hmdfs_dentryfile_cloud.h" +- +-#include +- +-void hmdfs_init_dcache_lookup_ctx_cloud( +- struct hmdfs_dcache_lookup_ctx_cloud *ctx, struct hmdfs_sb_info *sbi, +- const struct qstr *qstr, struct file *filp) +-{ +- ctx->sbi = sbi; +- ctx->name = qstr; +- ctx->filp = filp; +- ctx->bidx = 0; +- ctx->page = NULL; +- ctx->insense_de = NULL; +- ctx->insense_bidx = 0; +- ctx->insense_page = NULL; +-} +- +-static struct hmdfs_dentry_group_cloud *find_dentry_page(struct hmdfs_sb_info *sbi, +- pgoff_t index, struct file *filp) +-{ +- int size; +- struct hmdfs_dentry_group_cloud *dentry_blk = NULL; +- loff_t pos = get_dentry_group_pos(index); +- int err; +- +- dentry_blk = kmalloc(sizeof(*dentry_blk), GFP_KERNEL); +- if (!dentry_blk) +- return NULL; +- +- err = hmdfs_wlock_file(filp, pos, DENTRYGROUP_SIZE); +- if (err) { +- hmdfs_err("lock file pos %lld failed %d", pos, err); +- kfree(dentry_blk); +- return NULL; +- } +- +- size = kernel_read(filp, dentry_blk, (size_t)DENTRYGROUP_SIZE, +- &pos); +- if (size != DENTRYGROUP_SIZE) { +- hmdfs_err("read pos %lld failed %d", pos, size); +- hmdfs_unlock_file(filp, pos, DENTRYGROUP_SIZE); +- kfree(dentry_blk); +- dentry_blk = NULL; +- } +- +- return dentry_blk; +-} +- +-static struct hmdfs_dentry_cloud * +-find_in_block(struct hmdfs_dentry_group_cloud *dentry_blk, __u32 namehash, +- const struct qstr *qstr, struct hmdfs_dentry_cloud **insense_de, +- bool case_sense) +-{ +- struct hmdfs_dentry_cloud *de; +- unsigned long bit_pos = 0; +- int max_len = 0; +- +- while (bit_pos < DENTRY_PER_GROUP_CLOUD) { +- if (!test_bit_le(bit_pos, dentry_blk->bitmap)) { +- bit_pos++; +- max_len++; +- continue; +- } +- de = &dentry_blk->nsl[bit_pos]; +- if (unlikely(!de->namelen)) { +- bit_pos++; +- continue; +- } +- +- if (le32_to_cpu(de->hash) == namehash && +- le16_to_cpu(de->namelen) == qstr->len && +- !memcmp(qstr->name, dentry_blk->filename[bit_pos], +- le16_to_cpu(de->namelen))) +- goto found; +- if (!(*insense_de) && !case_sense && +- le32_to_cpu(de->hash) == namehash && +- le16_to_cpu(de->namelen) == qstr->len && +- str_n_case_eq(qstr->name, dentry_blk->filename[bit_pos], +- 
le16_to_cpu(de->namelen))) +- *insense_de = de; +- max_len = 0; +- bit_pos += get_dentry_slots(le16_to_cpu(de->namelen)); +- } +- de = NULL; +-found: +- return de; +-} +- +-static struct hmdfs_dentry_cloud * +-hmdfs_in_level(struct dentry *child_dentry, unsigned int level, +- struct hmdfs_dcache_lookup_ctx_cloud *ctx) +-{ +- unsigned long nbucket; +- unsigned long bidx, end_block; +- struct hmdfs_dentry_cloud *de = NULL; +- struct hmdfs_dentry_cloud *tmp_insense_de = NULL; +- struct hmdfs_dentry_group_cloud *dentry_blk; +- +- nbucket = get_bucket_by_level(level); +- if (!nbucket) +- return de; +- +- bidx = get_bucketaddr(level, ctx->hash % nbucket) * BUCKET_BLOCKS; +- end_block = bidx + BUCKET_BLOCKS; +- +- for (; bidx < end_block; bidx++) { +- dentry_blk = find_dentry_page(ctx->sbi, bidx, ctx->filp); +- if (!dentry_blk) +- break; +- +- de = find_in_block(dentry_blk, ctx->hash, ctx->name, +- &tmp_insense_de, ctx->sbi->s_case_sensitive); +- if (!de && !(ctx->insense_de) && tmp_insense_de) { +- ctx->insense_de = tmp_insense_de; +- ctx->insense_page = dentry_blk; +- ctx->insense_bidx = bidx; +- } else if (!de) { +- hmdfs_unlock_file(ctx->filp, get_dentry_group_pos(bidx), +- DENTRYGROUP_SIZE); +- kfree(dentry_blk); +- } else { +- ctx->page = dentry_blk; +- break; +- } +- } +- ctx->bidx = bidx; +- return de; +-} +- +-struct hmdfs_dentry_cloud * +-hmdfs_find_dentry_cloud(struct dentry *child_dentry, +- struct hmdfs_dcache_lookup_ctx_cloud *ctx) +-{ +- struct hmdfs_dentry_cloud *de = NULL; +- unsigned int max_depth; +- unsigned int level; +- +- if (!ctx->filp) +- return NULL; +- +- ctx->hash = hmdfs_dentry_hash(ctx->name, ctx->sbi->s_case_sensitive); +- max_depth = get_max_depth(ctx->filp); +- for (level = 0; level < max_depth; level++) { +- de = hmdfs_in_level(child_dentry, level, ctx); +- if (de) { +- if (ctx->insense_page) { +- hmdfs_unlock_file(ctx->filp, +- get_dentry_group_pos(ctx->insense_bidx), +- DENTRYGROUP_SIZE); +- kfree(ctx->insense_page); +- ctx->insense_page = NULL; +- } +- return de; +- } +- } +- if (ctx->insense_de) { +- ctx->bidx = ctx->insense_bidx; +- ctx->page = ctx->insense_page; +- ctx->insense_bidx = 0; +- ctx->insense_page = NULL; +- } +- return ctx->insense_de; +-} +diff --git a/fs/hmdfs/hmdfs_dentryfile_cloud.h b/fs/hmdfs/hmdfs_dentryfile_cloud.h +deleted file mode 100644 +index aba3cf9ea..000000000 +--- a/fs/hmdfs/hmdfs_dentryfile_cloud.h ++++ /dev/null +@@ -1,63 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_dentryfile_cloud.h +- * +- * Copyright (c) 2023-2023 Huawei Device Co., Ltd. 
+- */ +- +-#ifndef HMDFS_DENTRYFILE_CLOUD_H +-#define HMDFS_DENTRYFILE_CLOUD_H +- +-#include "inode.h" +-#include "hmdfs_dentryfile.h" +- +-/* +- * 4096 = version(1) + bitmap(8) + reserved(7) +- * + nsl(60 * 60) + filename(60 * 8) +- */ +-#define DENTRY_BITMAP_LENGTH_CLOUD 8 +-#define DENTRY_PER_GROUP_CLOUD 60 +-#define DENTRY_GROUP_RESERVED_CLOUD 7 +-struct hmdfs_dentry_cloud { +- __le32 hash; +- __le16 i_mode; +- __le16 namelen; +- __le64 i_size; +- __le64 i_mtime; +- __u8 record_id[CLOUD_RECORD_ID_LEN]; +- /* reserved bytes for long term extend, total 60 bytes */ +- __u8 reserved[CLOUD_DENTRY_RESERVED_LENGTH]; +-} __packed; +- +-/* 4K/68 Bytes = 60 dentries for per dentrygroup */ +-struct hmdfs_dentry_group_cloud { +- __u8 dentry_version; +- __u8 bitmap[DENTRY_BITMAP_LENGTH_CLOUD]; +- struct hmdfs_dentry_cloud nsl[DENTRY_PER_GROUP_CLOUD]; +- __u8 filename[DENTRY_PER_GROUP_CLOUD][DENTRY_NAME_LEN]; +- __u8 reserved[DENTRY_GROUP_RESERVED_CLOUD]; +-} __packed; +- +-struct hmdfs_dcache_lookup_ctx_cloud { +- struct hmdfs_sb_info *sbi; +- const struct qstr *name; +- struct file *filp; +- __u32 hash; +- +- /* for case sensitive */ +- unsigned long bidx; +- struct hmdfs_dentry_group_cloud *page; +- +- /* for case insensitive */ +- struct hmdfs_dentry_cloud *insense_de; +- unsigned long insense_bidx; +- struct hmdfs_dentry_group_cloud *insense_page; +-}; +- +-void hmdfs_init_dcache_lookup_ctx_cloud( +- struct hmdfs_dcache_lookup_ctx_cloud *ctx, struct hmdfs_sb_info *sbi, +- const struct qstr *qstr, struct file *filp); +-struct hmdfs_dentry_cloud * +-hmdfs_find_dentry_cloud(struct dentry *child_dentry, +- struct hmdfs_dcache_lookup_ctx_cloud *ctx); +-#endif +diff --git a/fs/hmdfs/hmdfs_device_view.h b/fs/hmdfs/hmdfs_device_view.h +deleted file mode 100644 +index 6b228da9a..000000000 +--- a/fs/hmdfs/hmdfs_device_view.h ++++ /dev/null +@@ -1,263 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_device_view.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#ifndef HMDFS_DEVICE_VIEW_H +-#define HMDFS_DEVICE_VIEW_H +- +-#include "hmdfs.h" +- +-/***************************************************************************** +- * macro definition +- *****************************************************************************/ +- +-#define DEVICE_VIEW_ROOT "device_view" +-#define MERGE_VIEW_ROOT "merge_view" +-#define CLOUD_MERGE_VIEW_ROOT "cloud_merge_view" +-#define UPDATE_LOCAL_DST "/device_view/local/" +-#define UPDATE_CLOUD_DST "/device_view/cloud/" +- +-#define DEVICE_VIEW_LOCAL "local" +-#define DEVICE_VIEW_CLOUD "cloud" +-#define CLOUD_CID "cloud" +-#define CLOUD_DEVICE (1) +- +-/* +- * in order to distinguish from vfs, we define our own bitmask; this should +- * be converted to the vfs bitmask when calling vfs apis +- */ +-#define HMDFS_LOOKUP_REVAL 0x1 +- +-enum HMDFS_FILE_TYPE { +- HM_REG = 0, +- HM_SYMLINK = 1, +- HM_SHARE = 2, +- +- HM_MAX_FILE_TYPE = 0XFF +-}; +- +-struct bydev_inode_info { +- struct inode *lower_inode; +- uint64_t ino; +-}; +- +-struct hmdfs_dentry_info { +- struct path lower_path; +- unsigned long time; +- struct list_head cache_list_head; +- spinlock_t cache_list_lock; +- struct list_head remote_cache_list_head; +- struct mutex remote_cache_list_lock; +- __u8 file_type; +- __u8 dentry_type; +- uint64_t device_id; +- spinlock_t lock; +- struct mutex cache_pull_lock; +- int async_readdir_in_progress; +-}; +- +-struct hmdfs_lookup_ret { +- uint64_t i_size; +- uint64_t i_mtime; +- uint32_t i_mtime_nsec; +- uint16_t i_mode; +- uint64_t i_ino; +-}; +- +-struct hmdfs_getattr_ret { +- /* +- * if stat->result_mask is 0, it means this remote getattr failed during +- * lookup, see details in hmdfs_server_getattr. +- */ +- struct kstat stat; +- uint32_t i_flags; +- uint64_t fsid; +-}; +- +-extern int hmdfs_remote_getattr(struct hmdfs_peer *conn, struct dentry *dentry, +- unsigned int lookup_flags, +- struct hmdfs_getattr_ret **getattr_result); +- +-/***************************************************************************** +- * local/remote inode/file operations +- *****************************************************************************/ +- +-extern const struct dentry_operations hmdfs_dops; +-extern const struct dentry_operations hmdfs_dev_dops; +- +-/* local device operation */ +-extern const struct inode_operations hmdfs_file_iops_local; +-extern const struct file_operations hmdfs_file_fops_local; +-extern const struct inode_operations hmdfs_dir_inode_ops_local; +-extern const struct file_operations hmdfs_dir_ops_local; +-extern const struct file_operations hmdfs_dir_ops_share; +-extern const struct inode_operations hmdfs_symlink_iops_local; +-extern const struct inode_operations hmdfs_dir_inode_ops_share; +- +-/* remote device operation */ +-extern const struct inode_operations hmdfs_dev_file_iops_remote; +-extern const struct file_operations hmdfs_dev_file_fops_remote; +-extern const struct address_space_operations hmdfs_dev_file_aops_remote; +-extern const struct inode_operations hmdfs_dev_dir_inode_ops_remote; +-extern const struct file_operations hmdfs_dev_dir_ops_remote; +- +-/* cloud device operation */ +-extern const struct inode_operations hmdfs_dev_file_iops_cloud; +-extern const struct file_operations hmdfs_dev_file_fops_cloud; +-extern const struct address_space_operations hmdfs_dev_file_aops_cloud; +-extern const struct address_space_operations hmdfs_aops_cloud; +-extern const struct inode_operations hmdfs_dev_dir_inode_ops_cloud; +-extern const struct file_operations hmdfs_dev_dir_ops_cloud; +-extern 
int hmdfs_dev_unlink_from_con(struct hmdfs_peer *conn, +- struct dentry *dentry); +-extern int hmdfs_dev_readdir_from_con(struct hmdfs_peer *con, struct file *file, +- struct dir_context *ctx); +-int hmdfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); +-int hmdfs_rmdir(struct inode *dir, struct dentry *dentry); +-int hmdfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, +- bool want_excl); +-int hmdfs_unlink(struct inode *dir, struct dentry *dentry); +-int hmdfs_remote_unlink(struct hmdfs_peer *conn, struct dentry *dentry); +-int hmdfs_rename(struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags); +-loff_t hmdfs_file_llseek_local(struct file *file, loff_t offset, int whence); +- +-ssize_t hmdfs_do_read_iter(struct file *file, struct iov_iter *iter, +- loff_t *ppos); +-ssize_t hmdfs_do_write_iter(struct file *file, struct iov_iter *iter, +- loff_t *ppos); +- +-int hmdfs_file_release_local(struct inode *inode, struct file *file); +-int hmdfs_file_mmap_local(struct file *file, struct vm_area_struct *vma); +-struct dentry *hmdfs_lookup(struct inode *parent_inode, +- struct dentry *child_dentry, unsigned int flags); +-struct dentry *hmdfs_lookup_local(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags); +-struct dentry *hmdfs_lookup_remote(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags); +-int hmdfs_symlink_local(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, +- const char *symname); +-int hmdfs_fsync_local(struct file *file, loff_t start, loff_t end, +- int datasync); +-int hmdfs_symlink(struct inode *dir, struct dentry *dentry, +- const char *symname); +-int hmdfs_fsync(struct file *file, loff_t start, loff_t end, int datasync); +- +-/***************************************************************************** +- * common functions declaration +- *****************************************************************************/ +- +-static inline struct hmdfs_dentry_info *hmdfs_d(struct dentry *dentry) +-{ +- return dentry->d_fsdata; +-} +- +-static inline bool hm_isreg(uint8_t file_type) +-{ +- return (file_type == HM_REG); +-} +- +-static inline bool hm_islnk(uint8_t file_type) +-{ +- return (file_type == HM_SYMLINK); +-} +- +-static inline bool hm_isshare(uint8_t file_type) +-{ +- return (file_type == HM_SHARE); +-} +- +-struct inode *fill_inode_remote(struct super_block *sb, struct hmdfs_peer *con, +- struct hmdfs_lookup_ret *lookup_result, +- struct inode *dir); +-struct hmdfs_lookup_ret *get_remote_inode_info(struct hmdfs_peer *con, +- struct dentry *dentry, +- unsigned int flags); +-void hmdfs_set_time(struct dentry *dentry, unsigned long time); +-struct inode *fill_inode_local(struct super_block *sb, +- struct inode *lower_inode, const char *name); +-struct inode *fill_root_inode(struct super_block *sb, +- struct hmdfs_sb_info *sbi, struct inode *lower_inode); +-struct inode *fill_device_inode(struct super_block *sb, +- struct inode *lower_inode); +-struct hmdfs_lookup_ret *hmdfs_lookup_by_con(struct hmdfs_peer *con, +- struct dentry *dentry, +- struct qstr *qstr, +- unsigned int flags, +- const char *relative_path); +-char *hmdfs_connect_path(const char *path, const char *name); +- +-char *hmdfs_get_dentry_relative_path(struct dentry *dentry); +-char *hmdfs_merge_get_dentry_relative_path(struct dentry *dentry); +-char *hmdfs_get_dentry_absolute_path(const char *rootdir, +- const char *relative_path); 
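The path helpers declared just above compose paths the same way hmdfs_open_link() in hmdfs_server.c does: allocate strlen(root) + strlen(relative) + 2 bytes, then print "%s%s" into the buffer. A userspace sketch of that join under the same bound check (malloc/snprintf stand in for the kernel's kzalloc/sprintf, and the sample root is a hypothetical mount point):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PATH_MAX 4096

/* Join rootdir and a relative path; returns NULL on overflow or OOM. */
static char *absolute_path(const char *rootdir, const char *relative)
{
	size_t len = strlen(rootdir) + strlen(relative) + 2;
	char *buf;

	if (len > SKETCH_PATH_MAX)
		return NULL;
	buf = malloc(len);
	if (!buf)
		return NULL;
	snprintf(buf, len, "%s%s", rootdir, relative);
	return buf;
}

int main(void)
{
	char *p = absolute_path("/mnt/hmdfs/device_view/local", "/photos/1.jpg");

	if (p) {
		puts(p);
		free(p);
	}
	return 0;
}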
+-int hmdfs_convert_lookup_flags(unsigned int hmdfs_flags, +- unsigned int *vfs_flags); +-static inline void hmdfs_get_lower_path(struct dentry *dent, struct path *pname) +-{ +- spin_lock(&hmdfs_d(dent)->lock); +- pname->dentry = hmdfs_d(dent)->lower_path.dentry; +- pname->mnt = hmdfs_d(dent)->lower_path.mnt; +- path_get(pname); +- spin_unlock(&hmdfs_d(dent)->lock); +-} +- +-static inline void hmdfs_put_lower_path(struct path *pname) +-{ +- path_put(pname); +-} +- +-static inline void hmdfs_put_reset_lower_path(struct dentry *dent) +-{ +- struct path pname; +- +- spin_lock(&hmdfs_d(dent)->lock); +- if (hmdfs_d(dent)->lower_path.dentry) { +- pname.dentry = hmdfs_d(dent)->lower_path.dentry; +- pname.mnt = hmdfs_d(dent)->lower_path.mnt; +- hmdfs_d(dent)->lower_path.dentry = NULL; +- hmdfs_d(dent)->lower_path.mnt = NULL; +- spin_unlock(&hmdfs_d(dent)->lock); +- path_put(&pname); +- } else { +- spin_unlock(&hmdfs_d(dent)->lock); +- } +-} +- +-static inline void hmdfs_set_lower_path(struct dentry *dent, struct path *pname) +-{ +- spin_lock(&hmdfs_d(dent)->lock); +- hmdfs_d(dent)->lower_path.dentry = pname->dentry; +- hmdfs_d(dent)->lower_path.mnt = pname->mnt; +- spin_unlock(&hmdfs_d(dent)->lock); +-} +- +-/* Only regular files under HMDFS_LAYER_OTHER_* support xattr */ +-static inline bool hmdfs_support_xattr(struct dentry *dentry) +-{ +- struct inode *inode = d_inode(dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- if (info->inode_type != HMDFS_LAYER_OTHER_LOCAL && +- info->inode_type != HMDFS_LAYER_OTHER_REMOTE && +- info->inode_type != HMDFS_LAYER_OTHER_MERGE && +- info->inode_type != HMDFS_LAYER_OTHER_MERGE_CLOUD) +- return false; +- +- if (info->inode_type == HMDFS_LAYER_OTHER_LOCAL && +- hm_islnk(hmdfs_d(dentry)->file_type)) +- return false; +- +- return true; +-} +- +-int init_hmdfs_dentry_info(struct hmdfs_sb_info *sbi, struct dentry *dentry, +- int dentry_type); +- +-#endif +diff --git a/fs/hmdfs/hmdfs_merge_view.h b/fs/hmdfs/hmdfs_merge_view.h +deleted file mode 100644 +index 940741a5b..000000000 +--- a/fs/hmdfs/hmdfs_merge_view.h ++++ /dev/null +@@ -1,241 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_merge_view.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef HMDFS_MERGE_VIEW_H +-#define HMDFS_MERGE_VIEW_H +- +-#include "hmdfs.h" +- +-#include "comm/connection.h" +-#include +-#include +-#include +-#include +- +-/***************************************************************************** +- * Dentries for the merge view and their comrades. +- * A dentry's lower dentry is named COMRADE. 
+- *****************************************************************************/ +- +-struct merge_lookup_work { +- char *name; +- int devid; +- unsigned int flags; +- struct hmdfs_sb_info *sbi; +- wait_queue_head_t *wait_queue; +- struct work_struct work; +-}; +- +-struct hmdfs_dentry_info_merge { +- unsigned long ctime; +- int type; +- int work_count; +- struct mutex work_lock; +- wait_queue_head_t wait_queue; +- __u8 dentry_type; +- struct mutex comrade_list_lock; +- struct list_head comrade_list; +-}; +- +-struct hmdfs_dentry_comrade { +- uint64_t dev_id; +- struct dentry *lo_d; +- struct list_head list; +-}; +- +-enum FILE_CMD_MERGE { +- F_MKDIR_MERGE = 0, +- F_CREATE_MERGE = 1, +-}; +- +-struct hmdfs_recursive_para { +- bool is_last; +- int opcode; +- umode_t mode; +- bool want_excl; +- const char *name; +-}; +- +-struct hmdfs_rename_para { +- struct inode *old_dir; +- struct dentry *old_dentry; +- struct inode *new_dir; +- struct dentry *new_dentry; +- unsigned int flags; +-}; +- +-static inline struct hmdfs_dentry_info_merge *hmdfs_dm(struct dentry *dentry) +-{ +- return dentry->d_fsdata; +-} +- +-static inline umode_t hmdfs_cm(struct hmdfs_dentry_comrade *comrade) +-{ +- return d_inode(comrade->lo_d)->i_mode; +-} +- +-static inline bool comrade_is_local(struct hmdfs_dentry_comrade *comrade) +-{ +- return comrade->dev_id == HMDFS_DEVID_LOCAL; +-} +- +-struct hmdfs_cache_entry *allocate_entry(const char *name, int namelen, +- int d_type); +- +-struct dentry *hmdfs_lookup_cloud_merge(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags); +- +-struct dentry *hmdfs_lookup_merge(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags); +-struct hmdfs_file_info * +-get_next_hmdfs_file_info(struct hmdfs_file_info *fi_head, int device_id); +- +-struct hmdfs_file_info *get_hmdfs_file_info(struct hmdfs_file_info *fi_head, +- int device_id); +-int insert_filename(struct rb_root *root, struct hmdfs_cache_entry **new_entry); +-struct hmdfs_dentry_comrade *alloc_comrade(struct dentry *lo_d, int dev_id); +-int check_filename(const char *name, int len); +-int init_hmdfs_dentry_info_merge(struct hmdfs_sb_info *sbi, +- struct dentry *dentry); +-void hmdfs_init_recursive_para(struct hmdfs_recursive_para *rec_op_para, +- int opcode, mode_t mode, bool want_excl, +- const char *name); +-void link_comrade(struct list_head *onstack_comrades_head, +- struct hmdfs_dentry_comrade *comrade); +-void update_inode_attr(struct inode *inode, struct dentry *child_dentry); +-int get_num_comrades(struct dentry *dentry); +-void assign_comrades_unlocked(struct dentry *child_dentry, +- struct list_head *onstack_comrades_head); +-struct hmdfs_dentry_comrade *lookup_comrade(struct path lower_path, +- const char *d_name, +- int dev_id, +- unsigned int flags); +-bool is_valid_comrade(struct hmdfs_dentry_info_merge *mdi, umode_t mode); +-int merge_lookup_async(struct hmdfs_dentry_info_merge *mdi, +- struct hmdfs_sb_info *sbi, int devid, +- const char *name, unsigned int flags); +-char *hmdfs_get_real_dname(struct dentry *dentry, int *devid, int *type); +-void lock_root_inode_shared(struct inode *root, bool *locked, bool *down); +-void restore_root_inode_sem(struct inode *root, bool locked, bool down); +-int hmdfs_getattr_merge(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags); +-int hmdfs_setattr_merge(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia); +-int hmdfs_rmdir_merge(struct inode *dir, 
struct dentry *dentry); +-int hmdfs_unlink_merge(struct inode *dir, struct dentry *dentry); +-int hmdfs_rename_merge(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags); +-int do_rename_merge(struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags); +- +-static inline void destroy_comrade(struct hmdfs_dentry_comrade *comrade) +-{ +- dput(comrade->lo_d); +- kfree(comrade); +-} +- +-void clear_comrades(struct dentry *dentry); +- +-static inline void link_comrade_unlocked(struct dentry *dentry, +- struct hmdfs_dentry_comrade *comrade) +-{ +- mutex_lock(&hmdfs_dm(dentry)->comrade_list_lock); +- link_comrade(&hmdfs_dm(dentry)->comrade_list, comrade); +- mutex_unlock(&hmdfs_dm(dentry)->comrade_list_lock); +-} +- +-void clear_comrades_locked(struct list_head *comrade_list); +- +-static inline bool is_comrade_list_empty(struct hmdfs_dentry_info_merge *mdi) +-{ +- bool ret; +- +- mutex_lock(&mdi->comrade_list_lock); +- ret = list_empty(&mdi->comrade_list); +- mutex_unlock(&mdi->comrade_list_lock); +- +- return ret; +-} +- +-static inline bool has_merge_lookup_work(struct hmdfs_dentry_info_merge *mdi) +-{ +- bool ret; +- +- mutex_lock(&mdi->work_lock); +- ret = (mdi->work_count != 0); +- mutex_unlock(&mdi->work_lock); +- +- return ret; +-} +- +-static inline bool is_merge_lookup_end(struct hmdfs_dentry_info_merge *mdi) +-{ +- bool ret; +- +- mutex_lock(&mdi->work_lock); +- ret = mdi->work_count == 0 || !is_comrade_list_empty(mdi); +- mutex_unlock(&mdi->work_lock); +- +- return ret; +-} +- +-void hmdfs_update_meta(struct inode *dir); +- +-#define for_each_comrade_locked(_dentry, _comrade) \ +- list_for_each_entry(_comrade, &(hmdfs_dm(_dentry)->comrade_list), list) +- +-#define hmdfs_trace_merge(_trace_func, _parent_inode, _child_dentry, err) \ +- { \ +- struct hmdfs_dentry_comrade *comrade; \ +- struct hmdfs_dentry_info_merge *dm = hmdfs_dm(_child_dentry); \ +- _trace_func(_parent_inode, _child_dentry, err); \ +- if (likely(dm)) { \ +- mutex_lock(&dm->comrade_list_lock); \ +- for_each_comrade_locked(_child_dentry, comrade) \ +- trace_hmdfs_show_comrade(_child_dentry, \ +- comrade->lo_d, \ +- comrade->dev_id); \ +- mutex_unlock(&dm->comrade_list_lock); \ +- } \ +- } +- +-/***************************************************************************** +- * Helper functions abstracting out comrade +- *****************************************************************************/ +- +-static inline bool hmdfs_i_merge(struct hmdfs_inode_info *hii) +-{ +- __u8 t = hii->inode_type; +- return t == HMDFS_LAYER_FIRST_MERGE || t == HMDFS_LAYER_OTHER_MERGE || +- t == HMDFS_LAYER_FIRST_MERGE_CLOUD || +- t == HMDFS_LAYER_OTHER_MERGE_CLOUD; +-} +- +-struct dentry *hmdfs_get_lo_d(struct dentry *dentry, int dev_id); +-struct dentry *hmdfs_get_fst_lo_d(struct dentry *dentry); +- +-/***************************************************************************** +- * Inode operations for the merge view +- *****************************************************************************/ +- +-extern const struct inode_operations hmdfs_file_iops_merge; +-extern const struct file_operations hmdfs_file_fops_merge; +-extern const struct inode_operations hmdfs_dir_iops_merge; +-extern const struct file_operations hmdfs_dir_fops_merge; +-extern const struct inode_operations hmdfs_file_iops_cloud_merge; +-extern const struct inode_operations hmdfs_dir_iops_cloud_merge; 
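is_merge_lookup_end() above is a classic condition predicate: a merge lookup is over once every per-device worker has finished (work_count == 0) or at least one comrade has been linked; the wait_queue field in hmdfs_dentry_info_merge suggests it is paired with wait_event()/wake_up(). A userspace pthreads sketch of the same wait, with a condition variable in place of the wait queue (all names here are hypothetical stand-ins):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct merge_lookup {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int work_count;    /* in-flight per-device lookups */
	bool have_comrade; /* stands in for !is_comrade_list_empty() */
};

/* Called by each worker when its lookup finishes. */
static void worker_done(struct merge_lookup *ml, bool found)
{
	pthread_mutex_lock(&ml->lock);
	ml->work_count--;
	if (found)
		ml->have_comrade = true;
	pthread_cond_broadcast(&ml->cond); /* wake_up() analogue */
	pthread_mutex_unlock(&ml->lock);
}

/* wait_event() analogue: block until the lookup is decided. */
static void wait_merge_lookup_end(struct merge_lookup *ml)
{
	pthread_mutex_lock(&ml->lock);
	while (ml->work_count != 0 && !ml->have_comrade)
		pthread_cond_wait(&ml->cond, &ml->lock);
	pthread_mutex_unlock(&ml->lock);
}

static void *worker(void *arg)
{
	worker_done(arg, true);
	return NULL;
}

int main(void)
{
	struct merge_lookup ml = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.work_count = 1,
	};
	pthread_t t;

	pthread_create(&t, NULL, worker, &ml);
	wait_merge_lookup_end(&ml);
	pthread_join(t, NULL);
	printf("lookup ended, comrade=%d\n", ml.have_comrade);
	return 0;
}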
+-extern const struct dentry_operations hmdfs_dops_merge; +- +-/***************************************************************************** +- * dentry cache for the merge view +- *****************************************************************************/ +-extern struct kmem_cache *hmdfs_dentry_merge_cachep; +- +-#endif // HMDFS_MERGE_H +diff --git a/fs/hmdfs/hmdfs_server.c b/fs/hmdfs/hmdfs_server.c +deleted file mode 100644 +index b1217f476..000000000 +--- a/fs/hmdfs/hmdfs_server.c ++++ /dev/null +@@ -1,2125 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/hmdfs_server.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include "hmdfs_server.h" +- +-#include +-#include +-#include +-#include +-#include +- +-#include "authority/authentication.h" +-#include "hmdfs.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_share.h" +-#include "hmdfs_trace.h" +-#include "server_writeback.h" +-#include "comm/node_cb.h" +- +-#define HMDFS_MAX_HIDDEN_DIR 1 +- +-struct hmdfs_open_info { +- struct file *file; +- struct inode *inode; +- bool stat_valid; +- struct kstat stat; +- uint64_t real_ino; +- int file_id; +-}; +- +-static void find_first_no_slash(const char **name, int *len) +-{ +- const char *s = *name; +- int l = *len; +- +- while (l > 0 && *s == '/') { +- s++; +- l--; +- } +- +- *name = s; +- *len = l; +-} +- +-static void find_first_slash(const char **name, int *len) +-{ +- const char *s = *name; +- int l = *len; +- +- while (l > 0 && *s != '/') { +- s++; +- l--; +- } +- +- *name = s; +- *len = l; +-} +- +-static bool path_contain_dotdot(const char *name, int len) +-{ +- while (true) { +- find_first_no_slash(&name, &len); +- +- if (len == 0) +- return false; +- +- if (len >= 2 && name[0] == '.' && name[1] == '.' && +- (len == 2 || name[2] == '/')) +- return true; +- +- find_first_slash(&name, &len); +- } +-} +- +-static int insert_file_into_conn(struct hmdfs_peer *conn, struct file *file) +-{ +- struct idr *idr = &(conn->file_id_idr); +- int ret; +- +- idr_preload(GFP_KERNEL); +- spin_lock(&(conn->file_id_lock)); +- ret = idr_alloc_cyclic(idr, file, 0, 0, GFP_NOWAIT); +- spin_unlock(&(conn->file_id_lock)); +- idr_preload_end(); +- return ret; +-} +- +-/* +- * get_file_from_conn - get file from conn by file_id. It should be noted that +- * an additional reference will be acquired for the returned file; the caller should +- * put it once the file is no longer used. 
+- */ +-static struct file *get_file_from_conn(struct hmdfs_peer *conn, __u32 file_id) +-{ +- struct file *file; +- struct idr *idr = &(conn->file_id_idr); +- +- rcu_read_lock(); +- file = idr_find(idr, file_id); +- if (file && !get_file_rcu(file)) +- file = NULL; +- rcu_read_unlock(); +- return file; +-} +- +-int remove_file_from_conn(struct hmdfs_peer *conn, __u32 file_id) +-{ +- spinlock_t *lock = &(conn->file_id_lock); +- struct idr *idr = &(conn->file_id_idr); +- struct file *file; +- +- spin_lock(lock); +- file = idr_remove(idr, file_id); +- spin_unlock(lock); +- +- if (!file) { +- return -ENOENT; +- } else { +- return 0; +- } +-} +- +-struct file *hmdfs_open_link(struct hmdfs_sb_info *sbi, +- const char *path) +-{ +- struct file *file; +- int err; +- const char *root_name = sbi->local_dst; +- char *real_path; +- int path_len; +- +- path_len = strlen(root_name) + strlen(path) + 2; +- if (path_len > PATH_MAX) { +- err = -EINVAL; +- return ERR_PTR(err); +- } +- real_path = kzalloc(path_len, GFP_KERNEL); +- if (!real_path) { +- err = -ENOMEM; +- return ERR_PTR(err); +- } +- +- sprintf(real_path, "%s%s", root_name, path); +- file = filp_open(real_path, O_RDWR | O_LARGEFILE, 0644); +- if (IS_ERR(file)) { +- hmdfs_info("filp_open failed: %ld", PTR_ERR(file)); +- } else { +- hmdfs_info("get file with magic %lu", +- file->f_inode->i_sb->s_magic); +- } +- +- kfree(real_path); +- return file; +-} +- +-struct file *hmdfs_open_path(struct hmdfs_sb_info *sbi, const char *path) +-{ +- struct path root_path; +- struct file *file; +- int err; +- const char *root_name = sbi->local_dst; +- +- err = kern_path(root_name, 0, &root_path); +- if (err) { +- hmdfs_info("kern_path failed: %d", err); +- return ERR_PTR(err); +- } +- file = file_open_root(&root_path, path, +- O_RDWR | O_LARGEFILE, 0644); +- path_put(&root_path); +- if (IS_ERR(file)) { +- hmdfs_err( +- "GRAPERR sb->s_readonly_remount %d sb_flag %lu", +- sbi->sb->s_readonly_remount, sbi->sb->s_flags); +- hmdfs_info("file_open_root failed: %ld", PTR_ERR(file)); +- } else { +- hmdfs_info("get file with magic %lu", +- file->f_inode->i_sb->s_magic); +- } +- return file; +-} +- +-inline void hmdfs_close_path(struct file *file) +-{ +- fput(file); +-} +- +-/* After going offline, the server closes all files opened by the client */ +-void hmdfs_server_offline_notify(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- int id; +- int count = 0; +- unsigned int next; +- struct file *filp = NULL; +- struct idr *idr = &conn->file_id_idr; +- +- /* wait for all async work to complete */ +- flush_workqueue(conn->req_handle_wq); +- flush_workqueue(conn->async_wq); +- +- /* If there are still open requests in processing, +- * we may need to close their files when the peer goes offline +- */ +- idr_for_each_entry(idr, filp, id) { +- hmdfs_debug("[%d]Server close: id=%d", count, id); +- hmdfs_close_path(filp); +- count++; +- if (count % HMDFS_IDR_RESCHED_COUNT == 0) +- cond_resched(); +- } +- +- hmdfs_clear_share_item_offline(conn); +- +- /* Reinitialize idr */ +- next = idr_get_cursor(idr); +- idr_destroy(idr); +- +- idr_init(idr); +- idr_set_cursor(idr, next); +- +- /* Make old file ids stale */ +- conn->fid_cookie++; +-} +- +-static struct hmdfs_node_cb_desc server_cb[] = { +- { +- .evt = NODE_EVT_OFFLINE, +- .sync = true, +- .fn = hmdfs_server_offline_notify +- }, +-}; +- +-void __init hmdfs_server_add_node_evt_cb(void) +-{ +- hmdfs_node_add_evt_cb(server_cb, ARRAY_SIZE(server_cb)); +-} +- +-static int hmdfs_get_inode_by_name(struct hmdfs_peer *con, const char *filename, +- uint64_t 
*ino) +-{ +- int ret = 0; +- struct path root_path; +- struct path dst_path; +- struct inode *inode = NULL; +- +- ret = kern_path(con->sbi->local_dst, 0, &root_path); +- if (ret) { +- hmdfs_err("kern_path failed err = %d", ret); +- return ret; +- } +- +- ret = vfs_path_lookup(root_path.dentry, root_path.mnt, filename, 0, +- &dst_path); +- if (ret) { +- path_put(&root_path); +- return ret; +- } +- +- inode = d_inode(dst_path.dentry); +- if (con->sbi->sb == inode->i_sb) +- inode = hmdfs_i(inode)->lower_inode; +- *ino = generate_u64_ino(inode->i_ino, inode->i_generation); +- +- path_put(&dst_path); +- path_put(&root_path); +- +- return 0; +-} +- +-static const char *datasl_str[] = { +- "s0", "s1", "s2", "s3", "s4" +-}; +- +-static int parse_data_sec_level(const char *sl_value, size_t sl_value_len) +-{ +- int i; +- +- for (i = 0; i < sizeof(datasl_str) / sizeof(datasl_str[0]); i++) { +- if (!strncmp(sl_value, datasl_str[i], strlen(datasl_str[i]))) +- return i + DATA_SEC_LEVEL0; +- } +- +- return DATA_SEC_LEVEL3; +-} +- +-static int check_sec_level(struct hmdfs_peer *node, const char *file_name) +-{ +- int err; +- int ret = 0; +- struct path root_path; +- struct path file_path; +- char *value = NULL; +- size_t value_len = DATA_SEC_LEVEL_LENGTH; +- +- if (node->devsl <= 0) { +- ret = -EACCES; +- goto out_free; +- } +- +- value = kzalloc(value_len, GFP_KERNEL); +- if (!value) { +- ret = -ENOMEM; +- goto out_free; +- } +- +- err = kern_path(node->sbi->local_dst, LOOKUP_DIRECTORY, &root_path); +- if (err) { +- hmdfs_err("get root path error"); +- ret = err; +- goto out_free; +- } +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, file_name, 0, +- &file_path); +- if (err) { +- hmdfs_err("get file path error"); +- ret = err; +- goto out_err; +- } +- +- err = vfs_getxattr(&nop_mnt_idmap, file_path.dentry, DATA_SEC_LEVEL_LABEL, value, +- value_len); +- if (err <= 0 && node->devsl >= DATA_SEC_LEVEL3) +- goto out; +- if (err > 0 && node->devsl >= parse_data_sec_level(value, err)) +- goto out; +- +- ret = -EACCES; +-out: +- path_put(&file_path); +-out_err: +- path_put(&root_path); +-out_free: +- kfree(value); +- return ret; +-} +- +-static struct file *hmdfs_open_file(struct hmdfs_peer *con, +- const char *filename, uint8_t file_type, +- int *file_id) +-{ +- struct file *file = NULL; +- int err = 0; +- int id; +- +- if (!filename) { +- hmdfs_err("filename is NULL"); +- return ERR_PTR(-EINVAL); +- } +- +- if (check_sec_level(con, filename)) { +- hmdfs_err("devsl permission denied"); +- return ERR_PTR(-EACCES); +- } +- +- if (hm_isshare(file_type)) { +- err = hmdfs_check_share_access_permission(con->sbi, +- filename, con->cid); +- if (err) +- return ERR_PTR(err); +- } +- +- if (hm_islnk(file_type)) +- file = hmdfs_open_link(con->sbi, filename); +- else +- file = hmdfs_open_path(con->sbi, filename); +- +- if (IS_ERR(file)) { +- reset_item_opened_status(con->sbi, filename); +- return file; +- } +- +- get_file(file); +- id = insert_file_into_conn(con, file); +- if (id < 0) { +- hmdfs_err("file_id alloc failed! 
err=%d", id); +- reset_item_opened_status(con->sbi, filename); +- hmdfs_close_path(file); +- hmdfs_close_path(file); +- return ERR_PTR(id); +- } +- *file_id = id; +- +- return file; +-} +- +-static struct hmdfs_time_t msec_to_timespec(unsigned int msec) +-{ +- struct hmdfs_time_t timespec = { +- .tv_sec = msec / MSEC_PER_SEC, +- .tv_nsec = (msec % MSEC_PER_SEC) * NSEC_PER_MSEC, +- }; +- +- return timespec; +-} +- +-static struct hmdfs_time_t hmdfs_current_kernel_time(void) +-{ +- struct hmdfs_time_t time; +- +-#if KERNEL_VERSION(4, 18, 0) < LINUX_VERSION_CODE +- ktime_get_coarse_real_ts64(&time); +-#else +- time = current_kernel_time(); +-#endif +- return time; +-} +- +-/* +- * Generate fid version like following format: +- * +- * | boot cookie | con cookie | +- * |---------------------|-------------| +- * 49 15 (bits) +- */ +-static uint64_t hmdfs_server_pack_fid_ver(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd) +-{ +- uint64_t boot_cookie = con->sbi->boot_cookie; +- uint16_t con_cookie = con->fid_cookie; +- +- return (boot_cookie | +- (con_cookie & ((1 << HMDFS_FID_VER_BOOT_COOKIE_SHIFT) - 1))); +-} +- +-static struct file *get_file_by_fid_and_ver(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, +- __u32 file_id, __u64 file_ver) +-{ +- struct file *file = NULL; +- __u64 cur_file_ver = hmdfs_server_pack_fid_ver(con, cmd); +- +- if (file_ver != cur_file_ver) { +- hmdfs_warning("Stale file version %llu for fid %u", +- file_ver, file_id); +- return ERR_PTR(-EBADF); +- } +- +- file = get_file_from_conn(con, file_id); +- if (!file) +- return ERR_PTR(-EBADF); +- +- return file; +-} +- +-static void hmdfs_update_open_response(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, +- struct hmdfs_open_info *info, +- struct open_response *resp) +-{ +- struct hmdfs_time_t current_time = hmdfs_current_kernel_time(); +- struct hmdfs_time_t ctime = info->stat_valid ? info->stat.ctime : +- info->inode->__i_ctime; +- struct hmdfs_time_t precision = +- msec_to_timespec(con->sbi->dcache_precision); +- loff_t size = info->stat_valid ? info->stat.size : +- i_size_read(info->inode); +- +- resp->ino = cpu_to_le64(info->real_ino); +- resp->file_ver = cpu_to_le64(hmdfs_server_pack_fid_ver(con, cmd)); +- resp->file_id = cpu_to_le32(info->file_id); +- resp->file_size = cpu_to_le64(size); +- resp->ctime = cpu_to_le64(ctime.tv_sec); +- resp->ctime_nsec = cpu_to_le32(ctime.tv_nsec); +- +- /* +- * In server, ctime might stay the same after coverwrite. We introduce a +- * new value stable_ctime to handle the problem. 
+- * - if open rpc time < ctime, stable_ctime = 0; +- * - if ctime <= open rpc time < ctime + dcache_precision, stable_ctime +- * = ctime +- * - else, stable_ctime = ctime + dcache_precision; +- */ +- precision = hmdfs_time_add(ctime, precision); +- if (hmdfs_time_compare(¤t_time, &ctime) < 0) { +- resp->stable_ctime = cpu_to_le64(0); +- resp->stable_ctime_nsec = cpu_to_le32(0); +- } else if (hmdfs_time_compare(¤t_time, &ctime) >= 0 && +- hmdfs_time_compare(¤t_time, &precision) < 0) { +- resp->stable_ctime = resp->ctime; +- resp->stable_ctime_nsec = resp->ctime_nsec; +- } else { +- resp->stable_ctime = cpu_to_le64(precision.tv_sec); +- resp->stable_ctime_nsec = cpu_to_le32(precision.tv_nsec); +- } +-} +- +-static int hmdfs_get_open_info(struct hmdfs_peer *con, uint8_t file_type, +- const char *filename, +- struct hmdfs_open_info *info) +-{ +- int ret = 0; +- +- info->inode = file_inode(info->file); +- info->stat_valid = false; +- if (con->sbi->sb == info->inode->i_sb) { +- /* if open a regular file */ +- info->inode = hmdfs_i(info->inode)->lower_inode; +- } else if (con->sbi->lower_sb != info->inode->i_sb) { +- /* It's possible that inode is not from lower, for example: +- * 1. touch /f2fs/file +- * 2. ln -s /sdcard_fs/file /f2fs/link +- * 3. cat /hmdfs/link -> generate dentry cache in sdcard_fs +- * 4. echo hi >> /hmdfs/file -> append write not through +- * sdcard_fs +- * 5. cat /hmdfs/link -> got inode in sdcard, which size is +- * still 0 +- * +- * If src file isn't in lower, use getattr to get +- * information. +- */ +- ret = vfs_getattr(&info->file->f_path, &info->stat, STATX_BASIC_STATS | STATX_BTIME, +- 0); +- if (ret) { +- hmdfs_err("call vfs_getattr failed, err %d", ret); +- return ret; +- } +- info->stat_valid = true; +- } +- +- if (hm_islnk(file_type)) { +- ret = hmdfs_get_inode_by_name(con, filename, &info->real_ino); +- if (ret) +- return ret; +- } else { +- info->real_ino = generate_u64_ino(info->inode->i_ino, +- info->inode->i_generation); +- } +- return 0; +-} +- +-void hmdfs_server_open(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct open_request *recv = data; +- int sizeread = sizeof(struct open_response); +- struct open_response *resp = NULL; +- struct hmdfs_open_info *info = NULL; +- int ret = 0; +- +- trace_hmdfs_server_open_enter(con, recv); +- +- resp = kzalloc(sizeread, GFP_KERNEL); +- info = kmalloc(sizeof(*info), GFP_KERNEL); +- if (!resp || !info) { +- ret = -ENOMEM; +- goto err_free; +- } +- +- if (path_contain_dotdot(recv->buf, recv->path_len)) { +- ret = -EINVAL; +- goto err_free; +- } +- +- info->file = hmdfs_open_file(con, recv->buf, recv->file_type, +- &info->file_id); +- if (IS_ERR(info->file)) { +- ret = PTR_ERR(info->file); +- goto err_free; +- } +- +- ret = hmdfs_get_open_info(con, recv->file_type, recv->buf, info); +- if (ret) +- goto err_close; +- +- hmdfs_update_open_response(con, cmd, info, resp); +- +- trace_hmdfs_server_open_exit(con, resp, info->file, 0); +- ret = hmdfs_sendmessage_response(con, cmd, sizeread, resp, 0); +- if (ret) { +- hmdfs_err("sending msg response failed, file_id %d, err %d", +- info->file_id, ret); +- remove_file_from_conn(con, info->file_id); +- hmdfs_close_path(info->file); +- } +- hmdfs_close_path(info->file); +- kfree(resp); +- kfree(info); +- return; +- +-err_close: +- hmdfs_close_path(info->file); +- remove_file_from_conn(con, info->file_id); +- hmdfs_close_path(info->file); +-err_free: +- kfree(resp); +- kfree(info); +- trace_hmdfs_server_open_exit(con, NULL, NULL, ret); +- 
hmdfs_send_err_response(con, cmd, ret); +-} +- +-static int hmdfs_check_and_create(struct path *path_parent, +- struct dentry *dentry, uint64_t device_id, +- umode_t mode, bool is_excl) +-{ +- int err = 0; +- +- /* if inode doesn't exist, create it */ +- if (d_is_negative(dentry)) { +- hmdfs_mark_drop_flag(device_id, path_parent->dentry); +- err = vfs_create(&nop_mnt_idmap, d_inode(path_parent->dentry), dentry, mode, +- is_excl); +- if (err) +- hmdfs_err("create failed, err %d", err); +- } else { +- if (is_excl) +- err = -EEXIST; +- else if (S_ISREG(d_inode(dentry)->i_mode) && +- hm_islnk(hmdfs_d(dentry)->file_type)) +- err = -EINVAL; +- else if (S_ISDIR(d_inode(dentry)->i_mode)) +- err = -EISDIR; +- } +- +- return err; +-} +-static int hmdfs_lookup_create(struct hmdfs_peer *con, +- struct atomic_open_request *recv, +- struct path *child_path, bool *truncate) +-{ +- int err = 0; +- struct path path_root; +- struct path path_parent; +- uint32_t open_flags = le32_to_cpu(recv->open_flags); +- char *path = recv->buf; +- char *filename = recv->buf + le32_to_cpu(recv->path_len) + 1; +- struct dentry *dentry = NULL; +- +- err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &path_root); +- if (err) { +- hmdfs_err("no path for %s, err %d", con->sbi->local_dst, err); +- return err; +- } +- +- err = vfs_path_lookup(path_root.dentry, path_root.mnt, path, +- LOOKUP_DIRECTORY, &path_parent); +- if (err) { +- hmdfs_info("no dir in %s, err %d", con->sbi->local_dst, err); +- goto put_path_root; +- } +- +- inode_lock(d_inode(path_parent.dentry)); +- dentry = lookup_one_len(filename, path_parent.dentry, strlen(filename)); +- if (IS_ERR(dentry)) { +- err = PTR_ERR(dentry); +- inode_unlock(d_inode(path_parent.dentry)); +- goto put_path_parent; +- } +- /* only truncate if inode already exists */ +- *truncate = ((open_flags & HMDFS_O_TRUNC) && d_is_positive(dentry)); +- err = hmdfs_check_and_create(&path_parent, dentry, con->device_id, +- le16_to_cpu(recv->mode), +- open_flags & HMDFS_O_EXCL); +- inode_unlock(d_inode(path_parent.dentry)); +- if (err) { +- dput(dentry); +- } else { +- child_path->dentry = dentry; +- child_path->mnt = mntget(path_parent.mnt); +- } +- +-put_path_parent: +- path_put(&path_parent); +-put_path_root: +- path_put(&path_root); +- return err; +-} +- +-static int hmdfs_dentry_open(struct hmdfs_peer *con, +- const struct path *path, +- struct hmdfs_open_info *info) +-{ +- int err = 0; +- +- info->file = dentry_open(path, O_RDWR | O_LARGEFILE, current_cred()); +- if (IS_ERR(info->file)) { +- err = PTR_ERR(info->file); +- hmdfs_err("open file failed, err %d", err); +- return err; +- } +- +- get_file(info->file); +- info->file_id = insert_file_into_conn(con, info->file); +- if (info->file_id < 0) { +- err = info->file_id; +- hmdfs_err("file_id alloc failed! 
err %d", err); +- hmdfs_close_path(info->file); +- hmdfs_close_path(info->file); +- return err; +- } +- +- return 0; +-} +- +-static int hmdfs_server_do_atomic_open(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, +- struct atomic_open_request *recv, +- struct hmdfs_open_info *info, +- struct atomic_open_response *resp) +-{ +- struct path child_path; +- bool truncate = false; +- int err = 0; +- +- err = hmdfs_lookup_create(con, recv, &child_path, &truncate); +- if (err) +- return err; +- +- err = hmdfs_dentry_open(con, &child_path, info); +- if (err) +- goto put_child; +- +- err = hmdfs_get_open_info(con, HM_REG, NULL, info); +- if (err) +- goto fail_close; +- +- if (truncate) { +- err = vfs_truncate(&child_path, 0); +- if (err) { +- hmdfs_err("truncate failed, err %d", err); +- goto fail_close; +- } +- } +- hmdfs_update_open_response(con, cmd, info, &resp->open_resp); +- resp->i_mode = cpu_to_le16(file_inode(info->file)->i_mode); +- +-fail_close: +- if (err) { +- remove_file_from_conn(con, info->file_id); +- hmdfs_close_path(info->file); +- hmdfs_close_path(info->file); +- } +-put_child: +- path_put(&child_path); +- return err; +-} +- +-void hmdfs_server_atomic_open(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data) +-{ +- int err; +- struct atomic_open_request *recv = data; +- struct atomic_open_response *resp = NULL; +- struct hmdfs_open_info *info = NULL; +- char *file_path = recv->buf; +- char *file = recv->buf + recv->path_len + 1; +- +- if (path_contain_dotdot(file_path, recv->path_len)) { +- err = -EINVAL; +- goto out; +- } +- if (path_contain_dotdot(file, recv->file_len)) { +- err = -EINVAL; +- goto out; +- } +- +- info = kmalloc(sizeof(*info), GFP_KERNEL); +- resp = kzalloc(sizeof(*resp), GFP_KERNEL); +- if (!resp || !info) { +- err = -ENOMEM; +- goto out; +- } +- +- err = hmdfs_server_do_atomic_open(con, cmd, recv, info, resp); +- +-out: +- if (err) { +- hmdfs_send_err_response(con, cmd, err); +- } else { +- err = hmdfs_sendmessage_response(con, cmd, sizeof(*resp), resp, +- 0); +- if (err) { +- hmdfs_err("sending msg response failed, file_id %d, err %d", +- info->file_id, err); +- remove_file_from_conn(con, info->file_id); +- hmdfs_close_path(info->file); +- } +- } +- kfree(info); +- kfree(resp); +-} +- +-void hmdfs_server_release(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct release_request *release_recv = data; +- struct file *file = NULL; +- __u32 file_id; +- __u64 file_ver; +- int ret = 0; +- +- file_id = le32_to_cpu(release_recv->file_id); +- file_ver = le64_to_cpu(release_recv->file_ver); +- file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver); +- if (IS_ERR(file)) { +- hmdfs_err("cannot find %u", file_id); +- ret = PTR_ERR(file); +- goto out; +- } +- +- if (hmdfs_is_share_file(file)) +- hmdfs_close_share_item(con->sbi, file, con->cid); +- +- /* put the reference acquired by get_file_by_fid_and_ver() */ +- hmdfs_close_path(file); +- hmdfs_info("close %u", file_id); +- ret = remove_file_from_conn(con, file_id); +- if (ret) { +- hmdfs_err("cannot find after close %u", file_id); +- goto out; +- } +- +- hmdfs_close_path(file); +- +-out: +- trace_hmdfs_server_release(con, file_id, file_ver, ret); +- set_conn_sock_quickack(con); +-} +- +-void hmdfs_server_fsync(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct fsync_request *fsync_recv = data; +- __s32 datasync = le32_to_cpu(fsync_recv->datasync); +- __s64 start = le64_to_cpu(fsync_recv->start); +- __s64 end = 
le64_to_cpu(fsync_recv->end); +- struct file *file = NULL; +- __u32 file_id; +- __u64 file_ver; +- int ret = 0; +- +- file_id = le32_to_cpu(fsync_recv->file_id); +- file_ver = le64_to_cpu(fsync_recv->file_ver); +- file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver); +- if (IS_ERR(file)) { +- hmdfs_err("cannot find %u", file_id); +- ret = PTR_ERR(file); +- goto out; +- } +- +- ret = vfs_fsync_range(file, start, end, datasync); +- if (ret) +- hmdfs_err("fsync fail, ret %d", ret); +- +- hmdfs_close_path(file); +-out: +- hmdfs_send_err_response(con, cmd, ret); +-} +- +-void hmdfs_server_readpage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct readpage_request *readpage_recv = data; +- __u64 file_ver; +- __u32 file_id; +- struct file *file = NULL; +- loff_t pos; +- struct readpage_response *readpage = NULL; +- int ret = 0; +- size_t read_len; +- +- file_id = le32_to_cpu(readpage_recv->file_id); +- file_ver = le64_to_cpu(readpage_recv->file_ver); +- file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver); +- if (IS_ERR(file)) { +- hmdfs_info( +- "file with id %u does not exist, pgindex %llu, devid %llu", +- file_id, le64_to_cpu(readpage_recv->index), +- con->device_id); +- ret = PTR_ERR(file); +- goto fail; +- } +- +- read_len = (size_t)le32_to_cpu(readpage_recv->size); +- if (read_len == 0) +- goto fail_put_file; +- +- readpage = kmalloc(read_len, GFP_KERNEL); +- if (!readpage) { +- ret = -ENOMEM; +- goto fail_put_file; +- } +- +- pos = (loff_t)le64_to_cpu(readpage_recv->index) << HMDFS_PAGE_OFFSET; +- ret = kernel_read(file, readpage->buf, read_len, &pos); +- if (ret < 0) { +- hmdfs_send_err_response(con, cmd, -EIO); +- } else { +- if (ret != read_len) +- memset(readpage->buf + ret, 0, read_len - ret); +- hmdfs_sendmessage_response(con, cmd, read_len, readpage, 0); +- } +- +- hmdfs_close_path(file); +- kfree(readpage); +- return; +- +-fail_put_file: +- hmdfs_close_path(file); +-fail: +- hmdfs_send_err_response(con, cmd, ret); +-} +- +-static bool need_rebuild_dcache(struct hmdfs_dcache_header *h, +- struct hmdfs_time_t time, +- unsigned int precision) +-{ +- struct hmdfs_time_t crtime = { .tv_sec = le64_to_cpu(h->dcache_crtime), +- .tv_nsec = le64_to_cpu( +- h->dcache_crtime_nsec) }; +- struct hmdfs_time_t ctime = { .tv_sec = le64_to_cpu(h->dentry_ctime), +- .tv_nsec = le64_to_cpu( +- h->dentry_ctime_nsec) }; +- struct hmdfs_time_t pre_time = { .tv_sec = precision / MSEC_PER_SEC, +- .tv_nsec = precision % MSEC_PER_SEC * +- NSEC_PER_MSEC }; +- +- if (hmdfs_time_compare(&time, &ctime) != 0) +- return true; +- +- pre_time = hmdfs_time_add(time, pre_time); +- if (hmdfs_time_compare(&crtime, &pre_time) < 0) +- return true; +- +- return false; +-} +- +-static bool hmdfs_server_cache_validate(struct file *filp, struct inode *inode, +- unsigned long precision) +-{ +- struct hmdfs_dcache_header header; +- int overallpage; +- ssize_t bytes; +- loff_t pos = 0; +- +- overallpage = get_dentry_group_cnt(file_inode(filp)); +- if (overallpage == 0) { +- hmdfs_err("cache file size is 0"); +- return false; +- } +- +- bytes = kernel_read(filp, &header, sizeof(header), &pos); +- if (bytes != sizeof(header)) { +- hmdfs_err("read file failed, err:%zd", bytes); +- return false; +- } +- +- return !need_rebuild_dcache(&header, inode->__i_ctime, precision); +-} +- +-struct file *hmdfs_server_cache_revalidate(struct hmdfs_sb_info *sbi, +- const char *recvpath, +- struct path *path) +-{ +- struct cache_file_node *cfn = NULL; +- struct file *file; +- +- cfn = find_cfn(sbi, 
HMDFS_SERVER_CID, recvpath, true);
+-	if (!cfn)
+-		return NULL;
+-
+-	if (!hmdfs_server_cache_validate(cfn->filp, path->dentry->d_inode,
+-					 sbi->dcache_precision)) {
+-		remove_cfn(cfn);
+-		release_cfn(cfn);
+-		return NULL;
+-	}
+-	file = cfn->filp;
+-	get_file(cfn->filp);
+-	release_cfn(cfn);
+-
+-	return file;
+-}
+-
+-bool hmdfs_client_cache_validate(struct hmdfs_sb_info *sbi,
+-				 struct readdir_request *readdir_recv,
+-				 struct path *path)
+-{
+-	struct inode *inode = path->dentry->d_inode;
+-	struct hmdfs_dcache_header header;
+-
+-	/* always rebuild dentryfile for small dir */
+-	if (le64_to_cpu(readdir_recv->num) < sbi->dcache_threshold)
+-		return false;
+-
+-	header.dcache_crtime = readdir_recv->dcache_crtime;
+-	header.dcache_crtime_nsec = readdir_recv->dcache_crtime_nsec;
+-	header.dentry_ctime = readdir_recv->dentry_ctime;
+-	header.dentry_ctime_nsec = readdir_recv->dentry_ctime_nsec;
+-
+-	return !need_rebuild_dcache(&header, inode->__i_ctime,
+-				    sbi->dcache_precision);
+-}
+-
+-static char *server_lower_dentry_path_raw(struct hmdfs_peer *peer,
+-					  struct dentry *lo_d)
+-{
+-	struct hmdfs_dentry_info *di = hmdfs_d(peer->sbi->sb->s_root);
+-	struct dentry *lo_d_root = di->lower_path.dentry;
+-	struct dentry *lo_d_tmp = NULL;
+-	char *lo_p_buf = NULL;
+-	char *buf_head = NULL;
+-	char *buf_tail = NULL;
+-	size_t path_len = 0;
+-
+-	lo_p_buf = kzalloc(PATH_MAX, GFP_KERNEL);
+-	if (unlikely(!lo_p_buf))
+-		return ERR_PTR(-ENOMEM);
+-
+-	/* To generate a reversed path str */
+-	for (lo_d_tmp = lo_d; lo_d_tmp != lo_d_root && !IS_ROOT(lo_d_tmp);
+-	     lo_d_tmp = lo_d_tmp->d_parent) {
+-		u32 dlen = lo_d_tmp->d_name.len;
+-		int reverse_index = dlen - 1;
+-
+-		/* Considering the appended slash and '\0' */
+-		if (unlikely(path_len + dlen + 1 > PATH_MAX - 1)) {
+-			kfree(lo_p_buf);
+-			return ERR_PTR(-ENAMETOOLONG);
+-		}
+-		for (; reverse_index >= 0; --reverse_index)
+-			lo_p_buf[path_len++] =
+-				lo_d_tmp->d_name.name[reverse_index];
+-		lo_p_buf[path_len++] = '/';
+-	}
+-
+-	/* Reverse the reversed path str to get the real path str */
+-	for (buf_head = lo_p_buf, buf_tail = lo_p_buf + path_len - 1;
+-	     buf_head < buf_tail; ++buf_head, --buf_tail)
+-		swap(*buf_head, *buf_tail);
+-
+-	if (path_len == 0)
+-		lo_p_buf[0] = '/';
+-	return lo_p_buf;
+-}
+-
+-static int server_lookup(struct hmdfs_peer *peer, const char *req_path,
+-			 struct path *path)
+-{
+-	struct path root_path;
+-	int err = 0;
+-
+-	err = kern_path(peer->sbi->local_dst, 0, &root_path);
+-	if (err)
+-		goto out_noroot;
+-
+-	err = vfs_path_lookup(root_path.dentry, root_path.mnt, req_path,
+-			      LOOKUP_DIRECTORY, path);
+-	path_put(&root_path);
+-out_noroot:
+-	return err;
+-}
+-
+-/**
+- * server_lookup_lower - lookup lower file-system
+- * @peer: target device node
+- * @req_path: abs path (mount point as the root) from the request
+- * @lo_p: the lower path to return
+- *
+- * Return the lower path's name, with character case matched
+- */
+-static char *server_lookup_lower(struct hmdfs_peer *peer, const char *req_path,
+-				 struct path *lo_p)
+-{
+-	char *lo_p_name = ERR_PTR(-ENOENT);
+-	struct path up_p;
+-	int err = 0;
+-
+-	err = server_lookup(peer, req_path, &up_p);
+-	if (err)
+-		goto out;
+-
+-	hmdfs_get_lower_path(up_p.dentry, lo_p);
+-	path_put(&up_p);
+-
+-	lo_p_name = server_lower_dentry_path_raw(peer, lo_p->dentry);
+-	if (IS_ERR(lo_p_name)) {
+-		err = PTR_ERR(lo_p_name);
+-		path_put(lo_p);
+-	}
+-out:
+-	return err ?
ERR_PTR(err) : lo_p_name; +-} +- +-void hmdfs_server_readdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct readdir_request *readdir_recv = data; +- struct path lo_p; +- struct file *filp = NULL; +- int err = 0; +- unsigned long long num = 0; +- char *lo_p_name = NULL; +- +- trace_hmdfs_server_readdir(readdir_recv); +- +- if (path_contain_dotdot(readdir_recv->path, readdir_recv->path_len)) { +- err = -EINVAL; +- goto send_err; +- } +- +- lo_p_name = server_lookup_lower(con, readdir_recv->path, &lo_p); +- if (IS_ERR(lo_p_name)) { +- err = PTR_ERR(lo_p_name); +- hmdfs_info("Failed to get lower path: %d", err); +- goto send_err; +- } +- +- if (le32_to_cpu(readdir_recv->verify_cache)) { +- if (hmdfs_client_cache_validate(con->sbi, readdir_recv, &lo_p)) +- goto out_response; +- } +- +- filp = hmdfs_server_cache_revalidate(con->sbi, lo_p_name, &lo_p); +- if (IS_ERR_OR_NULL(filp)) { +- filp = hmdfs_server_rebuild_dents(con->sbi, &lo_p, &num, +- lo_p_name); +- if (IS_ERR_OR_NULL(filp)) { +- err = PTR_ERR(filp); +- goto err_lookup_path; +- } +- } +- +-out_response: +- err = hmdfs_readfile_response(con, cmd, filp); +- if (!err) +- hmdfs_add_remote_cache_list(con, lo_p_name); +- if (num >= con->sbi->dcache_threshold) +- cache_file_persistent(con, filp, lo_p_name, true); +- if (filp) +- fput(filp); +-err_lookup_path: +- path_put(&lo_p); +- kfree(lo_p_name); +-send_err: +- if (err) +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_mkdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- struct mkdir_request *mkdir_recv = data; +- struct inode *child_inode = NULL; +- struct dentry *dent = NULL; +- char *mkdir_dir = NULL; +- char *mkdir_name = NULL; +- struct hmdfs_inodeinfo_response *mkdir_resp = NULL; +- int respsize = sizeof(struct hmdfs_inodeinfo_response); +- int path_len = le32_to_cpu(mkdir_recv->path_len); +- +- mkdir_resp = kzalloc(respsize, GFP_KERNEL); +- if (!mkdir_resp) { +- err = -ENOMEM; +- goto mkdir_out; +- } +- +- mkdir_dir = mkdir_recv->path; +- mkdir_name = mkdir_recv->path + path_len + 1; +- if (path_contain_dotdot(mkdir_dir, mkdir_recv->path_len)) { +- err = -EINVAL; +- goto mkdir_out; +- } +- if (path_contain_dotdot(mkdir_name, mkdir_recv->name_len)) { +- err = -EINVAL; +- goto mkdir_out; +- } +- +- dent = hmdfs_root_mkdir(con->device_id, con->sbi->local_dst, +- mkdir_dir, mkdir_name, +- le16_to_cpu(mkdir_recv->mode)); +- if (IS_ERR(dent)) { +- err = PTR_ERR(dent); +- hmdfs_err("hmdfs_root_mkdir failed err = %d", err); +- goto mkdir_out; +- } +- child_inode = d_inode(dent); +- mkdir_resp->i_mode = cpu_to_le16(child_inode->i_mode); +- mkdir_resp->i_size = cpu_to_le64(child_inode->i_size); +- mkdir_resp->i_mtime = cpu_to_le64(child_inode->i_mtime.tv_sec); +- mkdir_resp->i_mtime_nsec = cpu_to_le32(child_inode->i_mtime.tv_nsec); +- mkdir_resp->i_ino = cpu_to_le64(child_inode->i_ino); +- dput(dent); +-mkdir_out: +- hmdfs_sendmessage_response(con, cmd, respsize, mkdir_resp, err); +- kfree(mkdir_resp); +-} +- +-void hmdfs_server_create(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- struct create_request *create_recv = data; +- struct inode *child_inode = NULL; +- struct dentry *dent = NULL; +- char *create_dir = NULL; +- char *create_name = NULL; +- struct hmdfs_inodeinfo_response *create_resp = NULL; +- int respsize = sizeof(struct hmdfs_inodeinfo_response); +- int path_len = le32_to_cpu(create_recv->path_len); +- +- create_resp = kzalloc(respsize, 
GFP_KERNEL); +- if (!create_resp) { +- err = -ENOMEM; +- goto create_out; +- } +- +- create_dir = create_recv->path; +- create_name = create_recv->path + path_len + 1; +- if (path_contain_dotdot(create_dir, create_recv->path_len)) { +- err = -EINVAL; +- goto create_out; +- } +- if (path_contain_dotdot(create_name, create_recv->name_len)) { +- err = -EINVAL; +- goto create_out; +- } +- +- dent = hmdfs_root_create(con->device_id, con->sbi->local_dst, +- create_dir, create_name, +- le16_to_cpu(create_recv->mode), +- create_recv->want_excl); +- if (IS_ERR(dent)) { +- err = PTR_ERR(dent); +- hmdfs_err("hmdfs_root_create failed err = %d", err); +- goto create_out; +- } +- child_inode = d_inode(dent); +- create_resp->i_mode = cpu_to_le16(child_inode->i_mode); +- create_resp->i_size = cpu_to_le64(child_inode->i_size); +- create_resp->i_mtime = cpu_to_le64(child_inode->i_mtime.tv_sec); +- create_resp->i_mtime_nsec = cpu_to_le32(child_inode->i_mtime.tv_nsec); +- /* +- * keep same as hmdfs_server_open, +- * to prevent hmdfs_open_final_remote from judging ino errors. +- */ +- create_resp->i_ino = cpu_to_le64( +- generate_u64_ino(hmdfs_i(child_inode)->lower_inode->i_ino, +- child_inode->i_generation)); +- dput(dent); +-create_out: +- hmdfs_sendmessage_response(con, cmd, respsize, create_resp, err); +- kfree(create_resp); +-} +- +-void hmdfs_server_rmdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- struct path root_path; +- char *path = NULL; +- char *name = NULL; +- struct rmdir_request *rmdir_recv = data; +- +- path = rmdir_recv->path; +- name = rmdir_recv->path + le32_to_cpu(rmdir_recv->path_len) + 1; +- if (path_contain_dotdot(path, rmdir_recv->path_len)) { +- err = -EINVAL; +- goto rmdir_out; +- } +- if (path_contain_dotdot(name, rmdir_recv->name_len)) { +- err = -EINVAL; +- goto rmdir_out; +- } +- +- err = kern_path(con->sbi->local_dst, 0, &root_path); +- if (!err) { +- err = hmdfs_root_rmdir(con->device_id, &root_path, path, name); +- path_put(&root_path); +- } +- +-rmdir_out: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_unlink(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- struct path root_path; +- char *path = NULL; +- char *name = NULL; +- struct unlink_request *unlink_recv = data; +- +- path = unlink_recv->path; +- name = unlink_recv->path + le32_to_cpu(unlink_recv->path_len) + 1; +- if (path_contain_dotdot(path, unlink_recv->path_len)) { +- err = -EINVAL; +- goto unlink_out; +- } +- if (path_contain_dotdot(name, unlink_recv->name_len)) { +- err = -EINVAL; +- goto unlink_out; +- } +- +- err = kern_path(con->sbi->local_dst, 0, &root_path); +- if (!err) { +- err = hmdfs_root_unlink(con->device_id, &root_path, path, name); +- path_put(&root_path); +- } +- +-unlink_out: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_rename(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- int old_path_len; +- int new_path_len; +- int old_name_len; +- int new_name_len; +- unsigned int flags; +- char *path_old = NULL; +- char *name_old = NULL; +- char *path_new = NULL; +- char *name_new = NULL; +- struct rename_request *recv = data; +- +- old_path_len = le32_to_cpu(recv->old_path_len); +- new_path_len = le32_to_cpu(recv->new_path_len); +- old_name_len = le32_to_cpu(recv->old_name_len); +- new_name_len = le32_to_cpu(recv->new_name_len); +- flags = le32_to_cpu(recv->flags); +- +- path_old = recv->path; +- path_new = recv->path + old_path_len + 
1; +- name_old = recv->path + old_path_len + 1 + new_path_len + 1; +- name_new = recv->path + old_path_len + 1 + new_path_len + 1 + +- old_name_len + 1; +- if (path_contain_dotdot(path_old, old_path_len)) { +- err = -EINVAL; +- goto rename_out; +- } +- if (path_contain_dotdot(path_new, new_path_len)) { +- err = -EINVAL; +- goto rename_out; +- } +- if (path_contain_dotdot(name_old, old_name_len)) { +- err = -EINVAL; +- goto rename_out; +- } +- if (path_contain_dotdot(name_new, new_name_len)) { +- err = -EINVAL; +- goto rename_out; +- } +- +- err = hmdfs_root_rename(con->sbi, con->device_id, path_old, name_old, +- path_new, name_new, flags); +- +-rename_out: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-static int hmdfs_lookup_symlink(struct path *link_path, const char *path_fmt, +- ... ) +-{ +- int ret; +- va_list args; +- char *path = kmalloc(PATH_MAX, GFP_KERNEL); +- +- if (!path) +- return -ENOMEM; +- +- va_start(args, path_fmt); +- ret = vsnprintf(path, PATH_MAX, path_fmt, args); +- va_end(args); +- +- if(ret >= PATH_MAX) { +- ret = -ENAMETOOLONG; +- goto out; +- } +- +- ret = kern_path(path, LOOKUP_FOLLOW, link_path); +- if (ret) { +- hmdfs_err("kern_path failed err = %d", ret); +- goto out; +- } +- +- if (!S_ISREG(d_inode(link_path->dentry)->i_mode)) { +- hmdfs_err("path is dir symlink"); +- path_put(link_path); +- ret = -EOPNOTSUPP; +- goto out; +- } +- +-out: +- kfree(path); +- return ret; +-} +- +-struct dir_entry_info { +- struct list_head list; +- char *name; +- int name_len; +- unsigned int d_type; +-}; +- +-static bool hmdfs_filldir_real(struct dir_context *ctx, const char *name, +- int name_len, long long offset, unsigned long long ino, +- unsigned int d_type) +-{ +- int res = 0; +- char namestr[NAME_MAX + 1]; +- struct getdents_callback_real *gc = NULL; +- struct dentry *child = NULL; +- +- if (name_len > NAME_MAX) { +- hmdfs_err("name_len:%d NAME_MAX:%u", name_len, NAME_MAX); +- goto out; +- } +- +- gc = container_of(ctx, struct getdents_callback_real, ctx); +- +- memcpy(namestr, name, name_len); +- namestr[name_len] = '\0'; +- +- if (hmdfs_file_type(namestr) != HMDFS_TYPE_COMMON) +- goto out; +- +- /* parent lock already hold by iterate_dir */ +- child = lookup_one_len(name, gc->parent_path->dentry, name_len); +- if (IS_ERR(child)) { +- res = PTR_ERR(child); +- hmdfs_err("lookup failed because %d", res); +- goto out; +- } +- +- if (d_really_is_negative(child)) { +- dput(child); +- hmdfs_err("lookup failed because negative dentry"); +- /* just do not fill this entry and continue for next entry */ +- goto out; +- } +- +- if (d_type == DT_REG || d_type == DT_DIR) { +- create_dentry(child, d_inode(child), gc->file, gc->sbi); +- gc->num++; +- } else if (d_type == DT_LNK) { +- struct path link_path; +- +- res = hmdfs_lookup_symlink(&link_path, "%s/%s/%s", +- gc->sbi->local_src, gc->dir, +- name); +- if (!res) { +- create_dentry(child, d_inode(link_path.dentry), +- gc->file, gc->sbi); +- path_put(&link_path); +- gc->num++; +- } else if (res == -ENOENT) { +- create_dentry(child, d_inode(child), gc->file, gc->sbi); +- gc->num++; +- } +- } +- dput(child); +- +-out: +- /* +- * we always return true here, so that the caller can continue to next +- * dentry even if failed on this dentry somehow. 
+- */ +- return true; +-} +- +-static void hmdfs_server_set_header(struct hmdfs_dcache_header *header, +- struct file *file, struct file *dentry_file) +-{ +- struct inode *inode = NULL; +- struct hmdfs_time_t cur_time; +- +- inode = file_inode(file); +- cur_time = current_time(file_inode(dentry_file)); +- header->dcache_crtime = cpu_to_le64(cur_time.tv_sec); +- header->dcache_crtime_nsec = cpu_to_le64(cur_time.tv_nsec); +- header->dentry_ctime = cpu_to_le64(inode->__i_ctime.tv_sec); +- header->dentry_ctime_nsec = cpu_to_le64(inode->__i_ctime.tv_nsec); +-} +- +-// Get the dentries of target directory +-struct file *hmdfs_server_rebuild_dents(struct hmdfs_sb_info *sbi, +- struct path *path, loff_t *num, +- const char *dir) +-{ +- int err = 0; +- struct getdents_callback_real gc = { +- .ctx.actor = hmdfs_filldir_real, +- .ctx.pos = 0, +- .num = 0, +- .sbi = sbi, +- .dir = dir, +- }; +- struct file *file = NULL; +- struct file *dentry_file = NULL; +- struct hmdfs_dcache_header header; +- +- dentry_file = create_local_dentry_file_cache(sbi); +- if (IS_ERR(dentry_file)) { +- hmdfs_err("file create failed err=%ld", PTR_ERR(dentry_file)); +- return dentry_file; +- } +- +- file = dentry_open(path, O_RDONLY | O_DIRECTORY, current_cred()); +- if (IS_ERR(file)) { +- err = PTR_ERR(file); +- hmdfs_err("dentry_open failed"); +- goto out; +- } +- +- hmdfs_server_set_header(&header, file, dentry_file); +- +- gc.parent_path = path; +- gc.file = dentry_file; +- +- err = iterate_dir(file, &(gc.ctx)); +- if (err) { +- hmdfs_err("iterate_dir failed"); +- goto out; +- } +- +- header.case_sensitive = sbi->s_case_sensitive; +- header.num = cpu_to_le64(gc.num); +- if (num) +- *num = gc.num; +- +- err = write_header(dentry_file, &header); +-out: +- if (!IS_ERR_OR_NULL(file)) +- fput(file); +- +- if (err) { +- fput(dentry_file); +- dentry_file = ERR_PTR(err); +- } +- +- trace_hmdfs_server_rebuild_dents(&header, err); +- return dentry_file; +-} +- +-void hmdfs_server_writepage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct writepage_request *writepage_recv = data; +- struct hmdfs_server_writeback *hswb = NULL; +- __u64 file_ver; +- __u32 file_id; +- struct file *file = NULL; +- loff_t pos; +- __u32 count; +- ssize_t ret; +- int err = 0; +- +- file_id = le32_to_cpu(writepage_recv->file_id); +- file_ver = le64_to_cpu(writepage_recv->file_ver); +- file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver); +- if (IS_ERR(file)) { +- hmdfs_info( +- "file with id %u does not exist, pgindex %llu, devid %llu", +- file_id, le64_to_cpu(writepage_recv->index), +- con->device_id); +- err = PTR_ERR(file); +- goto out; +- } +- +- pos = (loff_t)le64_to_cpu(writepage_recv->index) << HMDFS_PAGE_OFFSET; +- count = le32_to_cpu(writepage_recv->count); +- ret = kernel_write(file, writepage_recv->buf, count, &pos); +- if (ret != count) +- err = -EIO; +- +- hmdfs_close_path(file); +-out: +- hmdfs_send_err_response(con, cmd, err); +- +- hswb = con->sbi->h_swb; +- if (!err && hswb->dirty_writeback_control) +- hmdfs_server_check_writeback(hswb); +-} +- +-static int hmdfs_lookup_linkpath(struct hmdfs_sb_info *sbi, +- const char *path_name, struct path *dst_path) +-{ +- struct path link_path; +- int err; +- +- err = hmdfs_lookup_symlink(&link_path, "%s/%s", sbi->local_dst, +- path_name); +- if (err) +- return err; +- +- if (d_inode(link_path.dentry)->i_sb != sbi->sb) { +- path_put(dst_path); +- *dst_path = link_path; +- } else { +- path_put(&link_path); +- } +- +- return 0; +-} +- +-static struct inode 
*hmdfs_verify_path(struct dentry *dentry, char *recv_buf,
+-				       struct super_block *sb)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	struct hmdfs_inode_info *info = NULL;
+-
+-	/* if we found path from wrong fs */
+-	if (inode->i_sb != sb) {
+-		hmdfs_err("super blocks do not match");
+-		return NULL;
+-	}
+-
+-	info = hmdfs_i(inode);
+-	/* make sure lower inode is not NULL */
+-	if (info->lower_inode)
+-		return info->lower_inode;
+-
+-	/*
+-	 * we don't expect lower inode to be NULL in server. However, it's
+-	 * possible because dentry cache can contain stale data.
+-	 */
+-	hmdfs_info("lower inode is NULL, is remote file: %d",
+-		   info->conn != NULL);
+-	return NULL;
+-}
+-
+-static int hmdfs_notify_change(struct vfsmount *mnt, struct dentry *dentry,
+-			       struct iattr *attr,
+-			       struct inode **delegated_inode)
+-{
+-#ifdef CONFIG_SDCARD_FS
+-	/* sdcard_fs needs to call setattr2, notify_change will call setattr */
+-	return notify_change2(mnt, dentry, attr, delegated_inode);
+-#else
+-	return notify_change(&nop_mnt_idmap, dentry, attr, delegated_inode);
+-#endif
+-}
+-
+-void hmdfs_server_setattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
+-			  void *data)
+-{
+-	int err = 0;
+-	struct dentry *dentry = NULL;
+-	struct inode *inode = NULL;
+-	struct setattr_request *recv = data;
+-	struct path root_path, dst_path;
+-	struct iattr attr;
+-	__u32 valid = le32_to_cpu(recv->valid);
+-
+-	if (path_contain_dotdot(recv->buf, recv->path_len)) {
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+-	err = kern_path(con->sbi->local_dst, 0, &root_path);
+-	if (err) {
+-		hmdfs_err("kern_path failed err = %d", err);
+-		goto out;
+-	}
+-
+-	err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->buf, 0,
+-			      &dst_path);
+-	if (err)
+-		goto out_put_root;
+-
+-	inode = hmdfs_verify_path(dst_path.dentry, recv->buf, con->sbi->sb);
+-	if (!inode) {
+-		err = -ENOENT;
+-		goto out_put_dst;
+-	}
+-
+-	if (S_ISLNK(inode->i_mode)) {
+-		err = hmdfs_lookup_linkpath(con->sbi, recv->buf, &dst_path);
+-		if (err == -ENOENT)
+-			err = 0;
+-		else if (err)
+-			goto out_put_dst;
+-	}
+-
+-	dentry = dst_path.dentry;
+-	memset(&attr, 0, sizeof(attr));
+-	/* only support size and mtime */
+-	if (valid & (ATTR_SIZE | ATTR_MTIME))
+-		attr.ia_valid =
+-			(valid & (ATTR_MTIME | ATTR_MTIME_SET | ATTR_SIZE));
+-	attr.ia_size = le64_to_cpu(recv->size);
+-	attr.ia_mtime.tv_sec = le64_to_cpu(recv->mtime);
+-	attr.ia_mtime.tv_nsec = le32_to_cpu(recv->mtime_nsec);
+-
+-	inode_lock(dentry->d_inode);
+-	err = hmdfs_notify_change(dst_path.mnt, dentry, &attr, NULL);
+-	inode_unlock(dentry->d_inode);
+-
+-out_put_dst:
+-	path_put(&dst_path);
+-out_put_root:
+-	path_put(&root_path);
+-out:
+-	hmdfs_send_err_response(con, cmd, err);
+-}
+-
+-static void update_getattr_response(struct hmdfs_peer *con, struct inode *inode,
+-				    struct kstat *ks,
+-				    struct getattr_response *resp)
+-{
+-	/* if getattr for link, get ino and mode from actual lower inode */
+-	resp->ino = cpu_to_le64(
+-		generate_u64_ino(inode->i_ino, inode->i_generation));
+-	resp->mode = cpu_to_le16(inode->i_mode);
+-
+-	/* get other information from vfs_getattr() */
+-	resp->result_mask = cpu_to_le32(STATX_BASIC_STATS | STATX_BTIME);
+-	resp->fsid = cpu_to_le64(ks->dev);
+-	resp->nlink = cpu_to_le32(ks->nlink);
+-	resp->uid = cpu_to_le32(ks->uid.val);
+-	resp->gid = cpu_to_le32(ks->gid.val);
+-	resp->size = cpu_to_le64(ks->size);
+-	resp->blocks = cpu_to_le64(ks->blocks);
+-	resp->blksize = cpu_to_le32(ks->blksize);
+-	resp->atime = cpu_to_le64(ks->atime.tv_sec);
+-	resp->atime_nsec =
cpu_to_le32(ks->atime.tv_nsec); +- resp->mtime = cpu_to_le64(ks->mtime.tv_sec); +- resp->mtime_nsec = cpu_to_le32(ks->mtime.tv_nsec); +- resp->ctime = cpu_to_le64(ks->ctime.tv_sec); +- resp->ctime_nsec = cpu_to_le32(ks->ctime.tv_nsec); +- resp->crtime = cpu_to_le64(ks->btime.tv_sec); +- resp->crtime_nsec = cpu_to_le32(ks->btime.tv_nsec); +-} +- +-void hmdfs_server_getattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- int err = 0; +- struct getattr_request *recv = data; +- int size_read = sizeof(struct getattr_response); +- struct getattr_response *resp = NULL; +- struct kstat ks; +- struct path root_path, dst_path; +- struct inode *inode = NULL; +- unsigned int recv_flags = le32_to_cpu(recv->lookup_flags); +- unsigned int lookup_flags = 0; +- +- if (path_contain_dotdot(recv->buf, recv->path_len)) { +- err = -EINVAL; +- goto err; +- } +- +- err = hmdfs_convert_lookup_flags(recv_flags, &lookup_flags); +- if (err) +- goto err; +- +- resp = kzalloc(size_read, GFP_KERNEL); +- if (!resp) { +- err = -ENOMEM; +- goto err; +- } +- err = kern_path(con->sbi->local_dst, 0, &root_path); +- if (err) { +- hmdfs_err("kern_path failed err = %d", err); +- goto err_free_resp; +- } +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->buf, +- lookup_flags, &dst_path); +- if (err) +- goto out_put_root; +- +- inode = hmdfs_verify_path(dst_path.dentry, recv->buf, con->sbi->sb); +- if (!inode) { +- err = -ENOENT; +- goto out_put_dst; +- } +- +- if (S_ISLNK(inode->i_mode)) { +- err = hmdfs_lookup_linkpath(con->sbi, recv->buf, &dst_path); +- if(err && err != -ENOENT) +- goto out_put_dst; +- } +- +- err = vfs_getattr(&dst_path, &ks, STATX_BASIC_STATS | STATX_BTIME, 0); +- if (err) +- goto err_put_dst; +- update_getattr_response(con, inode, &ks, resp); +- +-out_put_dst: +- path_put(&dst_path); +-out_put_root: +- /* +- * if path lookup failed, we return with result_mask setting to +- * zero. So we can be aware of such situation in caller. 
+- */ +- if (err) +- resp->result_mask = cpu_to_le32(0); +- path_put(&root_path); +- hmdfs_sendmessage_response(con, cmd, size_read, resp, err); +- kfree(resp); +- return; +- +-err_put_dst: +- path_put(&dst_path); +- path_put(&root_path); +-err_free_resp: +- kfree(resp); +-err: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-static void init_statfs_response(struct statfs_response *resp, +- struct kstatfs *st) +-{ +- resp->f_type = cpu_to_le64(HMDFS_SUPER_MAGIC); +- resp->f_bsize = cpu_to_le64(st->f_bsize); +- resp->f_blocks = cpu_to_le64(st->f_blocks); +- resp->f_bfree = cpu_to_le64(st->f_bfree); +- resp->f_bavail = cpu_to_le64(st->f_bavail); +- resp->f_files = cpu_to_le64(st->f_files); +- resp->f_ffree = cpu_to_le64(st->f_ffree); +- resp->f_fsid_0 = cpu_to_le32(st->f_fsid.val[0]); +- resp->f_fsid_1 = cpu_to_le32(st->f_fsid.val[1]); +- resp->f_namelen = cpu_to_le64(st->f_namelen); +- resp->f_frsize = cpu_to_le64(st->f_frsize); +- resp->f_flags = cpu_to_le64(st->f_flags); +- /* f_spare is not used in f2fs or ext4 */ +- resp->f_spare_0 = cpu_to_le64(st->f_spare[0]); +- resp->f_spare_1 = cpu_to_le64(st->f_spare[1]); +- resp->f_spare_2 = cpu_to_le64(st->f_spare[2]); +- resp->f_spare_3 = cpu_to_le64(st->f_spare[3]); +-} +- +-void hmdfs_server_statfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- struct statfs_request *recv = data; +- struct statfs_response *resp = NULL; +- struct path root_path, path; +- struct kstatfs *st = NULL; +- int err = 0; +- +- if (path_contain_dotdot(recv->path, recv->path_len)) { +- err = -EINVAL; +- goto out; +- } +- +- st = kzalloc(sizeof(*st), GFP_KERNEL); +- if (!st) { +- err = -ENOMEM; +- goto out; +- } +- +- resp = kmalloc(sizeof(*resp), GFP_KERNEL); +- if (!resp) { +- err = -ENOMEM; +- goto free_st; +- } +- +- err = kern_path(con->sbi->local_src, 0, &root_path); +- if (err) { +- hmdfs_info("kern_path failed err = %d", err); +- goto free_st; +- } +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->path, 0, +- &path); +- if (err) { +- hmdfs_info("recv->path found failed err = %d", err); +- goto put_root; +- } +- +- err = vfs_statfs(&path, st); +- if (err) +- hmdfs_info("statfs local dentry failed, err = %d", err); +- init_statfs_response(resp, st); +- path_put(&path); +- +-put_root: +- path_put(&root_path); +-free_st: +- kfree(st); +-out: +- if (err) +- hmdfs_send_err_response(con, cmd, err); +- else +- hmdfs_sendmessage_response(con, cmd, sizeof(*resp), resp, 0); +- +- kfree(resp); +-} +- +-void hmdfs_server_syncfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data) +-{ +- /* +- * Reserved interface. There is a difference compared with traditional +- * syncfs process. Remote syncfs process in client: +- * 1. Remote writepages by async call +- * 2. Remote syncfs calling +- * 3. 
Wait all remote async calls(writepages) return in step 1 +- */ +- int ret = 0; +- +- hmdfs_send_err_response(con, cmd, ret); +-} +- +-void hmdfs_server_getxattr(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data) +-{ +- struct getxattr_request *recv = data; +- size_t size = le32_to_cpu(recv->size); +- size_t size_read = sizeof(struct getxattr_response) + size; +- struct getxattr_response *resp = NULL; +- struct path root_path; +- struct path path; +- char *file_path = recv->buf; +- char *name = recv->buf + recv->path_len + 1; +- int err = -ENOMEM; +- +- if (path_contain_dotdot(file_path, recv->path_len)) { +- err = -EINVAL; +- goto err; +- } +- if (path_contain_dotdot(name, recv->name_len)) { +- err = -EINVAL; +- goto err; +- } +- +- resp = kzalloc(size_read, GFP_KERNEL); +- if (!resp) { +- err = -ENOMEM; +- goto err; +- } +- +- err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path); +- if (err) { +- hmdfs_info("kern_path failed err = %d", err); +- goto err_free_resp; +- } +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, +- file_path, 0, &path); +- if (err) { +- hmdfs_info("path found failed err = %d", err); +- goto err_put_root; +- } +- +- if (!size) +- err = vfs_getxattr(&nop_mnt_idmap, path.dentry, name, NULL, size); +- else +- err = vfs_getxattr(&nop_mnt_idmap, path.dentry, name, resp->value, size); +- if (err < 0) { +- hmdfs_info("getxattr failed err %d", err); +- goto err_put_path; +- } +- +- resp->size = cpu_to_le32(err); +- hmdfs_sendmessage_response(con, cmd, size_read, resp, 0); +- path_put(&path); +- path_put(&root_path); +- kfree(resp); +- return; +- +-err_put_path: +- path_put(&path); +-err_put_root: +- path_put(&root_path); +-err_free_resp: +- kfree(resp); +-err: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_setxattr(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data) +-{ +- struct setxattr_request *recv = data; +- size_t size = le32_to_cpu(recv->size); +- int flags = le32_to_cpu(recv->flags); +- bool del = recv->del; +- struct path root_path; +- struct path path; +- const char *file_path = recv->buf; +- const char *name = recv->buf + recv->path_len + 1; +- const void *value = name + recv->name_len + 1; +- int err; +- +- if (path_contain_dotdot(file_path, recv->path_len)) { +- err = -EINVAL; +- goto err; +- } +- if (path_contain_dotdot(name, recv->name_len)) { +- err = -EINVAL; +- goto err; +- } +- +- err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path); +- if (err) { +- hmdfs_info("kern_path failed err = %d", err); +- goto err; +- } +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, +- file_path, 0, &path); +- if (err) { +- hmdfs_info("path found failed err = %d", err); +- goto err_put_root; +- } +- +- if (del) { +- WARN_ON(flags != XATTR_REPLACE); +- err = vfs_removexattr(&nop_mnt_idmap, path.dentry, name); +- } else { +- err = vfs_setxattr(&nop_mnt_idmap, path.dentry, name, value, size, flags); +- } +- +- path_put(&path); +-err_put_root: +- path_put(&root_path); +-err: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_listxattr(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data) +-{ +- struct listxattr_request *recv = data; +- size_t size = le32_to_cpu(recv->size); +- int size_read = sizeof(struct listxattr_response) + size; +- struct listxattr_response *resp = NULL; +- const char *file_path = recv->buf; +- struct path root_path; +- struct path path; +- int err = 0; +- +- if (path_contain_dotdot(file_path, recv->path_len)) { +- err = 
-EINVAL; +- goto err; +- } +- +- resp = kzalloc(size_read, GFP_KERNEL); +- if (!resp) { +- err = -ENOMEM; +- goto err; +- } +- +- err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path); +- if (err) { +- hmdfs_info("kern_path failed err = %d", err); +- goto err_free_resp; +- } +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, +- file_path, 0, &path); +- if (err) { +- hmdfs_info("path found failed err = %d", err); +- goto err_put_root; +- } +- +- if (!size) +- err = vfs_listxattr(path.dentry, NULL, size); +- else +- err = vfs_listxattr(path.dentry, resp->list, size); +- if (err < 0) { +- hmdfs_info("listxattr failed err = %d", err); +- goto err_put_path; +- } +- +- resp->size = cpu_to_le32(err); +- hmdfs_sendmessage_response(con, cmd, size_read, resp, 0); +- path_put(&root_path); +- path_put(&path); +- kfree(resp); +- return; +- +-err_put_path: +- path_put(&path); +-err_put_root: +- path_put(&root_path); +-err_free_resp: +- kfree(resp); +-err: +- hmdfs_send_err_response(con, cmd, err); +-} +- +-void hmdfs_server_get_drop_push(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data) +-{ +- struct drop_push_request *dp_recv = data; +- struct path root_path, path; +- int err; +- char *tmp_path = NULL; +- +- if (path_contain_dotdot(dp_recv->path, dp_recv->path_len)) { +- err = -EINVAL; +- goto quickack; +- } +- +- err = kern_path(con->sbi->real_dst, 0, &root_path); +- if (err) { +- hmdfs_err("kern_path failed err = %d", err); +- goto quickack; +- } +- tmp_path = kzalloc(PATH_MAX, GFP_KERNEL); +- if (!tmp_path) +- goto out; +- snprintf(tmp_path, PATH_MAX, "/" DEVICE_VIEW_ROOT "/%s%s", +- con->cid, dp_recv->path); +- +- err = vfs_path_lookup(root_path.dentry, root_path.mnt, tmp_path, 0, +- &path); +- if (err) { +- hmdfs_info("path found failed err = %d", err); +- goto free; +- } +- hmdfs_remove_cache_filp(con, path.dentry); +- +- path_put(&path); +-free: +- kfree(tmp_path); +-out: +- path_put(&root_path); +-quickack: +- set_conn_sock_quickack(con); +-} +diff --git a/fs/hmdfs/hmdfs_server.h b/fs/hmdfs/hmdfs_server.h +deleted file mode 100644 +index e832c7ff8..000000000 +--- a/fs/hmdfs/hmdfs_server.h ++++ /dev/null +@@ -1,79 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_server.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#ifndef HMDFS_SERVER_H +-#define HMDFS_SERVER_H +- +-#include "hmdfs.h" +-#include "comm/transport.h" +-#include "comm/socket_adapter.h" +- +-#define DATA_SEC_LEVEL0 0 +-#define DATA_SEC_LEVEL1 1 +-#define DATA_SEC_LEVEL2 2 +-#define DATA_SEC_LEVEL3 3 +-#define DATA_SEC_LEVEL4 4 +-#define DATA_SEC_LEVEL_LABEL "user.security" +-#define DATA_SEC_LEVEL_LENGTH 10 +- +-static inline void hmdfs_send_err_response(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, int err) +-{ +- if (hmdfs_sendmessage_response(con, cmd, 0, NULL, (__u32)err)) +- hmdfs_warning("send err failed"); +-} +- +-void hmdfs_server_open(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_atomic_open(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data); +-void hmdfs_server_fsync(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_release(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_readpage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_writepage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_readdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_mkdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_create(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_rmdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_unlink(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_rename(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +- +-void hmdfs_server_setattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_getattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_statfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_syncfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_getxattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_setxattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_listxattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd, +- void *data); +-void hmdfs_server_get_drop_push(struct hmdfs_peer *con, +- struct hmdfs_head_cmd *cmd, void *data); +- +-void __init hmdfs_server_add_node_evt_cb(void); +-#endif +diff --git a/fs/hmdfs/hmdfs_share.c b/fs/hmdfs/hmdfs_share.c +deleted file mode 100644 +index 436d3324f..000000000 +--- a/fs/hmdfs/hmdfs_share.c ++++ /dev/null +@@ -1,349 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/inode_share.h +- * +- * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
+- */ +- +-#include "hmdfs_share.h" +- +-static inline bool hmdfs_is_dst_path(struct path *src, struct path *dst) +-{ +- return (src->dentry == dst->dentry) && (src->mnt == dst->mnt); +-} +- +-static inline bool is_dst_device(char *src_cid, char *dst_cid) +-{ +- return strncmp(src_cid, dst_cid, HMDFS_CID_SIZE) == 0; +-} +- +-bool hmdfs_is_share_file(struct file *file) +-{ +- struct file *cur_file = file; +- struct hmdfs_dentry_info *gdi; +- struct hmdfs_file_info *gfi; +- +- while (cur_file->f_inode->i_sb->s_magic == HMDFS_SUPER_MAGIC) { +- gdi = hmdfs_d(cur_file->f_path.dentry); +- gfi = hmdfs_f(cur_file); +- if (hm_isshare(gdi->file_type)) +- return true; +- if (gfi->lower_file) +- cur_file = gfi->lower_file; +- else +- break; +- } +- +- return false; +-} +- +-static void remove_and_release_share_item(struct hmdfs_share_item *item) +-{ +- list_del(&item->list); +- item->hst->item_cnt--; +- fput(item->file); +- kfree(item->relative_path.name); +- kfree(item); +-} +- +-static inline bool is_share_item_timeout(struct hmdfs_share_item *item) +-{ +- return !item->opened && item->timeout; +-} +- +-struct hmdfs_share_item *hmdfs_lookup_share_item(struct hmdfs_share_table *st, +- struct qstr *cur_relative_path) +-{ +- struct hmdfs_share_item *item, *tmp; +- +- list_for_each_entry_safe(item, tmp, &st->item_list_head, list) { +- if (is_share_item_timeout(item)){ +- remove_and_release_share_item(item); +- } else { +- if (qstr_eq(&item->relative_path, cur_relative_path)) +- return item; +- } +- } +- +- return NULL; +-} +- +-static void share_item_timeout_work(struct work_struct *work) { +- struct hmdfs_share_item *item = +- container_of(work, struct hmdfs_share_item, d_work.work); +- +- item->timeout = true; +-} +- +-int insert_share_item(struct hmdfs_share_table *st, struct qstr *relative_path, +- struct file *file, char *cid) +-{ +- struct hmdfs_share_item *new_item = NULL; +- char *path_name; +- int err = 0; +- +- if (st->item_cnt >= st->max_cnt) { +- int ret = hmdfs_clear_first_item(st); +- if (unlikely(ret)) { +- err = -EMFILE; +- goto err_out; +- } +- } +- +- path_name = kzalloc(PATH_MAX, GFP_KERNEL); +- if (unlikely(!path_name)) { +- err = -EMFILE; +- goto err_out; +- } +- strcpy(path_name, relative_path->name); +- +- new_item = kmalloc(sizeof(*new_item), GFP_KERNEL); +- if (unlikely(!new_item)) { +- err = -ENOMEM; +- kfree(path_name); +- goto err_out; +- } +- +- new_item->file = file; +- get_file(file); +- new_item->relative_path.name = path_name; +- new_item->relative_path.len = relative_path->len; +- memcpy(new_item->cid, cid, HMDFS_CID_SIZE); +- new_item->opened = false; +- new_item->timeout = false; +- list_add_tail(&new_item->list, &st->item_list_head); +- new_item->hst = st; +- +- INIT_DELAYED_WORK(&new_item->d_work, share_item_timeout_work); +- queue_delayed_work(new_item->hst->share_item_timeout_wq, +- &new_item->d_work, HZ * HMDFS_SHARE_ITEM_TIMEOUT_S); +- +- st->item_cnt++; +- +-err_out: +- return err; +-} +- +-void update_share_item(struct hmdfs_share_item *item, struct file *file, +- char *cid) +-{ +- /* if not the same file, we need to update struct file */ +- if (!hmdfs_is_dst_path(&file->f_path, &item->file->f_path)) { +- fput(item->file); +- get_file(file); +- item->file = file; +- } +- memcpy(item->cid, cid, HMDFS_CID_SIZE); +- +- if (!cancel_delayed_work_sync(&item->d_work)) +- item->timeout = false; +- +- queue_delayed_work(item->hst->share_item_timeout_wq, &item->d_work, +- HZ * HMDFS_SHARE_ITEM_TIMEOUT_S); +-} +- +-bool in_share_dir(struct dentry *child_dentry) +-{ +- 
struct dentry *parent_dentry = dget_parent(child_dentry); +- bool ret = false; +- +- if (!strncmp(parent_dentry->d_name.name, SHARE_RESERVED_DIR, +- strlen(SHARE_RESERVED_DIR))) +- ret = true; +- +- dput(parent_dentry); +- return ret; +-} +- +-inline bool is_share_dir(struct inode *inode, const char *name) +-{ +- return (S_ISDIR(inode->i_mode) && +- !strncmp(name, SHARE_RESERVED_DIR, sizeof(SHARE_RESERVED_DIR))); +-} +- +-int get_path_from_share_table(struct hmdfs_sb_info *sbi, +- struct dentry *cur_dentry, +- struct path *src_path) +-{ +- struct hmdfs_share_item *item; +- const char *path_name; +- struct qstr relative_path; +- int err = 0; +- +- path_name = hmdfs_get_dentry_relative_path(cur_dentry); +- if (unlikely(!path_name)) { +- err = -ENOMEM; +- goto err_out; +- } +- relative_path.name = path_name; +- relative_path.len = strlen(path_name); +- +- spin_lock(&sbi->share_table.item_list_lock); +- item = hmdfs_lookup_share_item(&sbi->share_table, &relative_path); +- if (!item) { +- err = -ENOENT; +- goto unlock; +- } +- path_get(&item->file->f_path); +- *src_path = item->file->f_path; +-unlock: +- spin_unlock(&sbi->share_table.item_list_lock); +- kfree(path_name); +-err_out: +- return err; +-} +- +-void hmdfs_clear_share_item_offline(struct hmdfs_peer *conn) +-{ +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct hmdfs_share_item *item, *tmp; +- +- spin_lock(&sbi->share_table.item_list_lock); +- list_for_each_entry_safe(item, tmp, &sbi->share_table.item_list_head, +- list) { +- if (is_dst_device(item->cid, conn->cid)) { +- /* release the item that was not closed properly */ +- if (item->opened) +- remove_and_release_share_item(item); +- } +- } +- spin_unlock(&sbi->share_table.item_list_lock); +-} +- +-void reset_item_opened_status(struct hmdfs_sb_info *sbi, const char *filename) +-{ +- struct qstr candidate = QSTR_INIT(filename, strlen(filename)); +- struct hmdfs_share_item *item = NULL; +- +- spin_lock(&sbi->share_table.item_list_lock); +- item = hmdfs_lookup_share_item(&sbi->share_table, &candidate); +- if (item) { +- item->opened = false; +- queue_delayed_work(item->hst->share_item_timeout_wq, +- &item->d_work, HZ * HMDFS_SHARE_ITEM_TIMEOUT_S); +- } +- spin_unlock(&sbi->share_table.item_list_lock); +-} +- +-void hmdfs_close_share_item(struct hmdfs_sb_info *sbi, struct file *file, +- char *cid) +-{ +- struct qstr relativepath; +- const char *path_name; +- struct hmdfs_share_item *item = NULL; +- +- path_name = hmdfs_get_dentry_relative_path(file->f_path.dentry); +- if (unlikely(!path_name)) { +- hmdfs_err("get dentry relative path error"); +- return; +- } +- +- relativepath.name = path_name; +- relativepath.len = strlen(path_name); +- +- spin_lock(&sbi->share_table.item_list_lock); +- item = hmdfs_lookup_share_item(&sbi->share_table, &relativepath); +- if (unlikely(!item)) { +- hmdfs_err("cannot get share item %s", relativepath.name); +- goto unlock; +- } +- +- /* +- * If the item is shared to all device, we should close the item directly. +- */ +- if (!strcmp(item->cid, SHARE_ALL_DEVICE)) { +- goto close; +- } +- +- if (unlikely(!is_dst_device(item->cid, cid))) { +- hmdfs_err("item not right, dst cid is: %s", item->cid); +- goto unlock; +- } +- +- /* +- * After remote close, we should reset the opened status and restart +- * delayed timeout work. 
+- */ +-close: +- item->opened = false; +- queue_delayed_work(item->hst->share_item_timeout_wq, &item->d_work, +- HZ * HMDFS_SHARE_ITEM_TIMEOUT_S); +- +-unlock: +- spin_unlock(&sbi->share_table.item_list_lock); +- kfree(path_name); +-} +- +-int hmdfs_check_share_access_permission(struct hmdfs_sb_info *sbi, +- const char *filename, +- char *cid) +-{ +- struct qstr candidate = QSTR_INIT(filename, strlen(filename)); +- struct hmdfs_share_item *item = NULL; +- int ret = -ENOENT; +- +- spin_lock(&sbi->share_table.item_list_lock); +- item = hmdfs_lookup_share_item(&sbi->share_table, &candidate); +- /* +- * When cid matches, we set the item status opened and canel +- * its delayed work to ensure that the open process can get +- * the correct path +- */ +- if (item && (is_dst_device(item->cid, cid) || !strcmp(item->cid, SHARE_ALL_DEVICE))) { +- item->opened = true; +- if (!cancel_delayed_work_sync(&item->d_work)) { +- item->timeout = false; +- } +- ret = 0; +- } +- spin_unlock(&sbi->share_table.item_list_lock); +- +- return ret; +-} +- +- +-int hmdfs_init_share_table(struct hmdfs_sb_info *sbi) +-{ +- spin_lock_init(&sbi->share_table.item_list_lock); +- INIT_LIST_HEAD(&sbi->share_table.item_list_head); +- sbi->share_table.item_cnt = 0; +- sbi->share_table.max_cnt = HMDFS_SHARE_ITEMS_MAX; +- sbi->share_table.share_item_timeout_wq = +- create_singlethread_workqueue("share_item_timeout_wq"); +- +- if (!sbi->share_table.share_item_timeout_wq) +- return -ENOMEM; +- return 0; +-} +- +-void hmdfs_clear_share_table(struct hmdfs_sb_info *sbi) +-{ +- struct hmdfs_share_table *st = &sbi->share_table; +- struct hmdfs_share_item *item, *tmp; +- +- spin_lock(&sbi->share_table.item_list_lock); +- list_for_each_entry_safe(item, tmp, &sbi->share_table.item_list_head, +- list) { +- flush_delayed_work(&item->d_work); +- remove_and_release_share_item(item); +- } +- spin_unlock(&sbi->share_table.item_list_lock); +- +- if (st->share_item_timeout_wq != NULL) +- destroy_workqueue(st->share_item_timeout_wq); +-} +- +-int hmdfs_clear_first_item(struct hmdfs_share_table *st) +-{ +- int ret = -EMFILE; +- struct hmdfs_share_item *item, *tmp; +- list_for_each_entry_safe(item, tmp, &st->item_list_head, list) { +- if (!item->timeout) { +- cancel_delayed_work_sync(&item->d_work); +- } +- remove_and_release_share_item(item); +- ret = 0; +- break; +- } +- return ret; +-} +diff --git a/fs/hmdfs/hmdfs_share.h b/fs/hmdfs/hmdfs_share.h +deleted file mode 100644 +index 2db8c3a4c..000000000 +--- a/fs/hmdfs/hmdfs_share.h ++++ /dev/null +@@ -1,63 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_share.h +- * +- * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
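
Condensed for reference, the expiry protocol implemented by the share-table functions above has three moves: open cancels the item's timer, close re-arms it, and the next lookup reaps any item that is both unopened and timed out. The two helpers below are a hypothetical distillation (neither name exists in the source) over the struct hmdfs_share_item declared in hmdfs_share.h just below.

static void share_item_mark_opened(struct hmdfs_share_item *item)
{
	item->opened = true;
	/*
	 * cancel_delayed_work_sync() returns false when the work was no
	 * longer pending, i.e. share_item_timeout_work() may already have
	 * set item->timeout; clear the stale mark so the next lookup does
	 * not reap a freshly opened item.
	 */
	if (!cancel_delayed_work_sync(&item->d_work))
		item->timeout = false;
}

static void share_item_mark_closed(struct hmdfs_share_item *item)
{
	item->opened = false;
	/* re-arm the HMDFS_SHARE_ITEM_TIMEOUT_S (120 s) expiry timer */
	queue_delayed_work(item->hst->share_item_timeout_wq, &item->d_work,
			   HZ * HMDFS_SHARE_ITEM_TIMEOUT_S);
}

Keeping share_item_timeout_work() down to a single bool store means the timer handler needs no locking; the actual freeing only ever happens under item_list_lock, in hmdfs_lookup_share_item() and the table teardown paths.
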
+- */ +- +-#ifndef HMDFS_SHARE_H +-#define HMDFS_SHARE_H +- +-#include +-#include +-#include +- +-#include "hmdfs_device_view.h" +-#include "comm/connection.h" +- +-#define HMDFS_SHARE_ITEM_TIMEOUT_S 120 +-#define HMDFS_SHARE_ITEMS_MAX 128 +- +-#define SHARE_RESERVED_DIR ".share" +-#define SHARE_ALL_DEVICE "0" +- +-struct hmdfs_share_control { +- __u32 src_fd; +- char cid[HMDFS_CID_SIZE]; +-}; +- +-struct hmdfs_share_item { +- struct file *file; +- struct qstr relative_path; +- char cid[HMDFS_CID_SIZE]; +- bool opened; +- bool timeout; +- struct list_head list; +- struct delayed_work d_work; +- struct hmdfs_share_table *hst; +-}; +- +-bool hmdfs_is_share_file(struct file *file); +-struct hmdfs_share_item *hmdfs_lookup_share_item(struct hmdfs_share_table *st, +- struct qstr *cur_relative_path); +-int insert_share_item(struct hmdfs_share_table *st, struct qstr *relative_path, +- struct file *file, char *cid); +-void update_share_item(struct hmdfs_share_item *item, struct file *file, +- char *cid); +-bool in_share_dir(struct dentry *child_dentry); +-inline bool is_share_dir(struct inode *inode, const char *name); +-int get_path_from_share_table(struct hmdfs_sb_info *sbi, +- struct dentry *cur_dentry, struct path *src_path); +- +-void hmdfs_clear_share_item_offline(struct hmdfs_peer *conn); +-void reset_item_opened_status(struct hmdfs_sb_info *sbi, const char *filename); +-void hmdfs_close_share_item(struct hmdfs_sb_info *sbi, struct file *file, +- char *cid); +-int hmdfs_check_share_access_permission(struct hmdfs_sb_info *sbi, +- const char *filename, char *cid); +- +-int hmdfs_init_share_table(struct hmdfs_sb_info *sbi); +-void hmdfs_clear_share_table(struct hmdfs_sb_info *sbi); +-int hmdfs_clear_first_item(struct hmdfs_share_table *st); +- +-#endif // HMDFS_SHARE_H +diff --git a/fs/hmdfs/hmdfs_trace.h b/fs/hmdfs/hmdfs_trace.h +deleted file mode 100644 +index 0660d0640..000000000 +--- a/fs/hmdfs/hmdfs_trace.h ++++ /dev/null +@@ -1,954 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/hmdfs_trace.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#undef TRACE_SYSTEM +-#define TRACE_SYSTEM hmdfs +- +-#if !defined(__HMDFS_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +- +-#define __HMDFS_TRACE_H__ +- +-#include +-#include "comm/protocol.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_client.h" +-#include "hmdfs_device_view.h" +-#include "hmdfs_merge_view.h" +-#include "client_writeback.h" +- +-TRACE_EVENT(hmdfs_permission, +- +- TP_PROTO(unsigned long ino), +- +- TP_ARGS(ino), +- +- TP_STRUCT__entry(__field(unsigned long, ino)), +- +- TP_fast_assign(__entry->ino = ino;), +- +- TP_printk("permission check for ino %lu failed", __entry->ino)); +- +-/* communication */ +-TRACE_EVENT(hmdfs_recv_mesg_callback, +- +- TP_PROTO(struct hmdfs_head_cmd *cmd), +- +- TP_ARGS(cmd), +- +- TP_STRUCT__entry( +- __field(__u32, msg_id) +- __field(__u32, magic) +- __field(__u16, command) +- __field(__u16, cmd_flag) +- __field(__u32, data_len) +- __field(__u32, ret_code) +- ), +- +- TP_fast_assign( +- __entry->msg_id = le32_to_cpu(cmd->msg_id); +- __entry->magic = cmd->magic; +- __entry->command = cmd->operations.command; +- __entry->cmd_flag = cmd->operations.cmd_flag; +- __entry->data_len = cmd->data_len; +- __entry->ret_code = cmd->ret_code; +- ), +- +- TP_printk("msg_id:%u magic:%u command:%hu, cmd_flag:%hu, data_len:%u, ret_code:%u", +- __entry->msg_id, __entry->magic, __entry->command, +- __entry->cmd_flag, __entry->data_len, __entry->ret_code) +-); +- +-TRACE_EVENT(hmdfs_tcp_send_message, +- +- TP_PROTO(struct hmdfs_head_cmd *cmd), +- +- TP_ARGS(cmd), +- +- TP_STRUCT__entry( +- __field(__u32, msg_id) +- __field(__u32, magic) +- __field(__u16, command) +- __field(__u16, cmd_flag) +- __field(__u32, data_len) +- __field(__u32, ret_code) +- ), +- +- TP_fast_assign( +- __entry->msg_id = le32_to_cpu(cmd->msg_id); +- __entry->magic = cmd->magic; +- __entry->command = cmd->operations.command; +- __entry->cmd_flag = cmd->operations.cmd_flag; +- __entry->data_len = cmd->data_len; +- __entry->ret_code = cmd->ret_code; +- ), +- +- TP_printk("msg_id:%u magic:%u command:%hu, cmd_flag:%hu, data_len:%u, ret_code:%u", +- __entry->msg_id, __entry->magic, __entry->command, +- __entry->cmd_flag, __entry->data_len, __entry->ret_code) +-); +- +-/* file system interface */ +-DECLARE_EVENT_CLASS(hmdfs_iterate_op_end, +- +- TP_PROTO(struct dentry *__d, loff_t start_pos, loff_t end_pos, int err), +- +- TP_ARGS(__d, start_pos, end_pos, err), +- +- TP_STRUCT__entry( +- __string(name_str, __d->d_name.name) +- __field(loff_t, start) +- __field(loff_t, end) +- __field(int, err) +- ), +- +- TP_fast_assign( +- __assign_str(name_str, __d->d_name.name); +- __entry->start = start_pos; +- __entry->end = end_pos; +- __entry->err = err; +- ), +- +- TP_printk("dentry[%s] start_pos:%llx, end_pos:%llx, err:%d", +- __get_str(name_str), __entry->start, +- __entry->end, __entry->err) +-); +- +-#define define_hmdfs_iterate_op_end_event(event_name) \ +- DEFINE_EVENT(hmdfs_iterate_op_end, event_name, \ +- TP_PROTO(struct dentry *__d, loff_t start_pos, \ +- loff_t end_pos, int err), \ +- TP_ARGS(__d, start_pos, end_pos, err)) +- +-define_hmdfs_iterate_op_end_event(hmdfs_iterate_local); +-define_hmdfs_iterate_op_end_event(hmdfs_iterate_remote); +-define_hmdfs_iterate_op_end_event(hmdfs_iterate_merge); +- +- +-TRACE_EVENT(hmdfs_lookup, +- +- TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), +- +- TP_ARGS(dir, dentry, flags), +- +- TP_STRUCT__entry( +- __field(ino_t, ino) +- __string(name_str, dentry->d_name.name) +- __field(unsigned int, flags) +- ), +- +- 
TP_fast_assign( +- __entry->ino = dir->i_ino; +- __assign_str(name_str, dentry->d_name.name); +- __entry->flags = flags; +- ), +- +- TP_printk("parent_ino = %lu, name:%s, flags:%u", +- __entry->ino, __get_str(name_str), __entry->flags) +-); +- +-DECLARE_EVENT_CLASS(hmdfs_lookup_op_end, +- +- TP_PROTO(struct inode *dir, struct dentry *dentry, int err), +- +- TP_ARGS(dir, dentry, err), +- +- TP_STRUCT__entry( +- __field(ino_t, ino) +- __string(name_str, dentry->d_name.name) +- __field(int, err) +- ), +- +- TP_fast_assign( +- __entry->ino = dir->i_ino; +- __assign_str(name_str, dentry->d_name.name); +- __entry->err = err; +- ), +- +- TP_printk("parent_ino = %lu, name:%s, err:%d", +- __entry->ino, __get_str(name_str), __entry->err) +-); +- +-#define define_hmdfs_lookup_op_end_event(event_name) \ +- DEFINE_EVENT(hmdfs_lookup_op_end, event_name, \ +- TP_PROTO(struct inode *dir, struct dentry *dentry, \ +- int err), \ +- TP_ARGS(dir, dentry, err)) +- +- +-define_hmdfs_lookup_op_end_event(hmdfs_root_lookup); +-define_hmdfs_lookup_op_end_event(hmdfs_root_lookup_end); +- +-define_hmdfs_lookup_op_end_event(hmdfs_device_lookup); +-define_hmdfs_lookup_op_end_event(hmdfs_device_lookup_end); +- +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_local); +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_local_end); +-define_hmdfs_lookup_op_end_event(hmdfs_mkdir_local); +-define_hmdfs_lookup_op_end_event(hmdfs_rmdir_local); +-define_hmdfs_lookup_op_end_event(hmdfs_create_local); +- +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_remote); +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_remote_end); +-define_hmdfs_lookup_op_end_event(hmdfs_mkdir_remote); +-define_hmdfs_lookup_op_end_event(hmdfs_rmdir_remote); +-define_hmdfs_lookup_op_end_event(hmdfs_create_remote); +- +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_merge); +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_merge_end); +-define_hmdfs_lookup_op_end_event(hmdfs_mkdir_merge); +-define_hmdfs_lookup_op_end_event(hmdfs_rmdir_merge); +-define_hmdfs_lookup_op_end_event(hmdfs_create_merge); +- +-define_hmdfs_lookup_op_end_event(hmdfs_get_link_local); +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_share); +-define_hmdfs_lookup_op_end_event(hmdfs_lookup_share_end); +- +-TRACE_EVENT(hmdfs_show_comrade, +- +- TP_PROTO(struct dentry *d, struct dentry *lo_d, uint64_t devid), +- +- TP_ARGS(d, lo_d, devid), +- +- TP_STRUCT__entry( +- __string(name, d->d_name.name) +- __string(lo_name, lo_d->d_name.name) +- __field(uint64_t, devid) +- ), +- +- TP_fast_assign( +- __assign_str(name, d->d_name.name) +- __assign_str(lo_name, lo_d->d_name.name) +- __entry->devid = devid; +- ), +- +- TP_printk("parent_name:%s -> lo_d_name:%s, lo_d_devid:%llu", +- __get_str(name), __get_str(lo_name), __entry->devid) +-); +- +-DECLARE_EVENT_CLASS(hmdfs_rename_op_end, +- +- TP_PROTO(struct inode *olddir, struct dentry *olddentry, +- struct inode *newdir, struct dentry *newdentry, +- unsigned int flags), +- +- TP_ARGS(olddir, olddentry, newdir, newdentry, flags), +- +- TP_STRUCT__entry( +- __field(ino_t, oldino) +- __string(oldname_str, olddentry->d_name.name) +- __field(ino_t, newino) +- __string(newname_str, newdentry->d_name.name) +- __field(unsigned int, flags) +- ), +- +- TP_fast_assign( +- __entry->oldino = olddir->i_ino; +- __assign_str(oldname_str, olddentry->d_name.name); +- __entry->newino = newdir->i_ino; +- __assign_str(newname_str, newdentry->d_name.name); +- __entry->flags = flags; +- ), +- +- TP_printk("old_pino = %lu, oldname:%s; new_pino = %lu, newname:%s, flags:%u", +- 
__entry->oldino, __get_str(oldname_str), +- __entry->newino, __get_str(newname_str), __entry->flags) +-); +- +-#define define_hmdfs_rename_op_end_event(event_name) \ +- DEFINE_EVENT(hmdfs_rename_op_end, event_name, \ +- TP_PROTO(struct inode *olddir, struct dentry *olddentry, \ +- struct inode *newdir, struct dentry *newdentry, \ +- unsigned int flags), \ +- TP_ARGS(olddir, olddentry, newdir, newdentry, flags)) +- +-define_hmdfs_rename_op_end_event(hmdfs_rename_local); +-define_hmdfs_rename_op_end_event(hmdfs_rename_remote); +-define_hmdfs_rename_op_end_event(hmdfs_rename_merge); +- +-TRACE_EVENT(hmdfs_statfs, +- +- TP_PROTO(struct dentry *d, uint8_t type), +- +- TP_ARGS(d, type), +- +- TP_STRUCT__entry( +- __string(name, d->d_name.name) +- __field(uint8_t, type) +- ), +- +- TP_fast_assign( +- __assign_str(name, d->d_name.name) +- __entry->type = type; +- ), +- +- TP_printk("dentry_name:%s, lo_d_devid:%u", +- __get_str(name), __entry->type) +-); +- +- +- +-TRACE_EVENT(hmdfs_balance_dirty_pages_ratelimited, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, +- struct hmdfs_writeback *hwb, +- int bdp_ratelimits), +- +- TP_ARGS(sbi, hwb, bdp_ratelimits), +- +- TP_STRUCT__entry( +- __array(char, dst, 128) +- __field(int, nr_dirtied) +- __field(int, nr_dirtied_pause) +- __field(int, dirty_exceeded) +- __field(long long, bdp_ratelimits) +- __field(long, ratelimit_pages) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->dst, sbi->local_dst, 128); +- +- __entry->nr_dirtied = current->nr_dirtied; +- __entry->nr_dirtied_pause = current->nr_dirtied_pause; +- __entry->dirty_exceeded = hwb->dirty_exceeded; +- __entry->bdp_ratelimits = bdp_ratelimits; +- __entry->ratelimit_pages = hwb->ratelimit_pages; +- ), +- +- TP_printk("hmdfs dst:%s nr_dirtied=%d nr_dirtied_pause=%d dirty_exceeded=%d bdp_ratelimits=%lld ratelimit_pages=%ld", +- __entry->dst, __entry->nr_dirtied, __entry->nr_dirtied_pause, +- __entry->dirty_exceeded, __entry->bdp_ratelimits, +- __entry->ratelimit_pages) +-); +- +-TRACE_EVENT(hmdfs_balance_dirty_pages, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, +- struct bdi_writeback *wb, +- struct hmdfs_dirty_throttle_control *hdtc, +- unsigned long pause, +- unsigned long start_time), +- +- TP_ARGS(sbi, wb, hdtc, pause, start_time), +- +- TP_STRUCT__entry( +- __array(char, dst, 128) +- __field(unsigned long, write_bw) +- __field(unsigned long, avg_write_bw) +- __field(unsigned long, file_bg_thresh) +- __field(unsigned long, fs_bg_thresh) +- __field(unsigned long, file_thresh) +- __field(unsigned long, fs_thresh) +- __field(unsigned long, file_nr_dirty) +- __field(unsigned long, fs_nr_dirty) +- __field(unsigned long, file_nr_rec) +- __field(unsigned long, fs_nr_rec) +- __field(unsigned long, pause) +- __field(unsigned long, paused) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->dst, sbi->local_dst, 128); +- +- __entry->write_bw = wb->write_bandwidth; +- __entry->avg_write_bw = wb->avg_write_bandwidth; +- __entry->file_bg_thresh = hdtc->file_bg_thresh; +- __entry->fs_bg_thresh = hdtc->fs_bg_thresh; +- __entry->file_thresh = hdtc->file_thresh; +- __entry->fs_thresh = hdtc->fs_thresh; +- __entry->file_nr_dirty = hdtc->file_nr_dirty; +- __entry->fs_nr_dirty = hdtc->fs_nr_dirty; +- __entry->file_nr_rec = hdtc->file_nr_reclaimable; +- __entry->fs_nr_rec = hdtc->fs_nr_reclaimable; +- __entry->pause = pause * 1000 / HZ; +- __entry->paused = (jiffies - start_time) * +- 1000 / HZ; +- ), +- +- TP_printk("hmdfs dst:%s write_bw=%lu, awrite_bw=%lu, bg_thresh=%lu,%lu thresh=%lu,%lu dirty=%lu,%lu reclaimable=%lu,%lu 
pause=%lu paused=%lu", +- __entry->dst, __entry->write_bw, __entry->avg_write_bw, +- __entry->file_bg_thresh, __entry->fs_bg_thresh, +- __entry->file_thresh, __entry->fs_thresh, +- __entry->file_nr_dirty, __entry->fs_nr_dirty, +- __entry->file_nr_rec, __entry->fs_nr_rec, +- __entry->pause, __entry->paused +- ) +-); +- +-TRACE_EVENT(hmdfs_start_srv_wb, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, int dirty_pages, +- unsigned int dirty_thresh_pg), +- +- TP_ARGS(sbi, dirty_pages, dirty_thresh_pg), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(int, dirty_pages) +- __field(unsigned int, dirty_thresh_pg) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, sbi->local_src, 128); +- __entry->dirty_pages = dirty_pages; +- __entry->dirty_thresh_pg = dirty_thresh_pg; +- ), +- +- TP_printk("hmdfs src: %s, start writeback dirty pages. writeback %d pages dirty_thresh is %d pages", +- __entry->src, __entry->dirty_pages, __entry->dirty_thresh_pg) +-); +- +-TRACE_EVENT(hmdfs_fsync_enter_remote, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, unsigned long long device_id, +- unsigned long long remote_ino, int datasync), +- +- TP_ARGS(sbi, device_id, remote_ino, datasync), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint64_t, device_id) +- __field(uint64_t, remote_ino) +- __field(int, datasync) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, sbi->local_src, 128); +- __entry->device_id = device_id; +- __entry->remote_ino = remote_ino; +- __entry->datasync = datasync; +- ), +- +- TP_printk("hmdfs: src %s, start remote fsync file(remote dev_id=%llu,ino=%llu), datasync=%d", +- __entry->src, __entry->device_id, +- __entry->remote_ino, __entry->datasync) +-); +- +-TRACE_EVENT(hmdfs_fsync_exit_remote, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, unsigned long long device_id, +- unsigned long long remote_ino, unsigned int timeout, int err), +- +- TP_ARGS(sbi, device_id, remote_ino, timeout, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint64_t, device_id) +- __field(uint64_t, remote_ino) +- __field(uint32_t, timeout) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, sbi->local_src, 128); +- __entry->device_id = device_id; +- __entry->remote_ino = remote_ino; +- __entry->timeout = timeout; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, finish remote fsync file(remote dev_id=%llu,ino=%llu), timeout=%u, err=%d", +- __entry->src, __entry->device_id, __entry->remote_ino, +- __entry->timeout, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_syncfs_enter, +- +- TP_PROTO(struct hmdfs_sb_info *sbi), +- +- TP_ARGS(sbi), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, sbi->local_src, 128); +- ), +- +- TP_printk("hmdfs: src %s, start syncfs", __entry->src) +-); +- +-TRACE_EVENT(hmdfs_syncfs_exit, +- +- TP_PROTO(struct hmdfs_sb_info *sbi, int remain_count, +- unsigned int timeout, int err), +- +- TP_ARGS(sbi, remain_count, timeout, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(int, remain_count) +- __field(uint32_t, timeout) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, sbi->local_src, 128); +- __entry->remain_count = remain_count; +- __entry->timeout = timeout; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, finish syncfs(timeout=%u), remain %d remote devices to response, err=%d", +- __entry->src, __entry->timeout, +- __entry->remain_count, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_server_release, +- +- TP_PROTO(struct hmdfs_peer 
*con, uint32_t file_id, +- uint64_t file_ver, int err), +- +- TP_ARGS(con, file_id, file_ver, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint32_t, file_id) +- __field(uint64_t, file_ver) +- __field(uint64_t, device_id) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, con->sbi->local_src, 128); +- __entry->file_id = file_id; +- __entry->file_ver = file_ver; +- __entry->device_id = con->device_id; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, server release file, fid=%u, fid_ver=%llu, remote_dev=%llu, err=%d", +- __entry->src, __entry->file_id, __entry->file_ver, +- __entry->device_id, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_readpages_cloud, +- +- TP_PROTO(unsigned int nr_pages, int err), +- +- TP_ARGS(nr_pages, err), +- +- TP_STRUCT__entry( +- __field(unsigned int, nr_pages) +- __field(int, err) +- ), +- +- TP_fast_assign( +- __entry->nr_pages = nr_pages; +- __entry->err = err; +- ), +- +- TP_printk("nr_pages:%u, lo_d_devid:%d", +- __entry->nr_pages, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_do_readpages_cloud_begin, +- +- TP_PROTO(int cnt, loff_t pos), +- +- TP_ARGS(cnt, pos), +- +- TP_STRUCT__entry( +- __field(int, cnt) +- __field(loff_t, pos) +- ), +- +- TP_fast_assign( +- __entry->cnt = cnt; +- __entry->pos = pos; +- ), +- +- TP_printk("cnt:%d, pos:%llx", +- __entry->cnt, __entry->pos) +-); +- +-TRACE_EVENT(hmdfs_do_readpages_cloud_end, +- +- TP_PROTO(int cnt, loff_t pos, int ret), +- +- TP_ARGS(cnt, pos, ret), +- +- TP_STRUCT__entry( +- __field(int, cnt) +- __field(loff_t, pos) +- __field(int, ret) +- ), +- +- TP_fast_assign( +- __entry->cnt = cnt; +- __entry->pos = pos; +- __entry->ret = ret; +- ), +- +- TP_printk("cnt:%d, pos:%llx", +- __entry->cnt, __entry->pos, __entry->ret) +-); +- +-TRACE_EVENT(hmdfs_client_recv_readpage, +- +- TP_PROTO(struct hmdfs_peer *con, unsigned long long remote_ino, +- unsigned long page_index, int err), +- +- TP_ARGS(con, remote_ino, page_index, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint64_t, remote_ino) +- __field(unsigned long, page_index) +- __field(uint64_t, device_id) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, con->sbi->local_src, 128); +- __entry->remote_ino = remote_ino; +- __entry->page_index = page_index; +- __entry->device_id = con->device_id; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, client readpage callback from remote device %llu, remote_ino=%llu, page_idx=%lu, err=%d", +- __entry->src, __entry->device_id, +- __entry->remote_ino, __entry->page_index, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_writepage_cb_enter, +- +- TP_PROTO(struct hmdfs_peer *con, unsigned long long remote_ino, +- unsigned long page_index, int err), +- +- TP_ARGS(con, remote_ino, page_index, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint64_t, remote_ino) +- __field(unsigned long, page_index) +- __field(uint64_t, device_id) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, con->sbi->local_src, 128); +- __entry->remote_ino = remote_ino; +- __entry->page_index = page_index; +- __entry->device_id = con->device_id; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, writepage_cb start, return from remote device %llu, remote_ino=%llu, page_idx=%lu, err=%d", +- __entry->src, __entry->device_id, +- __entry->remote_ino, __entry->page_index, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_writepage_cb_exit, +- +- TP_PROTO(struct hmdfs_peer *con, unsigned long long remote_ino, +- 
unsigned long page_index, int err), +- +- TP_ARGS(con, remote_ino, page_index, err), +- +- TP_STRUCT__entry( +- __array(char, src, 128) +- __field(uint64_t, remote_ino) +- __field(unsigned long, page_index) +- __field(uint64_t, device_id) +- __field(int, err) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->src, con->sbi->local_src, 128); +- __entry->remote_ino = remote_ino; +- __entry->page_index = page_index; +- __entry->device_id = con->device_id; +- __entry->err = err; +- ), +- +- TP_printk("hmdfs: src %s, writepage_cb exit, return from remote device %llu, remote_ino=%llu, page_index=%lu, err=%d", +- __entry->src, __entry->device_id, +- __entry->remote_ino, __entry->page_index, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_server_rebuild_dents, +- +- TP_PROTO(struct hmdfs_dcache_header *__h, int err), +- +- TP_ARGS(__h, err), +- +- TP_STRUCT__entry( +- __field(uint64_t, crtime) +- __field(uint64_t, crtime_nsec) +- __field(uint64_t, ctime) +- __field(uint64_t, ctime_nsec) +- __field(uint64_t, num) +- __field(int, err) +- ), +- +- TP_fast_assign( +- __entry->crtime = le64_to_cpu(__h->dcache_crtime); +- __entry->crtime_nsec = le64_to_cpu(__h->dcache_crtime_nsec); +- __entry->ctime = le64_to_cpu(__h->dentry_ctime); +- __entry->ctime_nsec = le64_to_cpu(__h->dentry_ctime_nsec); +- __entry->num = le64_to_cpu(__h->num); +- __entry->err = err; +- ), +- +- TP_printk("dcache crtime %llu:%llu ctime %llu:%llu has %llu dentry err %d", +- __entry->crtime, __entry->crtime_nsec, __entry->ctime, +- __entry->ctime_nsec, __entry->num, __entry->err) +-); +- +-TRACE_EVENT(hmdfs_server_readdir, +- +- TP_PROTO(struct readdir_request *req), +- +- TP_ARGS(req), +- +- TP_STRUCT__entry( +- __string(path, req->path) +- ), +- +- TP_fast_assign( +- __assign_str(path, req->path); +- ), +- +- TP_printk("hmdfs_server_readdir %s", __get_str(path)) +-); +- +-TRACE_EVENT(hmdfs_open_final_remote, +- +- TP_PROTO(struct hmdfs_inode_info *info, +- struct hmdfs_open_ret *open_ret, +- struct file *file, +- int reason), +- +- TP_ARGS(info, open_ret, file, reason), +- +- TP_STRUCT__entry( +- __array(char, file_path, MAX_FILTER_STR_VAL) +- __field(uint32_t, reason) +- __field(uint32_t, file_id) +- __field(uint64_t, file_ver) +- __field(uint64_t, remote_file_size) +- __field(uint64_t, remote_ino) +- __field(uint64_t, remote_ctime) +- __field(uint64_t, remote_ctime_nsec) +- __field(uint64_t, remote_stable_ctime) +- __field(uint64_t, remote_stable_ctime_nsec) +- __field(uint64_t, local_file_size) +- __field(uint64_t, local_ino) +- __field(uint64_t, local_ctime) +- __field(uint64_t, local_ctime_nsec) +- __field(uint64_t, local_stable_ctime) +- __field(uint64_t, local_stable_ctime_nsec) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->file_path, file->f_path.dentry->d_name.name, +- MAX_FILTER_STR_VAL); +- __entry->reason = reason; +- __entry->file_id = open_ret->fid.id; +- __entry->file_ver = open_ret->fid.ver; +- __entry->remote_file_size = open_ret->file_size; +- __entry->remote_ino = open_ret->ino; +- __entry->remote_ctime = open_ret->remote_ctime.tv_sec; +- __entry->remote_ctime_nsec = open_ret->remote_ctime.tv_nsec; +- __entry->remote_stable_ctime = open_ret->stable_ctime.tv_sec; +- __entry->remote_stable_ctime_nsec = +- open_ret->stable_ctime.tv_nsec; +- __entry->local_file_size = info->vfs_inode.i_size; +- __entry->local_ino = info->remote_ino; +- __entry->local_ctime = info->remote_ctime.tv_sec; +- __entry->local_ctime_nsec = info->remote_ctime.tv_nsec; +- __entry->local_stable_ctime = info->stable_ctime.tv_sec; +- 
__entry->local_stable_ctime_nsec = info->stable_ctime.tv_nsec; +- ), +- +- TP_printk("file path: %s, file id: %u, file ver: %llu, reason: %d, file size: %llu/%llu, ino: %llu/%llu, ctime: %llu.%llu/%llu.%llu, stable_ctime: %llu.%llu/%llu.%llu from remote/local", +- __entry->file_path, __entry->file_id, __entry->file_ver, +- __entry->reason, __entry->remote_file_size, +- __entry->local_file_size, __entry->remote_ino, +- __entry->local_ino, __entry->remote_ctime, +- __entry->remote_ctime_nsec, __entry->local_ctime, +- __entry->local_ctime_nsec, __entry->remote_stable_ctime, +- __entry->remote_stable_ctime_nsec, +- __entry->local_stable_ctime, __entry->local_stable_ctime_nsec) +-); +- +-TRACE_EVENT(hmdfs_server_open_enter, +- +- TP_PROTO(struct hmdfs_peer *con, +- struct open_request *recv), +- +- TP_ARGS(con, recv), +- +- TP_STRUCT__entry( +- __array(char, open_path, MAX_FILTER_STR_VAL) +- __array(char, dst_path, MAX_FILTER_STR_VAL) +- __field(uint32_t, file_type) +- ), +- +- TP_fast_assign( +- strlcpy(__entry->open_path, recv->buf, MAX_FILTER_STR_VAL); +- strlcpy(__entry->dst_path, con->sbi->local_dst, +- MAX_FILTER_STR_VAL); +- __entry->file_type = recv->file_type; +- ), +- +- TP_printk("server open file %s from %s, file_type is %u", +- __entry->open_path, __entry->dst_path, +- __entry->file_type) +-); +- +-TRACE_EVENT(hmdfs_server_open_exit, +- +- TP_PROTO(struct hmdfs_peer *con, +- struct open_response *resp, +- struct file *file, +- int ret), +- +- TP_ARGS(con, resp, file, ret), +- +- TP_STRUCT__entry( +- __array(char, file_path, MAX_FILTER_STR_VAL) +- __array(char, src_path, MAX_FILTER_STR_VAL) +- __field(uint32_t, file_id) +- __field(uint64_t, file_size) +- __field(uint64_t, ino) +- __field(uint64_t, ctime) +- __field(uint64_t, ctime_nsec) +- __field(uint64_t, stable_ctime) +- __field(uint64_t, stable_ctime_nsec) +- __field(int, retval) +- ), +- +- TP_fast_assign( +- if (file) +- strlcpy(__entry->file_path, +- file->f_path.dentry->d_name.name, +- MAX_FILTER_STR_VAL); +- else +- strlcpy(__entry->file_path, "null", MAX_FILTER_STR_VAL); +- strlcpy(__entry->src_path, con->sbi->local_src, +- MAX_FILTER_STR_VAL); +- __entry->file_id = resp ? resp->file_id : UINT_MAX; +- __entry->file_size = resp ? resp->file_size : ULLONG_MAX; +- __entry->ino = resp ? resp->ino : 0; +- __entry->ctime = resp ? resp->ctime : 0; +- __entry->ctime_nsec = resp ? resp->ctime_nsec : 0; +- __entry->stable_ctime = resp ? resp->stable_ctime : 0; +- __entry->stable_ctime_nsec = resp ? 
resp->stable_ctime_nsec : 0; +- __entry->retval = ret; +- ), +- +- TP_printk("server file %s is opened from %s, open result: %d, file id: %u, file size: %llu, ino: %llu, ctime: %llu.%llu, stable ctime: %llu.%llu", +- __entry->file_path, __entry->src_path, +- __entry->retval, __entry->file_id, +- __entry->file_size, __entry->ino, __entry->ctime, +- __entry->ctime_nsec, __entry->stable_ctime, +- __entry->stable_ctime_nsec) +-); +- +-TRACE_EVENT(hmdfs_merge_lookup_work_enter, +- +- TP_PROTO(struct merge_lookup_work *ml_work), +- +- TP_ARGS(ml_work), +- +- TP_STRUCT__entry( +- __field(int, devid) +- __string(name, ml_work->name) +- __field(unsigned int, flags) +- ), +- +- TP_fast_assign( +- __entry->devid = ml_work->devid; +- __assign_str(name, ml_work->name); +- __entry->flags = ml_work->flags; +- ), +- +- TP_printk("devid = %d, name:%s, flags:%u", +- __entry->devid, +- __get_str(name), +- __entry->flags) +-); +- +-TRACE_EVENT(hmdfs_merge_lookup_work_exit, +- +- TP_PROTO(struct merge_lookup_work *ml_work, int found), +- +- TP_ARGS(ml_work, found), +- +- TP_STRUCT__entry( +- __field(int, devid) +- __string(name, ml_work->name) +- __field(unsigned int, flags) +- __field(int, found) +- ), +- +- TP_fast_assign( +- __entry->devid = ml_work->devid; +- __assign_str(name, ml_work->name); +- __entry->flags = ml_work->flags; +- __entry->found = found; +- ), +- +- TP_printk("devid = %d, name:%s, flags:%u, found:%d", +- __entry->devid, +- __get_str(name), +- __entry->flags, +- __entry->found) +-); +- +-TRACE_EVENT(hmdfs_merge_update_dentry_info_enter, +- +- TP_PROTO(struct dentry *src_dentry, struct dentry *dst_dentry), +- +- TP_ARGS(src_dentry, dst_dentry), +- +- TP_STRUCT__entry( +- __string(src_name, src_dentry->d_name.name) +- __string(dst_name, dst_dentry->d_name.name) +- ), +- +- TP_fast_assign( +- __assign_str(src_name, src_dentry->d_name.name); +- __assign_str(dst_name, dst_dentry->d_name.name); +- ), +- +- TP_printk("src name:%s, dst name:%s", +- __get_str(src_name), +- __get_str(dst_name)) +-); +- +-TRACE_EVENT(hmdfs_merge_update_dentry_info_exit, +- +- TP_PROTO(struct dentry *src_dentry, struct dentry *dst_dentry), +- +- TP_ARGS(src_dentry, dst_dentry), +- +- TP_STRUCT__entry( +- __string(src_name, src_dentry->d_name.name) +- __string(dst_name, dst_dentry->d_name.name) +- ), +- +- TP_fast_assign( +- __assign_str(src_name, src_dentry->d_name.name); +- __assign_str(dst_name, dst_dentry->d_name.name); +- ), +- +- TP_printk("src name:%s, dst name:%s", +- __get_str(src_name), +- __get_str(dst_name)) +-); +- +-#endif +- +-#undef TRACE_INCLUDE_PATH +-#undef TRACE_INCLUDE_FILE +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE hmdfs_trace +-#include +diff --git a/fs/hmdfs/inode.c b/fs/hmdfs/inode.c +deleted file mode 100644 +index 33cc8c741..000000000 +--- a/fs/hmdfs/inode.c ++++ /dev/null +@@ -1,357 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
+- */ +- +-#include "hmdfs_device_view.h" +-#include "inode.h" +-#include "comm/connection.h" +- +-/** +- * Rules to generate inode numbers: +- * +- * "/", "/device_view", "/merge_view", "/device_view/local", "/device_view/cid" +- * = DOMAIN {3} : dev_id {29} : HMDFS_ROOT {32} +- * +- * "/device_view/cid/xxx" +- * = DOMAIN {3} : dev_id {29} : hash(remote_ino){32} +- * +- * "/merge_view/xxx" +- * = DOMAIN {3} : lower's dev_id {29} : lower's ino_raw {32} +- */ +- +-#define BIT_WIDE_TOTAL 64 +- +-#define BIT_WIDE_DOMAIN 3 +-#define BIT_WIDE_DEVID 29 +-#define BIT_WIDE_INO_RAW 32 +- +-enum DOMAIN { +- DOMAIN_ROOT, +- DOMAIN_DEVICE_LOCAL, +- DOMAIN_DEVICE_REMOTE, +- DOMAIN_DEVICE_CLOUD, +- DOMAIN_MERGE_VIEW, +- DOMAIN_CLOUD_MERGE_VIEW, +- DOMAIN_INVALID, +-}; +- +-union hmdfs_ino { +- const uint64_t ino_output; +- struct { +- uint64_t ino_raw : BIT_WIDE_INO_RAW; +- uint64_t dev_id : BIT_WIDE_DEVID; +- uint8_t domain : BIT_WIDE_DOMAIN; +- }; +-}; +- +-static uint8_t read_ino_domain(uint64_t ino) +-{ +- union hmdfs_ino _ino = { +- .ino_output = ino, +- }; +- +- return _ino.domain; +-} +- +-struct iget_args { +- /* The lower inode of local/merge/root(part) inode */ +- struct inode *lo_i; +- /* The peer of remote inode */ +- struct hmdfs_peer *peer; +- /* The ino of remote inode */ +- uint64_t remote_ino; +- +- /* The recordId of cloud inode */ +- uint8_t *cloud_record_id; +- uint8_t *reserved; +- +- /* Returned inode's ino */ +- union hmdfs_ino ino; +-}; +- +-/** +- * iget_test - whether or not the inode with matched hashval is the one we are +- * looking for +- * +- * @inode: the local inode we found in inode cache with matched hashval +- * @data: struct iget_args +- */ +-static int iget_test(struct inode *inode, void *data) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(inode); +- struct iget_args *ia = data; +- int res = 0; +- +- WARN_ON(ia->ino.domain < DOMAIN_ROOT || +- ia->ino.domain >= DOMAIN_INVALID); +- +- if (read_ino_domain(inode->i_ino) == DOMAIN_ROOT) +- return 1; +- if (read_ino_domain(inode->i_ino) != ia->ino.domain) +- return 0; +- +- switch (ia->ino.domain) { +- case DOMAIN_MERGE_VIEW: +- case DOMAIN_CLOUD_MERGE_VIEW: +- res = (ia->lo_i == hii->lower_inode); +- break; +- case DOMAIN_DEVICE_LOCAL: +- res = (ia->lo_i == hii->lower_inode); +- break; +- case DOMAIN_DEVICE_REMOTE: +- res = (ia->peer == hii->conn && +- ia->remote_ino == hii->remote_ino); +- break; +- case DOMAIN_DEVICE_CLOUD: +- res = (ia->cloud_record_id && +- (memcmp(ia->cloud_record_id, hii->cloud_record_id, +- CLOUD_RECORD_ID_LEN) == 0) && +- (ia->reserved[0] == hii->reserved[0])); +- break; +- } +- +- return res; +-} +- +-/** +- * iget_set - initialize a inode with iget_args +- * +- * @sb: the superblock of current hmdfs instance +- * @data: struct iget_args +- */ +-static int iget_set(struct inode *inode, void *data) +-{ +- struct hmdfs_inode_info *hii = hmdfs_i(inode); +- struct iget_args *ia = (struct iget_args *)data; +- +- inode->i_ino = ia->ino.ino_output; +- inode_inc_iversion(inode); +- +- hii->conn = ia->peer; +- hii->remote_ino = ia->remote_ino; +- hii->lower_inode = ia->lo_i; +- +- if (ia->cloud_record_id) { +- memcpy(hii->cloud_record_id, ia->cloud_record_id, CLOUD_RECORD_ID_LEN); +- memcpy(hii->reserved, ia->reserved, CLOUD_DENTRY_RESERVED_LENGTH); +- } +- +- return 0; +-} +- +-static uint64_t make_ino_raw_dev_local(uint64_t lo_ino) +-{ +- if (!(lo_ino >> BIT_WIDE_INO_RAW)) +- return lo_ino; +- +- return lo_ino * GOLDEN_RATIO_64 >> BIT_WIDE_INO_RAW; +-} +- +-static uint64_t 
make_ino_raw_dev_remote(uint64_t remote_ino) +-{ +- return hash_long(remote_ino, BIT_WIDE_INO_RAW); +-} +- +-/** +- * hmdfs_iget5_locked_merge - obtain an inode for the merge-view +- * +- * @sb: superblock of current instance +- * @fst_lo_i: the lower inode of it's first comrade +- * +- * Simply replace the lower's domain for a new ino. +- */ +-struct inode *hmdfs_iget5_locked_merge(struct super_block *sb, +- struct dentry *fst_lo_d) +-{ +- struct iget_args ia = { +- .lo_i = d_inode(fst_lo_d), +- .peer = NULL, +- .remote_ino = 0, +- .cloud_record_id = NULL, +- .ino.ino_output = 0, +- }; +- +- if (unlikely(!d_inode(fst_lo_d))) { +- hmdfs_err("Received a invalid lower inode"); +- return NULL; +- } +- if (unlikely(!hmdfs_d(fst_lo_d))) { +- hmdfs_err("Received a invalid fsdata"); +- return NULL; +- } +- +- ia.ino.ino_raw = d_inode(fst_lo_d)->i_ino; +- ia.ino.dev_id = hmdfs_d(fst_lo_d)->device_id; +- ia.ino.domain = DOMAIN_MERGE_VIEW; +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +-struct inode *hmdfs_iget5_locked_cloud_merge(struct super_block *sb, +- struct dentry *fst_lo_d) +-{ +- struct iget_args ia = { +- .lo_i = d_inode(fst_lo_d), +- .peer = NULL, +- .remote_ino = 0, +- .cloud_record_id = NULL, +- .ino.ino_output = 0, +- }; +- +- if (unlikely(!d_inode(fst_lo_d))) { +- hmdfs_err("Received a invalid lower inode"); +- return NULL; +- } +- if (unlikely(!hmdfs_d(fst_lo_d))) { +- hmdfs_err("Received a invalid fsdata"); +- return NULL; +- } +- +- ia.ino.ino_raw = d_inode(fst_lo_d)->i_ino; +- ia.ino.dev_id = hmdfs_d(fst_lo_d)->device_id; +- ia.ino.domain = DOMAIN_CLOUD_MERGE_VIEW; +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +-/** +- * hmdfs_iget5_locked_local - obtain an inode for the local-dev-view +- * +- * @sb: superblock of current instance +- * @lo_i: the lower inode from local filesystem +- * +- * Hashing local inode's ino to generate our ino. We continue to compare the +- * address of the lower_inode for uniqueness when collisions occurred. +- */ +-struct inode *hmdfs_iget5_locked_local(struct super_block *sb, +- struct inode *lo_i) +-{ +- struct iget_args ia = { +- .lo_i = lo_i, +- .peer = NULL, +- .remote_ino = 0, +- .cloud_record_id = NULL, +- .ino.ino_output = 0, +- }; +- +- if (unlikely(!lo_i)) { +- hmdfs_err("Received a invalid lower inode"); +- return NULL; +- } +- ia.ino.ino_raw = make_ino_raw_dev_local(lo_i->i_ino); +- ia.ino.dev_id = 0; +- ia.ino.domain = DOMAIN_DEVICE_LOCAL; +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +-/** +- * hmdfs_iget5_locked_remote - obtain an inode for the remote-dev-view +- * +- * @sb: superblock of current instance +- * @peer: corresponding device node +- * @remote_ino: remote inode's ino +- * +- * Hash remote ino for ino's 32bit~1bit. +- * +- * Note that currenly implementation assume the each remote inode has unique +- * ino. Thus the combination of the peer's unique dev_id and the remote_ino +- * is enough to determine a unique remote inode. 
+- */ +-struct inode *hmdfs_iget5_locked_remote(struct super_block *sb, +- struct hmdfs_peer *peer, +- uint64_t remote_ino) +-{ +- struct iget_args ia = { +- .lo_i = NULL, +- .peer = peer, +- .remote_ino = remote_ino, +- .cloud_record_id = NULL, +- .ino.ino_output = 0, +- }; +- +- if (unlikely(!peer)) { +- hmdfs_err("Received a invalid peer"); +- return NULL; +- } +- +- ia.ino.ino_raw = make_ino_raw_dev_remote(remote_ino); +- ia.ino.dev_id = peer->device_id; +- ia.ino.domain = DOMAIN_DEVICE_REMOTE; +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +-/** +- * hmdfs_iget5_locked_cloud - obtain an inode for the cloud-dev-view +- * +- * @sb: superblock of current instance +- * @peer: corresponding device node +- * @cloud_id: cloud file record id +- * +- * Hash remote ino for ino's 32bit~1bit. +- * +- * Note that currenly implementation assume the each remote inode has unique +- * ino. Thus the combination of the peer's unique dev_id and the remote_ino +- * is enough to determine a unique remote inode. +- */ +-struct inode *hmdfs_iget5_locked_cloud(struct super_block *sb, +- struct hmdfs_peer *peer, +- struct hmdfs_lookup_cloud_ret *res) +-{ +- struct iget_args ia = { +- .lo_i = NULL, +- .peer = peer, +- .remote_ino = 0, +- .cloud_record_id = res->record_id, +- .reserved = res->reserved, +- .ino.ino_output = 0, +- }; +- +- if (unlikely(!peer)) { +- hmdfs_err("Received a invalid peer"); +- return NULL; +- } +- +- ia.ino.ino_raw = make_ino_raw_cloud(res->record_id) + res->reserved[0]; +- ia.ino.dev_id = peer->device_id; +- ia.ino.domain = DOMAIN_DEVICE_CLOUD; +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +-struct inode *hmdfs_iget_locked_root(struct super_block *sb, uint64_t root_ino, +- struct inode *lo_i, +- struct hmdfs_peer *peer) +-{ +- struct iget_args ia = { +- .lo_i = lo_i, +- .peer = peer, +- .remote_ino = 0, +- .cloud_record_id = NULL, +- .ino.ino_raw = root_ino, +- .ino.dev_id = peer ? peer->device_id : 0, +- .ino.domain = DOMAIN_ROOT, +- }; +- +- if (unlikely(root_ino < 0 || root_ino >= HMDFS_ROOT_INVALID)) { +- hmdfs_err("Root %llu is invalid", root_ino); +- return NULL; +- } +- if (unlikely(root_ino == HMDFS_ROOT_DEV_REMOTE && !peer)) { +- hmdfs_err("Root %llu received a invalid peer", root_ino); +- return NULL; +- } +- +- return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia); +-} +- +- +-void hmdfs_update_upper_file(struct file *upper_file, struct file *lower_file) +-{ +- loff_t upper_size = i_size_read(upper_file->f_inode); +- loff_t lower_size = i_size_read(lower_file->f_inode); +- +- if (upper_file->f_inode->i_mapping && upper_size != lower_size) { +- i_size_write(upper_file->f_inode, lower_size); +- truncate_inode_pages(upper_file->f_inode->i_mapping, 0); +- } +-} +\ No newline at end of file +diff --git a/fs/hmdfs/inode.h b/fs/hmdfs/inode.h +deleted file mode 100644 +index fb9bd2929..000000000 +--- a/fs/hmdfs/inode.h ++++ /dev/null +@@ -1,264 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/inode.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
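
As a compact reference for the DOMAIN {3} : dev_id {29} : ino_raw {32} layout documented at the top of inode.c, the sketch below packs a remote ino the same way hmdfs_iget5_locked_remote() does. make_remote_ino() is illustrative only and does not exist in the source; it reuses union hmdfs_ino, BIT_WIDE_INO_RAW and DOMAIN_DEVICE_REMOTE from above.

/* Illustrative only: pack a remote inode number with the union above. */
static uint64_t make_remote_ino(uint64_t dev_id, uint64_t remote_ino)
{
	union hmdfs_ino ino = {
		.ino_raw = hash_long(remote_ino, BIT_WIDE_INO_RAW),
		.dev_id  = dev_id,               /* truncated to 29 bits */
		.domain  = DOMAIN_DEVICE_REMOTE, /* top 3 bits */
	};

	return ino.ino_output;
}

Because hash_long() folds a 64-bit remote ino into 32 bits, two remote inodes can collide in ino_raw; iget_test() above disambiguates by additionally comparing the (peer, remote_ino) pair.
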
+- */ +- +-#ifndef INODE_H +-#define INODE_H +- +-#include "hmdfs.h" +- +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +-#include +-#endif +- +-enum { +- HMDFS_REMOTE_INODE_NONE = 0, +- HMDFS_REMOTE_INODE_STASHING, +- HMDFS_REMOTE_INODE_RESTORING, +-}; +- +-/***************************************************************************** +- * fid +- *****************************************************************************/ +- +-/* Bits for fid_flags */ +-enum { +- HMDFS_FID_NEED_OPEN = 0, +- HMDFS_FID_OPENING, +-}; +- +-struct hmdfs_fid { +- __u64 ver; +- __u32 id; +-}; +- +-/* +- * Cache file is stored in file like following format: +- * ________________________________________________________________ +- * |meta file info| remote file(s) path | file content | +- * | head | path | data | +- * ↑ ↑ +- * path_offs data_offs +- */ +-struct hmdfs_cache_info { +- /* Path start offset in file (HMDFS_STASH_BLK_SIZE aligned) */ +- __u32 path_offs; +- __u32 path_len; +- __u32 path_cnt; +- char *path_buf; +- /* Stricky remote file(hardlink)s' path, split by '\0' */ +- char *path; +- /* Data start offset in file (HMDFS_STASH_BLK_SIZE aligned) */ +- __u32 data_offs; +- /* # of pages need to be written to remote file during offline */ +- atomic64_t to_write_pgs; +- /* # of pages written to remote file during offline */ +- atomic64_t written_pgs; +- /* Stash file handler */ +- struct file *cache_file; +-}; +- +-/***************************************************************************** +- * inode info and it's inline helpers +- *****************************************************************************/ +- +-struct hmdfs_inode_info { +- struct inode *lower_inode; // for local/merge inode +- struct hmdfs_peer *conn; // for remote inode +- struct kref ref; +- spinlock_t fid_lock; +- struct hmdfs_fid fid; +- unsigned long fid_flags; +- wait_queue_head_t fid_wq; +- __u8 inode_type; // deprecated: use ino system instead +- atomic_t write_opened; +- +- /* writeback list */ +- struct list_head wb_list; +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- __u16 perm; +-#endif +- /* +- * lookup remote file will generate a local inode, this store the +- * combination of remote inode number and generation in such situation. +- * the uniqueness of local inode can be determined. +- */ +- __u64 remote_ino; +-#define CLOUD_RECORD_ID_LEN 33 +- __u8 cloud_record_id[CLOUD_RECORD_ID_LEN]; +-#define CLOUD_DENTRY_RESERVED_LENGTH 3 +- __u8 reserved[CLOUD_DENTRY_RESERVED_LENGTH]; +- /* +- * if this value is not ULLONG_MAX, it means that remote getattr syscall +- * should return this value as inode size. +- */ +- __u64 getattr_isize; +- /* +- * this value stores remote ctime, explicitly when remote file is opened +- */ +- struct hmdfs_time_t remote_ctime; +- /* +- * this value stores the last time, aligned to dcache_precision, that +- * remote file was modified. It should be noted that this value won't +- * be effective if writecace_expire is set. +- */ +- struct hmdfs_time_t stable_ctime; +- /* +- * If this value is set nonzero, pagecache should be truncated if the +- * time that the file is opened is beyond the value. Furthermore, +- * the functionality of stable_ctime won't be effective. +- */ +- unsigned long writecache_expire; +- /* +- * This value record how many times the file has been written while file +- * is opened. 'writecache_expire' will set in close if this value is +- * nonzero. 
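
The stash-file diagram in hmdfs_cache_info above fixes only the ordering (head, then path, then data) and the alignment of the two offsets. The offset arithmetic might look like the sketch below, under the assumption that HMDFS_STASH_BLK_SIZE is a power of two; 4096 is used here purely as a stand-in, the real constant lives in the stash code.

/* Assumed block size, for illustration only. */
#define STASH_BLK_SIZE 4096

static void stash_layout(u32 head_len, u32 path_len,
			 u32 *path_offs, u32 *data_offs)
{
	/* path starts at the first block boundary after the header ... */
	*path_offs = round_up(head_len, STASH_BLK_SIZE);
	/* ... and data at the first boundary after the path strings */
	*data_offs = round_up(*path_offs + path_len, STASH_BLK_SIZE);
}

Aligning data_offs to a block boundary presumably lets page N of the remote file be stashed at data_offs + N * PAGE_SIZE without any byte-shifting.
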
+- */ +- atomic64_t write_counter; +- /* +- * will be linked to hmdfs_peer::wr_opened_inode_list +- * if the remote inode is writable-opened. And using +- * wr_opened_cnt to track possibly multiple writeable-open. +- */ +- struct list_head wr_opened_node; +- atomic_t wr_opened_cnt; +- spinlock_t stash_lock; +- unsigned int stash_status; +- struct hmdfs_cache_info *cache; +- /* link to hmdfs_peer::stashed_inode_list when stashing completes */ +- struct list_head stash_node; +- /* +- * The flush/fsync thread will hold the write lock while threads +- * calling writepage will hold the read lock. We use rwlock to +- * eliminate the cases that flush/fsync operations are done with +- * re-dirtied pages remain dirty. +- * +- * Here is the explanation in detail: +- * +- * During `writepage()`, the state of a re-dirtied page will switch +- * to the following states in sequence: +- * s1: page dirty + tree dirty +- * s2: page dirty + tree dirty +- * s3: page clean + tree dirty +- * s4: page clean + tree clean + write back +- * s5: page dirty + tree dirty + write back +- * s6: page dirty + tree dirty +- * +- * A page upon s4 will thus be ignored by the concurrent +- * `do_writepages()` contained by `close()`, `fsync()`, making it's +- * state inconsistent. +- * +- * To avoid such situation, we use per-file rwsems to prevent +- * concurrent in-flight `writepage` during `close()` or `fsync()`. +- * +- * Minimal overhead is brought in since rsems allow concurrent +- * `writepage` while `close()` or `fsync()` is natural to wait for +- * in-flight `writepage()`s to complete. +- * +- * NOTE that in the worst case, a process may wait for wsem for TIMEOUT +- * even if a signal is pending. But we've to wait there to iterate all +- * pages and make sure that no dirty page should remain. +- */ +- struct rw_semaphore wpage_sem; +- +- // The real inode shared with vfs. ALWAYS PUT IT AT THE BOTTOM. 
+- struct inode vfs_inode; +-}; +- +-struct hmdfs_readdir_work { +- struct list_head head; +- struct dentry *dentry; +- struct hmdfs_peer *con; +- struct delayed_work dwork; +-}; +- +-static inline struct hmdfs_inode_info *hmdfs_i(struct inode *inode) +-{ +- return container_of(inode, struct hmdfs_inode_info, vfs_inode); +-} +- +-static inline bool hmdfs_inode_is_stashing(const struct hmdfs_inode_info *info) +-{ +- const struct hmdfs_sb_info *sbi = hmdfs_sb(info->vfs_inode.i_sb); +- +- /* Refer to comments in hmdfs_stash_remote_inode() */ +- return (hmdfs_is_stash_enabled(sbi) && +- smp_load_acquire(&info->stash_status)); // protect +-} +- +-static inline void hmdfs_remote_fetch_fid(struct hmdfs_inode_info *info, +- struct hmdfs_fid *fid) +-{ +- spin_lock(&info->fid_lock); +- *fid = info->fid; +- spin_unlock(&info->fid_lock); +-} +- +-/***************************************************************************** +- * ino allocator +- *****************************************************************************/ +- +-enum HMDFS_ROOT { +- HMDFS_ROOT_ANCESTOR = 1, // / +- HMDFS_ROOT_DEV, // /device_view +- HMDFS_ROOT_DEV_LOCAL, // /device_view/local +- HMDFS_ROOT_DEV_REMOTE, // /device_view/remote +- HMDFS_ROOT_DEV_CLOUD, // /device_view/cloud +- HMDFS_ROOT_MERGE, // /merge_view +- HMDFS_ROOT_MERGE_CLOUD, // /cloud_merge_view +- +- HMDFS_ROOT_INVALID, +-}; +- +-// delete layer, directory layer, not overlay layer +-enum HMDFS_LAYER_TYPE { +- HMDFS_LAYER_ZERO = 0, // / +- HMDFS_LAYER_FIRST_DEVICE, // /device_view +- HMDFS_LAYER_SECOND_LOCAL, // /device_view/local +- HMDFS_LAYER_SECOND_REMOTE, // /device_view/remote +- HMDFS_LAYER_SECOND_CLOUD, // /device_view/cloud +- HMDFS_LAYER_OTHER_LOCAL, // /device_view/local/xx +- HMDFS_LAYER_OTHER_REMOTE, // /device_view/remote/xx +- HMDFS_LAYER_OTHER_CLOUD, // /device_view/cloud/xx +- +- HMDFS_LAYER_FIRST_MERGE, // /merge_view +- HMDFS_LAYER_OTHER_MERGE, // /merge_view/xxx +- HMDFS_LAYER_FIRST_MERGE_CLOUD, // /cloud_merge_view +- HMDFS_LAYER_OTHER_MERGE_CLOUD, // /coud_merge_view/xxx +- HMDFS_LAYER_INVALID, +-}; +- +-struct inode *hmdfs_iget_locked_root(struct super_block *sb, uint64_t root_ino, +- struct inode *lo_i, +- struct hmdfs_peer *peer); +-struct inode *hmdfs_iget5_locked_merge(struct super_block *sb, +- struct dentry *fst_lo_d); +-struct inode *hmdfs_iget5_locked_cloud_merge(struct super_block *sb, +- struct dentry *fst_lo_d); +- +-struct inode *hmdfs_iget5_locked_local(struct super_block *sb, +- struct inode *lo_i); +-struct hmdfs_peer; +-struct inode *hmdfs_iget5_locked_remote(struct super_block *sb, +- struct hmdfs_peer *peer, +- uint64_t remote_ino); +- +-struct hmdfs_lookup_cloud_ret { +- uint64_t i_size; +- uint64_t i_mtime; +- uint8_t record_id[CLOUD_RECORD_ID_LEN]; +- uint8_t reserved[CLOUD_DENTRY_RESERVED_LENGTH]; +- uint16_t i_mode; +-}; +- +-struct inode *hmdfs_iget5_locked_cloud(struct super_block *sb, +- struct hmdfs_peer *peer, +- struct hmdfs_lookup_cloud_ret *res); +- +-void hmdfs_update_upper_file(struct file *upper_file, struct file *lower_file); +-uint32_t make_ino_raw_cloud(uint8_t *cloud_id); +-#endif // INODE_H +diff --git a/fs/hmdfs/inode_cloud.c b/fs/hmdfs/inode_cloud.c +deleted file mode 100644 +index 6099f1336..000000000 +--- a/fs/hmdfs/inode_cloud.c ++++ /dev/null +@@ -1,446 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode_cloud.c +- * +- * Copyright (c) 2023-2023 Huawei Device Co., Ltd. 
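
The wpage_sem scheme described at length in inode.h above boils down to two lock shapes. The pair of sketches below is hypothetical (neither function exists under these names) and only mirrors the protocol the comment documents.

/* Page writers take the semaphore shared ... */
static int sketch_writepage(struct hmdfs_inode_info *info)
{
	int err;

	down_read(&info->wpage_sem);	/* concurrent writepages still allowed */
	err = 0;			/* ... send one page to the peer ... */
	up_read(&info->wpage_sem);
	return err;
}

/* ... while flush/fsync takes it exclusive. */
static int sketch_fsync(struct hmdfs_inode_info *info)
{
	/*
	 * Waits out every in-flight writepage, so no page can sit in the
	 * transient s4/s5 states of the comment while dirty pages are
	 * iterated and written back.
	 */
	down_write(&info->wpage_sem);
	/* ... do_writepages() and wait for completion ... */
	up_write(&info->wpage_sem);
	return 0;
}
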
+- */ +- +-#include +-#include +-#include +-#include +- +-#include "comm/socket_adapter.h" +-#include "hmdfs.h" +-#include "hmdfs_client.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_dentryfile_cloud.h" +-#include "hmdfs_share.h" +-#include "hmdfs_trace.h" +-#include "authority/authentication.h" +-#include "stash.h" +- +-uint32_t make_ino_raw_cloud(uint8_t *cloud_id) +-{ +- struct qstr str; +- +- str.len = CLOUD_RECORD_ID_LEN; +- str.name = cloud_id; +- return hmdfs_dentry_hash(&str, CLOUD_RECORD_ID_LEN); +-} +- +-struct hmdfs_lookup_cloud_ret *lookup_cloud_dentry(struct dentry *child_dentry, +- const struct qstr *qstr, +- uint64_t dev_id) +-{ +- struct hmdfs_lookup_cloud_ret *lookup_ret; +- struct hmdfs_dentry_cloud *dentry = NULL; +- struct clearcache_item *cache_item = NULL; +- struct hmdfs_dcache_lookup_ctx_cloud ctx; +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- +- get_cloud_cache_file(child_dentry->d_parent, sbi); +- cache_item = hmdfs_find_cache_item(dev_id, child_dentry->d_parent); +- if (!cache_item) +- return NULL; +- +- lookup_ret = kmalloc(sizeof(*lookup_ret), GFP_KERNEL); +- if (!lookup_ret) +- goto out; +- +- hmdfs_init_dcache_lookup_ctx_cloud(&ctx, sbi, qstr, cache_item->filp); +- dentry = hmdfs_find_dentry_cloud(child_dentry, &ctx); +- if (!dentry) { +- kfree(lookup_ret); +- lookup_ret = NULL; +- goto out; +- } +- +- lookup_ret->i_mode = le16_to_cpu(dentry->i_mode); +- lookup_ret->i_size = le64_to_cpu(dentry->i_size); +- lookup_ret->i_mtime = le64_to_cpu(dentry->i_mtime); +- memcpy(lookup_ret->record_id, dentry->record_id, CLOUD_RECORD_ID_LEN); +- memcpy(lookup_ret->reserved, dentry->reserved, CLOUD_DENTRY_RESERVED_LENGTH); +- +- hmdfs_unlock_file(ctx.filp, get_dentry_group_pos(ctx.bidx), +- DENTRYGROUP_SIZE); +- kfree(ctx.page); +-out: +- kref_put(&cache_item->ref, release_cache_item); +- return lookup_ret; +-} +- +-static struct hmdfs_lookup_cloud_ret * +-hmdfs_lookup_by_cloud(struct dentry *dentry, unsigned int flags) +-{ +- struct hmdfs_lookup_cloud_ret *result = NULL; +- char *file_name = NULL; +- struct qstr qstr; +- int file_name_len = dentry->d_name.len; +- +- file_name = kzalloc(NAME_MAX + 1, GFP_KERNEL); +- if (!file_name) +- return NULL; +- strncpy(file_name, dentry->d_name.name, file_name_len); +- qstr.name = file_name; +- qstr.len = strlen(file_name); +- +- result = lookup_cloud_dentry(dentry, &qstr, CLOUD_DEVICE); +- +- kfree(file_name); +- return result; +-} +- +-/* +- * hmdfs_update_inode_size - update inode size when finding aready existed +- * inode. +- * +- * First of all, if the file is opened for writing, we don't update inode size +- * here, because inode size is about to be changed after writing. +- * +- * If the file is not opened, simply update getattr_isize(not actual inode size, +- * just a value showed to user). This is safe because inode size will be +- * up-to-date after open. +- * +- * If the file is opened for read: +- * a. getattr_isize == HMDFS_STALE_REMOTE_ISIZE +- * 1) i_size == new_size, nothing need to be done. +- * 2) i_size > new_size, we keep the i_size and set getattr_isize to new_size, +- * stale data might be readed in this case, which is fine because file is +- * opened before remote truncate the file. +- * 3) i_size < new_size, we drop the last page of the file if i_size is not +- * aligned to PAGE_SIZE, clear getattr_isize, and update i_size to +- * new_size. +- * b. getattr_isize != HMDFS_STALE_REMOTE_ISIZE, getattr_isize will only be set +- * after 2). 
+- * 4) getattr_isize > i_size, this situation is impossible. +- * 5) i_size >= new_size, this case is the same as 2). +- * 6) i_size < new_size, this case is the same as 3). +- */ +-static void hmdfs_update_inode_size(struct inode *inode, uint64_t new_size) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- int writecount; +- uint64_t size; +- +- inode_lock(inode); +- size = info->getattr_isize; +- if (size == HMDFS_STALE_REMOTE_ISIZE) +- size = i_size_read(inode); +- if (size == new_size) { +- inode_unlock(inode); +- return; +- } +- +- writecount = atomic_read(&inode->i_writecount); +- /* check if writing is in progress */ +- if (writecount > 0) { +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- inode_unlock(inode); +- return; +- } +- +- /* check if there is no one who opens the file */ +- if (kref_read(&info->ref) == 0) +- goto update_info; +- +- /* check if there is someone who opens the file for read */ +- if (writecount == 0) { +- uint64_t aligned_size; +- +- /* use inode size here instead of getattr_isize */ +- size = i_size_read(inode); +- if (new_size <= size) +- goto update_info; +- /* +- * if the old inode size is not aligned to HMDFS_PAGE_SIZE, we +- * need to drop the last page of the inode, otherwise zero will +- * be returned while reading the new range in the page after +- * chaning inode size. +- */ +- aligned_size = round_down(size, HMDFS_PAGE_SIZE); +- if (aligned_size != size) +- truncate_inode_pages(inode->i_mapping, aligned_size); +- i_size_write(inode, new_size); +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- inode_unlock(inode); +- return; +- } +- +-update_info: +- info->getattr_isize = new_size; +- inode_unlock(inode); +-} +- +-static void hmdfs_update_inode(struct inode *inode, +- struct hmdfs_lookup_cloud_ret *lookup_result) +-{ +- struct hmdfs_time_t remote_mtime = { +- .tv_sec = lookup_result->i_mtime, +- .tv_nsec = 0, +- }; +- +- /* +- * We only update mtime if the file is not opened for writing. If we do +- * update it before writing is about to start, user might see the mtime +- * up-and-down if system time in server and client do not match. However +- * mtime in client will eventually match server after timeout without +- * writing. +- */ +- if (!inode_is_open_for_write(inode)) +- inode->i_mtime = remote_mtime; +- +- /* +- * We don't care i_size of dir, and lock inode for dir +- * might cause deadlock. 
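
Case 3) is the subtle branch of hmdfs_update_inode_size() above. A worked example, assuming HMDFS_PAGE_SIZE is 4096 (the actual constant is defined elsewhere in hmdfs):

/*
 * Read-only open, old i_size = 5000, remote reports new_size = 9000:
 *
 *	aligned_size = round_down(5000, 4096) = 4096;	!= 5000, so
 *	truncate_inode_pages(inode->i_mapping, 4096);	drop stale last page
 *	i_size_write(inode, 9000);
 *
 * Without the truncate, bytes 5000..8191 would be served as zeroes from
 * the cached partial last page instead of being re-read from the remote
 * end after the size change.
 */
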
+- */ +- if (S_ISREG(inode->i_mode)) +- hmdfs_update_inode_size(inode, lookup_result->i_size); +-} +- +-static void hmdfs_fill_inode_permission(struct inode *inode, struct inode *dir, +- umode_t mode) +-{ +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- inode->i_uid = dir->i_uid; +- inode->i_gid = dir->i_gid; +-#endif +-} +- +-struct hmdfs_peer peer; +- +-struct inode *fill_inode_cloud(struct super_block *sb, struct hmdfs_lookup_cloud_ret *res, struct inode *dir) +-{ +- int ret = 0; +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info; +- umode_t mode = res->i_mode; +- peer.device_id = CLOUD_DEVICE; +- +- inode = hmdfs_iget5_locked_cloud(sb, &peer, res); +- if (!inode) +- return ERR_PTR(-ENOMEM); +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_OTHER_CLOUD; +- /* the inode was found in cache */ +- if (!(inode->i_state & I_NEW)) { +- hmdfs_fill_inode_permission(inode, dir, mode); +- hmdfs_update_inode(inode, res); +- return inode; +- } +- +- inode->__i_ctime.tv_sec = 0; +- inode->__i_ctime.tv_nsec = 0; +- inode->i_mtime.tv_sec = res->i_mtime; +- inode->i_mtime.tv_nsec = 0; +- +- inode->i_uid = USER_DATA_RW_UID; +- inode->i_gid = USER_DATA_RW_GID; +- +- if (S_ISDIR(mode)) +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- else if (S_ISREG(mode)) +- inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +- else { +- ret = -EIO; +- goto bad_inode; +- } +- +- if (S_ISREG(mode)) { +- inode->i_op = &hmdfs_dev_file_iops_cloud; +- inode->i_fop = &hmdfs_dev_file_fops_cloud; +- inode->i_size = res->i_size; +- set_nlink(inode, 1); +- } else if (S_ISDIR(mode)) { +- inode->i_op = &hmdfs_dev_dir_inode_ops_cloud; +- inode->i_fop = &hmdfs_dev_dir_ops_cloud; +- set_nlink(inode, 2); +- } else { +- ret = -EIO; +- goto bad_inode; +- } +- +- inode->i_mapping->a_ops = &hmdfs_dev_file_aops_cloud; +- +- hmdfs_fill_inode_permission(inode, dir, mode); +- unlock_new_inode(inode); +- return inode; +-bad_inode: +- iget_failed(inode); +- return ERR_PTR(ret); +-} +- +-static struct dentry *hmdfs_lookup_cloud_dentry(struct inode *parent_inode, +- struct dentry *child_dentry, +- int flags) +-{ +- struct dentry *ret = NULL; +- struct inode *inode = NULL; +- struct super_block *sb = parent_inode->i_sb; +- struct hmdfs_lookup_cloud_ret *lookup_result = NULL; +- struct hmdfs_dentry_info *gdi = hmdfs_d(child_dentry); +- +- lookup_result = hmdfs_lookup_by_cloud(child_dentry, flags); +- if (lookup_result != NULL) { +- if (in_share_dir(child_dentry)) +- gdi->file_type = HM_SHARE; +- inode = fill_inode_cloud(sb, lookup_result, parent_inode); +- if (IS_ERR(inode)) { +- ret = ERR_CAST(inode); +- goto out; +- } +- +- check_and_fixup_ownership_remote(parent_inode, +- inode, +- child_dentry); +- ret = d_splice_alias(inode, child_dentry); +- if (!IS_ERR_OR_NULL(ret)) +- child_dentry = ret; +- } else { +- ret = ERR_PTR(-ENOENT); +- } +-out: +- kfree(lookup_result); +- return ret; +-} +- +-struct dentry *hmdfs_lookup_cloud(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- int err = 0; +- struct dentry *ret = NULL; +- struct hmdfs_dentry_info *gdi = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- +- trace_hmdfs_lookup_remote(parent_inode, child_dentry, flags); +- if (child_dentry->d_name.len > NAME_MAX) { +- err = -ENAMETOOLONG; +- ret = ERR_PTR(-ENAMETOOLONG); +- goto out; +- } +- +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_OTHER_CLOUD); +- if (err) { +- ret = ERR_PTR(err); +- goto out; +- } +- gdi = hmdfs_d(child_dentry); 
+- gdi->device_id = hmdfs_d(child_dentry->d_parent)->device_id; +- +- ret = hmdfs_lookup_cloud_dentry(parent_inode, child_dentry, flags); +- /* +- * don't return an error if the inode does not exist, so that vfs can +- * continue to create it. +- */ +- if (IS_ERR_OR_NULL(ret)) { +- err = PTR_ERR(ret); +- if (err == -ENOENT) +- ret = NULL; +- } else { +- child_dentry = ret; +- } +- +-out: +- if (!err) +- hmdfs_set_time(child_dentry, jiffies); +- trace_hmdfs_lookup_remote_end(parent_inode, child_dentry, err); +- return ret; +-} +- +-int hmdfs_mkdir_cloud(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- return -EPERM; +-} +- +-int hmdfs_create_cloud(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, +- bool want_excl) +-{ +- return -EPERM; +-} +- +-int hmdfs_rmdir_cloud(struct inode *dir, struct dentry *dentry) +-{ +- return -EPERM; +-} +- +-int hmdfs_unlink_cloud(struct inode *dir, struct dentry *dentry) +-{ +- return 0; +-} +- +-int hmdfs_rename_cloud(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- return -EPERM; +-} +- +-static int hmdfs_dir_setattr_cloud(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- // dir setattr is not supported +- return 0; +-} +- +-const struct inode_operations hmdfs_dev_dir_inode_ops_cloud = { +- .lookup = hmdfs_lookup_cloud, +- .mkdir = hmdfs_mkdir_cloud, +- .create = hmdfs_create_cloud, +- .rmdir = hmdfs_rmdir_cloud, +- .unlink = hmdfs_unlink_cloud, +- .rename = hmdfs_rename_cloud, +- .setattr = hmdfs_dir_setattr_cloud, +- .permission = hmdfs_permission, +-}; +- +-static int hmdfs_setattr_cloud(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(d_inode(dentry)); +- struct inode *inode = d_inode(dentry); +- int err = 0; +- +- if (hmdfs_inode_is_stashing(info)) +- return -EAGAIN; +- +- if (ia->ia_valid & ATTR_SIZE) { +- err = inode_newsize_ok(inode, ia->ia_size); +- if (err) +- return err; +- truncate_setsize(inode, ia->ia_size); +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- } +- if (ia->ia_valid & ATTR_MTIME) +- inode->i_mtime = ia->ia_mtime; +- +- return err; +-} +- +- +-static int hmdfs_get_cached_attr_cloud(struct mnt_idmap *idmap, const struct path *path, +- struct kstat *stat, u32 request_mask, +- unsigned int flags) +-{ +- struct inode *inode = d_inode(path->dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- uint64_t size = info->getattr_isize; +- +- stat->ino = inode->i_ino; +- stat->mtime = inode->i_mtime; +- stat->mode = inode->i_mode; +- stat->uid.val = inode->i_uid.val; +- stat->gid.val = inode->i_gid.val; +- if (size == HMDFS_STALE_REMOTE_ISIZE) +- size = i_size_read(inode); +- +- stat->size = size; +- return 0; +-} +- +-const struct inode_operations hmdfs_dev_file_iops_cloud = { +- .setattr = hmdfs_setattr_cloud, +- .permission = hmdfs_permission, +- .getattr = hmdfs_get_cached_attr_cloud, +- .listxattr = NULL, +-}; +diff --git a/fs/hmdfs/inode_cloud_merge.c b/fs/hmdfs/inode_cloud_merge.c +deleted file mode 100644 +index a6f0b150f..000000000 +--- a/fs/hmdfs/inode_cloud_merge.c ++++ /dev/null +@@ -1,724 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode_cloud_merge.c +- * +- * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
+- */ +- +-#include "hmdfs_merge_view.h" +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "authority/authentication.h" +-#include "hmdfs_trace.h" +- +-static struct inode *fill_inode_merge(struct super_block *sb, +- struct inode *parent_inode, +- struct dentry *child_dentry, +- struct dentry *lo_d_dentry) +-{ +- int ret = 0; +- struct dentry *fst_lo_d = NULL; +- struct hmdfs_inode_info *info = NULL; +- struct inode *inode = NULL; +- umode_t mode; +- +- if (lo_d_dentry) { +- fst_lo_d = lo_d_dentry; +- dget(fst_lo_d); +- } else { +- fst_lo_d = hmdfs_get_fst_lo_d(child_dentry); +- } +- if (!fst_lo_d) { +- inode = ERR_PTR(-EINVAL); +- goto out; +- } +- if (hmdfs_i(parent_inode)->inode_type == HMDFS_LAYER_ZERO) +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_MERGE_CLOUD, NULL, +- NULL); +- else +- inode = hmdfs_iget5_locked_cloud_merge(sb, fst_lo_d); +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- inode = ERR_PTR(-ENOMEM); +- goto out; +- } +- if (!(inode->i_state & I_NEW)) +- goto out; +- info = hmdfs_i(inode); +- if (hmdfs_i(parent_inode)->inode_type == HMDFS_LAYER_ZERO) +- info->inode_type = HMDFS_LAYER_FIRST_MERGE_CLOUD; +- else +- info->inode_type = HMDFS_LAYER_OTHER_MERGE_CLOUD; +- +- inode->i_uid = USER_DATA_RW_UID; +- inode->i_gid = USER_DATA_RW_GID; +- +- update_inode_attr(inode, child_dentry); +- mode = d_inode(fst_lo_d)->i_mode; +- +- if (S_ISREG(mode)) { +- inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +- inode->i_op = &hmdfs_file_iops_cloud_merge; +- inode->i_fop = &hmdfs_file_fops_merge; +- set_nlink(inode, 1); +- } else if (S_ISDIR(mode)) { +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- inode->i_op = &hmdfs_dir_iops_cloud_merge; +- inode->i_fop = &hmdfs_dir_fops_merge; +- set_nlink(inode, get_num_comrades(child_dentry) + 2); +- } else { +- ret = -EIO; +- goto bad_inode; +- } +- +- unlock_new_inode(inode); +-out: +- dput(fst_lo_d); +- return inode; +-bad_inode: +- iget_failed(inode); +- return ERR_PTR(ret); +-} +- +-static struct hmdfs_dentry_comrade * +-cloud_merge_lookup_comrade(struct hmdfs_sb_info *sbi, +- const char *name, +- int devid, +- unsigned int flags) +-{ +- int err; +- struct path root, path; +- struct hmdfs_dentry_comrade *comrade = NULL; +- +- err = kern_path(sbi->real_dst, LOOKUP_DIRECTORY, &root); +- if (err) { +- comrade = ERR_PTR(err); +- goto out; +- } +- +- err = vfs_path_lookup(root.dentry, root.mnt, name, flags, &path); +- if (err) { +- comrade = ERR_PTR(err); +- goto root_put; +- } +- +- comrade = alloc_comrade(path.dentry, devid); +- +- path_put(&path); +-root_put: +- path_put(&root); +-out: +- return comrade; +-} +- +-static void merge_lookup_sync(struct hmdfs_dentry_info_merge *mdi, +- struct hmdfs_sb_info *sbi, +- int devid, +- const char *name, +- unsigned int flags) +-{ +- struct hmdfs_dentry_comrade *comrade; +- +- comrade = cloud_merge_lookup_comrade(sbi, name, devid, flags); +- if (IS_ERR(comrade)) +- return; +- +- mutex_lock(&mdi->comrade_list_lock); +- +- if (!is_valid_comrade(mdi, hmdfs_cm(comrade))) +- destroy_comrade(comrade); +- else +- link_comrade(&mdi->comrade_list, comrade); +- +- mutex_unlock(&mdi->comrade_list_lock); +-} +- +-static int lookup_merge_normal(struct dentry *dentry, unsigned int flags) +-{ +- int ret = -ENOMEM; +- int devid = -1; +- struct dentry *pdentry = dget_parent(dentry); +- struct hmdfs_dentry_info_merge *mdi = hmdfs_dm(dentry); +- struct hmdfs_sb_info *sbi = hmdfs_sb(dentry->d_sb); +- char 
*rname, *ppath, *cpath; +- +- rname = hmdfs_get_real_dname(dentry, &devid, &mdi->type); +- if (unlikely(!rname)) { +- goto out; +- } +- +- ppath = hmdfs_merge_get_dentry_relative_path(pdentry); +- if (unlikely(!ppath)) { +- hmdfs_err("failed to get parent relative path"); +- goto out_rname; +- } +- +- cpath = kzalloc(PATH_MAX, GFP_KERNEL); +- if (unlikely(!cpath)) { +- hmdfs_err("failed to get child device_view path"); +- goto out_ppath; +- } +- +- if (mdi->type != DT_REG || devid == 0) { +- snprintf(cpath, PATH_MAX, "device_view/local%s/%s", ppath, +- rname); +- merge_lookup_sync(mdi, sbi, 0, cpath, flags); +- } +- if (mdi->type == DT_REG && !is_comrade_list_empty(mdi)) { +- ret = 0; +- goto found; +- } +- +- snprintf(cpath, PATH_MAX, "device_view/%s%s/%s", CLOUD_CID, +- ppath, rname); +- merge_lookup_sync(mdi, sbi, CLOUD_DEVICE, cpath, flags); +- +- ret = -ENOENT; +- if (!is_comrade_list_empty(mdi)) +- ret = 0; +- +-found: +- kfree(cpath); +-out_ppath: +- kfree(ppath); +-out_rname: +- kfree(rname); +-out: +- dput(pdentry); +- return ret; +-} +- +-/** +- * do_lookup_cloud_merge_root - look up the root of the merge view (root/merge_view) +- * +- * It's common for a network filesystem to incur various faults, so we +- * intend to tolerate faults here, except those reported by the local device. +- */ +-static int do_lookup_cloud_merge_root(struct path path_dev, +- struct dentry *child_dentry, unsigned int flags) +-{ +- struct hmdfs_dentry_comrade *comrade; +- const int buf_len = +- max((int)HMDFS_CID_SIZE + 1, (int)sizeof(DEVICE_VIEW_LOCAL)); +- char *buf = kzalloc(buf_len, GFP_KERNEL); +- LIST_HEAD(head); +- int ret; +- +- if (!buf) +- return -ENOMEM; +- +- // lookup real_dst/device_view/local +- memcpy(buf, DEVICE_VIEW_LOCAL, sizeof(DEVICE_VIEW_LOCAL)); +- comrade = lookup_comrade(path_dev, buf, HMDFS_DEVID_LOCAL, flags); +- if (IS_ERR(comrade)) { +- ret = PTR_ERR(comrade); +- goto out; +- } +- link_comrade(&head, comrade); +- +- memcpy(buf, CLOUD_CID, 6); +- buf[5] = '\0'; +- comrade = lookup_comrade(path_dev, buf, CLOUD_DEVICE, flags); +- if (IS_ERR(comrade)) { +- ret = 0; +- goto out; +- } +- +- link_comrade(&head, comrade); +- +- assign_comrades_unlocked(child_dentry, &head); +- ret = 0; +- +-out: +- kfree(buf); +- return ret; +-} +- +-static int lookup_cloud_merge_root(struct inode *root_inode, +- struct dentry *child_dentry, unsigned int flags) +-{ +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- struct path path_dev; +- int ret = -ENOENT; +- int buf_len; +- char *buf = NULL; +- bool locked, down; +- +- // account for one additional slash and one '\0' +- buf_len = strlen(sbi->real_dst) + 1 + sizeof(DEVICE_VIEW_ROOT); +- if (buf_len > PATH_MAX) +- return -ENAMETOOLONG; +- +- buf = kmalloc(buf_len, GFP_KERNEL); +- if (unlikely(!buf)) +- return -ENOMEM; +- +- sprintf(buf, "%s/%s", sbi->real_dst, DEVICE_VIEW_ROOT); +- lock_root_inode_shared(root_inode, &locked, &down); +- ret = hmdfs_get_path_in_sb(child_dentry->d_sb, buf, LOOKUP_DIRECTORY, +- &path_dev); +- if (ret) +- goto free_buf; +- +- ret = do_lookup_cloud_merge_root(path_dev, child_dentry, flags); +- path_put(&path_dev); +- +-free_buf: +- kfree(buf); +- restore_root_inode_sem(root_inode, locked, down); +- return ret; +-} +- +-// do this in a map-reduce manner +-struct dentry *hmdfs_lookup_cloud_merge(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- bool create = flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET); +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- struct
hmdfs_inode_info *pii = hmdfs_i(parent_inode); +- struct inode *child_inode = NULL; +- struct dentry *ret_dentry = NULL; +- int err = 0; +- +- /* +- * Internal flags like LOOKUP_CREATE should not be passed to the device view. +- * LOOKUP_REVAL is needed because the dentry cache in hmdfs might be stale +- * after a rename in the lower fs. LOOKUP_DIRECTORY is not needed because +- * merge_view can judge on its own whether the result is a directory or +- * not. +- */ +- flags = flags & LOOKUP_REVAL; +- +- child_dentry->d_fsdata = NULL; +- +- if (child_dentry->d_name.len > NAME_MAX) { +- err = -ENAMETOOLONG; +- goto out; +- } +- +- err = init_hmdfs_dentry_info_merge(sbi, child_dentry); +- if (unlikely(err)) +- goto out; +- +- if (pii->inode_type == HMDFS_LAYER_ZERO) { +- hmdfs_dm(child_dentry)->dentry_type = HMDFS_LAYER_FIRST_MERGE_CLOUD; +- err = lookup_cloud_merge_root(parent_inode, child_dentry, flags); +- } else { +- hmdfs_dm(child_dentry)->dentry_type = HMDFS_LAYER_OTHER_MERGE_CLOUD; +- err = lookup_merge_normal(child_dentry, flags); +- } +- +- if (!err) { +- struct hmdfs_inode_info *info = NULL; +- +- child_inode = fill_inode_merge(parent_inode->i_sb, parent_inode, +- child_dentry, NULL); +- if (IS_ERR(child_inode)) { +- err = PTR_ERR(child_inode); +- goto out; +- } +- info = hmdfs_i(child_inode); +- if (info->inode_type == HMDFS_LAYER_FIRST_MERGE) +- hmdfs_root_inode_perm_init(child_inode); +- else +- check_and_fixup_ownership_remote(parent_inode, +- child_inode, +- child_dentry); +- +- ret_dentry = d_splice_alias(child_inode, child_dentry); +- if (IS_ERR(ret_dentry)) { +- clear_comrades(child_dentry); +- err = PTR_ERR(ret_dentry); +- goto out; +- } +- if (ret_dentry) +- child_dentry = ret_dentry; +- +- goto out; +- } +- +- if ((err == -ENOENT) && create) +- err = 0; +- +-out: +- return err ?
ERR_PTR(err) : ret_dentry; +-} +- +-const struct inode_operations hmdfs_file_iops_cloud_merge = { +- .getattr = hmdfs_getattr_merge, +- .setattr = hmdfs_setattr_merge, +- .permission = hmdfs_permission, +-}; +- +-int do_mkdir_cloud_merge(struct inode *parent_inode, struct dentry *child_dentry, +- umode_t mode, struct inode *lo_i_parent, +- struct dentry *lo_d_child) +-{ +- int ret = 0; +- struct super_block *sb = parent_inode->i_sb; +- struct inode *child_inode = NULL; +- +- ret = vfs_mkdir(&nop_mnt_idmap, lo_i_parent, lo_d_child, mode); +- if (ret) +- goto out; +- +- child_inode = +- fill_inode_merge(sb, parent_inode, child_dentry, lo_d_child); +- if (IS_ERR(child_inode)) { +- ret = PTR_ERR(child_inode); +- goto out; +- } +- +- d_add(child_dentry, child_inode); +- /* nlink should be increased with the joining of children */ +- set_nlink(parent_inode, 2); +- hmdfs_update_meta(parent_inode); +-out: +- return ret; +-} +- +-int do_create_cloud_merge(struct inode *parent_inode, struct dentry *child_dentry, +- umode_t mode, bool want_excl, struct inode *lo_i_parent, +- struct dentry *lo_d_child) +-{ +- int ret = 0; +- struct super_block *sb = parent_inode->i_sb; +- struct inode *child_inode = NULL; +- +- ret = vfs_create(&nop_mnt_idmap, lo_i_parent, lo_d_child, mode, want_excl); +- if (ret) +- goto out; +- +- child_inode = +- fill_inode_merge(sb, parent_inode, child_dentry, lo_d_child); +- if (IS_ERR(child_inode)) { +- ret = PTR_ERR(child_inode); +- goto out; +- } +- +- d_add(child_dentry, child_inode); +- /* nlink should be increased with the joining of children */ +- set_nlink(parent_inode, 2); +- hmdfs_update_meta(parent_inode); +-out: +- return ret; +-} +- +-int hmdfs_do_ops_cloud_merge(struct inode *i_parent, struct dentry *d_child, +- struct dentry *lo_d_child, struct path path, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- int ret = 0; +- +- if (rec_op_para->is_last) { +- switch (rec_op_para->opcode) { +- case F_MKDIR_MERGE: +- ret = do_mkdir_cloud_merge(i_parent, d_child, +- rec_op_para->mode, +- d_inode(path.dentry), lo_d_child); +- break; +- case F_CREATE_MERGE: +- ret = do_create_cloud_merge(i_parent, d_child, +- rec_op_para->mode, +- rec_op_para->want_excl, +- d_inode(path.dentry), lo_d_child); +- break; +- default: +- ret = -EINVAL; +- break; +- } +- } else { +- ret = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), lo_d_child, +- rec_op_para->mode); +- } +- if (ret) +- hmdfs_err("vfs_ops failed, ops %d, err = %d", +- rec_op_para->opcode, ret); +- return ret; +-} +- +-int hmdfs_create_lower_cloud_dentry(struct inode *i_parent, struct dentry *d_child, +- struct dentry *lo_d_parent, bool is_dir, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- struct hmdfs_sb_info *sbi = i_parent->i_sb->s_fs_info; +- struct hmdfs_dentry_comrade *new_comrade = NULL; +- struct dentry *lo_d_child = NULL; +- char *path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *absolute_path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *path_name = NULL; +- struct path path = { .mnt = NULL, .dentry = NULL }; +- int ret = 0; +- +- if (unlikely(!path_buf || !absolute_path_buf)) { +- ret = -ENOMEM; +- goto out; +- } +- +- path_name = dentry_path_raw(lo_d_parent, path_buf, PATH_MAX); +- if (IS_ERR(path_name)) { +- ret = PTR_ERR(path_name); +- goto out; +- } +- if ((strlen(sbi->real_dst) + strlen(path_name) + +- strlen(d_child->d_name.name) + 2) > PATH_MAX) { +- ret = -ENAMETOOLONG; +- goto out; +- } +- +- sprintf(absolute_path_buf, "%s%s/%s", sbi->real_dst, path_name, +- d_child->d_name.name); +- +- if (is_dir) +- 
lo_d_child = kern_path_create(AT_FDCWD, absolute_path_buf, +- &path, LOOKUP_DIRECTORY); +- else +- lo_d_child = kern_path_create(AT_FDCWD, absolute_path_buf, +- &path, 0); +- if (IS_ERR(lo_d_child)) { +- ret = PTR_ERR(lo_d_child); +- goto out; +- } +- // to ensure link_comrade is called only after vfs_mkdir succeeds +- ret = hmdfs_do_ops_cloud_merge(i_parent, d_child, lo_d_child, path, +- rec_op_para); +- if (ret) +- goto out_put; +- new_comrade = alloc_comrade(lo_d_child, HMDFS_DEVID_LOCAL); +- if (IS_ERR(new_comrade)) { +- ret = PTR_ERR(new_comrade); +- goto out_put; +- } else { +- link_comrade_unlocked(d_child, new_comrade); +- } +- +- update_inode_attr(d_inode(d_child), d_child); +- +-out_put: +- done_path_create(&path, lo_d_child); +-out: +- kfree(absolute_path_buf); +- kfree(path_buf); +- return ret; +-} +- +-static int create_lo_d_parent_recur(struct dentry *d_parent, +- struct dentry *d_child, umode_t mode, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- struct dentry *lo_d_parent, *d_pparent; +- int ret = 0; +- +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- d_pparent = dget_parent(d_parent); +- ret = create_lo_d_parent_recur(d_pparent, d_parent, +- d_inode(d_parent)->i_mode, +- rec_op_para); +- dput(d_pparent); +- if (ret) +- goto out; +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- ret = -ENOENT; +- goto out; +- } +- } +- rec_op_para->is_last = false; +- rec_op_para->mode = mode; +- ret = hmdfs_create_lower_cloud_dentry(d_inode(d_parent), d_child, lo_d_parent, +- true, rec_op_para); +-out: +- dput(lo_d_parent); +- return ret; +-} +- +-int create_lo_d_cloud_child(struct inode *i_parent, struct dentry *d_child, +- bool is_dir, struct hmdfs_recursive_para *rec_op_para) +-{ +- struct dentry *d_pparent, *lo_d_parent, *lo_d_child; +- struct dentry *d_parent = dget_parent(d_child); +- int ret = 0; +- mode_t d_child_mode = rec_op_para->mode; +- +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- d_pparent = dget_parent(d_parent); +- ret = create_lo_d_parent_recur(d_pparent, d_parent, +- d_inode(d_parent)->i_mode, +- rec_op_para); +- dput(d_pparent); +- if (unlikely(ret)) { +- lo_d_child = ERR_PTR(ret); +- goto out; +- } +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- lo_d_child = ERR_PTR(-ENOENT); +- goto out; +- } +- } +- rec_op_para->is_last = true; +- rec_op_para->mode = d_child_mode; +- ret = hmdfs_create_lower_cloud_dentry(i_parent, d_child, lo_d_parent, is_dir, +- rec_op_para); +- +-out: +- dput(d_parent); +- dput(lo_d_parent); +- return ret; +-} +- +-int hmdfs_mkdir_cloud_merge(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- int ret = 0; +- struct hmdfs_recursive_para *rec_op_para = NULL; +- +- // conflict_name & file_type are checked by hmdfs_mkdir_local +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto out; +- } +- rec_op_para = kmalloc(sizeof(*rec_op_para), GFP_KERNEL); +- if (!rec_op_para) { +- ret = -ENOMEM; +- goto out; +- } +- +- hmdfs_init_recursive_para(rec_op_para, F_MKDIR_MERGE, mode, false, +- NULL); +- ret = create_lo_d_cloud_child(dir, dentry, true, rec_op_para); +-out: +- hmdfs_trace_merge(trace_hmdfs_mkdir_merge, dir, dentry, ret); +- if (ret) +- d_drop(dentry); +- kfree(rec_op_para); +- return ret; +-} +- +-int hmdfs_create_cloud_merge(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, +- bool want_excl) +-{ +-
struct hmdfs_recursive_para *rec_op_para = NULL; +- int ret = 0; +- +- rec_op_para = kmalloc(sizeof(*rec_op_para), GFP_KERNEL); +- if (!rec_op_para) { +- ret = -ENOMEM; +- goto out; +- } +- hmdfs_init_recursive_para(rec_op_para, F_CREATE_MERGE, mode, want_excl, +- NULL); +- // conflict_name & file_type are checked by hmdfs_create_local +- ret = create_lo_d_cloud_child(dir, dentry, false, rec_op_para); +-out: +- hmdfs_trace_merge(trace_hmdfs_create_merge, dir, dentry, ret); +- if (ret) +- d_drop(dentry); +- kfree(rec_op_para); +- return ret; +-} +- +-static int rename_lo_d_cloud_child(struct hmdfs_rename_para *rename_para, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- struct dentry *d_pparent, *lo_d_parent; +- struct dentry *d_parent = dget_parent(rename_para->new_dentry); +- int ret = 0; +- +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- d_pparent = dget_parent(d_parent); +- ret = create_lo_d_parent_recur(d_pparent, d_parent, +- d_inode(d_parent)->i_mode, +- rec_op_para); +- dput(d_pparent); +- if (unlikely(ret)) +- goto out; +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- ret = -ENOENT; +- goto out; +- } +- } +- ret = do_rename_merge(rename_para->old_dir, rename_para->old_dentry, +- rename_para->new_dir, rename_para->new_dentry, +- rename_para->flags); +- +-out: +- dput(d_parent); +- dput(lo_d_parent); +- return ret; +-} +- +-static int hmdfs_rename_cloud_merge(struct mnt_idmap *idmap, struct inode *old_dir, +- struct dentry *old_dentry, +- struct inode *new_dir, +- struct dentry *new_dentry, +- unsigned int flags) +-{ +- struct hmdfs_recursive_para *rec_op_para = NULL; +- struct hmdfs_rename_para rename_para = { old_dir, old_dentry, new_dir, +- new_dentry, flags }; +- int ret = 0; +- +- if (hmdfs_file_type(old_dentry->d_name.name) != HMDFS_TYPE_COMMON || +- hmdfs_file_type(new_dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto rename_out; +- } +- +- if (hmdfs_i(old_dir)->inode_type != hmdfs_i(new_dir)->inode_type) { +- hmdfs_err("in different view"); +- ret = -EPERM; +- goto rename_out; +- } +- +- rec_op_para = kmalloc(sizeof(*rec_op_para), GFP_KERNEL); +- if (!rec_op_para) { +- ret = -ENOMEM; +- goto rename_out; +- } +- trace_hmdfs_rename_merge(old_dir, old_dentry, new_dir, new_dentry, +- flags); +- +- hmdfs_init_recursive_para(rec_op_para, F_MKDIR_MERGE, 0, 0, NULL); +- ret = rename_lo_d_cloud_child(&rename_para, rec_op_para); +- if (ret != 0) { +- d_drop(new_dentry); +- } else { +- hmdfs_update_meta(old_dir); +- if (old_dir != new_dir) +- hmdfs_update_meta(new_dir); +- } +- +- if (S_ISREG(old_dentry->d_inode->i_mode) && !ret) +- d_invalidate(old_dentry); +-rename_out: +- kfree(rec_op_para); +- return ret; +-} +- +-void hmdfs_update_meta(struct inode *dir) +-{ +- dir->__i_ctime = dir->i_mtime = current_time(dir); +-} +- +-const struct inode_operations hmdfs_dir_iops_cloud_merge = { +- .lookup = hmdfs_lookup_cloud_merge, +- .mkdir = hmdfs_mkdir_cloud_merge, +- .create = hmdfs_create_cloud_merge, +- .rmdir = hmdfs_rmdir_merge, +- .unlink = hmdfs_unlink_merge, +- .rename = hmdfs_rename_cloud_merge, +- .permission = hmdfs_permission, +-}; +diff --git a/fs/hmdfs/inode_local.c b/fs/hmdfs/inode_local.c +deleted file mode 100644 +index 4509d3e40..000000000 +--- a/fs/hmdfs/inode_local.c ++++ /dev/null +@@ -1,1074 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode_local.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "authority/authentication.h" +-#include "comm/socket_adapter.h" +-#include "comm/transport.h" +-#include "hmdfs_client.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +-#include "hmdfs_share.h" +-#include "hmdfs_trace.h" +- +-extern struct kmem_cache *hmdfs_dentry_cachep; +- +-struct hmdfs_name_data { +- struct dir_context ctx; +- const struct qstr *to_find; +- char *name; +- bool found; +-}; +- +-int init_hmdfs_dentry_info(struct hmdfs_sb_info *sbi, struct dentry *dentry, +- int dentry_type) +-{ +- struct hmdfs_dentry_info *info = +- kmem_cache_zalloc(hmdfs_dentry_cachep, GFP_ATOMIC); +- +- if (!info) +- return -ENOMEM; +- INIT_LIST_HEAD(&info->cache_list_head); +- INIT_LIST_HEAD(&info->remote_cache_list_head); +- spin_lock_init(&info->cache_list_lock); +- mutex_init(&info->remote_cache_list_lock); +- mutex_init(&info->cache_pull_lock); +- spin_lock_init(&info->lock); +- info->dentry_type = dentry_type; +- info->device_id = 0; +- dentry->d_fsdata = info; +- if (dentry_type == HMDFS_LAYER_ZERO || +- dentry_type == HMDFS_LAYER_FIRST_DEVICE || +- dentry_type == HMDFS_LAYER_SECOND_LOCAL || +- dentry_type == HMDFS_LAYER_SECOND_CLOUD || +- dentry_type == HMDFS_LAYER_SECOND_REMOTE) +- d_set_d_op(dentry, &hmdfs_dev_dops); +- else +- d_set_d_op(dentry, &hmdfs_dops); +- return 0; +-} +- +-static inline void set_sharefile_flag(struct hmdfs_dentry_info *gdi) +-{ +- gdi->file_type = HM_SHARE; +-} +- +-static void check_and_fixup_share_ops(struct inode *inode, +- const char *name) +-{ +- if (is_share_dir(inode, name)) { +- inode->i_op = &hmdfs_dir_inode_ops_share; +- inode->i_fop = &hmdfs_dir_ops_share; +- } +-} +- +-struct inode *fill_inode_local(struct super_block *sb, +- struct inode *lower_inode, const char *name) +-{ +- int ret = 0; +- struct inode *inode; +- struct hmdfs_sb_info *sbi = hmdfs_sb(sb); +- struct hmdfs_inode_info *info; +- +- if (!igrab(lower_inode)) +- return ERR_PTR(-ESTALE); +- +- inode = hmdfs_iget5_locked_local(sb, lower_inode); +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- iput(lower_inode); +- return ERR_PTR(-ENOMEM); +- } +- if (!(inode->i_state & I_NEW)) { +- iput(lower_inode); +- return inode; +- } +- +- info = hmdfs_i(inode); +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- info->perm = hmdfs_read_perm(lower_inode); +-#endif +- if (S_ISDIR(lower_inode->i_mode)) +- inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRWXU | +- S_IRWXG | S_IXOTH; +- else if (S_ISREG(lower_inode->i_mode)) +- inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | +- S_IWUSR | S_IRGRP | S_IWGRP; +- else if (S_ISLNK(lower_inode->i_mode)) +- inode->i_mode = +- S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- inode->i_uid = lower_inode->i_uid; +- inode->i_gid = lower_inode->i_gid; +-#else +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +-#endif +- inode->i_atime = lower_inode->i_atime; +- inode->__i_ctime = lower_inode->__i_ctime; +- inode->i_mtime = lower_inode->i_mtime; +- inode->i_generation = lower_inode->i_generation; +- +- info->inode_type = HMDFS_LAYER_OTHER_LOCAL; +- if (S_ISDIR(lower_inode->i_mode)) { +- inode->i_op = &hmdfs_dir_inode_ops_local; +- inode->i_fop = &hmdfs_dir_ops_local; +- inode->i_mode |= S_IXUGO; +- } else if (S_ISREG(lower_inode->i_mode)) { +- inode->i_op = &hmdfs_file_iops_local; +- inode->i_fop = &hmdfs_file_fops_local; +- } else if (S_ISLNK(lower_inode->i_mode)) { +- 
inode->i_op = &hmdfs_symlink_iops_local; +- inode->i_fop = &hmdfs_file_fops_local; +- inode->i_size = i_size_read(lower_inode); +- } else { +- ret = -EIO; +- goto bad_inode; +- } +- +- if (sbi->s_cloud_disk_switch) +- inode->i_mapping->a_ops = &hmdfs_aops_cloud; +- +- fsstack_copy_inode_size(inode, lower_inode); +- check_and_fixup_share_ops(inode, name); +- unlock_new_inode(inode); +- return inode; +-bad_inode: +- iget_failed(inode); +- return ERR_PTR(ret); +-} +- +-/* hmdfs_convert_lookup_flags - convert hmdfs lookup flags to vfs lookup flags +- * +- * @hmdfs_flags: hmdfs lookup flags +- * @vfs_flags: pointer to converted flags +- * +- * return 0 on success, or err code on failure. +- */ +-int hmdfs_convert_lookup_flags(unsigned int hmdfs_flags, +- unsigned int *vfs_flags) +-{ +- *vfs_flags = 0; +- +- /* currently, only HMDFS_LOOKUP_REVAL is supported */ +- if (hmdfs_flags & ~HMDFS_LOOKUP_REVAL) +- return -EINVAL; +- +- if (hmdfs_flags & HMDFS_LOOKUP_REVAL) +- *vfs_flags |= LOOKUP_REVAL; +- +- return 0; +-} +- +-static bool hmdfs_name_match(struct dir_context *ctx, const char *name, +- int namelen, loff_t offset, u64 ino, +- unsigned int d_type) +-{ +- struct hmdfs_name_data *buf = +- container_of(ctx, struct hmdfs_name_data, ctx); +- struct qstr candidate = QSTR_INIT(name, namelen); +- +- if (qstr_case_eq(buf->to_find, &candidate)) { +- memcpy(buf->name, name, namelen); +- buf->name[namelen] = 0; +- buf->found = true; +- return false; +- } +- return true; +-} +- +-static int __lookup_nosensitive(struct path *lower_parent_path, +- struct dentry *child_dentry, unsigned int flags, +- struct path *lower_path) +-{ +- struct file *file; +- const struct cred *cred = current_cred(); +- const struct qstr *name = &child_dentry->d_name; +- int err; +- struct hmdfs_name_data buffer = { +- .ctx.actor = hmdfs_name_match, +- .to_find = name, +- .name = __getname(), +- .found = false, +- }; +- +- if (!buffer.name) { +- err = -ENOMEM; +- goto out; +- } +- file = dentry_open(lower_parent_path, O_RDONLY, cred); +- if (IS_ERR(file)) { +- err = PTR_ERR(file); +- goto put_name; +- } +- err = iterate_dir(file, &buffer.ctx); +- fput(file); +- if (err) +- goto put_name; +- if (buffer.found) +- err = vfs_path_lookup(lower_parent_path->dentry, +- lower_parent_path->mnt, buffer.name, +- flags, lower_path); +- else +- err = -ENOENT; +-put_name: +- __putname(buffer.name); +-out: +- return err; +-} +- +-static inline void set_symlink_flag(struct hmdfs_dentry_info *gdi) +-{ +- gdi->file_type = HM_SYMLINK; +-} +- +-struct dentry *hmdfs_lookup_local(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- const char *d_name = child_dentry->d_name.name; +- int err = 0; +- struct path lower_path, lower_parent_path; +- struct dentry *lower_dentry = NULL, *parent_dentry = NULL, *ret = NULL; +- struct hmdfs_dentry_info *gdi = NULL; +- struct inode *child_inode = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- +- trace_hmdfs_lookup_local(parent_inode, child_dentry, flags); +- if (child_dentry->d_name.len > NAME_MAX) { +- ret = ERR_PTR(-ENAMETOOLONG); +- goto out; +- } +- +- /* local device */ +- parent_dentry = dget_parent(child_dentry); +- hmdfs_get_lower_path(parent_dentry, &lower_parent_path); +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_OTHER_LOCAL); +- if (err) { +- ret = ERR_PTR(err); +- goto out_err; +- } +- +- gdi = hmdfs_d(child_dentry); +- +- flags &= ~LOOKUP_FOLLOW; +- err = vfs_path_lookup(lower_parent_path.dentry, lower_parent_path.mnt, +-
(child_dentry->d_name.name), 0, &lower_path); +- if (err && err != -ENOENT) { +- ret = ERR_PTR(err); +- goto out_err; +- } else if (!err) { +- hmdfs_set_lower_path(child_dentry, &lower_path); +- child_inode = fill_inode_local(parent_inode->i_sb, +- d_inode(lower_path.dentry), +- child_dentry->d_name.name); +- +- if (S_ISLNK(d_inode(lower_path.dentry)->i_mode)) +- set_symlink_flag(gdi); +- if (IS_ERR(child_inode)) { +- err = PTR_ERR(child_inode); +- ret = ERR_PTR(err); +- hmdfs_put_reset_lower_path(child_dentry); +- goto out_err; +- } +- ret = d_splice_alias(child_inode, child_dentry); +- if (IS_ERR(ret)) { +- err = PTR_ERR(ret); +- hmdfs_put_reset_lower_path(child_dentry); +- goto out_err; +- } +- +- check_and_fixup_ownership(parent_inode, child_inode); +- goto out_err; +- } +- /* +- * return 0 here, so that vfs can continue the process of turning this +- * negative dentry into a positive one while creating a new file. +- */ +- err = 0; +- ret = 0; +- +- lower_dentry = lookup_one_len_unlocked(d_name, lower_parent_path.dentry, +- child_dentry->d_name.len); +- if (IS_ERR(lower_dentry)) { +- err = PTR_ERR(lower_dentry); +- ret = lower_dentry; +- goto out_err; +- } +- lower_path.dentry = lower_dentry; +- lower_path.mnt = mntget(lower_parent_path.mnt); +- hmdfs_set_lower_path(child_dentry, &lower_path); +- +-out_err: +- if (!err) +- hmdfs_set_time(child_dentry, jiffies); +- hmdfs_put_lower_path(&lower_parent_path); +- dput(parent_dentry); +-out: +- trace_hmdfs_lookup_local_end(parent_inode, child_dentry, err); +- return ret; +-} +- +-int hmdfs_mkdir_local_dentry(struct inode *dir, struct dentry *dentry, +- umode_t mode) +-{ +- struct inode *lower_dir = hmdfs_i(dir)->lower_inode; +- struct dentry *lower_dir_dentry = NULL; +- struct super_block *sb = dir->i_sb; +- struct path lower_path; +- struct dentry *lower_dentry = NULL; +- int error = 0; +- struct inode *lower_inode = NULL; +- struct inode *child_inode = NULL; +- bool local_res = false; +- struct cache_fs_override or; +- __u16 child_perm; +- kuid_t tmp_uid; +- +- error = hmdfs_override_dir_id_fs(&or, dir, dentry, &child_perm); +- if (error) +- goto cleanup; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_dir_dentry = lock_parent(lower_dentry); +- +- tmp_uid = hmdfs_override_inode_uid(lower_dir); +- mode = (mode & S_IFMT) | 00771; +- +- error = vfs_mkdir(&nop_mnt_idmap, lower_dir, lower_dentry, mode); +- hmdfs_revert_inode_uid(lower_dir, tmp_uid); +- if (error) { +- hmdfs_err("vfs_mkdir() error:%d", error); +- goto out; +- } +- local_res = true; +- lower_inode = d_inode(lower_dentry); +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- error = hmdfs_persist_perm(lower_dentry, &child_perm); +-#endif +- child_inode = fill_inode_local(sb, lower_inode, dentry->d_name.name); +- if (IS_ERR(child_inode)) { +- error = PTR_ERR(child_inode); +- goto out; +- } +- d_add(dentry, child_inode); +- set_nlink(dir, hmdfs_i(dir)->lower_inode->i_nlink); +-out: +- unlock_dir(lower_dir_dentry); +- if (local_res) +- hmdfs_drop_remote_cache_dents(dentry->d_parent); +- +- if (error) { +- hmdfs_clear_drop_flag(dentry->d_parent); +- d_drop(dentry); +- } +- hmdfs_put_lower_path(&lower_path); +- hmdfs_revert_dir_id_fs(&or); +-cleanup: +- return error; +-} +- +-int hmdfs_mkdir_local(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- int err = 0; +- +- if (check_filename(dentry->d_name.name, dentry->d_name.len)) { +- err = -EINVAL; +- return err; +- } +- +- if (hmdfs_file_type(dentry->d_name.name) !=
HMDFS_TYPE_COMMON) { +- err = -EACCES; +- return err; +- } +- err = hmdfs_mkdir_local_dentry(dir, dentry, mode); +- trace_hmdfs_mkdir_local(dir, dentry, err); +- return err; +-} +- +-int hmdfs_create_local_dentry(struct inode *dir, struct dentry *dentry, +- umode_t mode, bool want_excl) +-{ +- struct inode *lower_dir = NULL; +- struct dentry *lower_dir_dentry = NULL; +- struct super_block *sb = dir->i_sb; +- struct path lower_path; +- struct dentry *lower_dentry = NULL; +- int error = 0; +- struct inode *lower_inode = NULL; +- struct inode *child_inode = NULL; +- kuid_t tmp_uid; +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- const struct cred *saved_cred = NULL; +- struct fs_struct *saved_fs = NULL, *copied_fs = NULL; +- __u16 child_perm; +-#endif +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- saved_cred = hmdfs_override_file_fsids(dir, &child_perm); +- if (!saved_cred) { +- error = -ENOMEM; +- goto path_err; +- } +- +- saved_fs = current->fs; +- copied_fs = hmdfs_override_fsstruct(saved_fs); +- if (!copied_fs) { +- error = -ENOMEM; +- goto revert_fsids; +- } +-#endif +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- mode = (mode & S_IFMT) | 00660; +- lower_dir_dentry = lock_parent(lower_dentry); +- lower_dir = d_inode(lower_dir_dentry); +- tmp_uid = hmdfs_override_inode_uid(lower_dir); +- error = vfs_create(&nop_mnt_idmap, lower_dir, lower_dentry, mode, want_excl); +- hmdfs_revert_inode_uid(lower_dir, tmp_uid); +- unlock_dir(lower_dir_dentry); +- if (error) +- goto out; +- +- lower_inode = d_inode(lower_dentry); +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- error = hmdfs_persist_perm(lower_dentry, &child_perm); +-#endif +- child_inode = fill_inode_local(sb, lower_inode, dentry->d_name.name); +- if (IS_ERR(child_inode)) { +- error = PTR_ERR(child_inode); +- goto out_created; +- } +- d_add(dentry, child_inode); +- +-out_created: +- hmdfs_drop_remote_cache_dents(dentry->d_parent); +-out: +- if (error) { +- hmdfs_clear_drop_flag(dentry->d_parent); +- d_drop(dentry); +- } +- hmdfs_put_lower_path(&lower_path); +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- hmdfs_revert_fsstruct(saved_fs, copied_fs); +-revert_fsids: +- hmdfs_revert_fsids(saved_cred); +-#endif +-#ifdef CONFIG_HMDFS_FS_PERMISSION +-path_err: +-#endif +- return error; +-} +- +-int hmdfs_create_local(struct mnt_idmap *idmap, struct inode *dir, struct dentry *child_dentry, +- umode_t mode, bool want_excl) +-{ +- int err = 0; +- +- if (check_filename(child_dentry->d_name.name, +- child_dentry->d_name.len)) { +- err = -EINVAL; +- return err; +- } +- +- if (hmdfs_file_type(child_dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- err = -EACCES; +- return err; +- } +- +- err = hmdfs_create_local_dentry(dir, child_dentry, mode, want_excl); +- trace_hmdfs_create_local(dir, child_dentry, err); +- return err; +-} +- +-int hmdfs_rmdir_local_dentry(struct inode *dir, struct dentry *dentry) +-{ +- struct inode *lower_dir = NULL; +- struct dentry *lower_dir_dentry = NULL; +- kuid_t tmp_uid; +- struct path lower_path; +- struct dentry *lower_dentry = NULL; +- struct dentry *lookup_dentry = NULL; +- int error = 0; +- +- hmdfs_clear_cache_dents(dentry, true); +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_dir_dentry = lock_parent(lower_dentry); +- lower_dir = d_inode(lower_dir_dentry); +- +- lookup_dentry = lookup_one_len(lower_dentry->d_name.name, lower_dir_dentry, +- lower_dentry->d_name.len); +- if (IS_ERR(lookup_dentry)) { +- error = PTR_ERR(lookup_dentry); +- hmdfs_err("lookup_one_len failed, 
err = %d", error); +- goto lookup_err; +- } +- tmp_uid = hmdfs_override_inode_uid(lower_dir); +- +- error = vfs_rmdir(&nop_mnt_idmap, lower_dir, lookup_dentry); +- hmdfs_revert_inode_uid(lower_dir, tmp_uid); +- dput(lookup_dentry); +-lookup_err: +- unlock_dir(lower_dir_dentry); +- hmdfs_put_lower_path(&lower_path); +- if (error) +- goto path_err; +- hmdfs_drop_remote_cache_dents(dentry->d_parent); +-path_err: +- if (error) +- hmdfs_clear_drop_flag(dentry->d_parent); +- return error; +-} +- +-int hmdfs_rmdir_local(struct inode *dir, struct dentry *dentry) +-{ +- int err = 0; +- +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- err = -EACCES; +- goto out; +- } +- +- err = hmdfs_rmdir_local_dentry(dir, dentry); +- if (err != 0) { +- hmdfs_err("rm dir failed:%d", err); +- goto out; +- } +- +- /* drop dentry even remote failed +- * it maybe cause that one remote devices disconnect +- * when doing remote rmdir +- */ +- d_drop(dentry); +-out: +- /* return connect device's errcode */ +- trace_hmdfs_rmdir_local(dir, dentry, err); +- return err; +-} +- +-int hmdfs_unlink_local_dentry(struct inode *dir, struct dentry *dentry) +-{ +- struct dentry *lower_dir_dentry = NULL; +- struct path lower_path; +- struct inode *lower_dir = NULL; +- struct dentry *lower_dentry = NULL; +- struct dentry *lookup_dentry = NULL; +- int error; +- kuid_t tmp_uid; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- dget(lower_dentry); +- lower_dir_dentry = lock_parent(lower_dentry); +- lower_dir = d_inode(lower_dir_dentry); +- lookup_dentry = lookup_one_len(lower_dentry->d_name.name, lower_dir_dentry, +- lower_dentry->d_name.len); +- if (IS_ERR(lookup_dentry)) { +- error = PTR_ERR(lookup_dentry); +- hmdfs_err("lookup_one_len failed, err = %d", error); +- goto lookup_err; +- } +- +- tmp_uid = hmdfs_override_inode_uid(lower_dir); +- error = vfs_unlink(&nop_mnt_idmap, lower_dir, lookup_dentry, NULL); +- hmdfs_revert_inode_uid(lower_dir, tmp_uid); +- set_nlink(d_inode(dentry), +- hmdfs_i(d_inode(dentry))->lower_inode->i_nlink); +- dput(lookup_dentry); +-lookup_err: +- unlock_dir(lower_dir_dentry); +- dput(lower_dentry); +- if (error) +- goto path_err; +- +- hmdfs_drop_remote_cache_dents(dentry->d_parent); +- d_drop(dentry); +- +-path_err: +- hmdfs_put_lower_path(&lower_path); +- if (error) +- hmdfs_clear_drop_flag(dentry->d_parent); +- return error; +-} +- +-int hmdfs_unlink_local(struct inode *dir, struct dentry *dentry) +-{ +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) +- return -EACCES; +- +- return hmdfs_unlink_local_dentry(dir, dentry); +-} +- +-int hmdfs_rename_local_dentry(struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- struct path lower_old_path; +- struct path lower_new_path; +- struct dentry *lower_old_dentry = NULL; +- struct dentry *lower_new_dentry = NULL; +- struct dentry *lower_old_dir_dentry = NULL; +- struct dentry *lower_new_dir_dentry = NULL; +- struct dentry *trap = NULL; +- struct renamedata rename_data; +- int rc = 0; +- kuid_t old_dir_uid, new_dir_uid; +- +- if (flags) +- return -EINVAL; +- +- hmdfs_get_lower_path(old_dentry, &lower_old_path); +- lower_old_dentry = lower_old_path.dentry; +- if (!lower_old_dentry) { +- hmdfs_err("lower_old_dentry as NULL"); +- rc = -EACCES; +- goto out_put_old_path; +- } +- +- hmdfs_get_lower_path(new_dentry, &lower_new_path); +- lower_new_dentry = lower_new_path.dentry; +- if (!lower_new_dentry) { +- 
hmdfs_err("lower_new_dentry as NULL"); +- rc = -EACCES; +- goto out_put_new_path; +- } +- +- lower_old_dir_dentry = dget_parent(lower_old_dentry); +- lower_new_dir_dentry = dget_parent(lower_new_dentry); +- trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); +- new_dir_uid = hmdfs_override_inode_uid(d_inode(lower_new_dir_dentry)); +- old_dir_uid = hmdfs_override_inode_uid(d_inode(lower_old_dir_dentry)); +- +- /* source should not be ancestor of target */ +- if (trap == lower_old_dentry) { +- rc = -EINVAL; +- goto out_lock; +- } +- /* target should not be ancestor of source */ +- if (trap == lower_new_dentry) { +- rc = -ENOTEMPTY; +- goto out_lock; +- } +- +- rename_data.old_mnt_idmap = &nop_mnt_idmap; +- rename_data.old_dir = d_inode(lower_old_dir_dentry); +- rename_data.old_dentry = lower_old_dentry; +- rename_data.new_mnt_idmap = &nop_mnt_idmap; +- rename_data.new_dir = d_inode(lower_new_dir_dentry); +- rename_data.new_dentry = lower_new_dentry; +- rename_data.flags = flags; +- rc = vfs_rename(&rename_data); +- +-out_lock: +- dget(old_dentry); +- +- hmdfs_revert_inode_uid(d_inode(lower_old_dir_dentry), old_dir_uid); +- hmdfs_revert_inode_uid(d_inode(lower_new_dir_dentry), new_dir_uid); +- +- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); +- if (rc == 0) { +- hmdfs_drop_remote_cache_dents(old_dentry->d_parent); +- if (old_dentry->d_parent != new_dentry->d_parent) +- hmdfs_drop_remote_cache_dents(new_dentry->d_parent); +- } else { +- hmdfs_clear_drop_flag(old_dentry->d_parent); +- if (old_dentry->d_parent != new_dentry->d_parent) +- hmdfs_clear_drop_flag(old_dentry->d_parent); +- d_drop(new_dentry); +- } +- +- dput(old_dentry); +- dput(lower_old_dir_dentry); +- dput(lower_new_dir_dentry); +- +-out_put_new_path: +- hmdfs_put_lower_path(&lower_new_path); +-out_put_old_path: +- hmdfs_put_lower_path(&lower_old_path); +- return rc; +-} +- +-int hmdfs_rename_local(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- int err = 0; +- int ret = 0; +- +- trace_hmdfs_rename_local(old_dir, old_dentry, new_dir, new_dentry, +- flags); +- if (hmdfs_file_type(old_dentry->d_name.name) != HMDFS_TYPE_COMMON || +- hmdfs_file_type(new_dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- err = -EACCES; +- goto rename_out; +- } +- +- if (hmdfs_i(old_dir)->inode_type != hmdfs_i(new_dir)->inode_type) { +- hmdfs_err("in different view"); +- err = -EPERM; +- goto rename_out; +- } +- +- if (hmdfs_d(old_dentry)->device_id != hmdfs_d(new_dentry)->device_id) { +- err = -EXDEV; +- goto rename_out; +- } +- +- if (S_ISREG(old_dentry->d_inode->i_mode)) { +- err = hmdfs_rename_local_dentry(old_dir, old_dentry, new_dir, +- new_dentry, flags); +- } else if (S_ISDIR(old_dentry->d_inode->i_mode)) { +- ret = hmdfs_rename_local_dentry(old_dir, old_dentry, new_dir, +- new_dentry, flags); +- if (ret != 0) { +- err = ret; +- goto rename_out; +- } +- } +- +- if (!err) +- d_invalidate(old_dentry); +- +-rename_out: +- return err; +-} +- +-static bool symname_is_allowed(const char *symname) +-{ +- char *p = NULL; +- size_t len; +- +- len = strnlen(symname, PATH_MAX); +- if (len >= PATH_MAX) +- return false; +- +- p = strstr(symname, "/../"); +- if (p) +- return false; +- +- if (len == 2u && strncmp(symname, "..", 2u) == 0) +- return false; +- if (len >= 3u && strncmp(symname, "../", 3u) == 0) +- return false; +- if (len >= 3u && strncmp(symname + len - 3u, "/..", 3u) == 0) +- return false; +- return true; +-} +- 
+-int hmdfs_symlink_local(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, +- const char *symname) +-{ +- int err; +- struct dentry *lower_dentry = NULL; +- struct dentry *lower_parent_dentry = NULL; +- struct path lower_path; +- struct inode *child_inode = NULL; +- struct inode *lower_dir_inode = hmdfs_i(dir)->lower_inode; +- struct hmdfs_dentry_info *gdi = hmdfs_d(dentry); +- kuid_t tmp_uid; +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- const struct cred *saved_cred = NULL; +- struct fs_struct *saved_fs = NULL, *copied_fs = NULL; +- __u16 child_perm; +-#endif +- +- if (unlikely(!symname_is_allowed(symname))) { +- err = -EPERM; +- goto path_err; +- } +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- saved_cred = hmdfs_override_file_fsids(dir, &child_perm); +- if (!saved_cred) { +- err = -ENOMEM; +- goto path_err; +- } +- +- saved_fs = current->fs; +- copied_fs = hmdfs_override_fsstruct(saved_fs); +- if (!copied_fs) { +- err = -ENOMEM; +- goto revert_fsids; +- } +-#endif +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_parent_dentry = lock_parent(lower_dentry); +- tmp_uid = hmdfs_override_inode_uid(lower_dir_inode); +- err = vfs_symlink(&nop_mnt_idmap, lower_dir_inode, lower_dentry, symname); +- hmdfs_revert_inode_uid(lower_dir_inode, tmp_uid); +- unlock_dir(lower_parent_dentry); +- if (err) +- goto out_err; +- set_symlink_flag(gdi); +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- err = hmdfs_persist_perm(lower_dentry, &child_perm); +-#endif +- child_inode = fill_inode_local(dir->i_sb, d_inode(lower_dentry), +- dentry->d_name.name); +- if (IS_ERR(child_inode)) { +- err = PTR_ERR(child_inode); +- goto out_err; +- } +- d_add(dentry, child_inode); +- fsstack_copy_attr_times(dir, lower_dir_inode); +- fsstack_copy_inode_size(dir, lower_dir_inode); +- +-out_err: +- hmdfs_put_lower_path(&lower_path); +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- hmdfs_revert_fsstruct(saved_fs, copied_fs); +-revert_fsids: +- hmdfs_revert_fsids(saved_cred); +-#endif +-path_err: +- return err; +-} +- +-static const char *hmdfs_get_link_local(struct dentry *dentry, +- struct inode *inode, +- struct delayed_call *done) +-{ +- const char *link = NULL; +- struct dentry *lower_dentry = NULL; +- struct inode *lower_inode = NULL; +- struct path lower_path; +- +- if (!dentry) { +- hmdfs_err("dentry NULL"); +- link = ERR_PTR(-ECHILD); +- goto link_out; +- } +- +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_inode = d_inode(lower_dentry); +- if (!lower_inode->i_op || !lower_inode->i_op->get_link) { +- hmdfs_err("The lower inode doesn't support get_link i_op"); +- link = ERR_PTR(-EINVAL); +- goto out; +- } +- +- link = lower_inode->i_op->get_link(lower_dentry, lower_inode, done); +- if (IS_ERR_OR_NULL(link)) +- goto out; +- fsstack_copy_attr_atime(inode, lower_inode); +-out: +- hmdfs_put_lower_path(&lower_path); +-link_out: +- return link; +-} +- +-static int hmdfs_setattr_local(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- struct inode *inode = d_inode(dentry); +- struct inode *lower_inode = hmdfs_i(inode)->lower_inode; +- struct path lower_path; +- struct dentry *lower_dentry = NULL; +- struct iattr lower_ia; +- unsigned int ia_valid = ia->ia_valid; +- int err = 0; +- kuid_t tmp_uid; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- memcpy(&lower_ia, ia, sizeof(lower_ia)); +- if (ia_valid & ATTR_FILE) +- lower_ia.ia_file = hmdfs_f(ia->ia_file)->lower_file; +- lower_ia.ia_valid &= ~(ATTR_UID
| ATTR_GID | ATTR_MODE); +- if (ia_valid & ATTR_SIZE) { +- err = inode_newsize_ok(inode, ia->ia_size); +- if (err) +- goto out; +- truncate_setsize(inode, ia->ia_size); +- } +- inode_lock(lower_inode); +- tmp_uid = hmdfs_override_inode_uid(lower_inode); +- +- err = notify_change(&nop_mnt_idmap, lower_dentry, &lower_ia, NULL); +- i_size_write(inode, i_size_read(lower_inode)); +- inode->i_atime = lower_inode->i_atime; +- inode->i_mtime = lower_inode->i_mtime; +- inode->__i_ctime = lower_inode->__i_ctime; +- err = update_inode_to_dentry(dentry, inode); +- hmdfs_revert_inode_uid(lower_inode, tmp_uid); +- +- inode_unlock(lower_inode); +-out: +- hmdfs_put_lower_path(&lower_path); +- return err; +-} +- +-static int hmdfs_getattr_local(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-{ +- struct path lower_path; +- int ret; +- +- if (path->dentry == NULL || hmdfs_d(path->dentry) == NULL) { +- hmdfs_err("dentry is NULL"); +- return -ENOENT; +- } +- +- hmdfs_get_lower_path(path->dentry, &lower_path); +- ret = vfs_getattr_nosec(&lower_path, stat, request_mask, flags); +- stat->ino = d_inode(path->dentry)->i_ino; +- stat->uid = d_inode(path->dentry)->i_uid; +- stat->gid = d_inode(path->dentry)->i_gid; +- stat->dev = 0; +- stat->rdev = 0; +- hmdfs_put_lower_path(&lower_path); +- +- return ret; +-} +- +-int hmdfs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) +-{ +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- unsigned int mode = inode->i_mode; +- kuid_t cur_uid = current_fsuid(); +- +- if (uid_eq(cur_uid, ROOT_UID) || uid_eq(cur_uid, SYSTEM_UID)) +- return 0; +- +- if (uid_eq(cur_uid, inode->i_uid)) { +- mode >>= 6; +- } else if (in_group_p(inode->i_gid)) { +- mode >>= 3; +- } +- +- if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) +- return 0; +- +- trace_hmdfs_permission(inode->i_ino); +- return -EACCES; +-#else +- +- return 0; +-#endif +-} +- +-static ssize_t hmdfs_local_listxattr(struct dentry *dentry, char *list, +- size_t size) +-{ +- struct path lower_path; +- ssize_t res = 0; +- size_t r_size = size; +- +- if (!hmdfs_support_xattr(dentry)) +- return -EOPNOTSUPP; +- +- if (size > HMDFS_LISTXATTR_SIZE_MAX) +- r_size = HMDFS_LISTXATTR_SIZE_MAX; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- res = vfs_listxattr(lower_path.dentry, list, r_size); +- hmdfs_put_lower_path(&lower_path); +- +- if (res == -ERANGE && r_size != size) { +- hmdfs_info("listxattr size over %d is not supported", +- HMDFS_LISTXATTR_SIZE_MAX); +- res = -E2BIG; +- } +- +- return res; +-} +-struct dentry *hmdfs_lookup_share(struct inode *parent_inode, +- struct dentry *child_dentry, unsigned int flags) +-{ +- const struct qstr *d_name = &child_dentry->d_name; +- int err = 0; +- struct dentry *ret = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- struct path src_path; +- struct inode *child_inode = NULL; +- +- trace_hmdfs_lookup_share(parent_inode, child_dentry, flags); +- if (d_name->len > NAME_MAX) { +- ret = ERR_PTR(-ENAMETOOLONG); +- goto err_out; +- } +- +- err = init_hmdfs_dentry_info(sbi, child_dentry, HMDFS_LAYER_OTHER_LOCAL); +- if (err) { +- ret = ERR_PTR(err); +- goto err_out; +- } +- +- err = get_path_from_share_table(sbi, child_dentry, &src_path); +- if (err) { +- ret = ERR_PTR(err); +- goto err_out; +- } +- +- hmdfs_set_lower_path(child_dentry, &src_path); +- child_inode = fill_inode_local(parent_inode->i_sb, +- d_inode(src_path.dentry), d_name->name); +- +- set_sharefile_flag(hmdfs_d(child_dentry)); +- +- if
(IS_ERR(child_inode)) { +- err = PTR_ERR(child_inode); +- ret = ERR_PTR(err); +- hmdfs_put_reset_lower_path(child_dentry); +- goto err_out; +- } +- ret = d_splice_alias(child_inode, child_dentry); +- if (IS_ERR(ret)) { +- err = PTR_ERR(ret); +- hmdfs_put_reset_lower_path(child_dentry); +- goto err_out; +- } +- +- check_and_fixup_ownership(parent_inode, child_inode); +- +-err_out: +- trace_hmdfs_lookup_share_end(parent_inode, child_dentry, err); +- return ret; +-} +- +-const struct inode_operations hmdfs_dir_inode_ops_local = { +- .lookup = hmdfs_lookup_local, +- .mkdir = hmdfs_mkdir_local, +- .create = hmdfs_create_local, +- .rmdir = hmdfs_rmdir_local, +- .unlink = hmdfs_unlink_local, +- .symlink = hmdfs_symlink_local, +- .rename = hmdfs_rename_local, +- .permission = hmdfs_permission, +- .setattr = hmdfs_setattr_local, +- .getattr = hmdfs_getattr_local, +-}; +- +-const struct inode_operations hmdfs_symlink_iops_local = { +- .get_link = hmdfs_get_link_local, +- .permission = hmdfs_permission, +- .setattr = hmdfs_setattr_local, +-}; +- +-const struct inode_operations hmdfs_dir_inode_ops_share = { +- .lookup = hmdfs_lookup_share, +- .permission = hmdfs_permission, +-}; +- +-const struct inode_operations hmdfs_file_iops_local = { +- .setattr = hmdfs_setattr_local, +- .getattr = hmdfs_getattr_local, +- .permission = hmdfs_permission, +- .listxattr = hmdfs_local_listxattr, +-}; +diff --git a/fs/hmdfs/inode_merge.c b/fs/hmdfs/inode_merge.c +deleted file mode 100644 +index 7c1e1e4f8..000000000 +--- a/fs/hmdfs/inode_merge.c ++++ /dev/null +@@ -1,1414 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode_merge.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include "hmdfs_merge_view.h" +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "authority/authentication.h" +-#include "hmdfs_trace.h" +- +-struct kmem_cache *hmdfs_dentry_merge_cachep; +- +-struct dentry *hmdfs_get_fst_lo_d(struct dentry *dentry) +-{ +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct dentry *d = NULL; +- +- mutex_lock(&dim->comrade_list_lock); +- comrade = list_first_entry_or_null(&dim->comrade_list, +- struct hmdfs_dentry_comrade, list); +- if (comrade) +- d = dget(comrade->lo_d); +- mutex_unlock(&dim->comrade_list_lock); +- return d; +-} +- +-struct dentry *hmdfs_get_lo_d(struct dentry *dentry, int dev_id) +-{ +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct dentry *d = NULL; +- +- mutex_lock(&dim->comrade_list_lock); +- list_for_each_entry(comrade, &dim->comrade_list, list) { +- if (comrade->dev_id == dev_id) { +- d = dget(comrade->lo_d); +- break; +- } +- } +- mutex_unlock(&dim->comrade_list_lock); +- return d; +-} +- +-void update_inode_attr(struct inode *inode, struct dentry *child_dentry) +-{ +- struct inode *li = NULL; +- struct hmdfs_dentry_info_merge *cdi = hmdfs_dm(child_dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct hmdfs_dentry_comrade *fst_comrade = NULL; +- +- mutex_lock(&cdi->comrade_list_lock); +- fst_comrade = list_first_entry(&cdi->comrade_list, +- struct hmdfs_dentry_comrade, list); +- list_for_each_entry(comrade, &cdi->comrade_list, list) { +- li = d_inode(comrade->lo_d); +- if (!li) +- continue; +- +- if (comrade == fst_comrade) { +- inode->i_atime = li->i_atime; +- inode->__i_ctime = li->__i_ctime; +- inode->i_mtime = li->i_mtime; +- 
inode->i_size = li->i_size; +- continue; +- } +- +- if (hmdfs_time_compare(&inode->i_mtime, &li->i_mtime) < 0) +- inode->i_mtime = li->i_mtime; +- } +- mutex_unlock(&cdi->comrade_list_lock); +-} +- +-int get_num_comrades(struct dentry *dentry) +-{ +- struct list_head *pos; +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(dentry); +- int count = 0; +- +- mutex_lock(&dim->comrade_list_lock); +- list_for_each(pos, &dim->comrade_list) +- count++; +- mutex_unlock(&dim->comrade_list_lock); +- return count; +-} +- +-static struct inode *fill_inode_merge(struct super_block *sb, +- struct inode *parent_inode, +- struct dentry *child_dentry, +- struct dentry *lo_d_dentry) +-{ +- int ret = 0; +- struct dentry *fst_lo_d = NULL; +- struct hmdfs_inode_info *info = NULL; +- struct inode *inode = NULL; +- umode_t mode; +- +- if (lo_d_dentry) { +- fst_lo_d = lo_d_dentry; +- dget(fst_lo_d); +- } else { +- fst_lo_d = hmdfs_get_fst_lo_d(child_dentry); +- } +- if (!fst_lo_d) { +- inode = ERR_PTR(-EINVAL); +- goto out; +- } +- if (hmdfs_i(parent_inode)->inode_type == HMDFS_LAYER_ZERO) +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_MERGE, NULL, +- NULL); +- else +- inode = hmdfs_iget5_locked_merge(sb, fst_lo_d); +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- inode = ERR_PTR(-ENOMEM); +- goto out; +- } +- if (!(inode->i_state & I_NEW)) +- goto out; +- info = hmdfs_i(inode); +- if (hmdfs_i(parent_inode)->inode_type == HMDFS_LAYER_ZERO) +- info->inode_type = HMDFS_LAYER_FIRST_MERGE; +- else +- info->inode_type = HMDFS_LAYER_OTHER_MERGE; +- +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- +- update_inode_attr(inode, child_dentry); +- mode = d_inode(fst_lo_d)->i_mode; +- +- if (S_ISREG(mode)) { +- inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +- inode->i_op = &hmdfs_file_iops_merge; +- inode->i_fop = &hmdfs_file_fops_merge; +- set_nlink(inode, 1); +- } else if (S_ISDIR(mode)) { +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- inode->i_op = &hmdfs_dir_iops_merge; +- inode->i_fop = &hmdfs_dir_fops_merge; +- set_nlink(inode, get_num_comrades(child_dentry) + 2); +- } else { +- ret = -EIO; +- goto bad_inode; +- } +- +- unlock_new_inode(inode); +-out: +- dput(fst_lo_d); +- return inode; +-bad_inode: +- iget_failed(inode); +- return ERR_PTR(ret); +-} +- +-struct hmdfs_dentry_comrade *alloc_comrade(struct dentry *lo_d, int dev_id) +-{ +- struct hmdfs_dentry_comrade *comrade = NULL; +- +- // a file has only one comrade; consider {comrade, list + list lock} +- comrade = kzalloc(sizeof(*comrade), GFP_KERNEL); +- if (unlikely(!comrade)) +- return ERR_PTR(-ENOMEM); +- +- comrade->lo_d = lo_d; +- comrade->dev_id = dev_id; +- dget(lo_d); +- return comrade; +-} +- +-void link_comrade(struct list_head *onstack_comrades_head, +- struct hmdfs_dentry_comrade *comrade) +-{ +- struct hmdfs_dentry_comrade *c = NULL; +- +- list_for_each_entry(c, onstack_comrades_head, list) { +- if (likely(c->dev_id != comrade->dev_id)) +- continue; +- hmdfs_err("Redundant comrade of device %llu", c->dev_id); +- dput(comrade->lo_d); +- kfree(comrade); +- WARN_ON(1); +- return; +- } +- +- if (comrade_is_local(comrade)) +- list_add(&comrade->list, onstack_comrades_head); +- else +- list_add_tail(&comrade->list, onstack_comrades_head); +-} +- +-/** +- * assign_comrades_unlocked - assign a child dentry with comrades +- * +- * We tend to set up a local list of all the comrades we found and place the +- * list onto the dentry_info to achieve atomicity.
+- */ +-void assign_comrades_unlocked(struct dentry *child_dentry, +- struct list_head *onstack_comrades_head) +-{ +- struct hmdfs_dentry_info_merge *cdi = hmdfs_dm(child_dentry); +- +- mutex_lock(&cdi->comrade_list_lock); +- WARN_ON(!list_empty(&cdi->comrade_list)); +- list_splice_init(onstack_comrades_head, &cdi->comrade_list); +- mutex_unlock(&cdi->comrade_list_lock); +-} +- +-struct hmdfs_dentry_comrade *lookup_comrade(struct path lower_path, +- const char *d_name, +- int dev_id, +- unsigned int flags) +-{ +- struct path path; +- struct hmdfs_dentry_comrade *comrade = NULL; +- int err; +- +- err = vfs_path_lookup(lower_path.dentry, lower_path.mnt, d_name, flags, +- &path); +- if (err) +- return ERR_PTR(err); +- +- comrade = alloc_comrade(path.dentry, dev_id); +- path_put(&path); +- return comrade; +-} +- +-/** +- * conf_name_trans_nop - do nothing but copy +- * +- * WARNING: always check before translation +- */ +-static char *conf_name_trans_nop(struct dentry *d) +-{ +- return kstrndup(d->d_name.name, d->d_name.len, GFP_KERNEL); +-} +- +-/** +- * conf_name_trans_dir - conflicted name translation for directory +- * +- * WARNING: always check before translation +- */ +-static char *conf_name_trans_dir(struct dentry *d) +-{ +- int len = d->d_name.len - strlen(CONFLICTING_DIR_SUFFIX); +- +- return kstrndup(d->d_name.name, len, GFP_KERNEL); +-} +- +-/** +- * conf_name_trans_reg - conflicted name translation for regular file +- * +- * WARNING: always check before translation +- */ +-static char *conf_name_trans_reg(struct dentry *d, int *dev_id) +-{ +- int dot_pos, start_cpy_pos, num_len, i; +- int len = d->d_name.len; +- char *name = kstrndup(d->d_name.name, d->d_name.len, GFP_KERNEL); +- +- if (unlikely(!name)) +- return NULL; +- +- // find the last dot if possible +- for (dot_pos = len - 1; dot_pos >= 0; dot_pos--) { +- if (name[dot_pos] == '.') +- break; +- } +- if (dot_pos == -1) +- dot_pos = len; +- +- // retrieve the conf sn (i.e. 
dev_id) +- num_len = 0; +- for (i = dot_pos - 1; i >= 0; i--) { +- if (name[i] >= '0' && name[i] <= '9') +- num_len++; +- else +- break; +- } +- +- *dev_id = 0; +- for (i = 0; i < num_len; i++) +- *dev_id = *dev_id * 10 + name[dot_pos - num_len + i] - '0'; +- +- // move the file suffix( '\0' included) right after the file name +- start_cpy_pos = +- dot_pos - num_len - strlen(CONFLICTING_FILE_CONST_SUFFIX); +- memmove(name + start_cpy_pos, name + dot_pos, len - dot_pos + 1); +- return name; +-} +- +-int check_filename(const char *name, int len) +-{ +- int cmp_res = 0; +- +- if (len >= strlen(CONFLICTING_DIR_SUFFIX)) { +- cmp_res = strncmp(name + len - strlen(CONFLICTING_DIR_SUFFIX), +- CONFLICTING_DIR_SUFFIX, +- strlen(CONFLICTING_DIR_SUFFIX)); +- if (cmp_res == 0) +- return DT_DIR; +- } +- +- if (len >= strlen(CONFLICTING_FILE_CONST_SUFFIX)) { +- int dot_pos, start_cmp_pos, num_len, i; +- +- for (dot_pos = len - 1; dot_pos >= 0; dot_pos--) { +- if (name[dot_pos] == '.') +- break; +- } +- if (dot_pos == -1) +- dot_pos = len; +- +- num_len = 0; +- for (i = dot_pos - 1; i >= 0; i--) { +- if (name[i] >= '0' && name[i] <= '9') +- num_len++; +- else +- break; +- } +- +- start_cmp_pos = dot_pos - num_len - +- strlen(CONFLICTING_FILE_CONST_SUFFIX); +- cmp_res = strncmp(name + start_cmp_pos, +- CONFLICTING_FILE_CONST_SUFFIX, +- strlen(CONFLICTING_FILE_CONST_SUFFIX)); +- if (cmp_res == 0) +- return DT_REG; +- } +- +- return 0; +-} +- +-static struct hmdfs_dentry_comrade *merge_lookup_comrade( +- struct hmdfs_sb_info *sbi, const char *name, int devid, +- unsigned int flags) +-{ +- int err; +- struct path root, path; +- struct hmdfs_dentry_comrade *comrade = NULL; +- const struct cred *old_cred = hmdfs_override_creds(sbi->cred); +- +- err = kern_path(sbi->real_dst, LOOKUP_DIRECTORY, &root); +- if (err) { +- comrade = ERR_PTR(err); +- goto out; +- } +- +- err = vfs_path_lookup(root.dentry, root.mnt, name, flags, &path); +- if (err) { +- comrade = ERR_PTR(err); +- goto root_put; +- } +- +- comrade = alloc_comrade(path.dentry, devid); +- +- path_put(&path); +-root_put: +- path_put(&root); +-out: +- hmdfs_revert_creds(old_cred); +- return comrade; +-} +- +-bool is_valid_comrade(struct hmdfs_dentry_info_merge *mdi, umode_t mode) +-{ +- if (mdi->type == DT_UNKNOWN) { +- mdi->type = S_ISDIR(mode) ? 
DT_DIR : DT_REG; +- return true; +- } +- +- if (mdi->type == DT_DIR && S_ISDIR(mode)) { +- return true; +- } +- +- if (mdi->type == DT_REG && list_empty(&mdi->comrade_list) && +- !S_ISDIR(mode)) { +- return true; +- } +- +- return false; +-} +- +-static void merge_lookup_work_func(struct work_struct *work) +-{ +- struct merge_lookup_work *ml_work; +- struct hmdfs_dentry_comrade *comrade; +- struct hmdfs_dentry_info_merge *mdi; +- int found = false; +- +- ml_work = container_of(work, struct merge_lookup_work, work); +- mdi = container_of(ml_work->wait_queue, struct hmdfs_dentry_info_merge, +- wait_queue); +- +- trace_hmdfs_merge_lookup_work_enter(ml_work); +- +- comrade = merge_lookup_comrade(ml_work->sbi, ml_work->name, +- ml_work->devid, ml_work->flags); +- if (IS_ERR(comrade)) { +- mutex_lock(&mdi->work_lock); +- goto out; +- } +- +- mutex_lock(&mdi->work_lock); +- mutex_lock(&mdi->comrade_list_lock); +- if (!is_valid_comrade(mdi, hmdfs_cm(comrade))) { +- destroy_comrade(comrade); +- } else { +- found = true; +- link_comrade(&mdi->comrade_list, comrade); +- } +- mutex_unlock(&mdi->comrade_list_lock); +- +-out: +- if (--mdi->work_count == 0 || found) +- wake_up_all(ml_work->wait_queue); +- mutex_unlock(&mdi->work_lock); +- +- trace_hmdfs_merge_lookup_work_exit(ml_work, found); +- kfree(ml_work->name); +- kfree(ml_work); +-} +- +-int merge_lookup_async(struct hmdfs_dentry_info_merge *mdi, +- struct hmdfs_sb_info *sbi, int devid, const char *name, +- unsigned int flags) +-{ +- int err = -ENOMEM; +- struct merge_lookup_work *ml_work; +- +- ml_work = kmalloc(sizeof(*ml_work), GFP_KERNEL); +- if (!ml_work) +- goto out; +- +- ml_work->name = kstrdup(name, GFP_KERNEL); +- if (!ml_work->name) { +- kfree(ml_work); +- goto out; +- } +- +- ml_work->devid = devid; +- ml_work->flags = flags; +- ml_work->sbi = sbi; +- ml_work->wait_queue = &mdi->wait_queue; +- INIT_WORK(&ml_work->work, merge_lookup_work_func); +- +- schedule_work(&ml_work->work); +- ++mdi->work_count; +- err = 0; +-out: +- return err; +-} +- +-char *hmdfs_get_real_dname(struct dentry *dentry, int *devid, int *type) +-{ +- char *rname; +- +- *type = check_filename(dentry->d_name.name, dentry->d_name.len); +- if (*type == DT_REG) +- rname = conf_name_trans_reg(dentry, devid); +- else if (*type == DT_DIR) +- rname = conf_name_trans_dir(dentry); +- else +- rname = conf_name_trans_nop(dentry); +- +- return rname; +-} +- +-static int lookup_merge_normal(struct dentry *dentry, unsigned int flags) +-{ +- int ret = -ENOMEM; +- int err = 0; +- int devid = -1; +- struct dentry *pdentry = dget_parent(dentry); +- struct hmdfs_dentry_info_merge *mdi = hmdfs_dm(dentry); +- struct hmdfs_sb_info *sbi = hmdfs_sb(dentry->d_sb); +- struct hmdfs_peer *peer; +- char *rname, *ppath, *cpath; +- +- rname = hmdfs_get_real_dname(dentry, &devid, &mdi->type); +- if (unlikely(!rname)) { +- goto out; +- } +- +- ppath = hmdfs_merge_get_dentry_relative_path(pdentry); +- if (unlikely(!ppath)) { +- hmdfs_err("failed to get parent relative path"); +- goto out_rname; +- } +- +- cpath = kzalloc(PATH_MAX, GFP_KERNEL); +- if (unlikely(!cpath)) { +- hmdfs_err("failed to get child device_view path"); +- goto out_ppath; +- } +- +- mutex_lock(&mdi->work_lock); +- mutex_lock(&sbi->connections.node_lock); +- if (mdi->type != DT_REG || devid == 0) { +- snprintf(cpath, PATH_MAX, "device_view/local%s/%s", ppath, +- rname); +- err = merge_lookup_async(mdi, sbi, 0, cpath, flags); +- if (err) +- hmdfs_err("failed to create local lookup work"); +- } +- +- list_for_each_entry(peer, 
&sbi->connections.node_list, list) {
+-		if (mdi->type == DT_REG && peer->device_id != devid)
+-			continue;
+-		snprintf(cpath, PATH_MAX, "device_view/%s%s/%s", peer->cid,
+-			 ppath, rname);
+-		err = merge_lookup_async(mdi, sbi, peer->device_id, cpath,
+-					 flags);
+-		if (err)
+-			hmdfs_err("failed to create remote lookup work");
+-	}
+-	mutex_unlock(&sbi->connections.node_lock);
+-	mutex_unlock(&mdi->work_lock);
+-
+-	wait_event(mdi->wait_queue, is_merge_lookup_end(mdi));
+-
+-	ret = -ENOENT;
+-	if (!is_comrade_list_empty(mdi))
+-		ret = 0;
+-
+-	kfree(cpath);
+-out_ppath:
+-	kfree(ppath);
+-out_rname:
+-	kfree(rname);
+-out:
+-	dput(pdentry);
+-	return ret;
+-}
+-
+-/**
+- * do_lookup_merge_root - look up the root of the merge view (root/merge_view)
+- *
+- * It's common for a network filesystem to incur various faults, so we
+- * intend to show mercy for faults here, except faults reported by the local
+- * device.
+- */
+-static int do_lookup_merge_root(struct path path_dev,
+-				struct dentry *child_dentry, unsigned int flags)
+-{
+-	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);
+-	struct hmdfs_dentry_comrade *comrade;
+-	const int buf_len =
+-		max((int)HMDFS_CID_SIZE + 1, (int)sizeof(DEVICE_VIEW_LOCAL));
+-	char *buf = kzalloc(buf_len, GFP_KERNEL);
+-	struct hmdfs_peer *peer;
+-	LIST_HEAD(head);
+-	int ret;
+-
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	// lookup real_dst/device_view/local
+-	memcpy(buf, DEVICE_VIEW_LOCAL, sizeof(DEVICE_VIEW_LOCAL));
+-	comrade = lookup_comrade(path_dev, buf, HMDFS_DEVID_LOCAL, flags);
+-	if (IS_ERR(comrade)) {
+-		ret = PTR_ERR(comrade);
+-		goto out;
+-	}
+-	link_comrade(&head, comrade);
+-
+-	// lookup real_dst/device_view/cidxx
+-	mutex_lock(&sbi->connections.node_lock);
+-	list_for_each_entry(peer, &sbi->connections.node_list, list) {
+-		mutex_unlock(&sbi->connections.node_lock);
+-		memcpy(buf, peer->cid, HMDFS_CID_SIZE);
+-		comrade = lookup_comrade(path_dev, buf, peer->device_id, flags);
+-		if (IS_ERR(comrade))
+-			continue;
+-
+-		link_comrade(&head, comrade);
+-		mutex_lock(&sbi->connections.node_lock);
+-	}
+-	mutex_unlock(&sbi->connections.node_lock);
+-
+-	assign_comrades_unlocked(child_dentry, &head);
+-	ret = 0;
+-
+-out:
+-	kfree(buf);
+-	return ret;
+-}
+-
+-// mkdir -p
+-void lock_root_inode_shared(struct inode *root, bool *locked, bool *down)
+-{
+-	struct rw_semaphore *sem = &root->i_rwsem;
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+-#define RWSEM_READER_OWNED (1UL << 0)
+-#define RWSEM_RD_NONSPINNABLE (1UL << 1)
+-#define RWSEM_WR_NONSPINNABLE (1UL << 2)
+-#define RWSEM_NONSPINNABLE (RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
+-#define RWSEM_OWNER_FLAGS_MASK (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
+-	struct task_struct *sem_owner =
+-		(struct task_struct *)(atomic_long_read(&sem->owner) &
+-				       ~RWSEM_OWNER_FLAGS_MASK);
+-#else
+-	struct task_struct *sem_owner = sem->owner;
+-#endif
+-
+-	*locked = false;
+-	*down = false;
+-
+-	if (sem_owner != current)
+-		return;
+-
+-	// It's us that takes the wsem
+-	if (!inode_trylock_shared(root)) {
+-		downgrade_write(sem);
+-		*down = true;
+-	}
+-	*locked = true;
+-}
+-
+-void restore_root_inode_sem(struct inode *root, bool locked, bool down)
+-{
+-	if (!locked)
+-		return;
+-
+-	inode_unlock_shared(root);
+-	if (down)
+-		inode_lock(root);
+-}
+-
+-static int lookup_merge_root(struct inode *root_inode,
+-			     struct dentry *child_dentry, unsigned int flags)
+-{
+-	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);
+-	struct path path_dev;
+-	int ret = -ENOENT;
+-	int buf_len;
+-	char *buf = NULL;
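
The digit scan in conf_name_trans_reg() earlier in this deleted inode_merge.c is the one non-obvious algorithm in the name handling: the device serial is the run of digits immediately before the last dot, and the constant suffix preceding those digits is cut out with a single memmove. Below is a self-contained user-space sketch of the same parsing; the suffix string is a hypothetical stand-in, since the real CONFLICTING_FILE_CONST_SUFFIX value is defined in hmdfs headers not shown in this patch.

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for CONFLICTING_FILE_CONST_SUFFIX; the real
	 * constant lives in hmdfs headers that this patch does not show. */
	#define CONST_SUFFIX "_conflict_dev"

	/* Turn "photo_conflict_dev12.jpg" into "photo.jpg" with dev_id = 12,
	 * mirroring the dot/digit scan in conf_name_trans_reg(). */
	static void conf_name_trans_reg_sketch(char *name, int *dev_id)
	{
		int len = (int)strlen(name);
		int dot_pos, num_len = 0, i, start_cpy_pos;

		/* find the last dot if possible */
		for (dot_pos = len - 1; dot_pos >= 0; dot_pos--)
			if (name[dot_pos] == '.')
				break;
		if (dot_pos == -1)
			dot_pos = len;

		/* count the run of digits just before the dot: the conflict sn */
		for (i = dot_pos - 1; i >= 0 && name[i] >= '0' && name[i] <= '9'; i--)
			num_len++;

		*dev_id = 0;
		for (i = 0; i < num_len; i++)
			*dev_id = *dev_id * 10 + name[dot_pos - num_len + i] - '0';

		/* drop "<suffix><digits>" by moving the extension ('\0' included) left */
		start_cpy_pos = dot_pos - num_len - (int)strlen(CONST_SUFFIX);
		memmove(name + start_cpy_pos, name + dot_pos, len - dot_pos + 1);
	}

	int main(void)
	{
		char name[] = "photo" CONST_SUFFIX "12.jpg";
		int dev_id;

		conf_name_trans_reg_sketch(name, &dev_id);
		printf("%s (dev %d)\n", name, dev_id); /* prints: photo.jpg (dev 12) */
		return 0;
	}
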
+- bool locked, down; +- +- // consider additional one slash and one '\0' +- buf_len = strlen(sbi->real_dst) + 1 + sizeof(DEVICE_VIEW_ROOT); +- if (buf_len > PATH_MAX) +- return -ENAMETOOLONG; +- +- buf = kmalloc(buf_len, GFP_KERNEL); +- if (unlikely(!buf)) +- return -ENOMEM; +- +- sprintf(buf, "%s/%s", sbi->real_dst, DEVICE_VIEW_ROOT); +- lock_root_inode_shared(root_inode, &locked, &down); +- ret = hmdfs_get_path_in_sb(child_dentry->d_sb, buf, LOOKUP_DIRECTORY, +- &path_dev); +- if (ret) +- goto free_buf; +- +- ret = do_lookup_merge_root(path_dev, child_dentry, flags); +- path_put(&path_dev); +- +-free_buf: +- kfree(buf); +- restore_root_inode_sem(root_inode, locked, down); +- return ret; +-} +- +-int init_hmdfs_dentry_info_merge(struct hmdfs_sb_info *sbi, +- struct dentry *dentry) +-{ +- struct hmdfs_dentry_info_merge *mdi = NULL; +- +- mdi = kmem_cache_zalloc(hmdfs_dentry_merge_cachep, GFP_NOFS); +- if (!mdi) +- return -ENOMEM; +- +- mdi->ctime = jiffies; +- mdi->type = DT_UNKNOWN; +- mdi->work_count = 0; +- mutex_init(&mdi->work_lock); +- init_waitqueue_head(&mdi->wait_queue); +- INIT_LIST_HEAD(&mdi->comrade_list); +- mutex_init(&mdi->comrade_list_lock); +- +- d_set_d_op(dentry, &hmdfs_dops_merge); +- dentry->d_fsdata = mdi; +- return 0; +-} +- +-// do this in a map-reduce manner +-struct dentry *hmdfs_lookup_merge(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- bool create = flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET); +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- struct hmdfs_inode_info *pii = hmdfs_i(parent_inode); +- struct inode *child_inode = NULL; +- struct dentry *ret_dentry = NULL; +- int err = 0; +- +- /* +- * Internal flags like LOOKUP_CREATE should not pass to device view. +- * LOOKUP_REVAL is needed because dentry cache in hmdfs might be stale +- * after rename in lower fs. LOOKUP_DIRECTORY is not needed because +- * merge_view can do the judgement that whether result is directory or +- * not. +- */ +- flags = flags & LOOKUP_REVAL; +- +- child_dentry->d_fsdata = NULL; +- +- if (child_dentry->d_name.len > NAME_MAX) { +- err = -ENAMETOOLONG; +- goto out; +- } +- +- err = init_hmdfs_dentry_info_merge(sbi, child_dentry); +- if (unlikely(err)) +- goto out; +- +- if (pii->inode_type == HMDFS_LAYER_ZERO) { +- hmdfs_dm(child_dentry)->dentry_type = HMDFS_LAYER_FIRST_MERGE; +- err = lookup_merge_root(parent_inode, child_dentry, flags); +- } else { +- hmdfs_dm(child_dentry)->dentry_type = HMDFS_LAYER_OTHER_MERGE; +- err = lookup_merge_normal(child_dentry, flags); +- } +- +- if (!err) { +- struct hmdfs_inode_info *info = NULL; +- +- child_inode = fill_inode_merge(parent_inode->i_sb, parent_inode, +- child_dentry, NULL); +- if (IS_ERR(child_inode)) { +- err = PTR_ERR(child_inode); +- goto out; +- } +- info = hmdfs_i(child_inode); +- if (info->inode_type == HMDFS_LAYER_FIRST_MERGE) +- hmdfs_root_inode_perm_init(child_inode); +- else +- check_and_fixup_ownership_remote(parent_inode, +- child_inode, +- child_dentry); +- +- ret_dentry = d_splice_alias(child_inode, child_dentry); +- if (IS_ERR(ret_dentry)) { +- clear_comrades(child_dentry); +- err = PTR_ERR(ret_dentry); +- goto out; +- } +- if (ret_dentry) +- child_dentry = ret_dentry; +- +- goto out; +- } +- +- if ((err == -ENOENT) && create) +- err = 0; +- +-out: +- return err ? 
ERR_PTR(err) : ret_dentry; +-} +- +-int hmdfs_getattr_merge(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-{ +- int ret; +- struct path lower_path = { +- .dentry = hmdfs_get_fst_lo_d(path->dentry), +- .mnt = path->mnt, +- }; +- +- if (unlikely(!lower_path.dentry)) { +- hmdfs_err("Fatal! No comrades"); +- ret = -EINVAL; +- goto out; +- } +- +- ret = vfs_getattr_nosec(&lower_path, stat, request_mask, flags); +-out: +- dput(lower_path.dentry); +- return ret; +-} +- +-int hmdfs_setattr_merge(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- struct inode *inode = d_inode(dentry); +- struct dentry *lower_dentry = hmdfs_get_fst_lo_d(dentry); +- struct inode *lower_inode = NULL; +- struct iattr lower_ia; +- unsigned int ia_valid = ia->ia_valid; +- int err = 0; +- kuid_t tmp_uid; +- +- if (!lower_dentry) { +- WARN_ON(1); +- err = -EINVAL; +- goto out; +- } +- +- lower_inode = d_inode(lower_dentry); +- memcpy(&lower_ia, ia, sizeof(lower_ia)); +- if (ia_valid & ATTR_FILE) +- lower_ia.ia_file = hmdfs_f(ia->ia_file)->lower_file; +- lower_ia.ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE); +- +- inode_lock(lower_inode); +- tmp_uid = hmdfs_override_inode_uid(lower_inode); +- +- err = notify_change(&nop_mnt_idmap, lower_dentry, &lower_ia, NULL); +- i_size_write(inode, i_size_read(lower_inode)); +- inode->i_atime = lower_inode->i_atime; +- inode->i_mtime = lower_inode->i_mtime; +- inode->__i_ctime = lower_inode->__i_ctime; +- hmdfs_revert_inode_uid(lower_inode, tmp_uid); +- +- inode_unlock(lower_inode); +- +-out: +- dput(lower_dentry); +- return err; +-} +- +-const struct inode_operations hmdfs_file_iops_merge = { +- .getattr = hmdfs_getattr_merge, +- .setattr = hmdfs_setattr_merge, +- .permission = hmdfs_permission, +-}; +- +-int do_mkdir_merge(struct inode *parent_inode, struct dentry *child_dentry, +- umode_t mode, struct inode *lo_i_parent, +- struct dentry *lo_d_child) +-{ +- int ret = 0; +- struct super_block *sb = parent_inode->i_sb; +- struct inode *child_inode = NULL; +- +- ret = vfs_mkdir(&nop_mnt_idmap, lo_i_parent, lo_d_child, mode); +- if (ret) +- goto out; +- +- child_inode = +- fill_inode_merge(sb, parent_inode, child_dentry, lo_d_child); +- if (IS_ERR(child_inode)) { +- ret = PTR_ERR(child_inode); +- goto out; +- } +- check_and_fixup_ownership_remote(parent_inode, child_inode, +- child_dentry); +- +- d_add(child_dentry, child_inode); +- /* nlink should be increased with the joining of children */ +- set_nlink(parent_inode, 2); +-out: +- return ret; +-} +- +-int do_create_merge(struct inode *parent_inode, struct dentry *child_dentry, +- umode_t mode, bool want_excl, struct inode *lo_i_parent, +- struct dentry *lo_d_child) +-{ +- int ret = 0; +- struct super_block *sb = parent_inode->i_sb; +- struct inode *child_inode = NULL; +- +- ret = vfs_create(&nop_mnt_idmap, lo_i_parent, lo_d_child, mode, want_excl); +- if (ret) +- goto out; +- +- child_inode = +- fill_inode_merge(sb, parent_inode, child_dentry, lo_d_child); +- if (IS_ERR(child_inode)) { +- ret = PTR_ERR(child_inode); +- goto out; +- } +- check_and_fixup_ownership_remote(parent_inode, child_inode, +- child_dentry); +- +- d_add(child_dentry, child_inode); +- /* nlink should be increased with the joining of children */ +- set_nlink(parent_inode, 2); +-out: +- return ret; +-} +- +-int hmdfs_do_ops_merge(struct inode *i_parent, struct dentry *d_child, +- struct dentry *lo_d_child, struct path path, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- int 
ret = 0; +- +- if (rec_op_para->is_last) { +- switch (rec_op_para->opcode) { +- case F_MKDIR_MERGE: +- ret = do_mkdir_merge(i_parent, d_child, +- rec_op_para->mode, +- d_inode(path.dentry), lo_d_child); +- break; +- case F_CREATE_MERGE: +- ret = do_create_merge(i_parent, d_child, +- rec_op_para->mode, +- rec_op_para->want_excl, +- d_inode(path.dentry), lo_d_child); +- break; +- default: +- ret = -EINVAL; +- break; +- } +- } else { +- ret = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), lo_d_child, +- rec_op_para->mode); +- } +- if (ret) +- hmdfs_err("vfs_ops failed, ops %d, err = %d", +- rec_op_para->opcode, ret); +- return ret; +-} +- +-int hmdfs_create_lower_dentry(struct inode *i_parent, struct dentry *d_child, +- struct dentry *lo_d_parent, bool is_dir, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- struct hmdfs_sb_info *sbi = i_parent->i_sb->s_fs_info; +- struct hmdfs_dentry_comrade *new_comrade = NULL; +- struct dentry *lo_d_child = NULL; +- char *path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *absolute_path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *path_name = NULL; +- struct path path = { .mnt = NULL, .dentry = NULL }; +- int ret = 0; +- +- if (unlikely(!path_buf || !absolute_path_buf)) { +- ret = -ENOMEM; +- goto out; +- } +- +- path_name = dentry_path_raw(lo_d_parent, path_buf, PATH_MAX); +- if (IS_ERR(path_name)) { +- ret = PTR_ERR(path_name); +- goto out; +- } +- if ((strlen(sbi->real_dst) + strlen(path_name) + +- strlen(d_child->d_name.name) + 2) > PATH_MAX) { +- ret = -ENAMETOOLONG; +- goto out; +- } +- +- sprintf(absolute_path_buf, "%s%s/%s", sbi->real_dst, path_name, +- d_child->d_name.name); +- +- if (is_dir) +- lo_d_child = kern_path_create(AT_FDCWD, absolute_path_buf, +- &path, LOOKUP_DIRECTORY); +- else +- lo_d_child = kern_path_create(AT_FDCWD, absolute_path_buf, +- &path, 0); +- if (IS_ERR(lo_d_child)) { +- ret = PTR_ERR(lo_d_child); +- goto out; +- } +- // to ensure link_comrade after vfs_mkdir succeed +- ret = hmdfs_do_ops_merge(i_parent, d_child, lo_d_child, path, +- rec_op_para); +- if (ret) +- goto out_put; +- new_comrade = alloc_comrade(lo_d_child, HMDFS_DEVID_LOCAL); +- if (IS_ERR(new_comrade)) { +- ret = PTR_ERR(new_comrade); +- goto out_put; +- } else { +- link_comrade_unlocked(d_child, new_comrade); +- } +- +- update_inode_attr(d_inode(d_child), d_child); +- +-out_put: +- done_path_create(&path, lo_d_child); +-out: +- kfree(absolute_path_buf); +- kfree(path_buf); +- return ret; +-} +- +-static int create_lo_d_parent_recur(struct dentry *d_parent, +- struct dentry *d_child, umode_t mode, +- struct hmdfs_recursive_para *rec_op_para) +-{ +- struct dentry *lo_d_parent, *d_pparent; +- struct hmdfs_dentry_info_merge *pmdi = NULL; +- int ret = 0; +- +- pmdi = hmdfs_dm(d_parent); +- wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- d_pparent = dget_parent(d_parent); +- ret = create_lo_d_parent_recur(d_pparent, d_parent, +- d_inode(d_parent)->i_mode, +- rec_op_para); +- dput(d_pparent); +- if (ret) +- goto out; +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- ret = -ENOENT; +- goto out; +- } +- } +- rec_op_para->is_last = false; +- rec_op_para->mode = mode; +- ret = hmdfs_create_lower_dentry(d_inode(d_parent), d_child, lo_d_parent, +- true, rec_op_para); +-out: +- dput(lo_d_parent); +- return ret; +-} +- +-int create_lo_d_child(struct inode *i_parent, struct dentry *d_child, +- bool is_dir, struct hmdfs_recursive_para 
*rec_op_para) +-{ +- struct dentry *d_pparent, *lo_d_parent, *lo_d_child; +- struct dentry *d_parent = dget_parent(d_child); +- struct hmdfs_dentry_info_merge *pmdi = hmdfs_dm(d_parent); +- int ret = 0; +- mode_t d_child_mode = rec_op_para->mode; +- +- wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); +- +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- d_pparent = dget_parent(d_parent); +- ret = create_lo_d_parent_recur(d_pparent, d_parent, +- d_inode(d_parent)->i_mode, +- rec_op_para); +- dput(d_pparent); +- if (unlikely(ret)) { +- lo_d_child = ERR_PTR(ret); +- goto out; +- } +- lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); +- if (!lo_d_parent) { +- lo_d_child = ERR_PTR(-ENOENT); +- goto out; +- } +- } +- rec_op_para->is_last = true; +- rec_op_para->mode = d_child_mode; +- ret = hmdfs_create_lower_dentry(i_parent, d_child, lo_d_parent, is_dir, +- rec_op_para); +- +-out: +- dput(d_parent); +- dput(lo_d_parent); +- return ret; +-} +- +-void hmdfs_init_recursive_para(struct hmdfs_recursive_para *rec_op_para, +- int opcode, mode_t mode, bool want_excl, +- const char *name) +-{ +- rec_op_para->is_last = true; +- rec_op_para->opcode = opcode; +- rec_op_para->mode = mode; +- rec_op_para->want_excl = want_excl; +- rec_op_para->name = name; +-} +- +-int hmdfs_mkdir_merge(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- int ret = 0; +- struct hmdfs_recursive_para *rec_op_para = NULL; +- +- // confict_name & file_type is checked by hmdfs_mkdir_local +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto out; +- } +- rec_op_para = kmalloc(sizeof(*rec_op_para), GFP_KERNEL); +- if (!rec_op_para) { +- ret = -ENOMEM; +- goto out; +- } +- +- hmdfs_init_recursive_para(rec_op_para, F_MKDIR_MERGE, mode, false, +- NULL); +- ret = create_lo_d_child(dir, dentry, true, rec_op_para); +-out: +- hmdfs_trace_merge(trace_hmdfs_mkdir_merge, dir, dentry, ret); +- if (ret) +- d_drop(dentry); +- kfree(rec_op_para); +- return ret; +-} +- +-int hmdfs_create_merge(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, +- bool want_excl) +-{ +- struct hmdfs_recursive_para *rec_op_para = NULL; +- int ret = 0; +- +- rec_op_para = kmalloc(sizeof(*rec_op_para), GFP_KERNEL); +- if (!rec_op_para) { +- ret = -ENOMEM; +- goto out; +- } +- hmdfs_init_recursive_para(rec_op_para, F_CREATE_MERGE, mode, want_excl, +- NULL); +- // confict_name & file_type is checked by hmdfs_create_local +- ret = create_lo_d_child(dir, dentry, false, rec_op_para); +-out: +- hmdfs_trace_merge(trace_hmdfs_create_merge, dir, dentry, ret); +- if (ret) +- d_drop(dentry); +- kfree(rec_op_para); +- return ret; +-} +- +-int do_rmdir_merge(struct inode *dir, struct dentry *dentry) +-{ +- int ret = 0; +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct dentry *lo_d = NULL; +- struct dentry *lo_d_dir = NULL; +- struct inode *lo_i_dir = NULL; +- +- wait_event(dim->wait_queue, !has_merge_lookup_work(dim)); +- +- mutex_lock(&dim->comrade_list_lock); +- list_for_each_entry(comrade, &(dim->comrade_list), list) { +- lo_d = comrade->lo_d; +- lo_d_dir = lock_parent(lo_d); +- lo_i_dir = d_inode(lo_d_dir); +- ret = vfs_rmdir(&nop_mnt_idmap, lo_i_dir, lo_d); +- unlock_dir(lo_d_dir); +- if (ret) +- break; +- } +- mutex_unlock(&dim->comrade_list_lock); +- hmdfs_trace_merge(trace_hmdfs_rmdir_merge, dir, dentry, ret); +- return ret; +-} +- +-int 
hmdfs_rmdir_merge(struct inode *dir, struct dentry *dentry) +-{ +- int ret = 0; +- +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto out; +- } +- +- ret = do_rmdir_merge(dir, dentry); +- if (ret) { +- hmdfs_err("rm dir failed:%d", ret); +- goto out; +- } +- +- hmdfs_update_meta(dir); +- d_drop(dentry); +-out: +- hmdfs_trace_merge(trace_hmdfs_rmdir_merge, dir, dentry, ret); +- return ret; +-} +- +-int do_unlink_merge(struct inode *dir, struct dentry *dentry) +-{ +- int ret = 0; +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(dentry); +- struct hmdfs_dentry_comrade *comrade = NULL; +- struct dentry *lo_d = NULL; +- struct dentry *lo_d_dir = NULL; +- struct dentry *lo_d_lookup = NULL; +- struct inode *lo_i_dir = NULL; +- +- wait_event(dim->wait_queue, !has_merge_lookup_work(dim)); +- +- mutex_lock(&dim->comrade_list_lock); +- list_for_each_entry(comrade, &(dim->comrade_list), list) { +- lo_d = comrade->lo_d; +- dget(lo_d); +- lo_d_dir = lock_parent(lo_d); +- /* lo_d could be unhashed, need to lookup again here */ +- lo_d_lookup = lookup_one_len(lo_d->d_name.name, lo_d_dir, +- strlen(lo_d->d_name.name)); +- if (IS_ERR(lo_d_lookup)) { +- ret = PTR_ERR(lo_d_lookup); +- hmdfs_err("lookup_one_len failed, err = %d", ret); +- unlock_dir(lo_d_dir); +- dput(lo_d); +- break; +- } +- lo_i_dir = d_inode(lo_d_dir); +- ret = vfs_unlink(&nop_mnt_idmap, lo_i_dir, lo_d_lookup, NULL); +- dput(lo_d_lookup); +- unlock_dir(lo_d_dir); +- dput(lo_d); +- if (ret) +- break; +- } +- mutex_unlock(&dim->comrade_list_lock); +- +- return ret; +-} +- +-int hmdfs_unlink_merge(struct inode *dir, struct dentry *dentry) +-{ +- int ret = 0; +- +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto out; +- } +- +- ret = do_unlink_merge(dir, dentry); +- if (ret) { +- hmdfs_err("unlink failed:%d", ret); +- goto out; +- } else { +- hmdfs_update_meta(dir); +- } +- +- d_drop(dentry); +-out: +- return ret; +-} +- +-int do_rename_merge(struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- int ret = 0; +- struct hmdfs_sb_info *sbi = (old_dir->i_sb)->s_fs_info; +- struct hmdfs_dentry_info_merge *dim = hmdfs_dm(old_dentry); +- struct hmdfs_dentry_comrade *comrade = NULL, *new_comrade = NULL; +- struct path lo_p_new = { .mnt = NULL, .dentry = NULL }; +- struct inode *lo_i_old_dir = NULL, *lo_i_new_dir = NULL; +- struct dentry *lo_d_old_dir = NULL, *lo_d_old = NULL, +- *lo_d_new_dir = NULL, *lo_d_new = NULL; +- struct dentry *d_new_dir = NULL; +- char *path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *abs_path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- char *path_name = NULL; +- struct hmdfs_dentry_info_merge *pmdi = NULL; +- struct renamedata rename_data; +- +- if (flags & ~RENAME_NOREPLACE) { +- ret = -EINVAL; +- goto out; +- } +- +- if (unlikely(!path_buf || !abs_path_buf)) { +- ret = -ENOMEM; +- goto out; +- } +- +- wait_event(dim->wait_queue, !has_merge_lookup_work(dim)); +- +- list_for_each_entry(comrade, &dim->comrade_list, list) { +- lo_d_old = comrade->lo_d; +- d_new_dir = d_find_alias(new_dir); +- pmdi = hmdfs_dm(d_new_dir); +- wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); +- lo_d_new_dir = hmdfs_get_lo_d(d_new_dir, comrade->dev_id); +- dput(d_new_dir); +- +- if (!lo_d_new_dir) +- continue; +- path_name = dentry_path_raw(lo_d_new_dir, path_buf, PATH_MAX); +- dput(lo_d_new_dir); +- if (IS_ERR(path_name)) { +- ret = PTR_ERR(path_name); +- continue; +- } +- +- 
if (strlen(sbi->real_dst) + strlen(path_name) + +- strlen(new_dentry->d_name.name) + 2 > PATH_MAX) { +- ret = -ENAMETOOLONG; +- goto out; +- } +- +- snprintf(abs_path_buf, PATH_MAX, "%s%s/%s", sbi->real_dst, +- path_name, new_dentry->d_name.name); +- if (S_ISDIR(d_inode(old_dentry)->i_mode)) +- lo_d_new = kern_path_create(AT_FDCWD, abs_path_buf, +- &lo_p_new, +- LOOKUP_DIRECTORY); +- else +- lo_d_new = kern_path_create(AT_FDCWD, abs_path_buf, +- &lo_p_new, 0); +- if (IS_ERR(lo_d_new)) { +- ret = PTR_ERR(lo_d_new); +- goto out; +- } +- +- lo_d_new_dir = dget_parent(lo_d_new); +- lo_i_new_dir = d_inode(lo_d_new_dir); +- lo_d_old_dir = dget_parent(lo_d_old); +- lo_i_old_dir = d_inode(lo_d_old_dir); +- +- rename_data.old_mnt_idmap = &nop_mnt_idmap; +- rename_data.old_dir = lo_i_old_dir; +- rename_data.old_dentry = lo_d_old; +- rename_data.new_mnt_idmap = &nop_mnt_idmap; +- rename_data.new_dir = lo_i_new_dir; +- rename_data.new_dentry = lo_d_new; +- rename_data.flags = flags; +- ret = vfs_rename(&rename_data); +- +- new_comrade = alloc_comrade(lo_p_new.dentry, comrade->dev_id); +- if (IS_ERR(new_comrade)) { +- ret = PTR_ERR(new_comrade); +- goto no_comrade; +- } +- +- link_comrade_unlocked(new_dentry, new_comrade); +-no_comrade: +- done_path_create(&lo_p_new, lo_d_new); +- dput(lo_d_old_dir); +- dput(lo_d_new_dir); +- } +-out: +- kfree(abs_path_buf); +- kfree(path_buf); +- return ret; +-} +- +-int hmdfs_rename_merge(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- char *old_dir_buf = NULL; +- char *new_dir_buf = NULL; +- char *old_dir_path = NULL; +- char *new_dir_path = NULL; +- struct dentry *old_dir_dentry = NULL; +- struct dentry *new_dir_dentry = NULL; +- int ret = 0; +- +- if (hmdfs_file_type(old_dentry->d_name.name) != HMDFS_TYPE_COMMON || +- hmdfs_file_type(new_dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- ret = -EACCES; +- goto rename_out; +- } +- +- if (hmdfs_i(old_dir)->inode_type != hmdfs_i(new_dir)->inode_type) { +- hmdfs_err("in different view"); +- ret = -EPERM; +- goto rename_out; +- } +- +- old_dir_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- new_dir_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- if (!old_dir_buf || !new_dir_buf) { +- ret = -ENOMEM; +- goto rename_out; +- } +- +- new_dir_dentry = d_find_alias(new_dir); +- if (!new_dir_dentry) { +- ret = -EINVAL; +- goto rename_out; +- } +- +- old_dir_dentry = d_find_alias(old_dir); +- if (!old_dir_dentry) { +- ret = -EINVAL; +- dput(new_dir_dentry); +- goto rename_out; +- } +- +- old_dir_path = dentry_path_raw(old_dir_dentry, old_dir_buf, PATH_MAX); +- new_dir_path = dentry_path_raw(new_dir_dentry, new_dir_buf, PATH_MAX); +- dput(new_dir_dentry); +- dput(old_dir_dentry); +- if (strcmp(old_dir_path, new_dir_path)) { +- ret = -EPERM; +- goto rename_out; +- } +- +- trace_hmdfs_rename_merge(old_dir, old_dentry, new_dir, new_dentry, +- flags); +- ret = do_rename_merge(old_dir, old_dentry, new_dir, new_dentry, flags); +- +- if (ret != 0) +- d_drop(new_dentry); +- +- if (S_ISREG(old_dentry->d_inode->i_mode) && !ret) +- d_invalidate(old_dentry); +- +-rename_out: +- kfree(old_dir_buf); +- kfree(new_dir_buf); +- return ret; +-} +- +-const struct inode_operations hmdfs_dir_iops_merge = { +- .lookup = hmdfs_lookup_merge, +- .mkdir = hmdfs_mkdir_merge, +- .create = hmdfs_create_merge, +- .rmdir = hmdfs_rmdir_merge, +- .unlink = hmdfs_unlink_merge, +- .rename = hmdfs_rename_merge, +- .permission = hmdfs_permission, +-}; +diff --git 
a/fs/hmdfs/inode_remote.c b/fs/hmdfs/inode_remote.c
+deleted file mode 100644
+index 6d1a54b3c..000000000
+--- a/fs/hmdfs/inode_remote.c
++++ /dev/null
+@@ -1,996 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * fs/hmdfs/inode_remote.c
+- *
+- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
+- */
+-
+-#include
+-#include
+-#include
+-#include
+-
+-#include "comm/socket_adapter.h"
+-#include "hmdfs.h"
+-#include "hmdfs_client.h"
+-#include "hmdfs_dentryfile.h"
+-#include "hmdfs_share.h"
+-#include "hmdfs_trace.h"
+-#include "authority/authentication.h"
+-#include "stash.h"
+-
+-struct hmdfs_lookup_ret *lookup_remote_dentry(struct dentry *child_dentry,
+-					      const struct qstr *qstr,
+-					      uint64_t dev_id)
+-{
+-	struct hmdfs_lookup_ret *lookup_ret;
+-	struct hmdfs_dentry *dentry = NULL;
+-	struct clearcache_item *cache_item = NULL;
+-	struct hmdfs_dcache_lookup_ctx ctx;
+-	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);
+-
+-	cache_item = hmdfs_find_cache_item(dev_id, child_dentry->d_parent);
+-	if (!cache_item)
+-		return NULL;
+-
+-	lookup_ret = kmalloc(sizeof(*lookup_ret), GFP_KERNEL);
+-	if (!lookup_ret)
+-		goto out;
+-
+-	hmdfs_init_dcache_lookup_ctx(&ctx, sbi, qstr, cache_item->filp);
+-	dentry = hmdfs_find_dentry(child_dentry, &ctx);
+-	if (!dentry) {
+-		kfree(lookup_ret);
+-		lookup_ret = NULL;
+-		goto out;
+-	}
+-
+-	lookup_ret->i_mode = le16_to_cpu(dentry->i_mode);
+-	lookup_ret->i_size = le64_to_cpu(dentry->i_size);
+-	lookup_ret->i_mtime = le64_to_cpu(dentry->i_mtime);
+-	lookup_ret->i_mtime_nsec = le32_to_cpu(dentry->i_mtime_nsec);
+-	lookup_ret->i_ino = le64_to_cpu(dentry->i_ino);
+-
+-	hmdfs_unlock_file(ctx.filp, get_dentry_group_pos(ctx.bidx),
+-			  DENTRYGROUP_SIZE);
+-	kfree(ctx.page);
+-out:
+-	kref_put(&cache_item->ref, release_cache_item);
+-	return lookup_ret;
+-}
+-
+-/* get_remote_inode_info - fill hmdfs_lookup_ret by info from remote getattr
+- *
+- * @dentry: local dentry
+- * @hmdfs_peer: which remote device
+- * @flags: lookup flags
+- *
+- * returns an allocated and initialized hmdfs_lookup_ret on success, and NULL
+- * on failure.
+- */ +-struct hmdfs_lookup_ret *get_remote_inode_info(struct hmdfs_peer *con, +- struct dentry *dentry, +- unsigned int flags) +-{ +- int err = 0; +- struct hmdfs_lookup_ret *lookup_ret = NULL; +- struct hmdfs_getattr_ret *getattr_ret = NULL; +- unsigned int expected_flags = 0; +- +- lookup_ret = kmalloc(sizeof(*lookup_ret), GFP_KERNEL); +- if (!lookup_ret) +- return NULL; +- +- err = hmdfs_remote_getattr(con, dentry, flags, &getattr_ret); +- if (err) { +- hmdfs_debug("inode info get failed with err %d", err); +- kfree(lookup_ret); +- return NULL; +- } +- /* make sure we got everything we need */ +- expected_flags = STATX_INO | STATX_SIZE | STATX_MODE | STATX_MTIME; +- if ((getattr_ret->stat.result_mask & expected_flags) != +- expected_flags) { +- hmdfs_debug("remote getattr failed with flag %x", +- getattr_ret->stat.result_mask); +- kfree(lookup_ret); +- kfree(getattr_ret); +- return NULL; +- } +- +- lookup_ret->i_mode = getattr_ret->stat.mode; +- lookup_ret->i_size = getattr_ret->stat.size; +- lookup_ret->i_mtime = getattr_ret->stat.mtime.tv_sec; +- lookup_ret->i_mtime_nsec = getattr_ret->stat.mtime.tv_nsec; +- lookup_ret->i_ino = getattr_ret->stat.ino; +- kfree(getattr_ret); +- return lookup_ret; +-} +- +-static void hmdfs_remote_readdir_work(struct work_struct *work) +-{ +- struct hmdfs_readdir_work *rw = +- container_of(to_delayed_work(work), struct hmdfs_readdir_work, +- dwork); +- struct dentry *dentry = rw->dentry; +- struct hmdfs_peer *con = rw->con; +- const struct cred *old_cred = hmdfs_override_creds(con->sbi->cred); +- bool empty = false; +- +- get_remote_dentry_file(dentry, con); +- hmdfs_d(dentry)->async_readdir_in_progress = 0; +- hmdfs_revert_creds(old_cred); +- +- spin_lock(&con->sbi->async_readdir_work_lock); +- list_del(&rw->head); +- empty = list_empty(&con->sbi->async_readdir_work_list); +- spin_unlock(&con->sbi->async_readdir_work_lock); +- +- dput(dentry); +- peer_put(con); +- kfree(rw); +- +- if (empty) +- wake_up_interruptible(&con->sbi->async_readdir_wq); +-} +- +-static void get_remote_dentry_file_in_wq(struct dentry *dentry, +- struct hmdfs_peer *con) +-{ +- struct hmdfs_readdir_work *rw = NULL; +- +- /* do nothing if async readdir is already in progress */ +- if (cmpxchg_relaxed(&hmdfs_d(dentry)->async_readdir_in_progress, 0, +- 1)) +- return; +- +- rw = kmalloc(sizeof(*rw), GFP_KERNEL); +- if (!rw) { +- hmdfs_d(dentry)->async_readdir_in_progress = 0; +- return; +- } +- +- dget(dentry); +- peer_get(con); +- rw->dentry = dentry; +- rw->con = con; +- spin_lock(&con->sbi->async_readdir_work_lock); +- INIT_DELAYED_WORK(&rw->dwork, hmdfs_remote_readdir_work); +- list_add(&rw->head, &con->sbi->async_readdir_work_list); +- spin_unlock(&con->sbi->async_readdir_work_lock); +- queue_delayed_work(con->dentry_wq, &rw->dwork, 0); +-} +- +-void get_remote_dentry_file_sync(struct dentry *dentry, struct hmdfs_peer *con) +-{ +- get_remote_dentry_file_in_wq(dentry, con); +- flush_workqueue(con->dentry_wq); +-} +- +-struct hmdfs_lookup_ret *hmdfs_lookup_by_con(struct hmdfs_peer *con, +- struct dentry *dentry, +- struct qstr *qstr, +- unsigned int flags, +- const char *relative_path) +-{ +- struct hmdfs_lookup_ret *result = NULL; +- +- /* +- * LOOKUP_REVAL means we found stale info from dentry file, thus +- * we need to use remote getattr. +- */ +- if (flags & LOOKUP_REVAL) { +- /* +- * HMDFS_LOOKUP_REVAL means we need to skip dentry cache +- * in lookup, because dentry cache in server might have +- * stale data. 
+-		 */
+-		result = get_remote_inode_info(con, dentry,
+-					       HMDFS_LOOKUP_REVAL);
+-		get_remote_dentry_file_in_wq(dentry->d_parent, con);
+-		return result;
+-	}
+-
+-	/* If cache file is still valid */
+-	if (hmdfs_cache_revalidate(READ_ONCE(con->conn_time),
+-				   con->device_id, dentry->d_parent)) {
+-		result = lookup_remote_dentry(dentry, qstr,
+-					      con->device_id);
+-		/*
+-		 * If lookup from cache file failed, use getattr to see
+-		 * if the remote has created the file.
+-		 */
+-		if (!(flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) &&
+-		    !result)
+-			result = get_remote_inode_info(con, dentry, 0);
+-		/* If cache file expired, use getattr directly
+-		 * except create and rename opt
+-		 */
+-	} else {
+-		result = get_remote_inode_info(con, dentry, 0);
+-		get_remote_dentry_file_in_wq(dentry->d_parent, con);
+-	}
+-
+-	return result;
+-}
+-
+-/*
+- * hmdfs_update_inode_size - update inode size when finding an already
+- * existing inode.
+- *
+- * First of all, if the file is opened for writing, we don't update inode size
+- * here, because inode size is about to be changed after writing.
+- *
+- * If the file is not opened, simply update getattr_isize (not the actual inode
+- * size, just a value shown to the user). This is safe because inode size will
+- * be up-to-date after open.
+- *
+- * If the file is opened for read:
+- * a. getattr_isize == HMDFS_STALE_REMOTE_ISIZE
+- * 1) i_size == new_size, nothing needs to be done.
+- * 2) i_size > new_size, we keep the i_size and set getattr_isize to new_size;
+- * stale data might be read in this case, which is fine because the file was
+- * opened before the remote truncated it.
+- * 3) i_size < new_size, we drop the last page of the file if i_size is not
+- * aligned to PAGE_SIZE, clear getattr_isize, and update i_size to
+- * new_size.
+- * b. getattr_isize != HMDFS_STALE_REMOTE_ISIZE, getattr_isize will only be set
+- * after 2).
+- * 4) getattr_isize > i_size, this situation is impossible.
+- * 5) i_size >= new_size, this case is the same as 2).
+- * 6) i_size < new_size, this case is the same as 3).
+- */
+-static void hmdfs_update_inode_size(struct inode *inode, uint64_t new_size)
+-{
+-	struct hmdfs_inode_info *info = hmdfs_i(inode);
+-	int writecount;
+-	uint64_t size;
+-
+-	inode_lock(inode);
+-	size = info->getattr_isize;
+-	if (size == HMDFS_STALE_REMOTE_ISIZE)
+-		size = i_size_read(inode);
+-	if (size == new_size) {
+-		inode_unlock(inode);
+-		return;
+-	}
+-
+-	writecount = atomic_read(&inode->i_writecount);
+-	/* check if writing is in progress */
+-	if (writecount > 0) {
+-		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
+-		inode_unlock(inode);
+-		return;
+-	}
+-
+-	/* check if there is no one who opens the file */
+-	if (kref_read(&info->ref) == 0)
+-		goto update_info;
+-
+-	/* check if there is someone who opens the file for read */
+-	if (writecount == 0) {
+-		uint64_t aligned_size;
+-
+-		/* use inode size here instead of getattr_isize */
+-		size = i_size_read(inode);
+-		if (new_size <= size)
+-			goto update_info;
+-		/*
+-		 * if the old inode size is not aligned to HMDFS_PAGE_SIZE, we
+-		 * need to drop the last page of the inode, otherwise zero will
+-		 * be returned while reading the new range in the page after
+-		 * changing inode size.
+- */ +- aligned_size = round_down(size, HMDFS_PAGE_SIZE); +- if (aligned_size != size) +- truncate_inode_pages(inode->i_mapping, aligned_size); +- i_size_write(inode, new_size); +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- inode_unlock(inode); +- return; +- } +- +-update_info: +- info->getattr_isize = new_size; +- inode_unlock(inode); +-} +- +-static void hmdfs_update_inode(struct inode *inode, +- struct hmdfs_lookup_ret *lookup_result) +-{ +- struct hmdfs_time_t remote_mtime = { +- .tv_sec = lookup_result->i_mtime, +- .tv_nsec = lookup_result->i_mtime_nsec, +- }; +- +- /* +- * We only update mtime if the file is not opened for writing. If we do +- * update it before writing is about to start, user might see the mtime +- * up-and-down if system time in server and client do not match. However +- * mtime in client will eventually match server after timeout without +- * writing. +- */ +- if (!inode_is_open_for_write(inode)) +- inode->i_mtime = remote_mtime; +- +- /* +- * We don't care i_size of dir, and lock inode for dir +- * might cause deadlock. +- */ +- if (S_ISREG(inode->i_mode)) +- hmdfs_update_inode_size(inode, lookup_result->i_size); +-} +- +-static void hmdfs_fill_inode_remote(struct inode *inode, struct inode *dir, +- umode_t mode) +-{ +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- inode->i_uid = dir->i_uid; +- inode->i_gid = dir->i_gid; +-#endif +-} +- +-struct inode *fill_inode_remote(struct super_block *sb, struct hmdfs_peer *con, +- struct hmdfs_lookup_ret *res, struct inode *dir) +-{ +- int ret = 0; +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info; +- umode_t mode = res->i_mode; +- +- inode = hmdfs_iget5_locked_remote(sb, con, res->i_ino); +- if (!inode) +- return ERR_PTR(-ENOMEM); +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_OTHER_REMOTE; +- +- /* the inode was found in cache */ +- if (!(inode->i_state & I_NEW)) { +- hmdfs_fill_inode_remote(inode, dir, mode); +- hmdfs_update_inode(inode, res); +- return inode; +- } +- +- hmdfs_remote_init_stash_status(con, inode, mode); +- +- inode->__i_ctime.tv_sec = 0; +- inode->__i_ctime.tv_nsec = 0; +- inode->i_mtime.tv_sec = res->i_mtime; +- inode->i_mtime.tv_nsec = res->i_mtime_nsec; +- +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- +- if (S_ISDIR(mode)) +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- else if (S_ISREG(mode)) +- inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +- else if (S_ISLNK(mode)) +- inode->i_mode = S_IFREG | S_IRWXU | S_IRWXG; +- else { +- ret = -EIO; +- goto bad_inode; +- } +- +- if (S_ISREG(mode) || S_ISLNK(mode)) { +- inode->i_op = &hmdfs_dev_file_iops_remote; +- inode->i_fop = &hmdfs_dev_file_fops_remote; +- inode->i_size = res->i_size; +- set_nlink(inode, 1); +- } else if (S_ISDIR(mode)) { +- inode->i_op = &hmdfs_dev_dir_inode_ops_remote; +- inode->i_fop = &hmdfs_dev_dir_ops_remote; +- set_nlink(inode, 2); +- } else { +- ret = -EIO; +- goto bad_inode; +- } +- +- inode->i_mapping->a_ops = &hmdfs_dev_file_aops_remote; +- +- hmdfs_fill_inode_remote(inode, dir, mode); +- unlock_new_inode(inode); +- return inode; +-bad_inode: +- iget_failed(inode); +- return ERR_PTR(ret); +-} +- +-static struct dentry *hmdfs_lookup_remote_dentry(struct inode *parent_inode, +- struct dentry *child_dentry, +- int flags) +-{ +- struct dentry *ret = NULL; +- struct inode *inode = NULL; +- struct super_block *sb = parent_inode->i_sb; +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- struct hmdfs_lookup_ret *lookup_result = NULL; +- 
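
The long comment above hmdfs_update_inode_size() enumerates six cases, but they collapse into three outcomes: writers always stale the cached getattr size, unopened files only update getattr_isize, and readers either keep i_size (remote shrank the file) or adopt the new size (remote grew it). A simplified, runnable model of that decision logic follows; it uses plain integers in place of the inode, skips locking, and reduces the page-cache truncation to a comment, so it is illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define STALE UINT64_MAX /* stands in for HMDFS_STALE_REMOTE_ISIZE */

	struct model {
		uint64_t i_size;        /* cached inode size */
		uint64_t getattr_isize; /* size reported by getattr, or STALE */
		int writecount;         /* writers currently holding the file */
		int opened;             /* any opener at all (kref_read != 0) */
	};

	/* Mirrors the branch structure of hmdfs_update_inode_size(): writers
	 * win, unopened files just record getattr_isize, and readers either
	 * keep i_size (remote shrank) or grow it (remote grew). */
	static void update_size(struct model *m, uint64_t new_size)
	{
		uint64_t size = m->getattr_isize == STALE ? m->i_size
							  : m->getattr_isize;

		if (size == new_size)
			return;
		if (m->writecount > 0) {	/* write in progress */
			m->getattr_isize = STALE;
			return;
		}
		if (!m->opened || new_size <= m->i_size) { /* cases 1), 2), 5) */
			m->getattr_isize = new_size;
			return;
		}
		/* cases 3), 6): remote grew the file; adopt the new size
		 * (the kernel also drops the last partial page cache here) */
		m->i_size = new_size;
		m->getattr_isize = STALE;
	}

	int main(void)
	{
		struct model m = { .i_size = 100, .getattr_isize = STALE,
				   .writecount = 0, .opened = 1 };

		update_size(&m, 40); /* remote shrank: keep i_size, report 40 */
		printf("i_size=%llu getattr=%llu\n",
		       (unsigned long long)m.i_size,
		       (unsigned long long)m.getattr_isize);
		return 0;
	}
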
struct hmdfs_peer *con = NULL; +- char *file_name = NULL; +- int file_name_len = child_dentry->d_name.len; +- struct qstr qstr; +- struct hmdfs_dentry_info *gdi = hmdfs_d(child_dentry); +- uint64_t device_id = 0; +- char *relative_path = NULL; +- +- file_name = kzalloc(NAME_MAX + 1, GFP_KERNEL); +- if (!file_name) +- return ERR_PTR(-ENOMEM); +- strncpy(file_name, child_dentry->d_name.name, file_name_len); +- +- qstr.name = file_name; +- qstr.len = strlen(file_name); +- +- device_id = gdi->device_id; +- con = hmdfs_lookup_from_devid(sbi, device_id); +- if (!con) { +- ret = ERR_PTR(-ESHUTDOWN); +- goto done; +- } +- +- relative_path = hmdfs_get_dentry_relative_path(child_dentry->d_parent); +- if (unlikely(!relative_path)) { +- ret = ERR_PTR(-ENOMEM); +- hmdfs_err("get relative path failed %d", -ENOMEM); +- goto done; +- } +- +- lookup_result = hmdfs_lookup_by_con(con, child_dentry, &qstr, flags, +- relative_path); +- if (lookup_result != NULL) { +- if (S_ISLNK(lookup_result->i_mode)) +- gdi->file_type = HM_SYMLINK; +- else if (in_share_dir(child_dentry)) +- gdi->file_type = HM_SHARE; +- inode = fill_inode_remote(sb, con, lookup_result, parent_inode); +- check_and_fixup_ownership_remote(parent_inode, +- inode, +- child_dentry); +- ret = d_splice_alias(inode, child_dentry); +- if (!IS_ERR_OR_NULL(ret)) +- child_dentry = ret; +- } else { +- ret = ERR_PTR(-ENOENT); +- } +- +-done: +- if (con) +- peer_put(con); +- kfree(relative_path); +- kfree(lookup_result); +- kfree(file_name); +- return ret; +-} +- +-struct dentry *hmdfs_lookup_remote(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- int err = 0; +- struct dentry *ret = NULL; +- struct hmdfs_dentry_info *gdi = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb); +- +- trace_hmdfs_lookup_remote(parent_inode, child_dentry, flags); +- if (child_dentry->d_name.len > NAME_MAX) { +- err = -ENAMETOOLONG; +- ret = ERR_PTR(-ENAMETOOLONG); +- goto out; +- } +- +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_OTHER_REMOTE); +- if (err) { +- ret = ERR_PTR(err); +- goto out; +- } +- gdi = hmdfs_d(child_dentry); +- gdi->device_id = hmdfs_d(child_dentry->d_parent)->device_id; +- +- if (is_current_hmdfs_server_ctx()) +- goto out; +- +- ret = hmdfs_lookup_remote_dentry(parent_inode, child_dentry, flags); +- /* +- * don't return error if inode do not exist, so that vfs can continue +- * to create it. 
+- */ +- if (IS_ERR_OR_NULL(ret)) { +- err = PTR_ERR(ret); +- if (err == -ENOENT) +- ret = NULL; +- } else { +- child_dentry = ret; +- } +- +-out: +- if (!err) +- hmdfs_set_time(child_dentry, jiffies); +- trace_hmdfs_lookup_remote_end(parent_inode, child_dentry, err); +- return ret; +-} +- +-/* delete dentry in cache file */ +-void delete_in_cache_file(uint64_t dev_id, struct dentry *dentry) +-{ +- struct clearcache_item *item = NULL; +- +- item = hmdfs_find_cache_item(dev_id, dentry->d_parent); +- if (item) { +- hmdfs_delete_dentry(dentry, item->filp); +- kref_put(&item->ref, release_cache_item); +- } else { +- hmdfs_info("find cache item failed, con:%llu", dev_id); +- } +-} +- +-int hmdfs_mkdir_remote_dentry(struct hmdfs_peer *conn, struct dentry *dentry, +- umode_t mode) +-{ +- int err = 0; +- char *dir_path = NULL; +- struct dentry *parent_dentry = dentry->d_parent; +- struct inode *parent_inode = d_inode(parent_dentry); +- struct super_block *sb = parent_inode->i_sb; +- const unsigned char *d_name = dentry->d_name.name; +- struct hmdfs_lookup_ret *mkdir_ret = NULL; +- struct inode *inode = NULL; +- +- mkdir_ret = kmalloc(sizeof(*mkdir_ret), GFP_KERNEL); +- if (!mkdir_ret) { +- err = -ENOMEM; +- return err; +- } +- dir_path = hmdfs_get_dentry_relative_path(parent_dentry); +- if (!dir_path) { +- err = -EACCES; +- goto mkdir_out; +- } +- err = hmdfs_client_start_mkdir(conn, dir_path, d_name, mode, mkdir_ret); +- if (err) { +- hmdfs_err("hmdfs_client_start_mkdir failed err = %d", err); +- goto mkdir_out; +- } +- if (mkdir_ret) { +- inode = fill_inode_remote(sb, conn, mkdir_ret, parent_inode); +- check_and_fixup_ownership_remote(parent_inode, +- inode, +- dentry); +- if (!IS_ERR(inode)) +- d_add(dentry, inode); +- else +- err = PTR_ERR(inode); +- } else { +- err = -ENOENT; +- } +- +-mkdir_out: +- kfree(dir_path); +- kfree(mkdir_ret); +- return err; +-} +- +-int hmdfs_mkdir_remote(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- int err = 0; +- struct hmdfs_inode_info *info = hmdfs_i(dir); +- struct hmdfs_peer *con = info->conn; +- +- if (!con) { +- hmdfs_warning("qpb_debug: con is null!"); +- goto out; +- } +- +- err = hmdfs_mkdir_remote_dentry(con, dentry, mode); +- if (!err) +- create_in_cache_file(con->device_id, dentry); +- else +- hmdfs_err("remote mkdir failed err = %d", err); +- +-out: +- trace_hmdfs_mkdir_remote(dir, dentry, err); +- return err; +-} +- +-int hmdfs_create_remote_dentry(struct hmdfs_peer *conn, struct dentry *dentry, +- umode_t mode, bool want_excl) +-{ +- int err = 0; +- char *dir_path = NULL; +- struct dentry *parent_dentry = dentry->d_parent; +- struct inode *parent_inode = d_inode(parent_dentry); +- struct super_block *sb = parent_inode->i_sb; +- const unsigned char *d_name = dentry->d_name.name; +- struct hmdfs_lookup_ret *create_ret = NULL; +- struct inode *inode = NULL; +- +- create_ret = kmalloc(sizeof(*create_ret), GFP_KERNEL); +- if (!create_ret) { +- err = -ENOMEM; +- return err; +- } +- dir_path = hmdfs_get_dentry_relative_path(parent_dentry); +- if (!dir_path) { +- err = -EACCES; +- goto create_out; +- } +- err = hmdfs_client_start_create(conn, dir_path, d_name, mode, +- want_excl, create_ret); +- if (err) { +- hmdfs_err("hmdfs_client_start_create failed err = %d", err); +- goto create_out; +- } +- if (create_ret) { +- inode = fill_inode_remote(sb, conn, create_ret, parent_inode); +- check_and_fixup_ownership_remote(parent_inode, +- inode, +- dentry); +- if (!IS_ERR(inode)) +- d_add(dentry, inode); +- else +- err = 
PTR_ERR(inode); +- } else { +- err = -ENOENT; +- hmdfs_err("get remote inode info failed err = %d", err); +- } +- +-create_out: +- kfree(dir_path); +- kfree(create_ret); +- return err; +-} +- +-int hmdfs_create_remote(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, +- bool want_excl) +-{ +- int err = 0; +- struct hmdfs_inode_info *info = hmdfs_i(dir); +- struct hmdfs_peer *con = info->conn; +- +- if (!con) { +- hmdfs_warning("qpb_debug: con is null!"); +- goto out; +- } +- +- err = hmdfs_create_remote_dentry(con, dentry, mode, want_excl); +- if (!err) +- create_in_cache_file(con->device_id, dentry); +- else +- hmdfs_err("remote create failed err = %d", err); +- +-out: +- trace_hmdfs_create_remote(dir, dentry, err); +- return err; +-} +- +-int hmdfs_rmdir_remote_dentry(struct hmdfs_peer *conn, struct dentry *dentry) +-{ +- int error = 0; +- char *dir_path = NULL; +- const char *dentry_name = dentry->d_name.name; +- +- dir_path = hmdfs_get_dentry_relative_path(dentry->d_parent); +- if (!dir_path) { +- error = -EACCES; +- goto rmdir_out; +- } +- +- error = hmdfs_client_start_rmdir(conn, dir_path, dentry_name); +- if (!error) +- delete_in_cache_file(conn->device_id, dentry); +- +-rmdir_out: +- kfree(dir_path); +- return error; +-} +- +-int hmdfs_rmdir_remote(struct inode *dir, struct dentry *dentry) +-{ +- int err = 0; +- struct hmdfs_inode_info *info = hmdfs_i(dentry->d_inode); +- struct hmdfs_peer *con = info->conn; +- +- if (!con) +- goto out; +- +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- err = -EACCES; +- goto out; +- } +- +- err = hmdfs_rmdir_remote_dentry(con, dentry); +- /* drop dentry even remote failed +- * it maybe cause that one remote devices disconnect +- * when doing remote rmdir +- */ +- d_drop(dentry); +-out: +- /* return connect device's errcode */ +- trace_hmdfs_rmdir_remote(dir, dentry, err); +- return err; +-} +- +-int hmdfs_dev_unlink_from_con(struct hmdfs_peer *conn, struct dentry *dentry) +-{ +- int error = 0; +- char *dir_path = NULL; +- const char *dentry_name = dentry->d_name.name; +- +- dir_path = hmdfs_get_dentry_relative_path(dentry->d_parent); +- if (!dir_path) { +- error = -EACCES; +- goto unlink_out; +- } +- error = hmdfs_client_start_unlink(conn, dir_path, dentry_name); +- if (!error) { +- delete_in_cache_file(conn->device_id, dentry); +- drop_nlink(d_inode(dentry)); +- d_drop(dentry); +- } +-unlink_out: +- kfree(dir_path); +- return error; +-} +- +-int hmdfs_unlink_remote(struct inode *dir, struct dentry *dentry) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(dentry->d_inode); +- struct hmdfs_peer *conn = info->conn; +- +- if (hmdfs_file_type(dentry->d_name.name) != HMDFS_TYPE_COMMON) +- return -EACCES; +- +- if (!conn) +- return 0; +- +- if (conn->status != NODE_STAT_ONLINE) +- return 0; +- +- return hmdfs_dev_unlink_from_con(conn, dentry); +-} +- +-/* rename dentry in cache file */ +-static void rename_in_cache_file(uint64_t dev_id, struct dentry *old_dentry, +- struct dentry *new_dentry) +-{ +- struct clearcache_item *old_item = NULL; +- struct clearcache_item *new_item = NULL; +- +- old_item = hmdfs_find_cache_item(dev_id, old_dentry->d_parent); +- new_item = hmdfs_find_cache_item(dev_id, new_dentry->d_parent); +- if (old_item != NULL && new_item != NULL) { +- hmdfs_rename_dentry(old_dentry, new_dentry, old_item->filp, +- new_item->filp); +- } else if (old_item != NULL) { +- hmdfs_err("new cache item find failed!"); +- } else if (new_item != NULL) { +- hmdfs_err("old cache item find failed!"); +- 
} else { +- hmdfs_err("both cache item find failed!"); +- } +- +- if (old_item) +- kref_put(&old_item->ref, release_cache_item); +- if (new_item) +- kref_put(&new_item->ref, release_cache_item); +-} +- +-int hmdfs_rename_remote(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- int err = 0; +- int ret = 0; +- const char *old_dentry_d_name = old_dentry->d_name.name; +- char *relative_old_dir_path = 0; +- const char *new_dentry_d_name = new_dentry->d_name.name; +- char *relative_new_dir_path = 0; +- struct hmdfs_inode_info *info = hmdfs_i(old_dentry->d_inode); +- struct hmdfs_peer *con = info->conn; +- +- trace_hmdfs_rename_remote(old_dir, old_dentry, new_dir, new_dentry, +- flags); +- +- if (flags & ~RENAME_NOREPLACE) +- return -EINVAL; +- +- if (hmdfs_file_type(old_dentry->d_name.name) != HMDFS_TYPE_COMMON || +- hmdfs_file_type(new_dentry->d_name.name) != HMDFS_TYPE_COMMON) { +- return -EACCES; +- } +- +- if (hmdfs_i(old_dir)->inode_type != hmdfs_i(new_dir)->inode_type) { +- hmdfs_err("in different view"); +- return -EPERM; +- } +- +- if (hmdfs_d(old_dentry)->device_id != hmdfs_d(new_dentry)->device_id) +- return -EXDEV; +- +- relative_old_dir_path = +- hmdfs_get_dentry_relative_path(old_dentry->d_parent); +- relative_new_dir_path = +- hmdfs_get_dentry_relative_path(new_dentry->d_parent); +- if (!relative_old_dir_path || !relative_new_dir_path) { +- err = -EACCES; +- goto rename_out; +- } +- if (S_ISREG(old_dentry->d_inode->i_mode)) { +- hmdfs_debug("send MSG to remote devID %llu", +- con->device_id); +- err = hmdfs_client_start_rename( +- con, relative_old_dir_path, old_dentry_d_name, +- relative_new_dir_path, new_dentry_d_name, +- flags); +- if (!err) +- rename_in_cache_file(con->device_id, old_dentry, +- new_dentry); +- } else if (S_ISDIR(old_dentry->d_inode->i_mode)) { +- if (con->status == NODE_STAT_ONLINE) { +- ret = hmdfs_client_start_rename( +- con, relative_old_dir_path, old_dentry_d_name, +- relative_new_dir_path, new_dentry_d_name, +- flags); +- if (!ret) +- rename_in_cache_file(con->device_id, old_dentry, +- new_dentry); +- else +- err = ret; +- } +- } +- if (!err) +- d_invalidate(old_dentry); +-rename_out: +- kfree(relative_old_dir_path); +- kfree(relative_new_dir_path); +- return err; +-} +- +-static int hmdfs_dir_setattr_remote(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- // Do not support dir setattr +- return 0; +-} +- +-const struct inode_operations hmdfs_dev_dir_inode_ops_remote = { +- .lookup = hmdfs_lookup_remote, +- .mkdir = hmdfs_mkdir_remote, +- .create = hmdfs_create_remote, +- .rmdir = hmdfs_rmdir_remote, +- .unlink = hmdfs_unlink_remote, +- .rename = hmdfs_rename_remote, +- .setattr = hmdfs_dir_setattr_remote, +- .permission = hmdfs_permission, +-}; +- +-static int hmdfs_setattr_remote(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(d_inode(dentry)); +- struct hmdfs_peer *conn = info->conn; +- struct inode *inode = d_inode(dentry); +- char *send_buf = NULL; +- int err = 0; +- +- if (hmdfs_inode_is_stashing(info)) +- return -EAGAIN; +- +- send_buf = hmdfs_get_dentry_relative_path(dentry); +- if (!send_buf) { +- err = -ENOMEM; +- goto out_free; +- } +- if (ia->ia_valid & ATTR_SIZE) { +- err = inode_newsize_ok(inode, ia->ia_size); +- if (err) +- goto out_free; +- truncate_setsize(inode, ia->ia_size); +- info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- } +- if 
(ia->ia_valid & ATTR_MTIME) +- inode->i_mtime = ia->ia_mtime; +- +- if ((ia->ia_valid & ATTR_SIZE) || (ia->ia_valid & ATTR_MTIME)) { +- struct setattr_info send_setattr_info = { +- .size = cpu_to_le64(ia->ia_size), +- .valid = cpu_to_le32(ia->ia_valid), +- .mtime = cpu_to_le64(ia->ia_mtime.tv_sec), +- .mtime_nsec = cpu_to_le32(ia->ia_mtime.tv_nsec), +- }; +- err = hmdfs_send_setattr(conn, send_buf, &send_setattr_info); +- } +-out_free: +- kfree(send_buf); +- return err; +-} +- +-int hmdfs_remote_getattr(struct hmdfs_peer *conn, struct dentry *dentry, +- unsigned int lookup_flags, +- struct hmdfs_getattr_ret **result) +-{ +- char *send_buf = NULL; +- struct hmdfs_getattr_ret *attr = NULL; +- int err = 0; +- +- if (dentry->d_sb != conn->sbi->sb || !result) +- return -EINVAL; +- +- attr = kzalloc(sizeof(*attr), GFP_KERNEL); +- if (!attr) +- return -ENOMEM; +- +- send_buf = hmdfs_get_dentry_relative_path(dentry); +- if (!send_buf) { +- kfree(attr); +- return -ENOMEM; +- } +- +- err = hmdfs_send_getattr(conn, send_buf, lookup_flags, attr); +- kfree(send_buf); +- +- if (err) { +- kfree(attr); +- return err; +- } +- +- *result = attr; +- return 0; +-} +- +-static int hmdfs_get_cached_attr_remote(struct mnt_idmap *idmap, const struct path *path, +- struct kstat *stat, u32 request_mask, +- unsigned int flags) +-{ +- struct inode *inode = d_inode(path->dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- uint64_t size = info->getattr_isize; +- +- stat->ino = inode->i_ino; +- stat->mtime = inode->i_mtime; +- stat->mode = inode->i_mode; +- stat->uid.val = inode->i_uid.val; +- stat->gid.val = inode->i_gid.val; +- if (size == HMDFS_STALE_REMOTE_ISIZE) +- size = i_size_read(inode); +- +- stat->size = size; +- return 0; +-} +- +-ssize_t hmdfs_remote_listxattr(struct dentry *dentry, char *list, size_t size) +-{ +- struct inode *inode = d_inode(dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- struct hmdfs_peer *conn = info->conn; +- char *send_buf = NULL; +- ssize_t res = 0; +- size_t r_size = size; +- +- if (!hmdfs_support_xattr(dentry)) +- return -EOPNOTSUPP; +- +- if (size > HMDFS_LISTXATTR_SIZE_MAX) +- r_size = HMDFS_LISTXATTR_SIZE_MAX; +- +- send_buf = hmdfs_get_dentry_relative_path(dentry); +- if (!send_buf) +- return -ENOMEM; +- +- res = hmdfs_send_listxattr(conn, send_buf, list, r_size); +- kfree(send_buf); +- +- if (res == -ERANGE && r_size != size) { +- hmdfs_info("no support listxattr size over than %d", +- HMDFS_LISTXATTR_SIZE_MAX); +- res = -E2BIG; +- } +- +- return res; +-} +- +-const struct inode_operations hmdfs_dev_file_iops_remote = { +- .setattr = hmdfs_setattr_remote, +- .permission = hmdfs_permission, +- .getattr = hmdfs_get_cached_attr_remote, +- .listxattr = hmdfs_remote_listxattr, +-}; +diff --git a/fs/hmdfs/inode_root.c b/fs/hmdfs/inode_root.c +deleted file mode 100644 +index 988178f74..000000000 +--- a/fs/hmdfs/inode_root.c ++++ /dev/null +@@ -1,376 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/inode_root.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
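
The setattr path above illustrates the wire-format discipline used throughout these hunks: hmdfs_setattr_remote packs only the fields a peer must apply (size, mtime and the ATTR_* valid mask) into a structure whose members are all converted with cpu_to_le64()/cpu_to_le32(), so peers with different native byte orders agree on the layout. A minimal userspace sketch of the same packing idea; the struct, field names and values below are illustrative, not the actual hmdfs wire format:

    #include <endian.h>   /* htole64()/htole32(), provided by glibc and musl */
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative fixed-layout message; every field is little-endian. */
    struct wire_setattr {
        uint64_t size;       /* le64, like setattr_info.size */
        uint64_t mtime_sec;  /* le64 */
        uint32_t mtime_nsec; /* le32 */
        uint32_t valid;      /* le32 mask of fields the receiver applies */
    } __attribute__((packed));

    static void pack_setattr(struct wire_setattr *out, uint64_t size,
                             uint64_t sec, uint32_t nsec, uint32_t valid)
    {
        /* Convert from host order exactly once, at the protocol boundary. */
        out->size = htole64(size);
        out->mtime_sec = htole64(sec);
        out->mtime_nsec = htole32(nsec);
        out->valid = htole32(valid);
    }

    int main(void)
    {
        struct wire_setattr msg;

        pack_setattr(&msg, 4096, 1700000000, 0, 0x3);
        /* 4096 == 0x1000: the first byte on the wire is 0x00 on any host. */
        printf("0x%02x\n", ((unsigned char *)&msg)[0]);
        return 0;
    }
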
+- */ +- +-#include +-#include +-#include +- +-#include "authority/authentication.h" +-#include "comm/socket_adapter.h" +-#include "comm/transport.h" +-#include "hmdfs_dentryfile.h" +-#include "hmdfs_device_view.h" +-#include "hmdfs_merge_view.h" +-#include "hmdfs_trace.h" +- +-static struct inode *fill_device_local_inode(struct super_block *sb, +- struct inode *lower_inode) +-{ +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- if (!igrab(lower_inode)) +- return ERR_PTR(-ESTALE); +- +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_LOCAL, lower_inode, +- NULL); +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- iput(lower_inode); +- return ERR_PTR(-ENOMEM); +- } +- if (!(inode->i_state & I_NEW)) { +- iput(lower_inode); +- return inode; +- } +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_SECOND_LOCAL; +- +- inode->i_mode = +- (lower_inode->i_mode & S_IFMT) | S_IRWXU | S_IRWXG | S_IXOTH; +- +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- +- inode->i_atime = lower_inode->i_atime; +- inode->__i_ctime = lower_inode->__i_ctime; +- inode->i_mtime = lower_inode->i_mtime; +- +- inode->i_op = &hmdfs_dir_inode_ops_local; +- inode->i_fop = &hmdfs_dir_ops_local; +- +- fsstack_copy_inode_size(inode, lower_inode); +- unlock_new_inode(inode); +- return inode; +-} +- +-static struct inode *fill_device_inode_remote(struct super_block *sb, +- uint64_t dev_id) +-{ +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info = NULL; +- struct hmdfs_peer *con = NULL; +- +- con = hmdfs_lookup_from_devid(sb->s_fs_info, dev_id); +- if (!con) +- return ERR_PTR(-ENOENT); +- +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_REMOTE, NULL, con); +- if (!inode) { +- hmdfs_err("get inode NULL"); +- inode = ERR_PTR(-ENOMEM); +- goto out; +- } +- if (!(inode->i_state & I_NEW)) +- goto out; +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_SECOND_REMOTE; +- +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- inode->i_op = &hmdfs_dev_dir_inode_ops_remote; +- inode->i_fop = &hmdfs_dev_dir_ops_remote; +- +- unlock_new_inode(inode); +- +-out: +- peer_put(con); +- return inode; +-} +- +-static struct inode *fill_device_inode_cloud(struct super_block *sb) +-{ +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_CLOUD, NULL, NULL); +- if (!inode) { +- hmdfs_err("get inode NULL"); +- inode = ERR_PTR(-ENOMEM); +- goto out; +- } +- if (!(inode->i_state & I_NEW)) +- goto out; +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_SECOND_CLOUD; +- +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH; +- +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- inode->i_op = &hmdfs_dev_dir_inode_ops_cloud; +- inode->i_fop = &hmdfs_dev_dir_ops_cloud; +- +- unlock_new_inode(inode); +- +-out: +- return inode; +-} +- +-struct dentry *hmdfs_device_lookup(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- const char *d_name = child_dentry->d_name.name; +- struct inode *root_inode = NULL; +- struct super_block *sb = parent_inode->i_sb; +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- struct dentry *ret_dentry = NULL; +- int err = 0; +- struct hmdfs_peer *con = NULL; +- struct hmdfs_dentry_info *di = NULL; +- uint8_t *cid = NULL; +- struct path *root_lower_path = NULL; +- +- 
trace_hmdfs_device_lookup(parent_inode, child_dentry, flags); +- if (!strncmp(d_name, DEVICE_VIEW_LOCAL, +- sizeof(DEVICE_VIEW_LOCAL))) { +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_SECOND_LOCAL); +- if (err) { +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- di = hmdfs_d(sb->s_root); +- root_lower_path = &(di->lower_path); +- hmdfs_set_lower_path(child_dentry, root_lower_path); +- path_get(root_lower_path); +- root_inode = fill_device_local_inode( +- sb, d_inode(root_lower_path->dentry)); +- if (IS_ERR(root_inode)) { +- err = PTR_ERR(root_inode); +- ret_dentry = ERR_PTR(err); +- hmdfs_put_reset_lower_path(child_dentry); +- goto out; +- } +- ret_dentry = d_splice_alias(root_inode, child_dentry); +- if (IS_ERR(ret_dentry)) { +- err = PTR_ERR(ret_dentry); +- ret_dentry = ERR_PTR(err); +- hmdfs_put_reset_lower_path(child_dentry); +- goto out; +- } +- } else if (!strncmp(d_name, DEVICE_VIEW_CLOUD, +- sizeof(DEVICE_VIEW_CLOUD))) { +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_SECOND_CLOUD); +- if (err) { +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- di = hmdfs_d(sb->s_root); +- root_inode = fill_device_inode_cloud(sb); +- if (IS_ERR(root_inode)) { +- err = PTR_ERR(root_inode); +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- ret_dentry = d_splice_alias(root_inode, child_dentry); +- if (IS_ERR(ret_dentry)) { +- err = PTR_ERR(ret_dentry); +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- +- } else { +- err = init_hmdfs_dentry_info(sbi, child_dentry, +- HMDFS_LAYER_SECOND_REMOTE); +- di = hmdfs_d(child_dentry); +- if (err) { +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- cid = kzalloc(HMDFS_CID_SIZE + 1, GFP_KERNEL); +- if (!cid) { +- err = -ENOMEM; +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- strncpy(cid, d_name, HMDFS_CID_SIZE); +- cid[HMDFS_CID_SIZE] = '\0'; +- con = hmdfs_lookup_from_cid(sbi, cid); +- if (!con) { +- kfree(cid); +- err = -ENOENT; +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- di->device_id = con->device_id; +- root_inode = fill_device_inode_remote(sb, di->device_id); +- if (IS_ERR(root_inode)) { +- kfree(cid); +- err = PTR_ERR(root_inode); +- ret_dentry = ERR_PTR(err); +- goto out; +- } +- ret_dentry = d_splice_alias(root_inode, child_dentry); +- kfree(cid); +- } +- if (root_inode) +- hmdfs_root_inode_perm_init(root_inode); +- if (!err) +- hmdfs_set_time(child_dentry, jiffies); +-out: +- if (con) +- peer_put(con); +- trace_hmdfs_device_lookup_end(parent_inode, child_dentry, err); +- return ret_dentry; +-} +- +-struct dentry *hmdfs_root_lookup(struct inode *parent_inode, +- struct dentry *child_dentry, +- unsigned int flags) +-{ +- const char *d_name = child_dentry->d_name.name; +- struct inode *root_inode = NULL; +- struct super_block *sb = parent_inode->i_sb; +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- struct dentry *ret = ERR_PTR(-ENOENT); +- struct path root_path; +- +- trace_hmdfs_root_lookup(parent_inode, child_dentry, flags); +- if (sbi->s_merge_switch && !strcmp(d_name, MERGE_VIEW_ROOT)) { +- ret = hmdfs_lookup_merge(parent_inode, child_dentry, flags); +- if (ret && !IS_ERR(ret)) +- child_dentry = ret; +- root_inode = d_inode(child_dentry); +- } else if (sbi->s_merge_switch && !strcmp(d_name, CLOUD_MERGE_VIEW_ROOT)) { +- ret = hmdfs_lookup_cloud_merge(parent_inode, child_dentry, flags); +- if (ret && !IS_ERR(ret)) +- child_dentry = ret; +- root_inode = d_inode(child_dentry); +- } else if (!strcmp(d_name, DEVICE_VIEW_ROOT)) { +- ret = ERR_PTR(init_hmdfs_dentry_info( +- sbi, child_dentry, 
HMDFS_LAYER_FIRST_DEVICE)); +- if (IS_ERR(ret)) +- goto out; +- ret = ERR_PTR(kern_path(sbi->local_src, 0, &root_path)); +- if (IS_ERR(ret)) +- goto out; +- root_inode = fill_device_inode(sb, d_inode(root_path.dentry)); +- ret = d_splice_alias(root_inode, child_dentry); +- path_put(&root_path); +- } +- if (!IS_ERR(ret) && root_inode) +- hmdfs_root_inode_perm_init(root_inode); +- +-out: +- trace_hmdfs_root_lookup_end(parent_inode, child_dentry, +- PTR_ERR_OR_ZERO(ret)); +- return ret; +-} +- +-const struct inode_operations hmdfs_device_ops = { +- .lookup = hmdfs_device_lookup, +-}; +- +-const struct inode_operations hmdfs_root_ops = { +- .lookup = hmdfs_root_lookup, +-}; +- +-struct inode *fill_device_inode(struct super_block *sb, +- struct inode *lower_inode) +-{ +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV, NULL, NULL); +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- return ERR_PTR(-ENOMEM); +- } +- if (!(inode->i_state & I_NEW)) +- return inode; +- +- info = hmdfs_i(inode); +- info->inode_type = HMDFS_LAYER_FIRST_DEVICE; +- +- inode->i_atime = lower_inode->i_atime; +- inode->__i_ctime = lower_inode->__i_ctime; +- inode->i_mtime = lower_inode->i_mtime; +- +- inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR | +- S_IRGRP | S_IXGRP | S_IXOTH; +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +- inode->i_op = &hmdfs_device_ops; +- inode->i_fop = &hmdfs_device_fops; +- +- fsstack_copy_inode_size(inode, lower_inode); +- unlock_new_inode(inode); +- return inode; +-} +- +-struct inode *fill_root_inode(struct super_block *sb, struct hmdfs_sb_info *sbi, struct inode *lower_inode) +-{ +- struct inode *inode = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- if (!igrab(lower_inode)) +- return ERR_PTR(-ESTALE); +- +- if (sbi->s_cloud_disk_switch) { +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_LOCAL, lower_inode, +- NULL); +- } else { +- inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_ANCESTOR, lower_inode, +- NULL); +- } +- +- if (!inode) { +- hmdfs_err("iget5_locked get inode NULL"); +- iput(lower_inode); +- return ERR_PTR(-ENOMEM); +- } +- if (!(inode->i_state & I_NEW)) { +- iput(lower_inode); +- return inode; +- } +- +- info = hmdfs_i(inode); +- if (sbi->s_cloud_disk_switch) { +- info->inode_type = HMDFS_LAYER_SECOND_LOCAL; +- inode->i_op = &hmdfs_dir_inode_ops_local; +- inode->i_fop = &hmdfs_dir_ops_local; +- } else { +- info->inode_type = HMDFS_LAYER_ZERO; +- inode->i_op = &hmdfs_root_ops; +- inode->i_fop = &hmdfs_root_fops; +- } +- inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR | +- S_IRGRP | S_IXGRP | S_IXOTH; +- +-#ifdef CONFIG_HMDFS_FS_PERMISSION +- inode->i_uid = lower_inode->i_uid; +- inode->i_gid = lower_inode->i_gid; +-#else +- inode->i_uid = KUIDT_INIT((uid_t)1000); +- inode->i_gid = KGIDT_INIT((gid_t)1000); +-#endif +- inode->i_atime = lower_inode->i_atime; +- inode->__i_ctime = lower_inode->__i_ctime; +- inode->i_mtime = lower_inode->i_mtime; +- +- fsstack_copy_inode_size(inode, lower_inode); +- unlock_new_inode(inode); +- return inode; +-} +diff --git a/fs/hmdfs/main.c b/fs/hmdfs/main.c +deleted file mode 100644 +index 7e0952990..000000000 +--- a/fs/hmdfs/main.c ++++ /dev/null +@@ -1,1134 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/main.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. 
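
hmdfs_device_lookup() above dispatches purely on the child name: the two reserved names resolve to the local and cloud device roots, and any other name is copied into a HMDFS_CID_SIZE buffer and resolved to a peer via hmdfs_lookup_from_cid(). A condensed userspace sketch of that dispatch, assuming the reserved strings are "local" and "cloud" (the actual values of DEVICE_VIEW_LOCAL and DEVICE_VIEW_CLOUD are defined elsewhere in the tree):

    #include <stdio.h>
    #include <string.h>

    enum view_kind { VIEW_LOCAL, VIEW_CLOUD, VIEW_REMOTE_CID };

    /* Mirrors the lookup order above: fixed names first, then treat the
     * name as a device CID. */
    static enum view_kind classify(const char *name)
    {
        if (strcmp(name, "local") == 0)
            return VIEW_LOCAL;
        if (strcmp(name, "cloud") == 0)
            return VIEW_CLOUD;
        return VIEW_REMOTE_CID;   /* would be resolved by CID lookup */
    }

    int main(void)
    {
        const char *names[] = { "local", "cloud", "f00dcafe0123" };

        for (int i = 0; i < 3; i++)
            printf("%s -> %d\n", names[i], classify(names[i]));
        return 0;
    }
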
+- */ +- +- +-#include "hmdfs.h" +- +-#include +-#include +-#include +-#include +-#include +-#if KERNEL_VERSION(5, 9, 0) < LINUX_VERSION_CODE +-#include +-#else +-#include +-#endif +- +-#include "authority/authentication.h" +-#include "hmdfs_server.h" +-#include "comm/device_node.h" +-#include "comm/message_verify.h" +-#include "comm/protocol.h" +-#include "comm/socket_adapter.h" +-#include "hmdfs_merge_view.h" +-#include "server_writeback.h" +-#include "hmdfs_share.h" +- +-#include "comm/node_cb.h" +-#include "stash.h" +- +-#define CREATE_TRACE_POINTS +-#include "hmdfs_trace.h" +- +-#define HMDFS_BOOT_COOKIE_RAND_SHIFT 33 +- +-#define HMDFS_SB_SEQ_FROM 1 +- +-struct hmdfs_mount_priv { +- const char *dev_name; +- const char *raw_data; +-}; +- +-struct syncfs_item { +- struct list_head list; +- struct completion done; +- bool need_abort; +-}; +- +-static DEFINE_IDA(hmdfs_sb_seq); +- +-static inline int hmdfs_alloc_sb_seq(void) +-{ +- return ida_simple_get(&hmdfs_sb_seq, HMDFS_SB_SEQ_FROM, 0, GFP_KERNEL); +-} +- +-static inline void hmdfs_free_sb_seq(unsigned int seq) +-{ +- if (!seq) +- return; +- ida_simple_remove(&hmdfs_sb_seq, seq); +-} +- +-static int hmdfs_xattr_local_get(struct dentry *dentry, const char *name, +- void *value, size_t size) +-{ +- struct path lower_path; +- ssize_t res = 0; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- res = vfs_getxattr(&nop_mnt_idmap, lower_path.dentry, name, value, size); +- hmdfs_put_lower_path(&lower_path); +- return res; +-} +- +-static int hmdfs_xattr_remote_get(struct dentry *dentry, const char *name, +- void *value, size_t size) +-{ +- struct inode *inode = d_inode(dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- struct hmdfs_peer *conn = info->conn; +- char *send_buf = NULL; +- ssize_t res = 0; +- +- send_buf = hmdfs_get_dentry_relative_path(dentry); +- if (!send_buf) +- return -ENOMEM; +- +- res = hmdfs_send_getxattr(conn, send_buf, name, value, size); +- kfree(send_buf); +- return res; +-} +- +-static int hmdfs_xattr_merge_get(struct dentry *dentry, const char *name, +- void *value, size_t size) +-{ +- int err = 0; +- struct dentry *lower_dentry = hmdfs_get_lo_d(dentry, HMDFS_DEVID_LOCAL); +- +- if (!lower_dentry) { +- err = -EOPNOTSUPP; +- goto out; +- } +- err = hmdfs_xattr_local_get(lower_dentry, name, value, size); +-out: +- dput(lower_dentry); +- return err; +-} +- +-static int hmdfs_xattr_get(const struct xattr_handler *handler, +- struct dentry *dentry, struct inode *inode, +- const char *name, void *value, size_t size) +-{ +- int res = 0; +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- size_t r_size = size; +- +- if (!hmdfs_support_xattr(dentry)) +- return -EOPNOTSUPP; +- +- if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) +- return -EOPNOTSUPP; +- +- if (size > HMDFS_XATTR_SIZE_MAX) +- r_size = HMDFS_XATTR_SIZE_MAX; +- +- if (info->inode_type == HMDFS_LAYER_OTHER_LOCAL) +- res = hmdfs_xattr_local_get(dentry, name, value, r_size); +- else if (info->inode_type == HMDFS_LAYER_OTHER_REMOTE) +- res = hmdfs_xattr_remote_get(dentry, name, value, r_size); +- else if (info->inode_type == HMDFS_LAYER_OTHER_MERGE || +- info->inode_type == HMDFS_LAYER_OTHER_MERGE_CLOUD) +- res = hmdfs_xattr_merge_get(dentry, name, value, r_size); +- else +- res = -EOPNOTSUPP; +- +- if (res == -ERANGE && r_size != size) { +- hmdfs_info("no support xattr value size over than: %d", +- HMDFS_XATTR_SIZE_MAX); +- res = -E2BIG; +- } +- +- return res; +-} +- +-static int hmdfs_xattr_local_set(struct dentry *dentry, const char 
*name, +- const void *value, size_t size, int flags) +-{ +- struct path lower_path; +- int res = 0; +- +- hmdfs_get_lower_path(dentry, &lower_path); +- kuid_t tmp_uid = hmdfs_override_inode_uid(d_inode(lower_path.dentry)); +- if (value) { +- res = vfs_setxattr(&nop_mnt_idmap, lower_path.dentry, name, value, size, flags); +- } else { +- WARN_ON(flags != XATTR_REPLACE); +- res = vfs_removexattr(&nop_mnt_idmap, lower_path.dentry, name); +- } +- hmdfs_revert_inode_uid(d_inode(lower_path.dentry), tmp_uid); +- +- hmdfs_put_lower_path(&lower_path); +- return res; +-} +- +-static int hmdfs_xattr_remote_set(struct dentry *dentry, const char *name, +- const void *value, size_t size, int flags) +-{ +- struct inode *inode = d_inode(dentry); +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- struct hmdfs_peer *conn = info->conn; +- char *send_buf = NULL; +- int res = 0; +- +- send_buf = hmdfs_get_dentry_relative_path(dentry); +- if (!send_buf) +- return -ENOMEM; +- +- res = hmdfs_send_setxattr(conn, send_buf, name, value, size, flags); +- kfree(send_buf); +- return res; +-} +- +-static int hmdfs_xattr_merge_set(struct dentry *dentry, const char *name, +- const void *value, size_t size, int flags) +-{ +- int err = 0; +- struct dentry *lower_dentry = hmdfs_get_lo_d(dentry, HMDFS_DEVID_LOCAL); +- +- if (!lower_dentry) { +- err = -EOPNOTSUPP; +- goto out; +- } +- err = hmdfs_xattr_local_set(lower_dentry, name, value, size, flags); +-out: +- dput(lower_dentry); +- return err; +-} +- +-static int hmdfs_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, +- struct dentry *dentry, struct inode *inode, +- const char *name, const void *value, +- size_t size, int flags) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- if (!hmdfs_support_xattr(dentry)) +- return -EOPNOTSUPP; +- +- if (size > HMDFS_XATTR_SIZE_MAX) { +- hmdfs_info("no support too long xattr value: %zu", size); +- return -E2BIG; +- } +- +- if (info->inode_type == HMDFS_LAYER_OTHER_LOCAL) +- return hmdfs_xattr_local_set(dentry, name, value, size, flags); +- else if (info->inode_type == HMDFS_LAYER_OTHER_REMOTE) +- return hmdfs_xattr_remote_set(dentry, name, value, size, flags); +- else if (info->inode_type == HMDFS_LAYER_OTHER_MERGE || +- info->inode_type == HMDFS_LAYER_OTHER_MERGE_CLOUD) +- return hmdfs_xattr_merge_set(dentry, name, value, size, flags); +- +- return -EOPNOTSUPP; +-} +- +-const struct xattr_handler hmdfs_xattr_handler = { +- .prefix = "", /* catch all */ +- .get = hmdfs_xattr_get, +- .set = hmdfs_xattr_set, +-}; +- +-static const struct xattr_handler *hmdfs_xattr_handlers[] = { +- &hmdfs_xattr_handler, +-}; +- +-#define HMDFS_NODE_EVT_CB_DELAY 2 +- +-struct kmem_cache *hmdfs_inode_cachep; +-struct kmem_cache *hmdfs_dentry_cachep; +- +-static void i_callback(struct rcu_head *head) +-{ +- struct inode *inode = container_of(head, struct inode, i_rcu); +- +- kmem_cache_free(hmdfs_inode_cachep, +- container_of(inode, struct hmdfs_inode_info, +- vfs_inode)); +-} +- +-static void hmdfs_destroy_inode(struct inode *inode) +-{ +- call_rcu(&inode->i_rcu, i_callback); +-} +- +-static void hmdfs_evict_inode(struct inode *inode) +-{ +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- +- truncate_inode_pages(&inode->i_data, 0); +- clear_inode(inode); +- if (info->inode_type == HMDFS_LAYER_FIRST_DEVICE || +- info->inode_type == HMDFS_LAYER_SECOND_REMOTE) +- return; +- if (info->inode_type == HMDFS_LAYER_ZERO || +- info->inode_type == HMDFS_LAYER_OTHER_LOCAL || +- info->inode_type == HMDFS_LAYER_SECOND_LOCAL) { +- 
iput(info->lower_inode); +- info->lower_inode = NULL; +- } +-} +- +-void hmdfs_put_super(struct super_block *sb) +-{ +- struct hmdfs_sb_info *sbi = hmdfs_sb(sb); +- struct super_block *lower_sb = sbi->lower_sb; +- +- hmdfs_info("local_dst is %s, local_src is %s", sbi->local_dst, +- sbi->local_src); +- +- hmdfs_cfn_destroy(sbi); +- hmdfs_unregister_sysfs(sbi); +- hmdfs_connections_stop(sbi); +- hmdfs_clear_share_table(sbi); +- hmdfs_destroy_server_writeback(sbi); +- hmdfs_exit_stash(sbi); +- atomic_dec(&lower_sb->s_active); +- put_cred(sbi->cred); +- if (sbi->system_cred) +- put_cred(sbi->system_cred); +- hmdfs_destroy_writeback(sbi); +- kfree(sbi->local_src); +- kfree(sbi->local_dst); +- kfree(sbi->real_dst); +- kfree(sbi->cache_dir); +- kfree(sbi->cloud_dir); +- kfifo_free(&sbi->notify_fifo); +- sb->s_fs_info = NULL; +- sbi->lower_sb = NULL; +- hmdfs_release_sysfs(sbi); +- /* After all access are completed */ +- hmdfs_free_sb_seq(sbi->seq); +- kfree(sbi->s_server_statis); +- kfree(sbi->s_client_statis); +- kfree(sbi); +-} +- +-static struct inode *hmdfs_alloc_inode(struct super_block *sb) +-{ +- struct hmdfs_inode_info *gi = +- kmem_cache_alloc(hmdfs_inode_cachep, GFP_KERNEL); +- if (!gi) +- return NULL; +- memset(gi, 0, offsetof(struct hmdfs_inode_info, vfs_inode)); +- INIT_LIST_HEAD(&gi->wb_list); +- init_rwsem(&gi->wpage_sem); +- gi->getattr_isize = HMDFS_STALE_REMOTE_ISIZE; +- atomic64_set(&gi->write_counter, 0); +- gi->fid.id = HMDFS_INODE_INVALID_FILE_ID; +- spin_lock_init(&gi->fid_lock); +- INIT_LIST_HEAD(&gi->wr_opened_node); +- atomic_set(&gi->wr_opened_cnt, 0); +- init_waitqueue_head(&gi->fid_wq); +- INIT_LIST_HEAD(&gi->stash_node); +- spin_lock_init(&gi->stash_lock); +- return &gi->vfs_inode; +-} +- +-static int hmdfs_remote_statfs(struct dentry *dentry, struct kstatfs *buf) +-{ +- int error = 0; +- int ret = 0; +- char *dir_path = NULL; +- char *name_path = NULL; +- struct hmdfs_peer *con = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(dentry->d_inode->i_sb); +- +- dir_path = hmdfs_get_dentry_relative_path(dentry->d_parent); +- if (!dir_path) { +- error = -EACCES; +- goto rmdir_out; +- } +- +- name_path = hmdfs_connect_path(dir_path, dentry->d_name.name); +- if (!name_path) { +- error = -EACCES; +- goto rmdir_out; +- } +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(con, &sbi->connections.node_list, list) { +- if (con->status == NODE_STAT_ONLINE) { +- peer_get(con); +- mutex_unlock(&sbi->connections.node_lock); +- hmdfs_debug("send MSG to remote devID %llu", +- con->device_id); +- ret = hmdfs_send_statfs(con, name_path, buf); +- if (ret != 0) +- error = ret; +- peer_put(con); +- mutex_lock(&sbi->connections.node_lock); +- } +- } +- mutex_unlock(&sbi->connections.node_lock); +- +-rmdir_out: +- kfree(dir_path); +- kfree(name_path); +- return error; +-} +- +-static int hmdfs_statfs(struct dentry *dentry, struct kstatfs *buf) +-{ +- int err = 0; +- struct path lower_path; +- struct hmdfs_inode_info *info = hmdfs_i(dentry->d_inode); +- struct super_block *sb = d_inode(dentry)->i_sb; +- struct hmdfs_sb_info *sbi = sb->s_fs_info; +- +- trace_hmdfs_statfs(dentry, info->inode_type); +- // merge_view & merge_view/xxx & device_view assigned src_inode info +- if (hmdfs_i_merge(info) || +- (info->inode_type == HMDFS_LAYER_SECOND_REMOTE)) { +- err = kern_path(sbi->local_src, 0, &lower_path); +- if (err) +- goto out; +- err = vfs_statfs(&lower_path, buf); +- path_put(&lower_path); +- } else if (!IS_ERR_OR_NULL(info->lower_inode)) { +- hmdfs_get_lower_path(dentry, 
&lower_path); +- err = vfs_statfs(&lower_path, buf); +- hmdfs_put_lower_path(&lower_path); +- } else { +- err = hmdfs_remote_statfs(dentry, buf); +- } +- +- buf->f_type = HMDFS_SUPER_MAGIC; +-out: +- return err; +-} +- +-static int hmdfs_show_options(struct seq_file *m, struct dentry *root) +-{ +- struct hmdfs_sb_info *sbi = hmdfs_sb(root->d_sb); +- +- if (sbi->s_case_sensitive) +- seq_puts(m, ",sensitive"); +- else +- seq_puts(m, ",insensitive"); +- +- if (sbi->s_merge_switch) +- seq_puts(m, ",merge_enable"); +- else +- seq_puts(m, ",merge_disable"); +- +- seq_printf(m, ",ra_pages=%lu", root->d_sb->s_bdi->ra_pages); +- seq_printf(m, ",user_id=%u", sbi->user_id); +- +- if (sbi->cache_dir) +- seq_printf(m, ",cache_dir=%s", sbi->cache_dir); +- if (sbi->real_dst) +- seq_printf(m, ",real_dst=%s", sbi->real_dst); +- if (sbi->cloud_dir) +- seq_printf(m, ",cloud_dir=%s", sbi->cloud_dir); +- +- seq_printf(m, ",%soffline_stash", sbi->s_offline_stash ? "" : "no_"); +- seq_printf(m, ",%sdentry_cache", sbi->s_dentry_cache ? "" : "no_"); +- +- return 0; +-} +- +-static int hmdfs_sync_fs(struct super_block *sb, int wait) +-{ +- int time_left; +- int err = 0; +- struct hmdfs_peer *con = NULL; +- struct hmdfs_sb_info *sbi = hmdfs_sb(sb); +- int syncfs_timeout = get_cmd_timeout(sbi, F_SYNCFS); +- struct syncfs_item item, *entry = NULL, *tmp = NULL; +- +- if (!wait) +- return 0; +- +- trace_hmdfs_syncfs_enter(sbi); +- +- spin_lock(&sbi->hsi.list_lock); +- if (!sbi->hsi.is_executing) { +- sbi->hsi.is_executing = true; +- item.need_abort = false; +- spin_unlock(&sbi->hsi.list_lock); +- } else { +- init_completion(&item.done); +- list_add_tail(&item.list, &sbi->hsi.wait_list); +- spin_unlock(&sbi->hsi.list_lock); +- wait_for_completion(&item.done); +- } +- +- if (item.need_abort) +- goto out; +- +- /* +- * Syncfs can not concurrent in hmdfs_sync_fs. Because we should make +- * sure all remote syncfs calls return back or timeout by waiting, +- * during the waiting period we must protect @sbi->remote_syncfs_count +- * and @sbi->remote_syncfs_ret from concurrent executing. +- */ +- +- spin_lock(&sbi->hsi.v_lock); +- sbi->hsi.version++; +- /* +- * Attention: We put @sbi->hsi.remote_ret and @sbi->hsi.wait_count +- * into spinlock protection area to avoid following scenario caused +- * by out-of-order execution: +- * +- * synfs syncfs_cb +- * sbi->hsi.remote_ret = 0; +- * atomic_set(&sbi->hsi.wait_count, 0); +- * lock +- * version == old_version +- * sbi->hsi.remote_ret = resp->ret_code +- * atomic_dec(&sbi->hsi.wait_count); +- * unlock +- * lock +- * version = old_version + 1 +- * unlock +- * +- * @sbi->hsi.remote_ret and @sbi->hsi.wait_count can be assigned +- * before spin lock which may compete with syncfs_cb(), making +- * these two values' assignment protected by spinlock can fix this. +- */ +- sbi->hsi.remote_ret = 0; +- atomic_set(&sbi->hsi.wait_count, 0); +- spin_unlock(&sbi->hsi.v_lock); +- +- mutex_lock(&sbi->connections.node_lock); +- list_for_each_entry(con, &sbi->connections.node_list, list) { +- /* +- * Dirty data does not need to be synchronized to remote +- * devices that go offline normally. It's okay to drop +- * them. +- */ +- if (con->status != NODE_STAT_ONLINE) +- continue; +- +- peer_get(con); +- mutex_unlock(&sbi->connections.node_lock); +- +- /* +- * There exists a gap between sync_inodes_sb() and sync_fs() +- * which may race with remote writing, leading error count +- * on @sb_dirty_count. The dirty data produced during the +- * gap period won't be synced in next syncfs operation. 
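
The comments above describe a scatter/gather barrier: bump hsi.version under a spinlock, send one syncfs request per online peer while incrementing hsi.wait_count, then sleep until the response callbacks have driven the counter back to zero, keeping the first remote error. A small pthreads analogue of that counting protocol; the thread bodies, the sleep and the -5 error value are stand-ins, not hmdfs code:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static int wait_count;   /* outstanding replies, like hsi.wait_count */
    static int remote_ret;   /* first error any peer reported */

    /* One response callback per peer, possibly concurrent. */
    static void *peer_reply(void *arg)
    {
        int err = (int)(long)arg;

        usleep(1000);   /* pretend the peer took a moment */
        pthread_mutex_lock(&lock);
        if (err && !remote_ret)
            remote_ret = err;
        if (--wait_count == 0)
            pthread_cond_signal(&done);   /* last reply wakes the waiter */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[3];

        wait_count = 3;   /* one request per online peer */
        for (long i = 0; i < 3; i++)
            pthread_create(&t[i], NULL, peer_reply,
                           (void *)(i == 1 ? -5L : 0L));

        pthread_mutex_lock(&lock);
        while (wait_count)   /* like waiting for hsi.wait_count == 0 */
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        printf("aggregate syncfs result: %d\n", remote_ret);
        for (int i = 0; i < 3; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
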
+- * To avoid this, we have to invoke sync_inodes_sb() again +- * after getting @con->sb_dirty_count. +- */ +- con->old_sb_dirty_count = atomic64_read(&con->sb_dirty_count); +- sync_inodes_sb(sb); +- +- if (!con->old_sb_dirty_count) { +- peer_put(con); +- mutex_lock(&sbi->connections.node_lock); +- continue; +- } +- +- err = hmdfs_send_syncfs(con, syncfs_timeout); +- if (err) { +- hmdfs_warning("send syncfs failed with %d on node %llu", +- err, con->device_id); +- sbi->hsi.remote_ret = err; +- peer_put(con); +- mutex_lock(&sbi->connections.node_lock); +- continue; +- } +- +- atomic_inc(&sbi->hsi.wait_count); +- +- peer_put(con); +- mutex_lock(&sbi->connections.node_lock); +- } +- mutex_unlock(&sbi->connections.node_lock); +- +- /* +- * Async work in background will make sure @sbi->remote_syncfs_count +- * decreased to zero finally whether syncfs success or fail. +- */ +- time_left = wait_event_interruptible( +- sbi->hsi.wq, atomic_read(&sbi->hsi.wait_count) == 0); +- if (time_left < 0) { +- hmdfs_warning("syncfs is interrupted by external signal"); +- err = -EINTR; +- } +- +- if (!err && sbi->hsi.remote_ret) +- err = sbi->hsi.remote_ret; +- +- /* Abandon syncfs processes in pending_list */ +- list_for_each_entry_safe(entry, tmp, &sbi->hsi.pending_list, list) { +- entry->need_abort = true; +- complete(&entry->done); +- } +- INIT_LIST_HEAD(&sbi->hsi.pending_list); +- +- /* Pick the last syncfs process in wait_list */ +- spin_lock(&sbi->hsi.list_lock); +- if (list_empty(&sbi->hsi.wait_list)) { +- sbi->hsi.is_executing = false; +- } else { +- entry = list_last_entry(&sbi->hsi.wait_list, struct syncfs_item, +- list); +- list_del_init(&entry->list); +- list_splice_init(&sbi->hsi.wait_list, &sbi->hsi.pending_list); +- entry->need_abort = false; +- complete(&entry->done); +- } +- spin_unlock(&sbi->hsi.list_lock); +- +-out: +- trace_hmdfs_syncfs_exit(sbi, atomic_read(&sbi->hsi.wait_count), +- get_cmd_timeout(sbi, F_SYNCFS), err); +- +- /* TODO: Return synfs err back to syscall */ +- +- return err; +-} +- +-struct super_operations hmdfs_sops = { +- .alloc_inode = hmdfs_alloc_inode, +- .destroy_inode = hmdfs_destroy_inode, +- .evict_inode = hmdfs_evict_inode, +- .put_super = hmdfs_put_super, +- .statfs = hmdfs_statfs, +- .show_options = hmdfs_show_options, +- .sync_fs = hmdfs_sync_fs, +-}; +- +-static void init_once(void *obj) +-{ +- struct hmdfs_inode_info *i = obj; +- +- inode_init_once(&i->vfs_inode); +-} +- +-static int __init hmdfs_init_caches(void) +-{ +- int err = -ENOMEM; +- +- hmdfs_inode_cachep = +- kmem_cache_create("hmdfs_inode_cache", +- sizeof(struct hmdfs_inode_info), 0, +- SLAB_RECLAIM_ACCOUNT, init_once); +- if (unlikely(!hmdfs_inode_cachep)) +- goto out; +- hmdfs_dentry_cachep = +- kmem_cache_create("hmdfs_dentry_cache", +- sizeof(struct hmdfs_dentry_info), 0, +- SLAB_RECLAIM_ACCOUNT, NULL); +- if (unlikely(!hmdfs_dentry_cachep)) +- goto out_des_ino; +- hmdfs_dentry_merge_cachep = +- kmem_cache_create("hmdfs_dentry_merge_cache", +- sizeof(struct hmdfs_dentry_info_merge), 0, +- SLAB_RECLAIM_ACCOUNT, NULL); +- if (unlikely(!hmdfs_dentry_merge_cachep)) +- goto out_des_dc; +- return 0; +- +-out_des_dc: +- kmem_cache_destroy(hmdfs_dentry_cachep); +-out_des_ino: +- kmem_cache_destroy(hmdfs_inode_cachep); +-out: +- return err; +-} +- +-static void hmdfs_destroy_caches(void) +-{ +- rcu_barrier(); +- kmem_cache_destroy(hmdfs_inode_cachep); +- hmdfs_inode_cachep = NULL; +- kmem_cache_destroy(hmdfs_dentry_cachep); +- hmdfs_dentry_cachep = NULL; +- kmem_cache_destroy(hmdfs_dentry_merge_cachep); 
+- hmdfs_dentry_merge_cachep = NULL; +-} +- +-uint64_t path_hash(const char *path, int len, bool case_sense) +-{ +- uint64_t res = 0; +- const char *kp = path; +- char c; +- /* Mocklisp hash function. */ +- while (*kp) { +- c = *kp; +- if (!case_sense) +- c = tolower(c); +- res = (res << 5) - res + (uint64_t)(c); +- kp++; +- } +- return res; +-} +- +-static char *get_full_path(struct path *path) +-{ +- char *buf, *tmp; +- char *ret = NULL; +- +- buf = kmalloc(PATH_MAX, GFP_KERNEL); +- if (!buf) +- goto out; +- +- tmp = d_path(path, buf, PATH_MAX); +- if (IS_ERR(tmp)) +- goto out; +- +- ret = kstrdup(tmp, GFP_KERNEL); +-out: +- kfree(buf); +- return ret; +-} +- +-static void hmdfs_init_cmd_timeout(struct hmdfs_sb_info *sbi) +-{ +- memset(sbi->s_cmd_timeout, 0xff, sizeof(sbi->s_cmd_timeout)); +- +- set_cmd_timeout(sbi, F_OPEN, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_RELEASE, TIMEOUT_NONE); +- set_cmd_timeout(sbi, F_READPAGE, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_WRITEPAGE, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_ITERATE, TIMEOUT_30S); +- set_cmd_timeout(sbi, F_CREATE, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_MKDIR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_RMDIR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_UNLINK, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_RENAME, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_SETATTR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_STATFS, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_CONNECT_REKEY, TIMEOUT_NONE); +- set_cmd_timeout(sbi, F_DROP_PUSH, TIMEOUT_NONE); +- set_cmd_timeout(sbi, F_GETATTR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_FSYNC, TIMEOUT_90S); +- set_cmd_timeout(sbi, F_SYNCFS, TIMEOUT_30S); +- set_cmd_timeout(sbi, F_GETXATTR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_SETXATTR, TIMEOUT_COMMON); +- set_cmd_timeout(sbi, F_LISTXATTR, TIMEOUT_COMMON); +-} +- +-static int hmdfs_init_sbi(struct hmdfs_sb_info *sbi) +-{ +- int ret; +- +- ret = kfifo_alloc(&sbi->notify_fifo, PAGE_SIZE, GFP_KERNEL); +- if (ret) +- goto out; +- +- /* +- * We have to use dynamic memory since struct server/client_statistic +- * are DECLARED in hmdfs.h but DEFINED in socket_adapter.h. 
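
path_hash() above is the classic multiply-by-31 string hash: (res << 5) - res is just res * 31, and when case_sense is false every character is folded to lower case, so a case-insensitive mount hashes "Foo" and "foo" identically. A standalone check of both properties (hash31 is a hypothetical name for the same recurrence):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same recurrence as path_hash(): res = res * 31 + c. */
    static uint64_t hash31(const char *s, bool case_sense)
    {
        uint64_t res = 0;

        for (; *s; s++) {
            unsigned char c = (unsigned char)*s;

            if (!case_sense)
                c = (unsigned char)tolower(c);
            res = (res << 5) - res + c;   /* identical to res * 31 + c */
        }
        return res;
    }

    int main(void)
    {
        /* Prints 1 (collision when insensitive), then 0 (distinct). */
        printf("%d\n", hash31("Foo/Bar", false) == hash31("foo/bar", false));
        printf("%d\n", hash31("Foo/Bar", true) == hash31("foo/bar", true));
        return 0;
    }
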
+- */ +- sbi->s_server_statis = +- kzalloc(sizeof(*sbi->s_server_statis) * F_SIZE, GFP_KERNEL); +- sbi->s_client_statis = +- kzalloc(sizeof(*sbi->s_client_statis) * F_SIZE, GFP_KERNEL); +- if (!sbi->s_server_statis || !sbi->s_client_statis) { +- ret = -ENOMEM; +- goto out; +- } +- +- ret = hmdfs_alloc_sb_seq(); +- if (ret < 0) { +- hmdfs_err("no sb seq available err %d", ret); +- goto out; +- } +- sbi->seq = ret; +- ret = 0; +- +- spin_lock_init(&sbi->notify_fifo_lock); +- mutex_init(&sbi->cmd_handler_mutex); +- sbi->s_case_sensitive = false; +- sbi->s_features = HMDFS_FEATURE_READPAGES | +- HMDFS_FEATURE_READPAGES_OPEN | +- HMDFS_ATOMIC_OPEN; +- sbi->s_merge_switch = false; +- sbi->s_cloud_disk_switch = false; +- sbi->dcache_threshold = DEFAULT_DCACHE_THRESHOLD; +- sbi->dcache_precision = DEFAULT_DCACHE_PRECISION; +- sbi->dcache_timeout = DEFAULT_DCACHE_TIMEOUT; +- sbi->write_cache_timeout = DEFAULT_WRITE_CACHE_TIMEOUT; +- hmdfs_init_cmd_timeout(sbi); +- sbi->async_cb_delay = HMDFS_NODE_EVT_CB_DELAY; +- sbi->async_req_max_active = DEFAULT_SRV_REQ_MAX_ACTIVE; +- sbi->s_offline_stash = true; +- sbi->s_dentry_cache = true; +- sbi->wb_timeout_ms = HMDFS_DEF_WB_TIMEOUT_MS; +- sbi->s_readpages_nr = HMDFS_READPAGES_NR_DEF; +- /* Initialize before hmdfs_register_sysfs() */ +- atomic_set(&sbi->connections.conn_seq, 0); +- mutex_init(&sbi->connections.node_lock); +- INIT_LIST_HEAD(&sbi->connections.node_list); +- +- ret = hmdfs_init_share_table(sbi); +- if (ret) +- goto out; +- init_waitqueue_head(&sbi->async_readdir_wq); +- INIT_LIST_HEAD(&sbi->async_readdir_msg_list); +- INIT_LIST_HEAD(&sbi->async_readdir_work_list); +- spin_lock_init(&sbi->async_readdir_msg_lock); +- spin_lock_init(&sbi->async_readdir_work_lock); +- +- return 0; +- +-out: +- return ret; +-} +- +-void hmdfs_client_resp_statis(struct hmdfs_sb_info *sbi, u8 cmd, +- enum hmdfs_resp_type type, unsigned long start, +- unsigned long end) +-{ +- unsigned long duration; +- +- switch (type) { +- case HMDFS_RESP_DELAY: +- sbi->s_client_statis[cmd].delay_resp_cnt++; +- break; +- case HMDFS_RESP_TIMEOUT: +- sbi->s_client_statis[cmd].timeout_cnt++; +- break; +- case HMDFS_RESP_NORMAL: +- duration = end - start; +- sbi->s_client_statis[cmd].total += duration; +- sbi->s_client_statis[cmd].resp_cnt++; +- if (sbi->s_client_statis[cmd].max < duration) +- sbi->s_client_statis[cmd].max = duration; +- break; +- default: +- hmdfs_err("Wrong cmd %d with resp type %d", cmd, type); +- } +-} +- +-static int hmdfs_update_dst(struct hmdfs_sb_info *sbi) +-{ +- int err = 0; +- const char *path_local = UPDATE_LOCAL_DST; +- int len = 0; +- +- sbi->real_dst = kstrdup(sbi->local_dst, GFP_KERNEL); +- if (!sbi->real_dst) { +- err = -ENOMEM; +- goto out_err; +- } +- kfree(sbi->local_dst); +- sbi->local_dst = NULL; +- +- len = strlen(sbi->real_dst) + strlen(path_local) + 1; +- if (len > PATH_MAX) { +- err = -EINVAL; +- goto out_err; +- } +- sbi->local_dst = kmalloc(len, GFP_KERNEL); +- if (!sbi->local_dst) { +- err = -ENOMEM; +- goto out_err; +- } +- snprintf(sbi->local_dst, strlen(sbi->real_dst) + strlen(path_local) + 1, +- "%s%s", sbi->real_dst, path_local); +-out_err: +- return err; +-} +- +-/* +- * Generate boot cookie like following format: +- * +- * | random | boot time(ms) | 0x00 | +- * |--------|-----------------|-------| +- * 16 33 15 (bits) +- * +- * This will make sure boot cookie is unique in a period +- * 2^33 / 1000 / 3600 / 24 = 99.4(days). 
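
The 99.4-day figure in the comment above is simply the wrap period of the 33-bit millisecond field: 2^33 ms = 8,589,934,592 ms, and 8,589,934,592 / 1000 / 3600 / 24 is about 99.42 days, so a boot cookie only repeats if two boots land on the same 16-bit random value within the same ~99-day phase. A one-line check:

    #include <stdio.h>

    int main(void)
    {
        /* Wrap period of a 33-bit millisecond counter, in days. */
        printf("%.2f days\n", (double)(1ULL << 33) / 1000 / 3600 / 24);
        return 0;
    }
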
+- */ +-uint64_t hmdfs_gen_boot_cookie(void) +-{ +- uint64_t now; +- uint16_t rand; +- struct rnd_state rnd_state; +- +- now = ktime_to_ms(ktime_get()); +- prandom_bytes_state(&rnd_state, (void *)&rand, 2); +- +- now &= (1ULL << HMDFS_BOOT_COOKIE_RAND_SHIFT) - 1; +- now |= ((uint64_t)rand << HMDFS_BOOT_COOKIE_RAND_SHIFT); +- +- return now << HMDFS_FID_VER_BOOT_COOKIE_SHIFT; +-} +- +-static int hmdfs_fill_super(struct super_block *sb, void *data, int silent) +-{ +- struct hmdfs_mount_priv *priv = (struct hmdfs_mount_priv *)data; +- const char *dev_name = priv->dev_name; +- const char *raw_data = priv->raw_data; +- struct hmdfs_sb_info *sbi; +- int err = 0; +- struct inode *root_inode; +- struct path lower_path; +- struct super_block *lower_sb; +- struct dentry *root_dentry; +- char ctrl_path[CTRL_PATH_MAX_LEN]; +- +- if (!raw_data) +- return -EINVAL; +- +- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); +- if (!sbi) { +- err = -ENOMEM; +- goto out_err; +- } +- err = hmdfs_init_sbi(sbi); +- if (err) +- goto out_freesbi; +- sbi->sb = sb; +- err = hmdfs_parse_options(sbi, raw_data); +- if (err) +- goto out_freesbi; +- +- sb->s_fs_info = sbi; +- sb->s_magic = HMDFS_SUPER_MAGIC; +- sb->s_xattr = hmdfs_xattr_handlers; +- sb->s_op = &hmdfs_sops; +- +- sbi->boot_cookie = hmdfs_gen_boot_cookie(); +- +- err = hmdfs_init_writeback(sbi); +- if (err) +- goto out_freesbi; +- err = hmdfs_init_server_writeback(sbi); +- if (err) +- goto out_freesbi; +- +- err = hmdfs_init_stash(sbi); +- if (err) +- goto out_freesbi; +- +- // add ctrl sysfs node +- scnprintf(ctrl_path, CTRL_PATH_MAX_LEN, "%u", sb->s_dev); +- hmdfs_debug("s_dev %u", sb->s_dev); +- err = hmdfs_register_sysfs(ctrl_path, sbi); +- if (err) +- goto out_freesbi; +- +- err = hmdfs_update_dst(sbi); +- if (err) +- goto out_unreg_sysfs; +- +- err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, +- &lower_path); +- if (err) { +- hmdfs_err("open dev failed, errno = %d", err); +- goto out_unreg_sysfs; +- } +- +- lower_sb = lower_path.dentry->d_sb; +- atomic_inc(&lower_sb->s_active); +- sbi->lower_sb = lower_sb; +- sbi->local_src = get_full_path(&lower_path); +- if (!sbi->local_src) { +- hmdfs_err("get local_src failed!"); +- goto out_sput; +- } +- +- sb->s_time_gran = lower_sb->s_time_gran; +- sb->s_maxbytes = lower_sb->s_maxbytes; +- sb->s_stack_depth = lower_sb->s_stack_depth + 1; +- if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { +- hmdfs_err("maximum fs stacking depth exceeded"); +- err = -EINVAL; +- goto out_sput; +- } +- root_inode = fill_root_inode(sb, sbi, d_inode(lower_path.dentry)); +- if (IS_ERR(root_inode)) { +- err = PTR_ERR(root_inode); +- goto out_sput; +- } +- hmdfs_root_inode_perm_init(root_inode); +- sb->s_root = root_dentry = d_make_root(root_inode); +- if (!root_dentry) { +- err = -ENOMEM; +- goto out_sput; +- } +- if (sbi->s_cloud_disk_switch) +- err = init_hmdfs_dentry_info(sbi, root_dentry, HMDFS_LAYER_SECOND_LOCAL); +- else +- err = init_hmdfs_dentry_info(sbi, root_dentry, HMDFS_LAYER_ZERO); +- if (err) +- goto out_freeroot; +- hmdfs_set_lower_path(root_dentry, &lower_path); +- sbi->cred = get_cred(current_cred()); +- INIT_LIST_HEAD(&sbi->client_cache); +- INIT_LIST_HEAD(&sbi->server_cache); +- INIT_LIST_HEAD(&sbi->to_delete); +- mutex_init(&sbi->cache_list_lock); +- hmdfs_cfn_load(sbi); +- +- /* Initialize syncfs info */ +- spin_lock_init(&sbi->hsi.v_lock); +- init_waitqueue_head(&sbi->hsi.wq); +- sbi->hsi.version = 0; +- sbi->hsi.is_executing = false; +- INIT_LIST_HEAD(&sbi->hsi.wait_list); +- 
INIT_LIST_HEAD(&sbi->hsi.pending_list); +- spin_lock_init(&sbi->hsi.list_lock); +- +- return err; +-out_freeroot: +- dput(sb->s_root); +- sb->s_root = NULL; +-out_sput: +- atomic_dec(&lower_sb->s_active); +- path_put(&lower_path); +-out_unreg_sysfs: +- hmdfs_unregister_sysfs(sbi); +- hmdfs_release_sysfs(sbi); +-out_freesbi: +- if (sbi) { +- sb->s_fs_info = NULL; +- hmdfs_clear_share_table(sbi); +- hmdfs_exit_stash(sbi); +- hmdfs_destroy_writeback(sbi); +- hmdfs_destroy_server_writeback(sbi); +- kfifo_free(&sbi->notify_fifo); +- hmdfs_free_sb_seq(sbi->seq); +- kfree(sbi->local_src); +- kfree(sbi->local_dst); +- kfree(sbi->real_dst); +- kfree(sbi->cache_dir); +- kfree(sbi->cloud_dir); +- kfree(sbi->s_server_statis); +- kfree(sbi->s_client_statis); +- kfree(sbi); +- } +-out_err: +- return err; +-} +- +-static struct dentry *hmdfs_mount(struct file_system_type *fs_type, int flags, +- const char *dev_name, void *raw_data) +-{ +- struct hmdfs_mount_priv priv = { +- .dev_name = dev_name, +- .raw_data = raw_data, +- }; +- +- /* hmdfs needs a valid dev_name to get the lower_sb's metadata */ +- if (!dev_name || !*dev_name) +- return ERR_PTR(-EINVAL); +- return mount_nodev(fs_type, flags, &priv, hmdfs_fill_super); +-} +- +- +-static void hmdfs_cancel_async_readdir(struct hmdfs_sb_info *sbi) +-{ +- struct sendmsg_wait_queue *msg_wq = NULL; +- struct hmdfs_readdir_work *rw = NULL; +- struct hmdfs_readdir_work *tmp = NULL; +- struct list_head del_work; +- +- /* cancel work that are not running */ +- +- INIT_LIST_HEAD(&del_work); +- spin_lock(&sbi->async_readdir_work_lock); +- list_for_each_entry_safe(rw, tmp, &sbi->async_readdir_work_list, head) { +- if (cancel_delayed_work(&rw->dwork)) +- list_move(&rw->head, &del_work); +- } +- spin_unlock(&sbi->async_readdir_work_lock); +- +- list_for_each_entry_safe(rw, tmp, &del_work, head) { +- dput(rw->dentry); +- peer_put(rw->con); +- kfree(rw); +- } +- +- /* wake up async readdir that are waiting for remote */ +- spin_lock(&sbi->async_readdir_msg_lock); +- sbi->async_readdir_prohibit = true; +- list_for_each_entry(msg_wq, &sbi->async_readdir_msg_list, async_msg) +- hmdfs_response_wakeup(msg_wq, -EINTR, 0, NULL); +- spin_unlock(&sbi->async_readdir_msg_lock); +- +- /* wait for all async readdir to finish */ +- if (!list_empty(&sbi->async_readdir_work_list)) +- wait_event_interruptible_timeout(sbi->async_readdir_wq, +- (list_empty(&sbi->async_readdir_work_list)), HZ); +- +- WARN_ON(!(list_empty(&sbi->async_readdir_work_list))); +-} +- +-static void hmdfs_kill_super(struct super_block *sb) +-{ +- struct hmdfs_sb_info *sbi = hmdfs_sb(sb); +- +- /* +- * async readdir is holding ref for dentry, not for vfsmount. Thus +- * shrink_dcache_for_umount() will warn about dentry still in use +- * if async readdir is not done. 
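
hmdfs_cancel_async_readdir() above follows a common kernel idiom: work items that can still be cancelled are moved onto a private list while the spinlock is held and only released after it is dropped, so dput()/peer_put()/kfree() never run under the lock. A userspace sketch of that move-then-reap pattern (struct, field and function names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct work {
        struct work *next;
        int cancellable;   /* stand-in for cancel_delayed_work() succeeding */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct work *pending;

    static void cancel_queued_work(void)
    {
        struct work *reap = NULL, **pp, *w;

        /* Under the lock: move every cancellable entry to a private list. */
        pthread_mutex_lock(&list_lock);
        pp = &pending;
        while ((w = *pp) != NULL) {
            if (w->cancellable) {
                *pp = w->next;
                w->next = reap;
                reap = w;
            } else {
                pp = &w->next;
            }
        }
        pthread_mutex_unlock(&list_lock);

        /* Outside the lock: free at leisure without blocking other CPUs. */
        while (reap) {
            w = reap;
            reap = reap->next;
            free(w);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct work *w = calloc(1, sizeof(*w));

            w->cancellable = (i % 2 == 0);   /* pretend half already run */
            w->next = pending;
            pending = w;
        }
        cancel_queued_work();
        while (pending) {   /* the rest would be waited on, then reaped */
            struct work *w = pending;

            pending = pending->next;
            free(w);
        }
        return 0;
    }
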
+- */ +- if (sbi) +- hmdfs_cancel_async_readdir(sbi); +- kill_anon_super(sb); +-} +- +-static struct file_system_type hmdfs_fs_type = { +- .owner = THIS_MODULE, +- .name = "hmdfs", +- .mount = hmdfs_mount, +- .kill_sb = hmdfs_kill_super, +-}; +- +-static int __init hmdfs_init(void) +-{ +- int err = 0; +- +- err = hmdfs_init_caches(); +- if (err) +- goto out_err; +- +- hmdfs_node_evt_cb_init(); +- +- hmdfs_stash_add_node_evt_cb(); +- hmdfs_client_add_node_evt_cb(); +- hmdfs_server_add_node_evt_cb(); +- +- err = register_filesystem(&hmdfs_fs_type); +- if (err) { +- hmdfs_err("hmdfs register failed!"); +- goto out_err; +- } +- +- err = hmdfs_init_configfs(); +- if (err) +- goto out_err; +- +- err = hmdfs_sysfs_init(); +- if (err) +- goto out_err; +- +- hmdfs_message_verify_init(); +- return 0; +-out_err: +- hmdfs_sysfs_exit(); +- hmdfs_exit_configfs(); +- unregister_filesystem(&hmdfs_fs_type); +- hmdfs_destroy_caches(); +- hmdfs_err("hmdfs init failed!"); +- return err; +-} +- +-static void __exit hmdfs_exit(void) +-{ +- hmdfs_sysfs_exit(); +- hmdfs_exit_configfs(); +- unregister_filesystem(&hmdfs_fs_type); +- ida_destroy(&hmdfs_sb_seq); +- hmdfs_destroy_caches(); +- hmdfs_info("hmdfs exited!"); +-} +- +-module_init(hmdfs_init); +-module_exit(hmdfs_exit); +- +-EXPORT_TRACEPOINT_SYMBOL_GPL(hmdfs_recv_mesg_callback); +- +-MODULE_LICENSE("GPL v2"); +-MODULE_AUTHOR("LongPing.WEI, Jingjing.Mao"); +-MODULE_DESCRIPTION("Harmony distributed file system"); +diff --git a/fs/hmdfs/server_writeback.c b/fs/hmdfs/server_writeback.c +deleted file mode 100644 +index b3a18ff67..000000000 +--- a/fs/hmdfs/server_writeback.c ++++ /dev/null +@@ -1,135 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/server_writeback.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include +-#include +-#include +- +-#include "hmdfs.h" +-#include "hmdfs_trace.h" +-#include "server_writeback.h" +- +-#define HMDFS_SRV_WB_DEF_DIRTY_THRESH 50UL +- +-static void hmdfs_srv_wb_handler(struct work_struct *work) +-{ +- struct hmdfs_server_writeback *hswb = container_of(work, +- struct hmdfs_server_writeback, +- dirty_sb_writeback_work); +- struct super_block *lower_sb = hswb->sbi->lower_sb; +- int dirty_pages; +- +- if (writeback_in_progress(&lower_sb->s_bdi->wb) || +- !down_read_trylock(&lower_sb->s_umount)) +- return; +- +- dirty_pages = hswb->dirty_nr_pages_to_wb; +- writeback_inodes_sb_nr(lower_sb, dirty_pages, WB_REASON_FS_FREE_SPACE); +- up_read(&lower_sb->s_umount); +- +- trace_hmdfs_start_srv_wb(hswb->sbi, dirty_pages, hswb->dirty_thresh_pg); +-} +- +-void hmdfs_server_check_writeback(struct hmdfs_server_writeback *hswb) +-{ +- unsigned long old_time, now; +- int dirty_nr_pages; +- +- old_time = hswb->last_reset_time; +- now = jiffies; +- dirty_nr_pages = atomic_inc_return(&hswb->dirty_nr_pages); +- if (time_after(now, old_time + HZ) && +- cmpxchg(&hswb->last_reset_time, old_time, now) == old_time) { +- /* +- * We calculate the speed of page dirtying to handle the +- * following situations: +- * +- * 1. Dense writing, average page writing speed +- * exceeds @hswb->dirty_thresh_pg: +- * 0-1s 100MB +- * 2. Sporadic writing, average page writing speed +- * stays below @hswb->dirty_thresh_pg: +- * 0-0.1s 40MB +- * 3.1-3.2s 20MB +- */ +- unsigned int writepage_speed; +- +- writepage_speed = dirty_nr_pages / ((now - old_time) / HZ); +- if (writepage_speed >= hswb->dirty_thresh_pg) { +- /* +- * Writeback @hswb->dirty_nr_pages_to_wb pages in +- * server-writeback work.
If work is delayed after +- 1s, @hswb->dirty_nr_pages_to_wb could be assigned +- a new value (e.g. 60MB) and the old value (e.g. +- 80MB) will be overwritten, which means 80MB of data +- will be omitted from writeback. We can tolerate this +- situation: the writeback pressure is already too high if +- the previous work has not completed, so it is +- meaningless to continue subsequent work. +- */ +- hswb->dirty_nr_pages_to_wb = dirty_nr_pages; +- /* +- * There are 3 conditions to trigger queuing work: +- * +- * A. Server successfully handles writepage for client +- * B. Every 1 second interval +- * C. Speed of page dirtying exceeds @dirty_thresh_pg +- */ +- queue_work(hswb->dirty_writeback_wq, +- &hswb->dirty_sb_writeback_work); +- } +- +- /* +- * There is no need to account the number of dirty pages +- * from the remote client very accurately. Counts added by +- * other processes in the gap between the increment and the +- * zero-out may be missed. +- */ +- atomic_set(&hswb->dirty_nr_pages, 0); +- } +-} +- +-void hmdfs_destroy_server_writeback(struct hmdfs_sb_info *sbi) +-{ +- if (!sbi->h_swb) +- return; +- +- flush_work(&sbi->h_swb->dirty_sb_writeback_work); +- destroy_workqueue(sbi->h_swb->dirty_writeback_wq); +- kfree(sbi->h_swb); +- sbi->h_swb = NULL; +-} +- +-int hmdfs_init_server_writeback(struct hmdfs_sb_info *sbi) +-{ +- struct hmdfs_server_writeback *hswb; +- char name[HMDFS_WQ_NAME_LEN]; +- +- hswb = kzalloc(sizeof(struct hmdfs_server_writeback), GFP_KERNEL); +- if (!hswb) +- return -ENOMEM; +- +- hswb->sbi = sbi; +- hswb->dirty_writeback_control = true; +- hswb->dirty_thresh_pg = HMDFS_SRV_WB_DEF_DIRTY_THRESH << +- HMDFS_MB_TO_PAGE_SHIFT; +- atomic_set(&hswb->dirty_nr_pages, 0); +- hswb->last_reset_time = jiffies; +- +- snprintf(name, sizeof(name), "dfs_srv_wb%u", sbi->seq); +- hswb->dirty_writeback_wq = create_singlethread_workqueue(name); +- if (!hswb->dirty_writeback_wq) { +- hmdfs_err("Failed to create server writeback workqueue!"); +- kfree(hswb); +- return -ENOMEM; +- } +- INIT_WORK(&hswb->dirty_sb_writeback_work, hmdfs_srv_wb_handler); +- sbi->h_swb = hswb; +- +- return 0; +-} +- +diff --git a/fs/hmdfs/server_writeback.h b/fs/hmdfs/server_writeback.h +deleted file mode 100644 +index eb645e639..000000000 +--- a/fs/hmdfs/server_writeback.h ++++ /dev/null +@@ -1,40 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/server_writeback.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
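
hmdfs_server_check_writeback() above throttles on one-second windows: every dirtied page bumps an atomic counter, and whichever caller first wins the cmpxchg() on last_reset_time computes the dirtying rate for the elapsed window and queues writeback if it crossed the threshold. A userspace analogue with C11 atomics; the threshold value and the time(NULL) granularity are illustrative stand-ins for dirty_thresh_pg and jiffies:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    #define DIRTY_THRESH_PER_SEC 1000L   /* illustrative pages-per-second */

    static atomic_long dirty_pages;
    static atomic_long last_reset;       /* seconds; stands in for jiffies */

    /* Called once per dirtied page, possibly from many threads. */
    static void check_writeback(void)
    {
        long now = time(NULL);
        long old = atomic_load(&last_reset);
        long pages = atomic_fetch_add(&dirty_pages, 1) + 1;

        /* Only one caller per window wins the CAS and evaluates the rate. */
        if (now > old &&
            atomic_compare_exchange_strong(&last_reset, &old, now)) {
            long rate = pages / (now - old);

            if (rate >= DIRTY_THRESH_PER_SEC)
                printf("%ld pages/s: queue writeback of %ld pages\n",
                       rate, pages);
            /* Coarse reset: racing increments may be lost, as in hmdfs. */
            atomic_store(&dirty_pages, 0);
        }
    }

    int main(void)
    {
        atomic_store(&last_reset, time(NULL));
        for (int i = 0; i < 4999; i++)
            check_writeback();   /* accumulate within the current window */
        atomic_store(&last_reset, time(NULL) - 1);   /* force window shut */
        check_writeback();       /* ~5000 pages/s >= threshold: fires */
        return 0;
    }
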
+- */ +- +-#ifndef SERVER_WRITEBACK_H +-#define SERVER_WRITEBACK_H +- +-#include "hmdfs.h" +- +-#define HMDFS_MB_TO_PAGE_SHIFT (20 - HMDFS_PAGE_OFFSET) +- +-struct hmdfs_server_writeback { +- struct hmdfs_sb_info *sbi; +- /* Enable hmdfs server dirty writeback control */ +- bool dirty_writeback_control; +- +- /* Current # of dirty pages from remote client in recent 1s */ +- atomic_t dirty_nr_pages; +- /* Current # of dirty pages to writeback */ +- int dirty_nr_pages_to_wb; +- /* Dirty thresh(Dirty data pages in 1s) to trigger wb */ +- unsigned int dirty_thresh_pg; +- /* Last reset timestamp(in jiffies) for @dirty_nr_pages */ +- unsigned long last_reset_time; +- +- struct workqueue_struct *dirty_writeback_wq; +- /* Per-fs pages from client writeback work */ +- struct work_struct dirty_sb_writeback_work; +-}; +- +-void hmdfs_server_check_writeback(struct hmdfs_server_writeback *hswb); +- +-void hmdfs_destroy_server_writeback(struct hmdfs_sb_info *sbi); +- +-int hmdfs_init_server_writeback(struct hmdfs_sb_info *sbi); +- +-#endif +diff --git a/fs/hmdfs/stash.c b/fs/hmdfs/stash.c +deleted file mode 100644 +index 21c5fac34..000000000 +--- a/fs/hmdfs/stash.c ++++ /dev/null +@@ -1,2226 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/stash.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "stash.h" +-#include "comm/node_cb.h" +-#include "comm/protocol.h" +-#include "comm/connection.h" +-#include "file_remote.h" +-#include "hmdfs_dentryfile.h" +-#include "authority/authentication.h" +- +-/* Head magic used to identify a stash file */ +-#define HMDFS_STASH_FILE_HEAD_MAGIC 0xF7AB06C3 +-/* Head and path in stash file are aligned with HMDFS_STASH_BLK_SIZE */ +-#define HMDFS_STASH_BLK_SIZE 4096 +-#define HMDFS_STASH_BLK_SHIFT 12 +-#define HMDFS_STASH_PAGE_TO_SECTOR_SHIFT 3 +-#define HMDFS_STASH_DIR_NAME "stash" +-#define HMDFS_STASH_FMT_DIR_NAME "v1" +-#define HMDFS_STASH_WORK_DIR_NAME \ +- (HMDFS_STASH_DIR_NAME "/" HMDFS_STASH_FMT_DIR_NAME) +- +-#define HMDFS_STASH_FILE_NAME_LEN 20 +- +-#define HMDFS_STASH_FLUSH_CNT 2 +- +-#define HMDFS_STASH_PATH_LEN (HMDFS_CID_SIZE + HMDFS_STASH_FILE_NAME_LEN + 1) +- +-struct hmdfs_cache_file_head { +- __le32 magic; +- __le32 crc_offset; +- __le64 ino; +- __le64 size; +- __le64 blocks; +- __le64 last_write_pos; +- __le64 ctime; +- __le32 ctime_nsec; +- __le32 change_detect_cap; +- __le64 ichange_count; +- __le32 path_offs; +- __le32 path_len; +- __le32 path_cnt; +- __le32 data_offs; +- /* Attention: expand new fields in here to compatible with old ver */ +- __le32 crc32; +-} __packed; +- +-struct hmdfs_stash_work { +- struct hmdfs_peer *conn; +- struct list_head *list; +- struct work_struct work; +- struct completion done; +-}; +- +-struct hmdfs_inode_tbl { +- unsigned int cnt; +- unsigned int max; +- uint64_t inodes[0]; +-}; +- +-struct hmdfs_stash_dir_context { +- struct dir_context dctx; +- char name[NAME_MAX + 1]; +- struct hmdfs_inode_tbl *tbl; +-}; +- +-struct hmdfs_restore_stats { +- unsigned int succeed; +- unsigned int fail; +- unsigned int keep; +- unsigned long long ok_pages; +- unsigned long long fail_pages; +-}; +- +-struct hmdfs_stash_stats { +- unsigned int succeed; +- unsigned int donothing; +- unsigned int fail; +- unsigned long long ok_pages; +- unsigned long long fail_pages; +-}; +- +-struct hmdfs_file_restore_ctx { +- struct hmdfs_peer *conn; +- struct path 
src_dir_path; +- struct path dst_root_path; +- char *dst; +- char *page; +- struct file *src_filp; +- uint64_t inum; +- uint64_t pages; +- unsigned int seq; +- unsigned int data_offs; +- /* output */ +- bool keep; +-}; +- +-struct hmdfs_copy_args { +- struct file *src; +- struct file *dst; +- void *buf; +- size_t buf_len; +- unsigned int seq; +- unsigned int data_offs; +- uint64_t inum; +-}; +- +-struct hmdfs_copy_ctx { +- struct hmdfs_copy_args args; +- loff_t src_pos; +- loff_t dst_pos; +- /* output */ +- size_t copied; +- bool eof; +-}; +- +-struct hmdfs_rebuild_stats { +- unsigned int succeed; +- unsigned int total; +- unsigned int fail; +- unsigned int invalid; +-}; +- +-struct hmdfs_check_work { +- struct hmdfs_peer *conn; +- struct work_struct work; +- struct completion done; +-}; +- +-typedef int (*stash_operation_func)(struct hmdfs_peer *, +- unsigned int, +- struct path *, +- const struct hmdfs_inode_tbl *, +- void *); +- +-static struct dentry *hmdfs_do_vfs_mkdir(struct dentry *parent, +- const char *name, int namelen, +- umode_t mode) +-{ +- struct inode *dir = d_inode(parent); +- struct dentry *child = NULL; +- int err; +- +- inode_lock_nested(dir, I_MUTEX_PARENT); +- +- child = lookup_one_len(name, parent, namelen); +- if (IS_ERR(child)) +- goto out; +- +- if (d_is_positive(child)) { +- if (d_can_lookup(child)) +- goto out; +- +- dput(child); +- child = ERR_PTR(-EINVAL); +- goto out; +- } +- +- err = vfs_mkdir(&nop_mnt_idmap, dir, child, mode); +- if (err) { +- dput(child); +- child = ERR_PTR(err); +- goto out; +- } +- +-out: +- inode_unlock(dir); +- return child; +-} +- +-struct dentry *hmdfs_stash_new_work_dir(struct dentry *parent) +-{ +- struct dentry *base = NULL; +- struct dentry *work = NULL; +- +- base = hmdfs_do_vfs_mkdir(parent, HMDFS_STASH_DIR_NAME, +- strlen(HMDFS_STASH_DIR_NAME), 0700); +- if (IS_ERR(base)) +- return base; +- +- work = hmdfs_do_vfs_mkdir(base, HMDFS_STASH_FMT_DIR_NAME, +- strlen(HMDFS_STASH_FMT_DIR_NAME), 0700); +- dput(base); +- +- return work; +-} +- +-static struct file *hmdfs_new_stash_file(struct path *d_path, const char *cid) +-{ +- struct dentry *parent = NULL; +- struct file *filp = NULL; +- struct path stash; +- int err; +- +- parent = hmdfs_do_vfs_mkdir(d_path->dentry, cid, strlen(cid), 0700); +- if (IS_ERR(parent)) { +- err = PTR_ERR(parent); +- hmdfs_err("mkdir error %d", err); +- goto mkdir_err; +- } +- +- stash.mnt = d_path->mnt; +- stash.dentry = parent; +- filp = kernel_tmpfile_open(&nop_mnt_idmap, &stash, S_IFREG | 0600, +- O_LARGEFILE | O_WRONLY, current_cred()); +- if (IS_ERR(filp)) { +- err = PTR_ERR(filp); +- hmdfs_err("open stash file error %d", err); +- goto open_err; +- } +- +- dput(parent); +- +- return filp; +- +-open_err: +- dput(parent); +-mkdir_err: +- return ERR_PTR(err); +-} +- +-static inline bool hmdfs_is_dir(struct dentry *child) +-{ +- return d_is_positive(child) && d_can_lookup(child); +-} +- +-static inline bool hmdfs_is_reg(struct dentry *child) +-{ +- return d_is_positive(child) && d_is_reg(child); +-} +- +-static void hmdfs_set_stash_file_head(const struct hmdfs_cache_info *cache, +- uint64_t ino, +- struct hmdfs_cache_file_head *head) +-{ +- long long blocks; +- unsigned int crc_offset; +- +- memset(head, 0, sizeof(*head)); +- head->magic = cpu_to_le32(HMDFS_STASH_FILE_HEAD_MAGIC); +- head->ino = cpu_to_le64(ino); +- head->size = cpu_to_le64(i_size_read(file_inode(cache->cache_file))); +- blocks = atomic64_read(&cache->written_pgs) << +- HMDFS_STASH_PAGE_TO_SECTOR_SHIFT; +- head->blocks = 
cpu_to_le64(blocks); +- head->path_offs = cpu_to_le32(cache->path_offs); +- head->path_len = cpu_to_le32(cache->path_len); +- head->path_cnt = cpu_to_le32(cache->path_cnt); +- head->data_offs = cpu_to_le32(cache->data_offs); +- crc_offset = offsetof(struct hmdfs_cache_file_head, crc32); +- head->crc_offset = cpu_to_le32(crc_offset); +- head->crc32 = cpu_to_le32(crc32(0, head, crc_offset)); +-} +- +-static int hmdfs_flush_stash_file_metadata(struct hmdfs_inode_info *info) +-{ +- struct hmdfs_cache_info *cache = NULL; +- struct hmdfs_peer *conn = info->conn; +- struct hmdfs_cache_file_head cache_head; +- size_t written; +- loff_t pos; +- unsigned int head_size; +- +- /* No metadata if no cache file info */ +- cache = info->cache; +- if (!cache) +- return -EINVAL; +- +- if (strlen(cache->path) == 0) { +- long long to_write_pgs = atomic64_read(&cache->to_write_pgs); +- +- /* Nothing to stash. No need to flush meta data. */ +- if (to_write_pgs == 0) +- return 0; +- +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx lost %lld pages due to no path", +- conn->owner, conn->device_id, +- info->remote_ino, to_write_pgs); +- return -EINVAL; +- } +- +- hmdfs_set_stash_file_head(cache, info->remote_ino, &cache_head); +- +- /* Write head */ +- pos = 0; +- head_size = sizeof(cache_head); +- written = kernel_write(cache->cache_file, &cache_head, head_size, &pos); +- if (written != head_size) { +- hmdfs_err("stash peer 0x%x:0x%llx ino 0x%llx write head len %u err %zd", +- conn->owner, conn->device_id, info->remote_ino, +- head_size, written); +- return -EIO; +- } +- /* Write path */ +- pos = (loff_t)cache->path_offs << HMDFS_STASH_BLK_SHIFT; +- written = kernel_write(cache->cache_file, cache->path, cache->path_len, +- &pos); +- if (written != cache->path_len) { +- hmdfs_err("stash peer 0x%x:0x%llx ino 0x%llx write path len %u err %zd", +- conn->owner, conn->device_id, info->remote_ino, +- cache->path_len, written); +- return -EIO; +- } +- +- return 0; +-} +- +-/* Mainly from inode_wait_for_writeback() */ +-static void hmdfs_wait_remote_writeback_once(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct inode *inode = &info->vfs_inode; +- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); +- wait_queue_head_t *wq_head = NULL; +- bool in_sync = false; +- +- spin_lock(&inode->i_lock); +- in_sync = inode->i_state & I_SYNC; +- spin_unlock(&inode->i_lock); +- +- if (!in_sync) +- return; +- +- hmdfs_info("peer 0x%x:0x%llx ino 0x%llx wait for wb once", +- conn->owner, conn->device_id, info->remote_ino); +- +- wq_head = bit_waitqueue(&inode->i_state, __I_SYNC); +- __wait_on_bit(wq_head, &wq, bit_wait, TASK_UNINTERRUPTIBLE); +-} +- +-static void hmdfs_reset_remote_write_err(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct address_space *mapping = info->vfs_inode.i_mapping; +- int flags_err; +- errseq_t old; +- int wb_err; +- +- flags_err = filemap_check_errors(mapping); +- +- old = errseq_sample(&mapping->wb_err); +- wb_err = errseq_check_and_advance(&mapping->wb_err, &old); +- if (flags_err || wb_err) +- hmdfs_warning("peer 0x%x:0x%llx inode 0x%llx wb error %d %d before stash", +- conn->owner, conn->device_id, info->remote_ino, +- flags_err, wb_err); +-} +- +-static bool hmdfs_is_mapping_clean(struct address_space *mapping) +-{ +- bool clean = false; +- +- /* b93b016313b3b ("page cache: use xa_lock") introduces i_pages */ +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) +- xa_lock_irq(&mapping->i_pages); +-#else +- spin_lock_irq(&mapping->tree_lock); +-#endif +- clean = 
!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && +- !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK); +-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) +- xa_unlock_irq(&mapping->i_pages); +-#else +- spin_unlock_irq(&mapping->tree_lock); +-#endif +- return clean; +-} +- +-static int hmdfs_flush_stash_file_data(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct inode *inode = &info->vfs_inode; +- struct address_space *mapping = inode->i_mapping; +- bool all_clean = true; +- int err = 0; +- int i; +- +- /* Wait for the completion of write syscall */ +- inode_lock(inode); +- inode_unlock(inode); +- +- all_clean = hmdfs_is_mapping_clean(mapping); +- if (all_clean) { +- hmdfs_reset_remote_write_err(conn, info); +- return 0; +- } +- +- /* +- * No-sync_all writeback during offline may have not seen +- * the setting of stash_status as HMDFS_REMOTE_INODE_STASHING +- * and will call mapping_set_error() after we just reset +- * the previous error. So waiting for these writeback once, +- * and the following writeback will do local write. +- */ +- hmdfs_wait_remote_writeback_once(conn, info); +- +- /* Need to clear previous error ? */ +- hmdfs_reset_remote_write_err(conn, info); +- +- /* +- * 1. dirty page: do write back +- * 2. writeback page: wait for its completion +- * 3. writeback -> redirty page: do filemap_write_and_wait() +- * twice, so 2th writeback should not allow +- * writeback -> redirty transition +- */ +- for (i = 0; i < HMDFS_STASH_FLUSH_CNT; i++) { +- err = filemap_write_and_wait(mapping); +- if (err) { +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx #%d stash flush error %d", +- conn->owner, conn->device_id, +- info->remote_ino, i, err); +- return err; +- } +- } +- +- if (!hmdfs_is_mapping_clean(mapping)) +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx is still dirty dt %d wb %d", +- conn->owner, conn->device_id, info->remote_ino, +- !!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY), +- !!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)); +- +- return 0; +-} +- +-static int hmdfs_flush_stash_file(struct hmdfs_inode_info *info) +-{ +- int err; +- +- err = hmdfs_flush_stash_file_data(info->conn, info); +- if (!err) +- err = hmdfs_flush_stash_file_metadata(info); +- +- return err; +-} +- +-static int hmdfs_enable_stash_file(struct hmdfs_inode_info *info, +- struct dentry *stash) +-{ +- char name[HMDFS_STASH_FILE_NAME_LEN]; +- struct dentry *parent = NULL; +- struct inode *dir = NULL; +- struct dentry *child = NULL; +- int err = 0; +- bool retried = false; +- +- snprintf(name, sizeof(name), "0x%llx", info->remote_ino); +- +- parent = lock_parent(stash); +- dir = d_inode(parent); +- +-lookup_again: +- child = lookup_one_len(name, parent, strlen(name)); +- if (IS_ERR(child)) { +- err = PTR_ERR(child); +- child = NULL; +- hmdfs_err("lookup %s err %d", name, err); +- goto out; +- } +- +- if (d_is_positive(child)) { +- hmdfs_warning("%s exists (mode 0%o)", +- name, d_inode(child)->i_mode); +- +- err = vfs_unlink(&nop_mnt_idmap, dir, child, NULL); +- if (err) { +- hmdfs_err("unlink %s err %d", name, err); +- goto out; +- } +- if (retried) { +- err = -EEXIST; +- goto out; +- } +- +- retried = true; +- dput(child); +- goto lookup_again; +- } +- +- err = vfs_link(stash, &nop_mnt_idmap, dir, child, NULL); +- if (err) { +- hmdfs_err("link stash file to %s err %d", name, err); +- goto out; +- } +- +-out: +- unlock_dir(parent); +- if (child) +- dput(child); +- +- return err; +-} +- +-/* Return 1 if stash is done, 0 if nothing is stashed */ +-static int hmdfs_close_stash_file(struct 
hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct file *cache_file = info->cache->cache_file; +- struct dentry *c_dentry = file_dentry(cache_file); +- struct inode *c_inode = d_inode(c_dentry); +- long long to_write_pgs = atomic64_read(&info->cache->to_write_pgs); +- int err; +- +- hmdfs_info("peer 0x%x:0x%llx inode 0x%llx stashed bytes %lld pages %lld", +- conn->owner, conn->device_id, info->remote_ino, +- i_size_read(c_inode), to_write_pgs); +- +- if (to_write_pgs == 0) +- return 0; +- +- err = vfs_fsync(cache_file, 0); +- if (!err) +- err = hmdfs_enable_stash_file(info, c_dentry); +- else +- hmdfs_err("fsync stash file err %d", err); +- +- return err < 0 ? err : 1; +-} +- +-static void hmdfs_del_file_cache(struct hmdfs_cache_info *cache) +-{ +- if (!cache) +- return; +- +- fput(cache->cache_file); +- kfree(cache->path_buf); +- kfree(cache); +-} +- +-static struct hmdfs_cache_info * +-hmdfs_new_file_cache(struct hmdfs_peer *conn, struct hmdfs_inode_info *info) +-{ +- struct hmdfs_cache_info *cache = NULL; +- struct dentry *stash_dentry = NULL; +- int err; +- +- cache = kzalloc(sizeof(*cache), GFP_KERNEL); +- if (!cache) +- return ERR_PTR(-ENOMEM); +- +- atomic64_set(&cache->to_write_pgs, 0); +- atomic64_set(&cache->written_pgs, 0); +- cache->path_buf = kmalloc(PATH_MAX, GFP_KERNEL); +- if (!cache->path_buf) { +- err = -ENOMEM; +- goto free_cache; +- } +- +- /* Need to handle "hardlink" ? */ +- stash_dentry = d_find_any_alias(&info->vfs_inode); +- if (stash_dentry) { +- /* Needs full path in hmdfs, will be a device-view path */ +- cache->path = dentry_path_raw(stash_dentry, cache->path_buf, +- PATH_MAX); +- dput(stash_dentry); +- if (IS_ERR(cache->path)) { +- err = PTR_ERR(cache->path); +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx gen path err %d", +- conn->owner, conn->device_id, +- info->remote_ino, err); +- goto free_path; +- } +- } else { +- /* Write-opened file was closed before finding dentry */ +- hmdfs_info("peer 0x%x:0x%llx inode 0x%llx no dentry found", +- conn->owner, conn->device_id, info->remote_ino); +- cache->path_buf[0] = '\0'; +- cache->path = cache->path_buf; +- } +- +- cache->path_cnt = 1; +- cache->path_len = strlen(cache->path) + 1; +- cache->path_offs = DIV_ROUND_UP(sizeof(struct hmdfs_cache_file_head), +- HMDFS_STASH_BLK_SIZE); +- cache->data_offs = cache->path_offs + DIV_ROUND_UP(cache->path_len, +- HMDFS_STASH_BLK_SIZE); +- cache->cache_file = hmdfs_new_stash_file(&conn->sbi->stash_work_dir, +- conn->cid); +- if (IS_ERR(cache->cache_file)) { +- err = PTR_ERR(cache->cache_file); +- goto free_path; +- } +- +- return cache; +- +-free_path: +- kfree(cache->path_buf); +-free_cache: +- kfree(cache); +- return ERR_PTR(err); +-} +- +-static void hmdfs_init_stash_file_cache(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct hmdfs_cache_info *cache = NULL; +- +- cache = hmdfs_new_file_cache(conn, info); +- if (IS_ERR(cache)) +- /* +- * Continue even creating stash info failed. +- * We need to ensure there is no dirty pages +- * after stash completes +- */ +- cache = NULL; +- +- /* Make write() returns */ +- spin_lock(&info->stash_lock); +- info->cache = cache; +- info->stash_status = HMDFS_REMOTE_INODE_STASHING; +- spin_unlock(&info->stash_lock); +-} +- +-static void hmdfs_update_stash_stats(struct hmdfs_stash_stats *stats, +- const struct hmdfs_cache_info *cache, +- int err) +-{ +- unsigned long long ok_pages, fail_pages; +- +- if (cache) { +- ok_pages = err > 0 ? 
atomic64_read(&cache->written_pgs) : 0; +- fail_pages = atomic64_read(&cache->to_write_pgs) - ok_pages; +- stats->ok_pages += ok_pages; +- stats->fail_pages += fail_pages; +- } +- +- if (err > 0) +- stats->succeed++; +- else if (!err) +- stats->donothing++; +- else +- stats->fail++; +-} +- +-/* Return 1 if stash is done, 0 if nothing is stashed */ +-static int hmdfs_stash_remote_inode(struct hmdfs_inode_info *info, +- struct hmdfs_stash_stats *stats) +-{ +- struct hmdfs_cache_info *cache = info->cache; +- struct hmdfs_peer *conn = info->conn; +- unsigned int status; +- int err = 0; +- +- hmdfs_info("stash peer 0x%x:0x%llx ino 0x%llx", +- conn->owner, conn->device_id, info->remote_ino); +- +- err = hmdfs_flush_stash_file(info); +- if (!err) +- err = hmdfs_close_stash_file(conn, info); +- +- if (err <= 0) +- set_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags); +- status = err > 0 ? HMDFS_REMOTE_INODE_RESTORING : +- HMDFS_REMOTE_INODE_NONE; +- spin_lock(&info->stash_lock); +- info->cache = NULL; +- /* +- * Use smp_store_release() to ensure order between HMDFS_FID_NEED_OPEN +- * and HMDFS_REMOTE_INODE_NONE. +- */ +- smp_store_release(&info->stash_status, status); +- spin_unlock(&info->stash_lock); +- +- hmdfs_update_stash_stats(stats, cache, err); +- hmdfs_del_file_cache(cache); +- +- return err; +-} +- +-static void hmdfs_init_cache_for_stash_files(struct hmdfs_peer *conn, +- struct list_head *list) +-{ +- const struct cred *old_cred = NULL; +- struct hmdfs_inode_info *info = NULL; +- +- /* For file creation under stash_work_dir */ +- old_cred = hmdfs_override_creds(conn->sbi->cred); +- list_for_each_entry(info, list, stash_node) +- hmdfs_init_stash_file_cache(conn, info); +- hmdfs_revert_creds(old_cred); +-} +- +-static void hmdfs_init_stash_cache_work_fn(struct work_struct *base) +-{ +- struct hmdfs_stash_work *work = +- container_of(base, struct hmdfs_stash_work, work); +- +- hmdfs_init_cache_for_stash_files(work->conn, work->list); +- complete(&work->done); +-} +- +-static void hmdfs_init_cache_for_stash_files_by_work(struct hmdfs_peer *conn, +- struct list_head *list) +-{ +- struct hmdfs_stash_work work = { +- .conn = conn, +- .list = list, +- .done = COMPLETION_INITIALIZER_ONSTACK(work.done), +- }; +- +- INIT_WORK_ONSTACK(&work.work, hmdfs_init_stash_cache_work_fn); +- schedule_work(&work.work); +- wait_for_completion(&work.done); +-} +- +-static void hmdfs_stash_fetch_ready_files(struct hmdfs_peer *conn, +- bool check, struct list_head *list) +-{ +- struct hmdfs_inode_info *info = NULL; +- +- spin_lock(&conn->wr_opened_inode_lock); +- list_for_each_entry(info, &conn->wr_opened_inode_list, wr_opened_node) { +- int status; +- +- /* Paired with *_release() in hmdfs_reset_stashed_inode() */ +- status = smp_load_acquire(&info->stash_status); +- if (status == HMDFS_REMOTE_INODE_NONE) { +- list_add_tail(&info->stash_node, list); +- /* +- * Prevent close() removing the inode from +- * writeable-opened inode list +- */ +- hmdfs_remote_add_wr_opened_inode_nolock(conn, info); +- /* Prevent the inode from eviction */ +- ihold(&info->vfs_inode); +- } else if (check && status == HMDFS_REMOTE_INODE_STASHING) { +- hmdfs_warning("peer 0x%x:0x%llx inode 0x%llx unexpected stash status %d", +- conn->owner, conn->device_id, +- info->remote_ino, status); +- } +- } +- spin_unlock(&conn->wr_opened_inode_lock); +-} +- +-static void hmdfs_stash_offline_prepare(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- LIST_HEAD(preparing); +- +- if (!hmdfs_is_stash_enabled(conn->sbi)) +- return; +- +- 
mutex_lock(&conn->offline_cb_lock); +- +- hmdfs_stash_fetch_ready_files(conn, true, &preparing); +- +- if (list_empty(&preparing)) +- goto out; +- +- hmdfs_init_cache_for_stash_files_by_work(conn, &preparing); +-out: +- mutex_unlock(&conn->offline_cb_lock); +-} +- +-static void hmdfs_track_inode_locked(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- spin_lock(&conn->stashed_inode_lock); +- list_add_tail(&info->stash_node, &conn->stashed_inode_list); +- conn->stashed_inode_nr++; +- spin_unlock(&conn->stashed_inode_lock); +-} +- +-static void +-hmdfs_update_peer_stash_stats(struct hmdfs_stash_statistics *stash_stats, +- const struct hmdfs_stash_stats *stats) +-{ +- stash_stats->cur_ok = stats->succeed; +- stash_stats->cur_nothing = stats->donothing; +- stash_stats->cur_fail = stats->fail; +- stash_stats->total_ok += stats->succeed; +- stash_stats->total_nothing += stats->donothing; +- stash_stats->total_fail += stats->fail; +- stash_stats->ok_pages += stats->ok_pages; +- stash_stats->fail_pages += stats->fail_pages; +-} +- +-static void hmdfs_stash_remote_inodes(struct hmdfs_peer *conn, +- struct list_head *list) +-{ +- const struct cred *old_cred = NULL; +- struct hmdfs_inode_info *info = NULL; +- struct hmdfs_inode_info *next = NULL; +- struct hmdfs_stash_stats stats; +- +- /* For file creation, write and relink under stash_work_dir */ +- old_cred = hmdfs_override_creds(conn->sbi->cred); +- +- memset(&stats, 0, sizeof(stats)); +- list_for_each_entry_safe(info, next, list, stash_node) { +- int err; +- +- list_del_init(&info->stash_node); +- +- err = hmdfs_stash_remote_inode(info, &stats); +- if (err > 0) +- hmdfs_track_inode_locked(conn, info); +- +- hmdfs_remote_del_wr_opened_inode(conn, info); +- if (err <= 0) +- iput(&info->vfs_inode); +- } +- hmdfs_revert_creds(old_cred); +- +- hmdfs_update_peer_stash_stats(&conn->stats.stash, &stats); +- hmdfs_info("peer 0x%x:0x%llx total stashed %u cur ok %u none %u fail %u", +- conn->owner, conn->device_id, conn->stashed_inode_nr, +- stats.succeed, stats.donothing, stats.fail); +-} +- +-static void hmdfs_stash_offline_do_stash(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- struct hmdfs_inode_info *info = NULL; +- LIST_HEAD(preparing); +- LIST_HEAD(stashing); +- +- if (!hmdfs_is_stash_enabled(conn->sbi)) +- return; +- +- /* release seq_lock to prevent blocking no-offline sync cb */ +- mutex_unlock(&conn->seq_lock); +- /* acquire offline_cb_lock to serialized with offline sync cb */ +- mutex_lock(&conn->offline_cb_lock); +- +- hmdfs_stash_fetch_ready_files(conn, false, &preparing); +- if (!list_empty(&preparing)) +- hmdfs_init_cache_for_stash_files(conn, &preparing); +- +- spin_lock(&conn->wr_opened_inode_lock); +- list_for_each_entry(info, &conn->wr_opened_inode_list, wr_opened_node) { +- int status = READ_ONCE(info->stash_status); +- +- if (status == HMDFS_REMOTE_INODE_STASHING) +- list_add_tail(&info->stash_node, &stashing); +- } +- spin_unlock(&conn->wr_opened_inode_lock); +- +- if (list_empty(&stashing)) +- goto unlock; +- +- hmdfs_stash_remote_inodes(conn, &stashing); +- +-unlock: +- mutex_unlock(&conn->offline_cb_lock); +- mutex_lock(&conn->seq_lock); +-} +- +-static struct hmdfs_inode_info * +-hmdfs_lookup_stash_inode(struct hmdfs_peer *conn, uint64_t inum) +-{ +- struct hmdfs_inode_info *info = NULL; +- +- list_for_each_entry(info, &conn->stashed_inode_list, stash_node) { +- if (info->remote_ino == inum) +- return info; +- } +- +- return NULL; +-} +- +-static void hmdfs_untrack_stashed_inode(struct hmdfs_peer 
*conn, +- struct hmdfs_inode_info *info) +-{ +- list_del_init(&info->stash_node); +- iput(&info->vfs_inode); +- +- conn->stashed_inode_nr--; +-} +- +-static void hmdfs_reset_stashed_inode(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info) +-{ +- struct inode *ino = &info->vfs_inode; +- +- /* +- * For updating stash_status after iput() +- * in hmdfs_untrack_stashed_inode() +- */ +- ihold(ino); +- hmdfs_untrack_stashed_inode(conn, info); +- /* +- * Ensure the order of stash_node and stash_status: +- * only update stash_status to NONE after removal of +- * stash_node is completed. +- */ +- smp_store_release(&info->stash_status, +- HMDFS_REMOTE_INODE_NONE); +- iput(ino); +-} +- +-static void hmdfs_drop_stashed_inodes(struct hmdfs_peer *conn) +-{ +- struct hmdfs_inode_info *info = NULL; +- struct hmdfs_inode_info *next = NULL; +- +- if (list_empty(&conn->stashed_inode_list)) +- return; +- +- hmdfs_warning("peer 0x%x:0x%llx drop unrestorable file %u", +- conn->owner, conn->device_id, conn->stashed_inode_nr); +- +- list_for_each_entry_safe(info, next, +- &conn->stashed_inode_list, stash_node) { +- hmdfs_warning("peer 0x%x:0x%llx inode 0x%llx unrestorable status %u", +- conn->owner, conn->device_id, info->remote_ino, +- READ_ONCE(info->stash_status)); +- +- hmdfs_reset_stashed_inode(conn, info); +- } +-} +- +-static struct file *hmdfs_open_stash_dir(struct path *d_path, const char *cid) +-{ +- int err = 0; +- struct dentry *parent = d_path->dentry; +- struct inode *dir = d_inode(parent); +- struct dentry *child = NULL; +- struct path peer_path; +- struct file *filp = NULL; +- +- inode_lock_nested(dir, I_MUTEX_PARENT); +- child = lookup_one_len(cid, parent, strlen(cid)); +- if (!IS_ERR(child)) { +- if (!hmdfs_is_dir(child)) { +- if (d_is_positive(child)) { +- hmdfs_err("invalid stash dir mode 0%o", d_inode(child)->i_mode); +- err = -EINVAL; +- } else { +- err = -ENOENT; +- } +- dput(child); +- } +- } else { +- err = PTR_ERR(child); +- hmdfs_err("lookup stash dir err %d", err); +- } +- inode_unlock(dir); +- +- if (err) +- return ERR_PTR(err); +- +- peer_path.mnt = d_path->mnt; +- peer_path.dentry = child; +- filp = dentry_open(&peer_path, O_RDONLY | O_DIRECTORY, current_cred()); +- if (IS_ERR(filp)) +- hmdfs_err("open err %d", (int)PTR_ERR(filp)); +- +- dput(child); +- +- return filp; +-} +- +-static int hmdfs_new_inode_tbl(struct hmdfs_inode_tbl **tbl) +-{ +- struct hmdfs_inode_tbl *new = NULL; +- +- new = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!new) +- return -ENOMEM; +- +- new->cnt = 0; +- new->max = (PAGE_SIZE - offsetof(struct hmdfs_inode_tbl, inodes)) / +- sizeof(new->inodes[0]); +- *tbl = new; +- +- return 0; +-} +- +-static int hmdfs_parse_stash_file_name(struct dir_context *dctx, +- const char *name, +- int namelen, +- unsigned int d_type, +- uint64_t *stash_inum) +-{ +- struct hmdfs_stash_dir_context *ctx = NULL; +- int err; +- +- if (d_type != DT_UNKNOWN && d_type != DT_REG) +- return 0; +- if (namelen > NAME_MAX) +- return 0; +- +- ctx = container_of(dctx, struct hmdfs_stash_dir_context, dctx); +- memcpy(ctx->name, name, namelen); +- ctx->name[namelen] = '\0'; +- err = kstrtoull(ctx->name, 16, stash_inum); +- if (err) { +- hmdfs_err("unexpected stash file err %d", err); +- return 0; +- } +- return 1; +-} +- +-static bool hmdfs_has_stash_file(struct dir_context *dctx, const char *name, +- int namelen, loff_t offset, +- u64 inum, unsigned int d_type) +-{ +- struct hmdfs_stash_dir_context *ctx = NULL; +- uint64_t stash_inum; +- int err; +- +- ctx = container_of(dctx, struct 
hmdfs_stash_dir_context, dctx); +- err = hmdfs_parse_stash_file_name(dctx, name, namelen, +- d_type, &stash_inum); +- if (!err) +- return true; +- +- ctx->tbl->cnt++; +- return false; +-} +- +-static bool hmdfs_fill_stash_file(struct dir_context *dctx, const char *name, +- int namelen, loff_t offset, +- u64 inum, unsigned int d_type) +-{ +- struct hmdfs_stash_dir_context *ctx = NULL; +- uint64_t stash_inum; +- int err; +- +- ctx = container_of(dctx, struct hmdfs_stash_dir_context, dctx); +- err = hmdfs_parse_stash_file_name(dctx, name, namelen, +- d_type, &stash_inum); +- if (!err) +- return true; +- if (ctx->tbl->cnt >= ctx->tbl->max) +- return false; +- +- ctx->tbl->inodes[ctx->tbl->cnt++] = stash_inum; +- +- return true; +-} +- +-static int hmdfs_del_stash_file(struct dentry *parent, struct dentry *child) +-{ +- struct inode *dir = d_inode(parent); +- int err = 0; +- +- /* Prevent d_delete() from calling dentry_unlink_inode() */ +- dget(child); +- +- inode_lock_nested(dir, I_MUTEX_PARENT); +- err = vfs_unlink(&nop_mnt_idmap, dir, child, NULL); +- if (err) +- hmdfs_err("remove stash file err %d", err); +- inode_unlock(dir); +- +- dput(child); +- +- return err; +-} +- +-static inline bool hmdfs_is_node_offlined(const struct hmdfs_peer *conn, +- unsigned int seq) +-{ +- /* +- * open()/fsync() may fail due to "status = NODE_STAT_OFFLINE" +- * in hmdfs_disconnect_node(). +- * Pair with smp_mb() in hmdfs_disconnect_node() to ensure +- * getting the newest event sequence. +- */ +- smp_mb__before_atomic(); +- return hmdfs_node_evt_seq(conn) != seq; +-} +- +-static int hmdfs_verify_restore_file_head(struct hmdfs_file_restore_ctx *ctx, +- const struct hmdfs_cache_file_head *head) +-{ +- struct inode *inode = file_inode(ctx->src_filp); +- struct hmdfs_peer *conn = ctx->conn; +- unsigned int crc, read_crc, crc_offset; +- loff_t path_offs, data_offs, isize; +- int err = 0; +- +- if (le32_to_cpu(head->magic) != HMDFS_STASH_FILE_HEAD_MAGIC) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid magic: got 0x%x, exp 0x%x", +- conn->owner, conn->device_id, ctx->inum, +- le32_to_cpu(head->magic), +- HMDFS_STASH_FILE_HEAD_MAGIC); +- goto out; +- } +- +- crc_offset = le32_to_cpu(head->crc_offset); +- read_crc = le32_to_cpu(*((__le32 *)((char *)head + crc_offset))); +- crc = crc32(0, head, crc_offset); +- if (read_crc != crc) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid crc: got 0x%x, exp 0x%x", +- conn->owner, conn->device_id, ctx->inum, +- read_crc, crc); +- goto out; +- } +- +- if (le64_to_cpu(head->ino) != ctx->inum) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid ino: got %llu, exp %llu", +- conn->owner, conn->device_id, ctx->inum, +- le64_to_cpu(head->ino), ctx->inum); +- goto out; +- } +- +- path_offs = (loff_t)le32_to_cpu(head->path_offs) << +- HMDFS_STASH_BLK_SHIFT; +- if (path_offs <= 0 || path_offs >= i_size_read(inode)) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid path_offs %d, stash file size %llu", +- conn->owner, conn->device_id, ctx->inum, +- le32_to_cpu(head->path_offs), i_size_read(inode)); +- goto out; +- } +- +- data_offs = (loff_t)le32_to_cpu(head->data_offs) << +- HMDFS_STASH_BLK_SHIFT; +- if (path_offs >= data_offs) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid data_offs %d, path_offs %d", +- conn->owner, conn->device_id, ctx->inum, +- le32_to_cpu(head->data_offs), +- le32_to_cpu(head->path_offs)); +- goto out; +- } +- if (data_offs <= 0 || data_offs >= 
i_size_read(inode)) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid data_offs %d, stash file size %llu", +- conn->owner, conn->device_id, ctx->inum, +- le32_to_cpu(head->data_offs), i_size_read(inode)); +- goto out; +- } +- +- isize = le64_to_cpu(head->size); +- if (isize != i_size_read(inode)) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid isize: got %llu, exp %llu", +- conn->owner, conn->device_id, ctx->inum, +- le64_to_cpu(head->size), i_size_read(inode)); +- goto out; +- } +- +- if (le32_to_cpu(head->path_cnt) < 1) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx invalid path_cnt %d", +- conn->owner, conn->device_id, ctx->inum, +- le32_to_cpu(head->path_cnt)); +- goto out; +- } +- +-out: +- return err; +-} +- +-static int hmdfs_get_restore_file_metadata(struct hmdfs_file_restore_ctx *ctx) +-{ +- struct hmdfs_cache_file_head head; +- struct hmdfs_peer *conn = ctx->conn; +- unsigned int head_size, read_size, head_crc_offset; +- loff_t pos; +- ssize_t rd; +- int err = 0; +- +- head_size = sizeof(struct hmdfs_cache_file_head); +- memset(&head, 0, head_size); +- /* Read part head */ +- pos = 0; +- read_size = offsetof(struct hmdfs_cache_file_head, crc_offset) + +- sizeof(head.crc_offset); +- rd = kernel_read(ctx->src_filp, &head, read_size, &pos); +- if (rd != read_size) { +- err = rd < 0 ? rd : -ENODATA; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx read part head err %d", +- conn->owner, conn->device_id, ctx->inum, err); +- goto out; +- } +- head_crc_offset = le32_to_cpu(head.crc_offset); +- if (head_crc_offset + sizeof(head.crc32) < head_crc_offset || +- head_crc_offset + sizeof(head.crc32) > head_size) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx got bad head: Too long crc_offset %u which exceeds head size %u", +- conn->owner, conn->device_id, ctx->inum, +- head_crc_offset, head_size); +- goto out; +- } +- +- /* Read full head */ +- pos = 0; +- read_size = le32_to_cpu(head.crc_offset) + sizeof(head.crc32); +- rd = kernel_read(ctx->src_filp, &head, read_size, &pos); +- if (rd != read_size) { +- err = rd < 0 ? rd : -ENODATA; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx read full head err %d", +- conn->owner, conn->device_id, ctx->inum, err); +- goto out; +- } +- +- err = hmdfs_verify_restore_file_head(ctx, &head); +- if (err) +- goto out; +- +- ctx->pages = le64_to_cpu(head.blocks) >> +- HMDFS_STASH_PAGE_TO_SECTOR_SHIFT; +- ctx->data_offs = le32_to_cpu(head.data_offs); +- /* Read path */ +- read_size = min_t(unsigned int, le32_to_cpu(head.path_len), PATH_MAX); +- pos = (loff_t)le32_to_cpu(head.path_offs) << HMDFS_STASH_BLK_SHIFT; +- rd = kernel_read(ctx->src_filp, ctx->dst, read_size, &pos); +- if (rd != read_size) { +- err = rd < 0 ? rd : -ENODATA; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx read path err %d", +- conn->owner, conn->device_id, ctx->inum, err); +- goto out; +- } +- if (strnlen(ctx->dst, read_size) >= read_size) { +- err = -EUCLEAN; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx read path not end with \\0", +- conn->owner, conn->device_id, ctx->inum); +- goto out; +- } +- /* TODO: Pick a valid path from all paths */ +- +-out: +- return err; +-} +- +-static int hmdfs_open_restore_dst_file(struct hmdfs_file_restore_ctx *ctx, +- unsigned int rw_flag, struct file **filp) +-{ +- struct hmdfs_peer *conn = ctx->conn; +- struct file *dst = NULL; +- int err = 0; +- +- err = hmdfs_get_restore_file_metadata(ctx); +- if (err) +- goto out; +- +- /* Error comes from connection or server ? 
*/ +- dst = file_open_root(&ctx->dst_root_path, +- ctx->dst, O_LARGEFILE | rw_flag, 0); +- if (IS_ERR(dst)) { +- err = PTR_ERR(dst); +- hmdfs_err("open remote file ino 0x%llx err %d", ctx->inum, err); +- if (hmdfs_is_node_offlined(conn, ctx->seq)) +- err = -ESHUTDOWN; +- goto out; +- } +- +- *filp = dst; +-out: +- return err; +-} +- +-static bool hmdfs_need_abort_restore(struct hmdfs_file_restore_ctx *ctx, +- struct hmdfs_inode_info *pinned, +- struct file *opened_file) +-{ +- struct hmdfs_inode_info *opened = hmdfs_i(file_inode(opened_file)); +- +- if (opened->inode_type != HMDFS_LAYER_OTHER_REMOTE) +- goto abort; +- +- if (opened == pinned) +- return false; +- +-abort: +- hmdfs_warning("peer 0x%x:0x%llx inode 0x%llx invalid remote file", +- ctx->conn->owner, ctx->conn->device_id, ctx->inum); +- hmdfs_warning("got: peer 0x%x:0x%llx inode 0x%llx type %d status %d", +- opened->conn ? opened->conn->owner : 0, +- opened->conn ? opened->conn->device_id : 0, +- opened->remote_ino, opened->inode_type, +- opened->stash_status); +- hmdfs_warning("pinned: peer 0x%x:0x%llx inode 0x%llx type %d status %d", +- pinned->conn->owner, pinned->conn->device_id, +- pinned->remote_ino, pinned->inode_type, +- pinned->stash_status); +- return true; +-} +- +-static void hmdfs_init_copy_args(const struct hmdfs_file_restore_ctx *ctx, +- struct file *dst, struct hmdfs_copy_args *args) +-{ +- args->src = ctx->src_filp; +- args->dst = dst; +- args->buf = ctx->page; +- args->buf_len = PAGE_SIZE; +- args->seq = ctx->seq; +- args->data_offs = ctx->data_offs; +- args->inum = ctx->inum; +-} +- +-static ssize_t hmdfs_write_dst(struct hmdfs_peer *conn, struct file *filp, +- void *buf, size_t len, loff_t pos) +-{ +- struct kiocb kiocb; +- struct iovec iov; +- struct iov_iter iter; +- ssize_t wr; +- int err = 0; +- +- file_start_write(filp); +- +- init_sync_kiocb(&kiocb, filp); +- kiocb.ki_pos = pos; +- +- iov.iov_base = buf; +- iov.iov_len = len; +- iov_iter_init(&iter, WRITE, &iov, 1, len); +- +- wr = hmdfs_file_write_iter_remote_nocheck(&kiocb, &iter); +- +- file_end_write(filp); +- +- if (wr != len) { +- struct hmdfs_inode_info *info = hmdfs_i(file_inode(filp)); +- +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx short write ret %zd exp %zu", +- conn->owner, conn->device_id, info->remote_ino, +- wr, len); +- err = wr < 0 ? 
(int)wr : -EFAULT; +- } +- +- return err; +-} +- +-static int hmdfs_rd_src_wr_dst(struct hmdfs_peer *conn, +- struct hmdfs_copy_ctx *ctx) +-{ +- const struct hmdfs_copy_args *args = NULL; +- int err = 0; +- loff_t rd_pos; +- ssize_t rd; +- +- ctx->eof = false; +- ctx->copied = 0; +- +- args = &ctx->args; +- rd_pos = ctx->src_pos; +- rd = kernel_read(args->src, args->buf, args->buf_len, &rd_pos); +- if (rd < 0) { +- err = (int)rd; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx short read err %d", +- conn->owner, conn->device_id, args->inum, err); +- goto out; +- } else if (rd == 0) { +- ctx->eof = true; +- goto out; +- } +- +- err = hmdfs_write_dst(conn, args->dst, args->buf, rd, ctx->dst_pos); +- if (!err) +- ctx->copied = rd; +- else if (hmdfs_is_node_offlined(conn, args->seq)) +- err = -ESHUTDOWN; +-out: +- return err; +-} +- +-static int hmdfs_copy_src_to_dst(struct hmdfs_peer *conn, +- const struct hmdfs_copy_args *args) +-{ +- int err = 0; +- struct file *src = NULL; +- struct hmdfs_copy_ctx ctx; +- loff_t seek_pos, data_init_pos; +- loff_t src_size; +- +- ctx.args = *args; +- +- src = ctx.args.src; +- data_init_pos = (loff_t)ctx.args.data_offs << HMDFS_STASH_BLK_SHIFT; +- seek_pos = data_init_pos; +- src_size = i_size_read(file_inode(src)); +- while (true) { +- loff_t data_pos; +- +- data_pos = vfs_llseek(src, seek_pos, SEEK_DATA); +- if (data_pos > seek_pos) { +- seek_pos = data_pos; +- continue; +- } else if (data_pos < 0) { +- if (data_pos == -ENXIO) { +- loff_t src_blks = file_inode(src)->i_blocks; +- +- hmdfs_info("peer 0x%x:0x%llx ino 0x%llx end at 0x%llx (sz 0x%llx blk 0x%llx)", +- conn->owner, conn->device_id, +- args->inum, seek_pos, +- src_size, src_blks); +- } else { +- err = (int)data_pos; +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx seek pos 0x%llx err %d", +- conn->owner, conn->device_id, +- args->inum, seek_pos, err); +- } +- break; +- } +- +- hmdfs_debug("peer 0x%x:0x%llx ino 0x%llx seek to 0x%llx", +- conn->owner, conn->device_id, args->inum, data_pos); +- +- ctx.src_pos = data_pos; +- ctx.dst_pos = data_pos - data_init_pos; +- err = hmdfs_rd_src_wr_dst(conn, &ctx); +- if (err || ctx.eof) +- break; +- +- seek_pos += ctx.copied; +- if (seek_pos >= src_size) +- break; +- } +- +- return err; +-} +- +-static int hmdfs_restore_src_to_dst(struct hmdfs_file_restore_ctx *ctx, +- struct file *dst) +-{ +- struct file *src = ctx->src_filp; +- struct hmdfs_copy_args args; +- int err; +- +- hmdfs_init_copy_args(ctx, dst, &args); +- err = hmdfs_copy_src_to_dst(ctx->conn, &args); +- if (err) +- goto out; +- +- err = vfs_fsync(dst, 0); +- if (err) { +- hmdfs_err("fsync remote file ino 0x%llx err %d", ctx->inum, err); +- if (hmdfs_is_node_offlined(ctx->conn, ctx->seq)) +- err = -ESHUTDOWN; +- } +- +-out: +- if (err) +- truncate_inode_pages(file_inode(dst)->i_mapping, 0); +- +- /* Remove the unnecessary cache */ +- invalidate_mapping_pages(file_inode(src)->i_mapping, 0, -1); +- +- return err; +-} +- +- +-static int hmdfs_restore_file(struct hmdfs_file_restore_ctx *ctx) +-{ +- struct hmdfs_peer *conn = ctx->conn; +- uint64_t inum = ctx->inum; +- struct hmdfs_inode_info *pinned_info = NULL; +- struct file *dst_filp = NULL; +- int err = 0; +- bool keep = false; +- +- hmdfs_info("peer 0x%x:0x%llx ino 0x%llx do restore", +- conn->owner, conn->device_id, inum); +- +- pinned_info = hmdfs_lookup_stash_inode(conn, inum); +- if (pinned_info) { +- unsigned int status = READ_ONCE(pinned_info->stash_status); +- +- if (status != HMDFS_REMOTE_INODE_RESTORING) { +- hmdfs_err("peer 0x%x:0x%llx ino 0x%llx 
invalid status %u", +- conn->owner, conn->device_id, inum, status); +- err = -EINVAL; +- goto clean; +- } +- } else { +- hmdfs_warning("peer 0x%x:0x%llx ino 0x%llx doesn't being pinned", +- conn->owner, conn->device_id, inum); +- err = -EINVAL; +- goto clean; +- } +- +- set_bit(HMDFS_FID_NEED_OPEN, &pinned_info->fid_flags); +- err = hmdfs_open_restore_dst_file(ctx, O_RDWR, &dst_filp); +- if (err) { +- if (err == -ESHUTDOWN) +- keep = true; +- goto clean; +- } +- +- if (hmdfs_need_abort_restore(ctx, pinned_info, dst_filp)) +- goto abort; +- +- err = hmdfs_restore_src_to_dst(ctx, dst_filp); +- if (err == -ESHUTDOWN) +- keep = true; +-abort: +- fput(dst_filp); +-clean: +- if (pinned_info && !keep) +- hmdfs_reset_stashed_inode(conn, pinned_info); +- ctx->keep = keep; +- +- hmdfs_info("peer 0x%x:0x%llx ino 0x%llx restore err %d keep %d", +- conn->owner, conn->device_id, inum, err, ctx->keep); +- +- return err; +-} +- +-static int hmdfs_init_file_restore_ctx(struct hmdfs_peer *conn, +- unsigned int seq, struct path *src_dir, +- struct hmdfs_file_restore_ctx *ctx) +-{ +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct path dst_root; +- char *dst = NULL; +- char *page = NULL; +- int err = 0; +- +- err = hmdfs_get_path_in_sb(sbi->sb, sbi->real_dst, LOOKUP_DIRECTORY, +- &dst_root); +- if (err) +- return err; +- +- dst = kmalloc(PATH_MAX, GFP_KERNEL); +- if (!dst) { +- err = -ENOMEM; +- goto put_path; +- } +- +- page = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!page) { +- err = -ENOMEM; +- goto free_dst; +- } +- +- ctx->conn = conn; +- ctx->src_dir_path = *src_dir; +- ctx->dst_root_path = dst_root; +- ctx->dst = dst; +- ctx->page = page; +- ctx->seq = seq; +- +- return 0; +-free_dst: +- kfree(dst); +-put_path: +- path_put(&dst_root); +- return err; +-} +- +-static void hmdfs_exit_file_restore_ctx(struct hmdfs_file_restore_ctx *ctx) +-{ +- path_put(&ctx->dst_root_path); +- kfree(ctx->dst); +- kfree(ctx->page); +-} +- +-static struct file *hmdfs_open_stash_file(struct path *p_path, char *name) +-{ +- struct dentry *parent = NULL; +- struct inode *dir = NULL; +- struct dentry *child = NULL; +- struct file *filp = NULL; +- struct path c_path; +- int err = 0; +- +- parent = p_path->dentry; +- dir = d_inode(parent); +- inode_lock_nested(dir, I_MUTEX_PARENT); +- child = lookup_one_len(name, parent, strlen(name)); +- if (!IS_ERR(child) && !hmdfs_is_reg(child)) { +- if (d_is_positive(child)) { +- hmdfs_err("invalid stash file (mode 0%o)", +- d_inode(child)->i_mode); +- err = -EINVAL; +- } else { +- hmdfs_err("missing stash file"); +- err = -ENOENT; +- } +- dput(child); +- } else if (IS_ERR(child)) { +- err = PTR_ERR(child); +- hmdfs_err("lookup stash file err %d", err); +- } +- inode_unlock(dir); +- +- if (err) +- return ERR_PTR(err); +- +- c_path.mnt = p_path->mnt; +- c_path.dentry = child; +- filp = dentry_open(&c_path, O_RDONLY | O_LARGEFILE, current_cred()); +- if (IS_ERR(filp)) +- hmdfs_err("open stash file err %d", (int)PTR_ERR(filp)); +- +- dput(child); +- +- return filp; +-} +- +-static void hmdfs_update_restore_stats(struct hmdfs_restore_stats *stats, +- bool keep, uint64_t pages, int err) +-{ +- if (!err) { +- stats->succeed++; +- stats->ok_pages += pages; +- } else if (keep) { +- stats->keep++; +- } else { +- stats->fail++; +- stats->fail_pages += pages; +- } +-} +- +-static int hmdfs_restore_files(struct hmdfs_peer *conn, +- unsigned int seq, struct path *dir, +- const struct hmdfs_inode_tbl *tbl, +- void *priv) +-{ +- unsigned int i; +- struct hmdfs_file_restore_ctx ctx; +- int err = 0; +- struct 
hmdfs_restore_stats *stats = priv; +- +- err = hmdfs_init_file_restore_ctx(conn, seq, dir, &ctx); +- if (err) +- return err; +- +- for (i = 0; i < tbl->cnt; i++) { +- char name[HMDFS_STASH_FILE_NAME_LEN]; +- struct file *filp = NULL; +- +- snprintf(name, sizeof(name), "0x%llx", tbl->inodes[i]); +- filp = hmdfs_open_stash_file(dir, name); +- /* Continue to restore if any error */ +- if (IS_ERR(filp)) { +- stats->fail++; +- continue; +- } +- +- ctx.inum = tbl->inodes[i]; +- ctx.src_filp = filp; +- ctx.keep = false; +- ctx.pages = 0; +- err = hmdfs_restore_file(&ctx); +- hmdfs_update_restore_stats(stats, ctx.keep, ctx.pages, err); +- +- if (!ctx.keep) +- hmdfs_del_stash_file(dir->dentry, +- file_dentry(ctx.src_filp)); +- fput(ctx.src_filp); +- +- /* Continue to restore */ +- if (err == -ESHUTDOWN) +- break; +- err = 0; +- } +- +- hmdfs_exit_file_restore_ctx(&ctx); +- +- return err; +-} +- +-static bool hmdfs_is_valid_stash_status(struct hmdfs_inode_info *inode_info, +- uint64_t ino) +-{ +- return (inode_info->inode_type == HMDFS_LAYER_OTHER_REMOTE && +- inode_info->stash_status == HMDFS_REMOTE_INODE_RESTORING && +- inode_info->remote_ino == ino); +-} +- +-static int hmdfs_rebuild_stash_list(struct hmdfs_peer *conn, +- unsigned int seq, +- struct path *dir, +- const struct hmdfs_inode_tbl *tbl, +- void *priv) +-{ +- struct hmdfs_file_restore_ctx ctx; +- unsigned int i; +- int err; +- struct hmdfs_rebuild_stats *stats = priv; +- +- err = hmdfs_init_file_restore_ctx(conn, seq, dir, &ctx); +- if (err) +- return err; +- +- stats->total += tbl->cnt; +- +- for (i = 0; i < tbl->cnt; i++) { +- char name[HMDFS_STASH_FILE_NAME_LEN]; +- struct file *src_filp = NULL; +- struct file *dst_filp = NULL; +- struct hmdfs_inode_info *inode_info = NULL; +- bool is_valid = true; +- +- snprintf(name, sizeof(name), "0x%llx", tbl->inodes[i]); +- src_filp = hmdfs_open_stash_file(dir, name); +- if (IS_ERR(src_filp)) { +- stats->fail++; +- continue; +- } +- ctx.inum = tbl->inodes[i]; +- ctx.src_filp = src_filp; +- +- /* No need to track the open which only needs meta info */ +- err = hmdfs_open_restore_dst_file(&ctx, O_RDONLY, &dst_filp); +- if (err) { +- fput(src_filp); +- if (err == -ESHUTDOWN) +- break; +- stats->fail++; +- err = 0; +- continue; +- } +- +- inode_info = hmdfs_i(file_inode(dst_filp)); +- is_valid = hmdfs_is_valid_stash_status(inode_info, +- ctx.inum); +- if (is_valid) { +- stats->succeed++; +- } else { +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx invalid state: type: %d, status: %u, inode: %llu", +- conn->owner, conn->device_id, ctx.inum, +- inode_info->inode_type, +- READ_ONCE(inode_info->stash_status), +- inode_info->remote_ino); +- stats->invalid++; +- } +- +- fput(ctx.src_filp); +- fput(dst_filp); +- } +- +- hmdfs_exit_file_restore_ctx(&ctx); +- return err; +-} +- +-static int hmdfs_iter_stash_file(struct hmdfs_peer *conn, +- unsigned int seq, +- struct file *filp, +- stash_operation_func op, +- void *priv) +-{ +- int err = 0; +- struct hmdfs_stash_dir_context ctx = { +- .dctx.actor = hmdfs_fill_stash_file, +- }; +- struct hmdfs_inode_tbl *tbl = NULL; +- struct path dir; +- +- err = hmdfs_new_inode_tbl(&tbl); +- if (err) +- goto out; +- +- dir.mnt = filp->f_path.mnt; +- dir.dentry = file_dentry(filp); +- +- ctx.tbl = tbl; +- ctx.dctx.pos = 0; +- do { +- tbl->cnt = 0; +- err = iterate_dir(filp, &ctx.dctx); +- if (err || !tbl->cnt) { +- if (err) +- hmdfs_err("iterate stash dir err %d", err); +- break; +- } +- err = op(conn, seq, &dir, tbl, priv); +- } while (!err); +- +-out: +- kfree(tbl); +- return 
err; +-} +- +-static void hmdfs_rebuild_check_work_fn(struct work_struct *base) +-{ +- struct hmdfs_check_work *work = +- container_of(base, struct hmdfs_check_work, work); +- struct hmdfs_peer *conn = work->conn; +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct file *filp = NULL; +- const struct cred *old_cred = NULL; +- struct hmdfs_stash_dir_context ctx = { +- .dctx.actor = hmdfs_has_stash_file, +- }; +- struct hmdfs_inode_tbl tbl; +- int err; +- +- old_cred = hmdfs_override_creds(sbi->cred); +- filp = hmdfs_open_stash_dir(&sbi->stash_work_dir, conn->cid); +- if (IS_ERR(filp)) +- goto out; +- +- memset(&tbl, 0, sizeof(tbl)); +- ctx.tbl = &tbl; +- err = iterate_dir(filp, &ctx.dctx); +- if (!err && ctx.tbl->cnt > 0) +- conn->need_rebuild_stash_list = true; +- +- fput(filp); +-out: +- hmdfs_revert_creds(old_cred); +- hmdfs_info("peer 0x%x:0x%llx %sneed to rebuild stash list", +- conn->owner, conn->device_id, +- conn->need_rebuild_stash_list ? "" : "don't "); +- complete(&work->done); +-} +- +-static void hmdfs_stash_add_do_check(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct hmdfs_check_work work = { +- .conn = conn, +- .done = COMPLETION_INITIALIZER_ONSTACK(work.done), +- }; +- +- if (!hmdfs_is_stash_enabled(sbi)) +- return; +- +- INIT_WORK_ONSTACK(&work.work, hmdfs_rebuild_check_work_fn); +- schedule_work(&work.work); +- wait_for_completion(&work.done); +-} +- +-static void +-hmdfs_update_peer_rebuild_stats(struct hmdfs_rebuild_statistics *rebuild_stats, +- const struct hmdfs_rebuild_stats *stats) +-{ +- rebuild_stats->cur_ok = stats->succeed; +- rebuild_stats->cur_fail = stats->fail; +- rebuild_stats->cur_invalid = stats->invalid; +- rebuild_stats->total_ok += stats->succeed; +- rebuild_stats->total_fail += stats->fail; +- rebuild_stats->total_invalid += stats->invalid; +-} +- +-/* rebuild stash inode list */ +-static void hmdfs_stash_online_prepare(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct file *filp = NULL; +- const struct cred *old_cred = NULL; +- int err; +- struct hmdfs_rebuild_stats stats; +- +- if (!hmdfs_is_stash_enabled(sbi) || +- !conn->need_rebuild_stash_list) +- return; +- +- /* release seq_lock to prevent blocking no-online sync cb */ +- mutex_unlock(&conn->seq_lock); +- old_cred = hmdfs_override_creds(sbi->cred); +- filp = hmdfs_open_stash_dir(&sbi->stash_work_dir, conn->cid); +- if (IS_ERR(filp)) +- goto out; +- +- memset(&stats, 0, sizeof(stats)); +- err = hmdfs_iter_stash_file(conn, seq, filp, +- hmdfs_rebuild_stash_list, &stats); +- if (err == -ESHUTDOWN) { +- hmdfs_info("peer 0x%x:0x%llx offline again during rebuild", +- conn->owner, conn->device_id); +- } else { +- WRITE_ONCE(conn->need_rebuild_stash_list, false); +- if (err) +- hmdfs_warning("partial rebuild fail err %d", err); +- } +- +- hmdfs_update_peer_rebuild_stats(&conn->stats.rebuild, &stats); +- hmdfs_info("peer 0x%x:0x%llx rebuild stashed-file total %u succeed %u fail %u invalid %u", +- conn->owner, conn->device_id, stats.total, stats.succeed, +- stats.fail, stats.invalid); +- fput(filp); +-out: +- conn->stats.rebuild.time++; +- hmdfs_revert_creds(old_cred); +- if (!READ_ONCE(conn->need_rebuild_stash_list)) { +- /* +- * Use smp_mb__before_atomic() to ensure order between +- * writing @conn->need_rebuild_stash_list and +- * reading conn->rebuild_inode_status_nr. 
+- */ +- smp_mb__before_atomic(); +- /* +- * Wait until all inodes finish rebuilding stash status before +- * accessing @conn->stashed_inode_list in restoring. +- */ +- wait_event(conn->rebuild_inode_status_wq, +- !atomic_read(&conn->rebuild_inode_status_nr)); +- } +- mutex_lock(&conn->seq_lock); +-} +- +-static void +-hmdfs_update_peer_restore_stats(struct hmdfs_restore_statistics *restore_stats, +- const struct hmdfs_restore_stats *stats) +-{ +- restore_stats->cur_ok = stats->succeed; +- restore_stats->cur_fail = stats->fail; +- restore_stats->cur_keep = stats->keep; +- restore_stats->total_ok += stats->succeed; +- restore_stats->total_fail += stats->fail; +- restore_stats->total_keep += stats->keep; +- restore_stats->ok_pages += stats->ok_pages; +- restore_stats->fail_pages += stats->fail_pages; +-} +- +-static void hmdfs_stash_online_do_restore(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- struct hmdfs_sb_info *sbi = conn->sbi; +- struct file *filp = NULL; +- const struct cred *old_cred = NULL; +- struct hmdfs_restore_stats stats; +- int err = 0; +- +- if (!hmdfs_is_stash_enabled(sbi) || conn->need_rebuild_stash_list) { +- if (conn->need_rebuild_stash_list) +- hmdfs_info("peer 0x%x:0x%llx skip restoring due to rebuild-need", +- conn->owner, conn->device_id); +- return; +- } +- +- /* release seq_lock to prevent blocking no-online sync cb */ +- mutex_unlock(&conn->seq_lock); +- /* For dir iteration, file read and unlink */ +- old_cred = hmdfs_override_creds(conn->sbi->cred); +- +- memset(&stats, 0, sizeof(stats)); +- filp = hmdfs_open_stash_dir(&sbi->stash_work_dir, conn->cid); +- if (IS_ERR(filp)) { +- err = PTR_ERR(filp); +- goto out; +- } +- +- err = hmdfs_iter_stash_file(conn, seq, filp, +- hmdfs_restore_files, &stats); +- +- fput(filp); +-out: +- hmdfs_revert_creds(old_cred); +- +- /* offline again ? 
*/ +- if (err != -ESHUTDOWN) +- hmdfs_drop_stashed_inodes(conn); +- +- hmdfs_update_peer_restore_stats(&conn->stats.restore, &stats); +- hmdfs_info("peer 0x%x:0x%llx restore stashed-file ok %u fail %u keep %u", +- conn->owner, conn->device_id, +- stats.succeed, stats.fail, stats.keep); +- +- mutex_lock(&conn->seq_lock); +-} +- +-static void hmdfs_stash_del_do_cleanup(struct hmdfs_peer *conn, int evt, +- unsigned int seq) +-{ +- struct hmdfs_inode_info *info = NULL; +- struct hmdfs_inode_info *next = NULL; +- unsigned int preparing; +- +- if (!hmdfs_is_stash_enabled(conn->sbi)) +- return; +- +- /* Async cb is cancelled */ +- preparing = 0; +- list_for_each_entry_safe(info, next, &conn->wr_opened_inode_list, +- wr_opened_node) { +- int status = READ_ONCE(info->stash_status); +- +- if (status == HMDFS_REMOTE_INODE_STASHING) { +- struct hmdfs_cache_info *cache = NULL; +- +- spin_lock(&info->stash_lock); +- cache = info->cache; +- info->cache = NULL; +- info->stash_status = HMDFS_REMOTE_INODE_NONE; +- spin_unlock(&info->stash_lock); +- +- hmdfs_remote_del_wr_opened_inode(conn, info); +- hmdfs_del_file_cache(cache); +- /* put inode after all access are completed */ +- iput(&info->vfs_inode); +- preparing++; +- } +- } +- hmdfs_info("release %u preparing inodes", preparing); +- +- hmdfs_info("release %u pinned inodes", conn->stashed_inode_nr); +- if (list_empty(&conn->stashed_inode_list)) +- return; +- +- list_for_each_entry_safe(info, next, +- &conn->stashed_inode_list, stash_node) +- hmdfs_untrack_stashed_inode(conn, info); +-} +- +-void hmdfs_exit_stash(struct hmdfs_sb_info *sbi) +-{ +- if (!sbi->s_offline_stash) +- return; +- +- if (sbi->stash_work_dir.dentry) { +- path_put(&sbi->stash_work_dir); +- sbi->stash_work_dir.dentry = NULL; +- } +-} +- +-int hmdfs_init_stash(struct hmdfs_sb_info *sbi) +-{ +- int err = 0; +- struct path parent; +- struct dentry *child = NULL; +- +- if (!sbi->s_offline_stash) +- return 0; +- +- err = kern_path(sbi->cache_dir, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, +- &parent); +- if (err) { +- hmdfs_err("invalid cache dir err %d", err); +- goto out; +- } +- +- child = hmdfs_stash_new_work_dir(parent.dentry); +- if (!IS_ERR(child)) { +- sbi->stash_work_dir.mnt = mntget(parent.mnt); +- sbi->stash_work_dir.dentry = child; +- } else { +- err = PTR_ERR(child); +- hmdfs_err("create stash work dir err %d", err); +- } +- +- path_put(&parent); +-out: +- return err; +-} +- +-static int hmdfs_stash_write_local_file(struct hmdfs_peer *conn, +- struct hmdfs_inode_info *info, +- struct hmdfs_writepage_context *ctx, +- struct hmdfs_cache_info *cache) +-{ +- struct page *page = ctx->page; +- const struct cred *old_cred = NULL; +- void *buf = NULL; +- loff_t pos; +- unsigned int flags; +- ssize_t written; +- int err = 0; +- +- buf = kmap(page); +- pos = (loff_t)page->index << PAGE_SHIFT; +- /* enable NOFS for memory allocation */ +- flags = memalloc_nofs_save(); +- old_cred = hmdfs_override_creds(conn->sbi->cred); +- pos += cache->data_offs << HMDFS_STASH_BLK_SHIFT; +- written = kernel_write(cache->cache_file, buf, ctx->count, &pos); +- hmdfs_revert_creds(old_cred); +- memalloc_nofs_restore(flags); +- kunmap(page); +- +- if (written != ctx->count) { +- hmdfs_err("stash peer 0x%x:0x%llx ino 0x%llx page 0x%lx data_offs 0x%x len %u err %zd", +- conn->owner, conn->device_id, info->remote_ino, +- page->index, cache->data_offs, ctx->count, written); +- err = -EIO; +- } +- +- return err; +-} +- +-int hmdfs_stash_writepage(struct hmdfs_peer *conn, +- struct hmdfs_writepage_context *ctx) +-{ +- 
struct inode *inode = ctx->page->mapping->host; +- struct hmdfs_inode_info *info = hmdfs_i(inode); +- struct hmdfs_cache_info *cache = NULL; +- int err; +- +- /* e.g. fail to create stash file */ +- cache = info->cache; +- if (!cache) +- return -EIO; +- +- err = hmdfs_stash_write_local_file(conn, info, ctx, cache); +- if (!err) { +- hmdfs_client_writepage_done(info, ctx); +- atomic64_inc(&cache->written_pgs); +- put_task_struct(ctx->caller); +- kfree(ctx); +- } +- atomic64_inc(&cache->to_write_pgs); +- +- return err; +-} +- +-static void hmdfs_stash_rebuild_status(struct hmdfs_peer *conn, +- struct inode *inode) +-{ +- char *path_str = NULL; +- struct hmdfs_inode_info *info = NULL; +- const struct cred *old_cred = NULL; +- struct path path; +- struct path *stash_path = NULL; +- int err = 0; +- +- path_str = kmalloc(HMDFS_STASH_PATH_LEN, GFP_KERNEL); +- if (!path_str) { +- err = -ENOMEM; +- return; +- } +- +- info = hmdfs_i(inode); +- err = snprintf(path_str, HMDFS_STASH_PATH_LEN, "%s/0x%llx", +- conn->cid, info->remote_ino); +- if (err >= HMDFS_STASH_PATH_LEN) { +- kfree(path_str); +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx too long name len", +- conn->owner, conn->device_id, info->remote_ino); +- return; +- } +- old_cred = hmdfs_override_creds(conn->sbi->cred); +- stash_path = &conn->sbi->stash_work_dir; +- err = vfs_path_lookup(stash_path->dentry, stash_path->mnt, +- path_str, 0, &path); +- hmdfs_revert_creds(old_cred); +- if (!err) { +- if (hmdfs_is_reg(path.dentry)) { +- WRITE_ONCE(info->stash_status, +- HMDFS_REMOTE_INODE_RESTORING); +- ihold(&info->vfs_inode); +- hmdfs_track_inode_locked(conn, info); +- } else { +- hmdfs_info("peer 0x%x:0x%llx inode 0x%llx unexpected stashed file mode 0%o", +- conn->owner, conn->device_id, +- info->remote_ino, +- d_inode(path.dentry)->i_mode); +- } +- +- path_put(&path); +- } else if (err && err != -ENOENT) { +- hmdfs_err("peer 0x%x:0x%llx inode 0x%llx find %s err %d", +- conn->owner, conn->device_id, info->remote_ino, +- path_str, err); +- } +- +- kfree(path_str); +-} +- +-static inline bool +-hmdfs_need_rebuild_inode_stash_status(struct hmdfs_peer *conn, umode_t mode) +-{ +- return hmdfs_is_stash_enabled(conn->sbi) && +- READ_ONCE(conn->need_rebuild_stash_list) && +- (S_ISREG(mode) || S_ISLNK(mode)); +-} +- +-void hmdfs_remote_init_stash_status(struct hmdfs_peer *conn, +- struct inode *inode, umode_t mode) +-{ +- if (!hmdfs_need_rebuild_inode_stash_status(conn, mode)) +- return; +- +- atomic_inc(&conn->rebuild_inode_status_nr); +- /* +- * Use smp_mb__after_atomic() to ensure order between writing +- * @conn->rebuild_inode_status_nr and reading +- * @conn->need_rebuild_stash_list. 
+- */ +- smp_mb__after_atomic(); +- if (READ_ONCE(conn->need_rebuild_stash_list)) +- hmdfs_stash_rebuild_status(conn, inode); +- if (atomic_dec_and_test(&conn->rebuild_inode_status_nr)) +- wake_up(&conn->rebuild_inode_status_wq); +-} +- +-static struct hmdfs_node_cb_desc stash_cb[] = { +- { +- .evt = NODE_EVT_OFFLINE, +- .sync = true, +- .fn = hmdfs_stash_offline_prepare, +- }, +- { +- .evt = NODE_EVT_OFFLINE, +- .sync = false, +- .fn = hmdfs_stash_offline_do_stash, +- }, +- { +- .evt = NODE_EVT_ADD, +- .sync = true, +- .fn = hmdfs_stash_add_do_check, +- }, +- { +- .evt = NODE_EVT_ONLINE, +- .sync = false, +- .fn = hmdfs_stash_online_prepare, +- }, +- { +- .evt = NODE_EVT_ONLINE, +- .sync = false, +- .fn = hmdfs_stash_online_do_restore, +- }, +- { +- .evt = NODE_EVT_DEL, +- .sync = true, +- .fn = hmdfs_stash_del_do_cleanup, +- }, +-}; +- +-void __init hmdfs_stash_add_node_evt_cb(void) +-{ +- hmdfs_node_add_evt_cb(stash_cb, ARRAY_SIZE(stash_cb)); +-} +- +diff --git a/fs/hmdfs/stash.h b/fs/hmdfs/stash.h +deleted file mode 100644 +index f38e737f9..000000000 +--- a/fs/hmdfs/stash.h ++++ /dev/null +@@ -1,25 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/hmdfs/stash.h +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#ifndef HMDFS_STASH_H +-#define HMDFS_STASH_H +- +-#include "hmdfs.h" +-#include "hmdfs_client.h" +- +-extern void hmdfs_stash_add_node_evt_cb(void); +- +-extern void hmdfs_exit_stash(struct hmdfs_sb_info *sbi); +-extern int hmdfs_init_stash(struct hmdfs_sb_info *sbi); +- +-extern int hmdfs_stash_writepage(struct hmdfs_peer *conn, +- struct hmdfs_writepage_context *ctx); +- +-extern void hmdfs_remote_init_stash_status(struct hmdfs_peer *conn, +- struct inode *inode, umode_t mode); +- +-#endif +diff --git a/fs/hmdfs/super.c b/fs/hmdfs/super.c +deleted file mode 100644 +index 7de0971ed..000000000 +--- a/fs/hmdfs/super.c ++++ /dev/null +@@ -1,187 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * fs/hmdfs/super.c +- * +- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +- +-#include "hmdfs.h" +- +-enum { +- OPT_RA_PAGES, +- OPT_LOCAL_DST, +- OPT_CACHE_DIR, +- OPT_CLOUD_DIR, +- OPT_S_CASE, +- OPT_VIEW_TYPE, +- OPT_CLOUD_DISK_TYPE, +- OPT_NO_OFFLINE_STASH, +- OPT_NO_DENTRY_CACHE, +- OPT_USER_ID, +- OPT_ERR, +-}; +- +-static match_table_t hmdfs_tokens = { +- { OPT_RA_PAGES, "ra_pages=%s" }, +- { OPT_LOCAL_DST, "local_dst=%s" }, +- { OPT_CACHE_DIR, "cache_dir=%s" }, +- { OPT_CLOUD_DIR, "cloud_dir=%s" }, +- { OPT_S_CASE, "sensitive" }, +- { OPT_VIEW_TYPE, "merge" }, +- { OPT_CLOUD_DISK_TYPE, "cloud_disk"}, +- { OPT_NO_OFFLINE_STASH, "no_offline_stash" }, +- { OPT_NO_DENTRY_CACHE, "no_dentry_cache" }, +- { OPT_USER_ID, "user_id=%s"}, +- { OPT_ERR, NULL }, +-}; +- +-#define DEAULT_RA_PAGES 128 +- +-void __hmdfs_log(const char *level, const bool ratelimited, +- const char *function, const char *fmt, ...) 
+-{ +- struct va_format vaf; +- va_list args; +- +- va_start(args, fmt); +- vaf.fmt = fmt; +- vaf.va = &args; +- if (ratelimited) +- printk_ratelimited("%s hmdfs: %s() %pV\n", level, +- function, &vaf); +- else +- printk("%s hmdfs: %s() %pV\n", level, function, &vaf); +- va_end(args); +-} +- +-static int hmdfs_match_strdup(const substring_t *s, char **dst) +-{ +- char *dup = NULL; +- +- dup = match_strdup(s); +- if (!dup) +- return -ENOMEM; +- +- if (*dst) +- kfree(*dst); +- *dst = dup; +- +- return 0; +-} +- +-int hmdfs_parse_options(struct hmdfs_sb_info *sbi, const char *data) +-{ +- char *p = NULL; +- char *name = NULL; +- char *options = NULL; +- char *options_src = NULL; +- substring_t args[MAX_OPT_ARGS]; +- unsigned long value = DEAULT_RA_PAGES; +- unsigned int user_id = 0; +- struct super_block *sb = sbi->sb; +- int err = 0; +- size_t size = 0; +- +- size = strlen(data); +- if (size >= HMDFS_PAGE_SIZE) { +- return -EINVAL; +- } +- +- options = kstrdup(data, GFP_KERNEL); +- if (data && !options) { +- err = -ENOMEM; +- goto out; +- } +- options_src = options; +- err = super_setup_bdi(sb); +- if (err) +- goto out; +- +- while ((p = strsep(&options_src, ",")) != NULL) { +- int token; +- +- if (!*p) +- continue; +- args[0].to = args[0].from = NULL; +- token = match_token(p, hmdfs_tokens, args); +- +- switch (token) { +- case OPT_RA_PAGES: +- name = match_strdup(&args[0]); +- if (name) { +- err = kstrtoul(name, 10, &value); +- kfree(name); +- name = NULL; +- if (err) +- goto out; +- } +- break; +- case OPT_LOCAL_DST: +- err = hmdfs_match_strdup(&args[0], &sbi->local_dst); +- if (err) +- goto out; +- break; +- case OPT_CACHE_DIR: +- err = hmdfs_match_strdup(&args[0], &sbi->cache_dir); +- if (err) +- goto out; +- break; +- case OPT_CLOUD_DIR: +- err = hmdfs_match_strdup(&args[0], &sbi->cloud_dir); +- if (err) +- goto out; +- break; +- case OPT_S_CASE: +- sbi->s_case_sensitive = true; +- break; +- case OPT_VIEW_TYPE: +- sbi->s_merge_switch = true; +- break; +- case OPT_CLOUD_DISK_TYPE: +- sbi->s_cloud_disk_switch = true; +- break; +- case OPT_NO_OFFLINE_STASH: +- sbi->s_offline_stash = false; +- break; +- case OPT_NO_DENTRY_CACHE: +- sbi->s_dentry_cache = false; +- break; +- case OPT_USER_ID: +- name = match_strdup(&args[0]); +- if (name) { +- err = kstrtouint(name, 10, &user_id); +- kfree(name); +- name = NULL; +- if (err) +- goto out; +- sbi->user_id = user_id; +- } +- break; +- default: +- err = -EINVAL; +- goto out; +- } +- } +-out: +- kfree(options); +- sb->s_bdi->ra_pages = value; +- if (sbi->local_dst == NULL) +- err = -EINVAL; +- +- if (sbi->s_offline_stash && !sbi->cache_dir) { +- hmdfs_warning("no cache_dir for offline stash"); +- sbi->s_offline_stash = false; +- } +- +- if (sbi->s_dentry_cache && !sbi->cache_dir) { +- hmdfs_warning("no cache_dir for dentry cache"); +- sbi->s_dentry_cache = false; +- } +- +- return err; +-} +diff --git a/fs/proc/Makefile b/fs/proc/Makefile +index 04a0dd725..bd08616ed 100644 +--- a/fs/proc/Makefile ++++ b/fs/proc/Makefile +@@ -34,4 +34,3 @@ proc-$(CONFIG_PROC_VMCORE) += vmcore.o + proc-$(CONFIG_PRINTK) += kmsg.o + proc-$(CONFIG_PROC_PAGE_MONITOR) += page.o + proc-$(CONFIG_BOOT_CONFIG) += bootconfig.o +-obj-$(CONFIG_MEMORY_SECURITY) += memory_security/ +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 1dd01a8ad..91fe20b76 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -54,11 +54,6 @@ + #include + #include + #include +- +-#ifdef CONFIG_QOS_CTRL +-#include +-#endif +- + #include + #include + #include +@@ -93,10 +88,6 @@ + #include + #include 
+ #include +-#include +-#ifdef CONFIG_SCHED_RTG +-#include +-#endif + #include + #include + #include +@@ -108,9 +99,6 @@ + #include + #include + #include +-#ifdef CONFIG_SCHED_RTG +-#include +-#endif + #include "internal.h" + #include "fd.h" + +@@ -1555,108 +1543,6 @@ static const struct file_operations proc_pid_sched_operations = { + + #endif + +-#ifdef CONFIG_QOS_CTRL +-long proc_qos_ctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +-{ +- return do_qos_ctrl_ioctl(QOS_IOCTL_ABI_AARCH64, file, cmd, arg); +-} +- +-#ifdef CONFIG_COMPAT +-long proc_qos_ctrl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +-{ +- return do_qos_ctrl_ioctl(QOS_IOCTL_ABI_ARM32, file, cmd, +- (unsigned long)(compat_ptr((compat_uptr_t)arg))); +-} +-#endif +- +-int proc_qos_ctrl_open(struct inode *inode, struct file *filp) +-{ +- return 0; +-} +- +-static const struct file_operations proc_qos_ctrl_operations = { +- .open = proc_qos_ctrl_open, +- .unlocked_ioctl = proc_qos_ctrl_ioctl, +-#ifdef CONFIG_COMPAT +- .compat_ioctl = proc_qos_ctrl_compat_ioctl, +-#endif +-}; +-#endif +- +-#ifdef CONFIG_SCHED_RTG +-static const struct file_operations proc_rtg_operations = { +- .open = proc_rtg_open, +- .unlocked_ioctl = proc_rtg_ioctl, +-#ifdef CONFIG_COMPAT +- .compat_ioctl = proc_rtg_compat_ioctl, +-#endif +-}; +-#endif +- +-#ifdef CONFIG_SCHED_RTG_DEBUG +-static int sched_group_id_show(struct seq_file *m, void *v) +-{ +- struct inode *inode = m->private; +- struct task_struct *p; +- +- p = get_proc_task(inode); +- if (!p) +- return -ESRCH; +- +- seq_printf(m, "%d\n", sched_get_group_id(p)); +- +- put_task_struct(p); +- +- return 0; +-} +- +-static ssize_t +-sched_group_id_write(struct file *file, const char __user *buf, +- size_t count, loff_t *offset) +-{ +- struct inode *inode = file_inode(file); +- struct task_struct *p; +- char buffer[PROC_NUMBUF]; +- int group_id, err; +- +- memset(buffer, 0, sizeof(buffer)); +- if (count > sizeof(buffer) - 1) +- count = sizeof(buffer) - 1; +- if (copy_from_user(buffer, buf, count)) { +- err = -EFAULT; +- goto out; +- } +- +- err = kstrtoint(strstrip(buffer), 0, &group_id); +- if (err) +- goto out; +- +- p = get_proc_task(inode); +- if (!p) +- return -ESRCH; +- +- err = sched_set_group_id(p, group_id); +- +- put_task_struct(p); +- +-out: +- return err < 0 ? 
err : count; +-} +- +-static int sched_group_id_open(struct inode *inode, struct file *filp) +-{ +- return single_open(filp, sched_group_id_show, inode); +-} +- +-static const struct file_operations proc_pid_sched_group_id_operations = { +- .open = sched_group_id_open, +- .read = seq_read, +- .write = sched_group_id_write, +- .llseek = seq_lseek, +- .release = single_release, +-}; +-#endif /* CONFIG_SCHED_RTG_DEBUG */ +- + #ifdef CONFIG_SCHED_AUTOGROUP + /* + * Print out autogroup related information: +@@ -1732,70 +1618,6 @@ static const struct file_operations proc_pid_sched_autogroup_operations = { + + #endif /* CONFIG_SCHED_AUTOGROUP */ + +-#ifdef CONFIG_SCHED_WALT +-static int sched_init_task_load_show(struct seq_file *m, void *v) +-{ +- struct inode *inode = m->private; +- struct task_struct *p; +- +- p = get_proc_task(inode); +- if (!p) +- return -ESRCH; +- +- seq_printf(m, "%d\n", sched_get_init_task_load(p)); +- +- put_task_struct(p); +- +- return 0; +-} +- +-static ssize_t +-sched_init_task_load_write(struct file *file, const char __user *buf, +- size_t count, loff_t *offset) +-{ +- struct inode *inode = file_inode(file); +- struct task_struct *p; +- char buffer[PROC_NUMBUF]; +- int init_task_load, err; +- +- memset(buffer, 0, sizeof(buffer)); +- if (count > sizeof(buffer) - 1) +- count = sizeof(buffer) - 1; +- if (copy_from_user(buffer, buf, count)) { +- err = -EFAULT; +- goto out; +- } +- +- err = kstrtoint(strstrip(buffer), 0, &init_task_load); +- if (err) +- goto out; +- +- p = get_proc_task(inode); +- if (!p) +- return -ESRCH; +- +- err = sched_set_init_task_load(p, init_task_load); +- +- put_task_struct(p); +- +-out: +- return err < 0 ? err : count; +-} +- +-static int sched_init_task_load_open(struct inode *inode, struct file *filp) +-{ +- return single_open(filp, sched_init_task_load_show, inode); +-} +- +-static const struct file_operations proc_pid_sched_init_task_load_operations = { +- .open = sched_init_task_load_open, +- .read = seq_read, +- .write = sched_init_task_load_write, +- .llseek = seq_lseek, +- .release = single_release, +-}; +-#endif /* CONFIG_SCHED_WALT */ +- + #ifdef CONFIG_TIME_NS + static int timens_offsets_show(struct seq_file *m, void *v) + { +@@ -3470,15 +3292,6 @@ static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns, + } + #endif /* CONFIG_STACKLEAK_METRICS */ + +-#ifdef CONFIG_ACCESS_TOKENID +-static int proc_token_operations(struct seq_file *m, struct pid_namespace *ns, +- struct pid *pid, struct task_struct *task) +-{ +- seq_printf(m, "%#llx %#llx\n", task->token, task->ftoken); +- return 0; +-} +-#endif /* CONFIG_ACCESS_TOKENID */ +- + /* + * Thread groups + */ +@@ -3499,9 +3312,6 @@ static const struct pid_entry tgid_base_stuff[] = { + ONE("status", S_IRUGO, proc_pid_status), + ONE("personality", S_IRUSR, proc_pid_personality), + ONE("limits", S_IRUGO, proc_pid_limits), +-#ifdef CONFIG_SCHED_WALT +- REG("sched_init_task_load", 00644, proc_pid_sched_init_task_load_operations), +-#endif + #ifdef CONFIG_SCHED_DEBUG + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif +@@ -3595,15 +3405,6 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_PROC_PID_ARCH_STATUS + ONE("arch_status", S_IRUGO, proc_pid_arch_status), + #endif +-#ifdef CONFIG_ACCESS_TOKENID +- ONE("tokenid", S_IRUSR, proc_token_operations), +-#endif +-#ifdef CONFIG_SCHED_RTG +- REG("sched_rtg_ctrl", S_IRUGO|S_IWUGO, proc_rtg_operations), +-#endif +-#ifdef CONFIG_SCHED_RTG_DEBUG +- REG("sched_group_id", S_IRUGO|S_IWUGO, 
proc_pid_sched_group_id_operations), +-#endif + #ifdef CONFIG_SECCOMP_CACHE_DEBUG + ONE("seccomp_cache", S_IRUSR, proc_pid_seccomp_cache), + #endif +@@ -3943,15 +3744,6 @@ static const struct pid_entry tid_base_stuff[] = { + #ifdef CONFIG_PROC_PID_ARCH_STATUS + ONE("arch_status", S_IRUGO, proc_pid_arch_status), + #endif +-#ifdef CONFIG_ACCESS_TOKENID +- ONE("tokenid", S_IRUSR, proc_token_operations), +-#endif +-#ifdef CONFIG_QOS_CTRL +- REG("sched_qos_ctrl", S_IRUGO|S_IWUGO, proc_qos_ctrl_operations), +-#endif +-#ifdef CONFIG_SCHED_RTG_DEBUG +- REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations), +-#endif + #ifdef CONFIG_SECCOMP_CACHE_DEBUG + ONE("seccomp_cache", S_IRUSR, proc_pid_seccomp_cache), + #endif +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index 875306ca2..45af9a989 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -17,9 +17,6 @@ + #ifdef CONFIG_CMA + #include + #endif +-#ifdef CONFIG_MEM_PURGEABLE +-#include +-#endif + #include + #include + #include "internal.h" +@@ -43,11 +40,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + unsigned long pages[NR_LRU_LISTS]; + unsigned long sreclaimable, sunreclaim; + int lru; +- unsigned long nr_purg_active = 0; +- unsigned long nr_purg_inactive = 0; +-#ifdef CONFIG_MEM_PURGEABLE +- unsigned long nr_purg_pined = 0; +-#endif + + si_meminfo(&i); + si_swapinfo(&i); +@@ -61,13 +53,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + +-#ifdef CONFIG_MEM_PURGEABLE +- nr_purg_active = pages[LRU_ACTIVE_PURGEABLE]; +- nr_purg_inactive = pages[LRU_INACTIVE_PURGEABLE]; +- purg_pages_info(NULL, &nr_purg_pined); +- nr_purg_pined = min(nr_purg_pined, nr_purg_active + nr_purg_inactive); +-#endif +- + available = si_mem_available(); + sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); + sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); +@@ -79,25 +64,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + show_val_kb(m, "Cached: ", cached); + show_val_kb(m, "SwapCached: ", total_swapcache_pages()); + show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + +-#ifdef CONFIG_MEM_PURGEABLE +- pages[LRU_ACTIVE_FILE] + +- nr_purg_active); +-#else +- pages[LRU_ACTIVE_FILE]); +-#endif +- ++ pages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + +- pages[LRU_INACTIVE_FILE] + +- nr_purg_inactive); ++ pages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); + show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); + show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); +-#ifdef CONFIG_MEM_PURGEABLE +- show_val_kb(m, "Active(purg): ", nr_purg_active); +- show_val_kb(m, "Inactive(purg): ", nr_purg_inactive); +- show_val_kb(m, "Pined(purg): ", nr_purg_pined); +-#endif + show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); + show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK)); + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index c8eaa37d0..b8640f36e 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -20,15 +20,11 @@ + #include + #include + #include +-#ifdef CONFIG_MEM_PURGEABLE +-#include +-#endif + + #include + #include + #include + #include "internal.h" +-#include + + #define SEQ_PUT_DEC(str, val) \ + seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8) +@@ -36,11 +32,6 @@ void 
task_mem(struct seq_file *m, struct mm_struct *mm) + { + unsigned long text, lib, swap, anon, file, shmem; + unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; +-#ifdef CONFIG_MEM_PURGEABLE +- unsigned long nr_purg_sum = 0, nr_purg_pin = 0; +- +- mm_purg_pages_info(mm, &nr_purg_sum, &nr_purg_pin); +-#endif + + anon = get_mm_counter(mm, MM_ANONPAGES); + file = get_mm_counter(mm, MM_FILEPAGES); +@@ -84,10 +75,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + seq_put_decimal_ull_width(m, + " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); + SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); +-#ifdef CONFIG_MEM_PURGEABLE +- SEQ_PUT_DEC(" kB\nPurgSum:\t", nr_purg_sum); +- SEQ_PUT_DEC(" kB\nPurgPin:\t", nr_purg_pin); +-#endif + seq_puts(m, " kB\n"); + hugetlb_report_usage(m, mm); + } +@@ -292,7 +279,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + + start = vma->vm_start; + end = vma->vm_end; +- CALL_HCK_LITE_HOOK(hideaddr_header_prefix_lhck, &start, &end, &flags, m, vma); + show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino); + if (mm) + anon_name = anon_vma_name(vma); +diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig +index 70dd4971d..3acc38600 100644 +--- a/fs/pstore/Kconfig ++++ b/fs/pstore/Kconfig +@@ -13,16 +13,6 @@ config PSTORE + If you don't have a platform persistent store driver, + say N. + +-config PSTORE_DEFLATE_COMPRESS +- tristate "DEFLATE (ZLIB) compression" +- default y +- depends on PSTORE +- select CRYPTO_DEFLATE +- help +- This option enables DEFLATE (also known as ZLIB) compression +- algorithm support. +- +- + config PSTORE_DEFAULT_KMSG_BYTES + int "Default kernel log storage space" if EXPERT + depends on PSTORE +@@ -44,26 +34,6 @@ config PSTORE_COMPRESS + blown crypto API. This reduces the risk of secondary oopses or other + problems while pstore is recording panic metadata. + +-choice +- prompt "Default pstore compression algorithm" +- depends on PSTORE_COMPRESS +- help +- This option chooses the default active compression algorithm. +- This change be changed at boot with "pstore.compress=..." on +- the kernel command line. +- +- Currently, pstore has support for 6 compression algorithms: +- deflate, lzo, lz4, lz4hc, 842 and zstd. +- +- The default compression algorithm is deflate. +- config PSTORE_DEFLATE_COMPRESS_DEFAULT +- bool "deflate" if PSTORE_DEFLATE_COMPRESS +-endchoice +-config PSTORE_COMPRESS_DEFAULT +- string +- depends on PSTORE_COMPRESS +- default "deflate" if PSTORE_DEFLATE_COMPRESS_DEFAULT +- + config PSTORE_CONSOLE + bool "Log kernel console messages" + depends on PSTORE +@@ -95,18 +65,6 @@ config PSTORE_FTRACE + + If unsure, say N. + +-config PSTORE_BLACKBOX +- bool "Store customised fault log" +- depends on PSTORE +- depends on BLACKBOX +- help +- Enable storing the customised fault log for BlackBox. +- +- With the option enabled, pstore will store the customised kernel +- fault log for BlackBox when oops or panic happened. +- +- If unsure, say N. +- + config PSTORE_RAM + tristate "Log panic/oops to a RAM buffer" + depends on PSTORE +@@ -231,24 +189,3 @@ config PSTORE_BLK_FTRACE_SIZE + + NOTE that, both Kconfig and module parameters can configure + pstore/blk, but module parameters have priority over Kconfig. +- +-config PSTORE_BLK_BLACKBOX_SIZE +- int "Size in Kbytes of fault log for BlackBox to store" +- depends on PSTORE_BLK +- depends on PSTORE_BLACKBOX +- default 64 +- help +- This just sets size of fault log (blackbox_size) for pstore/blk. +- The size is in KB and must be a multiple of 4. 
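
The sched_group_id and sched_init_task_load files removed from fs/proc/base.c above both follow the textbook single_open() attribute pattern: a seq_file show handler serves reads, and a small write handler copies a bounded buffer from user space and parses it with kstrtoint(). The same shape condensed into a standalone /proc entry, using the modern proc_ops interface; all names here are illustrative:

#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_value;

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", demo_value);
	return 0;
}

static int demo_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, demo_show, NULL);
}

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char kbuf[16];
	int val, err;

	if (count > sizeof(kbuf) - 1)	/* clamp, as the originals did */
		count = sizeof(kbuf) - 1;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';

	err = kstrtoint(strstrip(kbuf), 0, &val);
	if (err)
		return err;
	demo_value = val;
	return count;
}

static const struct proc_ops demo_proc_ops = {
	.proc_open	= demo_open,
	.proc_read	= seq_read,
	.proc_write	= demo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

Registration is a single call, e.g. proc_create("demo", 0644, NULL, &demo_proc_ops); the per-PID variants above instead hang their entries off the tgid_base_stuff/tid_base_stuff tables.
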
+- +- NOTE that, both Kconfig and module parameters can configure +- pstore/blk, but module parameters have priority over Kconfig. +- +-config PSTORE_BLACKBOX_STACK_SIZE +- int "Default stack size for BlackBox" if EXPERT +- depends on PSTORE +- depends on PSTORE_BLACKBOX +- default 1024 +- help +- Defines default size of pstore stack size for blackbox. +- Can be enlarged if needed. not recommended to shrink it. +diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h +index 824ef8e84..801d6c0b1 100644 +--- a/fs/pstore/internal.h ++++ b/fs/pstore/internal.h +@@ -49,7 +49,4 @@ extern void pstore_record_init(struct pstore_record *record, + int __init pstore_init_fs(void); + void __exit pstore_exit_fs(void); + +-#ifdef CONFIG_PSTORE_BLACKBOX +-extern bool pstore_ready; /* flag which pstore_blk is ready */ +-#endif + #endif +diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c +index 7ec9a3529..03425928d 100644 +--- a/fs/pstore/platform.c ++++ b/fs/pstore/platform.c +@@ -17,10 +17,6 @@ + #include + #include + #include +-#ifdef CONFIG_PSTORE_BLACKBOX +-#include +-#include +-#endif + #include + #include + #include +@@ -55,7 +51,6 @@ static const char * const pstore_type_names[] = { + "powerpc-common", + "pmsg", + "powerpc-opal", +- "blackbox", + }; + + static int pstore_new_entry; +@@ -275,110 +270,6 @@ void pstore_record_init(struct pstore_record *record, + record->time = ns_to_timespec64(ktime_get_real_fast_ns()); + } + +-/* +- * Store the customised fault log +- */ +-#ifdef CONFIG_PSTORE_BLACKBOX +-#define PSTORE_FLAG "PSTORE" +-#define CALLSTACK_MAX_ENTRIES 20 +-static void dump_stacktrace(char *pbuf, size_t buf_size, bool is_panic) +-{ +- int i; +- size_t stack_len = 0; +- size_t com_len = 0; +- unsigned long entries[CALLSTACK_MAX_ENTRIES]; +- unsigned int nr_entries; +- char tmp_buf[ERROR_DESC_MAX_LEN]; +- bool find_panic = false; +- +- if (unlikely(!pbuf || !buf_size)) +- return; +- memset(pbuf, 0, buf_size); +- memset(tmp_buf, 0, sizeof(tmp_buf)); +- nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); +- com_len = scnprintf(pbuf, buf_size, "Comm:%s,CPU:%d,Stack:", +- current->comm, raw_smp_processor_id()); +- for (i = 0; i < nr_entries; i++) { +- if (stack_len >= sizeof(tmp_buf)) { +- tmp_buf[sizeof(tmp_buf) - 1] = '\0'; +- break; +- } +- stack_len += scnprintf(tmp_buf + stack_len, sizeof(tmp_buf) - stack_len, +- "%pS-", (void *)entries[i]); +- if (!find_panic && is_panic) { +- if (strncmp(tmp_buf, "panic", strlen("panic")) == 0) +- find_panic = true; +- else +- (void)memset(tmp_buf, 0, sizeof(tmp_buf)); +- } +- } +- if (com_len >= buf_size) +- return; +- stack_len = min(buf_size - com_len, strlen(tmp_buf)); +- memcpy(pbuf + com_len, tmp_buf, stack_len); +- *(pbuf + buf_size - 1) = '\0'; +-} +- +-void pstore_blackbox_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) +-{ +- struct fault_log_info *pfault_log_info; +- struct pstore_record record; +- struct kmsg_dump_iter iter; +- size_t dst_size; +- const char *why; +- char *dst; +- unsigned long flags = 0; +- int ret; +- +-#if defined(CONFIG_PSTORE_BLK) || defined(CONFIG_PSTORE_RAM) +- if (!pstore_ready) +- return; +-#endif +- kmsg_dump_rewind(&iter); +- why = kmsg_dump_reason_str(reason); +- +- if (pstore_cannot_block_path(reason)) { +- if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { +- pr_err("dump skipped in %s path because of concurrent dump\n", +- in_nmi() ? 
"NMI" : why); +- return; +- } +- } else { +- spin_lock_irqsave(&psinfo->buf_lock, flags); +- } +- +- pfault_log_info = (struct fault_log_info *)psinfo->buf; +- +- memset(pfault_log_info, 0, sizeof(*pfault_log_info)); +- +- pstore_record_init(&record, psinfo); +- +- record.type = PSTORE_TYPE_BLACKBOX; +- record.reason = reason; +- +- memcpy(pfault_log_info->flag, LOG_FLAG, strlen(LOG_FLAG)); +- strncpy(pfault_log_info->info.event, why, +- min(strlen(why), sizeof(pfault_log_info->info.event) - 1)); +- strncpy(pfault_log_info->info.module, PSTORE_FLAG, +- min(strlen(PSTORE_FLAG), sizeof(pfault_log_info->info.module) - 1)); +- get_timestamp(pfault_log_info->info.error_time, TIMESTAMP_MAX_LEN); +- dump_stacktrace(pfault_log_info->info.error_desc, sizeof(pfault_log_info->info.error_desc), false); +- +- record.buf = psinfo->buf; +- dst = psinfo->buf; +- dst_size = psinfo->bufsize; +- +- dst_size -= sizeof(struct fault_log_info); +- (void)kmsg_dump_get_buffer(&iter, true, dst + sizeof(struct fault_log_info), dst_size, +- &(pfault_log_info->len)); +- +- record.size = sizeof(struct fault_log_info) + pfault_log_info->len; +- ret = psinfo->write(&record); +- spin_unlock_irqrestore(&psinfo->buf_lock, flags); +-} +-EXPORT_SYMBOL_GPL(pstore_blackbox_dump); +-#endif +- + /* + * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the + * end of the buffer. +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index 70a1ae218..88b34fdbf 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -45,14 +45,6 @@ static ulong ramoops_pmsg_size = MIN_MEM_SIZE; + module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400); + MODULE_PARM_DESC(pmsg_size, "size of user space message log"); + +-static ulong ramoops_blackbox_size = MIN_MEM_SIZE; +-module_param_named(blackbox_size, ramoops_blackbox_size, ulong, 0400); +-MODULE_PARM_DESC(blackbox_size, "size of blackbox log"); +-#if IS_ENABLED(CONFIG_PSTORE_BLACKBOX) +-bool pstore_ready; +-#endif +- +- + static unsigned long long mem_address; + module_param_hw(mem_address, ullong, other, 0400); + MODULE_PARM_DESC(mem_address, +@@ -90,7 +82,6 @@ struct ramoops_context { + struct persistent_ram_zone *cprz; /* Console zone */ + struct persistent_ram_zone **fprzs; /* Ftrace zones */ + struct persistent_ram_zone *mprz; /* PMSG zone */ +- struct persistent_ram_zone *bprz; /* BLACKBOX zone */ + phys_addr_t phys_addr; + unsigned long size; + unsigned int memtype; +@@ -98,7 +89,6 @@ struct ramoops_context { + size_t console_size; + size_t ftrace_size; + size_t pmsg_size; +- size_t blackbox_size; + u32 flags; + struct persistent_ram_ecc_info ecc_info; + unsigned int max_dump_cnt; +@@ -109,7 +99,6 @@ struct ramoops_context { + unsigned int max_ftrace_cnt; + unsigned int ftrace_read_cnt; + unsigned int pmsg_read_cnt; +- unsigned int blackbox_read_cnt; + struct pstore_info pstore; + }; + +@@ -123,7 +112,6 @@ static int ramoops_pstore_open(struct pstore_info *psi) + cxt->console_read_cnt = 0; + cxt->ftrace_read_cnt = 0; + cxt->pmsg_read_cnt = 0; +- cxt->blackbox_read_cnt = 0; + return 0; + } + +@@ -227,9 +215,6 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record) + if (!prz_ok(prz) && !cxt->pmsg_read_cnt++) + prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record); + +- if (!prz_ok(prz) && !cxt->blackbox_read_cnt++) +- prz = ramoops_get_next_prz(&cxt->bprz, 0 /* single */, record); +- + /* ftrace is last since it may want to dynamically allocate memory. 
*/ + if (!prz_ok(prz)) { + if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) && +@@ -351,11 +336,6 @@ static int notrace ramoops_pstore_write(struct pstore_record *record) + } else if (record->type == PSTORE_TYPE_PMSG) { + pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__); + return -EINVAL; +- } else if (record->type == PSTORE_TYPE_BLACKBOX) { +- if (!cxt->bprz) +- return -ENOMEM; +- persistent_ram_write(cxt->bprz, record->buf, record->size); +- return 0; + } + + if (record->type != PSTORE_TYPE_DMESG) +@@ -447,9 +427,6 @@ static int ramoops_pstore_erase(struct pstore_record *record) + case PSTORE_TYPE_PMSG: + prz = cxt->mprz; + break; +- case PSTORE_TYPE_BLACKBOX: +- prz = cxt->bprz; +- break; + default: + return -EINVAL; + } +@@ -710,7 +687,6 @@ static int ramoops_parse_dt(struct platform_device *pdev, + parse_u32("console-size", pdata->console_size, 0); + parse_u32("ftrace-size", pdata->ftrace_size, 0); + parse_u32("pmsg-size", pdata->pmsg_size, 0); +- parse_u32("blackbox-size", pdata->blackbox_size, 0); + parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0); + parse_u32("flags", pdata->flags, 0); + parse_u32("max-reason", pdata->max_reason, pdata->max_reason); +@@ -731,11 +707,9 @@ static int ramoops_parse_dt(struct platform_device *pdev, + parent_node = of_get_parent(of_node); + if (!of_node_name_eq(parent_node, "reserved-memory") && + !pdata->console_size && !pdata->ftrace_size && +- !pdata->pmsg_size && !pdata->ecc_info.ecc_size && +- !pdata->blackbox_size) { ++ !pdata->pmsg_size && !pdata->ecc_info.ecc_size) { + pdata->console_size = pdata->record_size; + pdata->pmsg_size = pdata->record_size; +- pdata->blackbox_size = pdata->record_size; + } + of_node_put(parent_node); + +@@ -778,7 +752,7 @@ static int ramoops_probe(struct platform_device *pdev) + } + + if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && +- !pdata->ftrace_size && !pdata->pmsg_size && !pdata->blackbox_size)) { ++ !pdata->ftrace_size && !pdata->pmsg_size)) { + pr_err("The memory size and the record/console size must be " + "non-zero\n"); + err = -EINVAL; +@@ -793,8 +767,6 @@ static int ramoops_probe(struct platform_device *pdev) + pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); + if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size)) + pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size); +- if (pdata->blackbox_size && !is_power_of_2(pdata->blackbox_size)) +- pdata->blackbox_size = rounddown_pow_of_two(pdata->blackbox_size); + + cxt->size = pdata->mem_size; + cxt->phys_addr = pdata->mem_address; +@@ -803,22 +775,13 @@ static int ramoops_probe(struct platform_device *pdev) + cxt->console_size = pdata->console_size; + cxt->ftrace_size = pdata->ftrace_size; + cxt->pmsg_size = pdata->pmsg_size; +- cxt->blackbox_size = pdata->blackbox_size; + cxt->flags = pdata->flags; + cxt->ecc_info = pdata->ecc_info; + + paddr = cxt->phys_addr; + + dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size +- - cxt->pmsg_size - cxt->blackbox_size; +- err = ramoops_init_prz("blackbox", dev, cxt, &cxt->bprz, &paddr, +- cxt->blackbox_size, 0); +- if (err) +- goto fail_init; +-#if IS_ENABLED(CONFIG_PSTORE_BLACKBOX) +- else +- pstore_ready = true; +-#endif ++ - cxt->pmsg_size; + err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr, + dump_mem_sz, cxt->record_size, + &cxt->max_dump_cnt, 0, 0); +@@ -864,8 +827,6 @@ static int ramoops_probe(struct platform_device *pdev) + cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; + if (cxt->pmsg_size) + cxt->pstore.flags |= PSTORE_FLAGS_PMSG; +- if 
(cxt->blackbox_size) +- cxt->pstore.flags |= PSTORE_FLAGS_BLACKBOX; + + /* + * Since bufsize is only used for dmesg crash dumps, it +@@ -899,7 +860,6 @@ static int ramoops_probe(struct platform_device *pdev) + ramoops_console_size = pdata->console_size; + ramoops_pmsg_size = pdata->pmsg_size; + ramoops_ftrace_size = pdata->ftrace_size; +- ramoops_blackbox_size = pdata->blackbox_size; + + pr_info("using 0x%lx@0x%llx, ecc: %d\n", + cxt->size, (unsigned long long)cxt->phys_addr, +@@ -971,7 +931,6 @@ static void __init ramoops_register_dummy(void) + pdata.console_size = ramoops_console_size; + pdata.ftrace_size = ramoops_ftrace_size; + pdata.pmsg_size = ramoops_pmsg_size; +- pdata.blackbox_size = ramoops_blackbox_size; + /* If "max_reason" is set, its value has priority over "dump_oops". */ + if (ramoops_max_reason >= 0) + pdata.max_reason = ramoops_max_reason; +diff --git a/fs/sharefs/Kconfig b/fs/sharefs/Kconfig +deleted file mode 100644 +index 588192e26..000000000 +--- a/fs/sharefs/Kconfig ++++ /dev/null +@@ -1,24 +0,0 @@ +-config SHARE_FS +- tristate "SHAREFS filesystem support" +- help +- SHAREFS is an overlay file system.SHAREFS is used for file sharing +- between applications. Sharefs manages permissions through different +- permissions for reading and writing directories. +- +-config SHAREFS_SUPPORT_OVERRIDE +- bool "Sharefs: support override " +- depends on SHARE_FS +- default n +- help +- This is the switch of override feature on sharefs file system. +- If the device type is 2in1, it shoule be set y. +- +-config SHAREFS_SUPPORT_WRITE +- bool "Sharefs: support write operations" +- depends on SHARE_FS +- depends on SHAREFS_SUPPORT_OVERRIDE +- default n +- help +- This is the switch of write operation on sharefs file system. +- If the device type is 2in1 and writing files is needed, +- it shoule be set y. +diff --git a/fs/sharefs/Makefile b/fs/sharefs/Makefile +deleted file mode 100644 +index 9b84e26d1..000000000 +--- a/fs/sharefs/Makefile ++++ /dev/null +@@ -1,12 +0,0 @@ +-obj-$(CONFIG_SHARE_FS) += sharefs.o +-ccflags-y += -I$(src) +- +-sharefs-y := dentry.o file.o inode.o main.o super.o lookup.o authentication.o config.o +-ccflags-y += -I$(src) -Werror -Wall +-export CONFIG_SHARE_FS := m +-KDIR ::= /lib/modules/$(shell uname -r)/build +-PWD := $(shell pwd) +-all: +- $(MAKE) -C $(KDIR) M=$(PWD) modules +-clean: +- $(MAKE) -C $(KDIR) M=$(PWD) clean +\ No newline at end of file +diff --git a/fs/sharefs/authentication.c b/fs/sharefs/authentication.c +deleted file mode 100644 +index 39997c632..000000000 +--- a/fs/sharefs/authentication.c ++++ /dev/null +@@ -1,98 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/sharefs/authentication.c +- * +- * Copyright (c) 2023 Huawei Device Co., Ltd. 
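
The blackbox_size knob deleted from ramoops above followed the same module-parameter pattern as the surviving record/console/ftrace/pmsg sizes: a static ulong exposed through module_param_named() with mode 0400, so it is settable at load time but read-only in sysfs. The pattern on its own, with an illustrative parameter name:

#include <linux/module.h>

static ulong demo_region_size = 4096;	/* default size in bytes */
module_param_named(region_size, demo_region_size, ulong, 0400);
MODULE_PARM_DESC(region_size, "size of the demo log region");

On device-tree platforms the module parameters are bypassed: ramoops_parse_dt() above fills the same platform-data fields from the corresponding "*-size" properties instead.
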
+- */ +-#include "authentication.h" +- +-static inline __u16 perm_get_next_level(__u16 perm) +-{ +- __u16 level = (perm & SHAREFS_PERM_MASK) + 1; +- +- if (level <= SHAREFS_PERM_OTHER) +- return level; +- else +- return SHAREFS_PERM_OTHER; +-} +- +-void fixup_perm_from_level(struct inode *dir, struct dentry *dentry) +-{ +- struct sharefs_inode_info *hii = SHAREFS_I(dir); +- struct inode *dinode = d_inode(dentry); +- struct sharefs_inode_info *dinfo = SHAREFS_I(dinode); +- const unsigned char* cur_name = dentry->d_name.name; +- __u16 level = perm_get_next_level(hii->perm); +- __u16 perm = 0; +- int bid = 0; +- +- if (IS_ERR_OR_NULL(dinode)) +- return; +- dinode->i_uid = dir->i_uid; +- dinode->i_gid = dir->i_gid; +- switch (level) { +- case SHAREFS_PERM_MNT: +- bid = get_bundle_uid(SHAREFS_SB(dentry->d_sb), +- dentry->d_name.name); +- perm = level; +- if (bid != 0) { +- dinode->i_uid = KUIDT_INIT(bid); +- dinode->i_gid = KGIDT_INIT(bid); +- } else { +- dinode->i_uid = ROOT_UID; +- dinode->i_gid = ROOT_GID; +- } +- dinode->i_mode = (dinode->i_mode & S_IFMT) | SHAREFS_PERM_READONLY_DIR; +- break; +- case SHAREFS_PERM_DFS: +- if (!strcmp(cur_name, SHAREFS_READ_DIR)) { +- perm = SHAREFS_DIR_TYPE_READONLY | level; +- sharefs_set_read_perm(dinode); +- } else if (!strcmp(cur_name, SHAREFS_READWRITE_DIR)) { +- perm = SHAREFS_DIR_TYPE_READWRITE | level; +- sharefs_set_read_write_perm(dinode); +- } +- break; +- case SHAREFS_PERM_OTHER: +- if (is_read_only_auth(hii->perm)) { +- perm = SHAREFS_DIR_TYPE_READONLY | SHAREFS_PERM_DFS; +- sharefs_set_read_perm(dinode); +- } else if (is_read_write_auth(hii->perm)) { +- perm = SHAREFS_DIR_TYPE_READWRITE | SHAREFS_PERM_DFS; +- sharefs_set_read_write_perm(dinode); +- } +- break; +- default: +- sharefs_err("sharedfs perm incorrect got default case, level:%u", level); +- break; +- } +- dinfo->perm = perm; +-} +- +-void sharefs_root_inode_perm_init(struct inode *root_inode) +-{ +- struct sharefs_inode_info *hii = SHAREFS_I(root_inode); +- hii->perm = SHAREFS_PERM_FIX; +-} +- +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +-const struct cred *sharefs_override_file_fsids(struct inode *dir, __u16 *_perm) +-{ +- struct cred *cred = NULL; +- cred = prepare_creds(); +- if (!cred) +- return NULL; +- +- cred->fsuid = dir->i_uid; +- cred->fsgid = dir->i_gid; +- return override_creds(cred); +-} +- +-void sharefs_revert_fsids(const struct cred *old_cred) +-{ +- const struct cred *cur_cred; +- cur_cred = current->cred; +- revert_creds(old_cred); +- put_cred(cur_cred); +-} +-#endif +\ No newline at end of file +diff --git a/fs/sharefs/authentication.h b/fs/sharefs/authentication.h +deleted file mode 100644 +index c9875d376..000000000 +--- a/fs/sharefs/authentication.h ++++ /dev/null +@@ -1,79 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/sharefs/authentication.h +- * +- * Copyright (c) 2023 Huawei Device Co., Ltd. 
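
sharefs_override_file_fsids() and sharefs_revert_fsids() above are the standard temporary-credential dance: prepare_creds() clones the current credentials, the clone's fsuid/fsgid are edited, and override_creds() installs the clone while returning the old credentials; the revert side must put the clone after switching back, or it leaks. Isolated into a pair of helpers:

#include <linux/cred.h>

/* act as the given owner for subsequent fs operations;
 * returns the credentials to restore, or NULL on -ENOMEM */
static const struct cred *demo_override(kuid_t uid, kgid_t gid)
{
	struct cred *cred = prepare_creds();

	if (!cred)
		return NULL;
	cred->fsuid = uid;
	cred->fsgid = gid;
	return override_creds(cred);
}

static void demo_revert(const struct cred *old_cred)
{
	const struct cred *override_cred = current_cred();

	revert_creds(old_cred);
	put_cred(override_cred);	/* drop the clone made in demo_override() */
}

This is why sharefs_revert_fsids() reads current->cred before calling revert_creds(): after the revert, the clone is no longer reachable through current.
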
+- */ +-#ifndef AUTHENTICATION_H +-#define AUTHENTICATION_H +- +-#include "sharefs.h" +- +-#define OID_ROOT 0 +- +-#define SHAREFS_PERM_MASK 0x000F +- +-#define SHAREFS_PERM_FIX 0 +-#define SHAREFS_PERM_MNT 1 +-#define SHAREFS_PERM_DFS 2 +-#define SHAREFS_PERM_OTHER 3 +- +-#define SHAREFS_READ_DIR "r" +-#define SHAREFS_READWRITE_DIR "rw" +- +-#define BASE_USER_RANGE 200000 /* offset for uid ranges for each user */ +- +- +-#define SHAREFS_DIR_TYPE_MASK 0x00F0 +-#define SHAREFS_DIR_TYPE_READONLY 0x0010 +-#define SHAREFS_DIR_TYPE_READWRITE 0x0020 +- +-#define SHAREFS_PERM_READONLY_DIR 00550 +-#define SHAREFS_PERM_READONLY_FILE 00440 +-#define SHAREFS_PERM_READWRITE_DIR 00550 +-#define SHAREFS_PERM_READWRITE_FILE 00660 +- +-extern int get_bid_config(const char *bname); +-extern int __init sharefs_init_configfs(void); +-extern void sharefs_exit_configfs(void); +- +-void sharefs_root_inode_perm_init(struct inode *root_inode); +-void fixup_perm_from_level(struct inode *dir, struct dentry *dentry); +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +-const struct cred *sharefs_override_file_fsids(struct inode *dir, +- __u16 *_perm); +-void sharefs_revert_fsids(const struct cred *old_cred); +-#endif +- +-static inline bool is_read_only_auth(__u16 perm) +-{ +- return (perm & SHAREFS_DIR_TYPE_MASK) == SHAREFS_DIR_TYPE_READONLY; +-} +- +-static inline bool is_read_write_auth(__u16 perm) +-{ +- return (perm & SHAREFS_DIR_TYPE_MASK) == SHAREFS_DIR_TYPE_READWRITE; +-} +- +-static inline void sharefs_set_read_perm(struct inode *inode) +-{ +- if (S_ISDIR(inode->i_mode)) +- inode->i_mode = (inode->i_mode & S_IFMT) | SHAREFS_PERM_READONLY_DIR; +- else +- inode->i_mode = (inode->i_mode & S_IFMT) | SHAREFS_PERM_READONLY_FILE; +-} +- +-static inline void sharefs_set_read_write_perm(struct inode *inode) +-{ +- if (S_ISDIR(inode->i_mode)) +- inode->i_mode = (inode->i_mode & S_IFMT) | SHAREFS_PERM_READWRITE_DIR; +- else +- inode->i_mode = (inode->i_mode & S_IFMT) | SHAREFS_PERM_READWRITE_FILE; +-} +- +-static inline int get_bundle_uid(struct sharefs_sb_info *sbi, const char *bname) +-{ +- return sbi->user_id * BASE_USER_RANGE + get_bid_config(bname); +-} +- +-#endif //_AUTHENTICATION_H_ +\ No newline at end of file +diff --git a/fs/sharefs/config.c b/fs/sharefs/config.c +deleted file mode 100644 +index fe30eeeff..000000000 +--- a/fs/sharefs/config.c ++++ /dev/null +@@ -1,372 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * fs/sharefs/config.c +- * +- * Copyright (c) 2023 Huawei Device Co., Ltd. 
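
The perm word in authentication.h is a small packed field: the low nibble carries the level (FIX/MNT/DFS/OTHER) and the next nibble the directory type. A worked example using the definitions above: SHAREFS_DIR_TYPE_READONLY | SHAREFS_PERM_DFS packs to 0x0010 | 0x0002 = 0x0012, so is_read_only_auth() sees (0x0012 & 0x00F0) == 0x0010 and returns true. Bundle uids are plain arithmetic: for user_id 100 and a bundle id of 20010, get_bundle_uid() yields 100 * 200000 + 20010 = 20020010.

/* worked example, reusing the definitions above */
static bool demo_is_readonly(void)
{
	__u16 perm = SHAREFS_DIR_TYPE_READONLY | SHAREFS_PERM_DFS; /* 0x0012 */

	return is_read_only_auth(perm);	/* (0x0012 & 0x00F0) == 0x0010: true */
}
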
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include "sharefs.h" +- +-static struct kmem_cache *sharefs_bid_entry_cachep; +- +-struct sharefs_bid_entry { +- struct hlist_node node; +- struct qstr str; +- int id; +-}; +- +-struct sharefs_config_bitem { +- struct config_item item; +- struct qstr str; +-}; +- +-static unsigned int make_hash(const char *name, unsigned int len) +-{ +- unsigned long hash; +- +- hash = init_name_hash(0); +- while (len--) +- hash = partial_name_hash(tolower(*name++), hash); +- +- return end_name_hash(hash); +-} +- +-static struct qstr make_qstr(const char *name) +-{ +- struct qstr str; +- str.name = name; +- str.len = strlen(name); +- str.hash = make_hash(str.name, str.len); +- +- return str; +-} +- +-static struct sharefs_bid_entry *alloc_bid_entry(const char *name, int id) +-{ +- struct sharefs_bid_entry *bid_entry; +- char *bid_entry_name; +- +- bid_entry = kmem_cache_alloc(sharefs_bid_entry_cachep, GFP_KERNEL); +- if (!bid_entry) { +- bid_entry = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bid_entry_name = kstrdup(name, GFP_KERNEL); +- if (!bid_entry_name) { +- kmem_cache_free(sharefs_bid_entry_cachep, bid_entry); +- bid_entry = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- INIT_HLIST_NODE(&bid_entry->node); +- bid_entry->str = make_qstr(bid_entry_name); +- bid_entry->id = id; +-out: +- return bid_entry; +-} +- +-static void free_bid_entry(struct sharefs_bid_entry *bid_entry) +-{ +- if (bid_entry == NULL) +- return; +- +- kfree(bid_entry->str.name); +- kmem_cache_free(sharefs_bid_entry_cachep, bid_entry); +-} +- +-static struct sharefs_config_bitem *alloc_bitem(const char *name) +-{ +- struct sharefs_config_bitem *bitem; +- char *bitem_name; +- +- bitem = kzalloc(sizeof(*bitem), GFP_KERNEL); +- if (!bitem) { +- bitem = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bitem_name = kstrdup(name, GFP_KERNEL); +- if (!bitem_name) { +- kfree(bitem); +- bitem = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- bitem->str = make_qstr(bitem_name); +-out: +- return bitem; +-} +- +-static void free_bitem(struct sharefs_config_bitem *bitem) +-{ +- if (bitem == NULL) +- return; +- +- kfree(bitem->str.name); +- kfree(bitem); +-} +- +-#define SHAREFS_BUNDLE_ATTRIBUTE(_attr_) \ +- \ +-static DEFINE_HASHTABLE(sharefs_##_attr_##_hash_table, 4); \ +- \ +-static DEFINE_MUTEX(sharefs_##_attr_##_hash_mutex); \ +- \ +-static int query_##_attr_##_hash_entry(struct qstr *str) \ +-{ \ +- int id = 0; \ +- struct sharefs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- mutex_lock(&sharefs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(sharefs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- id = bid_entry->id; \ +- break; \ +- } \ +- } \ +- mutex_unlock(&sharefs_##_attr_##_hash_mutex); \ +- \ +- return id; \ +-} \ +- \ +-static int insert_##_attr_##_hash_entry(struct qstr *str, int id) \ +-{ \ +- int err = 0; \ +- struct sharefs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- sharefs_info("insert name = %s", str->name); \ +- \ +- mutex_lock(&sharefs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(sharefs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- bid_entry->id = id; \ +- mutex_unlock(&sharefs_##_attr_##_hash_mutex); \ +- goto out; \ +- } \ +- } \ +- mutex_unlock(&sharefs_##_attr_##_hash_mutex); \ +- \ +- bid_entry = alloc_bid_entry(str->name, id); \ +- if (IS_ERR(bid_entry)) { \ +- 
err = PTR_ERR(bid_entry); \ +- goto out; \ +- } \ +- \ +- hash_add_rcu(sharefs_##_attr_##_hash_table, &bid_entry->node, \ +- bid_entry->str.hash); \ +-out: \ +- return err; \ +-} \ +- \ +-static void remove_##_attr_##_hash_entry(struct qstr *str) \ +-{ \ +- struct sharefs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- sharefs_info("remove name = %s", str->name); \ +- \ +- mutex_lock(&sharefs_##_attr_##_hash_mutex); \ +- hash_for_each_possible_safe(sharefs_##_attr_##_hash_table, \ +- bid_entry, hash_node, node, str->hash) { \ +- if (qstr_case_eq(str, &bid_entry->str)) { \ +- hash_del_rcu(&bid_entry->node); \ +- free_bid_entry(bid_entry); \ +- break; \ +- } \ +- } \ +- mutex_unlock(&sharefs_##_attr_##_hash_mutex); \ +-} \ +- \ +-static void clear_##_attr_##_hash_entry(void) \ +-{ \ +- int index; \ +- struct sharefs_bid_entry *bid_entry; \ +- struct hlist_node *hash_node; \ +- \ +- sharefs_info("clear bid entry"); \ +- \ +- mutex_lock(&sharefs_##_attr_##_hash_mutex); \ +- hash_for_each_safe(sharefs_##_attr_##_hash_table, index, \ +- hash_node, bid_entry, node) { \ +- hash_del_rcu(&bid_entry->node); \ +- kfree(bid_entry->str.name); \ +- kmem_cache_free(sharefs_bid_entry_cachep, bid_entry); \ +- } \ +- mutex_unlock(&sharefs_##_attr_##_hash_mutex); \ +-} \ +- \ +-static int sharefs_##_attr_##_get(const char *bname) \ +-{ \ +- struct qstr str; \ +- \ +- str = make_qstr(bname); \ +- return query_##_attr_##_hash_entry(&str); \ +-} \ +- \ +-static ssize_t sharefs_##_attr_##_show(struct config_item *item, \ +- char *page) \ +-{ \ +- int id; \ +- struct sharefs_config_bitem *bitem; \ +- \ +- sharefs_info("show bundle id"); \ +- \ +- bitem = container_of(item, struct sharefs_config_bitem, item); \ +- id = query_##_attr_##_hash_entry(&bitem->str); \ +- \ +- return scnprintf(page, PAGE_SIZE, "%u\n", id); \ +-} \ +- \ +-static ssize_t sharefs_##_attr_##_store(struct config_item *item, \ +- const char *page, size_t count) \ +-{ \ +- int id; \ +- int err; \ +- size_t size; \ +- struct sharefs_config_bitem *bitem; \ +- \ +- sharefs_info("store bundle id"); \ +- \ +- bitem = container_of(item, struct sharefs_config_bitem, item); \ +- \ +- if (kstrtouint(page, 10, &id)) { \ +- size = -EINVAL; \ +- goto out; \ +- } \ +- \ +- err = insert_##_attr_##_hash_entry(&bitem->str, id); \ +- if (err) { \ +- size = err; \ +- goto out; \ +- } \ +- \ +- size = count; \ +-out: \ +- return size; \ +-} \ +- \ +-static struct configfs_attribute sharefs_##_attr_##_attr = { \ +- .ca_name = __stringify(_attr_), \ +- .ca_mode = S_IRUGO | S_IWUGO, \ +- .ca_owner = THIS_MODULE, \ +- .show = sharefs_##_attr_##_show, \ +- .store = sharefs_##_attr_##_store, \ +-}; +- +-SHAREFS_BUNDLE_ATTRIBUTE(appid) +- +-static struct configfs_attribute *sharefs_battrs[] = { +- &sharefs_appid_attr, +- NULL, +-}; +- +-static void sharefs_config_bitem_release(struct config_item *item) +-{ +- struct sharefs_config_bitem *bitem; +- +- sharefs_info("release bundle item"); +- +- bitem = container_of(item, struct sharefs_config_bitem, item); +- remove_appid_hash_entry(&bitem->str); +- remove_appid_hash_entry(&bitem->str); +- free_bitem(bitem); +-} +- +-static struct configfs_item_operations sharefs_config_bitem_ops = { +- .release = sharefs_config_bitem_release, +-}; +- +-static struct config_item_type sharefs_config_bitem_type = { +- .ct_item_ops = &sharefs_config_bitem_ops, +- .ct_attrs = sharefs_battrs, +- .ct_owner = THIS_MODULE, +-}; +- +-static struct config_item *sharefs_make_bitem(struct config_group *group, +- const char *name) 
+-{ +- struct config_item *item; +- struct sharefs_config_bitem *bitem; +- +- bitem = alloc_bitem(name); +- if (IS_ERR(bitem)) { +- item = ERR_PTR(-ENOMEM); +- goto out; +- } +- +- config_item_init_type_name(&bitem->item, name, +- &sharefs_config_bitem_type); +- item = &bitem->item; +-out: +- return item; +-} +- +-static struct configfs_group_operations sharefs_group_ops = { +- .make_item = sharefs_make_bitem, +-}; +- +-static struct config_item_type sharefs_group_type = { +- .ct_group_ops = &sharefs_group_ops, +- .ct_owner = THIS_MODULE, +-}; +- +-static struct configfs_subsystem sharefs_subsystem = { +- .su_group = { +- .cg_item = { +- .ci_namebuf = "sharefs", +- .ci_type = &sharefs_group_type, +- }, +- }, +-}; +- +-int get_bid_config(const char *bname) +-{ +- return sharefs_appid_get(bname); +-} +- +-int __init sharefs_init_configfs(void) +-{ +- int err; +- struct configfs_subsystem *subsys; +- +- sharefs_info("init configfs"); +- +- sharefs_bid_entry_cachep = kmem_cache_create("sharefs_bid_entry_cachep", +- sizeof(struct sharefs_bid_entry), 0, 0, NULL); +- if (!sharefs_bid_entry_cachep) { +- sharefs_err("failed to create bid entry cachep"); +- err = -ENOMEM; +- goto out; +- } +- +- subsys = &sharefs_subsystem; +- config_group_init(&subsys->su_group); +- mutex_init(&subsys->su_mutex); +- +- err = configfs_register_subsystem(subsys); +- if (err) +- sharefs_err("failed to register subsystem"); +- +-out: +- return err; +-} +- +-void sharefs_exit_configfs(void) +-{ +- sharefs_info("sharefs exit configfs"); +- +- configfs_unregister_subsystem(&sharefs_subsystem); +- clear_appid_hash_entry(); +- +- kmem_cache_destroy(sharefs_bid_entry_cachep); +-} +\ No newline at end of file +diff --git a/fs/sharefs/dentry.c b/fs/sharefs/dentry.c +deleted file mode 100644 +index dee29cace..000000000 +--- a/fs/sharefs/dentry.c ++++ /dev/null +@@ -1,41 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * fs/sharefs/dentry.c +- * +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. +- */ +- +-#include "sharefs.h" +- +-/* +- * returns: 0: tell VFS to invalidate dentry in share directory +- */ +-static int sharefs_d_revalidate(struct dentry *dentry, unsigned int flags) +-{ +- return 0; +-} +- +-static void sharefs_d_release(struct dentry *dentry) +-{ +- /* +- * It is possible that the dentry private data is NULL in case we +- * ran out of memory while initializing it in +- * new_dentry_private_data. So check for NULL before attempting to +- * release resources. +- */ +- if (SHAREFS_D(dentry)) { +- /* release and reset the lower paths */ +- sharefs_put_reset_lower_path(dentry); +- free_dentry_private_data(dentry); +- } +- return; +-} +- +-const struct dentry_operations sharefs_dops = { +- .d_revalidate = sharefs_d_revalidate, +- .d_release = sharefs_d_release, +-}; +diff --git a/fs/sharefs/file.c b/fs/sharefs/file.c +deleted file mode 100644 +index bf5dbb346..000000000 +--- a/fs/sharefs/file.c ++++ /dev/null +@@ -1,269 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * fs/sharefs/file.c +- * +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. 
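
sharefs_init_configfs() above shows the minimum required to bring up a configfs subsystem, and the ordering matters: the group and its mutex must be initialized before configfs_register_subsystem() is called. Condensed to a subsystem with no attributes or children; all names are illustrative:

#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static struct config_item_type demo_group_type = {
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem demo_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "demo",
			.ci_type	= &demo_group_type,
		},
	},
};

static int __init demo_init(void)
{
	int err;

	config_group_init(&demo_subsys.su_group);	/* before registering */
	mutex_init(&demo_subsys.su_mutex);

	err = configfs_register_subsystem(&demo_subsys);
	if (err)
		pr_err("demo: configfs registration failed: %d\n", err);
	return err;
}

Teardown mirrors it: configfs_unregister_subsystem() first, then free whatever the items referenced, which is the order sharefs_exit_configfs() above uses.
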
+- */ +- +-#include "sharefs.h" +- +-static int sharefs_readdir(struct file *file, struct dir_context *ctx) +-{ +- int err; +- struct file *lower_file = NULL; +- struct dentry *dentry = file->f_path.dentry; +- +- lower_file = sharefs_lower_file(file); +- err = iterate_dir(lower_file, ctx); +- file->f_pos = lower_file->f_pos; +- if (err >= 0) /* copy the atime */ +- fsstack_copy_attr_atime(d_inode(dentry), +- file_inode(lower_file)); +- return err; +-} +- +-static int sharefs_open(struct inode *inode, struct file *file) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- struct path lower_path; +- +- /* don't open unhashed/deleted files */ +- if (d_unhashed(file->f_path.dentry)) { +- err = -ENOENT; +- goto out_err; +- } +- +- file->private_data = +- kzalloc(sizeof(struct sharefs_file_info), GFP_KERNEL); +- if (!SHAREFS_F(file)) { +- err = -ENOMEM; +- goto out_err; +- } +- +- /* open lower object and link sharefs's file struct to lower's */ +- sharefs_get_lower_path(file->f_path.dentry, &lower_path); +- lower_file = dentry_open(&lower_path, file->f_flags, current_cred()); +- path_put(&lower_path); +- if (IS_ERR(lower_file)) { +- err = PTR_ERR(lower_file); +- lower_file = sharefs_lower_file(file); +- if (lower_file) { +- sharefs_set_lower_file(file, NULL); +- fput(lower_file); /* fput calls dput for lower_dentry */ +- } +- } else { +- sharefs_set_lower_file(file, lower_file); +- } +- +- if (err) { +- kfree(SHAREFS_F(file)); +- } else { +- kuid_t uid = inode->i_uid; +- kgid_t gid = inode->i_gid; +- mode_t mode = inode->i_mode; +- fsstack_copy_attr_all(inode, sharefs_lower_inode(inode)); +- inode->i_uid = uid; +- inode->i_gid = gid; +- inode->i_mode = mode; +- } +-out_err: +- return err; +-} +- +-static int sharefs_flush(struct file *file, fl_owner_t id) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- +- lower_file = sharefs_lower_file(file); +- if (lower_file && lower_file->f_op && lower_file->f_op->flush) { +- filemap_write_and_wait_range(file->f_mapping, 0, LLONG_MAX); +- err = lower_file->f_op->flush(lower_file, id); +- } +- +- return err; +-} +- +-/* release all lower object references & free the file info structure */ +-static int sharefs_file_release(struct inode *inode, struct file *file) +-{ +- struct file *lower_file; +- +- lower_file = sharefs_lower_file(file); +- if (lower_file) { +- sharefs_set_lower_file(file, NULL); +- fput(lower_file); +- } +- +- kfree(SHAREFS_F(file)); +- return 0; +-} +- +-static int sharefs_fsync(struct file *file, loff_t start, loff_t end, +- int datasync) +-{ +- int err; +- struct file *lower_file; +- struct path lower_path; +- struct dentry *dentry = file->f_path.dentry; +- +- err = __generic_file_fsync(file, start, end, datasync); +- if (err) +- goto out; +- lower_file = sharefs_lower_file(file); +- sharefs_get_lower_path(dentry, &lower_path); +- err = vfs_fsync_range(lower_file, start, end, datasync); +- sharefs_put_lower_path(dentry, &lower_path); +-out: +- return err; +-} +- +-static int sharefs_fasync(int fd, struct file *file, int flag) +-{ +- int err = 0; +- struct file *lower_file = NULL; +- +- lower_file = sharefs_lower_file(file); +- if (lower_file->f_op && lower_file->f_op->fasync) +- err = lower_file->f_op->fasync(fd, lower_file, flag); +- +- return err; +-} +- +-/* +- * Sharefs cannot use generic_file_llseek as ->llseek, because it would +- * only set the offset of the upper file. So we have to implement our +- * own method to set both the upper and lower file offsets +- * consistently. 
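
sharefs_open() above is the core move of any stackable filesystem: resolve the lower path saved at lookup time, dentry_open() it with the caller's credentials, and stash the resulting struct file so later operations can be forwarded to it. Reduced to its skeleton; here the caller passes the lower path in, and the lower file is stored directly in private_data, where sharefs really wraps it in a sharefs_file_info:

#include <linux/cred.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/path.h>

static int demo_open_lower(struct file *file, const struct path *lower_path)
{
	struct file *lower_file;

	lower_file = dentry_open(lower_path, file->f_flags, current_cred());
	if (IS_ERR(lower_file))
		return PTR_ERR(lower_file);

	file->private_data = lower_file;	/* forwarded to on every op */
	return 0;
}

Release is the mirror image, as sharefs_file_release() above shows: fput() the stashed lower file, then free the wrapper.
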
+- */ +-static loff_t sharefs_file_llseek(struct file *file, loff_t offset, int whence) +-{ +- loff_t err; +- struct file *lower_file; +- +- lower_file = sharefs_lower_file(file); +- lower_file->f_pos = file->f_pos; +- err = generic_file_llseek(lower_file, offset, whence); +- file->f_pos = lower_file->f_pos; +- +- return err; +-} +- +-/* +- * Sharefs read_iter, redirect modified iocb to lower read_iter +- */ +-ssize_t sharefs_read_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- int err; +- struct file *file = iocb->ki_filp; +- struct file *lower_file; +- +- lower_file = sharefs_lower_file(file); +- if (!lower_file->f_op->read_iter) { +- err = -EINVAL; +- goto out; +- } +- +- /* prevent lower_file from being released */ +- get_file(lower_file); +- iocb->ki_filp = lower_file; +- err = lower_file->f_op->read_iter(iocb, iter); +- iocb->ki_filp = file; +- fput(lower_file); +- +- /* update upper inode atime as needed */ +- if (err >= 0 || err == -EIOCBQUEUED) +- fsstack_copy_attr_atime(d_inode(file->f_path.dentry), +- file_inode(lower_file)); +-out: +- return err; +-} +- +-/* +- * Sharefs write_iter, redirect modified iocb to lower write_iter +- */ +-ssize_t sharefs_write_iter(struct kiocb *iocb, struct iov_iter *iter) +-{ +- int err; +- struct file *file = iocb->ki_filp; +- struct file *lower_file; +- +- lower_file = sharefs_lower_file(file); +- if (!lower_file->f_op->write_iter) { +- err = -EINVAL; +- goto out; +- } +- +- get_file(lower_file); /* prevent lower_file from being released */ +- iocb->ki_filp = lower_file; +- err = lower_file->f_op->write_iter(iocb, iter); +- iocb->ki_filp = file; +- fput(lower_file); +- /* update upper inode times/sizes as needed */ +- if (err >= 0 || err == -EIOCBQUEUED) { +- fsstack_copy_inode_size(d_inode(file->f_path.dentry), +- file_inode(lower_file)); +- fsstack_copy_attr_times(d_inode(file->f_path.dentry), +- file_inode(lower_file)); +- } +-out: +- return err; +-} +- +-int sharefs_file_mmap(struct file *file, struct vm_area_struct *vma) +-{ +- int err = 0; +- struct file *lower_file; +- +- lower_file = sharefs_lower_file(file); +- if (!lower_file) +- return -EINVAL; +- +- if (!lower_file->f_op->mmap) +- return -ENODEV; +- +- if (WARN_ON(file != vma->vm_file)) +- return -EIO; +- +- vma->vm_file = get_file(lower_file); +- err = call_mmap(vma->vm_file, vma); +- if (err) +- fput(lower_file); +- else +- fput(file); +- +- file_accessed(file); +- +- return err; +-} +- +-const struct file_operations sharefs_main_fops = { +- .llseek = sharefs_file_llseek, +- .open = sharefs_open, +- .flush = sharefs_flush, +- .release = sharefs_file_release, +- .fsync = sharefs_fsync, +- .fasync = sharefs_fasync, +- .read_iter = sharefs_read_iter, +- .write_iter = sharefs_write_iter, +- .mmap = sharefs_file_mmap, +- .splice_read = filemap_splice_read, +- .splice_write = iter_file_splice_write, +-}; +- +-/* trimmed directory options */ +-const struct file_operations sharefs_dir_fops = { +- .llseek = sharefs_file_llseek, +- .read = generic_read_dir, +- .iterate_shared = sharefs_readdir, +- .open = sharefs_open, +- .release = sharefs_file_release, +- .flush = sharefs_flush, +- .fsync = sharefs_fsync, +- .fasync = sharefs_fasync, +-}; +diff --git a/fs/sharefs/inode.c b/fs/sharefs/inode.c +deleted file mode 100644 +index 546082924..000000000 +--- a/fs/sharefs/inode.c ++++ /dev/null +@@ -1,376 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * fs/sharefs/inode.c +- * +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 
2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. +- */ +- +-#include "sharefs.h" +-#ifdef CONFIG_SHAREFS_SUPPORT_WRITE +-#include "authentication.h" +-#endif +- +-static int sharefs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, +- u32 request_mask, unsigned int flags) +-{ +- struct path lower_path; +- int ret; +- +- sharefs_get_lower_path(path->dentry, &lower_path); +- ret = vfs_getattr_nosec(&lower_path, stat, request_mask, flags); +- stat->ino = d_inode(path->dentry)->i_ino; +- stat->uid = d_inode(path->dentry)->i_uid; +- stat->gid = d_inode(path->dentry)->i_gid; +- stat->mode = d_inode(path->dentry)->i_mode; +- stat->dev = 0; +- stat->rdev = 0; +- sharefs_put_lower_path(path->dentry, &lower_path); +- +- return ret; +-} +- +-static ssize_t sharefs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) +-{ +- int err; +- struct dentry *lower_dentry; +- struct path lower_path; +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- if (!(d_inode(lower_dentry)->i_opflags & IOP_XATTR)) { +- err = -EOPNOTSUPP; +- goto out; +- } +- err = vfs_listxattr(lower_dentry, buffer, buffer_size); +- if (err) +- goto out; +- fsstack_copy_attr_atime(d_inode(dentry), +- d_inode(lower_path.dentry)); +-out: +- sharefs_put_lower_path(dentry, &lower_path); +- return err; +-} +- +-static int sharefs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) +-{ +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +- return 0; +-#endif +- unsigned short mode = inode->i_mode; +- kuid_t cur_uid = current_fsuid(); +- if (uid_eq(cur_uid, ROOT_UID)) +- return 0; +- if (uid_eq(cur_uid, inode->i_uid)) { +- mode >>= 6; +- } else if (in_group_p(inode->i_gid)) { +- mode >>= 3; +- } +- +- if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) +- return 0; +- +- return -EACCES; +-} +- +-#ifdef CONFIG_SHAREFS_SUPPORT_WRITE +-static int sharefs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, +- umode_t mode, bool want_excl) +-{ +- int err; +- struct dentry *lower_dentry; +- struct dentry *lower_parent_dentry = NULL; +- struct path lower_path; +- const struct cred *saved_cred = NULL; +- __u16 child_perm; +- +- saved_cred = sharefs_override_file_fsids(dir, &child_perm); +- if (!saved_cred) { +- err = -ENOMEM; +- return err; +- } +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_parent_dentry = lock_parent(lower_dentry); +- err = vfs_create(&nop_mnt_idmap, d_inode(lower_parent_dentry), lower_dentry, mode, +- want_excl); +- if (err) +- goto out; +- err = sharefs_interpose(dentry, dir->i_sb, &lower_path); +- if (err) +- goto out; +- fsstack_copy_attr_times(dir, sharefs_lower_inode(dir)); +- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry)); +- +-out: +- unlock_dir(lower_parent_dentry); +- sharefs_put_lower_path(dentry, &lower_path); +- sharefs_revert_fsids(saved_cred); +- return err; +-} +- +-static int sharefs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) +-{ +- int err; +- struct dentry *lower_dentry; +- struct dentry *lower_parent_dentry = NULL; +- struct path lower_path; +- const struct cred *saved_cred = NULL; +- __u16 child_perm; +- +- saved_cred = sharefs_override_file_fsids(dir, &child_perm); +- if (!saved_cred) { +- err = -ENOMEM; +- return err; +- } +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = 
lower_path.dentry; +- lower_parent_dentry = lock_parent(lower_dentry); +- err = vfs_mkdir(&nop_mnt_idmap, d_inode(lower_parent_dentry), lower_dentry, mode); +- if (err) +- goto out; +- +- err = sharefs_interpose(dentry, dir->i_sb, &lower_path); +- if (err) +- goto out; +- +- fsstack_copy_attr_times(dir, sharefs_lower_inode(dir)); +- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry)); +- /* update number of links on parent directory */ +- set_nlink(dir, sharefs_lower_inode(dir)->i_nlink); +- +-out: +- unlock_dir(lower_parent_dentry); +- sharefs_put_lower_path(dentry, &lower_path); +- sharefs_revert_fsids(saved_cred); +- return err; +-} +- +-static int sharefs_unlink(struct inode *dir, struct dentry *dentry) +-{ +- int err; +- struct dentry *lower_dentry = NULL; +- struct inode *lower_dir_inode = sharefs_lower_inode(dir); +- struct dentry *lower_dir_dentry = NULL; +- struct path lower_path; +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- dget(lower_dentry); +- lower_dir_dentry = lock_parent(lower_dentry); +- err = vfs_unlink(&nop_mnt_idmap, lower_dir_inode, lower_dentry, NULL); +- if (err) +- goto out; +- fsstack_copy_attr_times(dir, lower_dir_inode); +- fsstack_copy_inode_size(dir, lower_dir_inode); +- set_nlink(dentry->d_inode, +- sharefs_lower_inode(dentry->d_inode)->i_nlink); +- dentry->d_inode->__i_ctime = dir->__i_ctime; +- d_drop(dentry); +- +-out: +- unlock_dir(lower_dir_dentry); +- dput(lower_dentry); +- sharefs_put_lower_path(dentry, &lower_path); +- return err; +-} +- +-static int sharefs_rmdir(struct inode *dir, struct dentry *dentry) +-{ +- int err; +- struct dentry *lower_dentry; +- struct dentry *lower_dir_dentry; +- struct path lower_path; +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_dir_dentry = lock_parent(lower_dentry); +- err = vfs_rmdir(&nop_mnt_idmap, lower_dir_dentry->d_inode, lower_dentry); +- if (err) +- goto out; +- +- d_drop(dentry); +- if (dentry->d_inode) +- clear_nlink(dentry->d_inode); +- fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); +- fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); +- set_nlink(dir, lower_dir_dentry->d_inode->i_nlink); +- +-out: +- unlock_dir(lower_dir_dentry); +- sharefs_put_lower_path(dentry, &lower_path); +- return err; +-} +- +-static int sharefs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, +- struct inode *new_dir, struct dentry *new_dentry, +- unsigned int flags) +-{ +- int err; +- struct dentry *lower_old_dentry = NULL; +- struct dentry *lower_new_dentry = NULL; +- struct dentry *lower_old_dir_dentry = NULL; +- struct dentry *lower_new_dir_dentry = NULL; +- struct dentry *trap = NULL; +- struct path lower_old_path, lower_new_path; +- struct renamedata rename_data; +- +- if (flags) +- return -EINVAL; +- +- sharefs_get_lower_path(old_dentry, &lower_old_path); +- sharefs_get_lower_path(new_dentry, &lower_new_path); +- lower_old_dentry = lower_old_path.dentry; +- lower_new_dentry = lower_new_path.dentry; +- lower_old_dir_dentry = dget_parent(lower_old_dentry); +- lower_new_dir_dentry = dget_parent(lower_new_dentry); +- trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); +- /* source should not be ancestor of target */ +- if (trap == lower_old_dentry) { +- err = -EINVAL; +- goto out; +- } +- /* target should not be ancestor of source */ +- if (trap == lower_new_dentry) { +- err = -ENOTEMPTY; +- goto out; +- } +- +- rename_data.old_mnt_idmap = &nop_mnt_idmap; +- 
rename_data.old_dir = lower_old_dir_dentry->d_inode; +- rename_data.old_dentry = lower_old_dentry; +- rename_data.new_mnt_idmap = &nop_mnt_idmap; +- rename_data.new_dir = lower_new_dir_dentry->d_inode; +- rename_data.new_dentry = lower_new_dentry; +- rename_data.flags = flags; +- err = vfs_rename(&rename_data); +- if (err) +- goto out; +- +- fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); +- fsstack_copy_inode_size(new_dir, lower_new_dir_dentry->d_inode); +- if (new_dir != old_dir) { +- fsstack_copy_attr_all(old_dir, +- lower_old_dir_dentry->d_inode); +- fsstack_copy_inode_size(old_dir, +- lower_old_dir_dentry->d_inode); +- } +- +-out: +- unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); +- dput(lower_old_dir_dentry); +- dput(lower_new_dir_dentry); +- sharefs_put_lower_path(old_dentry, &lower_old_path); +- sharefs_put_lower_path(new_dentry, &lower_new_path); +- return err; +-} +-#endif +- +-static int sharefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) +-{ +- int err; +- struct dentry *lower_dentry; +- struct inode *inode; +- struct inode *lower_inode; +- struct path lower_path; +- struct iattr lower_ia; +- +- inode = dentry->d_inode; +- /* +- * Check if user has permission to change inode. We don't check if +- * this user can change the lower inode: that should happen when +- * calling notify_change on the lower inode. +- */ +- +- err = setattr_prepare(&nop_mnt_idmap, dentry, ia); +- if (err) +- goto out_err; +- +- sharefs_get_lower_path(dentry, &lower_path); +- lower_dentry = lower_path.dentry; +- lower_inode = sharefs_lower_inode(inode); +- +- /* prepare our own lower struct iattr (with the lower file) */ +- memcpy(&lower_ia, ia, sizeof(lower_ia)); +- if (ia->ia_valid & ATTR_FILE) +- lower_ia.ia_file = sharefs_lower_file(ia->ia_file); +- +- /* +- * If shrinking, first truncate upper level to cancel writing dirty +- * pages beyond the new eof; and also if its' maxbytes is more +- * limiting (fail with -EFBIG before making any change to the lower +- * level). There is no need to vmtruncate the upper level +- * afterwards in the other cases: we fsstack_copy_inode_size from +- * the lower level. +- */ +- if (ia->ia_valid & ATTR_SIZE) { +- err = inode_newsize_ok(inode, ia->ia_size); +- if (err) +- goto out; +- truncate_setsize(inode, ia->ia_size); +- } +- +- lower_ia.ia_valid &= ~(ATTR_MODE|ATTR_UID|ATTR_GID); +- +- /* notify the (possibly copied-up) lower inode */ +- /* +- * Note: we use lower_dentry->d_inode, because lower_inode may be +- * unlinked (no inode->i_sb and i_ino==0. This happens if someone +- * tries to open(), unlink(), then ftruncate() a file. +- */ +- +- inode_lock(d_inode(lower_dentry)); +- err = notify_change(&nop_mnt_idmap, lower_dentry, &lower_ia, /* note: lower_ia */ +- NULL); +- inode_unlock(d_inode(lower_dentry)); +- +- if (err) +- goto out; +- +- /* get attributes from the lower inode */ +- fsstack_copy_attr_all(inode, lower_inode); +- /* +- * Not running fsstack_copy_inode_size(inode, lower_inode), because +- * VFS should update our inode size, and notify_change on +- * lower_inode should update its size. 
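
The trap checks in sharefs_rename() above are the canonical guard for cross-directory renames: lock_rename() locks both parent directories and returns the common ancestor of the two dentries, and if that ancestor is the source or target dentry itself, the operation would move a directory into its own subtree and must be refused. The guard in isolation:

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/namei.h>

static int demo_rename_guard(struct dentry *old_dir, struct dentry *old_dentry,
			     struct dentry *new_dir, struct dentry *new_dentry)
{
	struct dentry *trap;
	int err = 0;

	trap = lock_rename(old_dir, new_dir);
	if (trap == old_dentry)		/* source is an ancestor of target */
		err = -EINVAL;
	else if (trap == new_dentry)	/* target is an ancestor of source */
		err = -ENOTEMPTY;
	/* else: safe to fill a struct renamedata and call vfs_rename() */
	unlock_rename(old_dir, new_dir);
	return err;
}
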
+- */ +- +-out: +- sharefs_put_lower_path(dentry, &lower_path); +-out_err: +- return err; +-} +- +-const struct inode_operations sharefs_symlink_iops = { +- .permission = sharefs_permission, +- .getattr = sharefs_getattr, +- .get_link = NULL, +- .listxattr = sharefs_listxattr, +-}; +- +-const struct inode_operations sharefs_dir_iops = { +- .lookup = sharefs_lookup, +- .permission = sharefs_permission, +- .getattr = sharefs_getattr, +- .listxattr = sharefs_listxattr, +- .setattr = sharefs_setattr, +-#ifdef CONFIG_SHAREFS_SUPPORT_WRITE +- .unlink = sharefs_unlink, +- .rmdir = sharefs_rmdir, +- .rename = sharefs_rename, +- .create = sharefs_create, +- .mkdir = sharefs_mkdir, +-#endif +-}; +- +-const struct inode_operations sharefs_main_iops = { +- .permission = sharefs_permission, +- .getattr = sharefs_getattr, +- .listxattr = sharefs_listxattr, +- .setattr = sharefs_setattr, +-}; +diff --git a/fs/sharefs/lookup.c b/fs/sharefs/lookup.c +deleted file mode 100644 +index 8e4b902ea..000000000 +--- a/fs/sharefs/lookup.c ++++ /dev/null +@@ -1,338 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * fs/sharefs/lookup.c +- * +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. +- */ +- +-#include "sharefs.h" +-#include "authentication.h" +- +-/* The dentry cache is just so we have properly sized dentries */ +-static struct kmem_cache *sharefs_dentry_cachep; +- +-int sharefs_init_dentry_cache(void) +-{ +- sharefs_dentry_cachep = +- kmem_cache_create("sharefs_dentry", +- sizeof(struct sharefs_dentry_info), +- 0, SLAB_RECLAIM_ACCOUNT, NULL); +- +- return sharefs_dentry_cachep ? 0 : -ENOMEM; +-} +- +-void sharefs_destroy_dentry_cache(void) +-{ +- if (sharefs_dentry_cachep) +- kmem_cache_destroy(sharefs_dentry_cachep); +-} +- +-void free_dentry_private_data(struct dentry *dentry) +-{ +- if (!dentry || !dentry->d_fsdata) +- return; +- kmem_cache_free(sharefs_dentry_cachep, dentry->d_fsdata); +- dentry->d_fsdata = NULL; +-} +- +-/* allocate new dentry private data */ +-int new_dentry_private_data(struct dentry *dentry) +-{ +- struct sharefs_dentry_info *info = SHAREFS_D(dentry); +- +- /* use zalloc to init dentry_info.lower_path */ +- info = kmem_cache_zalloc(sharefs_dentry_cachep, GFP_ATOMIC); +- if (!info) +- return -ENOMEM; +- +- spin_lock_init(&info->lock); +- dentry->d_fsdata = info; +- +- return 0; +-} +- +-static int sharefs_inode_test(struct inode *inode, void *candidate_lower_inode) +-{ +- struct inode *current_lower_inode = sharefs_lower_inode(inode); +- if (current_lower_inode == (struct inode *)candidate_lower_inode) +- return 1; /* found a match */ +- else +- return 0; /* no match */ +-} +- +-static int sharefs_inode_set(struct inode *inode, void *lower_inode) +-{ +- /* we do actual inode initialization in sharefs_iget */ +- return 0; +-} +- +-struct inode *sharefs_iget(struct super_block *sb, struct inode *lower_inode) +-{ +- struct inode *inode; /* the new inode to return */ +- +- if (!igrab(lower_inode)) +- return ERR_PTR(-ESTALE); +- inode = iget5_locked(sb, /* our superblock */ +- /* +- * hashval: we use inode number, but we can +- * also use "(unsigned long)lower_inode" +- * instead. 
+- */ +- lower_inode->i_ino, /* hashval */ +- sharefs_inode_test, /* inode comparison function */ +- sharefs_inode_set, /* inode init function */ +- lower_inode); /* data passed to test+set fxns */ +- if (!inode) { +- iput(lower_inode); +- return ERR_PTR(-ENOMEM); +- } +- +- if (lower_inode->i_nlink == 0) { +- iput(lower_inode); +- iput(inode); +- return ERR_PTR(-ENOENT); +- } +- +- /* if found a cached inode, then just return it (after iput) */ +- if (!(inode->i_state & I_NEW)) { +- iput(lower_inode); +- return inode; +- } +- +- /* initialize new inode */ +- inode->i_ino = lower_inode->i_ino; +- sharefs_set_lower_inode(inode, lower_inode); +- +- atomic64_inc(&inode->i_version); +- +- /* use different set of inode ops for symlinks & directories */ +- if (S_ISDIR(lower_inode->i_mode)) +- inode->i_op = &sharefs_dir_iops; +- else if (S_ISLNK(lower_inode->i_mode)) +- inode->i_op = &sharefs_symlink_iops; +- else +- inode->i_op = &sharefs_main_iops; +- +- /* use different set of file ops for directories */ +- if (S_ISDIR(lower_inode->i_mode)) +- inode->i_fop = &sharefs_dir_fops; +- else +- inode->i_fop = &sharefs_main_fops; +- +- inode->i_atime.tv_sec = 0; +- inode->i_atime.tv_nsec = 0; +- inode->i_mtime.tv_sec = 0; +- inode->i_mtime.tv_nsec = 0; +- inode->__i_ctime.tv_sec = 0; +- inode->__i_ctime.tv_nsec = 0; +- +- /* properly initialize special inodes */ +- if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) || +- S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode)) +- init_special_inode(inode, lower_inode->i_mode, +- lower_inode->i_rdev); +- +- /* all well, copy inode attributes */ +- fsstack_copy_attr_all(inode, lower_inode); +- fsstack_copy_inode_size(inode, lower_inode); +- +- unlock_new_inode(inode); +- return inode; +-} +- +-/* +- * Helper interpose routine, called directly by ->lookup to handle +- * spliced dentries. +- */ +-static struct dentry *__sharefs_interpose(struct dentry *dentry, +- struct super_block *sb, +- struct path *lower_path) +-{ +- struct inode *inode; +- struct inode *lower_inode = d_inode(lower_path->dentry); +- struct dentry *ret_dentry; +- +- /* +- * We allocate our new inode below by calling sharefs_iget, +- * which will initialize some of the new inode's fields +- */ +- +- /* inherit lower inode number for sharefs's inode */ +- inode = sharefs_iget(sb, lower_inode); +- if (IS_ERR(inode)) { +- ret_dentry = ERR_PTR(PTR_ERR(inode)); +- goto out; +- } +- +- ret_dentry = d_splice_alias(inode, dentry); +- +-out: +- return ret_dentry; +-} +- +-/* +- * Connect a sharefs inode dentry/inode with several lower ones. This is +- * the classic stackable file system "vnode interposition" action. +- * +- * @dentry: sharefs's dentry which interposes on lower one +- * @sb: sharefs's super_block +- * @lower_path: the lower path (caller does path_get/put) +- */ +-int sharefs_interpose(struct dentry *dentry, struct super_block *sb, +- struct path *lower_path) +-{ +- struct dentry *ret_dentry; +- +- ret_dentry = __sharefs_interpose(dentry, sb, lower_path); +- return PTR_ERR(ret_dentry); +-} +- +-/* +- * Main driver function for sharefs's lookup. +- * +- * Returns: NULL (ok), ERR_PTR if an error occurred. +- * Fills in lower_parent_path with on success. 
+- */ +-static struct dentry *__sharefs_lookup(struct dentry *dentry, +- unsigned int flags, +- struct path *lower_parent_path) +-{ +- int err = 0; +- struct vfsmount *lower_dir_mnt; +- struct dentry *lower_dir_dentry = NULL; +- struct dentry *lower_dentry; +- const char *name; +- struct path lower_path; +- struct qstr this; +- struct dentry *ret_dentry = NULL; +- +- /* must initialize dentry operations */ +- d_set_d_op(dentry, &sharefs_dops); +- +- if (IS_ROOT(dentry)) +- goto out; +- +- name = dentry->d_name.name; +- +- /* now start the actual lookup procedure */ +- lower_dir_dentry = lower_parent_path->dentry; +- lower_dir_mnt = lower_parent_path->mnt; +- +- /* Use vfs_path_lookup to check if the dentry exists or not */ +- err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0, +- &lower_path); +- /* no error: handle positive dentries */ +- if (!err) { +- sharefs_set_lower_path(dentry, &lower_path); +- ret_dentry = +- __sharefs_interpose(dentry, dentry->d_sb, &lower_path); +- if (IS_ERR(ret_dentry)) { +- err = PTR_ERR(ret_dentry); +- /* path_put underlying path on error */ +- sharefs_put_reset_lower_path(dentry); +- } +- goto out; +- } +- /* +- * We don't consider ENOENT an error, and we want to return a +- * negative dentry. +- */ +- if (err && err != -ENOENT) +- goto out; +- +- /* instantiate a new negative dentry */ +- this.name = name; +- this.len = strlen(name); +- this.hash = full_name_hash(lower_dir_dentry, this.name, this.len); +- lower_dentry = d_lookup(lower_dir_dentry, &this); +- if (lower_dentry) +- goto setup_lower; +- +- lower_dentry = d_alloc(lower_dir_dentry, &this); +- if (!lower_dentry) { +- err = -ENOMEM; +- goto out; +- } +- +- /* +- * Calling ->lookup instead of d_add will give the lower fs a chance +- * to allocate the d_fsdata field but will still instantiate and hash the +- * lower_dentry. Without this, sharefs could not stack on top of itself. +- */ +- d_inode(lower_dir_dentry)->i_op->lookup(d_inode(lower_dir_dentry), +- lower_dentry, flags); +- +-setup_lower: +- lower_path.dentry = lower_dentry; +- lower_path.mnt = mntget(lower_dir_mnt); +- sharefs_set_lower_path(dentry, &lower_path); +- +- /* +- * If the intent is to create a file, then don't return an error, so +- * the VFS will continue the process of making this negative dentry +- * into a positive one. +- */ +- if (err == -ENOENT || (flags & (LOOKUP_CREATE|LOOKUP_RENAME_TARGET))) +- err = 0; +- +-out: +- if (err) +- return ERR_PTR(err); +- return ret_dentry; +-} +- +-struct dentry *sharefs_lookup(struct inode *dir, struct dentry *dentry, +- unsigned int flags) +-{ +- int err; +- struct dentry *ret, *parent; +- struct path lower_parent_path; +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +- const struct cred *saved_cred = NULL; +- __u16 permission; +-#endif +- +- parent = dget_parent(dentry); +- sharefs_get_lower_path(parent, &lower_parent_path); +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +- saved_cred = sharefs_override_file_fsids(dir, &permission); +- if (!saved_cred) { +- ret = ERR_PTR(-ENOMEM); +- goto out_err; +- } +-#endif +- +- /* allocate dentry private data. 
We free it in ->d_release */ +- err = new_dentry_private_data(dentry); +- if (err) { +- ret = ERR_PTR(err); +- goto out; +- } +- ret = __sharefs_lookup(dentry, flags, &lower_parent_path); +- if (IS_ERR(ret)) { +- sharefs_err("sharefs_lookup error!"); +- goto out; +- } +- +- if (ret) +- dentry = ret; +- if (d_inode(dentry)) +- fsstack_copy_attr_times(d_inode(dentry), +- sharefs_lower_inode(d_inode(dentry))); +- /* update parent directory's atime */ +- fsstack_copy_attr_atime(d_inode(parent), +- sharefs_lower_inode(d_inode(parent))); +- fixup_perm_from_level(d_inode(parent), dentry); +-out: +-#ifdef CONFIG_SHAREFS_SUPPORT_OVERRIDE +- sharefs_revert_fsids(saved_cred); +-out_err: +-#endif +- sharefs_put_lower_path(parent, &lower_parent_path); +- dput(parent); +- return ret; +-} +diff --git a/fs/sharefs/main.c b/fs/sharefs/main.c +deleted file mode 100644 +index f83d5e71d..000000000 +--- a/fs/sharefs/main.c ++++ /dev/null +@@ -1,193 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * fs/sharefs/main.c +- * +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. +- */ +- +-#include +-#include "sharefs.h" +-#include "authentication.h" +- +- +-struct sharefs_mount_priv { +- const char *dev_name; +- const char *raw_data; +-}; +- +-/* +- * There is no need to lock the sharefs_super_info's rwsem as there is no +- * way anyone can have a reference to the superblock at this point in time. +- */ +-static int sharefs_fill_super(struct super_block *sb, void *data, int silent) +-{ +- +- struct sharefs_mount_priv *priv = (struct sharefs_mount_priv *)data; +- const char *dev_name = priv->dev_name; +- const char *raw_data = priv->raw_data; +- +- int err = 0; +- struct super_block *lower_sb; +- struct path lower_path; +- struct inode *inode; +- +- /* parse lower path */ +- err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, +- &lower_path); +- if (err) { +- printk(KERN_ERR "sharefs: error accessing " +- "lower directory '%s'\n", dev_name); +- goto out; +- } +- +- /* allocate superblock private data */ +- sb->s_fs_info = kzalloc(sizeof(struct sharefs_sb_info), GFP_KERNEL); +- if (!SHAREFS_SB(sb)) { +- printk(KERN_CRIT "sharefs: fill_super: out of memory\n"); +- err = -ENOMEM; +- goto out_pput; +- } +- +- /* set the lower superblock field of upper superblock */ +- lower_sb = lower_path.dentry->d_sb; +- atomic_inc(&lower_sb->s_active); +- sharefs_set_lower_super(sb, lower_sb); +- +- /* inherit maxbytes from lower file system */ +- sb->s_maxbytes = lower_sb->s_maxbytes; +- +- /* +- * Our c/m/atime granularity is 1 ns because we may stack on file +- * systems whose granularity is as good. 
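
Since sharefs_fill_super() above derives everything from the lower directory passed as the device argument, mounting sharefs from userspace needs no block device — only an existing lower path plus, optionally, the option string that super.c parses further below. A minimal sketch, assuming the module is loaded; the paths and option values are hypothetical:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* The "device" is the lower directory; sharefs stacks on top of
             * it.  The option string is handed to sharefs_parse_options(). */
            if (mount("/data/lower", "/mnt/share", "sharefs", 0,
                      "user_id=100,override") != 0) {
                    perror("mount sharefs");
                    return 1;
            }
            return 0;
    }
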
+- */ +- sb->s_time_gran = 1; +- +- sb->s_op = &sharefs_sops; +- +- /* get a new inode and allocate our root dentry */ +- inode = sharefs_iget(sb, d_inode(lower_path.dentry)); +- if (IS_ERR(inode)) { +- err = PTR_ERR(inode); +- goto out_pput; +- } +- sharefs_root_inode_perm_init(inode); +- sb->s_root = d_make_root(inode); +- if (!sb->s_root) { +- err = -ENOMEM; +- goto out_pput; +- } +- d_set_d_op(sb->s_root, &sharefs_dops); +- +- err = sharefs_parse_options(sb->s_fs_info, raw_data); +- if (err) +- goto out_pput; +- +- /* link the upper and lower dentries */ +- sb->s_root->d_fsdata = NULL; +- err = new_dentry_private_data(sb->s_root); +- if (err) +- goto out_pput; +- +- /* if get here: cannot have error */ +- +- /* set the lower dentries for s_root */ +- sharefs_set_lower_path(sb->s_root, &lower_path); +- +- /* +- * No need to call interpose because we already have a positive +- * dentry, which was instantiated by d_make_root. Just need to +- * d_rehash it. +- */ +- d_rehash(sb->s_root); +- if (!silent) +- printk(KERN_INFO +- "sharefs: mounted on top of %s type %s\n", +- dev_name, lower_sb->s_type->name); +- goto out; /* all is well */ +- +- /* +- * path_put is the only resource we need to free if an error occurred +- * because returning an error from this function will cause +- * generic_shutdown_super to be called, which will call +- * sharefs_put_super, and that function will release any other +- * resources we took. +- */ +-out_pput: +- path_put(&lower_path); +-out: +- return err; +-} +- +-struct dentry *sharefs_mount(struct file_system_type *fs_type, int flags, +- const char *dev_name, void *raw_data) +-{ +- struct sharefs_mount_priv priv = { +- .dev_name = dev_name, +- .raw_data = raw_data, +- }; +- +- /* sharefs needs a valid dev_name to get the lower_sb's metadata */ +- if (!dev_name || !*dev_name) +- return ERR_PTR(-EINVAL); +- +- return mount_nodev(fs_type, flags, &priv, +- sharefs_fill_super); +-} +- +-static struct file_system_type sharefs_fs_type = { +- .owner = THIS_MODULE, +- .name = SHAREFS_NAME, +- .mount = sharefs_mount, +- .kill_sb = generic_shutdown_super, +- .fs_flags = 0, +-}; +- +-static int __init init_sharefs_fs(void) +-{ +- int err; +- +- pr_info("Registering sharefs"); +- +- err = sharefs_init_inode_cache(); +- if (err) +- goto out_err; +- err = sharefs_init_dentry_cache(); +- if (err) +- goto out_err; +- err = register_filesystem(&sharefs_fs_type); +- if (err) { +- sharefs_err("share register failed!"); +- goto out_err; +- } +- +- err = sharefs_init_configfs(); +- if (err) +- goto out_err; +- return 0; +-out_err: +- sharefs_exit_configfs(); +- sharefs_destroy_inode_cache(); +- sharefs_destroy_dentry_cache(); +- sharefs_err("sharefs init failed!"); +- return err; +-} +- +-static void __exit exit_sharefs_fs(void) +-{ +- sharefs_destroy_inode_cache(); +- sharefs_destroy_dentry_cache(); +- unregister_filesystem(&sharefs_fs_type); +- sharefs_exit_configfs(); +- pr_info("Completed sharefs module unload\n"); +-} +- +-module_init(init_sharefs_fs); +-module_exit(exit_sharefs_fs); +- +-MODULE_LICENSE("GPL V2"); +-MODULE_DESCRIPTION("Share File System"); +-MODULE_ALIAS_FS("sharefs"); +\ No newline at end of file +diff --git a/fs/sharefs/sharefs.h b/fs/sharefs/sharefs.h +deleted file mode 100644 +index fe3067ce5..000000000 +--- a/fs/sharefs/sharefs.h ++++ /dev/null +@@ -1,245 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright 
(c) 2003-2022 The Research Foundation of SUNY +- * Copyright (c) 2023 Huawei Device Co., Ltd. +- */ +- +-#ifndef _SHAREFS_H_ +-#define _SHAREFS_H_ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-/* the file system name */ +-#define SHAREFS_NAME "sharefs" +- +-/* sharefs root inode number */ +-#define SHAREFS_ROOT_INO 1 +-#define OID_ROOT 0 +-#define ROOT_UID KUIDT_INIT(OID_ROOT) +-#define ROOT_GID KGIDT_INIT(OID_ROOT) +-#define SHAREFS_SUPER_MAGIC 0x20230212 +- +-/* useful for tracking code reachability */ +-#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__) +- +-/* file private data */ +-struct sharefs_file_info { +- struct file *lower_file; +- const struct vm_operations_struct *lower_vm_ops; +-}; +- +-/* sharefs inode data in memory */ +-struct sharefs_inode_info { +- struct inode *lower_inode; +- struct inode vfs_inode; +- __u16 perm; +-}; +- +-/* sharefs dentry data in memory */ +-struct sharefs_dentry_info { +- spinlock_t lock; /* protects lower_path */ +- struct path lower_path; +-}; +- +-/* sharefs super-block data in memory */ +-struct sharefs_sb_info { +- struct super_block *lower_sb; +- /* multi user */ +- unsigned int user_id; +- bool override; +- bool override_support_delete; +-}; +- +-/* operations vectors defined in specific files */ +-extern const struct file_operations sharefs_main_fops; +-extern const struct file_operations sharefs_dir_fops; +-extern const struct inode_operations sharefs_main_iops; +-extern const struct inode_operations sharefs_dir_iops; +-extern const struct inode_operations sharefs_symlink_iops; +-extern const struct super_operations sharefs_sops; +-extern const struct dentry_operations sharefs_dops; +- +-extern int sharefs_init_inode_cache(void); +-extern void sharefs_destroy_inode_cache(void); +-extern int sharefs_init_dentry_cache(void); +-extern void sharefs_destroy_dentry_cache(void); +-extern int new_dentry_private_data(struct dentry *dentry); +-extern void free_dentry_private_data(struct dentry *dentry); +-extern struct dentry *sharefs_lookup(struct inode *dir, struct dentry *dentry, +- unsigned int flags); +-extern struct inode *sharefs_iget(struct super_block *sb, +- struct inode *lower_inode); +-extern int sharefs_interpose(struct dentry *dentry, struct super_block *sb, +- struct path *lower_path); +-extern int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, +- const char *name, unsigned int flags, +- struct path *path); +-extern int sharefs_parse_options(struct sharefs_sb_info *sbi, +- const char *data); +- +-/* +- * inode to private data +- * +- * Since we use containers and the struct inode is _inside_ the +- * sharefs_inode_info structure, SHAREFS_I will always (given a non-NULL +- * inode pointer), return a valid non-NULL pointer. 
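
The guarantee described in this comment falls out of the embedding itself: because the struct inode lives inside sharefs_inode_info, container_of() is pure pointer arithmetic and cannot yield NULL for a non-NULL inode. A standalone userspace rendering of the same pattern, with stub types and illustrative field values:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inode_stub { unsigned long i_ino; };

    struct wrapper {
            int perm;
            struct inode_stub vfs_inode;  /* embedded, like sharefs_inode_info */
    };

    int main(void)
    {
            struct wrapper w = { .perm = 0755, .vfs_inode = { .i_ino = 42 } };
            struct inode_stub *i = &w.vfs_inode;  /* what the VFS hands back */
            struct wrapper *back = container_of(i, struct wrapper, vfs_inode);

            printf("perm=%o ino=%lu\n", (unsigned)back->perm,
                   back->vfs_inode.i_ino);
            return 0;
    }
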
+- */ +-static inline struct sharefs_inode_info *SHAREFS_I(const struct inode *inode) +-{ +- return container_of(inode, struct sharefs_inode_info, vfs_inode); +-} +- +-/* dentry to private data */ +-#define SHAREFS_D(dent) ((struct sharefs_dentry_info *)(dent)->d_fsdata) +- +-/* superblock to private data */ +-#define SHAREFS_SB(super) ((struct sharefs_sb_info *)(super)->s_fs_info) +- +-/* file to private Data */ +-#define SHAREFS_F(file) ((struct sharefs_file_info *)((file)->private_data)) +- +-/* file to lower file */ +-static inline struct file *sharefs_lower_file(const struct file *f) +-{ +- return SHAREFS_F(f)->lower_file; +-} +- +-static inline void sharefs_set_lower_file(struct file *f, struct file *val) +-{ +- SHAREFS_F(f)->lower_file = val; +-} +- +-/* inode to lower inode. */ +-static inline struct inode *sharefs_lower_inode(const struct inode *i) +-{ +- return SHAREFS_I(i)->lower_inode; +-} +- +-static inline void sharefs_set_lower_inode(struct inode *i, struct inode *val) +-{ +- SHAREFS_I(i)->lower_inode = val; +-} +- +-/* superblock to lower superblock */ +-static inline struct super_block *sharefs_lower_super( +- const struct super_block *sb) +-{ +- return SHAREFS_SB(sb)->lower_sb; +-} +- +-static inline void sharefs_set_lower_super(struct super_block *sb, +- struct super_block *val) +-{ +- SHAREFS_SB(sb)->lower_sb = val; +-} +- +-/* path based (dentry/mnt) macros */ +-static inline void pathcpy(struct path *dst, const struct path *src) +-{ +- dst->dentry = src->dentry; +- dst->mnt = src->mnt; +-} +-/* Returns struct path. Caller must path_put it. */ +-static inline void sharefs_get_lower_path(const struct dentry *dent, +- struct path *lower_path) +-{ +- spin_lock(&SHAREFS_D(dent)->lock); +- pathcpy(lower_path, &SHAREFS_D(dent)->lower_path); +- path_get(lower_path); +- spin_unlock(&SHAREFS_D(dent)->lock); +- return; +-} +-static inline void sharefs_put_lower_path(const struct dentry *dent, +- struct path *lower_path) +-{ +- path_put(lower_path); +- return; +-} +-static inline void sharefs_set_lower_path(const struct dentry *dent, +- struct path *lower_path) +-{ +- spin_lock(&SHAREFS_D(dent)->lock); +- pathcpy(&SHAREFS_D(dent)->lower_path, lower_path); +- spin_unlock(&SHAREFS_D(dent)->lock); +- return; +-} +-static inline void sharefs_reset_lower_path(const struct dentry *dent) +-{ +- spin_lock(&SHAREFS_D(dent)->lock); +- SHAREFS_D(dent)->lower_path.dentry = NULL; +- SHAREFS_D(dent)->lower_path.mnt = NULL; +- spin_unlock(&SHAREFS_D(dent)->lock); +- return; +-} +-static inline void sharefs_put_reset_lower_path(const struct dentry *dent) +-{ +- struct path lower_path; +- spin_lock(&SHAREFS_D(dent)->lock); +- pathcpy(&lower_path, &SHAREFS_D(dent)->lower_path); +- SHAREFS_D(dent)->lower_path.dentry = NULL; +- SHAREFS_D(dent)->lower_path.mnt = NULL; +- spin_unlock(&SHAREFS_D(dent)->lock); +- path_put(&lower_path); +- return; +-} +- +-/* locking helpers */ +-static inline struct dentry *lock_parent(struct dentry *dentry) +-{ +- struct dentry *dir = dget_parent(dentry); +- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); +- return dir; +-} +- +-static inline void unlock_dir(struct dentry *dir) +-{ +- inode_unlock(d_inode(dir)); +- dput(dir); +-} +- +-static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len) +-{ +- return !strncasecmp(s1, s2, len); +-} +- +-static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2) +-{ +- return q1->len == q2->len && str_n_case_eq(q1->name, q2->name, q2->len); +-} 
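
The two case-insensitive helpers just above compose directly: qstr_case_eq() is a length check plus strncasecmp(). An equivalent userspace check, with a stub qstr and illustrative names, behaves as follows:

    #include <stddef.h>
    #include <stdio.h>
    #include <strings.h>

    struct qstr_stub { const char *name; size_t len; };

    /* Mirrors qstr_case_eq(): equal length plus case-insensitive compare. */
    static int qstr_case_eq(const struct qstr_stub *a, const struct qstr_stub *b)
    {
            return a->len == b->len && !strncasecmp(a->name, b->name, b->len);
    }

    int main(void)
    {
            struct qstr_stub a = { "README.md", 9 }, b = { "readme.MD", 9 };

            printf("%s\n", qstr_case_eq(&a, &b) ? "match" : "no match");
            return 0;
    }
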
+-/***************************************************************************** +- * log print helpers +- *****************************************************************************/ +-__printf(4, 5) void __sharefs_log(const char *level, const bool ratelimited, +- const char *function, const char *fmt, ...); +-#define sharefs_err(fmt, ...) \ +- __sharefs_log(KERN_ERR, false, __func__, fmt, ##__VA_ARGS__) +-#define sharefs_warning(fmt, ...) \ +- __sharefs_log(KERN_WARNING, false, __func__, fmt, ##__VA_ARGS__) +-#define sharefs_info(fmt, ...) \ +- __sharefs_log(KERN_INFO, false, __func__, fmt, ##__VA_ARGS__) +-#define sharefs_err_ratelimited(fmt, ...) \ +- __sharefs_log(KERN_ERR, true, __func__, fmt, ##__VA_ARGS__) +-#define sharefs_warning_ratelimited(fmt, ...) \ +- __sharefs_log(KERN_WARNING, true, __func__, fmt, ##__VA_ARGS__) +-#define sharefs_info_ratelimited(fmt, ...) \ +- __sharefs_log(KERN_INFO, true, __func__, fmt, ##__VA_ARGS__) +- +-#endif /* not _SHAREFS_H_ */ +diff --git a/fs/sharefs/super.c b/fs/sharefs/super.c +deleted file mode 100644 +index cba0d2206..000000000 +--- a/fs/sharefs/super.c ++++ /dev/null +@@ -1,214 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * Copyright (c) 1998-2022 Erez Zadok +- * Copyright (c) 2009 Shrikar Archak +- * Copyright (c) 2003-2022 Stony Brook University +- * Copyright (c) 2003-2022 The Research Foundation of SUNY +- */ +-#include +-#include +-#include +-#include +-#include "sharefs.h" +- +-enum { +- OPT_USER_ID, +- OPT_OVERRIDE, +- OPT_OVERRIDE_SUPPORT_DELETE, +- OPT_ERR, +-}; +- +-static match_table_t sharefs_tokens = { +- { OPT_USER_ID, "user_id=%s" }, +- { OPT_OVERRIDE, "override" }, +- { OPT_OVERRIDE_SUPPORT_DELETE, "override_support_delete" }, +- { OPT_ERR, NULL } +-}; +- +-int sharefs_parse_options(struct sharefs_sb_info *sbi, const char *data) +-{ +- char *p = NULL; +- char *name = NULL; +- char *options = NULL; +- char *options_src = NULL; +- substring_t args[MAX_OPT_ARGS]; +- unsigned int user_id = 0; +- int err = 0; +- +- options = kstrdup(data, GFP_KERNEL); +- if (data && !options) { +- err = -ENOMEM; +- goto out; +- } +- options_src = options; +- +- while ((p = strsep(&options_src, ",")) != NULL) { +- int token; +- +- if (!*p) +- continue; +- args[0].to = args[0].from = NULL; +- token = match_token(p, sharefs_tokens, args); +- +- switch (token) { +- case OPT_USER_ID: +- name = match_strdup(&args[0]); +- if (name) { +- err = kstrtouint(name, 10, &user_id); +- kfree(name); +- name = NULL; +- if (err) +- goto out; +- sbi->user_id = user_id; +- } +- break; +- case OPT_OVERRIDE: +- sbi->override = true; +- break; +- case OPT_OVERRIDE_SUPPORT_DELETE: +- sbi->override_support_delete = true; +- break; +- default: +- err = -EINVAL; +- goto out; +- } +- } +-out: +- kfree(options); +- +- return err; +-} +- +-/* +- * The inode cache is used with alloc_inode for both our inode info and the +- * vfs inode. 
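
sharefs_parse_options() above is a conventional strsep()/match_token() loop: split the option string on commas, skip empty tokens, and set the sb_info fields per token. The same control flow can be exercised in plain userspace C (glibc strsep/strdup; the option values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Userspace rendering of the strsep() loop in sharefs_parse_options(). */
    int main(void)
    {
            char *options = strdup("user_id=100,override,override_support_delete");
            char *cursor = options, *p;
            unsigned int user_id = 0;
            int override = 0, override_support_delete = 0;

            while ((p = strsep(&cursor, ",")) != NULL) {
                    if (!*p)
                            continue;  /* skip empty tokens, as the kernel does */
                    if (sscanf(p, "user_id=%u", &user_id) == 1)
                            continue;
                    if (!strcmp(p, "override"))
                            override = 1;
                    else if (!strcmp(p, "override_support_delete"))
                            override_support_delete = 1;
            }
            printf("user_id=%u override=%d delete=%d\n",
                   user_id, override, override_support_delete);
            free(options);
            return 0;
    }
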
+- */ +-static struct kmem_cache *sharefs_inode_cachep; +- +-/* final actions when unmounting a file system */ +-static void sharefs_put_super(struct super_block *sb) +-{ +- struct sharefs_sb_info *spd; +- struct super_block *s; +- +- spd = SHAREFS_SB(sb); +- if (!spd) +- return; +- +- /* decrement lower super references */ +- s = sharefs_lower_super(sb); +- sharefs_set_lower_super(sb, NULL); +- atomic_dec(&s->s_active); +- +- kfree(spd); +- sb->s_fs_info = NULL; +-} +- +-static int sharefs_statfs(struct dentry *dentry, struct kstatfs *buf) +-{ +- int err; +- struct path lower_path; +- +- sharefs_get_lower_path(dentry, &lower_path); +- err = vfs_statfs(&lower_path, buf); +- sharefs_put_lower_path(dentry, &lower_path); +- +- /* set return buf to our f/s to avoid confusing user-level utils */ +- buf->f_type = SHAREFS_SUPER_MAGIC; +- +- return err; +-} +- +-/* +- * Called by iput() when the inode reference count reached zero +- * and the inode is not hashed anywhere. Used to clear anything +- * that needs to be, before the inode is completely destroyed and put +- * on the inode free list. +- */ +-static void sharefs_evict_inode(struct inode *inode) +-{ +- struct inode *lower_inode; +- +- truncate_inode_pages(&inode->i_data, 0); +- clear_inode(inode); +- /* +- * Decrement a reference to a lower_inode, which was incremented +- * by our read_inode when it was created initially. +- */ +- lower_inode = sharefs_lower_inode(inode); +- sharefs_set_lower_inode(inode, NULL); +- iput(lower_inode); +-} +- +-void __sharefs_log(const char *level, const bool ratelimited, +- const char *function, const char *fmt, ...) +-{ +- struct va_format vaf; +- va_list args; +- +- va_start(args, fmt); +- vaf.fmt = fmt; +- vaf.va = &args; +- if (ratelimited) +- printk_ratelimited("%s sharefs: %s() %pV\n", level, +- function, &vaf); +- else +- printk("%s sharefs: %s() %pV\n", level, function, &vaf); +- va_end(args); +-} +- +-static struct inode *sharefs_alloc_inode(struct super_block *sb) +-{ +- struct sharefs_inode_info *i; +- +- i = kmem_cache_alloc(sharefs_inode_cachep, GFP_KERNEL); +- if (!i) +- return NULL; +- +- /* memset everything up to the inode to 0 */ +- memset(i, 0, offsetof(struct sharefs_inode_info, vfs_inode)); +- +- atomic64_set(&i->vfs_inode.i_version, 1); +- return &i->vfs_inode; +-} +- +-static void sharefs_destroy_inode(struct inode *inode) +-{ +- kmem_cache_free(sharefs_inode_cachep, SHAREFS_I(inode)); +-} +- +-/* sharefs inode cache constructor */ +-static void init_once(void *obj) +-{ +- struct sharefs_inode_info *i = obj; +- +- inode_init_once(&i->vfs_inode); +-} +- +-int sharefs_init_inode_cache(void) +-{ +- int err = 0; +- +- sharefs_inode_cachep = +- kmem_cache_create("sharefs_inode_cache", +- sizeof(struct sharefs_inode_info), 0, +- SLAB_RECLAIM_ACCOUNT, init_once); +- if (!sharefs_inode_cachep) +- err = -ENOMEM; +- return err; +-} +- +-/* sharefs inode cache destructor */ +-void sharefs_destroy_inode_cache(void) +-{ +- if (sharefs_inode_cachep) +- kmem_cache_destroy(sharefs_inode_cachep); +-} +- +-const struct super_operations sharefs_sops = { +- .put_super = sharefs_put_super, +- .statfs = sharefs_statfs, +- .evict_inode = sharefs_evict_inode, +- .alloc_inode = sharefs_alloc_inode, +- .destroy_inode = sharefs_destroy_inode, +-}; +diff --git a/fs/verity/enable.c b/fs/verity/enable.c +index 041ea14a9..c284f46d1 100644 +--- a/fs/verity/enable.c ++++ b/fs/verity/enable.c +@@ -61,33 +61,6 @@ static int write_merkle_tree_block(struct inode *inode, const u8 *buf, + return err; + } + +-static int 
check_file_and_enable_verity(struct file *filp, +- const struct fsverity_enable_arg *arg); +- +-#ifdef CONFIG_SECURITY_CODE_SIGN +- +-static int code_sign_init_descriptor(struct inode *inode, +- const struct fsverity_enable_arg *_arg, struct fsverity_descriptor *_desc); +- +-static int code_sign_copy_merkle_tree(struct file *filp, const void *_desc, +- const struct merkle_tree_params *params); +- +-#else /* !CONFIG_SECURITY_CODE_SIGN */ +- +-static inline int code_sign_init_descriptor(struct inode *inode, +- const struct fsverity_enable_arg *_arg, struct fsverity_descriptor *_desc) +-{ +- return 0; +-} +- +-static int code_sign_copy_merkle_tree(struct file *filp, +- const void *_desc, +- const struct merkle_tree_params *params) +-{ +- return 0; +-} +-#endif /* !CONFIG_SECURITY_CODE_SIGN */ +- + /* + * Build the Merkle tree for the given file using the given parameters, and + * return the root hash in @root_hash. +@@ -98,10 +71,10 @@ static int code_sign_copy_merkle_tree(struct file *filp, + */ + static int build_merkle_tree(struct file *filp, + const struct merkle_tree_params *params, +- u8 *root_hash, +- size_t data_size) ++ u8 *root_hash) + { + struct inode *inode = file_inode(filp); ++ const u64 data_size = inode->i_size; + const int num_levels = params->num_levels; + struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {}; + struct block_buffer *buffers = &_buffers[1]; +@@ -211,8 +184,11 @@ static int enable_verity(struct file *filp, + const struct fsverity_enable_arg *arg) + { + struct inode *inode = file_inode(filp); ++ const struct fsverity_operations *vops = inode->i_sb->s_vop; ++ struct merkle_tree_params params = { }; + struct fsverity_descriptor *desc; + size_t desc_size = struct_size(desc, signature, arg->sig_size); ++ struct fsverity_info *vi; + int err; + + /* Start initializing the fsverity_descriptor */ +@@ -243,39 +219,11 @@ static int enable_verity(struct file *filp, + + desc->data_size = cpu_to_le64(inode->i_size); + +- err = code_sign_init_descriptor(inode, arg, desc); +- if (err) { +- fsverity_err(inode, "Init code sign descriptor err: %u", err); +- goto out; +- } +- +- err = fsverity_enable_with_descriptor(filp, (void *)desc, desc_size); +-out: +- kfree(desc); +- return err; +-} +- +-int fsverity_enable_with_descriptor(struct file *filp, +- void *_desc, size_t desc_size) +-{ +- struct inode *inode = file_inode(filp); +- const struct fsverity_operations *vops = inode->i_sb->s_vop; +- struct merkle_tree_params params = { }; +- struct fsverity_descriptor *desc = (struct fsverity_descriptor *)_desc; +- struct fsverity_info *vi; +- int err; +- +- if (vops == NULL) { +- fsverity_err(inode, "current filesystem doesn't support fs-verity."); +- return -ENOTTY; +- } +- + /* Prepare the Merkle tree parameters */ + err = fsverity_init_merkle_tree_params(¶ms, inode, +- desc->hash_algorithm, ++ arg->hash_algorithm, + desc->log_blocksize, +- desc->salt, desc->salt_size, +- desc->data_size); ++ desc->salt, desc->salt_size); + if (err) + goto out; + +@@ -292,13 +240,6 @@ int fsverity_enable_with_descriptor(struct file *filp, + if (err) + goto out; + +- err = code_sign_copy_merkle_tree(filp, _desc, ¶ms); +- if (err < 0) { +- fsverity_err(inode, "Error %d copying Merkle tree", err); +- goto rollback; +- } else if (err == 1) /* already copy merkle tree */ +- goto skip_build; +- + /* + * Build the Merkle tree. 
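
It is worth noting what build_merkle_tree() now sizes itself from: with this change the tree always covers inode->i_size, and the level count computed in fsverity_init_merkle_tree_params() follows from repeatedly dividing the block count by the per-block fanout. A standalone sketch of that arithmetic, assuming SHA-256 (32-byte digests, hence 128 hashes per 4 KiB block):

    #include <stdio.h>

    /* Standalone sketch of the level computation in
     * fsverity_init_merkle_tree_params(): each level hashes
     * (block_size / digest_size) children of the level below. */
    int main(void)
    {
            unsigned long long i_size = 1ULL << 30;    /* 1 GiB file */
            unsigned int log_blocksize = 12;           /* 4096-byte blocks */
            unsigned int hashes_per_block = 4096 / 32; /* SHA-256 digests */
            unsigned long long blocks =
                    (i_size + (1ULL << log_blocksize) - 1) >> log_blocksize;
            int num_levels = 0;

            while (blocks > 1) {
                    num_levels++;
                    blocks = (blocks + hashes_per_block - 1) / hashes_per_block;
            }
            printf("num_levels=%d\n", num_levels);  /* 3 for this example */
            return 0;
    }
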
Don't hold the inode lock during this, since + * on huge files this may take a very long time and we don't want to +@@ -309,16 +250,12 @@ int fsverity_enable_with_descriptor(struct file *filp, + * lock and only allow one process to be here at a time on a given file. + */ + BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE); +- err = build_merkle_tree(filp, ¶ms, desc->root_hash, desc->data_size); ++ err = build_merkle_tree(filp, ¶ms, desc->root_hash); + if (err) { + fsverity_err(inode, "Error %d building Merkle tree", err); + goto rollback; + } + +-skip_build: +- pr_debug("Done building Merkle tree. Root hash is %s:%*phN\n", +- params.hash_alg->name, params.digest_size, desc->root_hash); +- + /* + * Create the fsverity_info. Don't bother trying to save work by + * reusing the merkle_tree_params from above. Instead, just create the +@@ -358,6 +295,7 @@ int fsverity_enable_with_descriptor(struct file *filp, + } + out: + kfree(params.hashstate); ++ kfree(desc); + return err; + + rollback: +@@ -366,7 +304,6 @@ int fsverity_enable_with_descriptor(struct file *filp, + inode_unlock(inode); + goto out; + } +-EXPORT_SYMBOL_GPL(fsverity_enable_with_descriptor); + + /** + * fsverity_ioctl_enable() - enable verity on a file +@@ -382,6 +319,7 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) + { + struct inode *inode = file_inode(filp); + struct fsverity_enable_arg arg; ++ int err; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; +@@ -402,15 +340,6 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) + if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE) + return -EMSGSIZE; + +- return check_file_and_enable_verity(filp, &arg); +-} +-EXPORT_SYMBOL_GPL(fsverity_ioctl_enable); +- +-static int check_file_and_enable_verity(struct file *filp, +- const struct fsverity_enable_arg *arg) +-{ +- struct inode *inode = file_inode(filp); +- int err; + /* + * Require a regular file with write access. But the actual fd must + * still be readonly so that we can lock out all writers. This is +@@ -446,7 +375,7 @@ static int check_file_and_enable_verity(struct file *filp, + if (err) /* -ETXTBSY */ + goto out_drop_write; + +- err = enable_verity(filp, arg); ++ err = enable_verity(filp, &arg); + + /* + * We no longer drop the inode's pagecache after enabling verity. 
This +@@ -473,147 +402,4 @@ static int check_file_and_enable_verity(struct file *filp, + mnt_drop_write_file(filp); + return err; + } +- +-#ifdef CONFIG_SECURITY_CODE_SIGN +-static int code_sign_copy_merkle_tree(struct file *filp, +- const void *_desc, +- const struct merkle_tree_params *params) +-{ +- struct inode *inode = file_inode(filp); +- struct block_buffer buffer = {}; +- int err = -ENOMEM; +- u64 offset; +- u64 tree_offset; +- +- if (!is_inside_tree_compact(_desc)) +- return 0; +- +- tree_offset = get_tree_offset_compact(_desc); +- +- if (inode->i_size < tree_offset + params->tree_size) { +- fsverity_err(inode, "File is too small to contain Merkle tree."); +- return -EFAULT; +- } +- +- buffer.data = kzalloc(params->block_size, GFP_KERNEL); +- if (!buffer.data) +- goto out; +- +- for (offset = tree_offset; offset < tree_offset + params->tree_size; offset += params->block_size) { +- ssize_t bytes_read; +- loff_t pos = offset; +- +- bytes_read = __kernel_read(filp, buffer.data, +- params->block_size, &pos); +- if (bytes_read < 0) { +- err = bytes_read; +- fsverity_err(inode, "Error %d reading Merkle tree block %llu", +- err, offset / params->block_size); +- goto out; +- } +- if (bytes_read != params->block_size) { +- err = -EINVAL; +- fsverity_err(inode, "Short read of Merkle tree block %llu", +- offset / params->block_size); +- goto out; +- } +- +- err = write_merkle_tree_block(inode, buffer.data, +- (offset - tree_offset) / params->block_size, +- params); +- if (err) +- goto out; +- } +- +- /* already copy merkle tree */ +- err = 1; +-out: +- kfree(buffer.data); +- return err; +-} +- +-static int code_sign_init_descriptor(struct inode *inode, +- const struct fsverity_enable_arg *_arg, +- struct fsverity_descriptor *_desc) +-{ +- struct code_sign_descriptor *desc = CAST_CODE_SIGN_DESC(_desc); +- const struct code_sign_enable_arg *arg = (const struct code_sign_enable_arg *)_arg; +- int algo_index; +- +- if (!arg->cs_version) +- return 0; +- +- /* init extended fields */ +- desc->flags = cpu_to_le32(arg->flags); +- desc->data_size = cpu_to_le64(arg->data_size); +- desc->tree_offset = cpu_to_le64(arg->tree_offset); +- desc->cs_version = arg->cs_version; +- desc->pgtypeinfo_size = cpu_to_le32(arg->pgtypeinfo_size); +- desc->pgtypeinfo_off = cpu_to_le64(arg->pgtypeinfo_off); +- +- /* Get root hash if a Merkle tree carried in file */ +- if (!IS_INSIDE_TREE(desc)) +- return 0; +- +- /* Get size of root hash */ +- algo_index = desc->hash_algorithm; +- if (algo_index >= g_fsverity_hash_algs_num || +- !fsverity_hash_algs[algo_index].name) { +- fsverity_err(inode, "Unknown hash algorithm: %u", algo_index); +- return -EINVAL; +- } +- +- if (copy_from_user(desc->root_hash, u64_to_user_ptr(arg->root_hash_ptr), +- fsverity_hash_algs[algo_index].digest_size)) { +- return -EFAULT; +- } +- +- return 0; +-} +- +-/** +- * fsverity_ioctl_enable_code_sign() - enable code signing on a file +- * @filp: file to enable code signing on +- * @uarg: user pointer to code_sign_enable_arg +- * +- * Enable fs-verity on a file with code signing features. 
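
With the code-sign variant removed, FS_IOC_ENABLE_VERITY is the single enable entry point this file keeps. A minimal userspace caller, assuming a filesystem with the verity feature enabled and a file that has already been written and closed (the fd must be read-only, matching the check in check_file_and_enable_verity() above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fsverity.h>

    int main(int argc, char **argv)
    {
            struct fsverity_enable_arg arg = {
                    .version = 1,
                    .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
                    .block_size = 4096,
            };
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <file>\n", argv[0]);
                    return 1;
            }
            fd = open(argv[1], O_RDONLY);  /* must be read-only */
            if (fd < 0 || ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
                    perror("FS_IOC_ENABLE_VERITY");
                    return 1;
            }
            return 0;
    }
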
+- * +- * Return: 0 on success, -errno on failure +- */ +-int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg) +-{ +- struct inode *inode = file_inode(filp); +- struct code_sign_enable_arg arg; +- +- if (copy_from_user(&arg, uarg, sizeof(arg))) +- return -EFAULT; +- +- if (arg.version != 1) +- return -EINVAL; +- +- if (arg.__reserved1 || +- memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2))) +- return -EINVAL; +- +- if (arg.data_size > inode->i_size) +- return -EINVAL; +- +- if (arg.tree_offset % arg.block_size != 0) +- return -EINVAL; +- +- if (!is_power_of_2(arg.block_size)) +- return -EINVAL; +- +- if (arg.salt_size > sizeof_field(struct code_sign_descriptor, salt)) +- return -EMSGSIZE; +- +- if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE) +- return -EMSGSIZE; +- +- if (arg.pgtypeinfo_off > arg.data_size - arg.pgtypeinfo_size / 8) +- return -EINVAL; +- +- return check_file_and_enable_verity(filp, (struct fsverity_enable_arg *)&arg); +-} +-EXPORT_SYMBOL_GPL(fsverity_ioctl_enable_code_sign); +-#endif /* CONFIG_SECURITY_CODE_SIGN */ ++EXPORT_SYMBOL_GPL(fsverity_ioctl_enable); +diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h +index 095b54667..d071a6e32 100644 +--- a/fs/verity/fsverity_private.h ++++ b/fs/verity/fsverity_private.h +@@ -11,8 +11,6 @@ + #define pr_fmt(fmt) "fs-verity: " fmt + + #include +-#include +-#include + + /* + * Implementation limit: maximum depth of the Merkle tree. For now 8 is plenty; +@@ -72,11 +70,6 @@ struct fsverity_info { + const struct inode *inode; + unsigned long *hash_block_verified; + spinlock_t hash_page_init_lock; +-#ifdef CONFIG_SECURITY_CODE_SIGN +- struct cs_info fcs_info; +- u64 verified_data_size; +- int cert_type; +-#endif + }; + + #define FS_VERITY_MAX_SIGNATURE_SIZE (FS_VERITY_MAX_DESCRIPTOR_SIZE - \ +@@ -86,8 +79,6 @@ struct fsverity_info { + + extern struct fsverity_hash_alg fsverity_hash_algs[]; + +-extern int g_fsverity_hash_algs_num; +- + const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, + unsigned int num); + const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg, +@@ -115,8 +106,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, + const struct inode *inode, + unsigned int hash_algorithm, + unsigned int log_blocksize, +- const u8 *salt, size_t salt_size, +- u64 data_size); ++ const u8 *salt, size_t salt_size); + + struct fsverity_info *fsverity_create_info(const struct inode *inode, + struct fsverity_descriptor *desc); +@@ -134,13 +124,13 @@ void __init fsverity_init_info_cache(void); + + #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES + extern int fsverity_require_signatures; +-int fsverity_verify_signature(struct fsverity_info *vi, ++int fsverity_verify_signature(const struct fsverity_info *vi, + const u8 *signature, size_t sig_size); + + void __init fsverity_init_signature(void); + #else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */ + static inline int +-fsverity_verify_signature(struct fsverity_info *vi, ++fsverity_verify_signature(const struct fsverity_info *vi, + const u8 *signature, size_t sig_size) + { + return 0; +diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c +index dcc4121cf..6b08b1d9a 100644 +--- a/fs/verity/hash_algs.c ++++ b/fs/verity/hash_algs.c +@@ -25,8 +25,6 @@ struct fsverity_hash_alg fsverity_hash_algs[] = { + }, + }; + +-int g_fsverity_hash_algs_num = ARRAY_SIZE(fsverity_hash_algs); +- + static DEFINE_MUTEX(fsverity_hash_alg_init_mutex); + + /** +diff --git a/fs/verity/open.c 
b/fs/verity/open.c +index c63eb076e..6c31a871b 100644 +--- a/fs/verity/open.c ++++ b/fs/verity/open.c +@@ -20,7 +20,6 @@ static struct kmem_cache *fsverity_info_cachep; + * @log_blocksize: log base 2 of block size to use + * @salt: pointer to salt (optional) + * @salt_size: size of salt, possibly 0 +- * @data_size: verified data size + * + * Validate the hash algorithm and block size, then compute the tree topology + * (num levels, num blocks in each level, etc.) and initialize @params. +@@ -31,8 +30,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, + const struct inode *inode, + unsigned int hash_algorithm, + unsigned int log_blocksize, +- const u8 *salt, size_t salt_size, +- u64 data_size) ++ const u8 *salt, size_t salt_size) + { + const struct fsverity_hash_alg *hash_alg; + int err; +@@ -108,7 +106,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, + */ + + /* Compute number of levels and the number of blocks in each level */ +- blocks = ((u64)data_size + params->block_size - 1) >> params->log_blocksize; ++ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize; + while (blocks > 1) { + if (params->num_levels >= FS_VERITY_MAX_LEVELS) { + fsverity_err(inode, "Too many levels in Merkle tree"); +@@ -165,13 +163,11 @@ static int compute_file_digest(const struct fsverity_hash_alg *hash_alg, + u8 *file_digest) + { + __le32 sig_size = desc->sig_size; +- int err, cs_version; ++ int err; + +- cs_version = code_sign_before_measurement_hook(desc); + desc->sig_size = 0; + err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), file_digest); + desc->sig_size = sig_size; +- code_sign_after_measurement_hook(desc, cs_version); + + return err; + } +@@ -187,14 +183,6 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode, + struct fsverity_info *vi; + int err; + +- err = code_sign_check_descriptor_hook(inode, desc); +- if (err < 0) { +- fsverity_err(inode, "Invalid code sign descriptor."); +- return ERR_PTR(err); +- } else if (err == 1) +- goto skip_part_check; +- +-skip_part_check: + vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL); + if (!vi) + return ERR_PTR(-ENOMEM); +@@ -203,8 +191,7 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode, + err = fsverity_init_merkle_tree_params(&vi->tree_params, inode, + desc->hash_algorithm, + desc->log_blocksize, +- desc->salt, desc->salt_size, +- le64_to_cpu(desc->data_size)); ++ desc->salt, desc->salt_size); + if (err) { + fsverity_err(inode, + "Error %d initializing Merkle tree parameters", +@@ -221,9 +208,6 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode, + goto fail; + } + +-#ifdef CONFIG_SECURITY_CODE_SIGN +- vi->verified_data_size = le64_to_cpu(desc->data_size); +-#endif + err = fsverity_verify_signature(vi, desc->signature, + le32_to_cpu(desc->sig_size)); + if (err) +diff --git a/fs/verity/signature.c b/fs/verity/signature.c +index e153a37df..90c07573d 100644 +--- a/fs/verity/signature.c ++++ b/fs/verity/signature.c +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + + /* + * /proc/sys/fs/verity/require_signatures +@@ -35,43 +34,6 @@ int fsverity_require_signatures; + */ + static struct key *fsverity_keyring; + +-#ifdef CONFIG_SECURITY_CODE_SIGN +- +-void fsverity_set_cert_type(struct fsverity_info *vi, +- int cert_type) +-{ +- vi->cert_type = cert_type; +-} +- +-int fsverity_get_cert_type(const struct inode *inode) +-{ +- return fsverity_get_info(inode)->cert_type; +-} +- +-#else /* 
!CONFIG_SECURITY_CODE_SIGN */ +- +-static void inline fsverity_set_cert_type(struct fsverity_info *verity_info, +- int cert_type) +-{ +-} +- +-#endif +- +-static inline int fsverity_verify_certchain(struct fsverity_info *vi, +- const void *raw_pkcs7, size_t pkcs7_len) +-{ +- int ret = 0; +- +- CALL_HCK_LITE_HOOK(code_sign_verify_certchain_lhck, +- raw_pkcs7, pkcs7_len, vi, &ret); +- if (ret > 0) { +- fsverity_set_cert_type(vi, ret); +- ret = 0; +- } +- +- return ret; +-} +- + /** + * fsverity_verify_signature() - check a verity file's signature + * @vi: the file's fsverity_info +@@ -83,7 +45,7 @@ static inline int fsverity_verify_certchain(struct fsverity_info *vi, + * + * Return: 0 on success (signature valid or not required); -errno on failure + */ +-int fsverity_verify_signature(struct fsverity_info *vi, ++int fsverity_verify_signature(const struct fsverity_info *vi, + const u8 *signature, size_t sig_size) + { + const struct inode *inode = vi->inode; +@@ -124,13 +86,6 @@ int fsverity_verify_signature(struct fsverity_info *vi, + d->digest_size = cpu_to_le16(hash_alg->digest_size); + memcpy(d->digest, vi->file_digest, hash_alg->digest_size); + +- err = fsverity_verify_certchain(vi, signature, sig_size); +- if (err) { +- fsverity_err(inode, "verify cert chain failed, err = %d", err); +- return err; +- } +- pr_debug("verify cert chain success\n"); +- + err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size, + signature, sig_size, fsverity_keyring, + VERIFYING_UNSPECIFIED_SIGNATURE, +diff --git a/fs/verity/verify.c b/fs/verity/verify.c +index 02a1ad997..904ccd7e8 100644 +--- a/fs/verity/verify.c ++++ b/fs/verity/verify.c +@@ -135,13 +135,6 @@ verify_data_block(struct inode *inode, struct fsverity_info *vi, + return true; + } + +-#ifdef CONFIG_SECURITY_CODE_SIGN +- if (data_pos >= vi->verified_data_size) { +- pr_debug_ratelimited("Data[%lu] out of verity range %lu\n", +- data_pos, vi->verified_data_size); +- return true; +- } +-#endif + /* + * Starting at the leaf level, ascend the tree saving hash blocks along + * the way until we find a hash block that has already been verified, or +@@ -341,21 +334,6 @@ void fsverity_verify_bio(struct bio *bio) + EXPORT_SYMBOL_GPL(fsverity_verify_bio); + #endif /* CONFIG_BLOCK */ + +-/** +- * fsverity_get_verified_data_size() - get verified data size of a verity file +- * @inode: the file's inode +- * +- * Return: verified data size +- */ +-u64 fsverity_get_verified_data_size(const struct inode *inode) +-{ +-#ifdef CONFIG_SECURITY_CODE_SIGN +- return fsverity_get_info(inode)->verified_data_size; +-#else +- return inode->i_size; +-#endif +-} +- + /** + * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue + * @work: the work to enqueue +diff --git a/include/dfx/hiview_hisysevent.h b/include/dfx/hiview_hisysevent.h +deleted file mode 100644 +index c47d419a2..000000000 +--- a/include/dfx/hiview_hisysevent.h ++++ /dev/null +@@ -1,67 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. 
+- */ +- +-#ifndef HIVIEW_HISYSEVENT_H +-#define HIVIEW_HISYSEVENT_H +- +-enum hisysevent_type { +- /* fault event */ +- FAULT = 1, +- +- /* statistic event */ +- STATISTIC = 2, +- +- /* security event */ +- SECURITY = 3, +- +- /* behavior event */ +- BEHAVIOR = 4 +-}; +- +-struct hiview_hisysevent; +- +-#ifdef CONFIG_HISYSEVENT +- +-struct hiview_hisysevent * +-hisysevent_create(const char *domain, const char *name, enum hisysevent_type type); +-void hisysevent_destroy(struct hiview_hisysevent **event); +-int hisysevent_put_integer(struct hiview_hisysevent *event, const char *key, long long value); +-int hisysevent_put_string(struct hiview_hisysevent *event, const char *key, const char *value); +-int hisysevent_write(struct hiview_hisysevent *event); +- +-#else +- +-#include +-#include +- +-static inline struct hiview_hisysevent * +-hisysevent_create(const char *domain, const char *name, enum hisysevent_type type) +-{ +- return NULL; +-} +- +-static inline void hisysevent_destroy(struct hiview_hisysevent **event) +-{} +- +-static inline int +-hisysevent_put_integer(struct hiview_hisysevent *event, const char *key, long long value) +-{ +- return -EOPNOTSUPP; +-} +- +-static inline int +-hisysevent_put_string(struct hiview_hisysevent *event, const char *key, const char *value) +-{ +- return -EOPNOTSUPP; +-} +- +-static inline int hisysevent_write(struct hiview_hisysevent *event) +-{ +- return -EOPNOTSUPP; +-} +- +-#endif /* CONFIG_HISYSEVENT */ +- +-#endif /* HIVIEW_HISYSEVENT_H */ +diff --git a/include/dfx/hung_wp_screen.h b/include/dfx/hung_wp_screen.h +deleted file mode 100644 +index 39bad044c..000000000 +--- a/include/dfx/hung_wp_screen.h ++++ /dev/null +@@ -1,24 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. +- */ +- +-#ifndef HUNG_WP_SCREEN_H +-#define HUNG_WP_SCREEN_H +- +-#define WP_SCREEN_PWK_RELEASE 0 +-#define WP_SCREEN_PWK_PRESS 1 +- +-#define ZRHUNG_WP_NONE 0 +-#define ZRHUNG_WP_SCREENON 1 +-#define ZRHUNG_WP_SCREENOFF 2 +- +-#define WP_SCREEN_DOMAIN "KERNEL_VENDOR" +-#define WP_SCREEN_PWK_NAME "POWER_KEY" +-#define WP_SCREEN_LPRESS_NAME "LONG_PRESS" +-#define WP_SCREEN_ON_NAME "SCREEN_ON" +-#define WP_SCREEN_OFF_NAME "SCREEN_OFF" +- +-void hung_wp_screen_powerkey_ncb(int event); +- +-#endif /* HUNG_WP_SCREEN_H */ +diff --git a/include/dfx/hungtask_base.h b/include/dfx/hungtask_base.h +deleted file mode 100644 +index b3cf189a0..000000000 +--- a/include/dfx/hungtask_base.h ++++ /dev/null +@@ -1,111 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2022 Huawei Technologies Co., Ltd. All rights reserved. 
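
For reference, the hiview_hisysevent interface deleted above followed a create/put/write/destroy lifecycle. A hedged kernel-context sketch of a caller, using the FAULT type and the "KERNEL_VENDOR" domain that also appears in hung_wp_screen.h; the event name, key names, and the assumption that the put helpers return 0 on success are illustrative:

    /* Illustrative caller of the removed hisysevent API (CONFIG_HISYSEVENT=y). */
    static void report_demo_fault(long long err_code)
    {
            struct hiview_hisysevent *event;

            event = hisysevent_create("KERNEL_VENDOR", "DEMO_FAULT", FAULT);
            if (!event)
                    return;
            if (!hisysevent_put_integer(event, "ERR_CODE", err_code) &&
                !hisysevent_put_string(event, "MODULE", "demo"))
                    hisysevent_write(event);
            hisysevent_destroy(&event);
    }
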
+- */ +- +-#ifndef DFX_HUNGTASK_BASE_H +-#define DFX_HUNGTASK_BASE_H +- +-#include +-#include +-#include +- +-#define ENABLE_SHOW_LEN 8 +-#define WHITELIST_STORE_LEN 400 +-#define WHITELIST_LEN 61 +-#define WHITE_LIST 1 +-#define BLACK_LIST 2 +-#define HT_ENABLE 1 +-#define HT_DISABLE 0 +-#define HEARTBEAT_TIME 3 +-#define MAX_LOOP_NUM (CONFIG_DEFAULT_HUNG_TASK_TIMEOUT / HEARTBEAT_TIME) +-#define ONE_MINUTE (60 / HEARTBEAT_TIME) +-#define ONE_AND_HALF_MINUTE (90 / HEARTBEAT_TIME) +-#define TWO_MINUTES (120 / HEARTBEAT_TIME) +-#define THREE_MINUTES (180 / HEARTBEAT_TIME) +-#define TWENTY_SECONDS (21 / HEARTBEAT_TIME) +-#define THIRTY_SECONDS (30 / HEARTBEAT_TIME) +-#define HUNG_ONE_HOUR (3600 / HEARTBEAT_TIME) +-#define HUNG_TEN_MINUTES (600 / HEARTBEAT_TIME) +-#define HUNGTASK_REPORT_TIMECOST TWENTY_SECONDS +-#define HT_DUMP_IN_PANIC_LOOSE 5 +-#define HT_DUMP_IN_PANIC_STRICT 2 +-#define REFRESH_INTERVAL THREE_MINUTES +-#define FLAG_DUMP_WHITE (1 << 0) +-#define FLAG_DUMP_APP (1 << 1) +-#define FLAG_DUMP_NOSCHEDULE (1 << 2) +-#define FLAG_DUMP_JANK (1 << 3) +-#define FLAG_PANIC (1 << 4) +-#define FLAG_PF_FROZEN (1 << 6) +-#define TASK_TYPE_IGNORE 0 +-#define TASK_TYPE_WHITE (1 << 0) +-#define TASK_TYPE_APP (1 << 1) +-#define TASK_TYPE_JANK (1 << 2) +-#define TASK_TYPE_KERNEL (1 << 3) +-#define TASK_TYPE_NATIVE (1 << 4) +-#define TASK_TYPE_FROZEN (1 << 6) +-#define PID_INIT 1 +-#define PID_KTHREAD 2 +-#define DEFAULT_WHITE_DUMP_CNT MAX_LOOP_NUM +-#define DEFAULT_WHITE_PANIC_CNT MAX_LOOP_NUM +-#define HUNG_TASK_UPLOAD_ONCE 1 +-#define FROZEN_BUF_LEN 1024 +-#define MAX_REMOVE_LIST_NUM 200 +-#define HUNGTASK_DOMAIN "KERNEL_VENDOR" +-#define HUNGTASK_NAME "HUNGTASK" +-#define INIT_FREEZE_NAME "INIT_FREEZE" +-#define HUNG_TASK_BATCHING 1024 +-#define TIME_REFRESH_PIDS 20 +-#define PID_ERROR (-1) +-#define HUNGTASK_EVENT_WHITELIST 1 +-#define REPORT_MSGLENGTH 200 +- +-struct task_item { +- struct rb_node node; +- pid_t pid; +- pid_t tgid; +- char name[TASK_COMM_LEN + 1]; +- unsigned long switch_count; +- unsigned int task_type; +- int dump_wa; +- int panic_wa; +- int dump_jank; +- int d_state_time; +- bool isdone_wa; +-}; +- +-struct hashlist_node { +- pid_t pid; +- struct hlist_node list; +-}; +- +-struct whitelist_item { +- pid_t pid; +- char name[TASK_COMM_LEN + 1]; +-}; +- +-struct task_hung_upload { +- char name[TASK_COMM_LEN + 1]; +- pid_t pid; +- pid_t tgid; +- unsigned int flag; +- int duration; +-}; +- +-extern unsigned long sysctl_hung_task_timeout_secs; +-extern unsigned int sysctl_hung_task_panic; +- +-void do_dump_task(struct task_struct *task); +-int dump_task_wa(struct task_item *item, int dump_cnt, +- struct task_struct *task, unsigned int flag); +-void do_show_task(struct task_struct *task, unsigned int flag, int d_state_time); +-void hungtask_show_state_filter(unsigned long state_filter); +-int htbase_create_sysfs(void); +-void htbase_set_panic(int new_did_panic); +-void htbase_set_timeout_secs(unsigned long new_hungtask_timeout_secs); +-void htbase_check_tasks(unsigned long timeout); +-bool hashlist_find(struct hlist_head *head, int count, pid_t tgid); +-void hashlist_clear(struct hlist_head *head, int count); +-bool hashlist_insert(struct hlist_head *head, int count, pid_t tgid); +- +-#endif /* DFX_HUNGTASK_BASE_H */ +diff --git a/include/dfx/zrhung.h b/include/dfx/zrhung.h +deleted file mode 100644 +index 49ba205a3..000000000 +--- a/include/dfx/zrhung.h ++++ /dev/null +@@ -1,12 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2022 Huawei Technologies 
Co., Ltd. All rights reserved. +- */ +- +-#ifndef ZRHUNG_H +-#define ZRHUNG_H +- +-int zrhung_send_event_bbox(const char *domain, const char *event_name, const char *timestamp, const char *reset); +-int zrhung_send_event(const char *domain, const char *event_name, const char *msg_buf); +- +-#endif /* ZRHUNG_H */ +diff --git a/include/dt-bindings/clock/basedrv-clock.h b/include/dt-bindings/clock/basedrv-clock.h +new file mode 100644 +index 000000000..3873aac7d +--- /dev/null ++++ b/include/dt-bindings/clock/basedrv-clock.h +@@ -0,0 +1,20 @@ ++/* ++ * Copyright (c) Shenshu Technologies Co., Ltd. 2022-2023. All rights reserved. ++ * Description: driver for clk ++ * Author: AuthorNameMagicTag ++ * Create: 2022-12-05 ++ */ ++ ++#ifndef __DT_BINDINGS_UPS_CLOCK_H ++#define __DT_BINDINGS_UPS_CLOCK_H ++ ++#define PERI_CRG3664_USB30_CTRL0 0x0000 ++#define PERI_CRG3672_USB30_CTRL1 0x0004 ++#define PERI_CRG3632_USB2_PHY0 0x0008 ++#define PERI_CRG3640_USB2_PHY1 0x000C ++#define PERI_CRG3665_COMBPHY0_CLK 0x0010 ++#define PERI_CRG3673_COMBPHY1_CLK 0x0014 ++ ++#define CLK_MAX 0x0800 ++ ++#endif /* __DT_BINDINGS_UPS_CLOCK_H */ +diff --git a/include/dt-bindings/clock/ss928v100_clock.h b/include/dt-bindings/clock/ss928v100_clock.h +new file mode 100644 +index 000000000..4699b352f +--- /dev/null ++++ b/include/dt-bindings/clock/ss928v100_clock.h +@@ -0,0 +1,100 @@ ++/* ++ * Copyright (c) 2016-2017 Shenshu Technologies Co., Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ */ ++ ++#ifndef __DTS_SS928V100_CLOCK_H ++#define __DTS_SS928V100_CLOCK_H ++ ++/* fixed rate */ ++#define SS928V100_FIXED_396M 16 ++#define SS928V100_FIXED_297M 18 ++#define SS928V100_FIXED_250M 20 ++#define SS928V100_FIXED_200M 21 ++#define SS928V100_FIXED_198M 22 ++#define SS928V100_FIXED_187P_5M 23 ++#define SS928V100_FIXED_150M 24 ++#define SS928V100_FIXED_148P_5M 25 ++#define SS928V100_FIXED_100M 28 ++#define SS928V100_FIXED_99M 29 ++#define SS928V100_FIXED_50M 35 ++#define SS928V100_FIXED_24M 41 ++#define SS928V100_FIXED_25M 40 ++#define SS928V100_FIXED_3M 45 ++#define SS928V100_FIXED_400K 47 ++ ++#define SS928V100_I2C0_CLK 50 ++#define SS928V100_I2C1_CLK 51 ++#define SS928V100_I2C2_CLK 52 ++#define SS928V100_I2C3_CLK 53 ++#define SS928V100_I2C4_CLK 54 ++#define SS928V100_I2C5_CLK 55 ++ ++#define SS928V100_SPI0_CLK 62 ++#define SS928V100_SPI1_CLK 63 ++#define SS928V100_SPI2_CLK 64 ++#define SS928V100_SPI3_CLK 65 ++ ++#define SS928V100_EDMAC_CLK 69 ++#define SS928V100_EDMAC_AXICLK 70 ++ ++/* mux clocks */ ++#define SS928V100_PWM0_MUX 72 ++#define SS928V100_PWM1_MUX 73 ++#define SS928V100_FMC_MUX 80 ++#define SS928V100_MMC0_MUX 83 ++#define SS928V100_UART0_MUX 84 ++#define SS928V100_UART1_MUX 85 ++#define SS928V100_UART2_MUX 86 ++#define SS928V100_UART3_MUX 87 ++#define SS928V100_UART4_MUX 88 ++#define SS928V100_UART5_MUX 89 ++ ++/* gate clocks */ ++#define SS928V100_I2C0_MUX 74 ++#define SS928V100_I2C1_MUX 75 ++#define SS928V100_I2C2_MUX 76 ++#define SS928V100_I2C3_MUX 77 ++#define SS928V100_I2C4_MUX 78 ++#define SS928V100_I2C5_MUX 79 ++#define SS928V100_FMC_CLK 90 ++#define SS928V100_UART0_CLK 91 ++#define SS928V100_UART1_CLK 92 ++#define SS928V100_UART2_CLK 93 ++#define SS928V100_UART3_CLK 94 ++#define SS928V100_UART4_CLK 95 ++#define SS928V100_UART5_CLK 96 ++#define SS928V100_MMC0_CLK 97 ++#define SS928V100_MMC1_CLK 98 ++#define SS928V100_MMC2_CLK 99 ++ ++#define SS928V100_ETH_CLK 101 ++#define SS928V100_ETH_MACIF_CLK 102 ++#define SS928V100_ETH1_CLK 103 ++#define SS928V100_ETH1_MACIF_CLK 104 ++ ++#define SS928V100_PWM0_CLK 121 ++#define SS928V100_PWM1_CLK 122 ++ ++#define SS928V100_MMC1_MUX 115 ++#define SS928V100_MMC2_MUX 116 ++/* pll clocks */ ++#define SS928V100_APLL_CLK 250 ++ ++#define SS928V100_CRG_NR_CLKS 256 ++ ++#endif /* __DTS_SS928V100_CLOCK_H */ ++ +diff --git a/include/linux/blackbox.h b/include/linux/blackbox.h +deleted file mode 100644 +index ed470e4a6..000000000 +--- a/include/linux/blackbox.h ++++ /dev/null +@@ -1,84 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved. 
+diff --git a/include/linux/blackbox.h b/include/linux/blackbox.h
+deleted file mode 100644
+index ed470e4a6..000000000
+--- a/include/linux/blackbox.h
++++ /dev/null
+@@ -1,84 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved.
+- */
+-
+-#ifndef BLACKBOX_H
+-#define BLACKBOX_H
+-
+-#include
+-#include
+-
+-#define PATH_MAX_LEN 256
+-#define EVENT_MAX_LEN 32
+-#define CATEGORY_MAX_LEN 32
+-#define MODULE_MAX_LEN 32
+-#define TIMESTAMP_MAX_LEN 24
+-#define ERROR_DESC_MAX_LEN 512
+-#define LOG_FLAG "VALIDLOG"
+-
+-/* module type */
+-#define MODULE_SYSTEM "SYSTEM"
+-
+-/* fault category type */
+-#define CATEGORY_SYSTEM_REBOOT "SYSREBOOT"
+-#define CATEGORY_SYSTEM_POWEROFF "POWEROFF"
+-#define CATEGORY_SYSTEM_PANIC "PANIC"
+-#define CATEGORY_SYSTEM_OOPS "OOPS"
+-#define CATEGORY_SYSTEM_CUSTOM "CUSTOM"
+-#define CATEGORY_SYSTEM_WATCHDOG "HWWATCHDOG"
+-#define CATEGORY_SYSTEM_HUNGTASK "HUNGTASK"
+-#define CATEGORY_SUBSYSTEM_CUSTOM "CUSTOM"
+-
+-/* fault event type */
+-#define EVENT_SYSREBOOT "SYSREBOOT"
+-#define EVENT_LONGPRESS "LONGPRESS"
+-#define EVENT_COMBINATIONKEY "COMBINATIONKEY"
+-#define EVENT_SUBSYSREBOOT "SUBSYSREBOOT"
+-#define EVENT_POWEROFF "POWEROFF"
+-#define EVENT_PANIC "PANIC"
+-#define EVENT_OOPS "OOPS"
+-#define EVENT_SYS_WATCHDOG "SYSWATCHDOG"
+-#define EVENT_HUNGTASK "HUNGTASK"
+-#define EVENT_BOOTFAIL "BOOTFAIL"
+-
+-#define FILE_NAME(x) (strrchr(x, '/') ? (strrchr(x, '/') + 1) : x)
+-#define BBOX_DECORATOR_HILOG(level, fmt, args...) \
+- pr_err("bbox:[%s][%s:%d] " fmt, level, FILE_NAME(__FILE__), __LINE__, ##args)
+-
+-#define bbox_print_fatal(fmt, args...) BBOX_DECORATOR_HILOG("fatal", fmt, ##args)
+-#define bbox_print_err(fmt, args...) BBOX_DECORATOR_HILOG("err", fmt, ##args)
+-#define bbox_print_warn(fmt, args...) BBOX_DECORATOR_HILOG("warn", fmt, ##args)
+-#define bbox_print_info(fmt, args...) BBOX_DECORATOR_HILOG("info", fmt, ##args)
+-#define bbox_print_debug(fmt, args...) BBOX_DECORATOR_HILOG("debug", fmt, ##args)
+-
+-struct error_info {
+- char event[EVENT_MAX_LEN];
+- char category[CATEGORY_MAX_LEN];
+- char module[MODULE_MAX_LEN];
+- char error_time[TIMESTAMP_MAX_LEN];
+- char error_desc[ERROR_DESC_MAX_LEN];
+-};
+-
+-struct fault_log_info {
+- char flag[8]; /* 8 is the length of the flag */
+- size_t len; /* length of the kernel fault log */
+- struct error_info info;
+-};
+-
+-struct module_ops {
+- char module[MODULE_MAX_LEN];
+- void (*dump)(const char *log_dir, struct error_info *info);
+- void (*reset)(struct error_info *info);
+- int (*get_last_log_info)(struct error_info *info);
+- int (*save_last_log)(const char *log_dir, struct error_info *info);
+-};
+-
+-void get_timestamp(char *buf, size_t buf_size);
+-int bbox_register_module_ops(struct module_ops *ops);
+-int bbox_notify_error(const char event[EVENT_MAX_LEN],
+- const char module[MODULE_MAX_LEN],
+- const char error_desc[ERROR_DESC_MAX_LEN],
+- int need_sys_reset);
+-
+-#endif /* BLACKBOX_H */
+diff --git a/include/linux/blackbox_common.h b/include/linux/blackbox_common.h
+deleted file mode 100644
+index ae8e0d229..000000000
+--- a/include/linux/blackbox_common.h
++++ /dev/null
+@@ -1,44 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved.
+- */
+-
+-#ifndef BLACKBOX_COMMON_H
+-#define BLACKBOX_COMMON_H
+-
+-#include
+-
+-/* bbox/BBOX - blackbox */
+-#define YEAR_BASE 1900
+-#define SECONDS_PER_MINUTE 60
+-#define AID_ROOT 0
+-#define AID_SYSTEM 1000
+-#define BBOX_DIR_LIMIT 0775
+-#define BBOX_FILE_LIMIT 0664
+-#define PATH_MAX_LEN 256
+-
+-/*
+- * format:
+- * [topCategoryName],module[moduleName],category[categoryName],\
+- * event[eventName],time[seconds from 1970-01-01 00:00:00 UTC-tick],\
+- * sysreboot[true|false],errordesc[errorDescription],logpath[logpath]\n
+- */
+-#define HISTORY_LOG_FORMAT "[%s],module[%s],category[%s],event[%s],"\
+- "time[%s],sysreboot[%s],errdesc[%s],logpath[%s]\n"
+-#define TIMESTAMP_FORMAT "%04d%02d%02d%02d%02d%02d-%08llu"
+-
+-void sys_reset(void);
+-void change_own(char *path, int uid, int gid);
+-int full_write_file(const char *pfile_path, char *buf,
+- size_t buf_size, bool read_file);
+-int file_exists(const char *name);
+-int create_log_dir(const char *path);
+-unsigned long long get_ticks(void);
+-struct file *file_open(const char *filename, int open_mode, int mode);
+-void file_close(struct file *filp);
+-ssize_t file_read(struct file *file, loff_t offset, unsigned char *data,
+- size_t size);
+-int file_delete(struct file *filp);
+-char *getfullpath(struct file *filp);
+-
+-#endif /* BLACKBOX_COMMON_H */
+diff --git a/include/linux/blackbox_storage.h b/include/linux/blackbox_storage.h
+deleted file mode 100644
+index 52d67523d..000000000
+--- a/include/linux/blackbox_storage.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2021 Huawei Technologies Co., Ltd. All rights reserved.
+- */
+-
+-#ifndef BLACKBOX_STORAGE_H
+-#define BLACKBOX_STORAGE_H
+-
+-#include
+-
+-struct reboot_crashlog_storage {
+- int (*storage_log)(void *out, unsigned int outlen);
+- int (*get_log)(void *in, unsigned int inlen);
+- void (*blackbox_dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+- const char *material;
+-};
+-
+-extern char *storage_material;
+-extern const struct reboot_crashlog_storage *storage_lastword;
+-extern const struct reboot_crashlog_storage storage_lastwords[];
+-
+-#endif /* BLACKBOX_STORAGE_H */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 706060aa4..035e627f9 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1245,12 +1245,8 @@ int bpf_dynptr_check_size(u32 size);
+ u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+
+ #ifdef CONFIG_BPF_JIT
+-int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+- struct bpf_trampoline *tr,
+- struct bpf_prog *tgt_prog);
+-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+- struct bpf_trampoline *tr,
+- struct bpf_prog *tgt_prog);
++int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
++int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+ struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
+ void bpf_trampoline_put(struct bpf_trampoline *tr);
+@@ -1331,14 +1327,12 @@ void bpf_jit_uncharge_modmem(u32 size);
+ bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+ #else
+ static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+- struct bpf_trampoline *tr,
+- struct bpf_prog *tgt_prog)
++ struct bpf_trampoline *tr)
+ {
+ return -ENOTSUPP;
+ }
+ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+- struct bpf_trampoline *tr,
+- struct bpf_prog *tgt_prog)
++ struct bpf_trampoline *tr)
+ {
+ return -ENOTSUPP;
+ }
+@@ -1436,9 +1430,6 @@ struct bpf_prog_aux {
+ bool sleepable;
+ bool tail_call_reachable;
+ bool xdp_has_frags;
+- bool is_extended; /* true if extended by freplace program */
+- u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
+- struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
+ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ const struct btf_type *attach_func_proto;
+ /* function name for valid attach_btf_id */
+diff --git a/include/linux/bsp_cma.h b/include/linux/bsp_cma.h
+new file mode 100644
+index 000000000..44271f75b
+--- /dev/null
++++ b/include/linux/bsp_cma.h
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) Shenshu Technologies Co., Ltd. 2020-2023. All rights reserved.
++ */
++#ifndef __BSP_CMA_H__
++#define __BSP_CMA_H__
++
++#ifdef CONFIG_ARCH_BSP
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#define NAME_LEN_MAX 64
++#define ZONE_MAX 64
++
++struct cma_zone {
++ struct device pdev;
++ char name[NAME_LEN_MAX];
++ gfp_t gfp;
++ phys_addr_t phys_start;
++ phys_addr_t nbytes;
++ u32 alloc_type;
++ u32 block_align;
++};
++
++#ifdef CONFIG_CMA
++int is_cma_address(phys_addr_t phys, unsigned long size);
++phys_addr_t get_zones_start(void);
++struct cma_zone *get_cma_zone(const char *name);
++struct device *get_cma_device(const char *name);
++int __init declare_heap_memory(void);
++#endif /* CONFIG_CMA */
++#endif /* CONFIG_ARCH_BSP */
++
++#endif
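bsp_cma.h models vendor-reserved CMA memory as named zones, each wrapped in a struct device so the ordinary DMA API can allocate from it. A hedged sketch of the intended use (the zone name "vo" is invented for illustration; the API exists only under CONFIG_ARCH_BSP with CONFIG_CMA):

    #include <linux/bsp_cma.h>
    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static void *demo_zone_alloc(dma_addr_t *phys)
    {
            /* look up the per-zone device set up by declare_heap_memory() */
            struct device *dev = get_cma_device("vo"); /* hypothetical zone name */

            if (!dev)
                    return NULL;

            /* coherent allocations against this device land in the zone's CMA area */
            return dma_alloc_coherent(dev, SZ_1M, phys, GFP_KERNEL);
    }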
+diff --git a/include/linux/code_sign.h b/include/linux/code_sign.h
+deleted file mode 100644
+index 0e4f55742..000000000
+--- a/include/linux/code_sign.h
++++ /dev/null
+@@ -1,92 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef LINUX_INCLUDE_CODE_SIGN_H
+-#define LINUX_INCLUDE_CODE_SIGN_H
+-
+-#include
+-
+-/*
+- * Merkle tree properties. The file measurement is the hash of this structure
+- * excluding the signature and with the sig_size field set to 0, while version
+- * is replaced by code sign version.
+- */
+-struct code_sign_descriptor {
+- __u8 version; /* must be 1 */
+- __u8 hash_algorithm; /* Merkle tree hash algorithm */
+- __u8 log_blocksize; /* log2 of size of data and tree blocks */
+- __u8 salt_size; /* size of salt in bytes; 0 if none */
+- __le32 sig_size; /* size of signature in bytes; 0 if none */
+- __le64 data_size; /* size of file the Merkle tree is built over */
+- __u8 root_hash[64]; /* Merkle tree root hash */
+- __u8 salt[32]; /* salt prepended to each hashed block */
+- __u32 flags;
+- __u32 pgtypeinfo_size; /* size of page type info (in number of btis) */
+- __u64 tree_offset; /* merkle tree offset in file */
+- __u64 pgtypeinfo_off; /* offset of page type info */
+- __u8 __reserved2[119]; /* must be 0's */
+- __u8 cs_version; /* code sign version */
+- __u8 signature[]; /* optional PKCS#7 signature */
+-};
+-
+-enum {
+- RELEASE_CODE_START = 0x0,
+- RELEASE_PLATFORM_CODE,
+- RELEASE_AUTHED_CODE,
+- RELEASE_DEVELOPER_CODE,
+- RELEASE_BLOCK_CODE,
+- RELEASE_CODE_END,
+-
+- DEBUG_CODE_START = 0x100,
+- DEBUG_PLATFORM_CODE,
+- DEBUG_AUTHED_CODE,
+- DEBUG_DEVELOPER_CODE,
+- DEBUG_BLOCK_CODE,
+- DEBUG_DEBUG_CODE,
+- DEBUG_CODE_END,
+-
+- MAY_LOCAL_CODE = 0x201,
+-};
+-
+-#define FLAG_INSIDE_TREE (1 << 0) /* Merkle tree in file */
+-#define IS_INSIDE_TREE(desc) ((desc)->flags & FLAG_INSIDE_TREE)
+-
+-#define CONST_CAST_CODE_SIGN_DESC(desc) ((const struct code_sign_descriptor *)(desc))
+-#define CAST_CODE_SIGN_DESC(desc) ((struct code_sign_descriptor *)(desc))
+-
+-static inline u64 get_tree_offset_compact(const void *desc)
+-{
+- return CONST_CAST_CODE_SIGN_DESC(desc)->tree_offset;
+-}
+-
+-static inline bool is_inside_tree_compact(const void *_desc)
+-{
+- const struct code_sign_descriptor *desc = CONST_CAST_CODE_SIGN_DESC(_desc);
+-
+- return desc->cs_version && IS_INSIDE_TREE(desc);
+-}
+-
+-static inline int code_sign_check_descriptor_hook(const struct inode *inode, const void *desc)
+-{
+- int ret = 0;
+-
+- CALL_HCK_LITE_HOOK(code_sign_check_descriptor_lhck, inode, desc, &ret);
+- return ret;
+-}
+-
+-static inline int code_sign_before_measurement_hook(void *desc)
+-{
+- int ret = 0;
+-
+- CALL_HCK_LITE_HOOK(code_sign_before_measurement_lhck, desc, &ret);
+- return ret;
+-}
+-
+-static inline void code_sign_after_measurement_hook(void *desc, int version)
+-{
+- CALL_HCK_LITE_HOOK(code_sign_after_measurement_lhck, desc, version);
+-}
+-
+-#endif /* LINUX_INCLUDE_CODE_SIGN_H */
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index b205c4820..624d4a38c 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -111,9 +111,6 @@ enum cpuhp_state {
+ CPUHP_SLAB_PREPARE,
+ CPUHP_MD_RAID5_PREPARE,
+ CPUHP_RCUTREE_PREP,
+-#ifdef CONFIG_SCHED_CORE_CTRL
+- CPUHP_CORE_CTL_ISOLATION_DEAD,
+-#endif
+ CPUHP_CPUIDLE_COUPLED_PREPARE,
+ CPUHP_POWERPC_PMAC_PREPARE,
+ CPUHP_POWERPC_MMU_CTX_PREPARE,
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 7c36642bb..dbdbf1451 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -94,7 +94,6 @@ static inline void set_nr_cpu_ids(unsigned int nr)
+ * cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
+ * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
+- * cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
+ *
+ * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
+ *
+@@ -133,28 +132,9 @@ extern struct cpumask __cpu_dying_mask;
+ #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
+ #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+ #define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+-extern struct cpumask __cpu_isolated_mask;
+-#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)
+-#endif
+
+ extern atomic_t __num_online_cpus;
+
+-#if defined(CONFIG_CPU_ISOLATION_OPT) && NR_CPUS > 1
+-#define num_isolated_cpus() cpumask_weight(cpu_isolated_mask)
+-#define num_online_uniso_cpus() \
+-({ \
+- cpumask_t mask; \
+- \
+- cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask); \
+- cpumask_weight(&mask); \
+-})
+-#define cpu_isolated(cpu) cpumask_test_cpu((cpu), cpu_isolated_mask)
+-#else /* !CONFIG_CPU_ISOLATION_OPT || NR_CPUS == 1 */
+-#define num_isolated_cpus() 0U
+-#define num_online_uniso_cpus() num_online_cpus()
+-#define cpu_isolated(cpu) 0U
+-#endif
+ extern cpumask_t cpus_booted_once_mask;
+
+ static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
+@@ -633,18 +613,6 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
+ cpumask_bits(src2p), small_cpumask_bits);
+ }
+
+-/**
+- * cpumask_complement - *dstp = ~*srcp
+- * @dstp: the cpumask result
+- * @srcp: the input to invert
+- */
+-static inline void cpumask_complement(struct cpumask *dstp,
+- const struct cpumask *srcp)
+-{
+- bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+- small_cpumask_bits);
+-}
+-
+ /**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+@@ -1006,9 +974,6 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+ #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
+ #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+ #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+-#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
+-#endif
+ #endif
+
+ /* Wrappers for arch boot code to manipulate normally-constant masks */
+@@ -1059,17 +1024,6 @@ set_cpu_dying(unsigned int cpu, bool dying)
+ cpumask_clear_cpu(cpu, &__cpu_dying_mask);
+ }
+
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+-static inline void
+-set_cpu_isolated(unsigned int cpu, bool isolated)
+-{
+- if (isolated)
+- cpumask_set_cpu(cpu, &__cpu_isolated_mask);
+- else
+- cpumask_clear_cpu(cpu, &__cpu_isolated_mask);
+-}
+-#endif
+-
+ /**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+diff --git a/include/linux/edmac.h b/include/linux/edmac.h
+new file mode 100644
+index 000000000..bc9198c3a
+--- /dev/null
++++ b/include/linux/edmac.h
+@@ -0,0 +1,80 @@
++/*
++ *
++ * Copyright (c) 2015-2021 Shenshu Technologies Co., Ltd.
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef __DMAC_H__
++#define __DMAC_H__
++
++#define DMAC_ERROR_BASE 0x64
++
++#define DMAC_CHN_SUCCESS (DMAC_ERROR_BASE + 0x10)
++#define DMAC_CHN_ERROR (DMAC_ERROR_BASE + 0x11)
++#define DMAC_CHN_TIMEOUT (DMAC_ERROR_BASE + 0x12)
++#define DMAC_CHN_ALLOCAT (DMAC_ERROR_BASE + 0x13)
++#define DMAC_CHN_VACANCY (DMAC_ERROR_BASE + 0x14)
++#define DMAC_NOT_FINISHED (DMAC_ERROR_BASE + 0xe)
++
++#ifdef CONFIG_EDMAC
++extern int dma_driver_init(void);
++extern int dmac_channelclose(unsigned int channel);
++extern int dmac_channelstart(unsigned int u32channel);
++extern int dmac_channel_allocate(void);
++
++extern int dmac_start_m2p(unsigned int channel, unsigned int pmemaddr,
++ unsigned int uwperipheralid,
++ unsigned int uwnumtransfers,
++ unsigned int next_lli_addr);
++extern int dmac_m2p_transfer(unsigned long long memaddr, unsigned int uwperipheralid,
++ unsigned int length);
++extern int dmac_channel_free(unsigned int channel);
++
++extern int do_dma_m2p(unsigned long long memaddr, unsigned int peripheral_addr,
++ unsigned int length);
++extern int do_dma_p2m(unsigned long mem_addr, unsigned int peripheral_addr,
++ unsigned int length);
++extern int dmac_wait(int channel);
++
++extern int dmac_start_m2m(unsigned int channel, unsigned long psource,
++ unsigned long pdest, unsigned int uwnumtransfers);
++extern int dmac_m2m_transfer(unsigned long source, unsigned long dest,
++ unsigned int length);
++extern int dmac_register_isr(unsigned int channel, void *pisr);
++extern int free_dmalli_space(unsigned int *ppheadlli, unsigned int page_num);
++extern int dmac_start_llim2p(unsigned int channel, unsigned int *pfirst_lli,
++ unsigned int uwperipheralid);
++extern int dmac_buildllim2m(const unsigned long *ppheadlli,
++ unsigned long psource,
++ unsigned long pdest,
++ unsigned int totaltransfersize,
++ unsigned int uwnumtransfers);
++
++extern int dmac_start_llim2m(unsigned int channel, const unsigned long *pfirst_lli);
++
++extern int allocate_dmalli_space(struct device *dev, unsigned long *ppheadlli,
++ unsigned int page_num);
++#endif /* CONFIG_EDMAC */
++
++
++/* structure for LLI */
++typedef struct dmac_lli {
++ /* must be 64Byte aligned */
++ unsigned long next_lli;
++ unsigned int reserved[5];
++ unsigned int count;
++ unsigned long src_addr;
++ unsigned long dest_addr;
++ unsigned int config;
++ unsigned int pad[51];
++} dmac_lli;
++#endif
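edmac.h exposes a channel-based DMA API plus the DMAC_CHN_* completion codes above. The header does not spell out the calling conventions, so the sketch below is an assumption inferred from those codes rather than a verified usage:

    #include <linux/edmac.h>
    #include <linux/errno.h>

    /* copy one buffer to another with the engine; src/dst are physical addresses */
    static int demo_m2m_copy(unsigned long src, unsigned long dst, unsigned int len)
    {
            int ret = dmac_m2m_transfer(src, dst, len);

            /* assumption: the DMAC_CHN_* values double as return codes */
            return (ret == DMAC_CHN_SUCCESS) ? 0 : -EIO;
    }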
+diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
+index 05ca3ce16..1eb7eae58 100644
+--- a/include/linux/fsverity.h
++++ b/include/linux/fsverity.h
+@@ -138,8 +138,6 @@ static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
+ /* enable.c */
+
+ int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
+-int fsverity_enable_with_descriptor(struct file *filp,
+- void *desc, size_t desc_size);
+
+ /* measure.c */
+
+@@ -175,7 +173,6 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
+ bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
+ void fsverity_verify_bio(struct bio *bio);
+ void fsverity_enqueue_verify_work(struct work_struct *work);
+-u64 fsverity_get_verified_data_size(const struct inode *inode);
+
+ #else /* !CONFIG_FS_VERITY */
+
+@@ -192,12 +189,6 @@ static inline int fsverity_ioctl_enable(struct file *filp,
+ return -EOPNOTSUPP;
+ }
+
+-static inline int fsverity_enable_with_descriptor(struct file *filp,
+- void *desc, size_t desc_size)
+-{
+- return -EOPNOTSUPP;
+-}
+-
+ /* measure.c */
+
+ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
+@@ -260,36 +251,8 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
+ WARN_ON_ONCE(1);
+ }
+
+-static inline u64 fsverity_get_verified_data_size(const struct inode *inode)
+-{
+- WARN_ON(1);
+- return inode->i_size;
+-}
+-
+ #endif /* !CONFIG_FS_VERITY */
+
+-#ifdef CONFIG_SECURITY_CODE_SIGN
+-
+-/* enable.c */
+-
+-int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg);
+-
+-int fsverity_get_cert_type(const struct inode *inode);
+-
+-#else /* !CONFIG_SECURITY_CODE_SIGN */
+-
+-static inline int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg)
+-{
+- return -EOPNOTSUPP;
+-}
+-
+-static inline int fsverity_get_cert_type(const struct inode *inode)
+-{
+- return 0;
+-}
+-
+-#endif /* !CONFIG_SECURITY_CODE_SIGN */
+-
+ static inline bool fsverity_verify_folio(struct folio *folio)
+ {
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 747014ff8..a0803ed4b 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -15,24 +15,18 @@ struct vm_area_struct;
+
+ static inline int gfp_migratetype(const gfp_t gfp_flags)
+ {
+- unsigned int ret_mt = 0;
+-
+ VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
+ BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
++ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
++ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
++ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
+
+ if (unlikely(page_group_by_mobility_disabled))
+ return MIGRATE_UNMOVABLE;
+
+ /* Group based on mobility */
+- ret_mt = (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+-
+-#ifdef CONFIG_CMA_REUSE
+- if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
+- return MIGRATE_CMA;
+-#endif
+-
+- return ret_mt;
++ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ }
+ #undef GFP_MOVABLE_MASK
+ #undef GFP_MOVABLE_SHIFT
+diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
+index 10234b767..dfde1e1e3 100644
+--- a/include/linux/gfp_types.h
++++ b/include/linux/gfp_types.h
+@@ -55,9 +55,8 @@ typedef unsigned int __bitwise gfp_t;
+ #define ___GFP_SKIP_ZERO 0
+ #define ___GFP_SKIP_KASAN 0
+ #endif
+-#define ___GFP_CMA 0x4000000u
+ #ifdef CONFIG_LOCKDEP
+-#define ___GFP_NOLOCKDEP 0x8000000u
++#define ___GFP_NOLOCKDEP 0x4000000u
+ #else
+ #define ___GFP_NOLOCKDEP 0
+ #endif
+@@ -75,7 +74,6 @@ typedef unsigned int __bitwise gfp_t;
+ #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+ #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+-#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+
+ /**
+ * DOC: Page mobility and placement hints
+@@ -253,7 +251,7 @@ typedef unsigned int __bitwise gfp_t;
+ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+ /* Room for N __GFP_FOO bits */
+-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
++#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+ /**
+diff --git a/include/linux/hck/lite_hck_ced.h b/include/linux/hck/lite_hck_ced.h
+deleted file mode 100644
+index 9d1ffb7cc..000000000
+--- a/include/linux/hck/lite_hck_ced.h
++++ /dev/null
+@@ -1,50 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef _LITE_HCK_CED_H
+-#define _LITE_HCK_CED_H
+-
+-#include
+-#include
+-#include
+-
+-#ifndef CONFIG_HCK
+-#undef CALL_HCK_LITE_HOOK
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#undef REGISTER_HCK_LITE_HOOK
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#undef REGISTER_HCK_LITE_DATA_HOOK
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-#else
+-DECLARE_HCK_LITE_HOOK(ced_setattr_insert_lhck,
+- TP_PROTO(struct task_struct *task),
+- TP_ARGS(task));
+-
+-DECLARE_HCK_LITE_HOOK(ced_switch_task_namespaces_lhck,
+- TP_PROTO(const struct nsproxy *new),
+- TP_ARGS(new));
+-
+-DECLARE_HCK_LITE_HOOK(ced_detection_lhck,
+- TP_PROTO(struct task_struct *task),
+- TP_ARGS(task));
+-
+-DECLARE_HCK_LITE_HOOK(ced_exit_lhck,
+- TP_PROTO(struct task_struct *task),
+- TP_ARGS(task));
+-
+-DECLARE_HCK_LITE_HOOK(ced_kernel_clone_lhck,
+- TP_PROTO(struct task_struct *task),
+- TP_ARGS(task));
+-
+-DECLARE_HCK_LITE_HOOK(ced_commit_creds_lhck,
+- TP_PROTO(const struct cred *new),
+- TP_ARGS(new));
+-
+-DECLARE_HCK_LITE_HOOK(ced_switch_task_namespaces_permission_lhck,
+- TP_PROTO(const struct nsproxy *new, int *ret),
+- TP_ARGS(new, ret));
+-#endif /* CONFIG_HCK */
+-
+-#endif /* _LITE_HCK_CED_H */
+diff --git a/include/linux/hck/lite_hck_code_sign.h b/include/linux/hck/lite_hck_code_sign.h
+deleted file mode 100644
+index 83bcddaf0..000000000
+--- a/include/linux/hck/lite_hck_code_sign.h
++++ /dev/null
+@@ -1,39 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef LITE_HCK_CODE_SIGN_H
+-#define LITE_HCK_CODE_SIGN_H
+-
+-#include
+-#include
+-
+-#ifndef CONFIG_HCK
+-
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-
+-#else
+-
+-DECLARE_HCK_LITE_HOOK(code_sign_verify_certchain_lhck,
+- TP_PROTO(const void *raw_pkcs7, size_t pkcs7_len, struct fsverity_info *vi,
+- int *ret),
+- TP_ARGS(raw_pkcs7, pkcs7_len, vi, ret));
+-
+-DECLARE_HCK_LITE_HOOK(code_sign_check_descriptor_lhck,
+- TP_PROTO(const struct inode *inode, const void *desc, int *ret),
+- TP_ARGS(inode, desc, ret));
+-
+-DECLARE_HCK_LITE_HOOK(code_sign_before_measurement_lhck,
+- TP_PROTO(void *desc, int *ret),
+- TP_ARGS(desc, ret));
+-
+-DECLARE_HCK_LITE_HOOK(code_sign_after_measurement_lhck,
+- TP_PROTO(void *desc, int version),
+- TP_ARGS(desc, version));
+-
+-#endif /* CONFIG_HCK */
+-
+-#endif /* LITE_HCK_CODE_SIGN_H */
+diff --git a/include/linux/hck/lite_hck_hideaddr.h b/include/linux/hck/lite_hck_hideaddr.h
+deleted file mode 100644
+index e7dbf9695..000000000
+--- a/include/linux/hck/lite_hck_hideaddr.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef _LITE_HCK_HIDEADDR_H
+-#define _LITE_HCK_HIDEADDR_H
+-
+-#include "linux/seq_file.h"
+-#include "linux/mm_types.h"
+-#include
+-
+-#ifndef CONFIG_HCK
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-#else
+-
+-
+-DECLARE_HCK_LITE_HOOK(hideaddr_header_prefix_lhck,
+- TP_PROTO(unsigned long *start, unsigned long *end, vm_flags_t *flags, struct seq_file *m, struct vm_area_struct *vma),
+- TP_ARGS(start, end, flags, m, vma));
+-
+-#endif /* CONFIG_HCK */
+-#endif /* _LITE_HCK_HIDEADDR_H */
+diff --git a/include/linux/hck/lite_hck_inet.h b/include/linux/hck/lite_hck_inet.h
+deleted file mode 100644
+index 5dd1ecd83..000000000
+--- a/include/linux/hck/lite_hck_inet.h
++++ /dev/null
+@@ -1,26 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef LITE_HCK_INET_H
+-#define LITE_HCK_INET_H
+-
+-#include
+-
+-#ifndef CONFIG_HCK
+-#undef CALL_HCK_LITE_HOOK
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#undef REGISTER_HCK_LITE_HOOK
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#undef REGISTER_HCK_LITE_DATA_HOOK
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-#else
+-
+-DECLARE_HCK_LITE_HOOK(nip_ninet_ehashfn_lhck,
+- TP_PROTO(const struct sock *sk, u32 *ret),
+- TP_ARGS(sk, ret));
+-
+-#endif /* CONFIG_HCK */
+-
+-#endif /* LITE_HCK_INET_H */
+diff --git a/include/linux/hck/lite_hck_jit_memory.h b/include/linux/hck/lite_hck_jit_memory.h
+deleted file mode 100644
+index dbce24a43..000000000
+--- a/include/linux/hck/lite_hck_jit_memory.h
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+-* Copyright (c) 2023 Huawei Device Co., Ltd.
+-*/
+-
+-#ifndef LITE_HCK_JIT_MEMORY_H
+-#define LITE_HCK_JIT_MEMORY_H
+-
+-#include
+-#include
+-
+-#ifndef CONFIG_HCK
+-#undef CALL_HCK_LITE_HOOK
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#undef REGISTER_HCK_LITE_HOOK
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#undef REGISTER_HCK_LITE_DATA_HOOK
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-
+-#else
+-
+-DECLARE_HCK_LITE_HOOK(find_jit_memory_lhck,
+- TP_PROTO(struct task_struct *task, unsigned long start, unsigned long size, int *err),
+- TP_ARGS(task, start, size, err));
+-
+-DECLARE_HCK_LITE_HOOK(check_jit_memory_lhck,
+- TP_PROTO(struct task_struct *task, unsigned long cookie, unsigned long prot,
+- unsigned long flag, unsigned long size, unsigned long *err),
+- TP_ARGS(task, cookie, prot, flag, size, err));
+-
+-DECLARE_HCK_LITE_HOOK(delete_jit_memory_lhck,
+- TP_PROTO(struct task_struct *task, unsigned long start, unsigned long size, int *err),
+- TP_ARGS(task, start, size, err));
+-
+-DECLARE_HCK_LITE_HOOK(exit_jit_memory_lhck,
+- TP_PROTO(struct task_struct *task),
+- TP_ARGS(task));
+-
+-#endif /* CONFIG_HCK */
+-
+-#endif /* LITE_HCK_JIT_MEMORY_H */
+diff --git a/include/linux/hck/lite_hck_sample.h b/include/linux/hck/lite_hck_sample.h
+deleted file mode 100644
+index f29dec41a..000000000
+--- a/include/linux/hck/lite_hck_sample.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-//SPDX-License-Identifier: GPL-2.0-only
+-/*lite_hck_sample.h
+- *
+- *OpenHarmony Common Kernel Vendor Hook Smaple
+- *
+- */
+-
+-#ifndef LITE_HCK_SAMPLE_H
+-#define LITE_HCK_SAMPLE_H
+-
+-#include
+-
+-
+-struct sample_hck_data {
+- int stat;
+- char* name;
+-};
+-
+-/*
+- * Follwing tracepoints are not exported in trace and provide a
+- * mechanism for vendor modules to hok and extend functionality
+- */
+-#ifndef CONFIG_HCK
+-
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-
+-#else
+-
+-DECLARE_HCK_LITE_HOOK(get_boot_config_lhck, TP_PROTO(int* s), TP_ARGS(s));
+-DECLARE_HCK_LITE_HOOK(set_boot_stat_lhck, TP_PROTO(int m), TP_ARGS(m));
+-
+-#endif
+-
+-#endif /* LITE_HCK_SAMPLE_H */
+diff --git a/include/linux/hck/lite_hck_xpm.h b/include/linux/hck/lite_hck_xpm.h
+deleted file mode 100644
+index 0ec0063d3..000000000
+--- a/include/linux/hck/lite_hck_xpm.h
++++ /dev/null
+@@ -1,55 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Copyright (c) 2023 Huawei Device Co., Ltd.
+- */
+-
+-#ifndef _LITE_HCK_XPM_H
+-#define _LITE_HCK_XPM_H
+-
+-#include
+-#include
+-#include
+-
+-#ifndef CONFIG_HCK
+-#undef CALL_HCK_LITE_HOOK
+-#define CALL_HCK_LITE_HOOK(name, args...)
+-#undef REGISTER_HCK_LITE_HOOK
+-#define REGISTER_HCK_LITE_HOOK(name, probe)
+-#undef REGISTER_HCK_LITE_DATA_HOOK
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data)
+-#else
+-DECLARE_HCK_LITE_HOOK(xpm_delete_cache_node_lhck,
+- TP_PROTO(struct inode *file_node),
+- TP_ARGS(file_node));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_region_outer_lhck,
+- TP_PROTO(unsigned long addr_start, unsigned long addr_end,
+- unsigned long flags, bool *ret),
+- TP_ARGS(addr_start, addr_end, flags, ret));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck,
+- TP_PROTO(unsigned long addr, unsigned long len, unsigned long map_flags,
+- unsigned long unmapped_flags, unsigned long *ret),
+- TP_ARGS(addr, len, map_flags, unmapped_flags, ret));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_integrity_equal_lhck,
+- TP_PROTO(struct page *page, struct page *kpage, bool *ret),
+- TP_ARGS(page, kpage, ret));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_integrity_check_lhck,
+- TP_PROTO(struct vm_area_struct *vma, unsigned int vflags,
+- unsigned long addr, struct page *page, vm_fault_t *ret),
+- TP_ARGS(vma, vflags, addr, page, ret));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_integrity_validate_lhck,
+- TP_PROTO(struct vm_area_struct *vma, unsigned int vflags,
+- unsigned long addr, struct page *page, vm_fault_t *ret),
+- TP_ARGS(vma, vflags, addr, page, ret));
+-
+-DECLARE_HCK_LITE_HOOK(xpm_integrity_update_lhck,
+- TP_PROTO(struct vm_area_struct *vma, unsigned int vflags,
+- struct page *page),
+- TP_ARGS(vma, vflags, page));
+-#endif /* CONFIG_HCK */
+-
+-#endif /* _LITE_HCK_XPM_H */
+diff --git a/include/linux/hck/lite_vendor_hooks.h b/include/linux/hck/lite_vendor_hooks.h
+deleted file mode 100644
+index 4b0f30f6c..000000000
+--- a/include/linux/hck/lite_vendor_hooks.h
++++ /dev/null
+@@ -1,126 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
+- * OpenHarmony Common Kernel Vendor Hook Support
+- * Based on include/trace/hooks/lite_vendor_hooks.h
+- *
+- */
+-
+-#ifndef LITE_VENDOR_HOOK_H
+-#define LITE_VENDOR_HOOK_H
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-struct __lvh_func {
+- void *func;
+- void *data;
+- bool has_data;
+-};
+-
+-struct lite_vendor_hook {
+- struct mutex mutex;
+- struct __lvh_func *funcs;
+-};
+-#endif // LITE_VENDOR_HOOK_H
+-
+-#ifdef CREATE_LITE_VENDOR_HOOK
+-
+-#define DEFINE_HCK_LITE_HOOK(name, proto, args) \
+- struct lite_vendor_hook __lvh_##name __used \
+- __section("__vendor_hooks") = { \
+- .mutex = __MUTEX_INITIALIZER(__lvh_##name.mutex), \
+- .funcs = NULL }; \
+- EXPORT_SYMBOL(__lvh_##name); \
+- void lvh_probe_##name(proto) { return; } \
+- void lvh_probe_data_##name(void *lvh_data, proto) { return; }
+-
+-#undef DECLARE_HCK_LITE_HOOK
+-#define DECLARE_HCK_LITE_HOOK(name, proto, args) \
+- DEFINE_HCK_LITE_HOOK(name, PARAMS(proto), PARAMS(args))
+-
+-#else // #ifndef CREATE_LITE_VENDOR_HOOK
+-
+-#define REGISTER_HCK_LITE_HOOK(name, probe) \
+- extern typeof(lvh_probe_##name) (probe); \
+- do { \
+- if (register_lvh_##name(probe)) \
+- WARN_ONCE(1, "LVH register failed!\n"); \
+- } while (0)
+-
+-#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data) \
+- extern typeof(lvh_probe_data_##name) (probe); \
+- do { \
+- if (register_lvh_data_##name(probe, data)) \
+- WARN_ONCE(1, "LVH register failed!\n"); \
+- } while (0)
+-
+-#define CALL_HCK_LITE_HOOK(name, args...) \
+- call_lvh_##name(args)
+-
+-#define __DECLARE_HCK_LITE_HOOK(name, proto, args) \
+- extern struct lite_vendor_hook __lvh_##name; \
+- extern void lvh_probe_##name(proto); \
+- extern void lvh_probe_data_##name(void *lvh_data, proto); \
+- static inline void \
+- call_lvh_##name(proto) \
+- { \
+- struct __lvh_func *funcs = (&__lvh_##name)->funcs; \
+- if (funcs && funcs->func) { \
+- if (funcs->has_data) \
+- ((void(*)(void *, proto))funcs->func)(funcs->data, args); \
+- else \
+- ((void(*)(proto))funcs->func)(args); \
+- } \
+- } \
+- static inline int \
+- __register_lvh_##name(void *probe, void *data, bool has_data) \
+- { \
+- int err = 0; \
+- struct __lvh_func *funcs; \
+- struct module *mod; \
+- mutex_lock(&__lvh_##name.mutex); \
+- funcs = (&__lvh_##name)->funcs; \
+- if (funcs) { \
+- if (funcs->func != probe || funcs->data != data) \
+- err = -EBUSY; \
+- goto out; \
+- } \
+- \
+- funcs = (struct __lvh_func*)kmalloc(sizeof(struct __lvh_func), GFP_KERNEL); \
+- if (!funcs) { \
+- err = -ENOMEM; \
+- goto out; \
+- } \
+- \
+- funcs->func = probe; \
+- funcs->data = data; \
+- funcs->has_data = has_data; \
+- mod = __module_address((uintptr_t)probe); \
+- if (mod) \
+- (void)try_module_get(mod); \
+- (&__lvh_##name)->funcs = funcs; \
+- out: \
+- mutex_unlock(&__lvh_##name.mutex); \
+- return err; \
+- } \
+- static inline int \
+- register_lvh_##name(void (*probe)(proto)) \
+- { \
+- return __register_lvh_##name((void *)probe, NULL, false); \
+- } \
+- static inline int \
+- register_lvh_data_##name(void (*probe)(void *lvh_data, proto), void *data) \
+- { \
+- return __register_lvh_##name((void *)probe, data, true); \
+- }
+-
+-#undef DECLARE_HCK_LITE_HOOK
+-#define DECLARE_HCK_LITE_HOOK(name, proto, args) \
+- __DECLARE_HCK_LITE_HOOK(name, PARAMS(proto), PARAMS(args))
+-
+-#endif // CREATE_LITE_VENDOR_HOOK
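For context on the machinery deleted in the hunks above: DECLARE_HCK_LITE_HOOK paired a single probe slot with type-checked register/call macros, so a vendor module could attach exactly one handler per hook. The pattern looked roughly like this before the removal (hook and probe names are invented for illustration):

    /* in a shared header */
    DECLARE_HCK_LITE_HOOK(demo_lhck, TP_PROTO(int val), TP_ARGS(val));

    /* in the vendor module */
    static void demo_probe(int val)
    {
            pr_info("demo hook fired: %d\n", val);
    }

    static int __init demo_init(void)
    {
            REGISTER_HCK_LITE_HOOK(demo_lhck, demo_probe);
            return 0;
    }

    /* at the instrumented kernel site: CALL_HCK_LITE_HOOK(demo_lhck, 42); */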
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index ccef475c0..75607d4ba 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -226,7 +226,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ {
+ struct folio *folio;
+
+- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0, vma, vaddr, false);
++ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+ if (folio)
+ clear_user_highpage(&folio->page, vaddr);
+
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index a3166100f..9f5ae5f0a 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -23,6 +23,10 @@
+ #include /* for swab16 */
+ #include
+
++#ifdef CONFIG_ARCH_BSP
++#include
++#endif
++
+ extern struct bus_type i2c_bus_type;
+ extern struct device_type i2c_adapter_type;
+ extern struct device_type i2c_client_type;
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index b6ef263e8..0f9959026 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -346,6 +346,10 @@ struct iommu_domain_ops {
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
++#if defined(CONFIG_VENDOR_NPU)
++ void (*inv_iotlb_range)(struct iommu_domain *domain,
++ struct mm_struct *mm, unsigned long start, unsigned long end);
++#endif
+ void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 04340d3d2..2923754c1 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -176,9 +176,6 @@ LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+ LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+-#ifdef CONFIG_SECURITY_XPM
+-LSM_HOOK(int, 0, mmap_region, struct vm_area_struct *vma)
+-#endif
+ LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+ LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 7d025de30..b1fdb1554 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -21,8 +21,6 @@
+ #include
+ #include
+ #include
+-#include
+-#include
+
+ struct mem_cgroup;
+ struct obj_cgroup;
+@@ -60,11 +58,6 @@ struct mem_cgroup_reclaim_cookie {
+ unsigned int generation;
+ };
+
+-static inline bool is_prot_page(struct page *page)
+-{
+- return false;
+-}
+-
+ #ifdef CONFIG_MEMCG
+
+ #define MEM_CGROUP_ID_SHIFT 16
+@@ -304,13 +297,6 @@ struct mem_cgroup {
+ bool tcpmem_active;
+ int tcpmem_pressure;
+
+-#ifdef CONFIG_HYPERHOLD_MEMCG
+- struct list_head score_node;
+-#define MEM_CGROUP_NAME_MAX_LEN 100
+- char name[MEM_CGROUP_NAME_MAX_LEN];
+- struct memcg_reclaim memcg_reclaimed;
+-#endif
+-
+ #ifdef CONFIG_MEMCG_KMEM
+ int kmemcg_id;
+ struct obj_cgroup __rcu *objcg;
+@@ -729,12 +715,6 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+
+ void mem_cgroup_migrate(struct folio *old, struct folio *new);
+
+-static inline struct mem_cgroup_per_node *mem_cgroup_nodeinfo(struct mem_cgroup *memcg,
+- int nid)
+-{
+- return memcg->nodeinfo[nid];
+-}
+-
+ /**
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
+ * @memcg: memcg of the wanted lruvec
+@@ -854,10 +834,6 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+ {
+ if (mem_cgroup_disabled())
+ return 0;
+-#ifdef CONFIG_HYPERHOLD_FILE_LRU
+- if (!memcg)
+- return -1;
+-#endif
+
+ return memcg->id.id;
+ }
+@@ -884,11 +860,6 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+ if (mem_cgroup_disabled())
+ return NULL;
+
+-#ifdef CONFIG_HYPERHOLD_FILE_LRU
+- if (is_node_lruvec(lruvec))
+- return NULL;
+-#endif
+-
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->memcg;
+ }
+@@ -1041,10 +1012,6 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ if (mem_cgroup_disabled())
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
+-#ifdef CONFIG_HYPERHOLD_FILE_LRU
+- if (is_node_lruvec(lruvec))
+- return node_page_state(lruvec_pgdat(lruvec), idx);
+-#endif
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
+ #ifdef CONFIG_SMP
+@@ -1063,11 +1030,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ if (mem_cgroup_disabled())
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
+-#ifdef CONFIG_HYPERHOLD_FILE_LRU
+- if (is_node_lruvec(lruvec))
+- return node_page_state(lruvec_pgdat(lruvec), idx);
+-#endif
+-
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
+ #ifdef CONFIG_SMP
+@@ -1104,17 +1066,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
+ local_irq_restore(flags);
+ }
+
+-#ifdef CONFIG_HYPERHOLD_FILE_LRU
+-static __always_inline bool is_file_page(struct page *page)
+-{
+- if (!PageUnevictable(page) && !PageSwapBacked(page) && page_mapping(page))
+- return true;
+-
+- return false;
+-
+-}
+-#endif
+-
+ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
+
+diff --git a/include/linux/mfd/bsp_fmc.h b/include/linux/mfd/bsp_fmc.h
+new file mode 100644
+index 000000000..e86404386
+--- /dev/null
++++ b/include/linux/mfd/bsp_fmc.h
+@@ -0,0 +1,479 @@
++/*
++ * Header file for Vendor Flash Memory Controller Driver
++ *
++ * Copyright (c) 2016 Shenshu Technologies Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __BSP_FMC_H_
++#define __BSP_FMC_H_
++
++#include
++#include
++#include
++#include
++
++#define _512B (512)
++#define _1K (1024)
++#define _2K (2048)
++#define _4K (4096)
++#define _8K (8192)
++#define _16K (16384)
++#define _32K (32768)
++#define _64K (0x10000UL)
++#define _128K (0x20000UL)
++#define _256K (0x40000UL)
++#define _512K (0x80000UL)
++#define _1M (0x100000UL)
++#define _2M (0x200000UL)
++#define _4M (0x400000UL)
++#define _8M (0x800000UL)
++#define _16M (0x1000000UL)
++#define _32M (0x2000000UL)
++#define _64M (0x4000000UL)
++#define _128M (0x8000000UL)
++#define _256M (0x10000000UL)
++#define _512M (0x20000000UL)
++#define _1G (0x40000000ULL)
++#define _2G (0x80000000ULL)
++#define _4G (0x100000000ULL)
++#define _8G (0x200000000ULL)
++#define _16G (0x400000000ULL)
++#define _64G (0x1000000000ULL)
++
++#define FMC_MEM_LEN _16M
++#define FMC_MAX_DMA_LEN _8K
++#define MAX_OOB_LEN _512B
++#define MAX_PAGE_SIZE _8K
++#define BUFF_LEN 128
++
++/* FMC REG MAP */
++#define FMC_CFG 0x00
++#define fmc_cfg_spi_nand_sel(_type) (((_type) & 0x3) << 11)
++#define SPI_NOR_ADDR_MODE BIT(10)
++#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
++#define FMC_CFG_OP_MODE_BOOT 0
++#define FMC_CFG_OP_MODE_NORMAL 1
++#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
++#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
++
++#define fmc_cfg_block_size(_size) (((_size) & 0x3) << 8)
++#define fmc_cfg_ecc_type(_type) (((_type) & 0x7) << 5)
++#define fmc_cfg_page_size(_size) (((_size) & 0x3) << 3)
++#define fmc_cfg_flash_sel(_type) (((_type) & 0x3) << 1)
++#define fmc_cfg_op_mode(_mode) ((_mode) & 0x1)
++
++#define SPI_NAND_MFR_OTHER 0x0
++
++#define SPI_NAND_SEL_SHIFT 11
++#define SPI_NAND_SEL_MASK (0x3 << SPI_NAND_SEL_SHIFT)
++
++#define SPI_NOR_ADDR_MODE_3_BYTES 0x0
++#define SPI_NOR_ADDR_MODE_4_BYTES 0x1
++
++#define SPI_NOR_ADDR_MODE_SHIFT 10
++#define SPI_NOR_ADDR_MODE_MASK (0x1 << SPI_NOR_ADDR_MODE_SHIFT)
++
++#define BLOCK_SIZE_64_PAGE 0x0
++#define BLOCK_SIZE_128_PAGE 0x1
++#define BLOCK_SIZE_256_PAGE 0x2
++#define BLOCK_SIZE_512_PAGE 0x3
++
++#define BLOCK_SIZE_MASK (0x3 << 8)
++
++#define ECC_TYPE_0BIT 0x0
++#define ECC_TYPE_8BIT 0x1
++#define ECC_TYPE_16BIT 0x2
++#define ECC_TYPE_24BIT 0x3
++#define ECC_TYPE_28BIT 0x4
++#define ECC_TYPE_40BIT 0x5
++#define ECC_TYPE_64BIT 0x6
++
++#define ECC_TYPE_SHIFT 5
++#define ECC_TYPE_MASK (0x7 << ECC_TYPE_SHIFT)
++
++#define PAGE_SIZE_2KB 0x0
++#define PAGE_SIZE_4KB 0x1
++#define PAGE_SIZE_8KB 0x2
++#define PAGE_SIZE_16KB 0x3
++
++#define PAGE_SIZE_SHIFT 3
++#define PAGE_SIZE_MASK (0x3 << PAGE_SIZE_SHIFT)
++
++#define FLASH_TYPE_SPI_NOR 0x0
++#define FLASH_TYPE_SPI_NAND 0x1
++#define FLASH_TYPE_NAND 0x2
++#define FLASH_TYPE_UNKNOWN 0x3
++
++#define FLASH_TYPE_SEL_MASK (0x3 << 1)
++#define get_spi_flash_type(_reg) (((_reg) >> 1) & 0x3)
++
++#define FMC_GLOBAL_CFG 0x04
++#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
++#define FMC_GLOBAL_CFG_RANDOMIZER_EN (1 << 2)
++#define FLASH_TYPE_SEL_MASK (0x3 << 1)
++#define fmc_cfg_flash_sel(_type) (((_type) & 0x3) << 1)
++
++#define FMC_GLOBAL_CFG_DTR_MODE BIT(11)
++#define FMC_SPI_TIMING_CFG 0x08
++#define timing_cfg_tcsh(nr) (((nr) & 0xf) << 8)
++#define timing_cfg_tcss(nr) (((nr) & 0xf) << 4)
++#define timing_cfg_tshsl(nr) ((nr) & 0xf)
++
++#define CS_HOLD_TIME 0x6
++#define CS_SETUP_TIME 0x6
++#define CS_DESELECT_TIME 0xf
++
++#define FMC_PND_PWIDTH_CFG 0x0c
++#define pwidth_cfg_rw_hcnt(_n) (((_n) & 0xf) << 8)
++#define pwidth_cfg_r_lcnt(_n) (((_n) & 0xf) << 4)
++#define pwidth_cfg_w_lcnt(_n) ((_n) & 0xf)
++
++#define RW_H_WIDTH (0xa)
++#define R_L_WIDTH (0xa)
++#define W_L_WIDTH (0xa)
++
++#define FMC_INT 0x18
++#define FMC_INT_AHB_OP BIT(7)
++#define FMC_INT_WR_LOCK BIT(6)
++#define FMC_INT_DMA_ERR BIT(5)
++#define FMC_INT_ERR_ALARM BIT(4)
++#define FMC_INT_ERR_INVALID BIT(3)
++#define FMC_INT_ERR_INVALID_MASK (0x8)
++#define FMC_INT_ERR_VALID BIT(2)
++#define FMC_INT_ERR_VALID_MASK (0x4)
++#define FMC_INT_OP_FAIL BIT(1)
++#define FMC_INT_OP_DONE BIT(0)
++
++#define FMC_INT_EN 0x1c
++#define FMC_INT_EN_AHB_OP BIT(7)
++#define FMC_INT_EN_WR_LOCK BIT(6)
++#define FMC_INT_EN_DMA_ERR BIT(5)
++#define FMC_INT_EN_ERR_ALARM BIT(4)
++#define FMC_INT_EN_ERR_INVALID BIT(3)
++#define FMC_INT_EN_ERR_VALID BIT(2)
++#define FMC_INT_EN_OP_FAIL BIT(1)
++#define FMC_INT_EN_OP_DONE BIT(0)
++
++#define FMC_INT_CLR 0x20
++#define FMC_INT_CLR_AHB_OP BIT(7)
++#define FMC_INT_CLR_WR_LOCK BIT(6)
++#define FMC_INT_CLR_DMA_ERR BIT(5)
++#define FMC_INT_CLR_ERR_ALARM BIT(4)
++#define FMC_INT_CLR_ERR_INVALID BIT(3)
++#define FMC_INT_CLR_ERR_VALID BIT(2)
++#define FMC_INT_CLR_OP_FAIL BIT(1)
++#define FMC_INT_CLR_OP_DONE BIT(0)
++
++#define FMC_INT_CLR_ALL 0xff
++
++#define FMC_CMD 0x24
++#define fmc_cmd_cmd2(_cmd) (((_cmd) & 0xff) << 8)
++#define fmc_cmd_cmd1(_cmd) ((_cmd) & 0xff)
++
++#define FMC_ADDRH 0x28
++#define fmc_addrh_set(_addr) ((_addr) & 0xff)
++
++#define FMC_ADDRL 0x2c
++#define fmc_addrl_block_mask(_page) ((_page) & 0xffffffc0)
++#define fmc_addrl_block_h_mask(_page) (((_page) & 0xffff) << 16)
++#define fmc_addrl_block_l_mask(_page) ((_page) & 0xffc0)
++
++#define READ_ID_ADDR 0x00
++#define PROTECT_ADDR 0xa0
++#define FEATURE_ADDR 0xb0
++#define STATUS_ADDR 0xc0
++#define FMC_OP_CFG 0x30
++#define op_cfg_fm_cs(_cs) ((_cs) << 11)
++#define op_cfg_force_cs_en(_en) ((_en) << 10)
++#define op_cfg_mem_if_type(_type) (((_type) & 0x7) << 7)
++#define op_cfg_addr_num(_addr) (((_addr) & 0x7) << 4)
++#define op_cfg_dummy_num(_dummy) ((_dummy) & 0xf)
++#define OP_CFG_OEN_EN (0x1 << 13)
++
++#define IF_TYPE_SHIFT 7
++#define IF_TYPE_MASK (0x7 << IF_TYPE_SHIFT)
++
++#define READ_ID_ADDR_NUM 1
++#define FEATURES_OP_ADDR_NUM 1
++#define STD_OP_ADDR_NUM 3
++
++#define FMC_SPI_OP_ADDR 0x34
++
++#define FMC_DATA_NUM 0x38
++#define fmc_data_num_cnt(_n) ((_n) & 0x3fff)
++
++#define SPI_NOR_SR_LEN 1 /* Status Register length */
++#define SPI_NOR_CR_LEN 1 /* Config Register length */
++#define FEATURES_DATA_LEN 1
++#define READ_OOB_BB_LEN 1
++
++#define PROTECT_BRWD_MASK BIT(7)
++#define PROTECT_BP3_MASK BIT(6)
++#define PROTECT_BP2_MASK BIT(5)
++#define PROTECT_BP1_MASK BIT(4)
++#define PROTECT_BP0_MASK BIT(3)
++
++#define any_bp_enable(_val) (((PROTECT_BP3_MASK & (unsigned int)(_val)) != 0) || \
++ ((PROTECT_BP2_MASK & (unsigned int)(_val)) != 0) || \
++ ((PROTECT_BP1_MASK & (unsigned int)(_val)) != 0) || \
++ ((PROTECT_BP0_MASK & (unsigned int)(_val)) != 0))
++
++#define ALL_BP_MASK (PROTECT_BP3_MASK | PROTECT_BP2_MASK | \
++ PROTECT_BP1_MASK | PROTECT_BP0_MASK)
++
++#define FEATURE_ECC_ENABLE (1 << 4)
++#define FEATURE_QE_ENABLE (1 << 0)
++
++#define FMC_OP 0x3c
++#define FMC_OP_DUMMY_EN BIT(8)
++#define FMC_OP_CMD1_EN BIT(7)
++#define FMC_OP_ADDR_EN BIT(6)
++#define FMC_OP_WRITE_DATA_EN BIT(5)
++#define FMC_OP_CMD2_EN BIT(4)
++#define FMC_OP_WAIT_READY_EN BIT(3)
++#define FMC_OP_READ_DATA_EN BIT(2)
++#define FMC_OP_READ_STATUS_EN BIT(1)
++#define FMC_OP_REG_OP_START BIT(0)
++
++#define FMC_OP_DMA 0x68
++#define FMC_DMA_LEN 0x40
++#define fmc_dma_len_set(_len) ((_len) & 0x0fffffff)
++
++#define FMC_DMA_AHB_CTRL 0x48
++#define FMC_DMA_AHB_CTRL_DMA_PP_EN BIT(3)
++#define FMC_DMA_AHB_CTRL_BURST16_EN BIT(2)
++#define FMC_DMA_AHB_CTRL_BURST8_EN BIT(1)
++#define FMC_DMA_AHB_CTRL_BURST4_EN BIT(0)
++
++#define ALL_BURST_ENABLE (FMC_DMA_AHB_CTRL_BURST16_EN | \
++ FMC_DMA_AHB_CTRL_BURST8_EN | \
++ FMC_DMA_AHB_CTRL_BURST4_EN)
++
++#define FMC_DMA_ADDR_OFFSET 4096
++
++#define FMC_DMA_SADDR_D0 0x4c
++
++#define FMC_DMA_SADDR_D1 0x50
++
++#define FMC_DMA_SADDR_D2 0x54
++
++#define FMC_DMA_SADDR_D3 0x58
++
++#define FMC_DMA_SADDR_OOB 0x5c
++
++#ifdef CONFIG_64BIT
++#define FMC_DMA_BIT_SHIFT_LENTH 32
++#define FMC_DMA_SADDRH_D0 0x200
++#define FMC_DMA_SADDRH_SHIFT 0x3LL
++#define FMC_DMA_SADDRH_MASK (FMC_DMA_SADDRH_SHIFT << FMC_DMA_BIT_SHIFT_LENTH)
++
++#define FMC_DMA_SADDRH_OOB 0x210
++#endif
++
++#define FMC_DMA_BLK_SADDR 0x60
++#define fmc_dma_blk_saddr_set(_addr) ((_addr) & 0xffffff)
++
++#define FMC_DMA_BLK_LEN 0x64
++#define fmc_dma_blk_len_set(_len) ((_len) & 0xffff)
++
++#define FMC_OP_CTRL 0x68
++#define op_ctrl_rd_opcode(code) (((code) & 0xff) << 16)
++#define op_ctrl_wr_opcode(code) (((code) & 0xff) << 8)
++#define op_ctrl_rd_op_sel(_op) (((_op) & 0x3) << 4)
++#define op_ctrl_dma_op(_type) ((_type) << 2)
++#define op_ctrl_rw_op(op) ((op) << 1)
++#define OP_CTRL_DMA_OP_READY BIT(0)
++
++#define RD_OP_READ_ALL_PAGE 0x0
++#define RD_OP_READ_OOB 0x1
++#define RD_OP_BLOCK_READ 0x2
++
++#define RD_OP_SHIFT 4
++#define RD_OP_MASK (0x3 << RD_OP_SHIFT)
++
++#define OP_TYPE_DMA 0x0
++#define OP_TYPE_REG 0x1
++
++#define FMC_OP_READ 0x0
++#define FMC_OP_WRITE 0x1
++#define RW_OP_READ 0x0
++#define RW_OP_WRITE 0x1
++
++#define FMC_OP_PARA 0x70
++#define FMC_OP_PARA_RD_OOB_ONLY BIT(1)
++
++#define FMC_BOOT_SET 0x74
++#define FMC_BOOT_SET_DEVICE_ECC_EN BIT(3)
++#define FMC_BOOT_SET_BOOT_QUAD_EN BIT(1)
++
++#define FMC_STATUS 0xac
++
++#ifndef FMC_VERSION
++#define FMC_VERSION 0xbc
++#endif
++
++/* fmc IP version */
++#ifndef FMC_VER_100
++#define FMC_VER_100 (0x100)
++#endif
++
++/* DMA address align with 32 bytes. */
++#define FMC_DMA_ALIGN 32
++
++#define FMC_CHIP_DELAY 25
++#define FMC_ECC_ERR_NUM0_BUF0 0xc0
++#define get_ecc_err_num(_i, _reg) (((_reg) >> ((_i) * 8)) & 0xff)
++
++#define DISABLE 0
++#define ENABLE 1
++
++#define FMC_REG_ADDRESS_LEN 0x200
++
++#define FMC_MAX_READY_WAIT_JIFFIES (HZ)
++
++#define MAX_SPI_NOR_ID_LEN 8
++#define MAX_NAND_ID_LEN 8
++#define MAX_SPI_NAND_ID_LEN 3
++
++#define GET_OP 0
++#define SET_OP 1
++
++#define STATUS_ECC_MASK (0x3 << 4)
++#define STATUS_P_FAIL_MASK (1 << 3)
++#define STATUS_E_FAIL_MASK (1 << 2)
++#define STATUS_WEL_MASK (1 << 1)
++#define STATUS_OIP_MASK (1 << 0)
++
++#define FMC_VERSION 0xbc
++
++/* fmc IP version */
++#define FMC_VER_100 (0x100)
++
++#define CONFIG_SPI_NAND_MAX_CHIP_NUM (1)
++
++#define CONFIG_FMC100_MAX_NAND_CHIP (1)
++
++#define get_page_index(host) \
++ (((host)->addr_value[0] >> 16) | ((host)->addr_value[1] << 16))
++#define FMC_MAX_CHIP_NUM 2
++
++extern unsigned char fmc_cs_user[];
++
++#define fmc_readl(_host, _reg) \
++ (readl((char *)(_host)->regbase + (_reg)))
++
++#define fmc_readb( _addr) \
++ (readb((void __iomem *)(_addr)))
++
++#define fmc_readw( _addr) \
++ (readw((void __iomem *)(_addr)))
++
++#define fmc_writel(_host, _reg, _value) \
++ (writel((u_int)(_value), ((char *)(_host)->regbase + (_reg))))
++
++#define fmc_writeb(_val, _addr) \
++ (writeb((u_int)(_val), ((char *)(_addr))))
++
++#define FMC_WAIT_TIMEOUT 0x2000000
++
++#define fmc_cmd_wait_cpu_finish(_host) \
++ do { \
++ unsigned regval, timeout = FMC_WAIT_TIMEOUT * 2; \
++ do { \
++ regval = fmc_readl((_host), FMC_OP); \
++ --timeout; \
++ } while (((regval & FMC_OP_REG_OP_START) != 0) && (timeout != 0)); \
++ if (timeout <= 0) \
++ pr_info("Error: Wait cmd cpu finish timeout!\n"); \
++ } while (0)
++
++#define fmc_dma_wait_int_finish(_host) \
++ do { \
++ unsigned regval, timeout = FMC_WAIT_TIMEOUT; \
++ do { \
++ regval = fmc_readl((_host), FMC_INT); \
++ --timeout; \
++ } while ((((regval & FMC_INT_OP_DONE) == 0) && (timeout != 0))); \
++ if (timeout <= 0) \
++ pr_info("Error: Wait dma int finish timeout!\n"); \
++ } while (0)
++
++#define fmc_dma_wait_cpu_finish(_host) \
++ do { \
++ unsigned regval, timeout = FMC_WAIT_TIMEOUT; \
++ do { \
++ regval = fmc_readl((_host), FMC_OP_CTRL); \
++ --timeout; \
++ } while (((regval & OP_CTRL_DMA_OP_READY) != 0) && (timeout != 0)); \
++ if (timeout <= 0) \
++ pr_info("Error: Wait dma cpu finish timeout!\n"); \
++ } while (0)
++
++#define BT_DBG 0 /* Boot init debug print */
++#define ER_DBG 0 /* Erase debug print */
++#define WR_DBG 0 /* Write debug print */
++#define RD_DBG 0 /* Read debug print */
++#define QE_DBG 0 /* Quad Enable debug print */
++#define OP_DBG 0 /* OP command debug print */
++#define DMA_DB 0 /* DMA read or write debug print */
++#define AC_DBG 0 /* 3-4byte Address Cycle */
++#define SR_DBG 0 /* Status Register debug print */
++#define CR_DBG 0 /* Config Register debug print */
++#define FT_DBG 0 /* Features debug print */
++#define WE_DBG 0 /* Write Enable debug print */
++#define BP_DBG 0 /* Block Protection debug print */
++#define EC_DBG 0 /* enable/disable ecc0 and randomizer */
++#define PM_DBG 0 /* power management debug */
++
++#define fmc_pr(_type, _fmt, arg...) \
++ do { \
++ if (_type) \
++ db_msg(_fmt, ##arg) \
++ } while (0)
++
++#define db_msg(_fmt, arg...) \
++ pr_info("%s(%d): " _fmt, __func__, __LINE__, ##arg);
++
++#define db_bug(fmt, args...) \
++ do { \
++ pr_info("%s(%d): BUG: " fmt, __FILE__, __LINE__, ##args); \
++ while (1) \
++ ; \
++ } while (0)
++
++enum fmc_iftype {
++ IF_TYPE_STD,
++ IF_TYPE_DUAL,
++ IF_TYPE_DIO,
++ IF_TYPE_QUAD,
++ IF_TYPE_QIO,
++};
++
++struct bsp_fmc {
++ void __iomem *regbase;
++ void __iomem *iobase;
++ struct clk *clk;
++ struct mutex lock;
++ void *buffer;
++ dma_addr_t dma_buffer;
++ unsigned int dma_len;
++};
++
++struct fmc_cmd_op {
++ unsigned char cs;
++ unsigned char cmd;
++ unsigned char l_cmd;
++ unsigned char addr_h;
++ unsigned int addr_l;
++ unsigned int data_no;
++ unsigned short option;
++ unsigned short op_cfg;
++};
++
++extern struct mutex fmc_switch_mutex;
++
++#endif /* __BSP_FMC_H_ */
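The header above is mostly register layout plus helper macros: the fmc_pr/db_msg pair gates per-topic debug prints on the *_DBG switches, and the fmc_*_wait_* macros poll with a bounded spin. A small sketch of how a driver would combine them (the register write is illustrative only, not a documented sequence):

    static void demo_fmc_kick(struct bsp_fmc *host)
    {
            unsigned int reg = fmc_readl(host, FMC_OP);

            /* prints only when BT_DBG is flipped from 0 to 1 above */
            fmc_pr(BT_DBG, "FMC_OP = 0x%08x\n", reg);

            /* start a register-mode op, then poll until FMC_OP_REG_OP_START clears */
            fmc_writel(host, FMC_OP, reg | FMC_OP_REG_OP_START);
            fmc_cmd_wait_cpu_finish(host);
    }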
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON; +-#ifdef CONFIG_MEM_PURGEABLE +- if (folio_test_purgeable(folio)) +- lru = LRU_INACTIVE_PURGEABLE; +-#endif + if (folio_test_active(folio)) + lru += LRU_ACTIVE; + +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 2b2616008..e77d4a5c0 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + + #include + +@@ -763,10 +762,6 @@ struct mm_struct { + #endif + unsigned long task_size; /* size of task vm space */ + pgd_t * pgd; +-#ifdef CONFIG_MEM_PURGEABLE +- void *uxpgd; +- spinlock_t uxpgd_lock; +-#endif + + #ifdef CONFIG_MEMBARRIER + /** +@@ -982,14 +977,6 @@ struct mm_struct { + #endif + } lru_gen; + #endif /* CONFIG_LRU_GEN */ +- +-#ifdef CONFIG_SECURITY_XPM +- struct xpm_region xpm_region; +-#endif +- +-#ifdef CONFIG_SECURITY_CODE_SIGN +- struct cs_info pcs_info; +-#endif + } __randomize_layout; + + /* +diff --git a/include/linux/mman.h b/include/linux/mman.h +index 2989b7812..b2e2677ea 100644 +--- a/include/linux/mman.h ++++ b/include/linux/mman.h +@@ -157,8 +157,7 @@ calc_vm_flag_bits(struct file *file, unsigned long flags) + return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | + _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | +- _calc_vm_trans(flags, MAP_XPM, VM_XPM ) | +- arch_calc_vm_flag_bits(file, flags); ++ arch_calc_vm_flag_bits(file, flags); + } + + unsigned long vm_commit_limit(void); +diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h +index 62a6847a3..8fb3f0d9c 100644 +--- a/include/linux/mmc/host.h ++++ b/include/linux/mmc/host.h +@@ -218,6 +218,7 @@ struct mmc_host_ops { + + /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */ + int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios); ++ int (*card_info_save)(struct mmc_host *host); + }; + + struct mmc_cqe_ops { +@@ -477,6 +478,10 @@ struct mmc_host { + + struct delayed_work detect; + int detect_change; /* card detect flag */ ++ u32 card_status; ++#define MMC_CARD_UNINIT 0 ++#define MMC_CARD_INIT 1 ++#define MMC_CARD_INIT_FAIL 2 + struct mmc_slot slot; + + const struct mmc_bus_ops *bus_ops; /* current bus driver */ +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index c378c11c2..05092c37a 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -48,12 +48,9 @@ enum migratetype { + MIGRATE_UNMOVABLE, + MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, +-#ifdef CONFIG_CMA_REUSE +- MIGRATE_CMA, +-#endif + MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, +-#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE) ++#ifdef CONFIG_CMA + /* + * MIGRATE_CMA migration type is designed to mimic the way + * ZONE_MOVABLE works. 
Only movable pages can be allocated +@@ -83,12 +80,6 @@ extern const char * const migratetype_names[MIGRATE_TYPES]; + # define is_migrate_cma_page(_page) false + #endif + +-#ifdef CONFIG_CMA_REUSE +-# define get_cma_migratetype() MIGRATE_CMA +-#else +-# define get_cma_migratetype() MIGRATE_MOVABLE +-#endif +- + static inline bool is_migrate_movable(int mt) + { + return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; +@@ -148,10 +139,6 @@ enum zone_stat_item { + NR_ZONE_ACTIVE_ANON, + NR_ZONE_INACTIVE_FILE, + NR_ZONE_ACTIVE_FILE, +-#ifdef CONFIG_MEM_PURGEABLE +- NR_ZONE_INACTIVE_PURGEABLE, +- NR_ZONE_ACTIVE_PURGEABLE, +-#endif + NR_ZONE_UNEVICTABLE, + NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ + NR_MLOCK, /* mlock()ed pages found and moved off LRU */ +@@ -172,10 +159,6 @@ enum node_stat_item { + NR_ACTIVE_ANON, /* " " " " " */ + NR_INACTIVE_FILE, /* " " " " " */ + NR_ACTIVE_FILE, /* " " " " " */ +-#ifdef CONFIG_MEM_PURGEABLE +- NR_INACTIVE_PURGEABLE, +- NR_ACTIVE_PURGEABLE, +-#endif + NR_UNEVICTABLE, /* " " " " " */ + NR_SLAB_RECLAIMABLE_B, + NR_SLAB_UNRECLAIMABLE_B, +@@ -278,19 +261,12 @@ static __always_inline bool vmstat_item_in_bytes(int idx) + #define LRU_BASE 0 + #define LRU_ACTIVE 1 + #define LRU_FILE 2 +-#ifdef CONFIG_MEM_PURGEABLE +-#define LRU_PURGEABLE 4 +-#endif + + enum lru_list { + LRU_INACTIVE_ANON = LRU_BASE, + LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, + LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, + LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, +-#ifdef CONFIG_MEM_PURGEABLE +- LRU_INACTIVE_PURGEABLE = LRU_BASE + LRU_PURGEABLE, +- LRU_ACTIVE_PURGEABLE = LRU_BASE + LRU_PURGEABLE + LRU_ACTIVE, +-#endif + LRU_UNEVICTABLE, + NR_LRU_LISTS + }; +@@ -305,7 +281,7 @@ enum vmscan_throttle_state { + + #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) + +-#define for_each_evictable_lru(lru) for (lru = 0; lru < LRU_UNEVICTABLE; lru++) ++#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) + + static inline bool is_file_lru(enum lru_list lru) + { +@@ -314,10 +290,6 @@ static inline bool is_file_lru(enum lru_list lru) + + static inline bool is_active_lru(enum lru_list lru) + { +-#ifdef CONFIG_MEM_PURGEABLE +- if (lru == LRU_ACTIVE_PURGEABLE) +- return true; +-#endif + return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); + } + +@@ -1351,12 +1323,6 @@ typedef struct pglist_data { + + int kswapd_failures; /* Number of 'reclaimed == 0' runs */ + +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- wait_queue_head_t zswapd_wait; +- atomic_t zswapd_wait_flag; +- struct task_struct *zswapd; +-#endif +- + #ifdef CONFIG_COMPACTION + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; +@@ -1445,11 +1411,6 @@ typedef struct pglist_data { + #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) + #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) + +-static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) +-{ +- return &pgdat->__lruvec; +-} +- + static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) + { + return pgdat->node_start_pfn + pgdat->node_spanned_pages; +@@ -1491,15 +1452,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) + #endif + } + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +-static inline int is_node_lruvec(struct lruvec *lruvec) +-{ +- return &lruvec_pgdat(lruvec)->__lruvec == lruvec; +-} +-#endif +- +-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); +- + #ifdef CONFIG_HAVE_MEMORYLESS_NODES + int local_memory_node(int 
node_id);
+ #else
+diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
+index 914a9f974..19496a356 100644
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -17,7 +17,13 @@
+ #include
+
+ #include
+-
++#ifdef CONFIG_ARCH_BSP
++#define MTD_ERASE_PENDING 0x01
++#define MTD_ERASING 0x02
++#define MTD_ERASE_SUSPEND 0x04
++#define MTD_ERASE_DONE 0x08
++#define MTD_ERASE_FAILED 0x10
++#endif
+ #define MTD_FAIL_ADDR_UNKNOWN -1LL
+
+ struct mtd_info;
+@@ -31,6 +37,17 @@ struct erase_info {
+ uint64_t addr;
+ uint64_t len;
+ uint64_t fail_addr;
++#ifdef CONFIG_ARCH_BSP
++ struct mtd_info *mtd;
++ u_long time;
++ u_long retries;
++ unsigned dev;
++ unsigned cell;
++ void (*callback) (struct erase_info *self);
++ u_long priv;
++ u_char state;
++ struct erase_info *next;
++#endif
+ };
+
+ struct mtd_erase_region_info {
+@@ -317,6 +334,12 @@ struct mtd_info {
+ int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+ int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
++#ifdef CONFIG_ARCH_BSP
++ unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
++ unsigned long len,
++ unsigned long offset,
++ unsigned long flags);
++#endif
+ int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
+@@ -367,7 +390,12 @@ struct mtd_info {
+ * action if required to ensure writes go through
+ */
+ bool oops_panic_write;
+-
++#ifdef CONFIG_ARCH_BSP
++ /* Backing device capabilities for this device
++ * - provides mmap capabilities
++ */
++ struct backing_dev_info *backing_dev_info;
++#endif
+ struct notifier_block reboot_notifier; /* default mode before reboot */
+
+ /* ECC status information */
+@@ -462,13 +490,15 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
+ struct device_node *np)
+ {
+ mtd->dev.of_node = np;
+- if (!mtd->name)
+- of_property_read_string(np, "label", &mtd->name);
+ }
+
+ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
+ {
+- return dev_of_node(&mtd->dev);
++#ifdef CONFIG_ARCH_BSP
++ return mtd->dev.of_node;
++#else
++ return dev_of_node(&mtd->dev);
++#endif
+ }
+
+ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+@@ -699,7 +727,9 @@ struct mtd_notifier {
+ extern void register_mtd_user (struct mtd_notifier *new);
+ extern int unregister_mtd_user (struct mtd_notifier *old);
+ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
+-
++#ifdef CONFIG_ARCH_BSP
++void mtd_erase_callback(struct erase_info *instr);
++#endif
+ static inline int mtd_is_bitflip(int err) {
+ return err == -EUCLEAN;
+ }
+diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
+index cdcfe0fd2..7581e126b 100644
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -10,6 +10,44 @@
+ #include
+ #include
+
++#ifdef CONFIG_ARCH_BSP
++#include
++/*
++ * Manufacturer IDs
++ *
++ * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
++ * Sometimes these are the same as CFI IDs, but sometimes they aren't.
++ */ ++ ++/* Flash set the RESET# from */ ++#define SPI_NOR_SR_RST_MASK BIT(7) ++#define SPI_NOR_GET_RST(val) (((val) & SPI_NOR_SR_RST_MASK) >> 7) ++#define SPI_NOR_SET_RST(val) ((val) | SPI_NOR_SR_RST_MASK) ++ ++/* Flash block protect */ ++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT ++#define _2M (0x200000UL) ++#define _4M (0x400000UL) ++#define _8M (0x800000UL) ++#define _16M (0x1000000UL) ++#define _32M (0x2000000UL) ++ ++#define BP_NUM_3 3 ++#define BP_NUM_4 4 ++ ++#define DEBUG_SPI_NOR_BP 0 ++ ++#define SPI_NOR_SR_BP0_SHIFT 2 ++#define SPI_NOR_SR_BP_WIDTH_4 0xf ++#define SPI_NOR_SR_BP_MASK_4 (SPI_NOR_SR_BP_WIDTH_4 << SPI_NOR_SR_BP0_SHIFT) ++ ++#define SPI_NOR_SR_BP_WIDTH_3 0x7 ++#define SPI_NOR_SR_BP_MASK_3 (SPI_NOR_SR_BP_WIDTH_3 << SPI_NOR_SR_BP0_SHIFT) ++ ++#define LOCK_LEVEL_MAX(bp_num) (((0x01) << bp_num) - 1) ++ ++#endif /* CONFIG_BSP_SPI_BLOCK_PROTECT */ ++#endif /* CONFIG_ARCH_BSP */ + /* + * Note on opcode nomenclature: some opcodes have a format like + * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number +@@ -88,6 +126,14 @@ + /* Used for Spansion flashes only. */ + #define SPINOR_OP_BRWR 0x17 /* Bank register write */ + ++#ifdef CONFIG_ARCH_BSP ++#define SPINOR_OP_RDSR3 0x15 /* Read Status Register-3 */ ++#define SPINOR_OP_WRSR3 0x11 /* Write Status Register-3 1 byte*/ ++ ++#define SPINOR_OP_WRCR 0x31 /* Config register write */ ++/* Software reset code */ ++#define CR_DUMMY_CYCLE (0x03 << 6) ++#endif + /* Used for Micron flashes only. */ + #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ + #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ +@@ -156,6 +202,23 @@ + (SNOR_PROTO_IS_DTR | \ + SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) + ++#ifdef CONFIG_ARCH_BSP ++#define SNOR_OP_READ(_num_mode_clocks, _num_wait_states, _opcode, _proto) \ ++ { \ ++ .num_mode_clocks = _num_mode_clocks, \ ++ .num_wait_states = _num_wait_states, \ ++ .opcode = _opcode, \ ++ .proto = _proto, \ ++ } ++ ++#define SNOR_OP_PROGRAMS(_opcode, _proto) \ ++ { \ ++ .opcode = _opcode, \ ++ .proto = _proto, \ ++ } ++ ++#endif /* CONFIG_ARCH_BSP */ ++ + enum spi_nor_protocol { + SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), + SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), +@@ -418,7 +481,15 @@ struct spi_nor { + struct spi_mem_dirmap_desc *rdesc; + struct spi_mem_dirmap_desc *wdesc; + } dirmap; +- ++#ifdef CONFIG_ARCH_BSP ++#ifdef CONFIG_BSP_SPI_BLOCK_PROTECT ++ unsigned int end_addr; ++ unsigned int lock_level_max; ++ unsigned char level; ++#endif ++ struct device_node *flash_node; ++ u32 clkrate; ++#endif + void *priv; + }; + +@@ -447,6 +518,13 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) + * + * Return: 0 for success, others for failure. 
+ */ ++#ifdef CONFIG_ARCH_BSP ++void spi_nor_driver_shutdown(struct spi_nor *nor); ++#ifdef CONFIG_PM ++int bsp_spi_nor_suspend(struct spi_nor *nor, pm_message_t state); ++int bsp_spi_nor_resume(struct spi_nor *nor); ++#endif ++#endif /* CONFIG_ARCH_BSP */ + int spi_nor_scan(struct spi_nor *nor, const char *name, + const struct spi_nor_hwcaps *hwcaps); + +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 9b0fd384b..337a9d1c5 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -360,7 +360,7 @@ struct napi_struct { + + unsigned long state; + int weight; +- u32 defer_hard_irqs_count; ++ int defer_hard_irqs_count; + unsigned long gro_bitmask; + int (*poll)(struct napi_struct *, int); + #ifdef CONFIG_NETPOLL +@@ -2244,7 +2244,7 @@ struct net_device { + + struct bpf_prog __rcu *xdp_prog; + unsigned long gro_flush_timeout; +- u32 napi_defer_hard_irqs; ++ int napi_defer_hard_irqs; + #define GRO_LEGACY_MAX_SIZE 65536u + /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), + * and shinfo->gso_segs is a 16bit field. +diff --git a/include/linux/nmi.h b/include/linux/nmi.h +index 66af92dde..e92e378df 100644 +--- a/include/linux/nmi.h ++++ b/include/linux/nmi.h +@@ -15,11 +15,6 @@ + + #ifdef CONFIG_LOCKUP_DETECTOR + void lockup_detector_init(void); +-#ifdef CONFIG_CPU_ISOLATION_OPT +-extern void watchdog_enable(unsigned int cpu); +-extern void watchdog_disable(unsigned int cpu); +-extern bool watchdog_configured(unsigned int cpu); +-#endif + void lockup_detector_retry_init(void); + void lockup_detector_soft_poweroff(void); + void lockup_detector_cleanup(void); +@@ -43,22 +38,6 @@ static inline void lockup_detector_init(void) { } + static inline void lockup_detector_retry_init(void) { } + static inline void lockup_detector_soft_poweroff(void) { } + static inline void lockup_detector_cleanup(void) { } +-#ifdef CONFIG_CPU_ISOLATION_OPT +-static inline void watchdog_enable(unsigned int cpu) +-{ +-} +-static inline void watchdog_disable(unsigned int cpu) +-{ +-} +-static inline bool watchdog_configured(unsigned int cpu) +-{ +- /* +- * Pretend the watchdog is always configured. 
+- * We will be waiting for the watchdog to be enabled in core isolation +- */ +- return true; +-} +-#endif + #endif /* !CONFIG_LOCKUP_DETECTOR */ + + #ifdef CONFIG_SOFTLOCKUP_DETECTOR +diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h +index 704c7d819..f86a08ba0 100644 +--- a/include/linux/oid_registry.h ++++ b/include/linux/oid_registry.h +@@ -136,9 +136,6 @@ enum OID { + OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */ + OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */ + +- /* Code signature defined OIDS */ +- OID_ownerid, /* 1.3.6.1.4.1.2011.2.376.1.4.1 */ +- + /* TCG defined OIDS for TPM based keys */ + OID_TPMLoadableKey, /* 2.23.133.10.1.3 */ + OID_TPMImportableKey, /* 2.23.133.10.1.4 */ +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h +index 3ed1e637e..a77f3a7d2 100644 +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -135,13 +135,6 @@ enum pageflags { + #ifdef CONFIG_ARCH_USES_PG_ARCH_X + PG_arch_2, + PG_arch_3, +-#endif +-#ifdef CONFIG_MEM_PURGEABLE +- PG_purgeable, +-#endif +-#ifdef CONFIG_SECURITY_XPM +- PG_xpm_readonly, +- PG_xpm_writetainted, + #endif + __NR_PAGEFLAGS, + +@@ -508,14 +501,6 @@ PAGEFLAG(Workingset, workingset, PF_HEAD) + __PAGEFLAG(Slab, slab, PF_NO_TAIL) + PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ + +-#ifdef CONFIG_SECURITY_XPM +-PAGEFLAG(XPMReadonly, xpm_readonly, PF_HEAD) +-PAGEFLAG(XPMWritetainted, xpm_writetainted, PF_HEAD) +-#else +-PAGEFLAG_FALSE(XPMReadonly) +-PAGEFLAG_FALSE(XPMWritetainted) +-#endif +- + /* Xen */ + PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) + TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) +@@ -638,12 +623,6 @@ PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) + PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) + #endif + +-#ifdef CONFIG_MEM_PURGEABLE +-PAGEFLAG(Purgeable, purgeable, PF_ANY) +-#else +-PAGEFLAG_FALSE(Purgeable) +-#endif +- + /* + * On an anonymous page mapped into a user virtual memory area, + * page->mapping points to its anon_vma, not to a struct address_space; +@@ -1091,12 +1070,6 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) + #define __PG_MLOCKED 0 + #endif + +-#ifdef CONFIG_SECURITY_XPM +-#define __XPM_PAGE_FLAGS (1UL << PG_xpm_readonly | 1UL << PG_xpm_writetainted) +-#else +-#define __XPM_PAGE_FLAGS 0 +-#endif +- + /* + * Flags checked when a page is freed. Pages being freed should not have + * these flags set. If they are, there is a problem. 
+@@ -1106,7 +1079,6 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) + 1UL << PG_private | 1UL << PG_private_2 | \ + 1UL << PG_writeback | 1UL << PG_reserved | \ + 1UL << PG_slab | 1UL << PG_active | \ +- __XPM_PAGE_FLAGS | \ + 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK) + + /* +diff --git a/include/linux/pstore.h b/include/linux/pstore.h +index df2c4ee05..638507a3c 100644 +--- a/include/linux/pstore.h ++++ b/include/linux/pstore.h +@@ -39,8 +39,6 @@ enum pstore_type_id { + PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, + +- PSTORE_TYPE_BLACKBOX = 9, +- + /* End of the list */ + PSTORE_TYPE_MAX + }; +@@ -208,7 +206,6 @@ struct pstore_info { + #define PSTORE_FLAGS_CONSOLE BIT(1) + #define PSTORE_FLAGS_FTRACE BIT(2) + #define PSTORE_FLAGS_PMSG BIT(3) +-#define PSTORE_FLAGS_BLACKBOX BIT(4) + + extern int pstore_register(struct pstore_info *); + extern void pstore_unregister(struct pstore_info *); +@@ -289,9 +286,4 @@ pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) + } + #endif + +-#ifdef CONFIG_PSTORE_BLACKBOX +-extern void pstore_blackbox_dump(struct kmsg_dumper *dumper, +- enum kmsg_dump_reason reason); +-#endif +- + #endif /*_LINUX_PSTORE_H*/ +diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h +index 95981aa83..9d65ff94e 100644 +--- a/include/linux/pstore_ram.h ++++ b/include/linux/pstore_ram.h +@@ -34,7 +34,6 @@ struct ramoops_platform_data { + unsigned long console_size; + unsigned long ftrace_size; + unsigned long pmsg_size; +- unsigned long blackbox_size; + int max_reason; + u32 flags; + struct persistent_ram_ecc_info ecc_info; +diff --git a/include/linux/pwm.h b/include/linux/pwm.h +index 44fe5fae2..a83cde630 100644 +--- a/include/linux/pwm.h ++++ b/include/linux/pwm.h +@@ -59,6 +59,10 @@ enum { + struct pwm_state { + u64 period; + u64 duty_cycle; ++#ifdef CONFIG_ARCH_BSP ++ u64 duty_cycle1; ++ u64 duty_cycle2; ++#endif + enum pwm_polarity polarity; + bool enabled; + bool usage_power; +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 124898a69..393c30034 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -290,55 +290,6 @@ struct user_event_mm; + enum { + TASK_COMM_LEN = 16, + }; +-enum task_event { +- PUT_PREV_TASK = 0, +- PICK_NEXT_TASK = 1, +- TASK_WAKE = 2, +- TASK_MIGRATE = 3, +- TASK_UPDATE = 4, +- IRQ_UPDATE = 5, +-}; +- +-/* Note: this need to be in sync with migrate_type_names array */ +-enum migrate_types { +- GROUP_TO_RQ, +- RQ_TO_GROUP, +-}; +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-extern int sched_isolate_count(const cpumask_t *mask, bool include_offline); +-extern int sched_isolate_cpu(int cpu); +-extern int sched_unisolate_cpu(int cpu); +-extern int sched_unisolate_cpu_unlocked(int cpu); +-#else +-static inline int sched_isolate_count(const cpumask_t *mask, +- bool include_offline) +-{ +- cpumask_t count_mask; +- +- if (include_offline) +- cpumask_andnot(&count_mask, mask, cpu_online_mask); +- else +- return 0; +- +- return cpumask_weight(&count_mask); +-} +- +-static inline int sched_isolate_cpu(int cpu) +-{ +- return 0; +-} +- +-static inline int sched_unisolate_cpu(int cpu) +-{ +- return 0; +-} +- +-static inline int sched_unisolate_cpu_unlocked(int cpu) +-{ +- return 0; +-} +-#endif + + extern void scheduler_tick(void); + +@@ -627,10 +578,6 @@ struct sched_entity { + unsigned long runnable_weight; + #endif + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- int latency_weight; +-#endif +- + #ifdef CONFIG_SMP + /* + * Per entity load average tracking. 
+@@ -642,53 +589,6 @@ struct sched_entity { + #endif + }; + +-#ifdef CONFIG_SCHED_WALT +-extern void sched_exit(struct task_struct *p); +-extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct); +-extern u32 sched_get_init_task_load(struct task_struct *p); +-extern void free_task_load_ptrs(struct task_struct *p); +-#define RAVG_HIST_SIZE_MAX 5 +-struct ravg { +- /* +- * 'mark_start' marks the beginning of an event (task waking up, task +- * starting to execute, task being preempted) within a window +- * +- * 'sum' represents how runnable a task has been within current +- * window. It incorporates both running time and wait time and is +- * frequency scaled. +- * +- * 'sum_history' keeps track of history of 'sum' seen over previous +- * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are +- * ignored. +- * +- * 'demand' represents maximum sum seen over previous +- * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency +- * demand for tasks. +- * +- * 'curr_window_cpu' represents task's contribution to cpu busy time on +- * various CPUs in the current window +- * +- * 'prev_window_cpu' represents task's contribution to cpu busy time on +- * various CPUs in the previous window +- * +- * 'curr_window' represents the sum of all entries in curr_window_cpu +- * +- * 'prev_window' represents the sum of all entries in prev_window_cpu +- * +- */ +- u64 mark_start; +- u32 sum, demand; +- u32 sum_history[RAVG_HIST_SIZE_MAX]; +- u32 *curr_window_cpu, *prev_window_cpu; +- u32 curr_window, prev_window; +- u16 active_windows; +- u16 demand_scaled; +-}; +-#else +-static inline void sched_exit(struct task_struct *p) { } +-static inline void free_task_load_ptrs(struct task_struct *p) { } +-#endif /* CONFIG_SCHED_WALT */ +- + struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; +@@ -893,21 +793,9 @@ struct task_struct { + int static_prio; + int normal_prio; + unsigned int rt_priority; +-#ifdef CONFIG_SCHED_LATENCY_NICE +- int latency_prio; +-#endif + + struct sched_entity se; + struct sched_rt_entity rt; +-#ifdef CONFIG_SCHED_WALT +- struct ravg ravg; +- /* +- * 'init_load_pct' represents the initial task load assigned to children +- * of this task +- */ +- u32 init_load_pct; +- u64 last_sleep_ts; +-#endif + struct sched_dl_entity dl; + const struct sched_class *sched_class; + +@@ -917,12 +805,6 @@ struct task_struct { + unsigned int core_occupation; + #endif + +-#ifdef CONFIG_SCHED_RTG +- int rtg_depth; +- struct related_thread_group *grp; +- struct list_head grp_list; +-#endif +- + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif +@@ -1660,11 +1542,6 @@ struct task_struct { + struct user_event_mm *user_event_mm; + #endif + +-#ifdef CONFIG_ACCESS_TOKENID +- u64 token; +- u64 ftoken; +-#endif +- + /* + * New fields for task_struct should be added above here, so that + * they are included in the randomized portion of task_struct. 
+@@ -1878,7 +1755,6 @@ extern struct pid *cad_pid; + #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ + #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ + #define PF__HOLE__00800000 0x00800000 +-#define PF_FROZEN PF__HOLE__00800000 /* Frozen for system suspend */ + #define PF__HOLE__01000000 0x01000000 + #define PF__HOLE__02000000 0x02000000 + #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ +diff --git a/include/linux/sched/core_ctl.h b/include/linux/sched/core_ctl.h +deleted file mode 100755 +index ca321b7b0..000000000 +--- a/include/linux/sched/core_ctl.h ++++ /dev/null +@@ -1,14 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved. +- */ +- +-#ifndef __CORE_CTL_H +-#define __CORE_CTL_H +- +-#ifdef CONFIG_SCHED_CORE_CTRL +-extern void core_ctl_check(u64 wallclock); +-#else +-static inline void core_ctl_check(u64 wallclock) { } +-#endif +-#endif +diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h +index 4d5600506..bdd31ab93 100644 +--- a/include/linux/sched/cpufreq.h ++++ b/include/linux/sched/cpufreq.h +@@ -9,9 +9,6 @@ + */ + + #define SCHED_CPUFREQ_IOWAIT (1U << 0) +-#define SCHED_CPUFREQ_WALT (1U << 1) +-#define SCHED_CPUFREQ_CONTINUE (1U << 2) +-#define SCHED_CPUFREQ_FORCE_UPDATE (1U << 3) + + #ifdef CONFIG_CPU_FREQ + struct cpufreq_policy; +diff --git a/include/linux/sched/frame_rtg.h b/include/linux/sched/frame_rtg.h +deleted file mode 100755 +index 6713f6771..000000000 +--- a/include/linux/sched/frame_rtg.h ++++ /dev/null +@@ -1,75 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Frame declaration +- * +- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef __SCHED_FRAME_RTG_H +-#define __SCHED_FRAME_RTG_H +- +-#ifdef CONFIG_SCHED_RTG_FRAME +- +-#define MAX_TID_NUM 5 +- +-struct frame_info { +- /* +- * use rtg load tracking in frame_info +- * rtg->curr_window_load -=> the workload of current frame +- * rtg->prev_window_load -=> the workload of last frame +- * rtg->curr_window_exec -=> the thread's runtime of current frame +- * rtg->prev_window_exec -=> the thread's runtime of last frame +- * rtg->prev_window_time -=> the actual time of the last frame +- */ +- struct mutex lock; +- struct related_thread_group *rtg; +- int prio; +- struct task_struct *thread[MAX_TID_NUM]; +- atomic_t thread_prio[MAX_TID_NUM]; +- int thread_num; +- unsigned int frame_rate; // frame rate +- u64 frame_time; +- atomic_t curr_rt_thread_num; +- atomic_t max_rt_thread_num; +- atomic_t frame_sched_state; +- atomic_t start_frame_freq; +- atomic_t frame_state; +- +- /* +- * frame_vload : the emergency level of current frame. +- * max_vload_time : the timeline frame_load increase to FRAME_MAX_VLOAD +- * it's always equal to 2 * frame_time / NSEC_PER_MSEC +- * +- * The closer to the deadline, the higher emergency of current +- * frame, so the frame_vload is only related to frame time, +- * and grown with time. 
+- */ +- u64 frame_vload; +- int vload_margin; +- int max_vload_time; +- +- u64 frame_util; +- unsigned long status; +- unsigned long prev_fake_load_util; +- unsigned long prev_frame_load_util; +- unsigned long prev_frame_time; +- unsigned long prev_frame_exec; +- unsigned long prev_frame_load; +- unsigned int frame_min_util; +- unsigned int frame_max_util; +- unsigned int prev_min_util; +- unsigned int prev_max_util; +- unsigned int frame_boost_min_util; +- +- bool margin_imme; +- bool timestamp_skipped; +-}; +- +-struct frame_info *rtg_frame_info(int id); +-static inline +-struct related_thread_group *frame_info_rtg(const struct frame_info *frame_info) +-{ +- return frame_info->rtg; +-} +-#endif +-#endif +diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h +index 3ec3a0ec6..fe1a46f30 100644 +--- a/include/linux/sched/isolation.h ++++ b/include/linux/sched/isolation.h +@@ -29,25 +29,10 @@ extern void __init housekeeping_init(void); + + #else + +-#ifdef CONFIG_CPU_ISOLATION_OPT +-static inline int housekeeping_any_cpu(enum hk_type type) +-{ +- cpumask_t available; +- int cpu; +- +- cpumask_andnot(&available, cpu_online_mask, cpu_isolated_mask); +- cpu = cpumask_any(&available); +- if (cpu >= nr_cpu_ids) +- cpu = smp_processor_id(); +- +- return cpu; +-} +-#else + static inline int housekeeping_any_cpu(enum hk_type type) + { + return smp_processor_id(); + } +-#endif + + static inline const struct cpumask *housekeeping_cpumask(enum hk_type type) + { +@@ -76,11 +61,7 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type) + if (static_branch_unlikely(&housekeeping_overridden)) + return housekeeping_test_cpu(cpu, type); + #endif +-#ifdef CONFIG_CPU_ISOLATION_OPT +- return !cpu_isolated(cpu); +-#else + return true; +-#endif + } + + static inline bool cpu_is_isolated(int cpu) +diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h +index 7d64feafc..ab83d85e1 100644 +--- a/include/linux/sched/prio.h ++++ b/include/linux/sched/prio.h +@@ -11,16 +11,9 @@ + * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH + * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority + * values are inverted: lower p->prio value means higher priority. +- * +- * The MAX_USER_RT_PRIO value allows the actual maximum +- * RT priority to be separate from the value exported to +- * user-space. This allows kernel threads to set their +- * priority to a value higher than any user task. Note: +- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +-#define MAX_USER_RT_PRIO 100 +-#define MAX_RT_PRIO MAX_USER_RT_PRIO ++#define MAX_RT_PRIO 100 + + #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) + #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) +@@ -33,15 +26,6 @@ + #define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO) + #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) + +-/* +- * 'User priority' is the nice value converted to something we +- * can work with better when scaling various scheduler parameters, +- * it's a [ 0 ... 39 ] range. +- */ +-#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) +- + /* + * Convert nice value [19,-20] to rlimit style value [1,40]. 
+ */ +diff --git a/include/linux/sched/rtg.h b/include/linux/sched/rtg.h +deleted file mode 100755 +index ec738f49f..000000000 +--- a/include/linux/sched/rtg.h ++++ /dev/null +@@ -1,65 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef __SCHED_RTG_H +-#define __SCHED_RTG_H +- +-#ifdef CONFIG_SCHED_RTG +- +-#define DEFAULT_RTG_GRP_ID 0 +-#define DEFAULT_CGROUP_COLOC_ID 1 +-#define MAX_NUM_CGROUP_COLOC_ID 21 +- +-struct group_cpu_time { +- u64 window_start; +- u64 curr_runnable_sum; +- u64 prev_runnable_sum; +- u64 nt_curr_runnable_sum; +- u64 nt_prev_runnable_sum; +-}; +- +-struct group_ravg { +- unsigned long curr_window_load; +- unsigned long curr_window_exec; +- unsigned long prev_window_load; +- unsigned long prev_window_exec; +- unsigned long normalized_util; +-}; +- +-struct rtg_class; +- +-struct related_thread_group { +- int id; +- raw_spinlock_t lock; +- struct list_head tasks; +- struct list_head list; +- +- unsigned int nr_running; +- struct group_ravg ravg; +- u64 window_start; +- u64 mark_start; +- u64 prev_window_time; +- /* rtg window information for WALT */ +- unsigned int window_size; +- const struct rtg_class *rtg_class; +- struct sched_cluster *preferred_cluster; +- int max_boost; +- unsigned long util_invalid_interval; /* in nanoseconds */ +- unsigned long util_update_timeout; /* in nanoseconds */ +- unsigned long freq_update_interval; /* in nanoseconds */ +- u64 last_util_update_time; +- u64 last_freq_update_time; +- void *private_data; +-}; +- +-struct rtg_class { +- void (*sched_update_rtg_tick)(struct related_thread_group *grp); +-}; +- +-enum rtg_freq_update_flags { +- RTG_FREQ_FORCE_UPDATE = (1 << 0), +- RTG_FREQ_NORMAL_UPDATE = (1 << 1), +-}; +- +-int sched_set_group_id(struct task_struct *p, unsigned int group_id); +-unsigned int sched_get_group_id(struct task_struct *p); +-#endif /* CONFIG_SCHED_RTG */ +-#endif +diff --git a/include/linux/sched/rtg_ctrl.h b/include/linux/sched/rtg_ctrl.h +deleted file mode 100755 +index b71dd74e7..000000000 +--- a/include/linux/sched/rtg_ctrl.h ++++ /dev/null +@@ -1,99 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * rtg control interface +- * +- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. 
+- */ +- +-#ifndef __SCHED_RTG_CTL_H +-#define __SCHED_RTG_CTL_H +- +-#include +- +-#define SYSTEM_SERVER_UID 1000 +-#define MIN_APP_UID 10000 +-#define MAX_BOOST_DURATION_MS 5000 +- +-#define RTG_SCHED_IPC_MAGIC 0XAB +- +-#define CMD_ID_SET_ENABLE \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_ENABLE, struct rtg_enable_data) +-#define CMD_ID_SET_RTG \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_RTG, struct rtg_str_data) +-#define CMD_ID_SET_CONFIG \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_CONFIG, struct rtg_str_data) +-#define CMD_ID_SET_RTG_ATTR \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_RTG_ATTR, struct rtg_str_data) +-#define CMD_ID_BEGIN_FRAME_FREQ \ +- _IOWR(RTG_SCHED_IPC_MAGIC, BEGIN_FRAME_FREQ, struct proc_state_data) +-#define CMD_ID_END_FRAME_FREQ \ +- _IOWR(RTG_SCHED_IPC_MAGIC, END_FRAME_FREQ, struct proc_state_data) +-#define CMD_ID_END_SCENE \ +- _IOWR(RTG_SCHED_IPC_MAGIC, END_SCENE, struct proc_state_data) +-#define CMD_ID_SET_MIN_UTIL \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_MIN_UTIL, struct proc_state_data) +-#define CMD_ID_SET_MARGIN \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SET_MARGIN, struct proc_state_data) +-#define CMD_ID_LIST_RTG \ +- _IOWR(RTG_SCHED_IPC_MAGIC, LIST_RTG, struct rtg_info) +-#define CMD_ID_LIST_RTG_THREAD \ +- _IOWR(RTG_SCHED_IPC_MAGIC, LIST_RTG_THREAD, struct rtg_grp_data) +-#define CMD_ID_SEARCH_RTG \ +- _IOWR(RTG_SCHED_IPC_MAGIC, SEARCH_RTG, struct proc_state_data) +-#define CMD_ID_GET_ENABLE \ +- _IOWR(RTG_SCHED_IPC_MAGIC, GET_ENABLE, struct rtg_enable_data) +- +-int proc_rtg_open(struct inode *inode, struct file *filp); +-long proc_rtg_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +-#ifdef CONFIG_COMPAT +-long proc_rtg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +-#endif +- +-enum ioctl_abi_format { +- IOCTL_ABI_ARM32, +- IOCTL_ABI_AARCH64, +-}; +- +-enum rtg_sched_cmdid { +- SET_ENABLE = 1, +- SET_RTG, +- SET_CONFIG, +- SET_RTG_ATTR, +- BEGIN_FRAME_FREQ = 5, +- END_FRAME_FREQ, +- END_SCENE, +- SET_MIN_UTIL, +- SET_MARGIN, +- LIST_RTG = 10, +- LIST_RTG_THREAD, +- SEARCH_RTG, +- GET_ENABLE, +- RTG_CTRL_MAX_NR, +-}; +- +-/* proc_state */ +-enum grp_ctrl_cmd { +- CMD_CREATE_RTG_GRP, +- CMD_ADD_RTG_THREAD, +- CMD_REMOVE_RTG_THREAD, +- CMD_CLEAR_RTG_GRP, +- CMD_DESTROY_RTG_GRP +-}; +- +-struct rtg_enable_data { +- int enable; +- int len; +- char *data; +-}; +- +-struct rtg_str_data { +- int type; +- int len; +- char *data; +-}; +- +-struct proc_state_data { +- int grp_id; +- int state_param; +-}; +-#endif +diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h +index 0b2189197..0108a38bb 100644 +--- a/include/linux/sched/stat.h ++++ b/include/linux/sched/stat.h +@@ -22,14 +22,6 @@ extern bool single_task_running(void); + extern unsigned int nr_iowait(void); + extern unsigned int nr_iowait_cpu(int cpu); + +-#ifdef CONFIG_SCHED_WALT +-extern unsigned int sched_get_cpu_util(int cpu); +-#else +-static inline unsigned int sched_get_cpu_util(int cpu) +-{ +- return 0; +-} +-#endif + static inline int sched_info_on(void) + { + return IS_ENABLED(CONFIG_SCHED_INFO); +diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h +index 546e750f7..5a64582b0 100644 +--- a/include/linux/sched/sysctl.h ++++ b/include/linux/sched/sysctl.h +@@ -12,26 +12,6 @@ extern unsigned long sysctl_hung_task_timeout_secs; + enum { sysctl_hung_task_timeout_secs = 0 }; + #endif + +-extern unsigned int sysctl_sched_latency; +-extern unsigned int sysctl_sched_wakeup_granularity; +-#ifdef CONFIG_SCHED_WALT +-extern unsigned int sysctl_sched_use_walt_cpu_util; 
+-extern unsigned int sysctl_sched_use_walt_task_util;
+-extern unsigned int sysctl_sched_walt_init_task_load_pct;
+-extern unsigned int sysctl_sched_cpu_high_irqload;
+-
+-extern int
+-sysctl_sched_walt_init_task_load_pct_sysctl_handler(struct ctl_table *table,
+- int write, void __user *buffer, size_t *length, loff_t *ppos);
+-#endif
+-
+-#ifdef CONFIG_SCHED_RT_CAS
+-extern unsigned int sysctl_sched_enable_rt_cas;
+-#endif
+-#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+-extern unsigned int sysctl_sched_enable_rt_active_lb;
+-#endif
+-
+ enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
+diff --git a/include/linux/securec.h b/include/linux/securec.h
+new file mode 100644
+index 000000000..88df54e66
+--- /dev/null
++++ b/include/linux/securec.h
+@@ -0,0 +1,629 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: The user of this secure C library should include this header file in your source code.
++ * This header file declares all supported API prototypes of the library,
++ * such as memcpy_s, strcpy_s, wcscpy_s, strcat_s, strncat_s, sprintf_s, scanf_s, and so on.
++ * Create: 2014-02-25
++ * Notes: Do not modify this file by yourself.
++ */
++
++#ifndef SECUREC_H_5D13A042_DC3F_4ED9_A8D1_882811274C27
++#define SECUREC_H_5D13A042_DC3F_4ED9_A8D1_882811274C27
++
++#include "securectype.h"
++#ifndef SECUREC_HAVE_STDARG_H
++#define SECUREC_HAVE_STDARG_H 1
++#endif
++
++#if SECUREC_HAVE_STDARG_H
++#include "stdarg.h"
++#endif
++
++#ifndef SECUREC_HAVE_ERRNO_H
++#define SECUREC_HAVE_ERRNO_H 1
++#endif
++
++/* EINVAL and ERANGE may be defined in errno.h */
++#if SECUREC_HAVE_ERRNO_H
++#if SECUREC_IN_KERNEL
++#include <linux/errno.h>
++#else
++#include <errno.h>
++#endif
++#endif
++
++/* Define error code */
++#if defined(SECUREC_NEED_ERRNO_TYPE) || !defined(__STDC_WANT_LIB_EXT1__) || \
++ (defined(__STDC_WANT_LIB_EXT1__) && (!__STDC_WANT_LIB_EXT1__))
++#ifndef SECUREC_DEFINED_ERRNO_TYPE
++#define SECUREC_DEFINED_ERRNO_TYPE
++/* Just check whether the macro definition exists. */
++#ifndef errno_t
++typedef int errno_t;
++#endif
++#endif
++#endif
++
++/* Success */
++#ifndef EOK
++#define EOK 0
++#endif
++
++#ifndef EINVAL
++/* The src buffer is not correct and the destination buffer cannot be reset */
++#define EINVAL 22
++#endif
++
++#ifndef EINVAL_AND_RESET
++/* Once the error is detected, the dest buffer must be reset! Value is 22 or 128 */
++#define EINVAL_AND_RESET 150
++#endif
++
++#ifndef ERANGE
++/* The destination buffer is not long enough and the destination buffer cannot be reset */
++#define ERANGE 34
++#endif
++
++#ifndef ERANGE_AND_RESET
++/* Once the error is detected, the dest buffer must be reset! Value is 34 or 128 */
++#define ERANGE_AND_RESET 162
++#endif
++
++#ifndef EOVERLAP_AND_RESET
++/* Once the buffer overlap is detected, the dest buffer must be reset! Value is 54 or 128 */
++#define EOVERLAP_AND_RESET 182
++#endif
++
++/* If you need to export the functions of this library in a Win32 DLL, use __declspec(dllexport) */
++#ifndef SECUREC_API
++#if defined(SECUREC_DLL_EXPORT)
++#define SECUREC_API __declspec(dllexport)
++#elif defined(SECUREC_DLL_IMPORT)
++#define SECUREC_API __declspec(dllimport)
++#else
++/*
++ * Standardized function declaration. If a secure function is declared in your own code,
++ * it may cause a compilation warning; please delete that declaration.
++ * On Windows, adding extern can cause the compiler to expand inline functions,
++ * so extern is not added by default.
++ */
++#if defined(_MSC_VER)
++#define SECUREC_API
++#else
++#define SECUREC_API extern
++#endif
++#endif
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++/*
++ * Description: The GetHwSecureCVersion function gets the SecureC version string and version number.
++ * Parameter: verNumber - to store version number (for example value is 0x500 | 0xa)
++ * Return: version string
++ */
++SECUREC_API const char *GetHwSecureCVersion(unsigned short *verNumber);
++
++#if SECUREC_ENABLE_MEMSET
++/*
++ * Description: The memset_s function copies the value of c (converted to an unsigned char) into each of
++ * the first count characters of the object pointed to by dest.
++ * Parameter: dest - destination address
++ * Parameter: destMax - The maximum length of destination buffer
++ * Parameter: c - the value to be copied
++ * Parameter: count - copies count bytes of value to dest
++ * Return: EOK if there was no runtime-constraint violation
++ */
++SECUREC_API errno_t memset_s(void *dest, size_t destMax, int c, size_t count);
++#endif
++
++#ifndef SECUREC_ONLY_DECLARE_MEMSET
++#define SECUREC_ONLY_DECLARE_MEMSET 0
++#endif
++
++#if !SECUREC_ONLY_DECLARE_MEMSET
++
++#if SECUREC_ENABLE_MEMMOVE
++/*
++ * Description: The memmove_s function copies n characters from the object pointed to by src
++ * into the object pointed to by dest.
++ * Parameter: dest - destination address
++ * Parameter: destMax - The maximum length of destination buffer
++ * Parameter: src - source address
++ * Parameter: count - copies count bytes from the src
++ * Return: EOK if there was no runtime-constraint violation
++ */
++SECUREC_API errno_t memmove_s(void *dest, size_t destMax, const void *src, size_t count);
++#endif
++
++#if SECUREC_ENABLE_MEMCPY
++/*
++ * Description: The memcpy_s function copies n characters from the object pointed to
++ * by src into the object pointed to by dest.
++ * Parameter: dest - destination address ++ * Parameter: destMax - The maximum length of destination buffer ++ * Parameter: src - source address ++ * Parameter: count - copies count bytes from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_STRCPY ++/* ++ * Description: The strcpy_s function copies the string pointed to by strSrc (including ++ * the terminating null character) into the array pointed to by strDest ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null character) ++ * Parameter: strSrc - source address ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t strcpy_s(char *strDest, size_t destMax, const char *strSrc); ++#endif ++ ++#if SECUREC_ENABLE_STRNCPY ++/* ++ * Description: The strncpy_s function copies not more than n successive characters (not including ++ * the terminating null character) from the array pointed to by strSrc to the array pointed to by strDest. ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null character) ++ * Parameter: strSrc - source address ++ * Parameter: count - copies count characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t strncpy_s(char *strDest, size_t destMax, const char *strSrc, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_STRCAT ++/* ++ * Description: The strcat_s function appends a copy of the string pointed to by strSrc (including ++ * the terminating null character) to the end of the string pointed to by strDest. ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null wide character) ++ * Parameter: strSrc - source address ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t strcat_s(char *strDest, size_t destMax, const char *strSrc); ++#endif ++ ++#if SECUREC_ENABLE_STRNCAT ++/* ++ * Description: The strncat_s function appends not more than n successive characters (not including ++ * the terminating null character) ++ * from the array pointed to by strSrc to the end of the string pointed to by strDest. ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null character) ++ * Parameter: strSrc - source address ++ * Parameter: count - copies count characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t strncat_s(char *strDest, size_t destMax, const char *strSrc, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_VSPRINTF ++/* ++ * Description: The vsprintf_s function is equivalent to the vsprintf function except for the parameter destMax ++ * and the explicit runtime-constraints violation ++ * Parameter: strDest - produce output according to a format,write to the character string strDest. ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null wide character) ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1. 
++ */ ++SECUREC_API int vsprintf_s(char *strDest, size_t destMax, const char *format, ++ va_list argList) SECUREC_ATTRIBUTE(3, 0); ++#endif ++ ++#if SECUREC_ENABLE_SPRINTF ++/* ++ * Description: The sprintf_s function is equivalent to the sprintf function except for the parameter destMax ++ * and the explicit runtime-constraints violation ++ * Parameter: strDest - produce output according to a format ,write to the character string strDest. ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte '\0') ++ * Parameter: format - format string ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1. ++*/ ++SECUREC_API int sprintf_s(char *strDest, size_t destMax, const char *format, ...) SECUREC_ATTRIBUTE(3, 4); ++#endif ++ ++#if SECUREC_ENABLE_VSNPRINTF ++/* ++ * Description: The vsnprintf_s function is equivalent to the vsnprintf function except for ++ * the parameter destMax/count and the explicit runtime-constraints violation ++ * Parameter: strDest - produce output according to a format ,write to the character string strDest. ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte '\0') ++ * Parameter: count - do not write more than count bytes to strDest(not including the terminating null byte '\0') ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1.Pay special attention to returning -1 when truncation occurs. ++ */ ++SECUREC_API int vsnprintf_s(char *strDest, size_t destMax, size_t count, const char *format, ++ va_list argList) SECUREC_ATTRIBUTE(4, 0); ++#endif ++ ++#if SECUREC_ENABLE_SNPRINTF ++/* ++ * Description: The snprintf_s function is equivalent to the snprintf function except for ++ * the parameter destMax/count and the explicit runtime-constraints violation ++ * Parameter: strDest - produce output according to a format ,write to the character string strDest. ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte '\0') ++ * Parameter: count - do not write more than count bytes to strDest(not including the terminating null byte '\0') ++ * Parameter: format - format string ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1.Pay special attention to returning -1 when truncation occurs. ++ */ ++SECUREC_API int snprintf_s(char *strDest, size_t destMax, size_t count, const char *format, ++ ...) 
SECUREC_ATTRIBUTE(4, 5); ++#endif ++ ++#if SECUREC_SNPRINTF_TRUNCATED ++/* ++ * Description: The vsnprintf_truncated_s function is equivalent to the vsnprintf_s function except ++ * no count parameter and return value ++ * Parameter: strDest - produce output according to a format ,write to the character string strDest ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte '\0') ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1.Pay special attention to returning destMax - 1 when truncation occurs ++*/ ++SECUREC_API int vsnprintf_truncated_s(char *strDest, size_t destMax, const char *format, ++ va_list argList) SECUREC_ATTRIBUTE(3, 0); ++ ++/* ++ * Description: The snprintf_truncated_s function is equivalent to the snprintf_s function except ++ * no count parameter and return value ++ * Parameter: strDest - produce output according to a format,write to the character string strDest. ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null byte '\0') ++ * Parameter: format - format string ++ * Return: the number of characters printed(not including the terminating null byte '\0'), ++ * If an error occurred Return: -1.Pay special attention to returning destMax - 1 when truncation occurs. ++ */ ++SECUREC_API int snprintf_truncated_s(char *strDest, size_t destMax, ++ const char *format, ...) SECUREC_ATTRIBUTE(3, 4); ++#endif ++ ++#if SECUREC_ENABLE_SCANF ++/* ++ * Description: The scanf_s function is equivalent to fscanf_s with the argument stdin ++ * interposed before the arguments to scanf_s ++ * Parameter: format - format string ++ * Return: the number of input items assigned, If an error occurred Return: -1. ++ */ ++SECUREC_API int scanf_s(const char *format, ...); ++#endif ++ ++#if SECUREC_ENABLE_VSCANF ++/* ++ * Description: The vscanf_s function is equivalent to scanf_s, with the variable argument list replaced by argList ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of input items assigned, If an error occurred Return: -1. ++ */ ++SECUREC_API int vscanf_s(const char *format, va_list argList); ++#endif ++ ++#if SECUREC_ENABLE_SSCANF ++/* ++ * Description: The sscanf_s function is equivalent to fscanf_s, except that input is obtained from a ++ * string (specified by the argument buffer) rather than from a stream ++ * Parameter: buffer - read character from buffer ++ * Parameter: format - format string ++ * Return: the number of input items assigned, If an error occurred Return: -1. ++ */ ++SECUREC_API int sscanf_s(const char *buffer, const char *format, ...); ++#endif ++ ++#if SECUREC_ENABLE_VSSCANF ++/* ++ * Description: The vsscanf_s function is equivalent to sscanf_s, with the variable argument list ++ * replaced by argList ++ * Parameter: buffer - read character from buffer ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of input items assigned, If an error occurred Return: -1. 
++ */ ++SECUREC_API int vsscanf_s(const char *buffer, const char *format, va_list argList); ++#endif ++ ++#if SECUREC_ENABLE_FSCANF ++/* ++ * Description: The fscanf_s function is equivalent to fscanf except that the c, s, and [ conversion specifiers ++ * apply to a pair of arguments (unless assignment suppression is indicated by a *) ++ * Parameter: stream - stdio file stream ++ * Parameter: format - format string ++ * Return: the number of input items assigned, If an error occurred Return: -1. ++ */ ++SECUREC_API int fscanf_s(FILE *stream, const char *format, ...); ++#endif ++ ++#if SECUREC_ENABLE_VFSCANF ++/* ++ * Description: The vfscanf_s function is equivalent to fscanf_s, with the variable argument list ++ * replaced by argList ++ * Parameter: stream - stdio file stream ++ * Parameter: format - format string ++ * Parameter: argList - instead of a variable number of arguments ++ * Return: the number of input items assigned, If an error occurred Return: -1. ++ */ ++SECUREC_API int vfscanf_s(FILE *stream, const char *format, va_list argList); ++#endif ++ ++#if SECUREC_ENABLE_STRTOK ++/* ++ * Description: The strtok_s function parses a string into a sequence of strToken, ++ * replace all characters in strToken string that match to strDelimit set with 0. ++ * On the first call to strtok_s the string to be parsed should be specified in strToken. ++ * In each subsequent call that should parse the same string, strToken should be NULL ++ * Parameter: strToken - the string to be delimited ++ * Parameter: strDelimit - specifies a set of characters that delimit the tokens in the parsed string ++ * Parameter: context - is a pointer to a char * variable that is used internally by strtok_s function ++ * Return: On the first call returns the address of the first non \0 character, otherwise NULL is returned. ++ * In subsequent calls, the strtoken is set to NULL, and the context set is the same as the previous call, ++ * return NULL if the *context string length is equal 0, otherwise return *context. ++ */ ++SECUREC_API char *strtok_s(char *strToken, const char *strDelimit, char **context); ++#endif ++ ++#if SECUREC_ENABLE_GETS && !SECUREC_IN_KERNEL ++/* ++ * Description: The gets_s function reads at most one less than the number of characters specified ++ * by destMax from the stream pointed to by stdin, into the array pointed to by buffer ++ * Parameter: buffer - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null character) ++ * Return: buffer if there was no runtime-constraint violation,If an error occurred Return: NULL. ++ */ ++SECUREC_API char *gets_s(char *buffer, size_t destMax); ++#endif ++ ++#if SECUREC_ENABLE_WCHAR_FUNC ++#if SECUREC_ENABLE_MEMCPY ++/* ++ * Description: The wmemcpy_s function copies n successive wide characters from the object pointed to ++ * by src into the object pointed to by dest. ++ * Parameter: dest - destination address ++ * Parameter: destMax - The maximum length of destination buffer ++ * Parameter: src - source address ++ * Parameter: count - copies count wide characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wmemcpy_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_MEMMOVE ++/* ++ * Description: The wmemmove_s function copies n successive wide characters from the object ++ * pointed to by src into the object pointed to by dest. 
++ * Parameter: dest - destination address ++ * Parameter: destMax - The maximum length of destination buffer ++ * Parameter: src - source address ++ * Parameter: count - copies count wide characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wmemmove_s(wchar_t *dest, size_t destMax, const wchar_t *src, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_STRCPY ++/* ++ * Description: The wcscpy_s function copies the wide string pointed to by strSrc(including the terminating ++ * null wide character) into the array pointed to by strDest ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer ++ * Parameter: strSrc - source address ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wcscpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc); ++#endif ++ ++#if SECUREC_ENABLE_STRNCPY ++/* ++ * Description: The wcsncpy_s function copies not more than n successive wide characters (not including the ++ * terminating null wide character) from the array pointed to by strSrc to the array pointed to by strDest ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating wide character) ++ * Parameter: strSrc - source address ++ * Parameter: count - copies count wide characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wcsncpy_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_STRCAT ++/* ++ * Description: The wcscat_s function appends a copy of the wide string pointed to by strSrc (including the ++ * terminating null wide character) to the end of the wide string pointed to by strDest ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating wide character) ++ * Parameter: strSrc - source address ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wcscat_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc); ++#endif ++ ++#if SECUREC_ENABLE_STRNCAT ++/* ++ * Description: The wcsncat_s function appends not more than n successive wide characters (not including the ++ * terminating null wide character) from the array pointed to by strSrc to the end of the wide string pointed to ++ * by strDest. ++ * Parameter: strDest - destination address ++ * Parameter: destMax - The maximum length of destination buffer(including the terminating wide character) ++ * Parameter: strSrc - source address ++ * Parameter: count - copies count wide characters from the src ++ * Return: EOK if there was no runtime-constraint violation ++ */ ++SECUREC_API errno_t wcsncat_s(wchar_t *strDest, size_t destMax, const wchar_t *strSrc, size_t count); ++#endif ++ ++#if SECUREC_ENABLE_STRTOK ++/* ++ * Description: The wcstok_s function is the wide-character equivalent of the strtok_s function ++ * Parameter: strToken - the string to be delimited ++ * Parameter: strDelimit - specifies a set of characters that delimit the tokens in the parsed string ++ * Parameter: context - is a pointer to a char * variable that is used internally by strtok_s function ++ * Return: a pointer to the first character of a token, or a null pointer if there is no token ++ * or there is a runtime-constraint violation. 
++
++#if SECUREC_ENABLE_VSPRINTF
++/*
++ * Description: The vswprintf_s function is the wide-character equivalent of the vsprintf_s function
++ * Parameter: strDest - produce output according to a format, write to the character string strDest
++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null)
++ * Parameter: format - format string
++ * Parameter: argList - instead of a variable number of arguments
++ * Return: the number of characters printed(not including the terminating null wide character),
++ * If an error occurred Return: -1.
++ */
++SECUREC_API int vswprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, va_list argList);
++#endif
++
++#if SECUREC_ENABLE_SPRINTF
++/*
++ * Description: The swprintf_s function is the wide-character equivalent of the sprintf_s function
++ * Parameter: strDest - produce output according to a format, write to the character string strDest
++ * Parameter: destMax - The maximum length of destination buffer(including the terminating null)
++ * Parameter: format - format string
++ * Return: the number of characters printed(not including the terminating null wide character),
++ * If an error occurred Return: -1.
++ */
++SECUREC_API int swprintf_s(wchar_t *strDest, size_t destMax, const wchar_t *format, ...);
++#endif
++
++#if SECUREC_ENABLE_FSCANF
++/*
++ * Description: The fwscanf_s function is the wide-character equivalent of the fscanf_s function
++ * Parameter: stream - stdio file stream
++ * Parameter: format - format string
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int fwscanf_s(FILE *stream, const wchar_t *format, ...);
++#endif
++
++#if SECUREC_ENABLE_VFSCANF
++/*
++ * Description: The vfwscanf_s function is the wide-character equivalent of the vfscanf_s function
++ * Parameter: stream - stdio file stream
++ * Parameter: format - format string
++ * Parameter: argList - instead of a variable number of arguments
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int vfwscanf_s(FILE *stream, const wchar_t *format, va_list argList);
++#endif
++
++#if SECUREC_ENABLE_SCANF
++/*
++ * Description: The wscanf_s function is the wide-character equivalent of the scanf_s function
++ * Parameter: format - format string
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int wscanf_s(const wchar_t *format, ...);
++#endif
++
++#if SECUREC_ENABLE_VSCANF
++/*
++ * Description: The vwscanf_s function is the wide-character equivalent of the vscanf_s function
++ * Parameter: format - format string
++ * Parameter: argList - instead of a variable number of arguments
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int vwscanf_s(const wchar_t *format, va_list argList);
++#endif
++
++#if SECUREC_ENABLE_SSCANF
++/*
++ * Description: The swscanf_s function is the wide-character equivalent of the sscanf_s function
++ * Parameter: buffer - read character from buffer
++ * Parameter: format - format string
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int swscanf_s(const wchar_t *buffer, const wchar_t *format, ...);
++#endif
++
++#if SECUREC_ENABLE_VSSCANF
++/*
++ * Description: The vswscanf_s function is the wide-character equivalent of the vsscanf_s function
++ * Parameter: buffer - read character from buffer
++ * Parameter: format - format string
++ * Parameter: argList - instead of a variable number of arguments
++ * Return: the number of input items assigned, If an error occurred Return: -1.
++ */
++SECUREC_API int vswscanf_s(const wchar_t *buffer, const wchar_t *format, va_list argList);
++#endif
++#endif /* SECUREC_ENABLE_WCHAR_FUNC */
++#endif
++
++/* These functions are used by macros and must be declared here, which also avoids missing-declaration warnings */
++extern errno_t strncpy_error(char *strDest, size_t destMax, const char *strSrc, size_t count);
++extern errno_t strcpy_error(char *strDest, size_t destMax, const char *strSrc);
++
++#if SECUREC_WITH_PERFORMANCE_ADDONS
++/* These functions are used by the macros below */
++extern errno_t memset_sOptAsm(void *dest, size_t destMax, int c, size_t count);
++extern errno_t memset_sOptTc(void *dest, size_t destMax, int c, size_t count);
++extern errno_t memcpy_sOptAsm(void *dest, size_t destMax, const void *src, size_t count);
++extern errno_t memcpy_sOptTc(void *dest, size_t destMax, const void *src, size_t count);
++
++/* The strcpy_sp is a macro, not a function, in performance optimization mode. */
++#define strcpy_sp(dest, destMax, src) ((__builtin_constant_p((destMax)) && \
++    __builtin_constant_p((src))) ? \
++    SECUREC_STRCPY_SM((dest), (destMax), (src)) : \
++    strcpy_s((dest), (destMax), (src)))
++
++/* The strncpy_sp is a macro, not a function, in performance optimization mode. */
++#define strncpy_sp(dest, destMax, src, count) ((__builtin_constant_p((count)) && \
++    __builtin_constant_p((destMax)) && \
++    __builtin_constant_p((src))) ? \
++    SECUREC_STRNCPY_SM((dest), (destMax), (src), (count)) : \
++    strncpy_s((dest), (destMax), (src), (count)))
++
++/* The strcat_sp is a macro, not a function, in performance optimization mode. */
++#define strcat_sp(dest, destMax, src) ((__builtin_constant_p((destMax)) && \
++    __builtin_constant_p((src))) ? \
++    SECUREC_STRCAT_SM((dest), (destMax), (src)) : \
++    strcat_s((dest), (destMax), (src)))
++
++/* The strncat_sp is a macro, not a function, in performance optimization mode. */
++#define strncat_sp(dest, destMax, src, count) ((__builtin_constant_p((count)) && \
++    __builtin_constant_p((destMax)) && \
++    __builtin_constant_p((src))) ? \
++    SECUREC_STRNCAT_SM((dest), (destMax), (src), (count)) : \
++    strncat_s((dest), (destMax), (src), (count)))
++
++/* The memcpy_sp is a macro, not a function, in performance optimization mode. */
++#define memcpy_sp(dest, destMax, src, count) (__builtin_constant_p((count)) ? \
++    (SECUREC_MEMCPY_SM((dest), (destMax), (src), (count))) : \
++    (__builtin_constant_p((destMax)) ? \
++    (((size_t)(destMax) > 0 && \
++    (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_MEM_MAX_LEN)) ? \
++    memcpy_sOptTc((dest), (destMax), (src), (count)) : ERANGE) : \
++    memcpy_sOptAsm((dest), (destMax), (src), (count))))
++
++/* The memset_sp is a macro, not a function, in performance optimization mode. */
++#define memset_sp(dest, destMax, c, count) (__builtin_constant_p((count)) ? \
++    (SECUREC_MEMSET_SM((dest), (destMax), (c), (count))) : \
++    (__builtin_constant_p((destMax)) ? \
++    (((((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_MEM_MAX_LEN)) ? \
++    memset_sOptTc((dest), (destMax), (c), (count)) : ERANGE) : \
++    memset_sOptAsm((dest), (destMax), (c), (count))))
++
++#endif
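++
++/*
++ * Dispatch sketch for the *_sp macros above (illustrative only; 'buf', 'src'
++ * and get_name() below are hypothetical, not part of the original header):
++ *
++ *     char buf[64];
++ *     // Compile-time-constant destMax and src: intended to expand to the
++ *     // inline SECUREC_STRCPY_SM fast path.
++ *     (void)strcpy_sp(buf, sizeof(buf), "const");
++ *     // Non-constant src: falls back to the ordinary strcpy_s() call.
++ *     const char *src = get_name();
++ *     (void)strcpy_sp(buf, sizeof(buf), src);
++ */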
++
++#ifdef __cplusplus
++}
++#endif
++#endif
++
+diff --git a/include/linux/securectype.h b/include/linux/securectype.h
+new file mode 100644
+index 000000000..69e79c2f9
+--- /dev/null
++++ b/include/linux/securectype.h
+@@ -0,0 +1,585 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: Define internally used macros and data types. The macro SECUREC_ON_64BITS,
++ * which switches parts of the code, is determined in this header file.
++ * Some macros are used to suppress warnings from the MS compiler.
++ * Create: 2014-02-25
++ * Notes: Users can change the values of the SECUREC_STRING_MAX_LEN and SECUREC_MEM_MAX_LEN
++ * macros to meet their special needs, but the maximum value should not exceed 2G.
++ */
++/*
++ * [Standardize-exceptions]: Performance-sensitive
++ * [reason]: Strict parameter verification has been done before use
++ */
++
++#ifndef SECURECTYPE_H_A7BBB686_AADA_451B_B9F9_44DACDAE18A7
++#define SECURECTYPE_H_A7BBB686_AADA_451B_B9F9_44DACDAE18A7
++
++#ifndef SECUREC_USING_STD_SECURE_LIB
++#if defined(_MSC_VER) && _MSC_VER >= 1400
++#if defined(__STDC_WANT_SECURE_LIB__) && (!__STDC_WANT_SECURE_LIB__)
++/* Secure functions have been provided since VS2005; by default the system library functions are used */
++#define SECUREC_USING_STD_SECURE_LIB 0
++#else
++#define SECUREC_USING_STD_SECURE_LIB 1
++#endif
++#else
++#define SECUREC_USING_STD_SECURE_LIB 0
++#endif
++#endif
++
++/* Compatibility with older Secure C versions, shielding the VC symbol redefinition warning */
++#if defined(_MSC_VER) && (_MSC_VER >= 1400) && (!SECUREC_USING_STD_SECURE_LIB)
++#ifndef SECUREC_DISABLE_CRT_FUNC
++#define SECUREC_DISABLE_CRT_FUNC 1
++#endif
++#ifndef SECUREC_DISABLE_CRT_IMP
++#define SECUREC_DISABLE_CRT_IMP 1
++#endif
++#else /* MSC VER */
++#ifndef SECUREC_DISABLE_CRT_FUNC
++#define SECUREC_DISABLE_CRT_FUNC 0
++#endif
++#ifndef SECUREC_DISABLE_CRT_IMP
++#define SECUREC_DISABLE_CRT_IMP 0
++#endif
++#endif
++
++#if SECUREC_DISABLE_CRT_FUNC
++#ifdef __STDC_WANT_SECURE_LIB__
++#undef __STDC_WANT_SECURE_LIB__
++#endif
++#define __STDC_WANT_SECURE_LIB__ 0
++#endif
++
++#if SECUREC_DISABLE_CRT_IMP
++#ifdef _CRTIMP_ALTERNATIVE
++#undef _CRTIMP_ALTERNATIVE
++#endif
++#define _CRTIMP_ALTERNATIVE /* Comment out the Microsoft *_s functions */
++#endif
++
++/* Compile in the kernel under macro control */
++#ifndef SECUREC_IN_KERNEL
++#ifdef __KERNEL__
++#define SECUREC_IN_KERNEL 1
++#else
++#define SECUREC_IN_KERNEL 0
++#endif
++#endif
++
++/* Make kernel symbols of the functions available to loadable modules */
++#ifndef SECUREC_EXPORT_KERNEL_SYMBOL
++#if SECUREC_IN_KERNEL
++#define SECUREC_EXPORT_KERNEL_SYMBOL 1
++#else
++#define SECUREC_EXPORT_KERNEL_SYMBOL 0
++#endif
++#endif
++
++#if SECUREC_IN_KERNEL
++#ifndef SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_SCANF_FILE 0
++#endif
++#ifndef SECUREC_ENABLE_WCHAR_FUNC
++#define SECUREC_ENABLE_WCHAR_FUNC 0
++#endif
++#else /* SECUREC_IN_KERNEL */
++#ifndef SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_SCANF_FILE 1
++#endif
++#ifndef SECUREC_ENABLE_WCHAR_FUNC
++#define SECUREC_ENABLE_WCHAR_FUNC 1
++#endif
++#endif
++
++/* Default secure function declaration, default declarations for non-standard functions */
++#ifndef SECUREC_SNPRINTF_TRUNCATED
++#define SECUREC_SNPRINTF_TRUNCATED 1
++#endif
++
++#if SECUREC_USING_STD_SECURE_LIB
++#if defined(_MSC_VER) && _MSC_VER >= 1400
++/* Declare secure functions that are not available in the VS compiler */
++#ifndef SECUREC_ENABLE_MEMSET
++#define SECUREC_ENABLE_MEMSET 1
++#endif
++/* VS 2005 already has the vsnprintf_s function */
++#ifndef SECUREC_ENABLE_VSNPRINTF
++#define SECUREC_ENABLE_VSNPRINTF 0
++#endif
++#ifndef SECUREC_ENABLE_SNPRINTF
++/* VS 2005 already has vsnprintf_s; adapt snprintf_s of the secure library to the VS _snprintf_s */
++#define snprintf_s _snprintf_s
++#define SECUREC_ENABLE_SNPRINTF 0
++#endif
++/* Versions before VS 2010 do not have the v* scanf functions */
++#if _MSC_VER <= 1600 || defined(SECUREC_FOR_V_SCANFS)
++#ifndef SECUREC_ENABLE_VFSCANF
++#define SECUREC_ENABLE_VFSCANF 1
++#endif
++#ifndef SECUREC_ENABLE_VSCANF
++#define SECUREC_ENABLE_VSCANF 1
++#endif
++#ifndef SECUREC_ENABLE_VSSCANF
++#define SECUREC_ENABLE_VSSCANF 1
++#endif
++#endif
++
++#else /* MSC VER */
++#ifndef SECUREC_ENABLE_MEMSET
++#define SECUREC_ENABLE_MEMSET 0
++#endif
++#ifndef SECUREC_ENABLE_SNPRINTF
++#define SECUREC_ENABLE_SNPRINTF 0
++#endif
++#ifndef SECUREC_ENABLE_VSNPRINTF
++#define SECUREC_ENABLE_VSNPRINTF 0
++#endif
++#endif
++
++#ifndef SECUREC_ENABLE_MEMMOVE
++#define SECUREC_ENABLE_MEMMOVE 0
++#endif
++#ifndef SECUREC_ENABLE_MEMCPY
++#define SECUREC_ENABLE_MEMCPY 0
++#endif
++#ifndef SECUREC_ENABLE_STRCPY
++#define SECUREC_ENABLE_STRCPY 0
++#endif
++#ifndef SECUREC_ENABLE_STRNCPY
++#define SECUREC_ENABLE_STRNCPY 0
++#endif
++#ifndef SECUREC_ENABLE_STRCAT
++#define SECUREC_ENABLE_STRCAT 0
++#endif
++#ifndef SECUREC_ENABLE_STRNCAT
++#define SECUREC_ENABLE_STRNCAT 0
++#endif
++#ifndef SECUREC_ENABLE_SPRINTF
++#define SECUREC_ENABLE_SPRINTF 0
++#endif
++#ifndef SECUREC_ENABLE_VSPRINTF
++#define SECUREC_ENABLE_VSPRINTF 0
++#endif
++#ifndef SECUREC_ENABLE_SSCANF
++#define SECUREC_ENABLE_SSCANF 0
++#endif
++#ifndef SECUREC_ENABLE_VSSCANF
++#define SECUREC_ENABLE_VSSCANF 0
++#endif
++#ifndef SECUREC_ENABLE_SCANF
++#define SECUREC_ENABLE_SCANF 0
++#endif
++#ifndef SECUREC_ENABLE_VSCANF
++#define SECUREC_ENABLE_VSCANF 0
++#endif
++
++#ifndef SECUREC_ENABLE_FSCANF
++#define SECUREC_ENABLE_FSCANF 0
++#endif
++#ifndef SECUREC_ENABLE_VFSCANF
++#define SECUREC_ENABLE_VFSCANF 0
++#endif
++#ifndef SECUREC_ENABLE_STRTOK
++#define SECUREC_ENABLE_STRTOK 0
++#endif
++#ifndef SECUREC_ENABLE_GETS
++#define SECUREC_ENABLE_GETS 0
++#endif
++
++#else /* SECUREC USE STD SECURE LIB */
++
++#ifndef SECUREC_ENABLE_MEMSET
++#define SECUREC_ENABLE_MEMSET 1
++#endif
++#ifndef SECUREC_ENABLE_MEMMOVE
++#define SECUREC_ENABLE_MEMMOVE 1
++#endif
++#ifndef SECUREC_ENABLE_MEMCPY
++#define SECUREC_ENABLE_MEMCPY 1
++#endif
++#ifndef SECUREC_ENABLE_STRCPY
++#define SECUREC_ENABLE_STRCPY 1
++#endif
++#ifndef SECUREC_ENABLE_STRNCPY
++#define SECUREC_ENABLE_STRNCPY 1
++#endif
++#ifndef SECUREC_ENABLE_STRCAT
++#define SECUREC_ENABLE_STRCAT 1
++#endif
++#ifndef SECUREC_ENABLE_STRNCAT
++#define SECUREC_ENABLE_STRNCAT 1
++#endif
++#ifndef SECUREC_ENABLE_SPRINTF
++#define SECUREC_ENABLE_SPRINTF 1
++#endif
++#ifndef SECUREC_ENABLE_VSPRINTF
++#define SECUREC_ENABLE_VSPRINTF 1
++#endif
++#ifndef SECUREC_ENABLE_SNPRINTF
++#define SECUREC_ENABLE_SNPRINTF 1
++#endif
++#ifndef SECUREC_ENABLE_VSNPRINTF
++#define SECUREC_ENABLE_VSNPRINTF 1
++#endif
++#ifndef SECUREC_ENABLE_SSCANF
++#define SECUREC_ENABLE_SSCANF 1
++#endif
++#ifndef SECUREC_ENABLE_VSSCANF
++#define SECUREC_ENABLE_VSSCANF 1
++#endif
++#ifndef SECUREC_ENABLE_SCANF
++#if SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_SCANF 1
++#else
++#define SECUREC_ENABLE_SCANF 0
++#endif
++#endif
++#ifndef SECUREC_ENABLE_VSCANF
++#if SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_VSCANF 1
++#else
++#define SECUREC_ENABLE_VSCANF 0
++#endif
++#endif
++
++#ifndef SECUREC_ENABLE_FSCANF
++#if SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_FSCANF 1
++#else
++#define SECUREC_ENABLE_FSCANF 0
++#endif
++#endif
++#ifndef SECUREC_ENABLE_VFSCANF
++#if SECUREC_ENABLE_SCANF_FILE
++#define SECUREC_ENABLE_VFSCANF 1
++#else
++#define SECUREC_ENABLE_VFSCANF 0
++#endif
++#endif
++
++#ifndef SECUREC_ENABLE_STRTOK
++#define SECUREC_ENABLE_STRTOK 1
++#endif
++#ifndef SECUREC_ENABLE_GETS
++#define SECUREC_ENABLE_GETS 1
++#endif
++#endif /* SECUREC_USE_STD_SECURE_LIB */
++
++#if !SECUREC_ENABLE_SCANF_FILE
++#if SECUREC_ENABLE_FSCANF
++#undef SECUREC_ENABLE_FSCANF
++#define SECUREC_ENABLE_FSCANF 0
++#endif
++#if SECUREC_ENABLE_VFSCANF
++#undef SECUREC_ENABLE_VFSCANF
++#define SECUREC_ENABLE_VFSCANF 0
++#endif
++#if SECUREC_ENABLE_SCANF
++#undef SECUREC_ENABLE_SCANF
++#define SECUREC_ENABLE_SCANF 0
++#endif
++#if SECUREC_ENABLE_VSCANF
++#undef SECUREC_ENABLE_VSCANF
++#define SECUREC_ENABLE_VSCANF 0
++#endif
++
++#endif
++
++#if SECUREC_IN_KERNEL
++#include <linux/kernel.h>
++#include <linux/module.h>
++#else
++#ifndef SECUREC_HAVE_STDIO_H
++#define SECUREC_HAVE_STDIO_H 1
++#endif
++#ifndef SECUREC_HAVE_STRING_H
++#define SECUREC_HAVE_STRING_H 1
++#endif
++#ifndef SECUREC_HAVE_STDLIB_H
++#define SECUREC_HAVE_STDLIB_H 1
++#endif
++#if SECUREC_HAVE_STDIO_H
++#include <stdio.h>
++#endif
++#if SECUREC_HAVE_STRING_H
++#include <string.h>
++#endif
++#if SECUREC_HAVE_STDLIB_H
++#include <stdlib.h>
++#endif
++#endif
++
++/*
++ * If you need high performance, enable the SECUREC_WITH_PERFORMANCE_ADDONS macro; it is enabled by default.
++ * The macro is automatically disabled on the Windows platform and in the Linux kernel.
++ */
++#ifndef SECUREC_WITH_PERFORMANCE_ADDONS
++#if SECUREC_IN_KERNEL
++#define SECUREC_WITH_PERFORMANCE_ADDONS 0
++#else
++#define SECUREC_WITH_PERFORMANCE_ADDONS 1
++#endif
++#endif
++
++/* If SECUREC_COMPATIBLE_WIN_FORMAT is enabled, the output format will be Windows-compatible. */
++#if (defined(_WIN32) || defined(_WIN64) || defined(_MSC_VER)) && !defined(SECUREC_COMPATIBLE_LINUX_FORMAT)
++#ifndef SECUREC_COMPATIBLE_WIN_FORMAT
++#define SECUREC_COMPATIBLE_WIN_FORMAT
++#endif
++#endif
++
++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)
++/* On the Windows platform the optimized functions cannot be used, because there is no __builtin_constant_p-like builtin */
++/* If the optimized macros are needed anyway, you can define __builtin_constant_p(x) as 0 */
++#ifdef SECUREC_WITH_PERFORMANCE_ADDONS
++#undef SECUREC_WITH_PERFORMANCE_ADDONS
++#define SECUREC_WITH_PERFORMANCE_ADDONS 0
++#endif
++#endif
++
++#if defined(__VXWORKS__) || defined(__vxworks) || defined(__VXWORKS) || defined(_VXWORKS_PLATFORM_) || \
++    defined(SECUREC_VXWORKS_VERSION_5_4)
++#ifndef SECUREC_VXWORKS_PLATFORM
++#define SECUREC_VXWORKS_PLATFORM
++#endif
++#endif
++
++/* If SECUREC_COMPATIBLE_LINUX_FORMAT is enabled, the output format will be Linux-compatible. */
++#if !defined(SECUREC_COMPATIBLE_WIN_FORMAT) && !defined(SECUREC_VXWORKS_PLATFORM)
++#ifndef SECUREC_COMPATIBLE_LINUX_FORMAT
++#define SECUREC_COMPATIBLE_LINUX_FORMAT
++#endif
++#endif
++
++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT
++#ifndef SECUREC_HAVE_STDDEF_H
++#define SECUREC_HAVE_STDDEF_H 1
++#endif
++/* Some systems may not have stddef.h */
++#if SECUREC_HAVE_STDDEF_H
++#if !SECUREC_IN_KERNEL
++#include <stddef.h>
++#endif
++#endif
++#endif
++
++/*
++ * Add the -DSECUREC_SUPPORT_FORMAT_WARNING=1 compiler option to support -Wformat=2.
++ * By default the format string is not checked against the actual argument types in the code,
++ * because the underlying data type definitions differ between VxWorks and Linux.
++ */
++#ifndef SECUREC_SUPPORT_FORMAT_WARNING
++#define SECUREC_SUPPORT_FORMAT_WARNING 0
++#endif
++
++#if SECUREC_SUPPORT_FORMAT_WARNING
++#define SECUREC_ATTRIBUTE(x, y) __attribute__((format(printf, (x), (y))))
++#else
++#define SECUREC_ATTRIBUTE(x, y)
++#endif
++
++/*
++ * Add the -DSECUREC_SUPPORT_BUILTIN_EXPECT=0 compiler option if the compiler does not support __builtin_expect.
++ */
++#ifndef SECUREC_SUPPORT_BUILTIN_EXPECT
++#define SECUREC_SUPPORT_BUILTIN_EXPECT 1
++#endif
++
++#if SECUREC_SUPPORT_BUILTIN_EXPECT && defined(__GNUC__) && ((__GNUC__ > 3) || \
++    (defined(__GNUC_MINOR__) && (__GNUC__ == 3 && __GNUC_MINOR__ > 3)))
++/*
++ * This is a built-in function that can be used without a declaration; if a declaration-not-found
++ * warning occurs, you can add -DSECUREC_NEED_BUILTIN_EXPECT_DECLARE to the compiler options
++ */
++#ifdef SECUREC_NEED_BUILTIN_EXPECT_DECLARE
++long __builtin_expect(long exp, long c);
++#endif
++
++#define SECUREC_LIKELY(x) __builtin_expect(!!(x), 1)
++#define SECUREC_UNLIKELY(x) __builtin_expect(!!(x), 0)
++#else
++#define SECUREC_LIKELY(x) (x)
++#define SECUREC_UNLIKELY(x) (x)
++#endif
++
++/* Define the max length of the string */
++#ifndef SECUREC_STRING_MAX_LEN
++#define SECUREC_STRING_MAX_LEN 0x7fffffffUL
++#endif
++#define SECUREC_WCHAR_STRING_MAX_LEN (SECUREC_STRING_MAX_LEN / sizeof(wchar_t))
++
++/* Add SECUREC_MEM_MAX_LEN for memcpy and memmove */
++#ifndef SECUREC_MEM_MAX_LEN
++#define SECUREC_MEM_MAX_LEN 0x7fffffffUL
++#endif
++#define SECUREC_WCHAR_MEM_MAX_LEN (SECUREC_MEM_MAX_LEN / sizeof(wchar_t))
++
++#if SECUREC_STRING_MAX_LEN > 0x7fffffffUL
++#error "max string is 2G"
++#endif
++
++#if (defined(__GNUC__) && defined(__SIZEOF_POINTER__))
++#if (__SIZEOF_POINTER__ != 4) && (__SIZEOF_POINTER__ != 8)
++#error "unsupported system"
++#endif
++#endif
++
++#if defined(_WIN64) || defined(WIN64) || defined(__LP64__) || defined(_LP64)
++#define SECUREC_ON_64BITS
++#endif
++
++#if (!defined(SECUREC_ON_64BITS) && defined(__GNUC__) && defined(__SIZEOF_POINTER__))
++#if __SIZEOF_POINTER__ == 8
++#define SECUREC_ON_64BITS
++#endif
++#endif
++
++#if defined(__SVR4) || defined(__svr4__)
++#define SECUREC_ON_SOLARIS
++#endif
++
++#if (defined(__hpux) || defined(_AIX) || defined(SECUREC_ON_SOLARIS))
++#define SECUREC_ON_UNIX
++#endif
++
++/*
++ * Code compiled on an unknown system runs under the SECUREC_COMPATIBLE_LINUX_FORMAT macro by
++ * default, which may use strtold. strtold was first specified in ISO9899:1999 (C99), and some
++ * old compilers cannot support it. The following macro controls whether it is used:
++ * SECUREC_SUPPORT_STRTOLD -- If defined to 1, strtold will be used
++ */
++#ifndef SECUREC_SUPPORT_STRTOLD
++#define SECUREC_SUPPORT_STRTOLD 0
++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT))
++#if defined(__USE_ISOC99) || \
++    (defined(_AIX) && defined(_ISOC99_SOURCE)) || \
++    (defined(__hpux) && defined(__ia64)) || \
++    (defined(SECUREC_ON_SOLARIS) && (!defined(_STRICT_STDC) && !defined(__XOPEN_OR_POSIX)) || \
++    defined(_STDC_C99) || defined(__EXTENSIONS__))
++#undef SECUREC_SUPPORT_STRTOLD
++#define SECUREC_SUPPORT_STRTOLD 1
++#endif
++#endif
++#if ((defined(SECUREC_WRLINUX_BELOW4) || defined(_WRLINUX_BELOW4_)))
++#undef SECUREC_SUPPORT_STRTOLD
++#define SECUREC_SUPPORT_STRTOLD 0
++#endif
++#endif
++
++#if SECUREC_WITH_PERFORMANCE_ADDONS
++
++#ifndef SECUREC_TWO_MIN
++#define SECUREC_TWO_MIN(a, b) ((a) < (b) ? (a) : (b))
++#endif
++
++/* For strncpy_s performance optimization */
++#define SECUREC_STRNCPY_SM(dest, destMax, src, count) \
++    (((void *)(dest) != NULL && (const void *)(src) != NULL && (size_t)(destMax) > 0 && \
++    (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN) && \
++    (SECUREC_TWO_MIN((size_t)(count), strlen(src)) + 1) <= (size_t)(destMax)) ? \
++    (((size_t)(count) < strlen(src)) ? (memcpy((dest), (src), (count)), *((char *)(dest) + (count)) = '\0', EOK) : \
++    (memcpy((dest), (src), strlen(src) + 1), EOK)) : (strncpy_error((dest), (destMax), (src), (count))))
++
++#define SECUREC_STRCPY_SM(dest, destMax, src) \
++    (((void *)(dest) != NULL && (const void *)(src) != NULL && (size_t)(destMax) > 0 && \
++    (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN) && \
++    (strlen(src) + 1) <= (size_t)(destMax)) ? (memcpy((dest), (src), strlen(src) + 1), EOK) : \
++    (strcpy_error((dest), (destMax), (src))))
++
++/* For strcat_s performance optimization */
++#if defined(__GNUC__)
++#define SECUREC_STRCAT_SM(dest, destMax, src) ({ \
++    int catRet_ = EOK; \
++    if ((void *)(dest) != NULL && (const void *)(src) != NULL && (size_t)(destMax) > 0 && \
++        (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN)) { \
++        char *catTmpDst_ = (char *)(dest); \
++        size_t catRestSize_ = (destMax); \
++        while (catRestSize_ > 0 && *catTmpDst_ != '\0') { \
++            ++catTmpDst_; \
++            --catRestSize_; \
++        } \
++        if (catRestSize_ == 0) { \
++            catRet_ = EINVAL; \
++        } else if ((strlen(src) + 1) <= catRestSize_) { \
++            memcpy(catTmpDst_, (src), strlen(src) + 1); \
++            catRet_ = EOK; \
++        } else { \
++            catRet_ = ERANGE; \
++        } \
++        if (catRet_ != EOK) { \
++            catRet_ = strcat_s((dest), (destMax), (src)); \
++        } \
++    } else { \
++        catRet_ = strcat_s((dest), (destMax), (src)); \
++    } \
++    catRet_; \
++})
++#else
++#define SECUREC_STRCAT_SM(dest, destMax, src) strcat_s((dest), (destMax), (src))
++#endif
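++
++/*
++ * Illustrative note: like most function-like macros, the SECUREC_*_SM helpers
++ * above may evaluate their arguments more than once (for example strlen(src)
++ * appears several times in an expansion), so arguments with side effects are
++ * unsafe. A hypothetical pitfall:
++ *
++ *     SECUREC_STRCPY_SM(dst, sizeof(dst), p++);  // p++ may be evaluated repeatedly
++ *
++ * Pass side-effect-free expressions instead.
++ */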
++
++/* For strncat_s performance optimization */
++#if defined(__GNUC__)
++#define SECUREC_STRNCAT_SM(dest, destMax, src, count) ({ \
++    int ncatRet_ = EOK; \
++    if ((void *)(dest) != NULL && (const void *)(src) != NULL && (size_t)(destMax) > 0 && \
++        (((unsigned long long)(destMax) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN) && \
++        (((unsigned long long)(count) & (unsigned long long)(-2)) < SECUREC_STRING_MAX_LEN)) { \
++        char *ncatTmpDest_ = (char *)(dest); \
++        size_t ncatRestSize_ = (size_t)(destMax); \
++        while (ncatRestSize_ > 0 && *ncatTmpDest_ != '\0') { \
++            ++ncatTmpDest_; \
++            --ncatRestSize_; \
++        } \
++        if (ncatRestSize_ == 0) { \
++            ncatRet_ = EINVAL; \
++        } else if ((SECUREC_TWO_MIN((count), strlen(src)) + 1) <= ncatRestSize_) { \
++            if ((size_t)(count) < strlen(src)) { \
++                memcpy(ncatTmpDest_, (src), (count)); \
++                *(ncatTmpDest_ + (count)) = '\0'; \
++            } else { \
++                memcpy(ncatTmpDest_, (src), strlen(src) + 1); \
++            } \
++        } else { \
++            ncatRet_ = ERANGE; \
++        } \
++        if (ncatRet_ != EOK) { \
++            ncatRet_ = strncat_s((dest), (destMax), (src), (count)); \
++        } \
++    } else { \
++        ncatRet_ = strncat_s((dest), (destMax), (src), (count)); \
++    } \
++    ncatRet_; \
++})
++#else
++#define SECUREC_STRNCAT_SM(dest, destMax, src, count) strncat_s((dest), (destMax), (src), (count))
++#endif
++
++/* These macros do not check for buffer overlap by default */
++#define SECUREC_MEMCPY_SM(dest, destMax, src, count) \
++    (!(((size_t)(destMax) == 0) || \
++    (((unsigned long long)(destMax) & (unsigned long long)(-2)) > SECUREC_MEM_MAX_LEN) || \
++    ((size_t)(count) > (size_t)(destMax)) || ((void *)(dest)) == NULL || ((const void *)(src) == NULL)) ? \
++    (memcpy((dest), (src), (count)), EOK) : \
++    (memcpy_s((dest), (destMax), (src), (count))))
++
++#define SECUREC_MEMSET_SM(dest, destMax, c, count) \
++    (!((((unsigned long long)(destMax) & (unsigned long long)(-2)) > SECUREC_MEM_MAX_LEN) || \
++    ((void *)(dest) == NULL) || ((size_t)(count) > (size_t)(destMax))) ? \
++    (memset((dest), (c), (count)), EOK) : \
++    (memset_s((dest), (destMax), (c), (count))))
++
++#endif
++#endif
++
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 6efbd58ce..4bd0f6fc5 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -2112,15 +2112,6 @@ static inline int security_perf_event_write(struct perf_event *event)
+ #endif /* CONFIG_SECURITY */
+ #endif /* CONFIG_PERF_EVENTS */
+ 
+-#if IS_ENABLED(CONFIG_SECURITY) && IS_ENABLED(CONFIG_SECURITY_XPM)
+-extern int security_mmap_region(struct vm_area_struct *vma);
+-#else
+-static inline int security_mmap_region(struct vm_area_struct *vma)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_SECURITY && CONFIG_SECURITY_XPM */
+-
+ #ifdef CONFIG_IO_URING
+ #ifdef CONFIG_SECURITY
+ extern int security_uring_override_creds(const struct cred *new);
+diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
+index 243e661e3..ea7a74ea7 100644
+--- a/include/linux/stop_machine.h
++++ b/include/linux/stop_machine.h
+@@ -33,9 +33,6 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
+ bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ 			 struct cpu_stop_work *work_buf);
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+-#endif
+ void stop_machine_park(int cpu);
+ void stop_machine_unpark(int cpu);
+ void stop_machine_yield(const struct cpumask *cpumask);
+@@ -86,14 +83,6 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
+ 	return false;
+ }
+ 
+-static inline int stop_cpus(const struct cpumask *cpumask,
+-			    cpu_stop_fn_t fn, void *arg)
+-{
+-	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+-		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+-	return -ENOENT;
+-}
+-
+ static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }
+ 
+ #endif /* CONFIG_SMP */
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 8f4e2fcef..cb25db2a9 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -426,23 +426,6 @@ extern int sysctl_min_slab_ratio;
+ #define
node_reclaim_mode 0 + #endif + +-struct scan_control; +- +-extern unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, +- struct lruvec *lruvec, +- struct scan_control *sc); +-extern bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru); +-extern bool cgroup_reclaim(struct scan_control *sc); +-extern void check_move_unevictable_pages(struct pagevec *pvec); +-extern unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, +- int priority); +-extern bool writeback_throttling_sane(struct scan_control *sc); +-extern inline bool should_continue_reclaim(struct pglist_data *pgdat, +- unsigned long nr_reclaimed, +- struct scan_control *sc); +- +-extern int current_may_throttle(void); +- + static inline bool node_reclaim_enabled(void) + { + /* Is any node_reclaim_mode bit set? */ +@@ -474,9 +457,6 @@ extern atomic_long_t nr_swap_pages; + extern long total_swap_pages; + extern atomic_t nr_rotate_swap; + extern bool has_usable_swap(void); +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +-extern bool free_swap_is_low(void); +-#endif + + /* Swap 50% full? Release swapcache more aggressively.. */ + static inline bool vm_swap_full(void) +diff --git a/include/linux/timer.h b/include/linux/timer.h +index 9993326e1..9162f2758 100644 +--- a/include/linux/timer.h ++++ b/include/linux/timer.h +@@ -179,13 +179,6 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires); + */ + #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) + +-/* To be used from cpusets, only */ +-#ifdef CONFIG_CPU_ISOLATION_OPT +-extern void timer_quiesce_cpu(void *cpup); +-#else +-static inline void timer_quiesce_cpu(void *cpup) { } +-#endif +- + extern void add_timer(struct timer_list *timer); + + extern int try_to_del_timer_sync(struct timer_list *timer); +diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h +index 2e2c52529..b0542cd11 100644 +--- a/include/linux/uidgid.h ++++ b/include/linux/uidgid.h +@@ -55,14 +55,6 @@ static inline gid_t __kgid_val(kgid_t gid) + #define GLOBAL_ROOT_UID KUIDT_INIT(0) + #define GLOBAL_ROOT_GID KGIDT_INIT(0) + +-#ifdef CONFIG_ACCESS_TOKENID +-#define NWEBSPAWN_UID KUIDT_INIT(3081) +-#endif +- +-#ifdef CONFIG_HYPERHOLD +-#define GLOBAL_MEMMGR_UID KUIDT_INIT(1111) +-#endif +- + #define INVALID_UID KUIDT_INIT(-1) + #define INVALID_GID KGIDT_INIT(-1) + +diff --git a/include/linux/vendor/sva_ext.h b/include/linux/vendor/sva_ext.h +new file mode 100644 +index 000000000..17ae4cef1 +--- /dev/null ++++ b/include/linux/vendor/sva_ext.h +@@ -0,0 +1,88 @@ ++/* ++* ++* Copyright (c) 2020-2021 Shenshu Technologies Co., Ltd. ++* ++* This software is licensed under the terms of the GNU General Public ++* License version 2, as published by the Free Software Foundation, and ++* may be copied, distributed, and modified under those terms. ++* ++* This program is distributed in the hope that it will be useful, ++* but WITHOUT ANY WARRANTY; without even the implied warranty of ++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++* GNU General Public License for more details. 
++* ++*/ ++ ++#ifndef _SVA_EXTEND_H ++#define _SVA_EXTEND_H ++ ++#ifdef CONFIG_VENDOR_NPU ++ ++extern void iommu_sva_flush_iotlb_single(struct mm_struct *mm); ++ ++extern const char *iommu_sva_get_smmu_device_name(struct mm_struct *mm); ++ ++extern int arm_smmu_device_post_probe(const char *device_name); ++ ++extern int arm_smmu_device_resume(const char *device_name); ++ ++extern int arm_smmu_device_suspend(const char *device_name); ++ ++extern int arm_smmu_device_reset_ex(const char *device_name); ++ ++extern const char *arm_smmu_get_device_name(struct iommu_domain *domain); ++ ++extern int svm_flush_cache(struct mm_struct *mm, unsigned long addr, size_t size); ++ ++extern void svm_smmu_clk_live_enter(void); ++extern void svm_smmu_clk_live_exit(void); ++ ++#else ++static inline void iommu_sva_flush_iotlb_single(struct mm_struct *mm) ++{ ++ return; ++} ++ ++static inline const char *iommu_sva_get_smmu_device_name(struct mm_struct *mm) ++{ ++ return NULL; ++} ++ ++static inline int arm_smmu_device_post_probe(const char *device_name) ++{ ++ return -1; ++} ++ ++static inline int arm_smmu_device_resume(const char *device_name) ++{ ++ return -1; ++} ++ ++static inline int arm_smmu_device_suspend(const char *device_name) ++{ ++ return -1; ++} ++ ++static inline int arm_smmu_device_reset_ex(const char *device_name) ++{ ++ return -1; ++} ++ ++static inline int svm_flush_cache(struct mm_struct *mm, unsigned long addr, size_t size) ++{ ++ return -1; ++} ++ ++static inline void svm_smmu_clk_live_enter(void) ++{ ++ return; ++} ++ ++static inline void svm_smmu_clk_live_exit(void) ++{ ++ return; ++} ++ ++#endif ++ ++#endif /* _SVA_EXTEND_H */ +diff --git a/include/linux/vendor/vendor_i2c.h b/include/linux/vendor/vendor_i2c.h +new file mode 100644 +index 000000000..e3094662a +--- /dev/null ++++ b/include/linux/vendor/vendor_i2c.h +@@ -0,0 +1,27 @@ ++/* ++ * Copyright (c) HiSilicon (Shanghai) Technologies Co., Ltd. 2020-2023. All rights reserved. 
++ */ ++#ifndef __VENDOR_LINUX_I2C_H ++#define __VENDOR_LINUX_I2C_H ++ ++struct i2c_msg; ++struct i2c_adapter; ++struct i2c_client; ++ ++#define I2C_M_16BIT_REG 0x0002 /* indicate reg bit-width is 16bit */ ++#define I2C_M_16BIT_DATA 0x0008 /* indicate data bit-width is 16bit */ ++#define I2C_M_DMA 0x0004 /* indicate use dma mode */ ++ ++extern int bsp_i2c_master_send(const struct i2c_client *client, const char *buf, ++ __u16 count); ++ ++extern int bsp_i2c_master_send_mul_reg(const struct i2c_client *client, const char *buf, ++ __u16 count, unsigned int reg_data_width); ++ ++extern int bsp_i2c_master_recv(const struct i2c_client *client, const char *buf, ++ int count); ++ ++extern int bsp_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, ++ int num); ++ ++#endif /* __VENDOR_LINUX_I2C_H */ +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h +index d775f3ca9..8abfa1240 100644 +--- a/include/linux/vm_event_item.h ++++ b/include/linux/vm_event_item.h +@@ -155,24 +155,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, + VMA_LOCK_ABORT, + VMA_LOCK_RETRY, + VMA_LOCK_MISS, +-#endif +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- ZSWAPD_WAKEUP, +- ZSWAPD_REFAULT, +- ZSWAPD_MEDIUM_PRESS, +- ZSWAPD_CRITICAL_PRESS, +- ZSWAPD_MEMCG_RATIO_SKIP, +- ZSWAPD_MEMCG_REFAULT_SKIP, +- ZSWAPD_SWAPOUT, +- ZSWAPD_EMPTY_ROUND, +- ZSWAPD_EMPTY_ROUND_SKIP_TIMES, +- ZSWAPD_SNAPSHOT_TIMES, +- ZSWAPD_RECLAIMED, +- ZSWAPD_SCANNED, +-#endif +-#ifdef CONFIG_HYPERHOLD_MEMCG +- FREEZE_RECLAIMED, +- FREEZE_RECLAIME_COUNT, + #endif + NR_VM_EVENT_ITEMS + }; +diff --git a/include/trace/events/eas_sched.h b/include/trace/events/eas_sched.h +deleted file mode 100755 +index d015a3cf4..000000000 +--- a/include/trace/events/eas_sched.h ++++ /dev/null +@@ -1,76 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#ifdef CONFIG_SCHED_RT_CAS +-TRACE_EVENT(sched_find_cas_cpu_each, +- +- TP_PROTO(struct task_struct *task, int cpu, int target_cpu, +- int isolated, int idle, unsigned long task_util, +- unsigned long cpu_util, int cpu_cap), +- +- TP_ARGS(task, cpu, target_cpu, isolated, idle, task_util, cpu_util, cpu_cap), +- +- TP_STRUCT__entry( +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __field(int, prio) +- __field(int, cpu) +- __field(int, target_cpu) +- __field(int, isolated) +- __field(unsigned long, idle) +- __field(unsigned long, task_util) +- __field(unsigned long, cpu_util) +- __field(unsigned long, cpu_cap) +- ), +- +- TP_fast_assign( +- memcpy(__entry->comm, task->comm, TASK_COMM_LEN); +- __entry->pid = task->pid; +- __entry->prio = task->prio; +- __entry->cpu = cpu; +- __entry->target_cpu = target_cpu; +- __entry->isolated = isolated; +- __entry->idle = idle; +- __entry->task_util = task_util; +- __entry->cpu_util = cpu_util; +- __entry->cpu_cap = cpu_cap; +- ), +- +- TP_printk("comm=%s pid=%d prio=%d cpu=%d target_cpu=%d isolated=%d idle=%lu task_util=%lu cpu_util=%lu cpu_cap=%lu", +- __entry->comm, __entry->pid, __entry->prio, +- __entry->cpu, __entry->target_cpu, __entry->isolated, +- __entry->idle, __entry->task_util, +- __entry->cpu_util, __entry->cpu_cap) +-); +- +-TRACE_EVENT(sched_find_cas_cpu, +- +- TP_PROTO(struct task_struct *task, struct cpumask *lowest_mask, +- unsigned long tutil, int prev_cpu, int target_cpu), +- +- TP_ARGS(task, lowest_mask, tutil, prev_cpu, target_cpu), +- +- TP_STRUCT__entry( +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __field(unsigned int, prio) +- __bitmask(lowest, num_possible_cpus()) +- __field(unsigned long, tutil) +- 
__field(int, prev_cpu) +- __field(int, target_cpu) +- ), +- +- TP_fast_assign( +- memcpy(__entry->comm, task->comm, TASK_COMM_LEN); +- __entry->pid = task->pid; +- __entry->prio = task->prio; +- __assign_bitmask(lowest, cpumask_bits(lowest_mask), num_possible_cpus()); +- __entry->tutil = tutil; +- __entry->prev_cpu = prev_cpu; +- __entry->target_cpu = target_cpu; +- ), +- +- TP_printk("comm=%s pid=%d prio=%d lowest_mask=%s tutil=%lu prev=%d target=%d ", +- __entry->comm, __entry->pid, __entry->prio, +- __get_bitmask(lowest), __entry->tutil, +- __entry->prev_cpu, __entry->target_cpu) +-); +-#endif /* CONFIG_SCHED_RT_CAS */ +diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h +index 5078d1dfc..e010618f9 100644 +--- a/include/trace/events/mmflags.h ++++ b/include/trace/events/mmflags.h +@@ -65,18 +65,6 @@ + __def_gfpflag_names __def_gfpflag_names_kasan \ + ) : "none" + +-#ifdef CONFIG_MEM_PURGEABLE +-#define IF_HAVE_PG_PURGEABLE(_name) ,{1UL << PG_##_name, __stringify(_name)} +-#else +-#define IF_HAVE_PG_PURGEABLE(_name) +-#endif +- +-#ifdef CONFIG_SECURITY_XPM +-#define IF_HAVE_PG_XPM_INTEGRITY(_name) ,{1UL << PG_##_name, __stringify(_name)} +-#else +-#define IF_HAVE_PG_XPM_INTEGRITY(_name) +-#endif +- + #ifdef CONFIG_MMU + #define IF_HAVE_PG_MLOCK(_name) ,{1UL << PG_##_name, __stringify(_name)} + #else +@@ -131,16 +119,13 @@ + DEF_PAGEFLAG_NAME(reclaim), \ + DEF_PAGEFLAG_NAME(swapbacked), \ + DEF_PAGEFLAG_NAME(unevictable) \ +-IF_HAVE_PG_PURGEABLE(purgeable) \ + IF_HAVE_PG_MLOCK(mlocked) \ + IF_HAVE_PG_UNCACHED(uncached) \ + IF_HAVE_PG_HWPOISON(hwpoison) \ + IF_HAVE_PG_IDLE(idle) \ + IF_HAVE_PG_IDLE(young) \ + IF_HAVE_PG_ARCH_X(arch_2) \ +-IF_HAVE_PG_ARCH_X(arch_3) \ +-IF_HAVE_PG_ARCH_X(xpm_readonly) \ +-IF_HAVE_PG_ARCH_X(xpm_writetainted) ++IF_HAVE_PG_ARCH_X(arch_3) + + #define show_page_flags(flags) \ + (flags) ? 
__print_flags(flags, "|", \ +diff --git a/include/trace/events/rtg.h b/include/trace/events/rtg.h +deleted file mode 100755 +index e44730130..000000000 +--- a/include/trace/events/rtg.h ++++ /dev/null +@@ -1,146 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#undef TRACE_SYSTEM +-#define TRACE_SYSTEM rtg +- +-#if !defined(_TRACE_RTG_H) || defined(TRACE_HEADER_MULTI_READ) +-#define _TRACE_RTG_H +- +-#include +-#include +-#include +-#include +- +-struct rq; +- +-TRACE_EVENT(find_rtg_cpu, +- +- TP_PROTO(struct task_struct *p, const struct cpumask *perferred_cpumask, +- char *msg, int cpu), +- +- TP_ARGS(p, perferred_cpumask, msg, cpu), +- +- TP_STRUCT__entry( +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __bitmask(cpus, num_possible_cpus()) +- __array(char, msg, TASK_COMM_LEN) +- __field(int, cpu) +- ), +- +- TP_fast_assign( +- __entry->pid = p->pid; +- memcpy(__entry->comm, p->comm, TASK_COMM_LEN); +- __assign_bitmask(cpus, cpumask_bits(perferred_cpumask), num_possible_cpus()); +- memcpy(__entry->msg, msg, min((size_t)TASK_COMM_LEN, strlen(msg)+1)); +- __entry->cpu = cpu; +- ), +- +- TP_printk("comm=%s pid=%d perferred_cpus=%s reason=%s target_cpu=%d", +- __entry->comm, __entry->pid, __get_bitmask(cpus), __entry->msg, __entry->cpu) +-); +- +-TRACE_EVENT(sched_rtg_task_each, +- +- TP_PROTO(unsigned int id, unsigned int nr_running, struct task_struct *task), +- +- TP_ARGS(id, nr_running, task), +- +- TP_STRUCT__entry( +- __field(unsigned int, id) +- __field(unsigned int, nr_running) +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __field(int, prio) +- __bitmask(allowed, num_possible_cpus()) +- __field(int, cpu) +- __field(int, state) +- __field(bool, on_rq) +- __field(int, on_cpu) +- ), +- +- TP_fast_assign( +- __entry->id = id; +- __entry->nr_running = nr_running; +- memcpy(__entry->comm, task->comm, TASK_COMM_LEN); +- __entry->pid = task->pid; +- __entry->prio = task->prio; +- __assign_bitmask(allowed, cpumask_bits(&task->cpus_mask), num_possible_cpus()); +- __entry->cpu = task_cpu(task); +- __entry->state = task->__state; +- __entry->on_rq = task->on_rq; +- __entry->on_cpu = task->on_cpu; +- ), +- +- TP_printk("comm=%s pid=%d prio=%d allowed=%s cpu=%d state=%s%s on_rq=%d on_cpu=%d", +- __entry->comm, __entry->pid, __entry->prio, __get_bitmask(allowed), __entry->cpu, +- __entry->state & (TASK_REPORT_MAX) ? +- __print_flags(__entry->state & (TASK_REPORT_MAX), "|", +- { TASK_INTERRUPTIBLE, "S" }, +- { TASK_UNINTERRUPTIBLE, "D" }, +- { __TASK_STOPPED, "T" }, +- { __TASK_TRACED, "t" }, +- { EXIT_DEAD, "X" }, +- { EXIT_ZOMBIE, "Z" }, +- { TASK_DEAD, "x" }, +- { TASK_WAKEKILL, "K"}, +- { TASK_WAKING, "W"}) : "R", +- __entry->state & TASK_STATE_MAX ? 
"+" : "", +- __entry->on_rq, __entry->on_cpu) +-); +- +-TRACE_EVENT(sched_rtg_valid_normalized_util, +- +- TP_PROTO(unsigned int id, unsigned int nr_running, +- const struct cpumask *rtg_cpus, unsigned int valid), +- +- TP_ARGS(id, nr_running, rtg_cpus, valid), +- +- TP_STRUCT__entry( +- __field(unsigned int, id) +- __field(unsigned int, nr_running) +- __bitmask(cpus, num_possible_cpus()) +- __field(unsigned int, valid) +- ), +- +- TP_fast_assign( +- __entry->id = id; +- __entry->nr_running = nr_running; +- __assign_bitmask(cpus, cpumask_bits(rtg_cpus), num_possible_cpus()); +- __entry->valid = valid; +- ), +- +- TP_printk("id=%d nr_running=%d cpus=%s valid=%d", +- __entry->id, __entry->nr_running, +- __get_bitmask(cpus), __entry->valid) +-); +- +-#ifdef CONFIG_SCHED_RTG_FRAME +-TRACE_EVENT(rtg_frame_sched, +- +- TP_PROTO(int rtgid, const char *s, s64 value), +- +- TP_ARGS(rtgid, s, value), +- TP_STRUCT__entry( +- __field(int, rtgid) +- __field(struct frame_info *, frame) +- __field(pid_t, pid) +- __string(str, s) +- __field(s64, value) +- ), +- +- TP_fast_assign( +- __assign_str(str, s); +- __entry->rtgid = rtgid != -1 ? rtgid : (current->grp ? current->grp->id : 0); +- __entry->frame = rtg_frame_info(rtgid); +- __entry->pid = __entry->frame ? ((__entry->frame->thread[0]) ? +- ((__entry->frame->thread[0])->pid) : +- current->tgid) : current->tgid; +- __entry->value = value; +- ), +- TP_printk("C|%d|%s_%d|%lld", __entry->pid, __get_str(str), __entry->rtgid, __entry->value) +-); +-#endif +-#endif /* _TRACE_RTG_H */ +- +-/* This part must be outside protection */ +-#include +diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h +index 6558b3f51..bdb1e8389 100644 +--- a/include/trace/events/sched.h ++++ b/include/trace/events/sched.h +@@ -7,14 +7,9 @@ + + #include + #include +-#include + #include + #include + +-#ifdef CONFIG_SCHED_RT_CAS +-#include "eas_sched.h" +-#endif +- + /* + * Tracepoint for calling kthread_stop, performed to end a kthread: + */ +@@ -739,165 +734,6 @@ TRACE_EVENT(sched_wake_idle_without_ipi, + TP_printk("cpu=%d", __entry->cpu) + ); + +-#ifdef CONFIG_SCHED_CORE_CTRL +-TRACE_EVENT(core_ctl_eval_need, +- +- TP_PROTO(unsigned int cpu, unsigned int old_need, +- unsigned int new_need, unsigned int updated), +- TP_ARGS(cpu, old_need, new_need, updated), +- TP_STRUCT__entry( +- __field(u32, cpu) +- __field(u32, old_need) +- __field(u32, new_need) +- __field(u32, updated) +- ), +- TP_fast_assign( +- __entry->cpu = cpu; +- __entry->old_need = old_need; +- __entry->new_need = new_need; +- __entry->updated = updated; +- ), +- TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu, +- __entry->old_need, __entry->new_need, __entry->updated) +-); +- +-TRACE_EVENT(core_ctl_set_busy, +- +- TP_PROTO(unsigned int cpu, unsigned int busy, +- unsigned int old_is_busy, unsigned int is_busy, int high_irqload), +- TP_ARGS(cpu, busy, old_is_busy, is_busy, high_irqload), +- TP_STRUCT__entry( +- __field(u32, cpu) +- __field(u32, busy) +- __field(u32, old_is_busy) +- __field(u32, is_busy) +- __field(bool, high_irqload) +- ), +- TP_fast_assign( +- __entry->cpu = cpu; +- __entry->busy = busy; +- __entry->old_is_busy = old_is_busy; +- __entry->is_busy = is_busy; +- __entry->high_irqload = high_irqload; +- ), +- TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u high_irqload=%d", +- __entry->cpu, __entry->busy, __entry->old_is_busy, +- __entry->is_busy, __entry->high_irqload) +-); +- +-TRACE_EVENT(core_ctl_set_boost, +- +- TP_PROTO(u32 refcount, s32 ret), +- 
TP_ARGS(refcount, ret), +- TP_STRUCT__entry( +- __field(u32, refcount) +- __field(s32, ret) +- ), +- TP_fast_assign( +- __entry->refcount = refcount; +- __entry->ret = ret; +- ), +- TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret) +-); +- +-TRACE_EVENT(core_ctl_update_nr_need, +- +- TP_PROTO(int cpu, int nr_need, int prev_misfit_need, +- int nrrun, int max_nr, int nr_prev_assist), +- +- TP_ARGS(cpu, nr_need, prev_misfit_need, nrrun, max_nr, nr_prev_assist), +- +- TP_STRUCT__entry( +- __field(int, cpu) +- __field(int, nr_need) +- __field(int, prev_misfit_need) +- __field(int, nrrun) +- __field(int, max_nr) +- __field(int, nr_prev_assist) +- ), +- +- TP_fast_assign( +- __entry->cpu = cpu; +- __entry->nr_need = nr_need; +- __entry->prev_misfit_need = prev_misfit_need; +- __entry->nrrun = nrrun; +- __entry->max_nr = max_nr; +- __entry->nr_prev_assist = nr_prev_assist; +- ), +- +- TP_printk("cpu=%d nr_need=%d prev_misfit_need=%d nrrun=%d max_nr=%d nr_prev_assist=%d", +- __entry->cpu, __entry->nr_need, __entry->prev_misfit_need, +- __entry->nrrun, __entry->max_nr, __entry->nr_prev_assist) +-); +-#endif +- +-#ifdef CONFIG_SCHED_RUNNING_AVG +-/* +- * Tracepoint for sched_get_nr_running_avg +- */ +-TRACE_EVENT(sched_get_nr_running_avg, +- +- TP_PROTO(int cpu, int nr, int nr_misfit, int nr_max), +- +- TP_ARGS(cpu, nr, nr_misfit, nr_max), +- +- TP_STRUCT__entry( +- __field(int, cpu) +- __field(int, nr) +- __field(int, nr_misfit) +- __field(int, nr_max) +- ), +- +- TP_fast_assign( +- __entry->cpu = cpu; +- __entry->nr = nr; +- __entry->nr_misfit = nr_misfit; +- __entry->nr_max = nr_max; +- ), +- +- TP_printk("cpu=%d nr=%d nr_misfit=%d nr_max=%d", +- __entry->cpu, __entry->nr, __entry->nr_misfit, __entry->nr_max) +-); +-#endif +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-/* +- * sched_isolate - called when cores are isolated/unisolated +- * +- * @acutal_mask: mask of cores actually isolated/unisolated +- * @req_mask: mask of cores requested isolated/unisolated +- * @online_mask: cpu online mask +- * @time: amount of time in us it took to isolate/unisolate +- * @isolate: 1 if isolating, 0 if unisolating +- * +- */ +-TRACE_EVENT(sched_isolate, +- +- TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus, +- u64 start_time, unsigned char isolate), +- +- TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate), +- +- TP_STRUCT__entry( +- __field(u32, requested_cpu) +- __field(u32, isolated_cpus) +- __field(u32, time) +- __field(unsigned char, isolate) +- ), +- +- TP_fast_assign( +- __entry->requested_cpu = requested_cpu; +- __entry->isolated_cpus = isolated_cpus; +- __entry->time = div64_u64(sched_clock() - start_time, 1000); +- __entry->isolate = isolate; +- ), +- +- TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d", +- __entry->requested_cpu, __entry->isolated_cpus, +- __entry->time, __entry->isolate) +-); +-#endif +- + /* + * Following tracepoints are not exported in tracefs and provide hooking + * mechanisms only for testing and debugging purposes. 
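Note on the tracepoint removals above: each TRACE_EVENT(name, ...) block expands into a
generated trace_<name>() inline function that call sites invoke, so deleting a definition such
as sched_isolate or core_ctl_eval_need only links if every caller is removed in the same
series. A minimal sketch of that pairing, using a hypothetical event sched_foo that is not
part of this patch:

    /* include/trace/events/foo.h -- same shape as the events deleted above */
    TRACE_EVENT(sched_foo,
        TP_PROTO(int cpu, unsigned long util),
        TP_ARGS(cpu, util),
        TP_STRUCT__entry(
            __field(int, cpu)
            __field(unsigned long, util)
        ),
        TP_fast_assign(
            __entry->cpu = cpu;
            __entry->util = util;
        ),
        TP_printk("cpu=%d util=%lu", __entry->cpu, __entry->util)
    );

    /* at a call site in kernel/sched/: the generated trace_sched_foo() stub
     * disappears with the event, so calls like this must be deleted too. */
    trace_sched_foo(cpu, util);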
+diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h +index bef2cf6f9..d2123dd96 100644 +--- a/include/trace/events/vmscan.h ++++ b/include/trace/events/vmscan.h +@@ -350,36 +350,6 @@ TRACE_EVENT(mm_vmscan_write_folio, + show_reclaim_flags(__entry->reclaim_flags)) + ); + +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +-TRACE_EVENT(mm_vmscan_lru_zswapd_shrink_active, +- +- TP_PROTO(int nid, unsigned long nr_taken, +- unsigned long nr_deactivated, int priority), +- +- TP_ARGS(nid, nr_taken, nr_deactivated, priority), +- +- TP_STRUCT__entry( +- __field(int, nid) +- __field(unsigned long, nr_taken) +- __field(unsigned long, nr_deactivated) +- __field(int, priority) +- ), +- +- TP_fast_assign( +- __entry->nid = nid; +- __entry->nr_taken = nr_taken; +- __entry->nr_deactivated = nr_deactivated; +- __entry->priority = priority; +- ), +- +- TP_printk("nid=%d nr_taken=%ld nr_deactivated=%ld priority=%d", +- __entry->nid, +- __entry->nr_taken, +- __entry->nr_deactivated, +- __entry->priority) +-); +-#endif +- + TRACE_EVENT(mm_vmscan_lru_shrink_inactive, + + TP_PROTO(int nid, +diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h +deleted file mode 100755 +index 9af92c868..000000000 +--- a/include/trace/events/walt.h ++++ /dev/null +@@ -1,256 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-#undef TRACE_SYSTEM +-#define TRACE_SYSTEM walt +- +-#if !defined(_TRACE_WALT_H) || defined(TRACE_HEADER_MULTI_READ) +-#define _TRACE_WALT_H +- +-#include +-#include +- +-struct rq; +-extern const char *task_event_names[]; +- +-#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT) +-static inline void __window_data(u32 *dst, u32 *src) +-{ +- if (src) +- memcpy(dst, src, nr_cpu_ids * sizeof(u32)); +- else +- memset(dst, 0, nr_cpu_ids * sizeof(u32)); +-} +- +-struct trace_seq; +-const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len) +-{ +- int i; +- const char *ret = p->buffer + seq_buf_used(&p->seq); +- +- for (i = 0; i < buf_len; i++) +- trace_seq_printf(p, "%u ", buf[i]); +- +- trace_seq_putc(p, 0); +- +- return ret; +-} +- +-static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new) +-{ +- if (curr) +- if (new) +- return rq->nt_curr_runnable_sum; +- else +- return rq->curr_runnable_sum; +- else +- if (new) +- return rq->nt_prev_runnable_sum; +- else +- return rq->prev_runnable_sum; +-} +- +-#ifdef CONFIG_SCHED_RTG +-static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new) +-{ +- if (curr) +- if (new) +- return rq->grp_time.nt_curr_runnable_sum; +- else +- return rq->grp_time.curr_runnable_sum; +- else +- if (new) +- return rq->grp_time.nt_prev_runnable_sum; +- else +- return rq->grp_time.prev_runnable_sum; +-} +- +-static inline s64 +-__get_update_sum(struct rq *rq, enum migrate_types migrate_type, +- bool src, bool new, bool curr) +-{ +- switch (migrate_type) { +- case RQ_TO_GROUP: +- if (src) +- return __rq_update_sum(rq, curr, new); +- else +- return __grp_update_sum(rq, curr, new); +- case GROUP_TO_RQ: +- if (src) +- return __grp_update_sum(rq, curr, new); +- else +- return __rq_update_sum(rq, curr, new); +- default: +- WARN_ON_ONCE(1); +- return -1; +- } +-} +-#endif +-#endif +- +-TRACE_EVENT(sched_update_history, +- +- TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples, +- enum task_event evt), +- +- TP_ARGS(rq, p, runtime, samples, evt), +- +- TP_STRUCT__entry( +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __field(unsigned int, runtime) +- __field(int, samples) +- __field(enum 
task_event, evt) +- __field(unsigned int, demand) +- __array(u32, hist, RAVG_HIST_SIZE_MAX) +- __field(int, cpu) +- ), +- +- TP_fast_assign( +- memcpy(__entry->comm, p->comm, TASK_COMM_LEN); +- __entry->pid = p->pid; +- __entry->runtime = runtime; +- __entry->samples = samples; +- __entry->evt = evt; +- __entry->demand = p->ravg.demand; +- memcpy(__entry->hist, p->ravg.sum_history, +- RAVG_HIST_SIZE_MAX * sizeof(u32)); +- __entry->cpu = rq->cpu; +- ), +- +- TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d", +- __entry->pid, __entry->comm, +- __entry->runtime, __entry->samples, +- task_event_names[__entry->evt], __entry->demand, +- __entry->hist[0], __entry->hist[1], +- __entry->hist[2], __entry->hist[3], +- __entry->hist[4], __entry->cpu) +-); +- +-TRACE_EVENT(sched_update_task_ravg, +- +- TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt, +- u64 wallclock, u64 irqtime), +- +- TP_ARGS(p, rq, evt, wallclock, irqtime), +- +- TP_STRUCT__entry( +- __array(char, comm, TASK_COMM_LEN) +- __field(pid_t, pid) +- __field(pid_t, cur_pid) +- __field(unsigned int, cur_freq) +- __field(u64, wallclock) +- __field(u64, mark_start) +- __field(u64, delta_m) +- __field(u64, win_start) +- __field(u64, delta) +- __field(u64, irqtime) +- __field(enum task_event, evt) +- __field(unsigned int, demand) +- __field(unsigned int, sum) +- __field(int, cpu) +- __field(u64, rq_cs) +- __field(u64, rq_ps) +- __field(u32, curr_window) +- __field(u32, prev_window) +- __dynamic_array(u32, curr_sum, nr_cpu_ids) +- __dynamic_array(u32, prev_sum, nr_cpu_ids) +- __field(u64, nt_cs) +- __field(u64, nt_ps) +- __field(u32, active_windows) +- ), +- +- TP_fast_assign( +- __entry->wallclock = wallclock; +- __entry->win_start = rq->window_start; +- __entry->delta = (wallclock - rq->window_start); +- __entry->evt = evt; +- __entry->cpu = rq->cpu; +- __entry->cur_pid = rq->curr->pid; +- __entry->cur_freq = rq->cluster->cur_freq; +- memcpy(__entry->comm, p->comm, TASK_COMM_LEN); +- __entry->pid = p->pid; +- __entry->mark_start = p->ravg.mark_start; +- __entry->delta_m = (wallclock - p->ravg.mark_start); +- __entry->demand = p->ravg.demand; +- __entry->sum = p->ravg.sum; +- __entry->irqtime = irqtime; +- __entry->rq_cs = rq->curr_runnable_sum; +- __entry->rq_ps = rq->prev_runnable_sum; +- __entry->curr_window = p->ravg.curr_window; +- __entry->prev_window = p->ravg.prev_window; +- __window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu); +- __window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu); +- __entry->nt_cs = rq->nt_curr_runnable_sum; +- __entry->nt_ps = rq->nt_prev_runnable_sum; +- __entry->active_windows = p->ravg.active_windows; +- ), +- +- TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u", +- __entry->wallclock, __entry->win_start, __entry->delta, +- task_event_names[__entry->evt], __entry->cpu, +- __entry->cur_freq, __entry->cur_pid, +- __entry->pid, __entry->comm, __entry->mark_start, +- __entry->delta_m, __entry->demand, +- __entry->sum, __entry->irqtime, +- __entry->rq_cs, __entry->rq_ps, __entry->curr_window, +- __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids), +- __entry->prev_window, +- __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids), +- __entry->nt_cs, __entry->nt_ps, +- __entry->active_windows) +-); +- +-extern const char 
*migrate_type_names[]; +- +-#ifdef CONFIG_SCHED_RTG +-TRACE_EVENT(sched_migration_update_sum, +- +- TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq), +- +- TP_ARGS(p, migrate_type, rq), +- +- TP_STRUCT__entry( +- __field(int, tcpu) +- __field(int, pid) +- __field(enum migrate_types, migrate_type) +- __field(s64, src_cs) +- __field(s64, src_ps) +- __field(s64, dst_cs) +- __field(s64, dst_ps) +- __field(s64, src_nt_cs) +- __field(s64, src_nt_ps) +- __field(s64, dst_nt_cs) +- __field(s64, dst_nt_ps) +- ), +- +- TP_fast_assign( +- __entry->tcpu = task_cpu(p); +- __entry->pid = p->pid; +- __entry->migrate_type = migrate_type; +- __entry->src_cs = __get_update_sum(rq, migrate_type, +- true, false, true); +- __entry->src_ps = __get_update_sum(rq, migrate_type, +- true, false, false); +- __entry->dst_cs = __get_update_sum(rq, migrate_type, +- false, false, true); +- __entry->dst_ps = __get_update_sum(rq, migrate_type, +- false, false, false); +- __entry->src_nt_cs = __get_update_sum(rq, migrate_type, +- true, true, true); +- __entry->src_nt_ps = __get_update_sum(rq, migrate_type, +- true, true, false); +- __entry->dst_nt_cs = __get_update_sum(rq, migrate_type, +- false, true, true); +- __entry->dst_nt_ps = __get_update_sum(rq, migrate_type, +- false, true, false); +- ), +- +- TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld", +- __entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type], +- __entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps, +- __entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps) +-); +-#endif +-#endif /* _TRACE_WALT_H */ +- +-/* This part must be outside protection */ +-#include +diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h +index 98172b931..6ce1f1ceb 100644 +--- a/include/uapi/asm-generic/mman-common.h ++++ b/include/uapi/asm-generic/mman-common.h +@@ -21,7 +21,6 @@ + #define MAP_TYPE 0x0f /* Mask for type of mapping */ + #define MAP_FIXED 0x10 /* Interpret addr exactly */ + #define MAP_ANONYMOUS 0x20 /* don't use a file */ +-#define MAP_XPM 0x40 /* xpm control memory */ + + /* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */ + #define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */ +@@ -34,9 +33,6 @@ + #define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be + * uninitialized */ + +-#define MAP_JIT 0x80000000 /* For JIT compiler which apply FORT_NONE memory +- * and turn it into PORT_EXEC when code run */ +- + /* + * Flags for mlock + */ +diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h +index 806caae8a..5f636b5af 100644 +--- a/include/uapi/linux/android/binder.h ++++ b/include/uapi/linux/android/binder.h +@@ -251,21 +251,6 @@ struct binder_extended_error { + __s32 param; + }; + +-struct binder_feature_set { +- __u64 feature_set; +-}; +- +-struct access_token { +- __u64 sender_tokenid; +- __u64 first_tokenid; +- __u64 reserved[2]; +-}; +- +-struct binder_sender_info { +- struct access_token tokens; +- __u64 sender_pid_nr; +-}; +- + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) + #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) + #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) +@@ -281,10 +266,6 @@ struct binder_sender_info { + #define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32) + #define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, 
struct binder_extended_error) + +-#define BINDER_FEATURE_SET _IOWR('b', 30, struct binder_feature_set) +-#define BINDER_GET_ACCESS_TOKEN _IOWR('b', 31, struct access_token) +-#define BINDER_GET_SENDER_INFO _IOWR('b', 32, struct binder_sender_info) +- + /* + * NOTE: Two special error codes you should check for when calling + * in to the driver are: +diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h +index 21225e3f2..15384e22e 100644 +--- a/include/uapi/linux/fsverity.h ++++ b/include/uapi/linux/fsverity.h +@@ -100,26 +100,4 @@ struct fsverity_read_metadata_arg { + #define FS_IOC_READ_VERITY_METADATA \ + _IOWR('f', 135, struct fsverity_read_metadata_arg) + +-struct code_sign_enable_arg { +- __u32 version; +- __u32 hash_algorithm; +- __u32 block_size; +- __u32 salt_size; +- __u64 salt_ptr; +- __u32 sig_size; +- __u32 __reserved1; +- __u64 sig_ptr; +- __u64 __reserved2[5]; +- __u32 __reserved3; +- __u32 pgtypeinfo_size; +- __u64 pgtypeinfo_off; +- __u64 tree_offset; +- __u64 root_hash_ptr; +- __u64 data_size; +- __u32 flags; +- __u32 cs_version; +-}; +- +-#define FS_IOC_ENABLE_CODE_SIGN _IOW('f', 200, struct code_sign_enable_arg) +- + #endif /* _UAPI_LINUX_FSVERITY_H */ +diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h +index b2e932c25..3bac0a8ce 100644 +--- a/include/uapi/linux/sched.h ++++ b/include/uapi/linux/sched.h +@@ -132,7 +132,6 @@ struct clone_args { + #define SCHED_FLAG_KEEP_PARAMS 0x10 + #define SCHED_FLAG_UTIL_CLAMP_MIN 0x20 + #define SCHED_FLAG_UTIL_CLAMP_MAX 0x40 +-#define SCHED_FLAG_LATENCY_NICE 0x80 + + #define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \ + SCHED_FLAG_KEEP_PARAMS) +@@ -144,7 +143,6 @@ struct clone_args { + SCHED_FLAG_RECLAIM | \ + SCHED_FLAG_DL_OVERRUN | \ + SCHED_FLAG_KEEP_ALL | \ +- SCHED_FLAG_UTIL_CLAMP | \ +- SCHED_FLAG_LATENCY_NICE) ++ SCHED_FLAG_UTIL_CLAMP) + + #endif /* _UAPI_LINUX_SCHED_H */ +diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h +index 0c7f45507..906623856 100644 +--- a/include/uapi/linux/sched/types.h ++++ b/include/uapi/linux/sched/types.h +@@ -6,7 +6,6 @@ + + #define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + #define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */ +-#define SCHED_ATTR_SIZE_VER2 60 /* add: latency_nice */ + + /* + * Extended scheduling parameters data structure. +@@ -95,22 +94,6 @@ + * scheduled on a CPU with no more capacity than the specified value. + * + * A task utilization boundary can be reset by setting the attribute to -1. +- * +- * Latency Tolerance Attributes +- * =========================== +- * +- * A subset of sched_attr attributes allows to specify the relative latency +- * requirements of a task with respect to the other tasks running/queued in the +- * system. +- * +- * @ sched_latency_nice task's latency_nice value +- * +- * The latency_nice of a task can have any value in a range of +- * [MIN_LATENCY_NICE..MAX_LATENCY_NICE]. +- * +- * A task with latency_nice with the value of LATENCY_NICE_MIN can be +- * taken for a task requiring a lower latency as opposed to the task with +- * higher latency_nice. 
+ */ + struct sched_attr { + __u32 size; +@@ -133,8 +116,6 @@ struct sched_attr { + __u32 sched_util_min; + __u32 sched_util_max; + +- /* latency requirement hints */ +- __s32 sched_latency_nice; + }; + + #endif /* _UAPI_LINUX_SCHED_TYPES_H */ +diff --git a/init/Kconfig b/init/Kconfig +index 486bd372b..1105cb53f 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -485,7 +485,6 @@ source "kernel/irq/Kconfig" + source "kernel/time/Kconfig" + source "kernel/bpf/Kconfig" + source "kernel/Kconfig.preempt" +-source "kernel/sched/rtg/Kconfig" + + menu "CPU/Task time and stats accounting" + +@@ -575,14 +574,6 @@ config SCHED_THERMAL_PRESSURE + + This requires the architecture to implement + arch_update_thermal_pressure() and arch_scale_thermal_pressure(). +-config SCHED_WALT +- bool "Support window based load tracking" +- depends on SMP +- help +- This feature will allow the scheduler to maintain a tunable window +- based set of metrics for tasks and runqueues. These metrics can be +- used to guide task placement as well as task frequency requirements +- for cpufreq governors. + + config BSD_PROCESS_ACCT + bool "BSD Process Accounting" +@@ -708,32 +699,6 @@ config CPU_ISOLATION + + Say Y if unsure. + +-config SCHED_RUNNING_AVG +- bool "per-rq and per-cluster running average statistics" +- default n +- +-config CPU_ISOLATION_OPT +- bool "CPU isolation optimization" +- depends on SMP +- default n +- help +- This option enables cpu isolation optimization, which allows +- to isolate cpu dynamically. The isolated cpu will be unavailable +- to scheduler and load balancer, and all its non-pinned timers, +- IRQs and tasks will be migrated to other cpus, only pinned +- kthread and IRQS are still allowed to run, this achieves +- similar effect as hotplug but at lower latency cost. +- +-config SCHED_CORE_CTRL +- bool "Core control" +- depends on CPU_ISOLATION_OPT +- select SCHED_RUNNING_AVG +- default n +- help +- This option enables the core control functionality in +- the scheduler. Core control automatically isolate and +- unisolate cores based on cpu load and utilization. +- + source "kernel/rcu/Kconfig" + + config IKCONFIG +@@ -893,34 +858,6 @@ config UCLAMP_BUCKETS_COUNT + + If in doubt, use the default value. + +-config SCHED_LATENCY_NICE +- bool "Enable latency feature for FAIR tasks" +- default n +- help +- This feature use latency nice priority to decide if a cfs task can +- preempt the current running task. +- +- +-config SCHED_EAS +- bool "EAS scheduler optimization" +- default n +- help +- Check and migrate the CFS process to a more suitable CPU in the tick. +- +-config SCHED_RT_CAS +- bool "rt-cas optimization" +- depends on SCHED_EAS +- default n +- help +- RT task detects capacity during CPU selection +- +-config SCHED_RT_ACTIVE_LB +- bool "RT Capacity Aware Misfit Task" +- depends on SCHED_EAS +- default n +- help +- Check and migrate the RT process to a more suitable CPU in the tick. 
+- + endmenu + + # +diff --git a/init/init_task.c b/init/init_task.c +index 21d81449f..fd9e27185 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -78,9 +78,6 @@ struct task_struct init_task + .prio = MAX_PRIO - 20, + .static_prio = MAX_PRIO - 20, + .normal_prio = MAX_PRIO - 20, +-#ifdef CONFIG_SCHED_LATENCY_NICE +- .latency_prio = NICE_WIDTH - 20, +-#endif + .policy = SCHED_NORMAL, + .cpus_ptr = &init_task.cpus_mask, + .user_cpus_ptr = NULL, +diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c +index 7f1ef37c6..1811efcfb 100644 +--- a/kernel/bpf/arraymap.c ++++ b/kernel/bpf/arraymap.c +@@ -909,44 +909,22 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map, + struct file *map_file, int fd) + { + struct bpf_prog *prog = bpf_prog_get(fd); +- bool is_extended; + + if (IS_ERR(prog)) + return prog; + +- if (prog->type == BPF_PROG_TYPE_EXT || +- !bpf_prog_map_compatible(map, prog)) { ++ if (!bpf_prog_map_compatible(map, prog)) { + bpf_prog_put(prog); + return ERR_PTR(-EINVAL); + } + +- mutex_lock(&prog->aux->ext_mutex); +- is_extended = prog->aux->is_extended; +- if (!is_extended) +- prog->aux->prog_array_member_cnt++; +- mutex_unlock(&prog->aux->ext_mutex); +- if (is_extended) { +- /* Extended prog can not be tail callee. It's to prevent a +- * potential infinite loop like: +- * tail callee prog entry -> tail callee prog subprog -> +- * freplace prog entry --tailcall-> tail callee prog entry. +- */ +- bpf_prog_put(prog); +- return ERR_PTR(-EBUSY); +- } +- + return prog; + } + + static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) + { +- struct bpf_prog *prog = ptr; +- +- mutex_lock(&prog->aux->ext_mutex); +- prog->aux->prog_array_member_cnt--; +- mutex_unlock(&prog->aux->ext_mutex); + /* bpf_prog is freed after one RCU or tasks trace grace period */ +- bpf_prog_put(prog); ++ bpf_prog_put(ptr); + } + + static u32 prog_fd_array_sys_lookup_elem(void *ptr) +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 855523318..02f327f05 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -122,7 +122,6 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag + + INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); + mutex_init(&fp->aux->used_maps_mutex); +- mutex_init(&fp->aux->ext_mutex); + mutex_init(&fp->aux->dst_mutex); + + return fp; +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 848da1d19..f089a6163 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -3108,8 +3108,7 @@ static void bpf_tracing_link_release(struct bpf_link *link) + container_of(link, struct bpf_tracing_link, link.link); + + WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, +- tr_link->trampoline, +- tr_link->tgt_prog)); ++ tr_link->trampoline)); + + bpf_trampoline_put(tr_link->trampoline); + +@@ -3244,7 +3243,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, + * in prog->aux + * + * - if prog->aux->dst_trampoline is NULL, the program has already been +- * attached to a target and its initial target was cleared (below) ++ * attached to a target and its initial target was cleared (below) + * + * - if tgt_prog != NULL, the caller specified tgt_prog_fd + + * target_btf_id using the link_create API. 
+@@ -3319,7 +3318,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, + if (err) + goto out_unlock; + +- err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); ++ err = bpf_trampoline_link_prog(&link->link, tr); + if (err) { + bpf_link_cleanup(&link_primer); + link = NULL; +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c +index 7c0abf19b..e97aeda3a 100644 +--- a/kernel/bpf/trampoline.c ++++ b/kernel/bpf/trampoline.c +@@ -510,27 +510,7 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) + } + } + +-static int bpf_freplace_check_tgt_prog(struct bpf_prog *tgt_prog) +-{ +- struct bpf_prog_aux *aux = tgt_prog->aux; +- +- guard(mutex)(&aux->ext_mutex); +- if (aux->prog_array_member_cnt) +- /* Program extensions can not extend target prog when the target +- * prog has been updated to any prog_array map as tail callee. +- * It's to prevent a potential infinite loop like: +- * tgt prog entry -> tgt prog subprog -> freplace prog entry +- * --tailcall-> tgt prog entry. +- */ +- return -EBUSY; +- +- aux->is_extended = true; +- return 0; +-} +- +-static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, +- struct bpf_trampoline *tr, +- struct bpf_prog *tgt_prog) ++static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) + { + enum bpf_tramp_prog_type kind; + struct bpf_tramp_link *link_exiting; +@@ -551,9 +531,6 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, + /* Cannot attach extension if fentry/fexit are in use. */ + if (cnt) + return -EBUSY; +- err = bpf_freplace_check_tgt_prog(tgt_prog); +- if (err) +- return err; + tr->extension_prog = link->link.prog; + return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL, + link->link.prog->bpf_func); +@@ -580,21 +557,17 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, + return err; + } + +-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, +- struct bpf_trampoline *tr, +- struct bpf_prog *tgt_prog) ++int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) + { + int err; + + mutex_lock(&tr->mutex); +- err = __bpf_trampoline_link_prog(link, tr, tgt_prog); ++ err = __bpf_trampoline_link_prog(link, tr); + mutex_unlock(&tr->mutex); + return err; + } + +-static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, +- struct bpf_trampoline *tr, +- struct bpf_prog *tgt_prog) ++static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) + { + enum bpf_tramp_prog_type kind; + int err; +@@ -605,8 +578,6 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, + tr->extension_prog->bpf_func, NULL); + tr->extension_prog = NULL; +- guard(mutex)(&tgt_prog->aux->ext_mutex); +- tgt_prog->aux->is_extended = false; + return err; + } + hlist_del_init(&link->tramp_hlist); +@@ -615,14 +586,12 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + } + + /* bpf_trampoline_unlink_prog() should never fail. 
*/ +-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, +- struct bpf_trampoline *tr, +- struct bpf_prog *tgt_prog) ++int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) + { + int err; + + mutex_lock(&tr->mutex); +- err = __bpf_trampoline_unlink_prog(link, tr, tgt_prog); ++ err = __bpf_trampoline_unlink_prog(link, tr); + mutex_unlock(&tr->mutex); + return err; + } +@@ -637,7 +606,7 @@ static void bpf_shim_tramp_link_release(struct bpf_link *link) + if (!shim_link->trampoline) + return; + +- WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL)); ++ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline)); + bpf_trampoline_put(shim_link->trampoline); + } + +@@ -751,7 +720,7 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, + goto err; + } + +- err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL); ++ err = __bpf_trampoline_link_prog(&shim_link->link, tr); + if (err) + goto err; + +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 1a872b05d..d6a410231 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -18898,46 +18898,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env) + int i, ret, cnt, delta = 0; + + for (i = 0; i < insn_cnt; i++, insn++) { +- /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ +- if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || +- insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || +- insn->code == (BPF_ALU | BPF_MOD | BPF_K) || +- insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && +- insn->off == 1 && insn->imm == -1) { +- bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; +- bool isdiv = BPF_OP(insn->code) == BPF_DIV; +- struct bpf_insn *patchlet; +- struct bpf_insn chk_and_sdiv[] = { +- BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | +- BPF_NEG | BPF_K, insn->dst_reg, +- 0, 0, 0), +- }; +- struct bpf_insn chk_and_smod[] = { +- BPF_MOV32_IMM(insn->dst_reg, 0), +- }; +- +- patchlet = isdiv ? chk_and_sdiv : chk_and_smod; +- cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod); +- +- new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); +- if (!new_prog) +- return -ENOMEM; +- +- delta += cnt - 1; +- env->prog = prog = new_prog; +- insn = new_prog->insnsi + i + delta; +- continue; +- } +- +- /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */ ++ /* Make divide-by-zero exceptions impossible. */ + if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || + insn->code == (BPF_ALU | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; + bool isdiv = BPF_OP(insn->code) == BPF_DIV; +- bool is_sdiv = isdiv && insn->off == 1; +- bool is_smod = !isdiv && insn->off == 1; + struct bpf_insn *patchlet; + struct bpf_insn chk_and_div[] = { + /* [R,W]x div 0 -> 0 */ +@@ -18957,62 +18924,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env) + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), + }; +- struct bpf_insn chk_and_sdiv[] = { +- /* [R,W]x sdiv 0 -> 0 +- * LLONG_MIN sdiv -1 -> LLONG_MIN +- * INT_MIN sdiv -1 -> INT_MIN +- */ +- BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), +- BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | +- BPF_ADD | BPF_K, BPF_REG_AX, +- 0, 0, 1), +- BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | +- BPF_JGT | BPF_K, BPF_REG_AX, +- 0, 4, 1), +- BPF_RAW_INSN((is64 ? 
BPF_JMP : BPF_JMP32) | +- BPF_JEQ | BPF_K, BPF_REG_AX, +- 0, 1, 0), +- BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | +- BPF_MOV | BPF_K, insn->dst_reg, +- 0, 0, 0), +- /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ +- BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | +- BPF_NEG | BPF_K, insn->dst_reg, +- 0, 0, 0), +- BPF_JMP_IMM(BPF_JA, 0, 0, 1), +- *insn, +- }; +- struct bpf_insn chk_and_smod[] = { +- /* [R,W]x mod 0 -> [R,W]x */ +- /* [R,W]x mod -1 -> 0 */ +- BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), +- BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | +- BPF_ADD | BPF_K, BPF_REG_AX, +- 0, 0, 1), +- BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | +- BPF_JGT | BPF_K, BPF_REG_AX, +- 0, 3, 1), +- BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | +- BPF_JEQ | BPF_K, BPF_REG_AX, +- 0, 3 + (is64 ? 0 : 1), 1), +- BPF_MOV32_IMM(insn->dst_reg, 0), +- BPF_JMP_IMM(BPF_JA, 0, 0, 1), +- *insn, +- BPF_JMP_IMM(BPF_JA, 0, 0, 1), +- BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), +- }; + +- if (is_sdiv) { +- patchlet = chk_and_sdiv; +- cnt = ARRAY_SIZE(chk_and_sdiv); +- } else if (is_smod) { +- patchlet = chk_and_smod; +- cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0); +- } else { +- patchlet = isdiv ? chk_and_div : chk_and_mod; +- cnt = isdiv ? ARRAY_SIZE(chk_and_div) : +- ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); +- } ++ patchlet = isdiv ? chk_and_div : chk_and_mod; ++ cnt = isdiv ? ARRAY_SIZE(chk_and_div) : ++ ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); + + new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); + if (!new_prog) +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 90e9f31a3..9cb00ebe9 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -511,12 +511,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of, + */ + cred = of->file->f_cred; + tcred = get_task_cred(task); +-#ifdef CONFIG_HYPERHOLD +- if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && +- !uid_eq(cred->euid, GLOBAL_ROOT_UID) && +-#else + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && +-#endif + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) + ret = -EACCES; +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 58e7700ea..7ab11b459 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -1441,11 +1441,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, + if (!cpu_present(cpu)) + return -EINVAL; + +-#ifdef CONFIG_CPU_ISOLATION_OPT +- if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1) +- return -EBUSY; +-#endif +- + cpus_write_lock(); + + cpuhp_tasks_frozen = tasks_frozen; +@@ -3145,11 +3140,6 @@ EXPORT_SYMBOL(__cpu_active_mask); + struct cpumask __cpu_dying_mask __read_mostly; + EXPORT_SYMBOL(__cpu_dying_mask); + +-#ifdef CONFIG_CPU_ISOLATION_OPT +-struct cpumask __cpu_isolated_mask __read_mostly; +-EXPORT_SYMBOL(__cpu_isolated_mask); +-#endif +- + atomic_t __num_online_cpus __read_mostly; + EXPORT_SYMBOL(__num_online_cpus); + +@@ -3168,13 +3158,6 @@ void init_cpu_online(const struct cpumask *src) + cpumask_copy(&__cpu_online_mask, src); + } + +-#ifdef CONFIG_CPU_ISOLATION_OPT +-void init_cpu_isolated(const struct cpumask *src) +-{ +- cpumask_copy(&__cpu_isolated_mask, src); +-} +-#endif +- + void set_cpu_online(unsigned int cpu, bool online) + { + /* +diff --git a/kernel/cred.c b/kernel/cred.c +index 68e80acff..64404d51c 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + + #if 0 + #define kdebug(FMT, ...) 
\
+@@ -391,7 +390,6 @@ int commit_creds(struct cred *new)
+ struct task_struct *task = current;
+ const struct cred *old = task->real_cred;
+
+- CALL_HCK_LITE_HOOK(ced_commit_creds_lhck, new);
+ kdebug("commit_creds(%p{%ld})", new,
+ atomic_long_read(&new->usage));
+
+diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
+index f005c66f3..cd4701acc 100644
+--- a/kernel/dma/contiguous.c
++++ b/kernel/dma/contiguous.c
+@@ -215,6 +215,11 @@ static inline void __init dma_numa_cma_reserve(void)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
++ #if defined(CONFIG_ARCH_BSP) && defined(CONFIG_VENDOR_CMA)
++#ifdef CONFIG_64BIT
++extern __init int declare_heap_memory(void);
++#endif
++#endif
+ void __init dma_contiguous_reserve(phys_addr_t limit)
+ {
+ phys_addr_t selected_size = 0;
+@@ -226,6 +231,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
++#if defined(CONFIG_ARCH_BSP) && defined(CONFIG_VENDOR_CMA)
++#ifdef CONFIG_64BIT
++ declare_heap_memory();
++#endif
++#endif
++
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ selected_base = base_cmdline;
+@@ -316,6 +327,10 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+ return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+ }
+
++#ifdef CONFIG_ARCH_BSP
++EXPORT_SYMBOL(dma_alloc_from_contiguous);
++#endif
++
+ /**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+@@ -332,6 +347,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ return cma_release(dev_get_cma_area(dev), pages, count);
+ }
+
++#ifdef CONFIG_ARCH_BSP
++EXPORT_SYMBOL(dma_release_from_contiguous);
++#endif
++
+ static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
+ {
+ unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 513ad92c8..3540b2c9b 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -13,11 +13,6 @@
+ #include
+ #include
+ #include
+-
+-#ifdef CONFIG_QOS_CTRL
+-#include
+-#endif
+-
+ #include
+ #include
+ #include
+@@ -78,8 +73,6 @@
+ #include
+ #include
+ #include
+-#include
+-#include
+
+ /*
+ * The default value should be high enough to not crash a system that randomly
+@@ -820,8 +813,6 @@ void __noreturn do_exit(long code)
+ struct task_struct *tsk = current;
+ int group_dead;
+
+- CALL_HCK_LITE_HOOK(exit_jit_memory_lhck, current);
+-
+ WARN_ON(irqs_disabled());
+
+ synchronize_group_exit(tsk, code);
+@@ -837,11 +828,6 @@ void __noreturn do_exit(long code)
+
+ io_uring_files_cancel();
+ exit_signals(tsk); /* sets PF_EXITING */
+- sched_exit(tsk);
+-
+-#ifdef CONFIG_QOS_CTRL
+- sched_exit_qos_list(tsk);
+-#endif
+
+ /* sync mm's RSS info before statistics gathering */
+ if (tsk->mm)
+@@ -906,7 +892,6 @@ void __noreturn do_exit(long code)
+
+ exit_tasks_rcu_start();
+ exit_notify(tsk, group_dead);
+- CALL_HCK_LITE_HOOK(ced_exit_lhck, tsk);
+ proc_exit_connector(tsk);
+ mpol_put_task_policy(tsk);
+ #ifdef CONFIG_FUTEX
+diff --git a/kernel/fork.c b/kernel/fork.c
+index f51acd75f..23efaa2c4 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -100,9 +100,7 @@
+ #include
+ #include
+ #include
+-#ifdef CONFIG_MEM_PURGEABLE
+-#include
+-#endif
++
+ #include
+ #include
+ #include
+@@ -113,7 +111,6 @@
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/task.h>
+-#include
+
+ /*
+ * Minimum number of threads to boot the kernel
+@@ -796,9 +793,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+
+ static inline int mm_alloc_pgd(struct mm_struct *mm)
+ {
+-#ifdef CONFIG_MEM_PURGEABLE
+- mm_init_uxpgd(mm);
+-#endif
+ mm->pgd = pgd_alloc(mm);
+ if (unlikely(!mm->pgd))
+ return -ENOMEM;
+@@ -808,9 +802,6 @@ static inline void mm_free_pgd(struct mm_struct *mm)
+ {
+ pgd_free(mm, mm->pgd);
+-#ifdef CONFIG_MEM_PURGEABLE
+- mm_clear_uxpgd(mm);
+-#endif
+ }
+ #else
+ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -1125,11 +1116,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ if (err)
+ goto free_tsk;
+
+-#ifdef CONFIG_ACCESS_TOKENID
+- tsk->token = orig->token;
+- tsk->ftoken = 0;
+-#endif
+-
+ err = alloc_thread_stack_node(tsk, node);
+ if (err)
+ goto free_tsk;
+@@ -2792,7 +2778,6 @@ __latent_entropy struct task_struct *copy_process(
+ perf_event_free_task(p);
+ bad_fork_cleanup_policy:
+ lockdep_free_task(p);
+- free_task_load_ptrs(p);
+ #ifdef CONFIG_NUMA
+ mpol_put(p->mempolicy);
+ #endif
+@@ -2952,7 +2937,6 @@ pid_t kernel_clone(struct kernel_clone_args *args)
+ task_unlock(p);
+ }
+
+- CALL_HCK_LITE_HOOK(ced_kernel_clone_lhck, p);
+ wake_up_new_task(p);
+
+ /* forking complete and child started to run, tell ptracer */
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index 07e8e6233..9a2457498 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -25,14 +25,10 @@
+
+ #include <trace/events/sched.h>
+
+-#ifdef CONFIG_DFX_HUNGTASK
+-#include
+-#endif
+-
+ /*
+ * The number of tasks checked:
+ */
+-int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
++static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+
+ /*
+ * Limit number of tasks checked in a batch.
+@@ -51,16 +47,14 @@ unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_ + /* + * Zero (default value) means use sysctl_hung_task_timeout_secs: + */ +-unsigned long __read_mostly sysctl_hung_task_check_interval_secs; ++static unsigned long __read_mostly sysctl_hung_task_check_interval_secs; + +-int __read_mostly sysctl_hung_task_warnings = 10; ++static int __read_mostly sysctl_hung_task_warnings = 10; + + static int __read_mostly did_panic; +-#ifndef CONFIG_DFX_HUNGTASK + static bool hung_task_show_lock; + static bool hung_task_call_panic; + static bool hung_task_show_all_bt; +-#endif + + static struct task_struct *watchdog_task; + +@@ -78,16 +72,14 @@ static unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace; + * Should we panic (and reboot, if panic_timeout= is set) when a + * hung task is detected: + */ +-unsigned int __read_mostly sysctl_hung_task_panic = +- CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE; ++static unsigned int __read_mostly sysctl_hung_task_panic = ++ IS_ENABLED(CONFIG_BOOTPARAM_HUNG_TASK_PANIC); + + static int + hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr) + { + did_panic = 1; +-#ifdef CONFIG_DFX_HUNGTASK +- htbase_set_panic(did_panic); +-#endif ++ + return NOTIFY_DONE; + } + +@@ -95,7 +87,6 @@ static struct notifier_block panic_block = { + .notifier_call = hung_task_panic, + }; + +-#ifndef CONFIG_DFX_HUNGTASK + static void check_hung_task(struct task_struct *t, unsigned long timeout) + { + unsigned long switch_count = t->nvcsw + t->nivcsw; +@@ -234,7 +225,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) + if (hung_task_call_panic) + panic("hung_task: blocked tasks"); + } +-#endif + + static long hung_timeout_jiffies(unsigned long last_checked, + unsigned long timeout) +@@ -260,9 +250,7 @@ static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + goto out; + + wake_up_process(watchdog_task); +-#ifdef CONFIG_DFX_HUNGTASK +- htbase_set_timeout_secs(sysctl_hung_task_timeout_secs); +-#endif ++ + out: + return ret; + } +@@ -377,11 +365,7 @@ static int watchdog(void *dummy) + set_user_nice(current, 0); + + for ( ; ; ) { +-#ifdef CONFIG_DFX_HUNGTASK +- unsigned long timeout = HEARTBEAT_TIME; +-#else + unsigned long timeout = sysctl_hung_task_timeout_secs; +-#endif + unsigned long interval = sysctl_hung_task_check_interval_secs; + long t; + +@@ -392,11 +376,7 @@ static int watchdog(void *dummy) + if (t <= 0) { + if (!atomic_xchg(&reset_hung_task, 0) && + !hung_detector_suspended) +-#ifdef CONFIG_DFX_HUNGTASK +- htbase_check_tasks(timeout); +-#else + check_hung_uninterruptible_tasks(timeout); +-#endif + hung_last_checked = jiffies; + continue; + } +@@ -408,13 +388,6 @@ static int watchdog(void *dummy) + + static int __init hung_task_init(void) + { +-#ifdef CONFIG_DFX_HUNGTASK +- int ret = 0; +- +- ret = htbase_create_sysfs(); +- if (ret) +- pr_err("hungtask: create_sysfs_hungtask fail"); +-#endif + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + + /* Disable hung task detector on suspend */ +diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c +index 4db263deb..eb8628390 100644 +--- a/kernel/irq/cpuhotplug.c ++++ b/kernel/irq/cpuhotplug.c +@@ -12,7 +12,6 @@ + #include + #include + #include +-#include + #include + + #include "internals.h" +@@ -59,9 +58,6 @@ static bool migrate_one_irq(struct irq_desc *desc) + const struct cpumask *affinity; + bool brokeaff = false; + int err; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- struct cpumask 
available_cpus; +-#endif + + /* + * IRQ chip might be already torn down, but the irq descriptor is +@@ -114,17 +110,7 @@ static bool migrate_one_irq(struct irq_desc *desc) + if (maskchip && chip->irq_mask) + chip->irq_mask(d); + +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_copy(&available_cpus, affinity); +- cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask); +- affinity = &available_cpus; +-#endif +- + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { +-#ifdef CONFIG_CPU_ISOLATION_OPT +- const struct cpumask *default_affinity; +-#endif +- + /* + * If the interrupt is managed, then shut it down and leave + * the affinity untouched. +@@ -134,40 +120,16 @@ static bool migrate_one_irq(struct irq_desc *desc) + irq_shutdown_and_deactivate(desc); + return false; + } +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +- default_affinity = desc->affinity_hint ? : irq_default_affinity; +- /* +- * The order of preference for selecting a fallback CPU is +- * +- * (1) online and un-isolated CPU from default affinity +- * (2) online and un-isolated CPU +- * (3) online CPU +- */ +- cpumask_andnot(&available_cpus, cpu_online_mask, +- cpu_isolated_mask); +- if (cpumask_intersects(&available_cpus, default_affinity)) +- cpumask_and(&available_cpus, &available_cpus, +- default_affinity); +- else if (cpumask_empty(&available_cpus)) +- affinity = cpu_online_mask; +- +- /* +- * We are overriding the affinity with all online and +- * un-isolated cpus. irq_set_affinity_locked() call +- * below notify this mask to PM QOS affinity listener. +- * That results in applying the CPU_DMA_LATENCY QOS +- * to all the CPUs specified in the mask. But the low +- * level irqchip driver sets the affinity of an irq +- * to only one CPU. So pick only one CPU from the +- * prepared mask while overriding the user affinity. +- */ +- affinity = cpumask_of(cpumask_any(affinity)); +-#else + affinity = cpu_online_mask; +-#endif + brokeaff = true; + } ++ /* ++ * Do not set the force argument of irq_do_set_affinity() as this ++ * disables the masking of offline CPUs from the supplied affinity ++ * mask and therefore might keep/reassign the irq to the outgoing ++ * CPU. ++ */ ++ err = irq_do_set_affinity(d, affinity, false); + + /* + * If there are online CPUs in the affinity mask, but they have no +@@ -180,17 +142,8 @@ static bool migrate_one_irq(struct irq_desc *desc) + + affinity = cpu_online_mask; + brokeaff = true; +- /* +- * Do not set the force argument of irq_do_set_affinity() as this +- * disables the masking of offline CPUs from the supplied affinity +- * mask and therefore might keep/reassign the irq to the outgoing +- * CPU. 
+- */ +-#ifdef CONFIG_CPU_ISOLATION_OPT +- err = irq_set_affinity_locked(d, affinity, false); +-#else ++ + err = irq_do_set_affinity(d, affinity, false); +-#endif + } + + if (err) { +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c +index 9ae838689..623b8136e 100644 +--- a/kernel/irq/proc.c ++++ b/kernel/irq/proc.c +@@ -154,12 +154,6 @@ static ssize_t write_irq_affinity(int type, struct file *file, + if (err) + goto free_cpumask; + +-#ifdef CONFIG_CPU_ISOLATION_OPT +- if (cpumask_subset(new_value, cpu_isolated_mask)) { +- err = -EINVAL; +- goto free_cpumask; +- } +-#endif + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least +diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c +index 945731754..15781acaa 100644 +--- a/kernel/nsproxy.c ++++ b/kernel/nsproxy.c +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + + static struct kmem_cache *nsproxy_cachep; + +@@ -241,11 +240,6 @@ void switch_task_namespaces(struct task_struct *p, struct nsproxy *new) + { + struct nsproxy *ns; + +- int ret = 0; +- CALL_HCK_LITE_HOOK(ced_switch_task_namespaces_lhck, new); +- CALL_HCK_LITE_HOOK(ced_switch_task_namespaces_permission_lhck, new, &ret); +- if (ret) +- return; + might_sleep(); + + task_lock(p); +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index 3a6c3878c..976092b7b 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -32,7 +32,3 @@ obj-y += core.o + obj-y += fair.o + obj-y += build_policy.o + obj-y += build_utility.o +-obj-$(CONFIG_SCHED_WALT) += walt.o +-obj-$(CONFIG_SCHED_RTG) += rtg/ +-obj-$(CONFIG_SCHED_RUNNING_AVG) += sched_avg.o +-obj-$(CONFIG_SCHED_CORE_CTRL) += core_ctl.o +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 5db36a3b2..8c5f75af0 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -64,11 +64,7 @@ + #include + #include + #include +-#include +-#include +-#ifdef CONFIG_QOS_CTRL +-#include +-#endif ++ + #ifdef CONFIG_PREEMPT_DYNAMIC + # ifdef CONFIG_GENERIC_ENTRY + # include +@@ -95,8 +91,6 @@ + #include "pelt.h" + #include "smp.h" + #include "stats.h" +-#include "walt.h" +-#include "rtg/rtg.h" + + #include "../workqueue_internal.h" + #include "../../io_uring/io-wq.h" +@@ -1333,49 +1327,6 @@ static void set_load_weight(struct task_struct *p, bool update_load) + p->se.load = lw; + } + +-#ifdef CONFIG_SCHED_LATENCY_NICE +-static void set_latency_weight(struct task_struct *p) +-{ +- p->se.latency_weight = sched_latency_to_weight[p->latency_prio]; +-} +- +-static void __setscheduler_latency(struct task_struct *p, +- const struct sched_attr *attr) +-{ +- if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) { +- p->latency_prio = NICE_TO_LATENCY(attr->sched_latency_nice); +- set_latency_weight(p); +- } +-} +- +-static int latency_nice_validate(struct task_struct *p, bool user, +- const struct sched_attr *attr) +-{ +- if (attr->sched_latency_nice > MAX_LATENCY_NICE) +- return -EINVAL; +- if (attr->sched_latency_nice < MIN_LATENCY_NICE) +- return -EINVAL; +- /* Use the same security checks as NICE */ +- if (user && attr->sched_latency_nice < LATENCY_TO_NICE(p->latency_prio) +- && !capable(CAP_SYS_NICE)) +- return -EPERM; +- +- return 0; +-} +-#else +-static void +-__setscheduler_latency(struct task_struct *p, const struct sched_attr *attr) +-{ +-} +- +-static inline +-int latency_nice_validate(struct task_struct *p, bool user, +- const struct sched_attr *attr) +-{ +- return -EOPNOTSUPP; +-} +-#endif +- + #ifdef CONFIG_UCLAMP_TASK + /* + * Serializes 
updates of utilization clamp values +@@ -2567,17 +2518,8 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, + lockdep_assert_rq_held(rq); + + deactivate_task(rq, p, DEQUEUE_NOCLOCK); +-#ifdef CONFIG_SCHED_WALT +- double_lock_balance(rq, cpu_rq(new_cpu)); +- if (!(rq->clock_update_flags & RQCF_UPDATED)) +- update_rq_clock(rq); +-#endif + set_task_cpu(p, new_cpu); +-#ifdef CONFIG_SCHED_WALT +- double_rq_unlock(cpu_rq(new_cpu), rq); +-#else + rq_unlock(rq, rf); +-#endif + + rq = cpu_rq(new_cpu); + +@@ -3176,9 +3118,6 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p, + bool kthread = p->flags & PF_KTHREAD; + unsigned int dest_cpu; + int ret = 0; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_t allowed_mask; +-#endif + + update_rq_clock(rq); + +@@ -3224,20 +3163,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p, + goto out; + } + } +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(&allowed_mask, ctx->new_mask, cpu_isolated_mask); +- cpumask_and(&allowed_mask, &allowed_mask, cpu_valid_mask); + +- dest_cpu = cpumask_any(&allowed_mask); +- if (dest_cpu >= nr_cpu_ids) { +- cpumask_and(&allowed_mask, cpu_valid_mask, ctx->new_mask); +- dest_cpu = cpumask_any(&allowed_mask); +- if (!cpumask_intersects(ctx->new_mask, cpu_valid_mask)) { +- ret = -EINVAL; +- goto out; +- } +- } +-#else + /* + * Picking a ~random cpu helps in cases where we are changing affinity + * for groups of tasks (ie. cpuset), so that load balancing is not +@@ -3248,16 +3174,10 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p, + ret = -EINVAL; + goto out; + } +-#endif + + __do_set_cpus_allowed(p, ctx); + +-#ifdef CONFIG_CPU_ISOLATION_OPT +- if (cpumask_test_cpu(task_cpu(p), &allowed_mask)) +- goto out; +-#else + return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); +-#endif + + out: + task_rq_unlock(rq, p, rf); +@@ -3471,7 +3391,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) + rseq_migrate(p); + sched_mm_cid_migrate_from(p); + perf_event_task_migrate(p); +- fixup_busy_time(p, new_cpu); + } + + __set_task_cpu(p, new_cpu); +@@ -3632,19 +3551,12 @@ EXPORT_SYMBOL_GPL(kick_process); + * select_task_rq() below may allow selection of !active CPUs in order + * to satisfy the above rules. + */ +-#ifdef CONFIG_CPU_ISOLATION_OPT +-static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso) +-#else + static int select_fallback_rq(int cpu, struct task_struct *p) +-#endif + { + int nid = cpu_to_node(cpu); + const struct cpumask *nodemask = NULL; +- enum { cpuset, possible, fail, bug } state = cpuset; ++ enum { cpuset, possible, fail } state = cpuset; + int dest_cpu; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- int isolated_candidate = -1; +-#endif + + /* + * If the node that the CPU is on has been offlined, cpu_to_node() +@@ -3656,8 +3568,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + + /* Look for allowed, online CPU in same node. 
*/ + for_each_cpu(dest_cpu, nodemask) { +- if (cpu_isolated(dest_cpu)) +- continue; + if (is_cpu_allowed(p, dest_cpu)) + return dest_cpu; + } +@@ -3668,18 +3578,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + for_each_cpu(dest_cpu, p->cpus_ptr) { + if (!is_cpu_allowed(p, dest_cpu)) + continue; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- if (cpu_isolated(dest_cpu)) { +- if (allow_iso) +- isolated_candidate = dest_cpu; +- continue; +- } +- goto out; +- } + +- if (isolated_candidate != -1) { +- dest_cpu = isolated_candidate; +-#endif + goto out; + } + +@@ -3702,15 +3601,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + state = fail; + break; + case fail: +-#ifdef CONFIG_CPU_ISOLATION_OPT +- allow_iso = true; +- state = bug; +- break; +-#else +- /* fall through; */ +-#endif +- +- case bug: + BUG(); + break; + } +@@ -3738,10 +3628,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p) + static inline + int select_task_rq(struct task_struct *p, int cpu, int wake_flags) + { +-#ifdef CONFIG_CPU_ISOLATION_OPT +- bool allow_isolated = (p->flags & PF_KTHREAD); +-#endif +- + lockdep_assert_held(&p->pi_lock); + + if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) +@@ -3759,14 +3645,8 @@ int select_task_rq(struct task_struct *p, int cpu, int wake_flags) + * [ this allows ->select_task() to simply return task_cpu(p) and + * not worry about this generic constraint ] + */ +-#ifdef CONFIG_CPU_ISOLATION_OPT +- if (unlikely(!is_cpu_allowed(p, cpu)) || +- (cpu_isolated(cpu) && !allow_isolated)) +- cpu = select_fallback_rq(task_cpu(p), p, allow_isolated); +-#else + if (unlikely(!is_cpu_allowed(p, cpu))) + cpu = select_fallback_rq(task_cpu(p), p); +-#endif + + return cpu; + } +@@ -4282,26 +4162,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) + * accesses to the task state; see try_to_wake_up() and set_current_state(). + */ + +-#ifdef CONFIG_SMP +-#ifdef CONFIG_SCHED_WALT +-/* utility function to update walt signals at wakeup */ +-static inline void walt_try_to_wake_up(struct task_struct *p) +-{ +- struct rq *rq = cpu_rq(task_cpu(p)); +- struct rq_flags rf; +- u64 wallclock; +- +- rq_lock_irqsave(rq, &rf); +- wallclock = sched_ktime_clock(); +- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); +- update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); +- rq_unlock_irqrestore(rq, &rf); +-} +-#else +-#define walt_try_to_wake_up(a) {} +-#endif +-#endif +- + /** + * try_to_wake_up - wake up a thread + * @p: the thread to be awakened +@@ -4429,7 +4289,6 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) + */ + smp_acquire__after_ctrl_dep(); + +- walt_try_to_wake_up(p); + /* + * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq + * == 0), which means we need to do an enqueue, change p->state to +@@ -4677,9 +4536,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + #ifdef CONFIG_SMP + p->wake_entry.u_flags = CSD_TYPE_TTWU; + p->migration_pending = NULL; +-#endif +-#ifdef CONFIG_SCHED_RTG +- p->rtg_depth = 0; + #endif + init_sched_mm_cid(p); + } +@@ -4872,12 +4728,6 @@ late_initcall(sched_core_sysctl_init); + */ + int sched_fork(unsigned long clone_flags, struct task_struct *p) + { +- init_new_task_load(p); +- +-#ifdef CONFIG_QOS_CTRL +- init_task_qos(p); +-#endif +- + __sched_fork(clone_flags, p); + /* + * We mark the process as NEW here. 
This guarantees that +@@ -4891,11 +4741,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) + */ + p->prio = current->normal_prio; + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- /* Propagate the parent's latency requirements to the child as well */ +- p->latency_prio = current->latency_prio; +-#endif +- + uclamp_fork(p); + + /* +@@ -4904,14 +4749,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) + if (unlikely(p->sched_reset_on_fork)) { + if (task_has_dl_policy(p) || task_has_rt_policy(p)) { + p->policy = SCHED_NORMAL; +-#ifdef CONFIG_SCHED_RTG +- if (current->rtg_depth != 0) +- p->static_prio = current->static_prio; +- else +- p->static_prio = NICE_TO_PRIO(0); +-#else + p->static_prio = NICE_TO_PRIO(0); +-#endif + p->rt_priority = 0; + } else if (PRIO_TO_NICE(p->static_prio) < 0) + p->static_prio = NICE_TO_PRIO(0); +@@ -4919,11 +4757,6 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) + p->prio = p->normal_prio = p->static_prio; + set_load_weight(p, false); + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- p->latency_prio = NICE_TO_LATENCY(0); +- set_latency_weight(p); +-#endif +- + /* + * We don't need the reset flag anymore after the fork. It has + * fulfilled its duty: +@@ -5037,8 +4870,6 @@ void wake_up_new_task(struct task_struct *p) + update_rq_clock(rq); + post_init_entity_util_avg(p); + +- mark_task_starting(p); +- + activate_task(rq, p, ENQUEUE_NOCLOCK); + trace_sched_wakeup_new(p); + wakeup_preempt(rq, p, WF_FORK); +@@ -5673,7 +5504,7 @@ void sched_exec(void) + if (dest_cpu == smp_processor_id()) + return; + +- if (unlikely(!cpu_active(dest_cpu) && likely(!cpu_isolated(dest_cpu)))) ++ if (unlikely(!cpu_active(dest_cpu))) + return; + + arg = (struct migration_arg){ p, dest_cpu }; +@@ -5809,7 +5640,6 @@ void scheduler_tick(void) + struct rq *rq = cpu_rq(cpu); + struct task_struct *curr; + struct rq_flags rf; +- u64 wallclock; + unsigned long thermal_pressure; + u64 resched_latency; + +@@ -5820,10 +5650,6 @@ void scheduler_tick(void) + + rq_lock(rq, &rf); + +- set_window_start(rq); +- wallclock = sched_ktime_clock(); +- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); +- + curr = rq->curr; + psi_account_irqtime(rq, curr, NULL); + +@@ -5838,9 +5664,7 @@ void scheduler_tick(void) + task_tick_mm_cid(rq, curr); + + rq_unlock(rq, &rf); +-#ifdef CONFIG_SCHED_RTG +- sched_update_rtg_tick(curr); +-#endif ++ + if (sched_feat(LATENCY_WARN) && resched_latency) + resched_latency_warn(cpu, resched_latency); + +@@ -5852,11 +5676,6 @@ void scheduler_tick(void) + #ifdef CONFIG_SMP + rq->idle_balance = idle_cpu(cpu); + trigger_load_balance(rq); +- +-#ifdef CONFIG_SCHED_EAS +- if (curr->sched_class->check_for_migration) +- curr->sched_class->check_for_migration(rq, curr); +-#endif + #endif + } + +@@ -6764,7 +6583,6 @@ static void __sched notrace __schedule(unsigned int sched_mode) + struct rq_flags rf; + struct rq *rq; + int cpu; +- u64 wallclock; + + cpu = smp_processor_id(); + rq = cpu_rq(cpu); +@@ -6844,18 +6662,11 @@ static void __sched notrace __schedule(unsigned int sched_mode) + next = pick_next_task(rq, prev, &rf); + clear_tsk_need_resched(prev); + clear_preempt_need_resched(); +- wallclock = sched_ktime_clock(); + #ifdef CONFIG_SCHED_DEBUG + rq->last_seen_need_resched_ns = 0; + #endif + + if (likely(prev != next)) { +-#ifdef CONFIG_SCHED_WALT +- if (!prev->on_rq) +- prev->last_sleep_ts = wallclock; +-#endif +- update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0); +- update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0); + 
rq->nr_switches++; + /* + * RCU users of rcu_dereference(rq->curr) may not see +@@ -6888,7 +6699,6 @@ static void __sched notrace __schedule(unsigned int sched_mode) + /* Also unlocks the rq: */ + rq = context_switch(rq, prev, next, &rf); + } else { +- update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0); + rq_unpin_lock(rq, &rf); + __balance_callbacks(rq); + raw_spin_rq_unlock_irq(rq); +@@ -7920,11 +7730,6 @@ static int __sched_setscheduler(struct task_struct *p, + goto change; + if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) + goto change; +-#ifdef CONFIG_SCHED_LATENCY_NICE +- if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE && +- attr->sched_latency_nice != LATENCY_TO_NICE(p->latency_prio)) +- goto change; +-#endif + + p->sched_reset_on_fork = reset_on_fork; + retval = 0; +@@ -8224,11 +8029,6 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a + size < SCHED_ATTR_SIZE_VER1) + return -EINVAL; + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- if ((attr->sched_flags & SCHED_FLAG_LATENCY_NICE) && +- size < SCHED_ATTR_SIZE_VER2) +- return -EINVAL; +-#endif + /* + * XXX: Do we want to be lenient like existing syscalls; or do we want + * to be strict and return an error on out-of-bounds values? +@@ -8466,10 +8266,6 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, + get_params(p, &kattr); + kattr.sched_flags &= SCHED_FLAG_ALL; + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- kattr.sched_latency_nice = LATENCY_TO_NICE(p->latency_prio); +-#endif +- + #ifdef CONFIG_UCLAMP_TASK + /* + * This could race with another potential updater, but this is fine +@@ -8520,10 +8316,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) + { + int retval; + cpumask_var_t cpus_allowed, new_mask; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- int dest_cpu; +- cpumask_t allowed_mask; +-#endif ++ + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) + return -ENOMEM; + +@@ -8541,19 +8334,11 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) + retval = dl_task_check_affinity(p, new_mask); + if (retval) + goto out_free_new_mask; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask); +- dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask); +- if (dest_cpu < nr_cpu_ids) { +-#endif ++ + retval = __set_cpus_allowed_ptr(p, ctx); + if (retval) + goto out_free_new_mask; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- } else { +- retval = -EINVAL; +- } +-#endif ++ + cpuset_cpus_allowed(p, cpus_allowed); + if (!cpumask_subset(new_mask, cpus_allowed)) { + /* +@@ -8574,7 +8359,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) + bool empty = !cpumask_and(new_mask, new_mask, + ctx->user_mask); + +- if (empty) ++ if (WARN_ON_ONCE(empty)) + cpumask_copy(new_mask, cpus_allowed); + } + __set_cpus_allowed_ptr(p, ctx); +@@ -8594,10 +8379,6 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) + struct cpumask *user_mask; + struct task_struct *p; + int retval; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- int dest_cpu; +- cpumask_t allowed_mask; +-#endif + + rcu_read_lock(); + +@@ -8710,16 +8491,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) + + raw_spin_lock_irqsave(&p->pi_lock, flags); + cpumask_and(mask, &p->cpus_mask, cpu_active_mask); +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +- /* The userspace tasks are forbidden to run on +- * isolated CPUs. So exclude isolated CPUs from +- * the getaffinity. 
+- */ +- if (!(p->flags & PF_KTHREAD)) +- cpumask_andnot(mask, mask, cpu_isolated_mask); +-#endif +- + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + out_unlock: +@@ -9664,9 +9435,6 @@ static int __balance_push_cpu_stop(void *arg) + struct rq *rq = this_rq(); + struct rq_flags rf; + int cpu; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- bool allow_isolated = (p->flags & PF_KTHREAD); +-#endif + + raw_spin_lock_irq(&p->pi_lock); + rq_lock(rq, &rf); +@@ -9674,11 +9442,7 @@ static int __balance_push_cpu_stop(void *arg) + update_rq_clock(rq); + + if (task_rq(p) == rq && task_on_rq_queued(p)) { +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpu = select_fallback_rq(rq->cpu, p, allow_isolated); +-#else + cpu = select_fallback_rq(rq->cpu, p); +-#endif + rq = __migrate_task(rq, &rf, p, cpu); + } + +@@ -9692,71 +9456,6 @@ static int __balance_push_cpu_stop(void *arg) + + static DEFINE_PER_CPU(struct cpu_stop_work, push_work); + +-static struct task_struct *__pick_migrate_task(struct rq *rq) +-{ +- const struct sched_class *class; +- struct task_struct *next; +- +- for_each_class(class) { +- next = class->pick_next_task(rq); +- if (next) { +- next->sched_class->put_prev_task(rq, next); +- return next; +- } +- } +- +- /* The idle class should always have a runnable task */ +- BUG(); +-} +- +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-/* +- * Remove a task from the runqueue and pretend that it's migrating. This +- * should prevent migrations for the detached task and disallow further +- * changes to tsk_cpus_allowed. +- */ +-static void +-detach_one_task_core(struct task_struct *p, struct rq *rq, +- struct list_head *tasks) +-{ +- lockdep_assert_held(&rq->__lock); +- +- p->on_rq = TASK_ON_RQ_MIGRATING; +- deactivate_task(rq, p, 0); +- list_add(&p->se.group_node, tasks); +-} +- +-static void attach_tasks_core(struct list_head *tasks, struct rq *rq) +-{ +- struct task_struct *p; +- +- lockdep_assert_held(&rq->__lock); +- +- while (!list_empty(tasks)) { +- p = list_first_entry(tasks, struct task_struct, se.group_node); +- list_del_init(&p->se.group_node); +- +- BUG_ON(task_rq(p) != rq); +- activate_task(rq, p, 0); +- p->on_rq = TASK_ON_RQ_QUEUED; +- } +-} +- +-#else +- +-static void +-detach_one_task_core(struct task_struct *p, struct rq *rq, +- struct list_head *tasks) +-{ +-} +- +-static void attach_tasks_core(struct list_head *tasks, struct rq *rq) +-{ +-} +- +-#endif /* CONFIG_CPU_ISOLATION_OPT */ +- + /* + * Ensure we only run per-cpu kthreads once the CPU goes !active. + * +@@ -9869,380 +9568,8 @@ static inline void balance_push_set(int cpu, bool on) + static inline void balance_hotplug_wait(void) + { + } +-#endif /* CONFIG_HOTPLUG_CPU */ +- +- +-/* +- * Migrate all tasks (not pinned if pinned argument say so) from the rq, +- * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq(). +- * +- * Called with rq->lock held even though we'er in stop_machine() and +- * there's no concurrency possible, we hold the required locks anyway +- * because of lock validation efforts. 
+- */ +-void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, +- bool migrate_pinned_tasks) +-{ +- struct rq *rq = dead_rq; +- struct task_struct *next, *stop = rq->stop; +- struct rq_flags orf = *rf; +- int dest_cpu; +- unsigned int num_pinned_kthreads = 1; /* this thread */ +- LIST_HEAD(tasks); +- cpumask_t avail_cpus; +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask); +-#else +- cpumask_copy(&avail_cpus, cpu_online_mask); +-#endif +- +- /* +- * Fudge the rq selection such that the below task selection loop +- * doesn't get stuck on the currently eligible stop task. +- * +- * We're currently inside stop_machine() and the rq is either stuck +- * in the stop_machine_cpu_stop() loop, or we're executing this code, +- * either way we should never end up calling schedule() until we're +- * done here. +- */ +- rq->stop = NULL; +- +- /* +- * put_prev_task() and pick_next_task() sched +- * class method both need to have an up-to-date +- * value of rq->clock[_task] +- */ +- update_rq_clock(rq); +- +- for (;;) { +- /* +- * There's this thread running, bail when that's the only +- * remaining thread. +- */ +- if (rq->nr_running == 1) +- break; +- +- next = __pick_migrate_task(rq); +- +- if (!migrate_pinned_tasks && next->flags & PF_KTHREAD && +- !cpumask_intersects(&avail_cpus, &next->cpus_mask)) { +- detach_one_task_core(next, rq, &tasks); +- num_pinned_kthreads += 1; +- continue; +- } +- +- /* +- * Rules for changing task_struct::cpus_mask are holding +- * both pi_lock and rq->lock, such that holding either +- * stabilizes the mask. +- * +- * Drop rq->lock is not quite as disastrous as it usually is +- * because !cpu_active at this point, which means load-balance +- * will not interfere. Also, stop-machine. +- */ +- rq_unlock(rq, rf); +- raw_spin_lock(&next->pi_lock); +- rq_relock(rq, rf); +- if (!(rq->clock_update_flags & RQCF_UPDATED)) +- update_rq_clock(rq); +- +- /* +- * Since we're inside stop-machine, _nothing_ should have +- * changed the task, WARN if weird stuff happened, because in +- * that case the above rq->lock drop is a fail too. +- * However, during cpu isolation the load balancer might have +- * interferred since we don't stop all CPUs. Ignore warning for +- * this case. +- */ +- if (task_rq(next) != rq || !task_on_rq_queued(next)) { +- WARN_ON(migrate_pinned_tasks); +- raw_spin_unlock(&next->pi_lock); +- continue; +- } +- +- /* Find suitable destination for @next, with force if needed. 
*/ +-#ifdef CONFIG_CPU_ISOLATION_OPT +- dest_cpu = select_fallback_rq(dead_rq->cpu, next, false); +-#else +- dest_cpu = select_fallback_rq(dead_rq->cpu, next); +-#endif +- rq = __migrate_task(rq, rf, next, dest_cpu); +- if (rq != dead_rq) { +- rq_unlock(rq, rf); +- rq = dead_rq; +- *rf = orf; +- rq_relock(rq, rf); +- if (!(rq->clock_update_flags & RQCF_UPDATED)) +- update_rq_clock(rq); +- } +- raw_spin_unlock(&next->pi_lock); +- } +- +- rq->stop = stop; +- +- if (num_pinned_kthreads > 1) +- attach_tasks_core(&tasks, rq); +-} +- +-#ifdef CONFIG_SCHED_EAS +-static void clear_eas_migration_request(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- unsigned long flags; +- +- clear_reserved(cpu); +- if (rq->push_task) { +- struct task_struct *push_task = NULL; +- +- raw_spin_lock_irqsave(&rq->__lock, flags); +- if (rq->push_task) { +- clear_reserved(rq->push_cpu); +- push_task = rq->push_task; +- rq->push_task = NULL; +- } +- rq->active_balance = 0; +- raw_spin_unlock_irqrestore(&rq->__lock, flags); +- if (push_task) +- put_task_struct(push_task); +- } +-} +-#else +-static inline void clear_eas_migration_request(int cpu) {} +-#endif +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-int do_isolation_work_cpu_stop(void *data) +-{ +- unsigned int cpu = smp_processor_id(); +- struct rq *rq = cpu_rq(cpu); +- struct rq_flags rf; +- +- watchdog_disable(cpu); +- +- local_irq_disable(); +- +- irq_migrate_all_off_this_cpu(); +- +- flush_smp_call_function_queue(); +- +- /* Update our root-domain */ +- rq_lock(rq, &rf); +- +- /* +- * Temporarily mark the rq as offline. This will allow us to +- * move tasks off the CPU. +- */ +- if (rq->rd) { +- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); +- set_rq_offline(rq); +- } +- +- migrate_tasks(rq, &rf, false); +- +- if (rq->rd) +- set_rq_online(rq); +- rq_unlock(rq, &rf); +- +- clear_eas_migration_request(cpu); +- local_irq_enable(); +- return 0; +-} +- +-int do_unisolation_work_cpu_stop(void *data) +-{ +- watchdog_enable(smp_processor_id()); +- return 0; +-} +- +-static void sched_update_group_capacities(int cpu) +-{ +- struct sched_domain *sd; +- +- mutex_lock(&sched_domains_mutex); +- rcu_read_lock(); +- +- for_each_domain(cpu, sd) { +- int balance_cpu = group_balance_cpu(sd->groups); +- +- init_sched_groups_capacity(cpu, sd); +- /* +- * Need to ensure this is also called with balancing +- * cpu. +- */ +- if (cpu != balance_cpu) +- init_sched_groups_capacity(balance_cpu, sd); +- } +- +- rcu_read_unlock(); +- mutex_unlock(&sched_domains_mutex); +-} +- +-static unsigned int cpu_isolation_vote[NR_CPUS]; +- +-int sched_isolate_count(const cpumask_t *mask, bool include_offline) +-{ +- cpumask_t count_mask = CPU_MASK_NONE; +- +- if (include_offline) { +- cpumask_complement(&count_mask, cpu_online_mask); +- cpumask_or(&count_mask, &count_mask, cpu_isolated_mask); +- cpumask_and(&count_mask, &count_mask, mask); +- } else { +- cpumask_and(&count_mask, mask, cpu_isolated_mask); +- } +- +- return cpumask_weight(&count_mask); +-} +- +-/* +- * 1) CPU is isolated and cpu is offlined: +- * Unisolate the core. +- * 2) CPU is not isolated and CPU is offlined: +- * No action taken. +- * 3) CPU is offline and request to isolate +- * Request ignored. +- * 4) CPU is offline and isolated: +- * Not a possible state. 
+- * 5) CPU is online and request to isolate +- * Normal case: Isolate the CPU +- * 6) CPU is not isolated and comes back online +- * Nothing to do +- * +- * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY +- * calling sched_unisolate_cpu() on a CPU that the client previously isolated. +- * Client is also responsible for unisolating when a core goes offline +- * (after CPU is marked offline). +- */ +- static void calc_load_migrate(struct rq *rq); +-int sched_isolate_cpu(int cpu) +-{ +- struct rq *rq; +- cpumask_t avail_cpus; +- int ret_code = 0; +- u64 start_time = 0; +- +- if (trace_sched_isolate_enabled()) +- start_time = sched_clock(); +- +- cpu_maps_update_begin(); +- +- cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask); +- +- if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu) || +- !cpu_online(cpu) || cpu >= NR_CPUS) { +- ret_code = -EINVAL; +- goto out; +- } +- +- rq = cpu_rq(cpu); +- +- if (++cpu_isolation_vote[cpu] > 1) +- goto out; +- +- /* We cannot isolate ALL cpus in the system */ +- if (cpumask_weight(&avail_cpus) == 1) { +- --cpu_isolation_vote[cpu]; +- ret_code = -EINVAL; +- goto out; +- } +- +- /* +- * There is a race between watchdog being enabled by hotplug and +- * core isolation disabling the watchdog. When a CPU is hotplugged in +- * and the hotplug lock has been released the watchdog thread might +- * not have run yet to enable the watchdog. +- * We have to wait for the watchdog to be enabled before proceeding. +- */ +- if (!watchdog_configured(cpu)) { +- msleep(20); +- if (!watchdog_configured(cpu)) { +- --cpu_isolation_vote[cpu]; +- ret_code = -EBUSY; +- goto out; +- } +- } +- +- set_cpu_isolated(cpu, true); +- cpumask_clear_cpu(cpu, &avail_cpus); +- +- /* Migrate timers */ +- //smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1); +- smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1); +- +- watchdog_disable(cpu); +- irq_lock_sparse(); +- stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0); +- irq_unlock_sparse(); +- +- calc_load_migrate(rq); +- update_max_interval(); +- sched_update_group_capacities(cpu); +- +-out: +- cpu_maps_update_done(); +- trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0], +- start_time, 1); +- return ret_code; +-} +- +-/* +- * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY +- * calling sched_unisolate_cpu() on a CPU that the client previously isolated. +- * Client is also responsible for unisolating when a core goes offline +- * (after CPU is marked offline). 
+- */ +-int sched_unisolate_cpu_unlocked(int cpu) +-{ +- int ret_code = 0; +- u64 start_time = 0; +- +- if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_possible(cpu) +- || cpu >= NR_CPUS) { +- ret_code = -EINVAL; +- goto out; +- } +- +- if (trace_sched_isolate_enabled()) +- start_time = sched_clock(); +- +- if (!cpu_isolation_vote[cpu]) { +- ret_code = -EINVAL; +- goto out; +- } +- +- if (--cpu_isolation_vote[cpu]) +- goto out; +- +- set_cpu_isolated(cpu, false); +- update_max_interval(); +- sched_update_group_capacities(cpu); +- +- if (cpu_online(cpu)) { +- stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0); +- +- /* Kick CPU to immediately do load balancing */ +- if (!atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(cpu))) +- smp_send_reschedule(cpu); +- } +- +-out: +- trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0], +- start_time, 0); +- return ret_code; +-} +- +-int sched_unisolate_cpu(int cpu) +-{ +- int ret_code; +- +- cpu_maps_update_begin(); +- ret_code = sched_unisolate_cpu_unlocked(cpu); +- cpu_maps_update_done(); +- return ret_code; +-} + +-#endif /* CONFIG_CPU_ISOLATION_OPT */ ++#endif /* CONFIG_HOTPLUG_CPU */ + + void set_rq_online(struct rq *rq) + { +@@ -10465,11 +9792,6 @@ int sched_cpu_deactivate(unsigned int cpu) + static void sched_rq_cpu_starting(unsigned int cpu) + { + struct rq *rq = cpu_rq(cpu); +- unsigned long flags; +- +- raw_spin_lock_irqsave(&rq->__lock, flags); +- set_window_start(rq); +- raw_spin_unlock_irqrestore(&rq->__lock, flags); + + rq->calc_load_update = calc_load_update; + update_max_interval(); +@@ -10480,7 +9802,6 @@ int sched_cpu_starting(unsigned int cpu) + sched_core_cpu_starting(cpu); + sched_rq_cpu_starting(cpu); + sched_tick_start(cpu); +- clear_eas_migration_request(cpu); + return 0; + } + +@@ -10554,8 +9875,6 @@ int sched_cpu_dying(unsigned int cpu) + } + rq_unlock_irqrestore(rq, &rf); + +- clear_eas_migration_request(cpu); +- + calc_load_migrate(rq); + update_max_interval(); + hrtick_clear(rq); +@@ -10577,8 +9896,6 @@ void __init sched_init_smp(void) + sched_init_domains(cpu_active_mask); + mutex_unlock(&sched_domains_mutex); + +- update_cluster_topology(); +- + /* Move init over to a non-isolated CPU */ + if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) + BUG(); +@@ -10639,8 +9956,6 @@ void __init sched_init(void) + + wait_bit_init(); + +- init_clusters(); +- + #ifdef CONFIG_FAIR_GROUP_SCHED + ptr += 2 * nr_cpu_ids * sizeof(void **); + #endif +@@ -10745,7 +10060,6 @@ void __init sched_init(void) + rq->wake_stamp = jiffies; + rq->wake_avg_idle = rq->avg_idle; + rq->max_idle_balance_cost = sysctl_sched_migration_cost; +- walt_sched_init_rq(rq); + + INIT_LIST_HEAD(&rq->cfs_tasks); + +@@ -10777,7 +10091,6 @@ void __init sched_init(void) + zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); + } + +- BUG_ON(alloc_related_thread_groups()); + set_load_weight(&init_task, false); + + /* +@@ -10802,11 +10115,6 @@ void __init sched_init(void) + */ + __sched_fork(0, current); + init_idle(current, smp_processor_id()); +- init_new_task_load(current); +- +-#ifdef CONIG_QOS_CTRL +- init_task_qos(current); +-#endif + + calc_load_update = jiffies + LOAD_FREQ; + +@@ -11270,11 +10578,6 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + if (IS_ERR(tg)) + return ERR_PTR(-ENOMEM); + +-#ifdef CONFIG_SCHED_RTG_CGROUP +- tg->colocate = false; +- tg->colocate_update_disabled = false; +-#endif +- + return &tg->css; + } + +@@ -11330,26 +10633,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset 
*tset) + } + #endif + +-#if defined(CONFIG_UCLAMP_TASK_GROUP) && defined(CONFIG_SCHED_RTG_CGROUP) +-static void schedgp_attach(struct cgroup_taskset *tset) +-{ +- struct task_struct *task; +- struct cgroup_subsys_state *css; +- bool colocate; +- struct task_group *tg; +- +- cgroup_taskset_first(tset, &css); +- tg = css_tg(css); +- +- colocate = tg->colocate; +- +- cgroup_taskset_for_each(task, css, tset) +- sync_cgroup_colocation(task, colocate); +-} +-#else +-static void schedgp_attach(struct cgroup_taskset *tset) { } +-#endif +- + static void cpu_cgroup_attach(struct cgroup_taskset *tset) + { + struct task_struct *task; +@@ -11534,30 +10817,6 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v) + cpu_uclamp_print(sf, UCLAMP_MAX); + return 0; + } +- +-#ifdef CONFIG_SCHED_RTG_CGROUP +-static u64 sched_colocate_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- struct task_group *tg = css_tg(css); +- +- return (u64) tg->colocate; +-} +- +-static int sched_colocate_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 colocate) +-{ +- struct task_group *tg = css_tg(css); +- +- if (tg->colocate_update_disabled) +- return -EPERM; +- +- tg->colocate = !!colocate; +- tg->colocate_update_disabled = true; +- +- return 0; +-} +-#endif /* CONFIG_SCHED_RTG_CGROUP */ + #endif /* CONFIG_UCLAMP_TASK_GROUP */ + + #ifdef CONFIG_FAIR_GROUP_SCHED +@@ -12030,14 +11289,6 @@ static struct cftype cpu_legacy_files[] = { + .seq_show = cpu_uclamp_max_show, + .write = cpu_uclamp_max_write, + }, +-#ifdef CONFIG_SCHED_RTG_CGROUP +- { +- .name = "uclamp.colocate", +- .flags = CFTYPE_NOT_ON_ROOT, +- .read_u64 = sched_colocate_read, +- .write_u64 = sched_colocate_write, +- }, +-#endif + #endif + { } /* Terminate */ + }; +@@ -12337,22 +11588,6 @@ const u32 sched_prio_to_wmult[40] = { + /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, + }; + +-#ifdef CONFIG_SCHED_LATENCY_NICE +-/* +- * latency weight for wakeup preemption +- */ +-const int sched_latency_to_weight[40] = { +- /* -20 */ 1024, 973, 922, 870, 819, +- /* -15 */ 768, 717, 666, 614, 563, +- /* -10 */ 512, 461, 410, 358, 307, +- /* -5 */ 256, 205, 154, 102, 51, +- /* 0 */ 0, -51, -102, -154, -205, +- /* 5 */ -256, -307, -358, -410, -461, +- /* 10 */ -512, -563, -614, -666, -717, +- /* 15 */ -768, -819, -870, -922, -973, +-}; +-#endif +- + void call_trace_sched_update_nr_running(struct rq *rq, int count) + { + trace_sched_update_nr_running_tp(rq, count); +@@ -12888,48 +12123,3 @@ void sched_mm_cid_fork(struct task_struct *t) + t->mm_cid_active = 1; + } + #endif +- +-#ifdef CONFIG_SCHED_WALT +-/* +- * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field +- * +- * Stop accounting (exiting) task's future cpu usage +- * +- * We need this so that reset_all_windows_stats() can function correctly. +- * reset_all_window_stats() depends on do_each_thread/for_each_thread task +- * iterators to reset *all* task's statistics. Exiting tasks however become +- * invisible to those iterators. sched_exit() is called on a exiting task prior +- * to being removed from task_list, which will let reset_all_window_stats() +- * function correctly. 
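The comment above describes a small but subtle trick: a global stats reset walks the task list, so a task about to leave that list must stamp a sentinel into its own history first, letting later walkers distinguish "exited" from "never seen". A compact userspace model of that idea — HISTORY and the EXITING_TASK_MARKER value are assumed here for illustration, not the WALT implementation:

#include <stdio.h>

#define HISTORY 5
#define EXITING_TASK_MARKER 0xdeaddeadULL	/* assumed sentinel value */

struct task_stats {
	unsigned long long sum_history[HISTORY];
	unsigned long long demand;
};

static void reset_task_stats(struct task_stats *t)
{
	int i;

	for (i = 0; i < HISTORY; i++)
		t->sum_history[i] = 0;
	t->demand = 0;
}

/* Runs on the exit path, while the task is still on the task list. */
static void mark_task_exiting(struct task_stats *t)
{
	reset_task_stats(t);			 /* drop its window contribution */
	t->sum_history[0] = EXITING_TASK_MARKER; /* visible to later walkers */
}

int main(void)
{
	struct task_stats t = { .demand = 42 };

	mark_task_exiting(&t);
	printf("history[0]=%llx demand=%llu\n", t.sum_history[0], t.demand);
	return 0;
}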
+- */ +-void sched_exit(struct task_struct *p) +-{ +- struct rq_flags rf; +- struct rq *rq; +- u64 wallclock; +- +-#ifdef CONFIG_SCHED_RTG +- sched_set_group_id(p, 0); +-#endif +- +- rq = task_rq_lock(p, &rf); +- +- /* rq->curr == p */ +- wallclock = sched_ktime_clock(); +- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); +- dequeue_task(rq, p, 0); +- /* +- * task's contribution is already removed from the +- * cumulative window demand in dequeue. As the +- * task's stats are reset, the next enqueue does +- * not change the cumulative window demand. +- */ +- reset_task_stats(p); +- p->ravg.mark_start = wallclock; +- p->ravg.sum_history[0] = EXITING_TASK_MARKER; +- +- enqueue_task(rq, p, 0); +- task_rq_unlock(rq, p, &rf); +- free_task_load_ptrs(p); +-} +-#endif /* CONFIG_SCHED_WALT */ +diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c +deleted file mode 100755 +index a9d5b98fd..000000000 +--- a/kernel/sched/core_ctl.c ++++ /dev/null +@@ -1,1061 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. +- */ +- +-#define pr_fmt(fmt) "core_ctl: " fmt +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include +-#include "sched.h" +-#include "walt.h" +- +-#define MAX_CPUS_PER_CLUSTER 6 +-#define MAX_CLUSTERS 3 +- +-struct cluster_data { +- bool inited; +- unsigned int min_cpus; +- unsigned int max_cpus; +- unsigned int offline_delay_ms; +- unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER]; +- unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER]; +- unsigned int active_cpus; +- unsigned int num_cpus; +- unsigned int nr_isolated_cpus; +- unsigned int nr_not_preferred_cpus; +- cpumask_t cpu_mask; +- unsigned int need_cpus; +- unsigned int task_thres; +- unsigned int max_nr; +- unsigned int nr_prev_assist; +- unsigned int nr_prev_assist_thresh; +- s64 need_ts; +- struct list_head lru; +- bool pending; +- spinlock_t pending_lock; +- bool enable; +- int nrrun; +- struct task_struct *core_ctl_thread; +- unsigned int first_cpu; +- unsigned int boost; +- struct kobject kobj; +-}; +- +-struct cpu_data { +- bool is_busy; +- unsigned int busy; +- unsigned int cpu; +- bool not_preferred; +- struct cluster_data *cluster; +- struct list_head sib; +- bool isolated_by_us; +-}; +- +-static DEFINE_PER_CPU(struct cpu_data, cpu_state); +-static struct cluster_data cluster_state[MAX_CLUSTERS]; +-static unsigned int num_clusters; +- +-#define for_each_cluster(cluster, idx) \ +- for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\ +- (idx)++) +- +-static DEFINE_SPINLOCK(state_lock); +-static void apply_need(struct cluster_data *state); +-static void wake_up_core_ctl_thread(struct cluster_data *state); +-static bool initialized; +- +-ATOMIC_NOTIFIER_HEAD(core_ctl_notifier); +-static unsigned int last_nr_big; +- +-static unsigned int get_active_cpu_count(const struct cluster_data *cluster); +- +-/* ========================= sysfs interface =========================== */ +- +-static ssize_t store_min_cpus(struct cluster_data *state, +- const char *buf, size_t count) +-{ +- unsigned int val; +- +- if (sscanf(buf, "%u\n", &val) != 1) +- return -EINVAL; +- +- state->min_cpus = min(val, state->max_cpus); +- wake_up_core_ctl_thread(state); +- +- return count; +-} +- +-static ssize_t show_min_cpus(const struct cluster_data *state, char *buf) +-{ +- return sysfs_emit(buf, "%u\n", state->min_cpus); +-} +- +-static ssize_t store_max_cpus(struct cluster_data 
*state, +- const char *buf, size_t count) +-{ +- unsigned int val; +- +- if (sscanf(buf, "%u\n", &val) != 1) +- return -EINVAL; +- +- val = min(val, state->num_cpus); +- state->max_cpus = val; +- state->min_cpus = min(state->min_cpus, state->max_cpus); +- wake_up_core_ctl_thread(state); +- +- return count; +-} +- +-static ssize_t show_max_cpus(const struct cluster_data *state, char *buf) +-{ +- return sysfs_emit(buf, "%u\n", state->max_cpus); +-} +- +-static ssize_t store_enable(struct cluster_data *state, +- const char *buf, size_t count) +-{ +- unsigned int val; +- bool bval; +- +- if (sscanf(buf, "%u\n", &val) != 1) +- return -EINVAL; +- +- bval = !!val; +- if (bval != state->enable) { +- state->enable = bval; +- apply_need(state); +- } +- +- return count; +-} +- +-static ssize_t show_enable(const struct cluster_data *state, char *buf) +-{ +- return sysfs_emit(buf, "%u\n", state->enable); +-} +- +-static ssize_t show_need_cpus(const struct cluster_data *state, char *buf) +-{ +- return sysfs_emit(buf, "%u\n", state->need_cpus); +-} +- +-static ssize_t show_active_cpus(const struct cluster_data *state, char *buf) +-{ +- return sysfs_emit(buf, "%u\n", state->active_cpus); +-} +- +-static ssize_t show_global_state(const struct cluster_data *state, char *buf) +-{ +- struct cpu_data *c; +- struct cluster_data *cluster; +- ssize_t count = 0; +- unsigned int cpu; +- +- spin_lock_irq(&state_lock); +- for_each_possible_cpu(cpu) { +- c = &per_cpu(cpu_state, cpu); +- cluster = c->cluster; +- if (!cluster || !cluster->inited) +- continue; +- +- count += sysfs_emit_at(buf, count, +- "CPU%u\n", cpu); +- count += sysfs_emit_at(buf, count, +- "\tCPU: %u\n", c->cpu); +- count += sysfs_emit_at(buf, count, +- "\tOnline: %u\n", +- cpu_online(c->cpu)); +- count += sysfs_emit_at(buf, count, +- "\tIsolated: %u\n", +- cpu_isolated(c->cpu)); +- count += sysfs_emit_at(buf, count, +- "\tFirst CPU: %u\n", +- cluster->first_cpu); +- count += sysfs_emit_at(buf, count, +- "\tBusy%%: %u\n", c->busy); +- count += sysfs_emit_at(buf, count, +- "\tIs busy: %u\n", c->is_busy); +- count += sysfs_emit_at(buf, count, +- "\tNot preferred: %u\n", +- c->not_preferred); +- count += sysfs_emit_at(buf, count, +- "\tNr running: %u\n", cluster->nrrun); +- count += sysfs_emit_at(buf, count, +- "\tActive CPUs: %u\n", get_active_cpu_count(cluster)); +- count += sysfs_emit_at(buf, count, +- "\tNeed CPUs: %u\n", cluster->need_cpus); +- count += sysfs_emit_at(buf, count, +- "\tNr isolated CPUs: %u\n", +- cluster->nr_isolated_cpus); +- count += sysfs_emit_at(buf, count, +- "\tBoost: %u\n", (unsigned int) cluster->boost); +- } +- spin_unlock_irq(&state_lock); +- +- return count; +-} +- +-struct core_ctl_attr { +- struct attribute attr; +- ssize_t (*show)(const struct cluster_data *, char *); +- ssize_t (*store)(struct cluster_data *, const char *, size_t count); +-}; +- +-#define core_ctl_attr_ro(_name) \ +-static struct core_ctl_attr _name = \ +-__ATTR(_name, 0444, show_##_name, NULL) +- +-#define core_ctl_attr_rw(_name) \ +-static struct core_ctl_attr _name = \ +-__ATTR(_name, 0644, show_##_name, store_##_name) +- +-core_ctl_attr_rw(min_cpus); +-core_ctl_attr_rw(max_cpus); +-core_ctl_attr_ro(need_cpus); +-core_ctl_attr_ro(active_cpus); +-core_ctl_attr_ro(global_state); +-core_ctl_attr_rw(enable); +- +-static struct attribute *default_attrs[] = { +- &min_cpus.attr, +- &max_cpus.attr, +- &enable.attr, +- &need_cpus.attr, +- &active_cpus.attr, +- &global_state.attr, +- NULL +-}; +-ATTRIBUTE_GROUPS(default); +-#define to_cluster_data(k) 
container_of(k, struct cluster_data, kobj) +-#define to_attr(a) container_of(a, struct core_ctl_attr, attr) +-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) +-{ +- struct cluster_data *data = to_cluster_data(kobj); +- struct core_ctl_attr *cattr = to_attr(attr); +- ssize_t ret = -EIO; +- +- if (cattr->show) +- ret = cattr->show(data, buf); +- +- return ret; +-} +- +-static ssize_t store(struct kobject *kobj, struct attribute *attr, +- const char *buf, size_t count) +-{ +- struct cluster_data *data = to_cluster_data(kobj); +- struct core_ctl_attr *cattr = to_attr(attr); +- ssize_t ret = -EIO; +- +- if (cattr->store) +- ret = cattr->store(data, buf, count); +- +- return ret; +-} +- +-static const struct sysfs_ops sysfs_ops = { +- .show = show, +- .store = store, +-}; +- +-static struct kobj_type ktype_core_ctl = { +- .sysfs_ops = &sysfs_ops, +- .default_groups = default_groups, +-}; +- +-/* ==================== runqueue based core count =================== */ +- +-static struct sched_avg_stats nr_stats[NR_CPUS]; +- +-/* +- * nr_need: +- * Number of tasks running on this cluster plus +- * tasks running on higher capacity clusters. +- * To find out CPUs needed from this cluster. +- * +- * For example: +- * On dual cluster system with 4 min capacity +- * CPUs and 4 max capacity CPUs, if there are +- * 4 small tasks running on min capacity CPUs +- * and 2 big tasks running on 2 max capacity +- * CPUs, nr_need has to be 6 for min capacity +- * cluster and 2 for max capacity cluster. +- * This is because, min capacity cluster has to +- * account for tasks running on max capacity +- * cluster, so that, the min capacity cluster +- * can be ready to accommodate tasks running on max +- * capacity CPUs if the demand of tasks goes down. +- */ +-static int compute_cluster_nr_need(int index) +-{ +- int cpu; +- struct cluster_data *cluster; +- int nr_need = 0; +- +- for_each_cluster(cluster, index) { +- for_each_cpu(cpu, &cluster->cpu_mask) +- nr_need += nr_stats[cpu].nr; +- } +- +- return nr_need; +-} +- +-/* +- * prev_misfit_need: +- * Tasks running on smaller capacity cluster which +- * needs to be migrated to higher capacity cluster. +- * To find out how many tasks need higher capacity CPUs. +- * +- * For example: +- * On dual cluster system with 4 min capacity +- * CPUs and 4 max capacity CPUs, if there are +- * 2 small tasks and 2 big tasks running on +- * min capacity CPUs and no tasks running on +- * max cpacity, prev_misfit_need of min capacity +- * cluster will be 0 and prev_misfit_need of +- * max capacity cluster will be 2. +- */ +-static int compute_prev_cluster_misfit_need(int index) +-{ +- int cpu; +- struct cluster_data *prev_cluster; +- int prev_misfit_need = 0; +- +- /* +- * Lowest capacity cluster does not have to +- * accommodate any misfit tasks. 
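The two counters documented in the comments above can be exercised standalone. The sketch below re-creates the dual-cluster example from those comments (4 little CPUs, 4 big CPUs, four tasks on the little cores of which two are misfits, big cluster idle); the struct names and the flat stats array are simplified assumptions, not the kernel's sched_avg_stats:

#include <stdio.h>

struct cpu_stats { int nr; int nr_misfit; };	 /* per-CPU window stats */
struct cluster { int first_cpu; int num_cpus; }; /* ordered small -> big */

static int cluster_nr_need(const struct cluster *cl, int nclusters, int index,
			   const struct cpu_stats *st)
{
	int c, cpu, need = 0;

	/* own tasks plus everything on higher-capacity clusters */
	for (c = index; c < nclusters; c++)
		for (cpu = cl[c].first_cpu;
		     cpu < cl[c].first_cpu + cl[c].num_cpus; cpu++)
			need += st[cpu].nr;
	return need;
}

static int prev_cluster_misfit_need(const struct cluster *cl, int index,
				    const struct cpu_stats *st)
{
	int cpu, need = 0;

	if (index == 0)		/* lowest cluster takes no misfits */
		return 0;
	for (cpu = cl[index - 1].first_cpu;
	     cpu < cl[index - 1].first_cpu + cl[index - 1].num_cpus; cpu++)
		need += st[cpu].nr_misfit;
	return need;
}

int main(void)
{
	struct cluster cl[2] = { { 0, 4 }, { 4, 4 } };
	struct cpu_stats st[8] = {
		{ 1, 0 }, { 1, 0 }, { 1, 1 }, { 1, 1 },	/* little CPUs 0-3 */
		{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 },	/* big CPUs 4-7 */
	};
	int i;

	for (i = 0; i < 2; i++)
		printf("cluster%d: nr_need=%d prev_misfit_need=%d\n", i,
		       cluster_nr_need(cl, 2, i, st),
		       prev_cluster_misfit_need(cl, i, st));
	return 0;
}

This prints nr_need=4 for the little cluster (its own load) and prev_misfit_need=2 for the big cluster, matching the expectation spelled out in the comment: the two misfit tasks stuck on little cores are what the big cluster must be ready to absorb.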
+- */ +- if (index == 0) +- return 0; +- +- prev_cluster = &cluster_state[index - 1]; +- +- for_each_cpu(cpu, &prev_cluster->cpu_mask) +- prev_misfit_need += nr_stats[cpu].nr_misfit; +- +- return prev_misfit_need; +-} +- +-static int compute_cluster_max_nr(int index) +-{ +- int cpu; +- struct cluster_data *cluster = &cluster_state[index]; +- int max_nr = 0; +- +- for_each_cpu(cpu, &cluster->cpu_mask) +- max_nr = max(max_nr, nr_stats[cpu].nr_max); +- +- return max_nr; +-} +- +-static int cluster_real_big_tasks(int index) +-{ +- int nr_big = 0; +- int cpu; +- struct cluster_data *cluster = &cluster_state[index]; +- +- if (index == 0) { +- for_each_cpu(cpu, &cluster->cpu_mask) +- nr_big += nr_stats[cpu].nr_misfit; +- } else { +- for_each_cpu(cpu, &cluster->cpu_mask) +- nr_big += nr_stats[cpu].nr; +- } +- +- return nr_big; +-} +- +-/* +- * prev_nr_need_assist: +- * Tasks that are eligible to run on the previous +- * cluster but cannot run because of insufficient +- * CPUs there. prev_nr_need_assist is indicative +- * of number of CPUs in this cluster that should +- * assist its previous cluster to makeup for +- * insufficient CPUs there. +- * +- * For example: +- * On tri-cluster system with 4 min capacity +- * CPUs, 3 intermediate capacity CPUs and 1 +- * max capacity CPU, if there are 4 small +- * tasks running on min capacity CPUs, 4 big +- * tasks running on intermediate capacity CPUs +- * and no tasks running on max capacity CPU, +- * prev_nr_need_assist for min & max capacity +- * clusters will be 0, but, for intermediate +- * capacity cluster prev_nr_need_assist will +- * be 1 as it has 3 CPUs, but, there are 4 big +- * tasks to be served. +- */ +-static int prev_cluster_nr_need_assist(int index) +-{ +- int need = 0; +- int cpu; +- struct cluster_data *prev_cluster; +- +- if (index == 0) +- return 0; +- +- index--; +- prev_cluster = &cluster_state[index]; +- +- /* +- * Next cluster should not assist, while there are isolated cpus +- * in this cluster. 
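Reduced to arithmetic, the assist count computed below is the total demand on the previous cluster (its running tasks plus the misfits that want in) minus the CPUs it already has active, floored at zero and suppressed while that cluster still holds isolated CPUs it could bring back itself. A hedged one-function sketch, fed the tri-cluster example from the comment (names illustrative):

#include <stdio.h>

static int nr_need_assist(int prev_running, int prev_misfit_in,
			  int prev_active_cpus, int prev_isolated_cpus)
{
	int need;

	/* the previous cluster should first unisolate its own CPUs */
	if (prev_isolated_cpus)
		return 0;
	need = prev_running + prev_misfit_in;
	return need > prev_active_cpus ? need - prev_active_cpus : 0;
}

int main(void)
{
	/* comment's example: 4 big tasks on a 3-CPU middle cluster,
	 * nothing isolated -> this cluster assists with one CPU */
	printf("assist=%d\n", nr_need_assist(4, 0, 3, 0));
	return 0;
}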
+- */ +- if (prev_cluster->nr_isolated_cpus) +- return 0; +- +- for_each_cpu(cpu, &prev_cluster->cpu_mask) +- need += nr_stats[cpu].nr; +- +- need += compute_prev_cluster_misfit_need(index); +- +- if (need > prev_cluster->active_cpus) +- need = need - prev_cluster->active_cpus; +- else +- need = 0; +- +- return need; +-} +- +-static void update_running_avg(void) +-{ +- struct cluster_data *cluster; +- unsigned int index = 0; +- unsigned long flags; +- int big_avg = 0; +- +- sched_get_nr_running_avg(nr_stats); +- +- spin_lock_irqsave(&state_lock, flags); +- for_each_cluster(cluster, index) { +- int nr_need, prev_misfit_need; +- +- if (!cluster->inited) +- continue; +- +- nr_need = compute_cluster_nr_need(index); +- prev_misfit_need = compute_prev_cluster_misfit_need(index); +- +- +- cluster->nrrun = nr_need + prev_misfit_need; +- cluster->max_nr = compute_cluster_max_nr(index); +- cluster->nr_prev_assist = prev_cluster_nr_need_assist(index); +- trace_core_ctl_update_nr_need(cluster->first_cpu, nr_need, +- prev_misfit_need, +- cluster->nrrun, cluster->max_nr, +- cluster->nr_prev_assist); +- big_avg += cluster_real_big_tasks(index); +- } +- spin_unlock_irqrestore(&state_lock, flags); +- +- last_nr_big = big_avg; +-} +- +-#define MAX_NR_THRESHOLD 4 +-/* adjust needed CPUs based on current runqueue information */ +-static unsigned int apply_task_need(const struct cluster_data *cluster, +- unsigned int new_need) +-{ +- /* unisolate all cores if there are enough tasks */ +- if (cluster->nrrun >= cluster->task_thres) +- return cluster->num_cpus; +- +- /* +- * unisolate as many cores as the previous cluster +- * needs assistance with. +- */ +- if (cluster->nr_prev_assist >= cluster->nr_prev_assist_thresh) +- new_need = new_need + cluster->nr_prev_assist; +- +- /* only unisolate more cores if there are tasks to run */ +- if (cluster->nrrun > new_need) +- new_need = new_need + 1; +- +- /* +- * We don't want tasks to be overcrowded in a cluster. +- * If any CPU has more than MAX_NR_THRESHOLD in the last +- * window, bring another CPU to help out. +- */ +- if (cluster->max_nr > MAX_NR_THRESHOLD) +- new_need = new_need + 1; +- +- return new_need; +-} +- +-/* ======================= load based core count ====================== */ +- +-static unsigned int apply_limits(const struct cluster_data *cluster, +- unsigned int need_cpus) +-{ +- return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus); +-} +- +-static unsigned int get_active_cpu_count(const struct cluster_data *cluster) +-{ +- return cluster->num_cpus - +- sched_isolate_count(&cluster->cpu_mask, true); +-} +- +-static bool is_active(const struct cpu_data *state) +-{ +- return cpu_online(state->cpu) && !cpu_isolated(state->cpu); +-} +- +-static bool adjustment_possible(const struct cluster_data *cluster, +- unsigned int need) +-{ +- return (need < cluster->active_cpus || (need > cluster->active_cpus && +- cluster->nr_isolated_cpus)); +-} +- +-static bool eval_need(struct cluster_data *cluster) +-{ +- unsigned long flags; +- struct cpu_data *c; +- unsigned int need_cpus = 0, last_need, thres_idx; +- int ret = 0; +- bool need_flag = false; +- unsigned int new_need; +- s64 now, elapsed; +- +- if (unlikely(!cluster->inited)) +- return 0; +- +- spin_lock_irqsave(&state_lock, flags); +- +- if (cluster->boost || !cluster->enable) { +- need_cpus = cluster->max_cpus; +- } else { +- cluster->active_cpus = get_active_cpu_count(cluster); +- thres_idx = cluster->active_cpus ? 
cluster->active_cpus - 1 : 0; +- list_for_each_entry(c, &cluster->lru, sib) { +- bool old_is_busy = c->is_busy; +- int high_irqload = sched_cpu_high_irqload(c->cpu); +- +- if (c->busy >= cluster->busy_up_thres[thres_idx] || +- high_irqload) +- c->is_busy = true; +- else if (c->busy < cluster->busy_down_thres[thres_idx]) +- c->is_busy = false; +- trace_core_ctl_set_busy(c->cpu, c->busy, old_is_busy, +- c->is_busy, high_irqload); +- need_cpus += c->is_busy; +- } +- need_cpus = apply_task_need(cluster, need_cpus); +- } +- new_need = apply_limits(cluster, need_cpus); +- need_flag = adjustment_possible(cluster, new_need); +- +- last_need = cluster->need_cpus; +- now = ktime_to_ms(ktime_get()); +- +- if (new_need > cluster->active_cpus) { +- ret = 1; +- } else { +- /* +- * When there is no change in need and there are no more +- * active CPUs than currently needed, just update the +- * need time stamp and return. +- */ +- if (new_need == last_need && new_need == cluster->active_cpus) { +- cluster->need_ts = now; +- spin_unlock_irqrestore(&state_lock, flags); +- return 0; +- } +- +- elapsed = now - cluster->need_ts; +- ret = elapsed >= cluster->offline_delay_ms; +- } +- +- if (ret) { +- cluster->need_ts = now; +- cluster->need_cpus = new_need; +- } +- trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need, +- ret && need_flag); +- spin_unlock_irqrestore(&state_lock, flags); +- +- return ret && need_flag; +-} +- +-static void apply_need(struct cluster_data *cluster) +-{ +- if (eval_need(cluster)) +- wake_up_core_ctl_thread(cluster); +-} +- +-/* ========================= core count enforcement ==================== */ +- +-static void wake_up_core_ctl_thread(struct cluster_data *cluster) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&cluster->pending_lock, flags); +- cluster->pending = true; +- spin_unlock_irqrestore(&cluster->pending_lock, flags); +- +- wake_up_process(cluster->core_ctl_thread); +-} +- +-static u64 core_ctl_check_timestamp; +- +-int core_ctl_set_boost(bool boost) +-{ +- unsigned int index = 0; +- struct cluster_data *cluster = NULL; +- unsigned long flags; +- int ret = 0; +- bool boost_state_changed = false; +- +- if (unlikely(!initialized)) +- return 0; +- +- spin_lock_irqsave(&state_lock, flags); +- for_each_cluster(cluster, index) { +- if (boost) { +- boost_state_changed = !cluster->boost; +- ++cluster->boost; +- } else { +- if (!cluster->boost) { +- ret = -EINVAL; +- break; +- } else { +- --cluster->boost; +- boost_state_changed = !cluster->boost; +- } +- } +- } +- spin_unlock_irqrestore(&state_lock, flags); +- +- if (boost_state_changed) { +- index = 0; +- for_each_cluster(cluster, index) +- apply_need(cluster); +- } +- +- if (cluster) +- trace_core_ctl_set_boost(cluster->boost, ret); +- +- return ret; +-} +-EXPORT_SYMBOL(core_ctl_set_boost); +- +-void core_ctl_check(u64 window_start) +-{ +- int cpu; +- struct cpu_data *c; +- struct cluster_data *cluster; +- unsigned int index = 0; +- unsigned long flags; +- +- if (unlikely(!initialized)) +- return; +- +- if (window_start == core_ctl_check_timestamp) +- return; +- +- core_ctl_check_timestamp = window_start; +- +- spin_lock_irqsave(&state_lock, flags); +- for_each_possible_cpu(cpu) { +- +- c = &per_cpu(cpu_state, cpu); +- cluster = c->cluster; +- +- if (!cluster || !cluster->inited) +- continue; +- +- c->busy = sched_get_cpu_util(cpu); +- } +- spin_unlock_irqrestore(&state_lock, flags); +- +- update_running_avg(); +- +- for_each_cluster(cluster, index) { +- if (eval_need(cluster)) +- 
wake_up_core_ctl_thread(cluster); +- } +-} +- +-static void move_cpu_lru(struct cpu_data *cpu_data) +-{ +- unsigned long flags; +- +- spin_lock_irqsave(&state_lock, flags); +- list_del(&cpu_data->sib); +- list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru); +- spin_unlock_irqrestore(&state_lock, flags); +-} +- +-static void try_to_isolate(struct cluster_data *cluster, unsigned int need) +-{ +- struct cpu_data *c, *tmp; +- unsigned long flags; +- unsigned int num_cpus = cluster->num_cpus; +- unsigned int nr_isolated = 0; +- bool first_pass = cluster->nr_not_preferred_cpus; +- +- /* +- * Protect against entry being removed (and added at tail) by other +- * thread (hotplug). +- */ +- spin_lock_irqsave(&state_lock, flags); +- list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { +- if (!num_cpus--) +- break; +- +- if (!is_active(c)) +- continue; +- if (cluster->active_cpus == need) +- break; +- /* Don't isolate busy CPUs. */ +- if (c->is_busy) +- continue; +- +- /* +- * We isolate only the not_preferred CPUs. If none +- * of the CPUs are selected as not_preferred, then +- * all CPUs are eligible for isolation. +- */ +- if (cluster->nr_not_preferred_cpus && !c->not_preferred) +- continue; +- +- spin_unlock_irqrestore(&state_lock, flags); +- +- pr_debug("Trying to isolate CPU%u\n", c->cpu); +- if (!sched_isolate_cpu(c->cpu)) { +- c->isolated_by_us = true; +- move_cpu_lru(c); +- nr_isolated++; +- } else { +- pr_debug("Unable to isolate CPU%u\n", c->cpu); +- } +- cluster->active_cpus = get_active_cpu_count(cluster); +- spin_lock_irqsave(&state_lock, flags); +- } +- cluster->nr_isolated_cpus += nr_isolated; +- spin_unlock_irqrestore(&state_lock, flags); +- +-again: +- /* +- * If the number of active CPUs is within the limits, then +- * don't force isolation of any busy CPUs. +- */ +- if (cluster->active_cpus <= cluster->max_cpus) +- return; +- +- nr_isolated = 0; +- num_cpus = cluster->num_cpus; +- spin_lock_irqsave(&state_lock, flags); +- list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { +- if (!num_cpus--) +- break; +- +- if (!is_active(c)) +- continue; +- if (cluster->active_cpus <= cluster->max_cpus) +- break; +- +- if (first_pass && !c->not_preferred) +- continue; +- +- spin_unlock_irqrestore(&state_lock, flags); +- +- pr_debug("Trying to isolate CPU%u\n", c->cpu); +- if (!sched_isolate_cpu(c->cpu)) { +- c->isolated_by_us = true; +- move_cpu_lru(c); +- nr_isolated++; +- } else { +- pr_debug("Unable to isolate CPU%u\n", c->cpu); +- } +- cluster->active_cpus = get_active_cpu_count(cluster); +- spin_lock_irqsave(&state_lock, flags); +- } +- cluster->nr_isolated_cpus += nr_isolated; +- spin_unlock_irqrestore(&state_lock, flags); +- +- if (first_pass && cluster->active_cpus > cluster->max_cpus) { +- first_pass = false; +- goto again; +- } +-} +- +-static void __try_to_unisolate(struct cluster_data *cluster, +- unsigned int need, bool force) +-{ +- struct cpu_data *c, *tmp; +- unsigned long flags; +- unsigned int num_cpus = cluster->num_cpus; +- unsigned int nr_unisolated = 0; +- +- /* +- * Protect against entry being removed (and added at tail) by other +- * thread (hotplug). 
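try_to_isolate() above encodes a two-pass victim search: the first pass only takes CPUs explicitly marked not_preferred, and only if the cluster is still over its limit afterwards does a second pass consider the rest. A simplified model of that shape — arrays instead of the LRU list, no locking, and busy CPUs always skipped (the real second loop may take them when enforcing max_cpus); all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct cpu {
	int id;
	bool active;
	bool busy;
	bool not_preferred;
};

static int pick_victims(struct cpu *cpus, int n, int want, bool honor_pref)
{
	int i, taken = 0;

	for (i = 0; i < n && taken < want; i++) {
		if (!cpus[i].active || cpus[i].busy)
			continue;
		if (honor_pref && !cpus[i].not_preferred)
			continue;
		cpus[i].active = false;	/* stands in for sched_isolate_cpu() */
		taken++;
	}
	return taken;
}

int main(void)
{
	struct cpu cpus[4] = {
		{ 0, true, false, false },
		{ 1, true, false, true },	/* only first-pass candidate */
		{ 2, true, true,  false },	/* busy: never picked */
		{ 3, true, false, false },
	};
	int want = 2, got;

	got = pick_victims(cpus, 4, want, true);	/* pass 1: not_preferred */
	if (got < want)					/* pass 2: anyone idle */
		got += pick_victims(cpus, 4, want - got, false);
	printf("isolated %d of %d requested\n", got, want);
	return 0;
}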
+- */ +- spin_lock_irqsave(&state_lock, flags); +- list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { +- if (!num_cpus--) +- break; +- +- if (!c->isolated_by_us) +- continue; +- if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) || +- (!force && c->not_preferred)) +- continue; +- if (cluster->active_cpus == need) +- break; +- +- spin_unlock_irqrestore(&state_lock, flags); +- +- pr_debug("Trying to unisolate CPU%u\n", c->cpu); +- if (!sched_unisolate_cpu(c->cpu)) { +- c->isolated_by_us = false; +- move_cpu_lru(c); +- nr_unisolated++; +- } else { +- pr_debug("Unable to unisolate CPU%u\n", c->cpu); +- } +- cluster->active_cpus = get_active_cpu_count(cluster); +- spin_lock_irqsave(&state_lock, flags); +- } +- cluster->nr_isolated_cpus -= nr_unisolated; +- spin_unlock_irqrestore(&state_lock, flags); +-} +- +-static void try_to_unisolate(struct cluster_data *cluster, unsigned int need) +-{ +- bool force_use_non_preferred = false; +- +- __try_to_unisolate(cluster, need, force_use_non_preferred); +- +- if (cluster->active_cpus == need) +- return; +- +- force_use_non_preferred = true; +- __try_to_unisolate(cluster, need, force_use_non_preferred); +-} +- +-static void __ref do_core_ctl(struct cluster_data *cluster) +-{ +- unsigned int need; +- +- need = apply_limits(cluster, cluster->need_cpus); +- +- if (adjustment_possible(cluster, need)) { +- pr_debug("Trying to adjust group %u from %u to %u\n", +- cluster->first_cpu, cluster->active_cpus, need); +- +- if (cluster->active_cpus > need) +- try_to_isolate(cluster, need); +- else if (cluster->active_cpus < need) +- try_to_unisolate(cluster, need); +- } +-} +- +-static int __ref try_core_ctl(void *data) +-{ +- struct cluster_data *cluster = data; +- unsigned long flags; +- +- while (1) { +- set_current_state(TASK_INTERRUPTIBLE); +- spin_lock_irqsave(&cluster->pending_lock, flags); +- if (!cluster->pending) { +- spin_unlock_irqrestore(&cluster->pending_lock, flags); +- schedule(); +- if (kthread_should_stop()) +- break; +- spin_lock_irqsave(&cluster->pending_lock, flags); +- } +- set_current_state(TASK_RUNNING); +- cluster->pending = false; +- spin_unlock_irqrestore(&cluster->pending_lock, flags); +- +- do_core_ctl(cluster); +- } +- +- return 0; +-} +- +-static int isolation_cpuhp_state(unsigned int cpu, bool online) +-{ +- struct cpu_data *state = &per_cpu(cpu_state, cpu); +- struct cluster_data *cluster = state->cluster; +- unsigned int need; +- bool do_wakeup = false, unisolated = false; +- unsigned long flags; +- +- if (unlikely(!cluster || !cluster->inited)) +- return 0; +- +- if (online) { +- cluster->active_cpus = get_active_cpu_count(cluster); +- +- /* +- * Moving to the end of the list should only happen in +- * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an +- * infinite list traversal when thermal (or other entities) +- * reject trying to online CPUs. +- */ +- move_cpu_lru(state); +- } else { +- /* +- * We don't want to have a CPU both offline and isolated. +- * So unisolate a CPU that went down if it was isolated by us. +- */ +- if (state->isolated_by_us) { +- sched_unisolate_cpu_unlocked(cpu); +- state->isolated_by_us = false; +- unisolated = true; +- } +- +- /* Move a CPU to the end of the LRU when it goes offline. 
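The try_core_ctl() loop above is a classic pending-flag worker: requesters set cluster->pending under a lock and wake the thread, which re-checks the flag before sleeping so a wakeup issued while it was busy is never lost. A pthreads analogue of the same shape, assuming a single worker (compile with -pthread; names are illustrative, and a condition variable replaces the kthread wake/schedule dance):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending, stop;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		if (pending) {
			pending = false;
			pthread_mutex_unlock(&lock);
			puts("do_core_ctl()");	/* real work runs unlocked */
			pthread_mutex_lock(&lock);
		} else if (stop) {
			break;
		} else {
			pthread_cond_wait(&cond, &lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void kick(void)		/* models wake_up_core_ctl_thread() */
{
	pthread_mutex_lock(&lock);
	pending = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	kick();
	pthread_mutex_lock(&lock);
	stop = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);
	pthread_join(t, NULL);
	return 0;
}

Checking pending before stop means a queued request is always drained before the worker exits, mirroring how the kernel loop consumes cluster->pending before parking.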
*/ +- move_cpu_lru(state); +- +- state->busy = 0; +- cluster->active_cpus = get_active_cpu_count(cluster); +- } +- +- need = apply_limits(cluster, cluster->need_cpus); +- spin_lock_irqsave(&state_lock, flags); +- if (unisolated) +- cluster->nr_isolated_cpus--; +- do_wakeup = adjustment_possible(cluster, need); +- spin_unlock_irqrestore(&state_lock, flags); +- if (do_wakeup) +- wake_up_core_ctl_thread(cluster); +- +- return 0; +-} +- +-static int core_ctl_isolation_online_cpu(unsigned int cpu) +-{ +- return isolation_cpuhp_state(cpu, true); +-} +- +-static int core_ctl_isolation_dead_cpu(unsigned int cpu) +-{ +- return isolation_cpuhp_state(cpu, false); +-} +- +-/* ============================ init code ============================== */ +- +-static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu) +-{ +- unsigned int i; +- +- for (i = 0; i < num_clusters; ++i) { +- if (cluster_state[i].first_cpu == first_cpu) +- return &cluster_state[i]; +- } +- +- return NULL; +-} +- +-static int cluster_init(const struct cpumask *mask) +-{ +- struct device *dev; +- unsigned int first_cpu = cpumask_first(mask); +- struct cluster_data *cluster; +- struct cpu_data *state; +- unsigned int cpu; +- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; +- +- if (find_cluster_by_first_cpu(first_cpu)) +- return 0; +- +- dev = get_cpu_device(first_cpu); +- if (!dev) +- return -ENODEV; +- +- pr_info("Creating CPU group %d\n", first_cpu); +- +- if (num_clusters == MAX_CLUSTERS) { +- pr_err("Unsupported number of clusters. Only %u supported\n", +- MAX_CLUSTERS); +- return -EINVAL; +- } +- cluster = &cluster_state[num_clusters]; +- ++num_clusters; +- +- cpumask_copy(&cluster->cpu_mask, mask); +- cluster->num_cpus = cpumask_weight(mask); +- if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) { +- pr_err("HW configuration not supported\n"); +- return -EINVAL; +- } +- cluster->first_cpu = first_cpu; +- cluster->min_cpus = 1; +- cluster->max_cpus = cluster->num_cpus; +- cluster->need_cpus = cluster->num_cpus; +- cluster->offline_delay_ms = 100; +- cluster->task_thres = UINT_MAX; +- cluster->nr_prev_assist_thresh = UINT_MAX; +- cluster->nrrun = cluster->num_cpus; +- cluster->enable = true; +- cluster->nr_not_preferred_cpus = 0; +- INIT_LIST_HEAD(&cluster->lru); +- spin_lock_init(&cluster->pending_lock); +- +- for_each_cpu(cpu, mask) { +- pr_info("Init CPU%u state\n", cpu); +- +- state = &per_cpu(cpu_state, cpu); +- state->cluster = cluster; +- state->cpu = cpu; +- list_add_tail(&state->sib, &cluster->lru); +- } +- cluster->active_cpus = get_active_cpu_count(cluster); +- +- cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster, +- "core_ctl/%d", first_cpu); +- if (IS_ERR(cluster->core_ctl_thread)) +- return PTR_ERR(cluster->core_ctl_thread); +- +- sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO, +- &param); +- +- cluster->inited = true; +- +- kobject_init(&cluster->kobj, &ktype_core_ctl); +- return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl"); +-} +- +-static int __init core_ctl_init(void) +-{ +- struct sched_cluster *cluster; +- int ret; +- +- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, +- "core_ctl/isolation:online", +- core_ctl_isolation_online_cpu, NULL); +- +- cpuhp_setup_state_nocalls(CPUHP_CORE_CTL_ISOLATION_DEAD, +- "core_ctl/isolation:dead", +- NULL, core_ctl_isolation_dead_cpu); +- +- for_each_sched_cluster(cluster) { +- ret = cluster_init(&cluster->cpus); +- if (ret) +- pr_warn("unable to create core ctl group: %d\n", ret); +- } +- +- initialized = 
true; +- return 0; +-} +- +-late_initcall(core_ctl_init); +diff --git a/kernel/sched/core_ctl.h b/kernel/sched/core_ctl.h +deleted file mode 100755 +index 0be55ac6a..000000000 +--- a/kernel/sched/core_ctl.h ++++ /dev/null +@@ -1,19 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved. +- */ +- +-#ifndef __CORE_CTL_H +-#define __CORE_CTL_H +- +-#ifdef CONFIG_SCHED_CORE_CTRL +-void core_ctl_check(u64 wallclock); +-int core_ctl_set_boost(bool boost); +-#else +-static inline void core_ctl_check(u64 wallclock) {} +-static inline int core_ctl_set_boost(bool boost) +-{ +- return 0; +-} +-#endif +-#endif +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index cd8d9bd39..a49f13601 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -6,8 +6,6 @@ + * Author: Rafael J. Wysocki + */ + +-#include "sched.h" +-#include "rtg/rtg.h" + #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8) + + struct sugov_tunables { +@@ -33,10 +31,6 @@ struct sugov_policy { + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; +-#ifdef CONFIG_SCHED_RTG +- unsigned long rtg_util; +- unsigned int rtg_freq; +-#endif + bool work_in_progress; + + bool limits_changed; +@@ -165,12 +159,8 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) + struct rq *rq = cpu_rq(sg_cpu->cpu); + + sg_cpu->bw_dl = cpu_bw_dl(rq); +-#ifdef CONFIG_SCHED_WALT +- cpu_util_freq_walt(sg_cpu->cpu); +-#else + sg_cpu->util = effective_cpu_util(sg_cpu->cpu, util, + FREQUENCY_UTIL, NULL); +-#endif + } + + /** +@@ -346,14 +336,6 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time, + unsigned int cached_freq = sg_policy->cached_raw_freq; + unsigned long max_cap; + unsigned int next_f; +-#ifdef CONFIG_SCHED_RTG +- bool force_update = false; +-#endif +-#ifdef CONFIG_SCHED_RTG +- unsigned long irq_flag; +- +- force_update = flags & SCHED_CPUFREQ_FORCE_UPDATE; +-#endif + + max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); + +@@ -387,17 +369,9 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time, + if (sg_policy->policy->fast_switch_enabled) { + cpufreq_driver_fast_switch(sg_policy->policy, next_f); + } else { +-#ifdef CONFIG_SCHED_RTG +- raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag); +-#else + raw_spin_lock(&sg_policy->update_lock); +-#endif + sugov_deferred_update(sg_policy); +-#ifdef CONFIG_SCHED_RTG +- raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag); +-#else + raw_spin_unlock(&sg_policy->update_lock); +-#endif + } + } + +@@ -457,10 +431,6 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) + util = max(j_sg_cpu->util, util); + } + +-#ifdef CONFIG_SCHED_RTG +- sched_get_max_group_util(policy->cpus, &sg_policy->rtg_util, &sg_policy->rtg_freq); +- util = max(sg_policy->rtg_util, util); +-#endif + return get_next_freq(sg_policy, util, max_cap); + } + +@@ -470,29 +440,15 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) + struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); + struct sugov_policy *sg_policy = sg_cpu->sg_policy; + unsigned int next_f; +-#ifdef CONFIG_SCHED_RTG +- unsigned long irq_flag; +- bool force_update = false; +-#endif + +-#ifdef CONFIG_SCHED_RTG +- force_update = flags & SCHED_CPUFREQ_FORCE_UPDATE; +- raw_spin_lock_irqsave(&sg_policy->update_lock, irq_flag); +-#else + 
raw_spin_lock(&sg_policy->update_lock); +-#endif + + sugov_iowait_boost(sg_cpu, time, flags); + sg_cpu->last_update = time; + + ignore_dl_rate_limit(sg_cpu); + +-#ifdef CONFIG_SCHED_WALT +- if ((sugov_should_update_freq(sg_policy, time)) +- && !(flags & SCHED_CPUFREQ_CONTINUE)) { +-#else + if (sugov_should_update_freq(sg_policy, time)) { +-#endif + next_f = sugov_next_freq_shared(sg_cpu, time); + + if (!sugov_update_next_freq(sg_policy, time, next_f)) +@@ -504,11 +460,7 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) + sugov_deferred_update(sg_policy); + } + unlock: +-#ifdef CONFIG_SCHED_RTG +- raw_spin_unlock_irqrestore(&sg_policy->update_lock, irq_flag); +-#else + raw_spin_unlock(&sg_policy->update_lock); +-#endif + } + + static void sugov_work(struct kthread_work *work) +diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c +index c8686fcdd..42c40cfdf 100644 +--- a/kernel/sched/cpupri.c ++++ b/kernel/sched/cpupri.c +@@ -102,9 +102,6 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p, + if (lowest_mask) { + cpumask_and(lowest_mask, &p->cpus_mask, vec->mask); + cpumask_and(lowest_mask, lowest_mask, cpu_active_mask); +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(lowest_mask, lowest_mask, cpu_isolated_mask); +-#endif + + /* + * We have to ensure that we have at least one bit +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index 79b86e3e6..b453f8a6a 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -6,7 +6,6 @@ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + #include + #endif +-#include "walt.h" + + #ifdef CONFIG_IRQ_TIME_ACCOUNTING + +@@ -57,18 +56,11 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset) + unsigned int pc; + s64 delta; + int cpu; +-#ifdef CONFIG_SCHED_WALT +- u64 wallclock; +- bool account = true; +-#endif + + if (!sched_clock_irqtime) + return; + + cpu = smp_processor_id(); +-#ifdef CONFIG_SCHED_WALT +- wallclock = sched_clock_cpu(cpu); +-#endif + delta = sched_clock_cpu(cpu) - irqtime->irq_start_time; + irqtime->irq_start_time += delta; + pc = irq_count() - offset; +@@ -83,13 +75,6 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset) + irqtime_account_delta(irqtime, delta, CPUTIME_IRQ); + else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) + irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ); +-#ifdef CONFIG_SCHED_WALT +- else +- account = false; +- +- if (account) +- sched_account_irqtime(cpu, curr, delta, wallclock); +-#endif + } + + static u64 irqtime_tick_accounted(u64 maxtime) +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index d32cecdfe..b9e99bc3b 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -17,7 +17,6 @@ + */ + + #include +-#include "walt.h" + + /* + * Default limits for DL period; on the top end we guard against small util +@@ -1497,7 +1496,6 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) + WARN_ON(!dl_prio(prio)); + dl_rq->dl_nr_running++; + add_nr_running(rq_of_dl_rq(dl_rq), 1); +- walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); + + inc_dl_deadline(dl_rq, deadline); + inc_dl_migration(dl_se, dl_rq); +@@ -1512,7 +1510,6 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) + WARN_ON(!dl_rq->dl_nr_running); + dl_rq->dl_nr_running--; + sub_nr_running(rq_of_dl_rq(dl_rq), 1); +- walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); + + dec_dl_deadline(dl_rq, 
dl_se->deadline); + dec_dl_migration(dl_se, dl_rq); +@@ -2763,9 +2760,6 @@ DEFINE_SCHED_CLASS(dl) = { + #ifdef CONFIG_SCHED_CORE + .task_is_throttled = task_is_throttled_dl, + #endif +-#ifdef CONFIG_SCHED_WALT +- .fixup_walt_sched_stats = fixup_walt_sched_stats_common, +-#endif + }; + + /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */ +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 26763caf7..115e266db 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -6,7 +6,6 @@ + * + * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar + */ +-#include "sched.h" + + /* + * This allows printing both to /proc/sched_debug and +@@ -793,17 +792,6 @@ do { \ + SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr))); + PN(clock); + PN(clock_task); +-#ifdef CONFIG_SCHED_WALT +- P(cluster->load_scale_factor); +- P(cluster->capacity); +- P(cluster->max_possible_capacity); +- P(cluster->efficiency); +- P(cluster->cur_freq); +- P(cluster->max_freq); +- P(cluster->exec_scale_factor); +- SEQ_printf(m, " .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg", +- rq->walt_stats.cumulative_runnable_avg_scaled); +-#endif + #undef P + #undef PN + +@@ -878,12 +866,6 @@ static void sched_debug_header(struct seq_file *m) + PN(sysctl_sched_base_slice); + P(sysctl_sched_child_runs_first); + P(sysctl_sched_features); +-#ifdef CONFIG_SCHED_WALT +- P(sched_init_task_load_windows); +- P(min_capacity); +- P(max_capacity); +- P(sched_ravg_window); +-#endif + #undef PN + #undef P + +@@ -1057,9 +1039,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + P_SCHEDSTAT(nr_wakeups_affine_attempts); + P_SCHEDSTAT(nr_wakeups_passive); + P_SCHEDSTAT(nr_wakeups_idle); +-#ifdef CONFIG_SCHED_WALT +- P(ravg.demand); +-#endif + + avg_atom = p->se.sum_exec_runtime; + if (nr_switches) +@@ -1107,9 +1086,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + #endif + P(policy); + P(prio); +-#ifdef CONFIG_SCHED_LATENCY_NICE +- P(latency_prio); +-#endif + if (task_has_dl_policy(p)) { + P(dl.runtime); + P(dl.deadline); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 76732f1af..2808dbdd0 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -52,58 +52,10 @@ + #include + + #include +-#ifdef CONFIG_SCHED_RTG +-#include +-#endif + + #include "sched.h" + #include "stats.h" + #include "autogroup.h" +-#include "walt.h" +-#include "rtg/rtg.h" +- +-#ifdef CONFIG_SCHED_WALT +-static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, +- u16 updated_demand_scaled); +-#endif +- +-#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH) +-static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq); +-static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, +- struct task_struct *p); +-static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, +- struct task_struct *p); +-static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats, +- struct cfs_rq *cfs_rq); +-static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats, +- struct cfs_rq *cfs_rq); +-#else +-static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) {} +-static inline void +-walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {} +-static inline void +-walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {} +- +-#define walt_inc_throttled_cfs_rq_stats(...) +-#define walt_dec_throttled_cfs_rq_stats(...) 
+- +-#endif +- +-/* +- * Targeted preemption latency for CPU-bound tasks: +- * +- * NOTE: this latency value is not the same as the concept of +- * 'timeslice length' - timeslices in CFS are of variable length +- * and have no persistent notion like in traditional, time-slice +- * based scheduling concepts. +- * +- * (to see the precise effective timeslice length of your workload, +- * run vmstat and monitor the context-switches (cs) field) +- * +- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) +- */ +-unsigned int sysctl_sched_latency = 6000000ULL; +-static unsigned int normalized_sysctl_sched_latency = 6000000ULL; + + /* + * The initial- and re-scaling of tunables is configurable +@@ -126,29 +78,12 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; + unsigned int sysctl_sched_base_slice = 750000ULL; + static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; + +-/* +- * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity +- */ +-static unsigned int sched_nr_latency = 8; +- + /* + * After fork, child runs first. If set to 0 (default) then + * parent will (try to) run first. + */ + unsigned int sysctl_sched_child_runs_first __read_mostly; + +-/* +- * SCHED_OTHER wake-up granularity. +- * +- * This option delays the preemption effects of decoupled workloads +- * and reduces their over-scheduling. Synchronous workloads will still +- * have immediate wakeup/sleep latencies. +- * +- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) +- */ +-unsigned int sysctl_sched_wakeup_granularity = 1000000UL; +-static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; +- + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + + int sched_thermal_decay_shift; +@@ -303,8 +238,6 @@ static void update_sysctl(void) + #define SET_SYSCTL(name) \ + (sysctl_##name = (factor) * normalized_sysctl_##name) + SET_SYSCTL(sched_base_slice); +- SET_SYSCTL(sched_latency); +- SET_SYSCTL(sched_wakeup_granularity); + #undef SET_SYSCTL + } + +@@ -1076,8 +1009,6 @@ int sched_update_scaling(void) + #define WRT_SYSCTL(name) \ + (normalized_sysctl_##name = sysctl_##name / (factor)) + WRT_SYSCTL(sched_base_slice); +- WRT_SYSCTL(sched_latency); +- WRT_SYSCTL(sched_wakeup_granularity); + #undef WRT_SYSCTL + + return 0; +@@ -4902,10 +4833,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); + + static inline unsigned long task_util(struct task_struct *p) + { +-#ifdef CONFIG_SCHED_WALT +- if (likely(!walt_disabled && sysctl_sched_use_walt_task_util)) +- return p->ravg.demand_scaled; +-#endif + return READ_ONCE(p->se.avg.util_avg); + } + +@@ -4918,41 +4845,9 @@ static inline unsigned long _task_util_est(struct task_struct *p) + + static inline unsigned long task_util_est(struct task_struct *p) + { +-#ifdef CONFIG_SCHED_WALT +- if (likely(!walt_disabled && sysctl_sched_use_walt_task_util)) +- return p->ravg.demand_scaled; +-#endif + return max(task_util(p), _task_util_est(p)); + } + +-#ifdef CONFIG_UCLAMP_TASK +-#ifdef CONFIG_SCHED_RT_CAS +-unsigned long uclamp_task_util(struct task_struct *p, +- unsigned long uclamp_min, +- unsigned long uclamp_max) +-#else +-static inline unsigned long uclamp_task_util(struct task_struct *p, +- unsigned long uclamp_min, +- unsigned long uclamp_max) +-#endif +-{ +- return clamp(task_util_est(p), uclamp_min, uclamp_max); +-} +-#else +-#ifdef CONFIG_SCHED_RT_CAS +-unsigned long uclamp_task_util(struct task_struct *p, +- unsigned long uclamp_min, +- unsigned long uclamp_max) +-#else 
+-static inline unsigned long uclamp_task_util(struct task_struct *p, +- unsigned long uclamp_min, +- unsigned long uclamp_max) +-#endif +-{ +- return task_util_est(p); +-} +-#endif +- + static inline void util_est_enqueue(struct cfs_rq *cfs_rq, + struct task_struct *p) + { +@@ -5217,26 +5112,8 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu) + return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0); + } + +-#ifdef CONFIG_SCHED_RTG +-bool task_fits_max(struct task_struct *p, int cpu) +-{ +- unsigned long capacity = capacity_orig_of(cpu); +- unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity; +- +- if (capacity == max_capacity) +- return true; +- +- return task_fits_cpu(p, cpu); +-} +-#endif +- + static inline void update_misfit_status(struct task_struct *p, struct rq *rq) + { +- bool task_fits = false; +-#ifdef CONFIG_SCHED_RTG +- int cpu = cpu_of(rq); +- struct cpumask *rtg_target = NULL; +-#endif + if (!sched_asym_cpucap_active()) + return; + +@@ -5245,17 +5122,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) + return; + } + +-#ifdef CONFIG_SCHED_RTG +- rtg_target = find_rtg_target(p); +- if (rtg_target) +- task_fits = capacity_orig_of(cpu) >= +- capacity_orig_of(cpumask_first(rtg_target)); +- else +- task_fits = task_fits_cpu(p, cpu_of(rq)); +-#else +- task_fits = task_fits_cpu(p, cpu_of(rq)); +-#endif +- if (task_fits) { ++ if (task_fits_cpu(p, cpu_of(rq))) { + rq->misfit_task_load = 0; + return; + } +@@ -5598,9 +5465,6 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) + se->prev_sum_exec_runtime = se->sum_exec_runtime; + } + +-static int +-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); +- + /* + * Pick the next process, keeping these things in mind, in this order: + * 1) keep things fair between processes/task groups +@@ -5971,12 +5835,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) + + qcfs_rq->h_nr_running -= task_delta; + qcfs_rq->idle_h_nr_running -= idle_task_delta; +- walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq); + } + + /* At this point se is NULL and we are at root level*/ + sub_nr_running(rq, task_delta); +- walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq); + + done: + /* +@@ -5996,7 +5858,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + struct sched_entity *se; + long task_delta, idle_task_delta; +- struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq; + + se = cfs_rq->tg->se[cpu_of(rq)]; + +@@ -6043,7 +5904,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) + + qcfs_rq->h_nr_running += task_delta; + qcfs_rq->idle_h_nr_running += idle_task_delta; +- walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq); + + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(qcfs_rq)) +@@ -6061,7 +5921,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) + + qcfs_rq->h_nr_running += task_delta; + qcfs_rq->idle_h_nr_running += idle_task_delta; +- walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq); + + /* end evaluation on encountering a throttled cfs_rq */ + if (cfs_rq_throttled(qcfs_rq)) +@@ -6070,7 +5929,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) + + /* At this point se is NULL and we are at root level*/ + add_nr_running(rq, task_delta); +- walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq); + + unthrottle_throttle: + assert_list_leaf_cfs_rq(rq); +@@ -6555,7 +6413,6 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) + 
#ifdef CONFIG_SMP + INIT_LIST_HEAD(&cfs_rq->throttled_csd_list); + #endif +- walt_init_cfs_rq_stats(cfs_rq); + } + + void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +@@ -6870,37 +6727,6 @@ static int sched_idle_cpu(int cpu) + } + #endif + +-static void set_next_buddy(struct sched_entity *se); +- +-#ifdef CONFIG_SCHED_LATENCY_NICE +-static void check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se) +-{ +- struct sched_entity *next; +- +- if (se->latency_weight <= 0) +- return; +- +- if (cfs->nr_running <= 1) +- return; +- /* +- * When waking from idle, we don't need to check to preempt at wakeup +- * the idle thread and don't set next buddy as a candidate for being +- * picked in priority. +- * In case of simultaneous wakeup from idle, the latency sensitive tasks +- * lost opportunity to preempt non sensitive tasks which woke up +- * simultaneously. +- */ +- +- if (cfs->next) +- next = cfs->next; +- else +- next = __pick_first_entity(cfs); +- +- if (next && wakeup_preempt_entity(next, se) == 1) +- set_next_buddy(se); +-} +-#endif +- + /* + * The enqueue_task method is called before nr_running is + * increased. Here we update the fair scheduling stats and +@@ -6938,7 +6764,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + cfs_rq->h_nr_running++; + cfs_rq->idle_h_nr_running += idle_h_nr_running; +- walt_inc_cfs_rq_stats(cfs_rq, p); ++ + if (cfs_rq_is_idle(cfs_rq)) + idle_h_nr_running = 1; + +@@ -6958,7 +6784,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + cfs_rq->h_nr_running++; + cfs_rq->idle_h_nr_running += idle_h_nr_running; +- walt_inc_cfs_rq_stats(cfs_rq, p); ++ + if (cfs_rq_is_idle(cfs_rq)) + idle_h_nr_running = 1; + +@@ -6969,7 +6795,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + /* At this point se is NULL and we are at root level*/ + add_nr_running(rq, 1); +- inc_rq_walt_stats(rq, p); ++ + /* + * Since new tasks are assigned an initial util_avg equal to + * half of the spare capacity of their CPU, tiny tasks have the +@@ -6987,11 +6813,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) + if (!task_new) + check_update_overutilized_status(rq); + +-#ifdef CONFIG_SCHED_LATENCY_NICE +- if (rq->curr == rq->idle) +- check_preempt_from_idle(cfs_rq_of(&p->se), &p->se); +-#endif +- + enqueue_throttle: + assert_list_leaf_cfs_rq(rq); + +@@ -7021,7 +6842,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + cfs_rq->h_nr_running--; + cfs_rq->idle_h_nr_running -= idle_h_nr_running; +- walt_dec_cfs_rq_stats(cfs_rq, p); ++ + if (cfs_rq_is_idle(cfs_rq)) + idle_h_nr_running = 1; + +@@ -7053,7 +6874,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + cfs_rq->h_nr_running--; + cfs_rq->idle_h_nr_running -= idle_h_nr_running; +- walt_dec_cfs_rq_stats(cfs_rq, p); ++ + if (cfs_rq_is_idle(cfs_rq)) + idle_h_nr_running = 1; + +@@ -7065,7 +6886,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + + /* At this point se is NULL and we are at root level*/ + sub_nr_running(rq, 1); +- dec_rq_walt_stats(rq, p); + + /* balance early to pull high priority tasks */ + if (unlikely(!was_sched_idle && sched_idle_rq(rq))) +@@ -7333,9 +7153,6 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this + for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { + struct rq *rq = cpu_rq(i); + +- if (cpu_isolated(i)) +- continue; +- + if (!sched_core_cookie_match(rq, p)) + 
continue; + +@@ -7497,10 +7314,6 @@ void __update_idle_core(struct rq *rq) + */ + static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) + { +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(cpus, cpus, cpu_isolated_mask); +-#endif + bool idle = true; + int cpu; + +@@ -7541,8 +7354,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t + * Check if the CPU is in the LLC scheduling domain of @target. + * Due to isolcpus, there is no guarantee that all the siblings are in the domain. + */ +- if (cpu_isolated(cpu)) +- continue; + if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) + continue; + if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) +@@ -7644,8 +7455,6 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool + } else { + if (!--nr) + return -1; +- if (cpu_isolated(cpu)) +- continue; + idle_cpu = __select_idle_cpu(cpu, p); + if ((unsigned int)idle_cpu < nr_cpumask_bits) + break; +@@ -7693,9 +7502,6 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) + for_each_cpu_wrap(cpu, cpus, target) { + unsigned long cpu_cap = capacity_of(cpu); + +- if (cpu_isolated(cpu)) +- continue; +- + if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) + continue; + +@@ -7768,15 +7574,15 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) + lockdep_assert_irqs_disabled(); + + if ((available_idle_cpu(target) || sched_idle_cpu(target)) && +- !cpu_isolated(target) && asym_fits_cpu(task_util, util_min, util_max, target)) ++ asym_fits_cpu(task_util, util_min, util_max, target)) + return target; + + /* + * If the previous CPU is cache affine and idle, don't be stupid: + */ + if (prev != target && cpus_share_cache(prev, target) && +- ((available_idle_cpu(prev) || sched_idle_cpu(prev)) && +- !cpu_isolated(target) && asym_fits_cpu(task_util, util_min, util_max, prev))) ++ (available_idle_cpu(prev) || sched_idle_cpu(prev)) && ++ asym_fits_cpu(task_util, util_min, util_max, prev)) + return prev; + + /* +@@ -7896,16 +7702,6 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) + unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); + unsigned long runnable; + +-#ifdef CONFIG_SCHED_WALT +- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) { +- u64 walt_cpu_util = +- cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled; +- +- return min_t(unsigned long, walt_cpu_util, +- capacity_orig_of(cpu)); +- } +-#endif +- + if (boost) { + runnable = READ_ONCE(cfs_rq->avg.runnable_avg); + util = max(util, runnable); +@@ -7989,29 +7785,11 @@ unsigned long cpu_util_cfs_boost(int cpu) + */ + static unsigned long cpu_util_without(int cpu, struct task_struct *p) + { +- unsigned int util; +-#ifdef CONFIG_SCHED_WALT +- /* +- * WALT does not decay idle tasks in the same manner +- * as PELT, so it makes little sense to subtract task +- * utilization from cpu utilization. Instead just use +- * cpu_util for this case. 
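Stripped of the PELT/WALT distinction discussed above, what cpu_util_without() is after is simple: the CPU's utilization with one task's contribution removed, clamped to a sane range. A toy version with made-up numbers — the signature and parameter names are assumptions for illustration, not the kernel's:

#include <stdio.h>

static long clamp_l(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned long cpu_util_without(unsigned long cpu_util,
				      unsigned long task_util,
				      unsigned long capacity,
				      int task_counted_here)
{
	/* a task queued elsewhere contributes nothing to this CPU */
	if (!task_counted_here)
		return (unsigned long)clamp_l((long)cpu_util, 0, (long)capacity);
	return (unsigned long)clamp_l((long)cpu_util - (long)task_util,
				      0, (long)capacity);
}

int main(void)
{
	/* CPU at util 300 of capacity 1024; the waking task accounts for 120 */
	printf("%lu\n", cpu_util_without(300, 120, 1024, 1));	/* -> 180 */
	printf("%lu\n", cpu_util_without(100, 120, 1024, 1));	/* -> 0, clamped */
	return 0;
}

The clamp matters on both ends: stale averages can make the subtraction go negative, and boosted estimates can exceed the CPU's capacity.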
+@@ -7989,29 +7785,11 @@ unsigned long cpu_util_cfs_boost(int cpu)
+ */
+ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+ {
+- unsigned int util;
+-#ifdef CONFIG_SCHED_WALT
+- /*
+- * WALT does not decay idle tasks in the same manner
+- * as PELT, so it makes little sense to subtract task
+- * utilization from cpu utilization. Instead just use
+- * cpu_util for this case.
+- */
+- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) &&
+- p->__state == TASK_WAKING)
+- return cpu_util_cfs(cpu);
+-#endif
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+ p = NULL;
+
+ return cpu_util(cpu, p, -1, 0);
+-#ifdef CONFIG_SCHED_WALT
+- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
+- util = max_t(long, cpu_util_cfs(cpu) - task_util(p), 0);
+- return min_t(unsigned long, util, capacity_orig_of(cpu));
+- }
+-#endif
+ }
+
+ /*
+@@ -8050,13 +7828,6 @@ static inline void eenv_task_busy_time(struct energy_env *eenv,
+ eenv->task_busy_time = busy_time;
+ }
+
+-#ifdef CONFIG_SCHED_RTG
+-unsigned long capacity_spare_without(int cpu, struct task_struct *p)
+-{
+- return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
+-}
+-#endif
+-
+ /*
+ * Compute the perf_domain (PD) busy time for compute_energy(). Based on the
+ * utilization for each @pd_cpus, it however doesn't take into account
+@@ -8094,18 +7865,6 @@ static inline void eenv_pd_busy_time(struct energy_env *eenv,
+ eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
+ }
+
+-/*
+- * Returns the current capacity of cpu after applying both
+- * cpu and freq scaling.
+- */
+-unsigned long capacity_curr_of(int cpu)
+-{
+- unsigned long max_cap = cpu_rq(cpu)->cpu_capacity_orig;
+- unsigned long scale_freq = arch_scale_freq_capacity(cpu);
+-
+- return cap_scale(max_cap, scale_freq);
+-}
+-
+ /*
+ * Compute the maximum utilization for compute_energy() when the task @p
+ * is placed on the cpu @dst_cpu.
+@@ -8404,12 +8163,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+ int want_affine = 0;
+ /* SD_flags and WF_flags share the first nibble */
+ int sd_flag = wake_flags & 0xF;
+-#ifdef CONFIG_SCHED_RTG
+- int target_cpu = -1;
+- target_cpu = find_rtg_cpu(p);
+- if (target_cpu >= 0)
+- return target_cpu;
+-#endif
+
+ /*
+ * required for stable ->cpus_allowed
+@@ -8516,93 +8269,6 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ }
+ #endif /* CONFIG_SMP */
+
+-#ifdef CONFIG_SCHED_LATENCY_NICE
+-static long wakeup_latency_gran(struct sched_entity *curr, struct sched_entity *se)
+-{
+- int latency_weight = se->latency_weight;
+- long thresh = sysctl_sched_latency;
+-
+- /*
+- * A positive latency weigth means that the sched_entity has latency
+- * requirement that needs to be evaluated versus other entity.
+- * Otherwise, use the latency weight to evaluate how much scheduling
+- * delay is acceptable by se.
+- */
+- if ((se->latency_weight > 0) || (curr->latency_weight > 0))
+- latency_weight -= curr->latency_weight;
+-
+- if (!latency_weight)
+- return 0;
+-
+- if (sched_feat(GENTLE_FAIR_SLEEPERS))
+- thresh >>= 1;
+-
+- /*
+- * Clamp the delta to stay in the scheduler period range
+- * [-sysctl_sched_latency:sysctl_sched_latency]
+- */
+- latency_weight = clamp_t(long, latency_weight,
+- -1 * NICE_LATENCY_WEIGHT_MAX,
+- NICE_LATENCY_WEIGHT_MAX);
+-
+- return (thresh * latency_weight) >> NICE_LATENCY_SHIFT;
+-}
+-#endif
+-
+-static unsigned long wakeup_gran(struct sched_entity *se)
+-{
+- unsigned long gran = sysctl_sched_wakeup_granularity;
+-
+- /*
+- * Since its curr running now, convert the gran from real-time
+- * to virtual-time in his units.
+- *
+- * By using 'se' instead of 'curr' we penalize light tasks, so
+- * they get preempted easier. That is, if 'se' < 'curr' then
+- * the resulting gran will be larger, therefore penalizing the
+- * lighter, if otoh 'se' > 'curr' then the resulting gran will
+- * be smaller, again penalizing the lighter task.
+- *
+- * This is especially important for buddies when the leftmost
+- * task is higher priority than the buddy.
+- */
+- return calc_delta_fair(gran, se);
+-}
+-
+-/*
+- * Should 'se' preempt 'curr'.
+- *
+- *             |s1
+- *        |s2
+- *   |s3
+- *         g
+- *      |<--->|c
+- *
+- *  w(c, s1) = -1
+- *  w(c, s2) =  0
+- *  w(c, s3) =  1
+- *
+- */
+-static int
+-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+-{
+- s64 gran, vdiff = curr->vruntime - se->vruntime;
+-
+-#ifdef CONFIG_SCHED_LATENCY_NICE
+- /* Take into account latency priority */
+- vdiff += wakeup_latency_gran(curr, se);
+-#endif
+-
+- if (vdiff <= 0)
+- return -1;
+-
+- gran = wakeup_gran(se);
+- if (vdiff > gran)
+- return 1;
+-
+- return 0;
+-}
+-
+ static void set_next_buddy(struct sched_entity *se)
+ {
+ for_each_sched_entity(se) {
+@@ -9116,9 +8782,7 @@ enum migration_type {
+ #define LBF_DST_PINNED 0x04
+ #define LBF_SOME_PINNED 0x08
+ #define LBF_ACTIVE_LB 0x10
+-#ifdef CONFIG_SCHED_RTG
+-#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
+-#endif
++
+ struct lb_env {
+ struct sched_domain *sd;
+
+@@ -9312,13 +8976,6 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ /* Record that we found at least one task that could run on dst_cpu */
+ env->flags &= ~LBF_ALL_PINNED;
+
+-
+-#ifdef CONFIG_SCHED_RTG
+- if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+- !preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
+- return 0;
+-#endif
+-
+ if (task_on_cpu(env->src_rq, p)) {
+ schedstat_inc(p->stats.nr_failed_migrations_running);
+ return 0;
+@@ -9363,15 +9020,7 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
+ }
+
+ deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+-#ifdef CONFIG_SCHED_WALT
+- double_lock_balance(env->src_rq, env->dst_rq);
+- if (!(env->src_rq->clock_update_flags & RQCF_UPDATED))
+- update_rq_clock(env->src_rq);
+-#endif
+ set_task_cpu(p, env->dst_cpu);
+-#ifdef CONFIG_SCHED_WALT
+- double_unlock_balance(env->src_rq, env->dst_rq);
+-#endif
+ }
+
+ /*
+@@ -9417,9 +9066,6 @@ static int detach_tasks(struct lb_env *env)
+ unsigned long util, load;
+ struct task_struct *p;
+ int detached = 0;
+-#ifdef CONFIG_SCHED_RTG
+- int orig_loop = env->loop;
+-#endif
+
+ lockdep_assert_rq_held(env->src_rq);
+
+@@ -9435,12 +9081,6 @@ static int detach_tasks(struct lb_env *env)
+ if (env->imbalance <= 0)
+ return 0;
+
+-#ifdef CONFIG_SCHED_RTG
+- if (!same_cluster(env->dst_cpu, env->src_cpu))
+- env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
+-
+-redo:
+-#endif
+ while (!list_empty(tasks)) {
+ /*
+ * We don't want to steal all, otherwise we may be treated likewise,
+@@ -9545,15 +9185,6 @@ static int detach_tasks(struct lb_env *env)
+ list_move(&p->se.group_node, tasks);
+ }
+
+-#ifdef CONFIG_SCHED_RTG
+- if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && !detached) {
+- tasks = &env->src_rq->cfs_tasks;
+- env->flags &= ~LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
+- env->loop = orig_loop;
+- goto redo;
+- }
+-#endif
+-
+ /*
+ * Right now, this is one of only two places we collect this stat
+ * so we can safely collect detach_one_task() stats here rather
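For reference, the wakeup_preempt_entity() helper removed earlier in this file returned -1/0/1 exactly as in its s1/s2/s3 diagram. A self-contained model with worked numbers (vruntimes and the granularity are invented; the latency-nice adjustment is omitted):

/* Worked model of the deleted wakeup_preempt_entity(): gran is assumed to
 * already be in vruntime units, as after calc_delta_fair(). */
#include <stdio.h>

static int wakeup_preempt(long curr_vruntime, long se_vruntime, long gran)
{
	long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)		/* s1: waker not behind curr -> no preempt */
		return -1;
	if (vdiff > gran)	/* s3: lead exceeds the granularity -> preempt */
		return 1;
	return 0;		/* s2: within the granularity window */
}

int main(void)
{
	long gran = 1000;
	printf("%d %d %d\n",
	       wakeup_preempt(500, 700, gran),	/* -1 */
	       wakeup_preempt(500, 0, gran),	/*  0 */
	       wakeup_preempt(2000, 0, gran));	/*  1 */
	return 0;
}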
+@@ -9955,9 +9586,6 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
+ for_each_cpu(cpu, sched_group_span(sdg)) {
+ unsigned long cpu_cap = capacity_of(cpu);
+
+- if (cpu_isolated(cpu))
+- continue;
+-
+ capacity += cpu_cap;
+ min_capacity = min(cpu_cap, min_capacity);
+ max_capacity = max(cpu_cap, max_capacity);
+@@ -9971,16 +9599,10 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
+ group = child->groups;
+ do {
+ struct sched_group_capacity *sgc = group->sgc;
+- __maybe_unused cpumask_t *cpus =
+- sched_group_span(group);
+-
+- if (!cpu_isolated(cpumask_first(cpus))) {
+- capacity += sgc->capacity;
+- min_capacity = min(sgc->min_capacity,
+- min_capacity);
+- max_capacity = max(sgc->max_capacity,
+- max_capacity);
+- }
++
++ capacity += sgc->capacity;
++ min_capacity = min(sgc->min_capacity, min_capacity);
++ max_capacity = max(sgc->max_capacity, max_capacity);
+ group = group->next;
+ } while (group != child->groups);
+ }
+@@ -10289,8 +9911,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
+ for_each_cpu_and(i, sched_group_span(group), env->cpus) {
+ struct rq *rq = cpu_rq(i);
+ unsigned long load = cpu_load(rq);
+- if (cpu_isolated(i))
+- continue;
+
+ sgs->group_load += load;
+ sgs->group_util += cpu_util_cfs(i);
+@@ -10340,15 +9960,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
+
+ sgs->group_weight = group->group_weight;
+
+- /* Isolated CPU has no weight */
+- if (!group->group_weight) {
+- sgs->group_capacity = 0;
+- sgs->avg_load = 0;
+- sgs->group_type = group_has_spare;
+- sgs->group_weight = group->group_weight;
+- return;
+- }
+-
+ /* Check if dst CPU is idle and preferred to this group */
+ if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
+ env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+@@ -10734,22 +10345,13 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+ .avg_load = UINT_MAX,
+ .group_type = group_overloaded,
+ };
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+- cpumask_t allowed_cpus;
+
+- cpumask_andnot(&allowed_cpus, p->cpus_ptr, cpu_isolated_mask);
+-#endif
+ do {
+ int local_group;
+
+ /* Skip over this group if it has no CPUs allowed */
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+- if (!cpumask_intersects(sched_group_span(group),
+- &allowed_cpus))
+-#else
+ if (!cpumask_intersects(sched_group_span(group),
+ p->cpus_ptr))
+-#endif
+ continue;
+
+ /* Skip over this group if no cookie matched */
+@@ -11430,9 +11032,6 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+ if (rt > env->fbq_type)
+ continue;
+
+- if (cpu_isolated(i))
+- continue;
+-
+ nr_running = rq->cfs.h_nr_running;
+ if (!nr_running)
+ continue;
+@@ -11608,16 +11207,6 @@ static int need_active_balance(struct lb_env *env)
+ return 0;
+ }
+
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+-int group_balance_cpu_not_isolated(struct sched_group *sg)
+-{
+- cpumask_t cpus;
+-
+- cpumask_and(&cpus, sched_group_span(sg), group_balance_mask(sg));
+- cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
+- return cpumask_first(&cpus);
+-}
+-#endif
+ static int active_load_balance_cpu_stop(void *data);
+
+ static int should_we_balance(struct lb_env *env)
+@@ -11649,7 +11238,7 @@ static int should_we_balance(struct lb_env *env)
+ cpumask_copy(swb_cpus, group_balance_mask(sg));
+ /* Try to find first idle CPU */
+ for_each_cpu_and(cpu, swb_cpus, env->cpus) {
+- if (!idle_cpu(cpu) || cpu_isolated(cpu))
++ if (!idle_cpu(cpu))
+ continue;
+
+ /*
+@@ -11683,7 +11272,7 @@ static int should_we_balance(struct lb_env *env)
+ return idle_smt == env->dst_cpu;
+
+ /* Are we the first CPU of this group ? */
+- return group_balance_cpu_not_isolated(sg) == env->dst_cpu;
++ return group_balance_cpu(sg) == env->dst_cpu;
+ }
+
+ /*
+@@ -11886,8 +11475,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ * ->active_balance_work. Once set, it's cleared
+ * only after active load balance is finished.
+ */
+- if (!busiest->active_balance &&
+- !cpu_isolated(cpu_of(busiest))) {
++ if (!busiest->active_balance) {
+ busiest->active_balance = 1;
+ busiest->push_cpu = this_cpu;
+ active_balance = 1;
+@@ -12006,13 +11594,9 @@ static int active_load_balance_cpu_stop(void *data)
+ int busiest_cpu = cpu_of(busiest_rq);
+ int target_cpu = busiest_rq->push_cpu;
+ struct rq *target_rq = cpu_rq(target_cpu);
+- struct sched_domain *sd = NULL;
++ struct sched_domain *sd;
+ struct task_struct *p = NULL;
+ struct rq_flags rf;
+-#ifdef CONFIG_SCHED_EAS
+- struct task_struct *push_task;
+- int push_task_detached = 0;
+-#endif
+
+ rq_lock_irq(busiest_rq, &rf);
+ /*
+@@ -12038,31 +11622,6 @@ static int active_load_balance_cpu_stop(void *data)
+ * Bjorn Helgaas on a 128-CPU setup.
+ */
+ WARN_ON_ONCE(busiest_rq == target_rq);
+-#ifdef CONFIG_SCHED_EAS
+- push_task = busiest_rq->push_task;
+- target_cpu = busiest_rq->push_cpu;
+- if (push_task) {
+- struct lb_env env = {
+- .sd = sd,
+- .dst_cpu = target_cpu,
+- .dst_rq = target_rq,
+- .src_cpu = busiest_rq->cpu,
+- .src_rq = busiest_rq,
+- .idle = CPU_IDLE,
+- .flags = 0,
+- .loop = 0,
+- };
+- if (task_on_rq_queued(push_task) &&
+- push_task->__state == TASK_RUNNING &&
+- task_cpu(push_task) == busiest_cpu &&
+- cpu_online(target_cpu)) {
+- update_rq_clock(busiest_rq);
+- detach_task(push_task, &env);
+- push_task_detached = 1;
+- }
+- goto out_unlock;
+- }
+-#endif
+
+ /* Search for an sd spanning us and the target CPU. */
+ rcu_read_lock();
+@@ -12097,23 +11656,8 @@ static int active_load_balance_cpu_stop(void *data)
+ rcu_read_unlock();
+ out_unlock:
+ busiest_rq->active_balance = 0;
+-
+-#ifdef CONFIG_SCHED_EAS
+- push_task = busiest_rq->push_task;
+- if (push_task)
+- busiest_rq->push_task = NULL;
+-#endif
+ rq_unlock(busiest_rq, &rf);
+
+-#ifdef CONFIG_SCHED_EAS
+- if (push_task) {
+- if (push_task_detached)
+- attach_one_task(target_rq, push_task);
+-
+- put_task_struct(push_task);
+- }
+-#endif
+-
+ if (p)
+ attach_one_task(target_rq, p);
+
+@@ -12130,17 +11674,7 @@ static DEFINE_SPINLOCK(balancing);
+ */
+ void update_max_interval(void)
+ {
+- unsigned int available_cpus;
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+- cpumask_t avail_mask;
+-
+- cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask);
+- available_cpus = cpumask_weight(&avail_mask);
+-#else
+- available_cpus = num_online_cpus();
+-#endif
+-
+- max_load_balance_interval = HZ*available_cpus/10;
++ max_load_balance_interval = HZ*num_online_cpus()/10;
+ }
+
+ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
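The update_max_interval() hunk above replaces the isolation-aware CPU count with num_online_cpus(); the cap stays HZ*cpus/10. A worked example under assumed values (HZ=1000, 8 CPUs online):

/* Worked arithmetic for the rewritten update_max_interval(): with HZ=1000
 * and 8 online CPUs the cap becomes 1000*8/10 = 800 jiffies. */
#include <stdio.h>

int main(void)
{
	unsigned int hz = 1000, online = 8;	/* assumed values */
	printf("max_load_balance_interval = %u jiffies\n", hz * online / 10);
	return 0;
}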
+@@ -12279,9 +11813,6 @@ static inline int find_new_ilb(void)
+
+ for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
+
+- if (cpu_isolated(ilb))
+- continue;
+-
+ if (ilb == smp_processor_id())
+ continue;
+
+@@ -12339,7 +11870,6 @@ static void nohz_balancer_kick(struct rq *rq)
+ struct sched_domain *sd;
+ int nr_busy, i, cpu = rq->cpu;
+ unsigned int flags = 0;
+- cpumask_t cpumask;
+
+ if (unlikely(rq->idle_balance))
+ return;
+@@ -12354,15 +11884,8 @@ static void nohz_balancer_kick(struct rq *rq)
+ * None are in tickless mode and hence no need for NOHZ idle load
+ * balancing.
+ */
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+- cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
+- if (cpumask_empty(&cpumask))
+- return;
+-#else
+- cpumask_copy(&cpumask, nohz.idle_cpus_mask);
+ if (likely(!atomic_read(&nohz.nr_cpus)))
+ return;
+-#endif
+
+ if (READ_ONCE(nohz.has_blocked) &&
+ time_after(now, READ_ONCE(nohz.next_blocked)))
+@@ -12401,7 +11924,7 @@ static void nohz_balancer_kick(struct rq *rq)
+ * When balancing betwen cores, all the SMT siblings of the
+ * preferred CPU must be idle.
+ */
+- for_each_cpu_and(i, sched_domain_span(sd), &cpumask) {
++ for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
+ if (sched_use_asym_prio(sd, i) &&
+ sched_asym_prefer(i, cpu)) {
+ flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
+@@ -12598,7 +12121,6 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
+ int this_cpu = this_rq->cpu;
+ int balance_cpu;
+ struct rq *rq;
+- cpumask_t cpus;
+
+ SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
+
+@@ -12623,17 +12145,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
+ */
+ smp_mb();
+
+-#ifdef CONFIG_CPU_ISOLATION_OPT
+- cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
+-#else
+- cpumask_copy(&cpus, nohz.idle_cpus_mask);
+-#endif
+-
+ /*
+ * Start with the next CPU after this_cpu so we will end with this_cpu and let a
+ * chance for other idle cpu to pull load.
+ */
+- for_each_cpu_wrap(balance_cpu, &cpus, this_cpu+1) {
++ for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
+ if (!idle_cpu(balance_cpu))
+ continue;
+
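The _nohz_idle_balance() hunk above keeps the wrap iteration but now walks nohz.idle_cpus_mask directly: starting at this_cpu+1 guarantees this_cpu is visited last. A userspace model of that visiting order (the mask contents are invented):

/* Model of for_each_cpu_wrap(balance_cpu, mask, this_cpu+1): members above
 * this_cpu come first, then the walk wraps, ending at this_cpu itself. */
#include <stdio.h>

int main(void)
{
	int mask[] = {0, 2, 5, 6}, n = 4, this_cpu = 5;

	for (int pass = 0; pass < 2; pass++)
		for (int i = 0; i < n; i++)
			if ((pass == 0 && mask[i] > this_cpu) ||
			    (pass == 1 && mask[i] <= this_cpu))
				printf("%d ", mask[i]);	/* prints: 6 0 2 5 */
	printf("\n");
	return 0;
}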
+@@ -12788,9 +12304,6 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+ struct sched_domain *sd;
+ int pulled_task = 0;
+
+- if (cpu_isolated(this_cpu))
+- return 0;
+-
+ update_misfit_status(NULL, this_rq);
+
+ /*
+@@ -12915,14 +12428,6 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
+ enum cpu_idle_type idle = this_rq->idle_balance ?
+ CPU_IDLE : CPU_NOT_IDLE;
+
+- /*
+- * Since core isolation doesn't update nohz.idle_cpus_mask, there
+- * is a possibility this nohz kicked cpu could be isolated. Hence
+- * return if the cpu is isolated.
+- */
+- if (cpu_isolated(this_rq->cpu))
+- return;
+-
+ /*
+ * If this CPU has a pending nohz_balance_kick, then do the
+ * balancing on behalf of the other idle CPUs whose ticks are
+@@ -12948,7 +12453,7 @@ void trigger_load_balance(struct rq *rq)
+ * Don't need to rebalance while attached to NULL domain or
+ * runqueue CPU is not active
+ */
+- if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq)) || !cpu_active(cpu_of(rq)))
++ if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
+ return;
+
+ if (time_after_eq(jiffies, rq->next_balance))
+@@ -12972,98 +12477,6 @@ static void rq_offline_fair(struct rq *rq)
+ unthrottle_offline_cfs_rqs(rq);
+ }
+
+-#ifdef CONFIG_SCHED_EAS
+-static inline int
+-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+-{
+- unsigned long flags;
+- int rc = 0;
+-
+- if (cpu_of(rq) == new_cpu)
+- return rc;
+-
+- /* Invoke active balance to force migrate currently running task */
+- raw_spin_lock_irqsave(&rq->__lock, flags);
+- if (!rq->active_balance) {
+- rq->active_balance = 1;
+- rq->push_cpu = new_cpu;
+- get_task_struct(p);
+- rq->push_task = p;
+- rc = 1;
+- }
+- raw_spin_unlock_irqrestore(&rq->__lock, flags);
+- return rc;
+-}
+-
+-DEFINE_RAW_SPINLOCK(migration_lock);
+-static void check_for_migration_fair(struct rq *rq, struct task_struct *p)
+-{
+- int active_balance;
+- int new_cpu = -1;
+- int prev_cpu = task_cpu(p);
+- int ret;
+-
+-#ifdef CONFIG_SCHED_RTG
+- bool need_down_migrate = false;
+- struct cpumask *rtg_target = find_rtg_target(p);
+-
+- if (rtg_target &&
+- (capacity_orig_of(prev_cpu) >
+- capacity_orig_of(cpumask_first(rtg_target))))
+- need_down_migrate = true;
+-#endif
+-
+- if (rq->misfit_task_load) {
+- if (rq->curr->__state != TASK_RUNNING ||
+- rq->curr->nr_cpus_allowed == 1)
+- return;
+-
+- raw_spin_lock(&migration_lock);
+-#ifdef CONFIG_SCHED_RTG
+- if (rtg_target) {
+- new_cpu = find_rtg_cpu(p);
+-
+- if (new_cpu != -1 && need_down_migrate &&
+- cpumask_test_cpu(new_cpu, rtg_target) &&
+- idle_cpu(new_cpu))
+- goto do_active_balance;
+-
+- if (new_cpu != -1 &&
+- capacity_orig_of(new_cpu) > capacity_orig_of(prev_cpu))
+- goto do_active_balance;
+-
+- goto out_unlock;
+- }
+-#endif
+- rcu_read_lock();
+- new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+- rcu_read_unlock();
+-
+- if (new_cpu == -1 ||
+- capacity_orig_of(new_cpu) <= capacity_orig_of(prev_cpu))
+- goto out_unlock;
+-#ifdef CONFIG_SCHED_RTG
+-do_active_balance:
+-#endif
+- active_balance = kick_active_balance(rq, p, new_cpu);
+- if (active_balance) {
+- mark_reserved(new_cpu);
+- raw_spin_unlock(&migration_lock);
+- ret = stop_one_cpu_nowait(prev_cpu,
+- active_load_balance_cpu_stop, rq,
+- &rq->active_balance_work);
+- if (!ret)
+- clear_reserved(new_cpu);
+- else
+- wake_up_if_idle(new_cpu);
+- return;
+- }
+-out_unlock:
+- raw_spin_unlock(&migration_lock);
+- }
+-}
+-#endif /* CONFIG_SCHED_EAS */
+-
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_SCHED_CORE
+@@ -13767,12 +13180,6 @@ DEFINE_SCHED_CLASS(fair) = {
+ #ifdef CONFIG_UCLAMP_TASK
+ .uclamp_enabled = 1,
+ #endif
+-#ifdef CONFIG_SCHED_WALT
+- .fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
+-#endif
+-#ifdef CONFIG_SCHED_EAS
+- .check_for_migration = check_for_migration_fair,
+-#endif
+ };
+
+ #ifdef CONFIG_SCHED_DEBUG
+@@ -13838,91 +13245,3 @@ __init void init_sched_fair_class(void)
+ #endif /* SMP */
+
+ }
+-
+-/* WALT sched implementation begins here */
+-#ifdef CONFIG_SCHED_WALT
+-
+-#ifdef CONFIG_CFS_BANDWIDTH
+-
+-static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
+-{
+- cfs_rq->walt_stats.cumulative_runnable_avg_scaled = 0;
+-}
+-
+-static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
+-{
+- fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
+- p->ravg.demand_scaled);
+-}
+-
+-static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
+-{
+- fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
+- -(s64)p->ravg.demand_scaled);
+-}
+-
+-static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+- struct cfs_rq *tcfs_rq)
+-{
+- struct rq *rq = rq_of(tcfs_rq);
+-
+- fixup_cumulative_runnable_avg(stats,
+- tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
+-
+- if (stats == &rq->walt_stats)
+- walt_fixup_cum_window_demand(rq,
+- tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
+-
+-}
+-
+-static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+- struct cfs_rq *tcfs_rq)
+-{
+- struct rq *rq = rq_of(tcfs_rq);
+-
+- fixup_cumulative_runnable_avg(stats,
+- -tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
+-
+- /*
+- * We remove the throttled cfs_rq's tasks's contribution from the
+- * cumulative window demand so that the same can be added
+- * unconditionally when the cfs_rq is unthrottled.
+- */
+- if (stats == &rq->walt_stats)
+- walt_fixup_cum_window_demand(rq,
+- -tcfs_rq->walt_stats.cumulative_runnable_avg_scaled);
+-}
+-
+-static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+- u16 updated_demand_scaled)
+-{
+- struct cfs_rq *cfs_rq;
+- struct sched_entity *se = &p->se;
+- s64 task_load_delta = (s64)updated_demand_scaled -
+- p->ravg.demand_scaled;
+-
+- for_each_sched_entity(se) {
+- cfs_rq = cfs_rq_of(se);
+-
+- fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
+- task_load_delta);
+- if (cfs_rq_throttled(cfs_rq))
+- break;
+- }
+-
+- /* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
+- if (!se) {
+- fixup_cumulative_runnable_avg(&rq->walt_stats,
+- task_load_delta);
+- walt_fixup_cum_window_demand(rq, task_load_delta);
+- }
+-}
+-
+-#else /* CONFIG_CFS_BANDWIDTH */
+-static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+- u16 updated_demand_scaled)
+-{
+- fixup_walt_sched_stats_common(rq, p, updated_demand_scaled);
+-}
+-#endif /* CONFIG_CFS_BANDWIDTH */
+-#endif /* CONFIG_SCHED_WALT */
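The deleted walt_fixup_sched_stats_fair() propagated a task's demand change as a signed delta up its cfs_rq hierarchy, stopping at a throttled level. A userspace model with a two-level hierarchy and invented numbers:

/* Model of the deleted WALT stats fixup: apply the demand delta to each
 * level of the hierarchy until a throttled cfs_rq is hit. */
#include <stdio.h>

int main(void)
{
	long cfs_avg[2] = {400, 900};		/* group cfs_rq, root cfs_rq */
	int throttled[2] = {0, 0};		/* neither level throttled here */
	long old_demand = 100, new_demand = 160;
	long delta = new_demand - old_demand;	/* +60 */

	for (int level = 0; level < 2; level++) {
		cfs_avg[level] += delta;
		if (throttled[level])		/* throttled: stop propagating */
			break;
	}
	printf("%ld %ld\n", cfs_avg[0], cfs_avg[1]);	/* 460 960 */
	return 0;
}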
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 68a41a801..f77016823 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -1,12 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
+-/*
+- * Only give sleepers 50% of their service deficit. This allows
+- * them to run sooner, but does not allow tons of sleepers to
+- * rip the spread apart.
+- */
+-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
+-
+ /*
+ * Using the avg_vruntime, do the right thing and preserve lag across
+ * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 8e9b53ce9..b89223a97 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -3,9 +3,7 @@
+ * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
+ * policies)
+ */
+-#include "sched.h"
+
+-#include "walt.h"
+ int sched_rr_timeslice = RR_TIMESLICE;
+ /* More than 4 hours if BW_SHIFT equals 20. */
+ static const u64 max_rt_runtime = MAX_BW;
+@@ -14,14 +12,6 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
+ struct rt_bandwidth def_rt_bandwidth;
+
+-#ifdef CONFIG_SCHED_RT_CAS
+-unsigned int sysctl_sched_enable_rt_cas = 1;
+-#endif
+-
+-#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+-unsigned int sysctl_sched_enable_rt_active_lb = 1;
+-#endif
+-
+ /*
+ * period over which we measure -rt task CPU usage in us.
+ * default: 1s
+@@ -335,8 +325,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
+ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+ {
+ /* Try to pull RT tasks here if we lower this rq's prio */
+- return rq->online && rq->rt.highest_prio.curr > prev->prio &&
+- !cpu_isolated(cpu_of(rq));
++ return rq->online && rq->rt.highest_prio.curr > prev->prio;
+ }
+
+ static inline int rt_overloaded(struct rq *rq)
+@@ -1549,7 +1538,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+ update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
+
+ enqueue_rt_entity(rt_se, flags);
+- walt_inc_cumulative_runnable_avg(rq, p);
+
+ if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+ enqueue_pushable_task(rq, p);
+@@ -1561,7 +1549,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+
+ update_curr_rt(rq);
+ dequeue_rt_entity(rt_se, flags);
+- walt_dec_cumulative_runnable_avg(rq, p);
+
+ dequeue_pushable_task(rq, p);
+ }
+@@ -1648,9 +1635,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
+ test = curr &&
+ unlikely(rt_task(curr)) &&
+ (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+-#ifdef CONFIG_SCHED_RT_CAS
+- test |= sysctl_sched_enable_rt_cas;
+-#endif
+
+ if (test || !rt_task_fits_capacity(p, cpu)) {
+ int target = find_lowest_rq(p);
+@@ -1666,11 +1650,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
+ * Don't bother moving it if the destination CPU is
+ * not running a lower priority task.
+ */
+- if (target != -1 && (
+-#ifdef CONFIG_SCHED_RT_CAS
+- sysctl_sched_enable_rt_cas ||
+-#endif
+- p->prio < cpu_rq(target)->rt.highest_prio.curr))
++ if (target != -1 &&
++ p->prio < cpu_rq(target)->rt.highest_prio.curr)
+ cpu = target;
+ }
+
+@@ -1889,170 +1870,6 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
+ return NULL;
+ }
+
+-#ifdef CONFIG_SCHED_RT_CAS
+-static int find_cas_cpu(struct sched_domain *sd,
+- struct task_struct *task, struct cpumask *lowest_mask)
+-{
+- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+- struct sched_group *sg = NULL;
+- struct sched_group *sg_target = NULL;
+- struct sched_group *sg_backup = NULL;
+- struct cpumask search_cpu, backup_search_cpu;
+- int cpu = -1;
+- int target_cpu = -1;
+- unsigned long cpu_capacity;
+- unsigned long boosted_tutil = uclamp_task_util(task, uclamp_eff_value(task, UCLAMP_MIN), uclamp_eff_value(task, UCLAMP_MAX));
+- unsigned long target_capacity = ULONG_MAX;
+- unsigned long util;
+- unsigned long target_cpu_util = ULONG_MAX;
+- int prev_cpu = task_cpu(task);
+-#ifdef CONFIG_SCHED_RTG
+- struct cpumask *rtg_target = NULL;
+-#endif
+- bool boosted = uclamp_boosted(task);
+-
+- if (!sysctl_sched_enable_rt_cas)
+- return -1;
+-
+- rcu_read_lock();
+-
+-#ifdef CONFIG_SCHED_RTG
+- rtg_target = find_rtg_target(task);
+-#endif
+-
+- sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, 0));
+- if (!sd) {
+- rcu_read_unlock();
+- return -1;
+- }
+-
+- sg = sd->groups;
+- do {
+- if (!cpumask_intersects(lowest_mask, sched_group_span(sg)))
+- continue;
+-
+- if (boosted) {
+- if (cpumask_test_cpu(rd->max_cap_orig_cpu,
+- sched_group_span(sg))) {
+- sg_target = sg;
+- break;
+- }
+- }
+-
+- cpu = group_first_cpu(sg);
+-#ifdef CONFIG_SCHED_RTG
+- /* honor the rtg tasks */
+- if (rtg_target) {
+- if (cpumask_test_cpu(cpu, rtg_target)) {
+- sg_target = sg;
+- break;
+- }
+-
+- /* active LB or big_task favor cpus with more capacity */
+- if (task->__state == TASK_RUNNING || boosted) {
+- if (capacity_orig_of(cpu) >
+- capacity_orig_of(cpumask_any(rtg_target))) {
+- sg_target = sg;
+- break;
+- }
+-
+- sg_backup = sg;
+- continue;
+- }
+- }
+-#endif
+- /*
+- * 1. add margin to support task migration
+- * 2. if task_util is high then all cpus, make sure the
+- * sg_backup with the most powerful cpus is selected
+- */
+- if (!rt_task_fits_capacity(task, cpu)) {
+- sg_backup = sg;
+- continue;
+- }
+-
+- /* support task boost */
+- cpu_capacity = capacity_orig_of(cpu);
+- if (boosted_tutil > cpu_capacity) {
+- sg_backup = sg;
+- continue;
+- }
+-
+- /* sg_target: select the sg with smaller capacity */
+- if (cpu_capacity < target_capacity) {
+- target_capacity = cpu_capacity;
+- sg_target = sg;
+- }
+- } while (sg = sg->next, sg != sd->groups);
+-
+- if (!sg_target)
+- sg_target = sg_backup;
+-
+- if (sg_target) {
+- cpumask_and(&search_cpu, lowest_mask, sched_group_span(sg_target));
+- cpumask_copy(&backup_search_cpu, lowest_mask);
+- cpumask_andnot(&backup_search_cpu, &backup_search_cpu, &search_cpu);
+- } else {
+- cpumask_copy(&search_cpu, lowest_mask);
+- cpumask_clear(&backup_search_cpu);
+- }
+-
+-retry:
+- cpu = cpumask_first(&search_cpu);
+- do {
+- trace_sched_find_cas_cpu_each(task, cpu, target_cpu,
+- cpu_isolated(cpu),
+- idle_cpu(cpu), boosted_tutil, cpu_util_cfs(cpu),
+- capacity_orig_of(cpu));
+-
+- if (cpu_isolated(cpu))
+- continue;
+-
+- if (!cpumask_test_cpu(cpu, task->cpus_ptr))
+- continue;
+-
+- /* find best cpu with smallest max_capacity */
+- if (target_cpu != -1 &&
+- capacity_orig_of(cpu) > capacity_orig_of(target_cpu))
+- continue;
+-
+- util = cpu_util_cfs(cpu);
+-
+- /* Find the least loaded CPU */
+- if (util > target_cpu_util)
+- continue;
+-
+- /*
+- * If the preivous CPU has same load, keep it as
+- * target_cpu
+- */
+- if (target_cpu_util == util && target_cpu == prev_cpu)
+- continue;
+-
+- /*
+- * If candidate CPU is the previous CPU, select it.
+- * If all above conditions are same, select the least
+- * cumulative window demand CPU.
+- */
+- target_cpu_util = util;
+- target_cpu = cpu;
+- } while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids);
+-
+- if (target_cpu != -1 && cpumask_test_cpu(target_cpu, lowest_mask)) {
+- goto done;
+- } else if (!cpumask_empty(&backup_search_cpu)) {
+- cpumask_copy(&search_cpu, &backup_search_cpu);
+- cpumask_clear(&backup_search_cpu);
+- goto retry;
+- }
+-
+-done:
+- trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu);
+- rcu_read_unlock();
+- return target_cpu;
+-}
+-#endif
+-
+ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+
+ static int find_lowest_rq(struct task_struct *task)
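The deleted find_cas_cpu() scanned candidate CPUs preferring the smallest original capacity and, within equal capacity, the lowest utilization. A compact userspace model of that final selection pass (capacities and utilizations are invented):

/* Model of the deleted find_cas_cpu() selection loop: smallest
 * capacity_orig wins; ties go to the less loaded CPU. */
#include <stdio.h>

int main(void)
{
	unsigned long cap[]  = {512, 512, 1024};	/* capacity_orig per CPU */
	unsigned long util[] = {300, 120, 50};
	int best = -1;

	for (int cpu = 0; cpu < 3; cpu++) {
		if (best != -1 && cap[cpu] > cap[best])
			continue;	/* keep the smaller-capacity candidate */
		if (best != -1 && cap[cpu] == cap[best] && util[cpu] >= util[best])
			continue;	/* keep the less loaded candidate */
		best = cpu;
	}
	printf("target_cpu = %d\n", best);	/* 1: small CPU, lower util */
	return 0;
}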
+@@ -2062,9 +1879,6 @@ static int find_lowest_rq(struct task_struct *task)
+ int this_cpu = smp_processor_id();
+ int cpu = task_cpu(task);
+ int ret;
+-#ifdef CONFIG_SCHED_RT_CAS
+- int cas_cpu;
+-#endif
+
+ /* Make sure the mask is initialized first */
+ if (unlikely(!lowest_mask))
+@@ -2091,12 +1905,6 @@ static int find_lowest_rq(struct task_struct *task)
+ if (!ret)
+ return -1; /* No targets found */
+
+-#ifdef CONFIG_SCHED_RT_CAS
+- cas_cpu = find_cas_cpu(sd, task, lowest_mask);
+- if (cas_cpu != -1)
+- return cas_cpu;
+-#endif
+-
+ /*
+ * At this point we have built a mask of CPUs representing the
+ * lowest priority tasks in the system. Now we want to elect
+@@ -2702,8 +2510,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
+ * we may need to handle the pulling of RT tasks
+ * now.
+ */
+- if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
+- cpu_isolated(cpu_of(rq)))
++ if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
+ return;
+
+ rt_queue_pull_task(rq);
+@@ -2862,93 +2669,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ }
+ }
+
+-#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+-static int rt_active_load_balance_cpu_stop(void *data)
+-{
+- struct rq *busiest_rq = data;
+- struct task_struct *next_task = busiest_rq->rt_push_task;
+- struct rq *lowest_rq = NULL;
+- unsigned long flags;
+-
+- raw_spin_lock_irqsave(&busiest_rq->__lock, flags);
+- busiest_rq->rt_active_balance = 0;
+-
+- if (!task_on_rq_queued(next_task) ||
+- task_cpu(next_task) != cpu_of(busiest_rq))
+- goto out;
+-
+- /* find_lock_lowest_rq locks the rq if found */
+- lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
+- if (!lowest_rq)
+- goto out;
+-
+- if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))
+- goto unlock;
+-
+- deactivate_task(busiest_rq, next_task, 0);
+- set_task_cpu(next_task, lowest_rq->cpu);
+- activate_task(lowest_rq, next_task, 0);
+-
+- resched_curr(lowest_rq);
+-unlock:
+- double_unlock_balance(busiest_rq, lowest_rq);
+-out:
+- put_task_struct(next_task);
+- raw_spin_unlock_irqrestore(&busiest_rq->__lock, flags);
+-
+- return 0;
+-}
+-
+-static void check_for_migration_rt(struct rq *rq, struct task_struct *p)
+-{
+- bool need_actvie_lb = false;
+- bool misfit_task = false;
+- int cpu = task_cpu(p);
+- unsigned long cpu_orig_cap;
+-#ifdef CONFIG_SCHED_RTG
+- struct cpumask *rtg_target = NULL;
+-#endif
+-
+- if (!sysctl_sched_enable_rt_active_lb)
+- return;
+-
+- if (p->nr_cpus_allowed == 1)
+- return;
+-
+- cpu_orig_cap = capacity_orig_of(cpu);
+- /* cpu has max capacity, no need to do balance */
+- if (cpu_orig_cap == rq->rd->max_cpu_capacity)
+- return;
+-
+-#ifdef CONFIG_SCHED_RTG
+- rtg_target = find_rtg_target(p);
+- if (rtg_target)
+- misfit_task = capacity_orig_of(cpumask_first(rtg_target)) >
+- cpu_orig_cap;
+- else
+- misfit_task = !rt_task_fits_capacity(p, cpu);
+-#else
+- misfit_task = !rt_task_fits_capacity(p, cpu);
+-#endif
+-
+- if (misfit_task) {
+- raw_spin_lock(&rq->__lock);
+- if (!rq->active_balance && !rq->rt_active_balance) {
+- rq->rt_active_balance = 1;
+- rq->rt_push_task = p;
+- get_task_struct(p);
+- need_actvie_lb = true;
+- }
+- raw_spin_unlock(&rq->__lock);
+-
+- if (need_actvie_lb)
+- stop_one_cpu_nowait(task_cpu(p),
+- rt_active_load_balance_cpu_stop,
+- rq, &rq->rt_active_balance_work);
+- }
+-}
+-#endif
+-
+ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+ {
+ /*
+@@ -3015,12 +2735,6 @@ DEFINE_SCHED_CLASS(rt) = {
+ #ifdef CONFIG_UCLAMP_TASK
+ .uclamp_enabled = 1,
+ #endif
+-#ifdef CONFIG_SCHED_WALT
+- .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
+-#endif
+-#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+- .check_for_migration = check_for_migration_rt,
+-#endif
+ };
+
+ #ifdef CONFIG_RT_GROUP_SCHED
+diff --git a/kernel/sched/rtg/Kconfig b/kernel/sched/rtg/Kconfig
+deleted file mode 100755
+index 1cb0c4298..000000000
+--- a/kernel/sched/rtg/Kconfig
++++ /dev/null
+@@ -1,40 +0,0 @@
+-menu "Related Thread Group"
+-
+-config SCHED_RTG
+- bool "Related Thread Group"
+- depends on SCHED_WALT
+- default n
+- help
+- Set related threads into a group.
+-
+-config SCHED_RTG_DEBUG
+- bool "Related Thread Group DebugFS"
+- depends on SCHED_RTG
+- default n
+- help
+- If set, debug node will show rtg threads
+-
+-config SCHED_RTG_CGROUP
+- bool "enable DEFAULT_CGROUP_COLOC RTG"
+- depends on SCHED_RTG
+- default n
+- help
+- If set, support for adding the tasks which belong to
+- co-located cgroup to DEFAULT_CGROUP_COLOC RTG.
+-
+-config SCHED_RTG_FRAME
+- bool "Frame-based Related Thread Group"
+- depends on SCHED_RTG
+- default n
+- help
+- Support frame-based related thread group scheduling.
+- If set, you can set the task to RTG and kernel will
+- statistic the load per frame.
+-
+-config SCHED_RTG_RT_THREAD_LIMIT
+- bool "Limit the number of RT threads in groups"
+- depends on SCHED_RTG_FRAME
+- default n
+- help
+- If set, limit the number of RT threads in frame RTG.
+-endmenu
+diff --git a/kernel/sched/rtg/Makefile b/kernel/sched/rtg/Makefile
+deleted file mode 100755
+index 4d55523d1..000000000
+--- a/kernel/sched/rtg/Makefile
++++ /dev/null
+@@ -1,3 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_SCHED_RTG) += rtg.o
+-obj-$(CONFIG_SCHED_RTG_FRAME) += frame_rtg.o rtg_ctrl.o
+diff --git a/kernel/sched/rtg/frame_rtg.c b/kernel/sched/rtg/frame_rtg.c
+deleted file mode 100755
+index 79db64522..000000000
+--- a/kernel/sched/rtg/frame_rtg.c
++++ /dev/null
+@@ -1,1229 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Frame-based load tracking for rt_frame and RTG
+- *
+- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd.
+- */
+-
+-#include "frame_rtg.h"
+-#include "rtg.h"
+-
+-#include
+-#include
+-#include <../kernel/sched/sched.h>
+-#include
+-
+-static struct multi_frame_id_manager g_id_manager = {
+- .id_map = {0},
+- .offset = 0,
+- .lock = __RW_LOCK_UNLOCKED(g_id_manager.lock)
+-};
+-
+-static struct frame_info g_multi_frame_info[MULTI_FRAME_NUM];
+-
+-static bool is_rtg_rt_task(struct task_struct *task)
+-{
+- bool ret = false;
+-
+- if (!task)
+- return ret;
+-
+- ret = ((task->prio < MAX_RT_PRIO) &&
+- (task->rtg_depth == STATIC_RTG_DEPTH));
+-
+- return ret;
+-}
+-
+-#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT
+-static atomic_t g_rtg_rt_thread_num = ATOMIC_INIT(0);
+-
+-static unsigned int _get_rtg_rt_thread_num(struct related_thread_group *grp)
+-{
+- unsigned int rtg_rt_thread_num = 0;
+- struct task_struct *p = NULL;
+-
+- if (list_empty(&grp->tasks))
+- goto out;
+-
+- list_for_each_entry(p, &grp->tasks, grp_list) {
+- if (is_rtg_rt_task(p))
+- ++rtg_rt_thread_num;
+- }
+-
+-out:
+- return rtg_rt_thread_num;
+-}
+-
+-static unsigned int get_rtg_rt_thread_num(void)
+-{
+- struct related_thread_group *grp = NULL;
+- unsigned int total_rtg_rt_thread_num = 0;
+- unsigned long flag;
+- unsigned int i;
+-
+- for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
+- grp = lookup_related_thread_group(i);
+- if (grp == NULL)
+- continue;
+- raw_spin_lock_irqsave(&grp->lock, flag);
+- total_rtg_rt_thread_num += _get_rtg_rt_thread_num(grp);
+- raw_spin_unlock_irqrestore(&grp->lock, flag);
+- }
+-
+- return total_rtg_rt_thread_num;
+-}
+-
+-static void inc_rtg_rt_thread_num(void)
+-{
+- atomic_inc(&g_rtg_rt_thread_num);
+-}
+-
+-static void dec_rtg_rt_thread_num(void)
+-{
+- atomic_dec_if_positive(&g_rtg_rt_thread_num);
+-}
+-
+-static int test_and_read_rtg_rt_thread_num(void)
+-{
+- if (atomic_read(&g_rtg_rt_thread_num) >= RTG_MAX_RT_THREAD_NUM)
+- atomic_set(&g_rtg_rt_thread_num, get_rtg_rt_thread_num());
+-
+- return atomic_read(&g_rtg_rt_thread_num);
+-}
+-
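test_and_read_rtg_rt_thread_num() above caches the RT-thread count in an atomic and re-derives it from the group lists only once the cap is hit. A userspace model of that resync pattern (the limit and recount value are invented):

/* Model of the cached-counter resync: recount from ground truth only when
 * the cached value reaches the limit. */
#include <stdio.h>

#define RTG_MAX_RT_THREAD_NUM 8

static int cached = 8;				/* atomic_t in the real code */
static int recount_from_groups(void) { return 5; }	/* stand-in for the walk */

static int test_and_read(void)
{
	if (cached >= RTG_MAX_RT_THREAD_NUM)
		cached = recount_from_groups();	/* resync at the cap */
	return cached;
}

int main(void)
{
	printf("%d\n", test_and_read());	/* prints 5 after the resync */
	return 0;
}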
+-int read_rtg_rt_thread_num(void)
+-{
+- return atomic_read(&g_rtg_rt_thread_num);
+-}
+-#else
+-static inline void inc_rtg_rt_thread_num(void) { }
+-static inline void dec_rtg_rt_thread_num(void) { }
+-static inline int test_and_read_rtg_rt_thread_num(void)
+-{
+- return 0;
+-}
+-#endif
+-
+-bool is_frame_rtg(int id)
+-{
+- return (id >= MULTI_FRAME_ID) &&
+- (id < (MULTI_FRAME_ID + MULTI_FRAME_NUM));
+-}
+-
+-static struct related_thread_group *frame_rtg(int id)
+-{
+- if (!is_frame_rtg(id))
+- return NULL;
+-
+- return lookup_related_thread_group(id);
+-}
+-
+-struct frame_info *rtg_frame_info(int id)
+-{
+- if (!is_frame_rtg(id))
+- return NULL;
+-
+- return rtg_active_multi_frame_info(id);
+-}
+-
+-static int alloc_rtg_id(void)
+-{
+- unsigned int id_offset;
+- int id;
+-
+- write_lock(&g_id_manager.lock);
+- id_offset = find_next_zero_bit(g_id_manager.id_map, MULTI_FRAME_NUM,
+- g_id_manager.offset);
+- if (id_offset >= MULTI_FRAME_NUM) {
+- id_offset = find_first_zero_bit(g_id_manager.id_map,
+- MULTI_FRAME_NUM);
+- if (id_offset >= MULTI_FRAME_NUM) {
+- write_unlock(&g_id_manager.lock);
+- return -EINVAL;
+- }
+- }
+-
+- set_bit(id_offset, g_id_manager.id_map);
+- g_id_manager.offset = id_offset;
+- id = id_offset + MULTI_FRAME_ID;
+- write_unlock(&g_id_manager.lock);
+- pr_debug("[FRAME_RTG] %s id_offset=%u, id=%d\n", __func__, id_offset, id);
+-
+- return id;
+-}
+-
+-static void free_rtg_id(int id)
+-{
+- unsigned int id_offset = id - MULTI_FRAME_ID;
+-
+- if (id_offset >= MULTI_FRAME_NUM) {
+- pr_err("[FRAME_RTG] %s id_offset is invalid, id=%d, id_offset=%u.\n",
+- __func__, id, id_offset);
+- return;
+- }
+-
+- pr_debug("[FRAME_RTG] %s id=%d id_offset=%u\n", __func__, id, id_offset);
+- write_lock(&g_id_manager.lock);
+- clear_bit(id_offset, g_id_manager.id_map);
+- write_unlock(&g_id_manager.lock);
+-}
+-
+-int set_frame_rate(struct frame_info *frame_info, int rate)
+-{
+- int id;
+-
+- if ((rate < MIN_FRAME_RATE) || (rate > MAX_FRAME_RATE)) {
+- pr_err("[FRAME_RTG]: %s invalid QOS(rate) value\n",
+- __func__);
+- return -EINVAL;
+- }
+-
+- if (!frame_info || !frame_info->rtg)
+- return -EINVAL;
+-
+- frame_info->frame_rate = (unsigned int)rate;
+- frame_info->frame_time = div_u64(NSEC_PER_SEC, rate);
+- frame_info->max_vload_time =
+- div_u64(frame_info->frame_time, NSEC_PER_MSEC) +
+- frame_info->vload_margin;
+- id = frame_info->rtg->id;
+- trace_rtg_frame_sched(id, "FRAME_QOS", rate);
+- trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time);
+-
+- return 0;
+-}
+-
+-int alloc_multi_frame_info(void)
+-{
+- struct frame_info *frame_info = NULL;
+- int id;
+- int i;
+-
+- id = alloc_rtg_id();
+- if (id < 0)
+- return id;
+-
+- frame_info = rtg_frame_info(id);
+- if (!frame_info) {
+- free_rtg_id(id);
+- return -EINVAL;
+- }
+-
+- set_frame_rate(frame_info, DEFAULT_FRAME_RATE);
+- atomic_set(&frame_info->curr_rt_thread_num, 0);
+- atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD);
+- for (i = 0; i < MAX_TID_NUM; i++)
+- atomic_set(&frame_info->thread_prio[i], 0);
+-
+- return id;
+-}
+-
+-void release_multi_frame_info(int id)
+-{
+- if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM)) {
+- pr_err("[FRAME_RTG] %s frame(id=%d) not found.\n", __func__, id);
+- return;
+- }
+-
+- read_lock(&g_id_manager.lock);
+- if (!test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map)) {
+- read_unlock(&g_id_manager.lock);
+- return;
+- }
+- read_unlock(&g_id_manager.lock);
+-
+- pr_debug("[FRAME_RTG] %s release frame(id=%d).\n", __func__, id);
+- free_rtg_id(id);
+-}
+-
+-void clear_multi_frame_info(void)
+-{
+- write_lock(&g_id_manager.lock);
+- bitmap_zero(g_id_manager.id_map, MULTI_FRAME_NUM);
+- g_id_manager.offset = 0;
+- write_unlock(&g_id_manager.lock);
+-}
+-
+-struct frame_info *rtg_active_multi_frame_info(int id)
+-{
+- struct frame_info *frame_info = NULL;
+-
+- if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
+- return NULL;
+-
+- read_lock(&g_id_manager.lock);
+- if (test_bit(id - MULTI_FRAME_ID, g_id_manager.id_map))
+- frame_info = &g_multi_frame_info[id - MULTI_FRAME_ID];
+- read_unlock(&g_id_manager.lock);
+- if (!frame_info)
+- pr_debug("[FRAME_RTG] %s frame %d has been released\n",
+- __func__, id);
+-
+- return frame_info;
+-}
+-
+-struct frame_info *rtg_multi_frame_info(int id)
+-{
+- if ((id < MULTI_FRAME_ID) || (id >= MULTI_FRAME_ID + MULTI_FRAME_NUM))
+- return NULL;
+-
+- return &g_multi_frame_info[id - MULTI_FRAME_ID];
+-}
+-
+-static void do_update_frame_task_prio(struct frame_info *frame_info,
+- struct task_struct *task, int prio)
+-{
+- int policy = SCHED_NORMAL;
+- struct sched_param sp = {0};
+- bool is_rt_task = (prio != NOT_RT_PRIO);
+- bool need_dec_flag = false;
+- bool need_inc_flag = false;
+- int err;
+-
+- trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
+- read_rtg_rt_thread_num());
+- /* change policy to RT */
+- if (is_rt_task && (atomic_read(&frame_info->curr_rt_thread_num) <
+- atomic_read(&frame_info->max_rt_thread_num))) {
+- /* change policy from CFS to RT */
+- if (!is_rtg_rt_task(task)) {
+- if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
+- goto out;
+- need_inc_flag = true;
+- }
+- /* change RT priority */
+- policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
+- sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
+- atomic_inc(&frame_info->curr_rt_thread_num);
+- } else {
+- /* change policy from RT to CFS */
+- if (!is_rt_task && is_rtg_rt_task(task))
+- need_dec_flag = true;
+- }
+-out:
+- trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
+- read_rtg_rt_thread_num());
+- trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
+- atomic_read(&frame_info->curr_rt_thread_num));
+- err = sched_setscheduler_nocheck(task, policy, &sp);
+- if (err == 0) {
+- if (need_dec_flag)
+- dec_rtg_rt_thread_num();
+- else if (need_inc_flag)
+- inc_rtg_rt_thread_num();
+- }
+-}
+-
+-int list_rtg_group(struct rtg_info *rs_data)
+-{
+- int i;
+- int num = 0;
+-
+- read_lock(&g_id_manager.lock);
+- for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
+- if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
+- rs_data->rtgs[num] = i;
+- num++;
+- }
+- }
+- read_unlock(&g_id_manager.lock);
+- rs_data->rtg_num = num;
+-
+- return num;
+-}
+-
+-int search_rtg(int pid)
+-{
+- struct rtg_info grp_info;
+- struct frame_info *frame_info = NULL;
+- int i = 0;
+- int j = 0;
+-
+- grp_info.rtg_num = 0;
+- read_lock(&g_id_manager.lock);
+- for (i = MULTI_FRAME_ID; i < MULTI_FRAME_ID + MULTI_FRAME_NUM; i++) {
+- if (test_bit(i - MULTI_FRAME_ID, g_id_manager.id_map)) {
+- grp_info.rtgs[grp_info.rtg_num] = i;
+- grp_info.rtg_num++;
+- }
+- }
+- read_unlock(&g_id_manager.lock);
+- for (i = 0; i < grp_info.rtg_num; i++) {
+- frame_info = lookup_frame_info_by_grp_id(grp_info.rtgs[i]);
+- if (!frame_info) {
+- pr_err("[FRAME_RTG] unexpected grp %d find error.", i);
+- return -EINVAL;
+- }
+-
+- for (j = 0; j < frame_info->thread_num; j++) {
+- if (frame_info->thread[j] && frame_info->thread[j]->pid == pid)
+- return grp_info.rtgs[i];
+- }
+- }
+-
+- return 0;
+-}
+-
+-static void update_frame_task_prio(struct frame_info *frame_info, int prio)
+-{
+- int i;
+- struct task_struct *thread = NULL;
+-
+- /* reset curr_rt_thread_num */
+- atomic_set(&frame_info->curr_rt_thread_num, 0);
+-
+- for (i = 0; i < MAX_TID_NUM; i++) {
+- thread = frame_info->thread[i];
+- if (thread)
+- do_update_frame_task_prio(frame_info, thread, prio);
+- }
+-}
+-
+-void set_frame_prio(struct frame_info *frame_info, int prio)
+-{
+- if (!frame_info)
+- return;
+-
+- mutex_lock(&frame_info->lock);
+- if (frame_info->prio == prio)
+- goto out;
+-
+- update_frame_task_prio(frame_info, prio);
+- frame_info->prio = prio;
+-out:
+- mutex_unlock(&frame_info->lock);
+-}
+-
+-static int do_set_rtg_sched(struct task_struct *task, bool is_rtg,
+- int grp_id, int prio)
+-{
+- int err;
+- int policy = SCHED_NORMAL;
+- int grpid = DEFAULT_RTG_GRP_ID;
+- bool is_rt_task = (prio != NOT_RT_PRIO);
+- struct sched_param sp = {0};
+-
+- if (is_rtg) {
+- if (is_rt_task) {
+- if (test_and_read_rtg_rt_thread_num() >= RTG_MAX_RT_THREAD_NUM)
+- // rtg_rt_thread_num is inavailable, set policy to CFS
+- goto skip_setpolicy;
+- policy = SCHED_FIFO | SCHED_RESET_ON_FORK;
+- sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;
+- }
+-skip_setpolicy:
+- grpid = grp_id;
+- }
+- err = sched_setscheduler_nocheck(task, policy, &sp);
+- if (err < 0) {
+- pr_err("[FRAME_RTG]: %s task:%d setscheduler err:%d\n",
+- __func__, task->pid, err);
+- return err;
+- }
+- err = sched_set_group_id(task, grpid);
+- if (err < 0) {
+- pr_err("[FRAME_RTG]: %s task:%d set_group_id err:%d\n",
+- __func__, task->pid, err);
+- if (is_rtg) {
+- policy = SCHED_NORMAL;
+- sp.sched_priority = 0;
+- sched_setscheduler_nocheck(task, policy, &sp);
+- }
+- }
+- if (err == 0) {
+- if (is_rtg) {
+- if (policy != SCHED_NORMAL)
+- inc_rtg_rt_thread_num();
+- } else {
+- dec_rtg_rt_thread_num();
+- }
+- }
+-
+- return err;
+-}
+-
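do_set_rtg_sched() above (like do_update_frame_task_prio() earlier) maps a frame priority p to an RT priority of MAX_USER_RT_PRIO - 1 - p before calling sched_setscheduler_nocheck(). A worked example of that mapping (the constant mirrors the kernel's MAX_USER_RT_PRIO of 100):

/* Worked mapping from frame prio to SCHED_FIFO sched_priority. */
#include <stdio.h>

#define MAX_USER_RT_PRIO 100

int main(void)
{
	for (int prio = 0; prio <= 2; prio++)
		printf("frame prio %d -> sched_priority %d\n",
		       prio, MAX_USER_RT_PRIO - 1 - prio);	/* 99, 98, 97 */
	return 0;
}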
+-static int set_rtg_sched(struct task_struct *task, bool is_rtg,
+- int grp_id, int prio)
+-{
+- int err = -1;
+- bool is_rt_task = (prio != NOT_RT_PRIO);
+-
+- if (!task)
+- return err;
+-
+- if (is_rt_task && is_rtg && ((prio < 0) ||
+- (prio > MAX_USER_RT_PRIO - 1)))
+- return err;
+- /*
+- * original logic deny the non-cfs task st rt.
+- * add !fair_policy(task->policy) if needed
+- *
+- * if CONFIG_HW_FUTEX_PI is set, task->prio and task->sched_class
+- * may be modified by rtmutex. So we use task->policy instead.
+- */
+- if (is_rtg && task->flags & PF_EXITING)
+- return err;
+-
+- if (in_interrupt()) {
+- pr_err("[FRAME_RTG]: %s is in interrupt\n", __func__);
+- return err;
+- }
+-
+- return do_set_rtg_sched(task, is_rtg, grp_id, prio);
+-}
+-
+-static bool set_frame_rtg_thread(int grp_id, struct task_struct *task,
+- bool is_rtg, int prio)
+-{
+- int depth;
+-
+- if (!task)
+- return false;
+- depth = task->rtg_depth;
+- if (is_rtg)
+- task->rtg_depth = STATIC_RTG_DEPTH;
+- else
+- task->rtg_depth = 0;
+-
+- if (set_rtg_sched(task, is_rtg, grp_id, prio) < 0) {
+- task->rtg_depth = depth;
+- return false;
+- }
+-
+- return true;
+-}
+-
+-struct task_struct *update_frame_thread(struct frame_info *frame_info,
+- int old_prio, int prio, int pid,
+- struct task_struct *old_task)
+-{
+- struct task_struct *task = NULL;
+- bool is_rt_task = (prio != NOT_RT_PRIO);
+- int new_prio = prio;
+- bool update_ret = false;
+-
+- if (pid > 0) {
+- if (old_task && (pid == old_task->pid) && (old_prio == new_prio)) {
+- if (is_rt_task && atomic_read(&frame_info->curr_rt_thread_num) <
+- atomic_read(&frame_info->max_rt_thread_num) &&
+- (atomic_read(&frame_info->frame_sched_state) == 1))
+- atomic_inc(&frame_info->curr_rt_thread_num);
+- trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
+- atomic_read(&frame_info->curr_rt_thread_num));
+- return old_task;
+- }
+- rcu_read_lock();
+- task = find_task_by_vpid(pid);
+- if (task)
+- get_task_struct(task);
+- rcu_read_unlock();
+- }
+- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE",
+- atomic_read(&frame_info->frame_sched_state));
+- if (atomic_read(&frame_info->frame_sched_state) == 1) {
+- if (task && is_rt_task) {
+- if (atomic_read(&frame_info->curr_rt_thread_num) <
+- atomic_read(&frame_info->max_rt_thread_num))
+- atomic_inc(&frame_info->curr_rt_thread_num);
+- else
+- new_prio = NOT_RT_PRIO;
+- }
+- trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
+- atomic_read(&frame_info->curr_rt_thread_num));
+- trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
+- read_rtg_rt_thread_num());
+-
+- set_frame_rtg_thread(frame_info->rtg->id, old_task, false, NOT_RT_PRIO);
+- update_ret = set_frame_rtg_thread(frame_info->rtg->id, task, true, new_prio);
+- }
+- if (old_task)
+- put_task_struct(old_task);
+- if (!update_ret)
+- return NULL;
+-
+- return task;
+-}
+-
+-void update_frame_thread_info(struct frame_info *frame_info,
+- struct frame_thread_info *frame_thread_info)
+-{
+- int i;
+- int old_prio;
+- int prio;
+- int thread_num;
+- int real_thread;
+-
+- if (!frame_info || !frame_thread_info ||
+- frame_thread_info->thread_num < 0)
+- return;
+-
+- prio = frame_thread_info->prio;
+- thread_num = frame_thread_info->thread_num;
+- if (thread_num > MAX_TID_NUM)
+- thread_num = MAX_TID_NUM;
+-
+- // reset curr_rt_thread_num
+- atomic_set(&frame_info->curr_rt_thread_num, 0);
+- mutex_lock(&frame_info->lock);
+- old_prio = frame_info->prio;
+- real_thread = 0;
+- for (i = 0; i < thread_num; i++) {
+- atomic_set(&frame_info->thread_prio[i], 0);
+- frame_info->thread[i] = update_frame_thread(frame_info, old_prio, prio,
+- frame_thread_info->thread[i],
+- frame_info->thread[i]);
+- if (frame_info->thread[i] && (frame_thread_info->thread[i] > 0))
+- real_thread++;
+- }
+- frame_info->prio = prio;
+- frame_info->thread_num = real_thread;
+- mutex_unlock(&frame_info->lock);
+-}
+-
+-static void do_set_frame_sched_state(struct frame_info *frame_info,
+- struct task_struct *task,
+- bool enable, int prio)
+-{
+- int new_prio = prio;
+- bool is_rt_task = (prio != NOT_RT_PRIO);
+-
+- if (enable && is_rt_task) {
+- if (atomic_read(&frame_info->curr_rt_thread_num) <
+- atomic_read(&frame_info->max_rt_thread_num))
+- atomic_inc(&frame_info->curr_rt_thread_num);
+- else
+- new_prio = NOT_RT_PRIO;
+- }
+- trace_rtg_frame_sched(frame_info->rtg->id, "curr_rt_thread_num",
+- atomic_read(&frame_info->curr_rt_thread_num));
+- trace_rtg_frame_sched(frame_info->rtg->id, "rtg_rt_thread_num",
+- read_rtg_rt_thread_num());
+- set_frame_rtg_thread(frame_info->rtg->id, task, enable, new_prio);
+-}
+-
+-void set_frame_sched_state(struct frame_info *frame_info, bool enable)
+-{
+- atomic_t *frame_sched_state = NULL;
+- int prio;
+- int i;
+-
+- if (!frame_info || !frame_info->rtg)
+- return;
+-
+- frame_sched_state = &(frame_info->frame_sched_state);
+- if (enable) {
+- if (atomic_read(frame_sched_state) == 1)
+- return;
+- atomic_set(frame_sched_state, 1);
+- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 1);
+-
+- frame_info->prev_fake_load_util = 0;
+- frame_info->prev_frame_load_util = 0;
+- frame_info->frame_vload = 0;
+- frame_info_rtg_load(frame_info)->curr_window_load = 0;
+- } else {
+- if (atomic_read(frame_sched_state) == 0)
+- return;
+- atomic_set(frame_sched_state, 0);
+- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0);
+-
+- (void)sched_set_group_normalized_util(frame_info->rtg->id,
+- 0, RTG_FREQ_NORMAL_UPDATE);
+- trace_rtg_frame_sched(frame_info->rtg->id, "preferred_cluster",
+- INVALID_PREFERRED_CLUSTER);
+- frame_info->status = FRAME_END;
+- }
+-
+- /* reset curr_rt_thread_num */
+- atomic_set(&frame_info->curr_rt_thread_num, 0);
+- mutex_lock(&frame_info->lock);
+- for (i = 0; i < MAX_TID_NUM; i++) {
+- if (frame_info->thread[i]) {
+- prio = atomic_read(&frame_info->thread_prio[i]);
+- do_set_frame_sched_state(frame_info, frame_info->thread[i],
+- enable, prio);
+- }
+- }
+- mutex_unlock(&frame_info->lock);
+-
+- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS",
+- frame_info->status);
+- trace_rtg_frame_sched(frame_info->rtg->id, "frame_status",
+- frame_info->status);
+-}
+-
+-static inline bool check_frame_util_invalid(const struct frame_info *frame_info,
+- u64 timeline)
+-{
+- return ((frame_info_rtg(frame_info)->util_invalid_interval <= timeline) &&
+- (frame_info_rtg_load(frame_info)->curr_window_exec * FRAME_UTIL_INVALID_FACTOR
+- <= timeline));
+-}
+-
+-static u64 calc_prev_fake_load_util(const struct frame_info *frame_info)
+-{
+- u64 prev_frame_load = frame_info->prev_frame_load;
+- u64 prev_frame_time = max_t(unsigned long, frame_info->prev_frame_time,
+- frame_info->frame_time);
+- u64 frame_util = 0;
+-
+- if (prev_frame_time > 0)
+- frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
+- prev_frame_time);
+- frame_util = clamp_t(unsigned long, frame_util,
+- frame_info->prev_min_util,
+- frame_info->prev_max_util);
+-
+- return frame_util;
+-}
+-
+-static u64 calc_prev_frame_load_util(const struct frame_info *frame_info)
+-{
+- u64 prev_frame_load = frame_info->prev_frame_load;
+- u64 frame_time = frame_info->frame_time;
+- u64 frame_util = 0;
+-
+- if (prev_frame_load >= frame_time)
+- frame_util = FRAME_MAX_LOAD;
+- else
+- frame_util = div_u64((prev_frame_load << SCHED_CAPACITY_SHIFT),
+- frame_info->frame_time);
+- frame_util = clamp_t(unsigned long, frame_util,
+- frame_info->prev_min_util,
+- frame_info->prev_max_util);
+-
+- return frame_util;
+-}
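calc_prev_frame_load_util() above scales last frame's load by SCHED_CAPACITY (1 << 10) against the frame budget and clamps the result. A worked example with assumed timings (60 Hz frame, 8 ms of load):

/* Worked model of the deleted calc_prev_frame_load_util(): times in ns are
 * invented; the clamp to prev_min/max_util is omitted for brevity. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	unsigned long long prev_frame_load = 8000000;	/* 8 ms of load */
	unsigned long long frame_time = 16666666;	/* ~16.67 ms budget */
	unsigned long long util;

	if (prev_frame_load >= frame_time)
		util = 1024;				/* FRAME_MAX_LOAD */
	else
		util = (prev_frame_load << SCHED_CAPACITY_SHIFT) / frame_time;
	printf("util = %llu\n", util);			/* ~491 of 1024 */
	return 0;
}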
+-
+-/* last frame load tracking */
+-static void update_frame_prev_load(struct frame_info *frame_info, bool fake)
+-{
+- /* last frame load tracking */
+- frame_info->prev_frame_exec =
+- frame_info_rtg_load(frame_info)->prev_window_exec;
+- frame_info->prev_frame_time =
+- frame_info_rtg(frame_info)->prev_window_time;
+- frame_info->prev_frame_load =
+- frame_info_rtg_load(frame_info)->prev_window_load;
+-
+- if (fake)
+- frame_info->prev_fake_load_util =
+- calc_prev_fake_load_util(frame_info);
+- else
+- frame_info->prev_frame_load_util =
+- calc_prev_frame_load_util(frame_info);
+-}
+-
+-static void do_frame_end(struct frame_info *frame_info, bool fake)
+-{
+- unsigned long prev_util;
+- int id = frame_info->rtg->id;
+-
+- frame_info->status = FRAME_END;
+- trace_rtg_frame_sched(id, "frame_status", frame_info->status);
+-
+- /* last frame load tracking */
+- update_frame_prev_load(frame_info, fake);
+-
+- /* reset frame_info */
+- frame_info->frame_vload = 0;
+-
+- /* reset frame_min_util */
+- frame_info->frame_min_util = 0;
+-
+- if (fake)
+- prev_util = frame_info->prev_fake_load_util;
+- else
+- prev_util = frame_info->prev_frame_load_util;
+-
+- frame_info->frame_util = clamp_t(unsigned long, prev_util,
+- frame_info->frame_min_util,
+- frame_info->frame_max_util);
+-
+- trace_rtg_frame_sched(id, "frame_last_task_time",
+- frame_info->prev_frame_exec);
+- trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time);
+- trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load);
+- trace_rtg_frame_sched(id, "frame_last_load_util",
+- frame_info->prev_frame_load_util);
+- trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util);
+- trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload);
+-}
+-
+-/*
+- * frame_load : calculate frame load using exec util
+- */
+-static inline u64 calc_frame_exec(const struct frame_info *frame_info)
+-{
+- if (frame_info->frame_time > 0)
+- return div_u64((frame_info_rtg_load(frame_info)->curr_window_exec <<
+- SCHED_CAPACITY_SHIFT), frame_info->frame_time);
+- else
+- return 0;
+-}
+-
+-/*
+- * real_util:
+- * max(last_util, virtual_util, boost_util, phase_util, frame_min_util)
+- */
+-static u64 calc_frame_util(const struct frame_info *frame_info, bool fake)
+-{
+- unsigned long load_util;
+-
+- if (fake)
+- load_util = frame_info->prev_fake_load_util;
+- else
+- load_util = frame_info->prev_frame_load_util;
+-
+- load_util = max_t(unsigned long, load_util, frame_info->frame_vload);
+- load_util = clamp_t(unsigned long, load_util,
+- frame_info->frame_min_util,
+- frame_info->frame_max_util);
+-
+- return load_util;
+-}
+-
+-/*
+- * frame_vload [0~1024]
+- * vtime: now - timestamp
+- * max_time: frame_info->frame_time + vload_margin
+- * load = F(vtime)
+- *      = vtime ^ 2 - vtime * max_time + FRAME_MAX_VLOAD * vtime / max_time;
+- *      = vtime * (vtime + FRAME_MAX_VLOAD / max_time - max_time);
+- * [0, 0] -=> [max_time, FRAME_MAX_VLOAD]
+- *
+- */
+-static u64 calc_frame_vload(const struct frame_info *frame_info, u64 timeline)
+-{
+- u64 vload;
+- int vtime = div_u64(timeline, NSEC_PER_MSEC);
+- int max_time = frame_info->max_vload_time;
+- int factor;
+-
+- if ((max_time <= 0) || (vtime > max_time))
+- return FRAME_MAX_VLOAD;
+-
+- factor = vtime + FRAME_MAX_VLOAD / max_time;
+- /* margin maybe negative */
+- if ((vtime <= 0) || (factor <= max_time))
+- return 0;
+-
+- vload = (u64)vtime * (u64)(factor - max_time);
+-
+- return vload;
+-}
+-
+-static int update_frame_info_tick_inner(int id, struct frame_info *frame_info,
+- u64 timeline)
+-{
+- switch (frame_info->status) {
FRAME_INVALID: +- case FRAME_END: +- if (timeline >= frame_info->frame_time) { +- /* +- * fake FRAME_END here to rollover frame_window. +- */ +- sched_set_group_window_rollover(id); +- do_frame_end(frame_info, true); +- } else { +- frame_info->frame_vload = calc_frame_exec(frame_info); +- frame_info->frame_util = +- calc_frame_util(frame_info, true); +- } +- +- /* when not in boost, start tick timer */ +- break; +- case FRAME_START: +- /* check frame_util invalid */ +- if (!check_frame_util_invalid(frame_info, timeline)) { +- /* frame_vload statistic */ +- frame_info->frame_vload = calc_frame_vload(frame_info, timeline); +- /* frame_util statistic */ +- frame_info->frame_util = +- calc_frame_util(frame_info, false); +- } else { +- frame_info->status = FRAME_INVALID; +- trace_rtg_frame_sched(id, "FRAME_STATUS", +- frame_info->status); +- trace_rtg_frame_sched(id, "frame_status", +- frame_info->status); +- +- /* +- * trigger FRAME_END to rollover frame_window, +- * we treat FRAME_INVALID as FRAME_END. +- */ +- sched_set_group_window_rollover(id); +- do_frame_end(frame_info, false); +- } +- break; +- default: +- return -EINVAL; +- } +- +- return 0; +-} +- +-static inline struct frame_info *rtg_frame_info_inner( +- const struct related_thread_group *grp) +-{ +- return (struct frame_info *)grp->private_data; +-} +- +-static inline void frame_boost(struct frame_info *frame_info) +-{ +- if (frame_info->frame_util < frame_info->frame_boost_min_util) +- frame_info->frame_util = frame_info->frame_boost_min_util; +-} +- +-/* +- * update CPUFREQ and PLACEMENT when frame task running (in tick) and migration +- */ +-static void update_frame_info_tick(struct related_thread_group *grp) +-{ +- u64 window_start; +- u64 wallclock; +- u64 timeline; +- struct frame_info *frame_info = NULL; +- int id = grp->id; +- +- rcu_read_lock(); +- frame_info = rtg_frame_info_inner(grp); +- window_start = grp->window_start; +- rcu_read_unlock(); +- if (unlikely(!frame_info)) +- return; +- +- if (atomic_read(&frame_info->frame_sched_state) == 0) +- return; +- trace_rtg_frame_sched(id, "frame_status", frame_info->status); +- +- wallclock = ktime_get_ns(); +- timeline = wallclock - window_start; +- +- trace_rtg_frame_sched(id, "update_curr_pid", current->pid); +- trace_rtg_frame_sched(id, "frame_timeline", div_u64(timeline, NSEC_PER_MSEC)); +- +- if (update_frame_info_tick_inner(grp->id, frame_info, timeline) == -EINVAL) +- return; +- +- frame_boost(frame_info); +- trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload); +- trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); +- +- sched_set_group_normalized_util(grp->id, +- frame_info->frame_util, RTG_FREQ_NORMAL_UPDATE); +- +- if (grp->preferred_cluster) +- trace_rtg_frame_sched(id, "preferred_cluster", +- grp->preferred_cluster->id); +-} +- +-const struct rtg_class frame_rtg_class = { +- .sched_update_rtg_tick = update_frame_info_tick, +-}; +- +-int set_frame_margin(struct frame_info *frame_info, int margin) +-{ +- int id; +- +- if ((margin < MIN_VLOAD_MARGIN) || (margin > MAX_VLOAD_MARGIN)) { +- pr_err("[FRAME_RTG]: %s invalid MARGIN value\n", +- __func__); +- return -EINVAL; +- } +- +- if (!frame_info || !frame_info->rtg) +- return -EINVAL; +- +- frame_info->vload_margin = margin; +- frame_info->max_vload_time = +- div_u64(frame_info->frame_time, NSEC_PER_MSEC) + +- frame_info->vload_margin; +- id = frame_info->rtg->id; +- trace_rtg_frame_sched(id, "FRAME_MARGIN", -margin); +- trace_rtg_frame_sched(id, "FRAME_MAX_TIME", frame_info->max_vload_time); 
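
The virtual-load curve used above is worth probing on its own. Below is a minimal userspace sketch of the calc_frame_vload() arithmetic, assuming FRAME_MAX_VLOAD equals SCHED_CAPACITY_SCALE (1024) and that times are already in milliseconds; with the default 60 FPS frame (16 ms) plus the default 16 ms vload margin, the curve grows roughly quadratically from 0 and reaches exactly FRAME_MAX_VLOAD at vtime == max_time.

#include <stdio.h>
#include <stdint.h>

#define FRAME_MAX_VLOAD 1024 /* assumed equal to SCHED_CAPACITY_SCALE */

/* vtime and max_time are in milliseconds, mirroring calc_frame_vload() */
static uint64_t frame_vload(int vtime, int max_time)
{
	int factor;

	if (max_time <= 0 || vtime > max_time)
		return FRAME_MAX_VLOAD;
	factor = vtime + FRAME_MAX_VLOAD / max_time;
	/* the margin may be negative, so factor can fall below max_time */
	if (vtime <= 0 || factor <= max_time)
		return 0;
	return (uint64_t)vtime * (uint64_t)(factor - max_time);
}

int main(void)
{
	/* 60 FPS: 16 ms frame_time plus the default 16 ms vload margin */
	int max_time = 16 + 16;
	int vtime;

	for (vtime = 0; vtime <= max_time; vtime += 8)
		printf("vtime=%2dms vload=%4llu\n", vtime,
		       (unsigned long long)frame_vload(vtime, max_time));
	return 0; /* prints 0, 64, 256, 576, 1024 */
}
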
+- +- return 0; +-} +- +-static void set_frame_start(struct frame_info *frame_info) +-{ +- int id = frame_info->rtg->id; +- +- if (likely(frame_info->status == FRAME_START)) { +- /* +- * START -=> START -=> ...... +- * FRAME_START is +- * the end of last frame +- * the start of the current frame +- */ +- update_frame_prev_load(frame_info, false); +- } else if ((frame_info->status == FRAME_END) || +- (frame_info->status == FRAME_INVALID)) { +- /* START -=> END -=> [START] +- * FRAME_START is +- * only the start of current frame +- * we shouldn't track the last rtg-window +- * [FRAME_END, FRAME_START] +- * it's not an available frame window +- */ +- update_frame_prev_load(frame_info, true); +- frame_info->status = FRAME_START; +- } +- trace_rtg_frame_sched(id, "FRAME_STATUS", frame_info->status); +- trace_rtg_frame_sched(id, "frame_last_task_time", +- frame_info->prev_frame_exec); +- trace_rtg_frame_sched(id, "frame_last_time", frame_info->prev_frame_time); +- trace_rtg_frame_sched(id, "frame_last_load", frame_info->prev_frame_load); +- trace_rtg_frame_sched(id, "frame_last_load_util", +- frame_info->prev_frame_load_util); +- +- /* new_frame_start */ +- if (!frame_info->margin_imme) { +- frame_info->frame_vload = 0; +- frame_info->frame_util = clamp_t(unsigned long, +- frame_info->prev_frame_load_util, +- frame_info->frame_min_util, +- frame_info->frame_max_util); +- } else { +- frame_info->frame_vload = calc_frame_vload(frame_info, 0); +- frame_info->frame_util = calc_frame_util(frame_info, false); +- } +- +- trace_rtg_frame_sched(id, "frame_vload", frame_info->frame_vload); +-} +- +-static void set_frame_end(struct frame_info *frame_info) +-{ +- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_STATUS", FRAME_END); +- do_frame_end(frame_info, false); +-} +- +-static int update_frame_timestamp(unsigned long status, +- struct frame_info *frame_info, struct related_thread_group *grp) +-{ +- int id = frame_info->rtg->id; +- +- /* SCHED_FRAME timestamp */ +- switch (status) { +- case FRAME_START: +- /* collect frame_info when the frame_end timestamp comes */ +- set_frame_start(frame_info); +- break; +- case FRAME_END: +- /* FRAME_END should only set and update freq once */ +- if (unlikely(frame_info->status == FRAME_END)) +- return 0; +- set_frame_end(frame_info); +- break; +- default: +- pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n", +- __func__); +- return -EINVAL; +- } +- +- frame_boost(frame_info); +- trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); +- +- /* update cpufreq force when frame_stop */ +- sched_set_group_normalized_util(grp->id, +- frame_info->frame_util, RTG_FREQ_FORCE_UPDATE); +- if (grp->preferred_cluster) +- trace_rtg_frame_sched(id, "preferred_cluster", +- grp->preferred_cluster->id); +- +- return 0; +-} +- +-static int set_frame_status(struct frame_info *frame_info, unsigned long status) +-{ +- struct related_thread_group *grp = NULL; +- int id; +- +- if (!frame_info) +- return -EINVAL; +- +- grp = frame_info->rtg; +- if (unlikely(!grp)) +- return -EINVAL; +- +- if (atomic_read(&frame_info->frame_sched_state) == 0) +- return -EINVAL; +- +- if (!(status & FRAME_SETTIME) || +- (status == (unsigned long)FRAME_SETTIME_PARAM)) { +- pr_err("[FRAME_RTG]: %s invalid timestamp(status)\n", +- __func__); +- return -EINVAL; +- } +- +- if (status & FRAME_TIMESTAMP_SKIP_START) { +- frame_info->timestamp_skipped = true; +- status &= ~FRAME_TIMESTAMP_SKIP_START; +- } else if (status & FRAME_TIMESTAMP_SKIP_END) { +- frame_info->timestamp_skipped = false; +- status &=
~FRAME_TIMESTAMP_SKIP_END; +- } else if (frame_info->timestamp_skipped) { +- /* +- * skip the following timestamp until +- * FRAME_TIMESTAMP_SKIPPED reset +- */ +- return 0; +- } +- id = grp->id; +- trace_rtg_frame_sched(id, "FRAME_TIMESTAMP_SKIPPED", +- frame_info->timestamp_skipped); +- trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util); +- +- if (status & FRAME_USE_MARGIN_IMME) { +- frame_info->margin_imme = true; +- status &= ~FRAME_USE_MARGIN_IMME; +- } else { +- frame_info->margin_imme = false; +- } +- trace_rtg_frame_sched(id, "FRAME_MARGIN_IMME", frame_info->margin_imme); +- trace_rtg_frame_sched(id, "FRAME_TIMESTAMP", status); +- +- return update_frame_timestamp(status, frame_info, grp); +-} +- +-int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp) +-{ +- int ret; +- +- if (!frame_info || !frame_info->rtg) +- return -EINVAL; +- +- if (atomic_read(&frame_info->frame_sched_state) == 0) +- return -EINVAL; +- +- ret = sched_set_group_window_rollover(frame_info->rtg->id); +- if (!ret) +- ret = set_frame_status(frame_info, timestamp); +- +- return ret; +-} +- +-int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost) +-{ +- int id; +- +- if (unlikely((min_util < 0) || (min_util > SCHED_CAPACITY_SCALE))) { +- pr_err("[FRAME_RTG]: %s invalid min_util value\n", +- __func__); +- return -EINVAL; +- } +- +- if (!frame_info || !frame_info->rtg) +- return -EINVAL; +- +- id = frame_info->rtg->id; +- if (is_boost) { +- frame_info->frame_boost_min_util = min_util; +- trace_rtg_frame_sched(id, "FRAME_BOOST_MIN_UTIL", min_util); +- } else { +- frame_info->frame_min_util = min_util; +- +- frame_info->frame_util = calc_frame_util(frame_info, false); +- trace_rtg_frame_sched(id, "frame_util", frame_info->frame_util); +- sched_set_group_normalized_util(id, +- frame_info->frame_util, RTG_FREQ_FORCE_UPDATE); +- } +- +- return 0; +-} +- +-int set_frame_max_util(struct frame_info *frame_info, int max_util) +-{ +- int id; +- +- if ((max_util < 0) || (max_util > SCHED_CAPACITY_SCALE)) { +- pr_err("[FRAME_RTG]: %s invalid max_util value\n", +- __func__); +- return -EINVAL; +- } +- +- if (!frame_info || !frame_info->rtg) +- return -EINVAL; +- +- frame_info->frame_max_util = max_util; +- id = frame_info->rtg->id; +- trace_rtg_frame_sched(id, "FRAME_MAX_UTIL", frame_info->frame_max_util); +- +- return 0; +-} +- +-struct frame_info *lookup_frame_info_by_grp_id(int grp_id) +-{ +- if (grp_id >= (MULTI_FRAME_ID + MULTI_FRAME_NUM) || (grp_id <= 0)) +- return NULL; +- if (grp_id >= MULTI_FRAME_ID) { +- read_lock(&g_id_manager.lock); +- if (!test_bit(grp_id - MULTI_FRAME_ID, g_id_manager.id_map)) { +- read_unlock(&g_id_manager.lock); +- return NULL; +- } +- read_unlock(&g_id_manager.lock); +- return rtg_frame_info(grp_id); +- } else +- return rtg_frame_info(grp_id); +-} +- +-static int _init_frame_info(struct frame_info *frame_info, int id) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flags; +- +- memset(frame_info, 0, sizeof(struct frame_info)); +- mutex_init(&frame_info->lock); +- +- mutex_lock(&frame_info->lock); +- frame_info->frame_rate = DEFAULT_FRAME_RATE; +- frame_info->frame_time = div_u64(NSEC_PER_SEC, frame_info->frame_rate); +- frame_info->thread_num = 0; +- frame_info->prio = NOT_RT_PRIO; +- atomic_set(&(frame_info->curr_rt_thread_num), 0); +- atomic_set(&(frame_info->frame_sched_state), 0); +- frame_info->vload_margin = DEFAULT_VLOAD_MARGIN; +- frame_info->max_vload_time = +- div_u64(frame_info->frame_time, 
NSEC_PER_MSEC) + +- frame_info->vload_margin; +- frame_info->frame_min_util = FRAME_DEFAULT_MIN_UTIL; +- frame_info->frame_max_util = FRAME_DEFAULT_MAX_UTIL; +- frame_info->prev_min_util = FRAME_DEFAULT_MIN_PREV_UTIL; +- frame_info->prev_max_util = FRAME_DEFAULT_MAX_PREV_UTIL; +- frame_info->margin_imme = false; +- frame_info->timestamp_skipped = false; +- frame_info->status = FRAME_END; +- +- grp = frame_rtg(id); +- if (unlikely(!grp)) { +- mutex_unlock(&frame_info->lock); +- return -EINVAL; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flags); +- grp->private_data = frame_info; +- grp->rtg_class = &frame_rtg_class; +- raw_spin_unlock_irqrestore(&grp->lock, flags); +- +- frame_info->rtg = grp; +- mutex_unlock(&frame_info->lock); +- +- return 0; +-} +- +-static int __init init_frame_info(void) +-{ +- int ret = 0; +- int id; +- +- for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) { +- if (ret != 0) +- break; +- ret = _init_frame_info(rtg_multi_frame_info(id), id); +- } +- +- return ret; +-} +-late_initcall(init_frame_info); +diff --git a/kernel/sched/rtg/frame_rtg.h b/kernel/sched/rtg/frame_rtg.h +deleted file mode 100755 +index 6bb25fa20..000000000 +--- a/kernel/sched/rtg/frame_rtg.h ++++ /dev/null +@@ -1,116 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * Frame declaration +- * +- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef __FRAME_RTG_H +-#define __FRAME_RTG_H +- +-#include +-#include +-#include +-#include +-#include +- +-#define MULTI_FRAME_ID (DEFAULT_CGROUP_COLOC_ID + 1) +-#define MULTI_FRAME_NUM (MAX_NUM_CGROUP_COLOC_ID - DEFAULT_CGROUP_COLOC_ID - 1) +- +-#define NOT_RT_PRIO (-1) +-#define STATIC_RTG_DEPTH (-1) +- +-#define FRAME_START (1 << 0) +-#define FRAME_END (1 << 1) +-#define FRAME_INVALID (1 << 2) +-#define FRAME_USE_MARGIN_IMME (1 << 4) +-#define FRAME_TIMESTAMP_SKIP_START (1 << 5) +-#define FRAME_TIMESTAMP_SKIP_END (1 << 6) +-#define FRAME_SETTIME (FRAME_START | FRAME_END | \ +- FRAME_USE_MARGIN_IMME) +-#define FRAME_SETTIME_PARAM (-1) +- +-#define DEFAULT_FRAME_RATE 60 +-#define MIN_FRAME_RATE 1 +-#define MAX_FRAME_RATE 120 +- +-/* MARGIN value : [-100, 100] */ +-#define DEFAULT_VLOAD_MARGIN 16 +-#define MIN_VLOAD_MARGIN (-100) +-#define MAX_VLOAD_MARGIN 0xffff +- +-#define FRAME_MAX_VLOAD SCHED_CAPACITY_SCALE +-#define FRAME_MAX_LOAD SCHED_CAPACITY_SCALE +-#define FRAME_UTIL_INVALID_FACTOR 4 +-#define FRAME_DEFAULT_MIN_UTIL 0 +-#define FRAME_DEFAULT_MAX_UTIL SCHED_CAPACITY_SCALE +-#define FRAME_DEFAULT_MIN_PREV_UTIL 0 +-#define FRAME_DEFAULT_MAX_PREV_UTIL SCHED_CAPACITY_SCALE +- +-#define DEFAULT_MAX_RT_THREAD 5 +-/* +- * RTG_MAX_RT_THREAD_NUM should be CONFIG_NR_CPUS in previous version +- * fit for FFRT here +- */ +-#define RTG_MAX_RT_THREAD_NUM 20 +-#define INVALID_PREFERRED_CLUSTER 10 +- +-enum rtg_type { +- VIP = 0, +- TOP_TASK_KEY, +- NORMAL_TASK, +- RTG_TYPE_MAX, +-}; +- +-struct frame_thread_info { +- int prio; +- int thread[MAX_TID_NUM]; +- int thread_num; +-}; +- +-struct multi_frame_id_manager { +- DECLARE_BITMAP(id_map, MULTI_FRAME_NUM); +- unsigned int offset; +- rwlock_t lock; +-}; +- +-struct rtg_info { +- int rtg_num; +- int rtgs[MULTI_FRAME_NUM]; +-}; +- +-bool is_frame_rtg(int id); +-int set_frame_rate(struct frame_info *frame_info, int rate); +-int alloc_multi_frame_info(void); +-struct frame_info *rtg_active_multi_frame_info(int id); +-struct frame_info *rtg_multi_frame_info(int id); +-void release_multi_frame_info(int id); +-void clear_multi_frame_info(void); +-void set_frame_prio(struct 
frame_info *frame_info, int prio); +-struct task_struct *update_frame_thread(struct frame_info *frame_info, +- int old_prio, int prio, int pid, +- struct task_struct *old_task); +-void update_frame_thread_info(struct frame_info *frame_info, +- struct frame_thread_info *frame_thread_info); +-#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT +-int read_rtg_rt_thread_num(void); +-#else +-static inline int read_rtg_rt_thread_num(void) +-{ +- return 0; +-} +-#endif +-static inline +-struct group_ravg *frame_info_rtg_load(const struct frame_info *frame_info) +-{ +- return &frame_info_rtg(frame_info)->ravg; +-} +-void set_frame_sched_state(struct frame_info *frame_info, bool enable); +-int set_frame_margin(struct frame_info *frame_info, int margin); +-int set_frame_timestamp(struct frame_info *frame_info, unsigned long timestamp); +-int set_frame_max_util(struct frame_info *frame_info, int max_util); +-int set_frame_min_util(struct frame_info *frame_info, int min_util, bool is_boost); +-struct frame_info *lookup_frame_info_by_grp_id(int grp_id); +-int list_rtg_group(struct rtg_info *rs_data); +-int search_rtg(int pid); +-#endif +diff --git a/kernel/sched/rtg/rtg.c b/kernel/sched/rtg/rtg.c +deleted file mode 100755 +index be95c1e81..000000000 +--- a/kernel/sched/rtg/rtg.c ++++ /dev/null +@@ -1,1258 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * related thread group sched +- * +- */ +-#include +-#include +-#include +-#include +-#define CREATE_TRACE_POINTS +-#include +-#undef CREATE_TRACE_POINTS +- +-#include "../sched.h" +-#include "rtg.h" +-#include "../walt.h" +- +-#ifdef CONFIG_SCHED_RTG_FRAME +-#include "frame_rtg.h" +-#endif +- +-#define ADD_TASK 0 +-#define REM_TASK 1 +- +-#define DEFAULT_GROUP_RATE 60 /* 60FPS */ +-#define DEFAULT_UTIL_INVALID_INTERVAL (~0U) /* ns */ +-#define DEFAULT_UTIL_UPDATE_TIMEOUT 20000000 /* ns */ +-#define DEFAULT_FREQ_UPDATE_INTERVAL 8000000 /* ns */ +- +-struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID]; +-static DEFINE_RWLOCK(related_thread_group_lock); +-static LIST_HEAD(active_related_thread_groups); +- +-#define for_each_related_thread_group(grp) \ +- list_for_each_entry(grp, &active_related_thread_groups, list) +- +-void init_task_rtg(struct task_struct *p) +-{ +- rcu_assign_pointer(p->grp, NULL); +- INIT_LIST_HEAD(&p->grp_list); +-} +- +-struct related_thread_group *task_related_thread_group(struct task_struct *p) +-{ +- return rcu_dereference(p->grp); +-} +- +-struct related_thread_group * +-lookup_related_thread_group(unsigned int group_id) +-{ +- return related_thread_groups[group_id]; +-} +- +-int alloc_related_thread_groups(void) +-{ +- int i, ret; +- struct related_thread_group *grp = NULL; +- +- /* group_id = 0 is invalid as it's a special id used to remove a group.
*/ +- for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) { +- grp = kzalloc(sizeof(*grp), GFP_NOWAIT); +- if (!grp) { +- ret = -ENOMEM; +- goto err; +- } +- +- grp->id = i; +- INIT_LIST_HEAD(&grp->tasks); +- INIT_LIST_HEAD(&grp->list); +- grp->window_size = NSEC_PER_SEC / DEFAULT_GROUP_RATE; +- grp->util_invalid_interval = DEFAULT_UTIL_INVALID_INTERVAL; +- grp->util_update_timeout = DEFAULT_UTIL_UPDATE_TIMEOUT; +- grp->max_boost = 0; +- grp->freq_update_interval = DEFAULT_FREQ_UPDATE_INTERVAL; +- raw_spin_lock_init(&grp->lock); +- +- related_thread_groups[i] = grp; +- } +- +- return 0; +- +-err: +- for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) { +- grp = lookup_related_thread_group(i); +- if (grp) { +- kfree(grp); +- related_thread_groups[i] = NULL; +- } else { +- break; +- } +- } +- +- return ret; +-} +- +-/* +- * Task's cpu usage is accounted in: +- * rq->curr/prev_runnable_sum, when its ->grp is NULL +- * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL +- * +- * Transfer task's cpu usage between those counters when transitioning between +- * groups +- */ +-static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, +- struct task_struct *p, int event) +-{ +- u64 wallclock; +- struct group_cpu_time *cpu_time; +- u64 *src_curr_runnable_sum, *dst_curr_runnable_sum; +- u64 *src_prev_runnable_sum, *dst_prev_runnable_sum; +- u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum; +- u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum; +- int migrate_type; +- int cpu = cpu_of(rq); +- bool new_task; +- int i; +- +- wallclock = sched_ktime_clock(); +- +- update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); +- update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0); +- new_task = is_new_task(p); +- +- cpu_time = &rq->grp_time; +- if (event == ADD_TASK) { +- migrate_type = RQ_TO_GROUP; +- +- src_curr_runnable_sum = &rq->curr_runnable_sum; +- dst_curr_runnable_sum = &cpu_time->curr_runnable_sum; +- src_prev_runnable_sum = &rq->prev_runnable_sum; +- dst_prev_runnable_sum = &cpu_time->prev_runnable_sum; +- +- src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum; +- dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; +- src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum; +- dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; +- +- *src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu]; +- *src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu]; +- if (new_task) { +- *src_nt_curr_runnable_sum -= +- p->ravg.curr_window_cpu[cpu]; +- *src_nt_prev_runnable_sum -= +- p->ravg.prev_window_cpu[cpu]; +- } +- +- update_cluster_load_subtractions(p, cpu, +- rq->window_start, new_task); +- +- } else { +- migrate_type = GROUP_TO_RQ; +- +- src_curr_runnable_sum = &cpu_time->curr_runnable_sum; +- dst_curr_runnable_sum = &rq->curr_runnable_sum; +- src_prev_runnable_sum = &cpu_time->prev_runnable_sum; +- dst_prev_runnable_sum = &rq->prev_runnable_sum; +- +- src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; +- dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum; +- src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; +- dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum; +- +- *src_curr_runnable_sum -= p->ravg.curr_window; +- *src_prev_runnable_sum -= p->ravg.prev_window; +- if (new_task) { +- *src_nt_curr_runnable_sum -= p->ravg.curr_window; +- *src_nt_prev_runnable_sum -= p->ravg.prev_window; +- } +- +- /* +- * Need to reset curr/prev windows for all CPUs, not just the +- * ones in the same cluster. 
Since inter cluster migrations +- * did not result in the appropriate bookkeeping, the values +- * per CPU would be inaccurate. +- */ +- for_each_possible_cpu(i) { +- p->ravg.curr_window_cpu[i] = 0; +- p->ravg.prev_window_cpu[i] = 0; +- } +- } +- +- *dst_curr_runnable_sum += p->ravg.curr_window; +- *dst_prev_runnable_sum += p->ravg.prev_window; +- if (new_task) { +- *dst_nt_curr_runnable_sum += p->ravg.curr_window; +- *dst_nt_prev_runnable_sum += p->ravg.prev_window; +- } +- +- /* +- * When a task enters or exits a group, its curr and prev windows are +- * moved to a single CPU. This behavior might be sub-optimal in the +- * exit case; however, it saves us the overhead of handling inter +- * cluster migration fixups while the task is part of a related group. +- */ +- p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window; +- p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window; +- +- trace_sched_migration_update_sum(p, migrate_type, rq); +-} +- +-static void _set_preferred_cluster(struct related_thread_group *grp, +- int sched_cluster_id); +-static void remove_task_from_group(struct task_struct *p) +-{ +- struct related_thread_group *grp = p->grp; +- struct rq *rq = NULL; +- bool empty_group = true; +- struct rq_flags flag; +- unsigned long irqflag; +- +- rq = __task_rq_lock(p, &flag); +- transfer_busy_time(rq, p->grp, p, REM_TASK); +- +- raw_spin_lock_irqsave(&grp->lock, irqflag); +- list_del_init(&p->grp_list); +- rcu_assign_pointer(p->grp, NULL); +- +- if (p->on_cpu) +- grp->nr_running--; +- +- if ((int)grp->nr_running < 0) { +- WARN_ON(1); +- grp->nr_running = 0; +- } +- +- if (!list_empty(&grp->tasks)) { +- empty_group = false; +- } else { +-#ifdef CONFIG_UCLAMP_TASK +- grp->max_boost = 0; +-#endif +- _set_preferred_cluster(grp, -1); +- grp->ravg.normalized_util = 0; +- } +- +- raw_spin_unlock_irqrestore(&grp->lock, irqflag); +- __task_rq_unlock(rq, &flag); +- +- /* Reserved groups cannot be destroyed */ +- if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID) { +- /* +- * We test whether grp->list is attached with list_empty() +- * hence re-init the list after deletion. +- */ +- write_lock(&related_thread_group_lock); +- list_del_init(&grp->list); +- write_unlock(&related_thread_group_lock); +- } +-} +- +-static int +-add_task_to_group(struct task_struct *p, struct related_thread_group *grp) +-{ +- struct rq *rq = NULL; +- struct rq_flags flag; +- unsigned long irqflag; +-#ifdef CONFIG_UCLAMP_TASK +- int boost; +-#endif +- +- /* +- * Change p->grp under rq->lock.
Will prevent races with read-side +- * reference of p->grp in various hot-paths +- */ +- rq = __task_rq_lock(p, &flag); +- transfer_busy_time(rq, grp, p, ADD_TASK); +- +- raw_spin_lock_irqsave(&grp->lock, irqflag); +- list_add(&p->grp_list, &grp->tasks); +- rcu_assign_pointer(p->grp, grp); +- if (p->on_cpu) { +- grp->nr_running++; +- if (grp->nr_running == 1) +- grp->mark_start = max(grp->mark_start, +- sched_ktime_clock()); +- } +- +-#ifdef CONFIG_UCLAMP_TASK +- boost = (int)uclamp_eff_value(p, UCLAMP_MIN); +- if (boost > grp->max_boost) +- grp->max_boost = boost; +-#endif +- raw_spin_unlock_irqrestore(&grp->lock, irqflag); +- __task_rq_unlock(rq, &flag); +- +- return 0; +-} +- +-static int __sched_set_group_id(struct task_struct *p, unsigned int group_id) +-{ +- int rc = 0; +- unsigned long flags; +- struct related_thread_group *grp = NULL; +- struct related_thread_group *old_grp = NULL; +- +- if (group_id >= MAX_NUM_CGROUP_COLOC_ID) +- return -EINVAL; +- +- raw_spin_lock_irqsave(&p->pi_lock, flags); +- old_grp = p->grp; +- if ((current != p && (p->flags & PF_EXITING)) || +- (!old_grp && !group_id)) +- goto done; +- +- /* +- * If the system has CONFIG_SCHED_RTG_CGROUP, only tasks in DEFAULT group +- * can be directly switched to other groups. +- * +- * In other cases, Switching from one group to another directly is not permitted. +- */ +- if (old_grp && group_id) { +-#ifdef CONFIG_SCHED_RTG_CGROUP +- if (old_grp->id == DEFAULT_CGROUP_COLOC_ID) { +- remove_task_from_group(p); +- } else { +-#endif +- rc = -EINVAL; +- goto done; +-#ifdef CONFIG_SCHED_RTG_CGROUP +- } +-#endif +- } +- +- if (!group_id) { +- remove_task_from_group(p); +- goto done; +- } +- +- grp = lookup_related_thread_group(group_id); +- write_lock(&related_thread_group_lock); +- if (list_empty(&grp->list)) +- list_add(&grp->list, &active_related_thread_groups); +- write_unlock(&related_thread_group_lock); +- +- rc = add_task_to_group(p, grp); +-done: +- raw_spin_unlock_irqrestore(&p->pi_lock, flags); +- +- return rc; +-} +- +-/* group_id == 0: remove task from rtg */ +-int sched_set_group_id(struct task_struct *p, unsigned int group_id) +-{ +- /* DEFAULT_CGROUP_COLOC_ID is a reserved id */ +- if (group_id == DEFAULT_CGROUP_COLOC_ID) +- return -EINVAL; +- +- return __sched_set_group_id(p, group_id); +-} +- +-unsigned int sched_get_group_id(struct task_struct *p) +-{ +- unsigned int group_id; +- struct related_thread_group *grp = NULL; +- +- rcu_read_lock(); +- grp = task_related_thread_group(p); +- group_id = grp ? 
grp->id : 0; +- rcu_read_unlock(); +- +- return group_id; +-} +- +-void update_group_nr_running(struct task_struct *p, int event, u64 wallclock) +-{ +- struct related_thread_group *grp; +- bool need_update = false; +- +- rcu_read_lock(); +- grp = task_related_thread_group(p); +- if (!grp) { +- rcu_read_unlock(); +- return; +- } +- +- raw_spin_lock(&grp->lock); +- +- if (event == PICK_NEXT_TASK) +- grp->nr_running++; +- else if (event == PUT_PREV_TASK) +- grp->nr_running--; +- +- if ((int)grp->nr_running < 0) { +- WARN_ON(1); +- grp->nr_running = 0; +- } +- +- /* update preferred cluster if it has not been updated for a long time */ +- if (wallclock - grp->last_util_update_time > grp->util_update_timeout) +- need_update = true; +- +- raw_spin_unlock(&grp->lock); +- +- rcu_read_unlock(); +- +- if (need_update && grp->rtg_class && grp->rtg_class->sched_update_rtg_tick && +- grp->id != DEFAULT_CGROUP_COLOC_ID) +- grp->rtg_class->sched_update_rtg_tick(grp); +-} +- +-int sched_set_group_window_size(unsigned int grp_id, unsigned int window_size) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flag; +- +- if (!window_size) +- return -EINVAL; +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set window size for group %d fail\n", grp_id); +- return -ENODEV; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flag); +- grp->window_size = window_size; +- raw_spin_unlock_irqrestore(&grp->lock, flag); +- +- return 0; +-} +- +-void group_time_rollover(struct group_ravg *ravg) +-{ +- ravg->prev_window_load = ravg->curr_window_load; +- ravg->curr_window_load = 0; +- ravg->prev_window_exec = ravg->curr_window_exec; +- ravg->curr_window_exec = 0; +-} +- +-int sched_set_group_window_rollover(unsigned int grp_id) +-{ +- struct related_thread_group *grp = NULL; +- u64 wallclock; +- unsigned long flag; +-#ifdef CONFIG_UCLAMP_TASK +- struct task_struct *p = NULL; +- int boost; +-#endif +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set window start for group %d fail\n", grp_id); +- return -ENODEV; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flag); +- +- wallclock = sched_ktime_clock(); +- grp->prev_window_time = wallclock - grp->window_start; +- grp->window_start = wallclock; +- grp->max_boost = 0; +- +-#ifdef CONFIG_UCLAMP_TASK +- list_for_each_entry(p, &grp->tasks, grp_list) { +- boost = (int)uclamp_eff_value(p, UCLAMP_MIN); +- if (boost > 0) +- grp->max_boost = boost; +- } +-#endif +- +- group_time_rollover(&grp->ravg); +- raw_spin_unlock_irqrestore(&grp->lock, flag); +- +- return 0; +-} +- +-static void add_to_group_time(struct related_thread_group *grp, struct rq *rq, u64 wallclock) +-{ +- u64 delta_exec, delta_load; +- u64 mark_start = grp->mark_start; +- u64 window_start = grp->window_start; +- +- if (unlikely(wallclock <= mark_start)) +- return; +- +- /* per group load tracking in RTG */ +- if (likely(mark_start >= window_start)) { +- /* +- * ws ms wc +- * | | | +- * V V V +- * |---------------| +- */ +- delta_exec = wallclock - mark_start; +- grp->ravg.curr_window_exec += delta_exec; +- +- delta_load = scale_exec_time(delta_exec, rq); +- grp->ravg.curr_window_load += delta_load; +- } else { +- /* +- * ms ws wc +- * | | | +- * V V V +- * -----|---------- +- */ +- /* prev window statistic */ +- delta_exec = window_start - mark_start; +- grp->ravg.prev_window_exec += delta_exec; +- +- delta_load = scale_exec_time(delta_exec, rq); +- grp->ravg.prev_window_load += delta_load; +- +- /* curr window statistic */ +- delta_exec = wallclock - window_start; +-
grp->ravg.curr_window_exec += delta_exec; +- +- delta_load = scale_exec_time(delta_exec, rq); +- grp->ravg.curr_window_load += delta_load; +- } +-} +- +-static inline void add_to_group_demand(struct related_thread_group *grp, +- struct rq *rq, u64 wallclock) +-{ +- if (unlikely(wallclock <= grp->window_start)) +- return; +- +- add_to_group_time(grp, rq, wallclock); +-} +- +-static int account_busy_for_group_demand(struct task_struct *p, int event) +-{ +- /* +- * No need to bother updating task demand for exiting tasks +- * or the idle task. +- */ +- if (exiting_task(p) || is_idle_task(p)) +- return 0; +- +- if (event == TASK_WAKE || event == TASK_MIGRATE) +- return 0; +- +- return 1; +-} +- +-void update_group_demand(struct task_struct *p, struct rq *rq, +- int event, u64 wallclock) +-{ +- struct related_thread_group *grp; +- +- if (!account_busy_for_group_demand(p, event)) +- return; +- +- rcu_read_lock(); +- grp = task_related_thread_group(p); +- if (!grp) { +- rcu_read_unlock(); +- return; +- } +- +- raw_spin_lock(&grp->lock); +- +- if (grp->nr_running == 1) +- grp->mark_start = max(grp->mark_start, p->ravg.mark_start); +- +- add_to_group_demand(grp, rq, wallclock); +- +- grp->mark_start = wallclock; +- +- raw_spin_unlock(&grp->lock); +- +- rcu_read_unlock(); +-} +- +-void sched_update_rtg_tick(struct task_struct *p) +-{ +- struct related_thread_group *grp = NULL; +- +- rcu_read_lock(); +- grp = task_related_thread_group(p); +- if (!grp || list_empty(&grp->tasks)) { +- rcu_read_unlock(); +- return; +- } +- +- if (grp->rtg_class && grp->rtg_class->sched_update_rtg_tick) +- grp->rtg_class->sched_update_rtg_tick(grp); +- +- rcu_read_unlock(); +-} +- +-int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) +-{ +- struct related_thread_group *grp = NULL; +- int rc = 1; +- +- rcu_read_lock(); +- +- grp = task_related_thread_group(p); +- if (grp != NULL) +- rc = (grp->preferred_cluster == cluster); +- +- rcu_read_unlock(); +- return rc; +-} +- +-unsigned int get_cluster_grp_running(int cluster_id) +-{ +- struct related_thread_group *grp = NULL; +- unsigned int total_grp_running = 0; +- unsigned long flag, rtg_flag; +- unsigned int i; +- +- read_lock_irqsave(&related_thread_group_lock, rtg_flag); +- +- /* grp_id 0 is used for exited tasks */ +- for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) { +- grp = lookup_related_thread_group(i); +- if (!grp) +- continue; +- +- raw_spin_lock_irqsave(&grp->lock, flag); +- if (grp->preferred_cluster != NULL && +- grp->preferred_cluster->id == cluster_id) +- total_grp_running += grp->nr_running; +- raw_spin_unlock_irqrestore(&grp->lock, flag); +- } +- read_unlock_irqrestore(&related_thread_group_lock, rtg_flag); +- +- return total_grp_running; +-} +- +-static void _set_preferred_cluster(struct related_thread_group *grp, +- int sched_cluster_id) +-{ +- struct sched_cluster *cluster = NULL; +- struct sched_cluster *cluster_found = NULL; +- +- if (sched_cluster_id == -1) { +- grp->preferred_cluster = NULL; +- return; +- } +- +- for_each_sched_cluster_reverse(cluster) { +- if (cluster->id == sched_cluster_id) { +- cluster_found = cluster; +- break; +- } +- } +- +- if (cluster_found != NULL) +- grp->preferred_cluster = cluster_found; +- else +- pr_err("cannot find sched_cluster_id=%d\n", sched_cluster_id); +-} +- +-/* +- * sched_cluster_id == -1: grp->preferred_cluster will be set to NULL +- */ +-static void set_preferred_cluster(struct related_thread_group *grp, +- int sched_cluster_id) +-{ +- unsigned long flag; +- +- raw_spin_lock_irqsave(&grp->lock, flag); +-
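
Since the prev/curr split in add_to_group_time() above is pure interval arithmetic, it can be checked outside the kernel. A minimal userspace model, under the assumption that load equals raw exec time (i.e. ignoring scale_exec_time() frequency/capacity scaling and the group lock):

#include <stdio.h>
#include <stdint.h>

struct group_ravg { uint64_t prev_window_exec, curr_window_exec; };

/* ws = window_start, ms = mark_start, wc = wallclock, as in the diagrams */
static void add_time(struct group_ravg *ravg, uint64_t ws, uint64_t ms,
		     uint64_t wc)
{
	if (wc <= ms)
		return;
	if (ms >= ws) {
		/* the whole span ran inside the current window */
		ravg->curr_window_exec += wc - ms;
	} else {
		/* the span straddles the rollover: split it at ws */
		ravg->prev_window_exec += ws - ms;
		ravg->curr_window_exec += wc - ws;
	}
}

int main(void)
{
	struct group_ravg ravg = { 0, 0 };

	add_time(&ravg, 100, 40, 130); /* ms < ws <= wc */
	printf("prev=%llu curr=%llu\n",
	       (unsigned long long)ravg.prev_window_exec,
	       (unsigned long long)ravg.curr_window_exec); /* prev=60 curr=30 */
	return 0;
}
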
_set_preferred_cluster(grp, sched_cluster_id); +- raw_spin_unlock_irqrestore(&grp->lock, flag); +-} +- +-int sched_set_group_preferred_cluster(unsigned int grp_id, int sched_cluster_id) +-{ +- struct related_thread_group *grp = NULL; +- +- /* DEFAULT_CGROUP_COLOC_ID is a reserved id */ +- if (grp_id == DEFAULT_CGROUP_COLOC_ID || +- grp_id >= MAX_NUM_CGROUP_COLOC_ID) +- return -EINVAL; +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set preferred cluster for group %d fail\n", grp_id); +- return -ENODEV; +- } +- set_preferred_cluster(grp, sched_cluster_id); +- +- return 0; +-} +- +-struct cpumask *find_rtg_target(struct task_struct *p) +-{ +- struct related_thread_group *grp = NULL; +- struct sched_cluster *preferred_cluster = NULL; +- struct cpumask *rtg_target = NULL; +- +- rcu_read_lock(); +- grp = task_related_thread_group(p); +- rcu_read_unlock(); +- +- if (!grp) +- return NULL; +- +- preferred_cluster = grp->preferred_cluster; +- if (!preferred_cluster) +- return NULL; +- +- rtg_target = &preferred_cluster->cpus; +- if (!task_fits_max(p, cpumask_first(rtg_target))) +- return NULL; +- +- return rtg_target; +-} +- +-int find_rtg_cpu(struct task_struct *p) +-{ +- int i; +- cpumask_t search_cpus = CPU_MASK_NONE; +- int max_spare_cap_cpu = -1; +- unsigned long max_spare_cap = 0; +- int idle_backup_cpu = -1; +- struct cpumask *preferred_cpus = find_rtg_target(p); +- +- if (!preferred_cpus) +- return -1; +- +- cpumask_and(&search_cpus, p->cpus_ptr, cpu_online_mask); +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask); +-#endif +- +- /* search the preferred idle cpu */ +- for_each_cpu_and(i, &search_cpus, preferred_cpus) { +- if (is_reserved(i)) +- continue; +- +- if (idle_cpu(i) || (i == task_cpu(p) && p->__state == TASK_RUNNING)) { +- trace_find_rtg_cpu(p, preferred_cpus, "prefer_idle", i); +- return i; +- } +- } +- +- for_each_cpu(i, &search_cpus) { +- unsigned long spare_cap; +- +- if (sched_cpu_high_irqload(i)) +- continue; +- +- if (is_reserved(i)) +- continue; +- +- /* take the Active LB CPU as idle_backup_cpu */ +- if (idle_cpu(i) || (i == task_cpu(p) && p->__state == TASK_RUNNING)) { +- /* find the idle_backup_cpu with max capacity */ +- if (idle_backup_cpu == -1 || +- capacity_orig_of(i) > capacity_orig_of(idle_backup_cpu)) +- idle_backup_cpu = i; +- +- continue; +- } +- +- spare_cap = capacity_spare_without(i, p); +- if (spare_cap > max_spare_cap) { +- max_spare_cap = spare_cap; +- max_spare_cap_cpu = i; +- } +- } +- +- if (idle_backup_cpu != -1) { +- trace_find_rtg_cpu(p, preferred_cpus, "idle_backup", idle_backup_cpu); +- return idle_backup_cpu; +- } +- +- trace_find_rtg_cpu(p, preferred_cpus, "max_spare", max_spare_cap_cpu); +- +- return max_spare_cap_cpu; +-} +- +-int sched_set_group_util_invalid_interval(unsigned int grp_id, +- unsigned int interval) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flag; +- +- if (interval == 0) +- return -EINVAL; +- +- /* DEFAULT_CGROUP_COLOC_ID is a reserved id */ +- if (grp_id == DEFAULT_CGROUP_COLOC_ID || +- grp_id >= MAX_NUM_CGROUP_COLOC_ID) +- return -EINVAL; +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set invalid interval for group %d fail\n", grp_id); +- return -ENODEV; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flag); +- if ((signed int)interval < 0) +- grp->util_invalid_interval = DEFAULT_UTIL_INVALID_INTERVAL; +- else +- grp->util_invalid_interval = interval * NSEC_PER_MSEC; +- +- +-
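
find_rtg_cpu() above encodes a three-tier placement policy: an idle CPU inside the preferred cluster wins outright, then the highest-capacity idle CPU anywhere in the search set, then the busy CPU with the most spare capacity. A toy userspace model of just that ordering (the isolation, reservation and irqload filters are omitted, and the CPU states are made up):

#include <stdio.h>

#define NR_CPUS 8

struct cpu_state { int idle, preferred; long capacity, util; };

static int pick_cpu(const struct cpu_state *cpus)
{
	int i, idle_backup = -1, max_spare_cpu = -1;
	long max_spare = 0;

	for (i = 0; i < NR_CPUS; i++)
		if (cpus[i].idle && cpus[i].preferred)
			return i; /* tier 1: preferred idle */

	for (i = 0; i < NR_CPUS; i++) {
		if (cpus[i].idle) {
			/* tier 2: idle backup with the largest capacity */
			if (idle_backup == -1 ||
			    cpus[i].capacity > cpus[idle_backup].capacity)
				idle_backup = i;
			continue;
		}
		/* tier 3: busy CPU with the most spare capacity */
		if (cpus[i].capacity - cpus[i].util > max_spare) {
			max_spare = cpus[i].capacity - cpus[i].util;
			max_spare_cpu = i;
		}
	}
	return idle_backup != -1 ? idle_backup : max_spare_cpu;
}

int main(void)
{
	struct cpu_state cpus[NR_CPUS] = {
		[1] = { 0, 0, 512, 100 },  /* busy little, lots of spare */
		[4] = { 1, 0, 1024, 0 },   /* idle big, not preferred */
		[5] = { 0, 1, 1024, 900 }, /* preferred big, but busy */
	};

	printf("picked cpu%d\n", pick_cpu(cpus)); /* cpu4: idle backup */
	return 0;
}
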
raw_spin_unlock_irqrestore(&grp->lock, flag); +- +- return 0; +-} +- +-static inline bool +-group_should_invalid_util(struct related_thread_group *grp, u64 now) +-{ +- if (grp->util_invalid_interval == DEFAULT_UTIL_INVALID_INTERVAL) +- return false; +- +- return (now - grp->last_freq_update_time >= grp->util_invalid_interval); +-} +- +-static inline bool valid_normalized_util(struct related_thread_group *grp) +-{ +- struct task_struct *p = NULL; +- cpumask_t rtg_cpus = CPU_MASK_NONE; +- bool valid = false; +- +- if (grp->nr_running != 0) { +- list_for_each_entry(p, &grp->tasks, grp_list) { +- get_task_struct(p); +- if (p->__state == TASK_RUNNING) +- cpumask_set_cpu(task_cpu(p), &rtg_cpus); +- trace_sched_rtg_task_each(grp->id, grp->nr_running, p); +- put_task_struct(p); +- } +- +- valid = cpumask_intersects(&rtg_cpus, +- &grp->preferred_cluster->cpus); +- } +- trace_sched_rtg_valid_normalized_util(grp->id, grp->nr_running, &rtg_cpus, valid); +- +- return valid; +-} +- +-void sched_get_max_group_util(const struct cpumask *query_cpus, +- unsigned long *util, unsigned int *freq) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long max_grp_util = 0; +- unsigned int max_grp_freq = 0; +- u64 now = ktime_get_ns(); +- unsigned long rtg_flag; +- unsigned long flag; +- +- /* +- * sum the prev_runnable_sum for each rtg, +- * return the max rtg->load +- */ +- read_lock_irqsave(&related_thread_group_lock, rtg_flag); +- if (list_empty(&active_related_thread_groups)) +- goto unlock; +- +- for_each_related_thread_group(grp) { +- raw_spin_lock_irqsave(&grp->lock, flag); +- if (!list_empty(&grp->tasks) && +- grp->preferred_cluster != NULL && +- cpumask_intersects(query_cpus, +- &grp->preferred_cluster->cpus) && +- !group_should_invalid_util(grp, now)) { +- +- if (grp->ravg.normalized_util > max_grp_util) +- max_grp_util = grp->ravg.normalized_util; +- } +- raw_spin_unlock_irqrestore(&grp->lock, flag); +- } +- +-unlock: +- read_unlock_irqrestore(&related_thread_group_lock, rtg_flag); +- +- *freq = max_grp_freq; +- *util = max_grp_util; +-} +- +-static struct sched_cluster *best_cluster(struct related_thread_group *grp) +-{ +- struct sched_cluster *cluster = NULL; +- struct sched_cluster *max_cluster = NULL; +- int cpu; +- unsigned long util = grp->ravg.normalized_util; +- unsigned long boosted_grp_util = util + grp->max_boost; +- unsigned long max_cap = 0; +- unsigned long cap = 0; +- +- /* find new cluster */ +- for_each_sched_cluster(cluster) { +- cpu = cpumask_first(&cluster->cpus); +- cap = capacity_orig_of(cpu); +- if (cap > max_cap) { +- max_cap = cap; +- max_cluster = cluster; +- } +- +- if (boosted_grp_util <= cap) +- return cluster; +- } +- +- return max_cluster; +-} +- +-static bool group_should_update_freq(struct related_thread_group *grp, +- int cpu, unsigned int flags, u64 now) +-{ +- if (!grp) +- return true; +- +- if (flags & RTG_FREQ_FORCE_UPDATE) { +- return true; +- } else if (flags & RTG_FREQ_NORMAL_UPDATE) { +- if (now - grp->last_freq_update_time >= +- grp->freq_update_interval) +- return true; +- } +- +- return false; +-} +- +-int sched_set_group_normalized_util(unsigned int grp_id, unsigned long util, +- unsigned int flag) +-{ +- struct related_thread_group *grp = NULL; +- bool need_update_prev_freq = false; +- bool need_update_next_freq = false; +- u64 now; +- unsigned long flags; +- struct sched_cluster *preferred_cluster = NULL; +- int prev_cpu; +- int next_cpu; +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set normalized util for group %d 
fail\n", grp_id); +- return -ENODEV; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flags); +- +- if (list_empty(&grp->tasks)) { +- raw_spin_unlock_irqrestore(&grp->lock, flags); +- return 0; +- } +- +- grp->ravg.normalized_util = util; +- +- preferred_cluster = best_cluster(grp); +- +- /* update prev_cluster force when preferred_cluster changed */ +- if (!grp->preferred_cluster) { +- grp->preferred_cluster = preferred_cluster; +- } else if (grp->preferred_cluster != preferred_cluster) { +- prev_cpu = cpumask_first(&grp->preferred_cluster->cpus); +- grp->preferred_cluster = preferred_cluster; +- +- need_update_prev_freq = true; +- } +- +- if (grp->preferred_cluster != NULL) +- next_cpu = cpumask_first(&grp->preferred_cluster->cpus); +- else +- next_cpu = 0; +- +- now = ktime_get_ns(); +- grp->last_util_update_time = now; +- need_update_next_freq = +- group_should_update_freq(grp, next_cpu, flag, now); +- if (need_update_next_freq) +- grp->last_freq_update_time = now; +- +- raw_spin_unlock_irqrestore(&grp->lock, flags); +- +- if (need_update_prev_freq) +- cpufreq_update_util(cpu_rq(prev_cpu), +- SCHED_CPUFREQ_FORCE_UPDATE | SCHED_CPUFREQ_WALT); +- +- if (need_update_next_freq) +- cpufreq_update_util(cpu_rq(next_cpu), +- SCHED_CPUFREQ_FORCE_UPDATE | SCHED_CPUFREQ_WALT); +- +- return 0; +-} +- +-int sched_set_group_freq_update_interval(unsigned int grp_id, unsigned int interval) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flag; +- +- if ((signed int)interval <= 0) +- return -EINVAL; +- +- /* DEFAULT_CGROUP_COLOC_ID is a reserved id */ +- if (grp_id == DEFAULT_CGROUP_COLOC_ID || +- grp_id >= MAX_NUM_CGROUP_COLOC_ID) +- return -EINVAL; +- +- grp = lookup_related_thread_group(grp_id); +- if (!grp) { +- pr_err("set update interval for group %d fail\n", grp_id); +- return -ENODEV; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flag); +- grp->freq_update_interval = interval * NSEC_PER_MSEC; +- raw_spin_unlock_irqrestore(&grp->lock, flag); +- +- return 0; +-} +- +-#ifdef CONFIG_SCHED_RTG_CGROUP +-#ifdef CONFIG_UCLAMP_TASK_GROUP +-static inline bool uclamp_task_colocated(struct task_struct *p) +-{ +- struct cgroup_subsys_state *css; +- struct task_group *tg; +- bool colocate; +- +- rcu_read_lock(); +- css = task_css(p, cpu_cgrp_id); +- if (!css) { +- rcu_read_unlock(); +- return false; +- } +- tg = container_of(css, struct task_group, css); +- colocate = tg->colocate; +- rcu_read_unlock(); +- +- return colocate; +-} +-#else +-static inline bool uclamp_task_colocated(struct task_struct *p) +-{ +- return false; +-} +-#endif /* CONFIG_UCLAMP_TASK_GROUP */ +- +-void add_new_task_to_grp(struct task_struct *new) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flag; +- +- /* +- * If the task does not belong to colocated schedtune +- * cgroup, nothing to do. We are checking this without +- * lock. Even if there is a race, it will be added +- * to the co-located cgroup via cgroup attach. +- */ +- if (!uclamp_task_colocated(new)) +- return; +- +- grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); +- write_lock_irqsave(&related_thread_group_lock, flag); +- +- /* +- * It's possible that someone already added the new task to the +- * group. or it might have taken out from the colocated schedtune +- * cgroup. check these conditions under lock. 
+- */ +- if (!uclamp_task_colocated(new) || new->grp) { +- write_unlock_irqrestore(&related_thread_group_lock, flag); +- return; +- } +- +- raw_spin_lock(&grp->lock); +- +- rcu_assign_pointer(new->grp, grp); +- list_add(&new->grp_list, &grp->tasks); +- +- raw_spin_unlock(&grp->lock); +- write_unlock_irqrestore(&related_thread_group_lock, flag); +-} +- +- +-/* +- * We create a default colocation group at boot. There is no need to +- * synchronize tasks between cgroups at creation time because the +- * correct cgroup hierarchy is not available at boot. Therefore cgroup +- * colocation is turned off by default even though the colocation group +- * itself has been allocated. Furthermore, this colocation group cannot +- * be destroyed once it has been created. All of this has been done as part +- * of runtime optimizations. +- * +- * The job of synchronizing tasks to the colocation group is done when +- * the colocation flag in the cgroup is turned on. +- */ +-static int __init create_default_coloc_group(void) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flags; +- +- grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); +- if (!grp) { +- pr_err("create_default_coloc_group fail\n"); +- return -ENODEV; +- } +- write_lock_irqsave(&related_thread_group_lock, flags); +- list_add(&grp->list, &active_related_thread_groups); +- write_unlock_irqrestore(&related_thread_group_lock, flags); +- +- return 0; +-} +-late_initcall(create_default_coloc_group); +- +-int sync_cgroup_colocation(struct task_struct *p, bool insert) +-{ +- unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0; +- unsigned int old_grp_id; +- +- if (p) { +- old_grp_id = sched_get_group_id(p); +- /* +- * If the task is already in a group which is not DEFAULT_CGROUP_COLOC_ID, +- * we should not change the group id during switch to background. +- */ +- if ((old_grp_id != DEFAULT_CGROUP_COLOC_ID) && (grp_id == 0)) +- return 0; +- } +- +- return __sched_set_group_id(p, grp_id); +-} +-#endif /* CONFIG_SCHED_RTG_CGROUP */ +- +-#ifdef CONFIG_SCHED_RTG_DEBUG +-#define seq_printf_rtg(m, x...) \ +-do { \ +- if (m) \ +- seq_printf(m, x); \ +- else \ +- printk(x); \ +-} while (0) +- +-static void print_rtg_info(struct seq_file *file, +- const struct related_thread_group *grp) +-{ +- seq_printf_rtg(file, "RTG_ID : %d\n", grp->id); +- seq_printf_rtg(file, "RTG_INTERVAL : UPDATE:%lums#INVALID:%lums\n", +- grp->freq_update_interval / NSEC_PER_MSEC, +- grp->util_invalid_interval / NSEC_PER_MSEC); +- seq_printf_rtg(file, "RTG_CLUSTER : %d\n", +- grp->preferred_cluster ?
grp->preferred_cluster->id : -1); +-#ifdef CONFIG_SCHED_RTG_RT_THREAD_LIMIT +- seq_printf_rtg(file, "RTG_RT_THREAD_NUM : %d/%d\n", +- read_rtg_rt_thread_num(), RTG_MAX_RT_THREAD_NUM); +-#endif +-} +- +-static char rtg_task_state_to_char(const struct task_struct *tsk) +-{ +- static const char state_char[] = "RSDTtXZPI"; +- unsigned int tsk_state = READ_ONCE(tsk->__state); +- unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; +- +- BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); +- BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); +- +- if (tsk_state == TASK_IDLE) +- state = TASK_REPORT_IDLE; +- return state_char[fls(state)]; +-} +- +-static inline void print_rtg_task_header(struct seq_file *file, +- const char *header, int run, int nr) +-{ +- seq_printf_rtg(file, +- "%s : %d/%d\n" +- "STATE COMM PID PRIO CPU\n" +- "---------------------------------------------------------\n", +- header, run, nr); +-} +- +-static inline void print_rtg_task(struct seq_file *file, +- const struct task_struct *tsk) +-{ +- seq_printf_rtg(file, "%5c %15s %5d %5d %5d(%*pbl)\n", +- rtg_task_state_to_char(tsk), tsk->comm, tsk->pid, +- tsk->prio, task_cpu(tsk), cpumask_pr_args(tsk->cpus_ptr)); +-} +- +-static void print_rtg_threads(struct seq_file *file, +- const struct related_thread_group *grp) +-{ +- struct task_struct *tsk = NULL; +- int nr_thread = 0; +- +- list_for_each_entry(tsk, &grp->tasks, grp_list) +- nr_thread++; +- +- if (!nr_thread) +- return; +- +- print_rtg_task_header(file, "RTG_THREADS", +- grp->nr_running, nr_thread); +- list_for_each_entry(tsk, &grp->tasks, grp_list) { +- if (unlikely(!tsk)) +- continue; +- get_task_struct(tsk); +- print_rtg_task(file, tsk); +- put_task_struct(tsk); +- } +- seq_printf_rtg(file, "---------------------------------------------------------\n"); +-} +- +-static int sched_rtg_debug_show(struct seq_file *file, void *param) +-{ +- struct related_thread_group *grp = NULL; +- unsigned long flags; +- bool have_task = false; +- +- for_each_related_thread_group(grp) { +- if (unlikely(!grp)) { +- seq_printf_rtg(file, "RTG none\n"); +- return 0; +- } +- +- raw_spin_lock_irqsave(&grp->lock, flags); +- if (list_empty(&grp->tasks)) { +- raw_spin_unlock_irqrestore(&grp->lock, flags); +- continue; +- } +- +- if (!have_task) +- have_task = true; +- +- seq_printf_rtg(file, "\n\n"); +- print_rtg_info(file, grp); +- print_rtg_threads(file, grp); +- raw_spin_unlock_irqrestore(&grp->lock, flags); +- } +- +- if (!have_task) +- seq_printf_rtg(file, "RTG tasklist empty\n"); +- +- return 0; +-} +- +-static int sched_rtg_debug_release(struct inode *inode, struct file *file) +-{ +- seq_release(inode, file); +- return 0; +-} +- +-static int sched_rtg_debug_open(struct inode *inode, struct file *filp) +-{ +- return single_open(filp, sched_rtg_debug_show, NULL); +-} +- +-static const struct proc_ops sched_rtg_debug_fops = { +- .proc_open = sched_rtg_debug_open, +- .proc_read = seq_read, +- .proc_lseek = seq_lseek, +- .proc_release = sched_rtg_debug_release, +-}; +- +-static int __init init_sched_rtg_debug_procfs(void) +-{ +- struct proc_dir_entry *pe = NULL; +- +- pe = proc_create("sched_rtg_debug", +- 0400, NULL, &sched_rtg_debug_fops); +- if (unlikely(!pe)) +- return -ENOMEM; +- return 0; +-} +-late_initcall(init_sched_rtg_debug_procfs); +-#endif +diff --git a/kernel/sched/rtg/rtg.h b/kernel/sched/rtg/rtg.h +deleted file mode 100755 +index 4f0cedc33..000000000 +--- a/kernel/sched/rtg/rtg.h ++++ /dev/null +@@ -1,64 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ 
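
With CONFIG_SCHED_RTG_DEBUG enabled, the state printed by sched_rtg_debug_show() above is one read away from userspace. A minimal reader for the procfs node registered by init_sched_rtg_debug_procfs() (the node is created 0400, so this needs root):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/sched_rtg_debug", "r");

	if (!fp) {
		perror("fopen /proc/sched_rtg_debug");
		return 1;
	}
	/* dumps RTG_ID / RTG_INTERVAL / RTG_CLUSTER and the thread table */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}
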
+-/* +- * related thread group sched header +- */ +-#ifndef __RTG_H +-#define __RTG_H +- +-#include +-#include +- +-#define for_each_sched_cluster_reverse(cluster) \ +- list_for_each_entry_reverse(cluster, &cluster_head, list) +- +-#ifdef CONFIG_SCHED_RTG +-void init_task_rtg(struct task_struct *p); +-int alloc_related_thread_groups(void); +-struct related_thread_group *lookup_related_thread_group(unsigned int group_id); +-struct related_thread_group *task_related_thread_group(struct task_struct *p); +-void update_group_nr_running(struct task_struct *p, int event, u64 wallclock); +-struct rq; +-void update_group_demand(struct task_struct *p, struct rq *rq, +- int event, u64 wallclock); +-int sched_set_group_window_size(unsigned int grp_id, unsigned int window_size); +-int sched_set_group_window_rollover(unsigned int grp_id); +-struct group_cpu_time *group_update_cpu_time(struct rq *rq, +- struct related_thread_group *grp); +-void sched_update_rtg_tick(struct task_struct *p); +-int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p); +-int sched_set_group_preferred_cluster(unsigned int grp_id, int sched_cluster_id); +-struct cpumask *find_rtg_target(struct task_struct *p); +-int find_rtg_cpu(struct task_struct *p); +-int sched_set_group_util_invalid_interval(unsigned int grp_id, +- unsigned int interval); +-int sched_set_group_normalized_util(unsigned int grp_id, unsigned long util, +- unsigned int flag); +-void sched_get_max_group_util(const struct cpumask *query_cpus, +- unsigned long *util, unsigned int *freq); +-int sched_set_group_freq_update_interval(unsigned int grp_id, +- unsigned int interval); +-#ifdef CONFIG_SCHED_RTG_CGROUP +-int sync_cgroup_colocation(struct task_struct *p, bool insert); +-void add_new_task_to_grp(struct task_struct *new); +-#else +-static inline void add_new_task_to_grp(struct task_struct *new) {} +-#endif /* CONFIG_SCHED_RTG_CGROUP */ +-#else +-static inline int alloc_related_thread_groups(void) { return 0; } +-static inline int sched_set_group_preferred_cluster(unsigned int grp_id, +- int sched_cluster_id) +-{ +- return 0; +-} +-static inline int sched_set_group_normalized_util(unsigned int grp_id, unsigned long util, +- unsigned int flag) +-{ +- return 0; +-} +-static inline void sched_get_max_group_util(const struct cpumask *query_cpus, +- unsigned long *util, unsigned int *freq) +-{ +-} +-static inline void add_new_task_to_grp(struct task_struct *new) {} +-#endif /* CONFIG_SCHED_RTG */ +-#endif +diff --git a/kernel/sched/rtg/rtg_ctrl.c b/kernel/sched/rtg/rtg_ctrl.c +deleted file mode 100755 +index 164f1b237..000000000 +--- a/kernel/sched/rtg/rtg_ctrl.c ++++ /dev/null +@@ -1,934 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * rtg control entry +- * +- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. 
+- */ +- +-#include "rtg.h" +-#include "rtg_ctrl.h" +- +-#include +-#include +- +-#ifdef CONFIG_AUTHORITY_CTRL +-#include +-#endif +- +-#include +-#include +-#include +- +-atomic_t g_rtg_enable = ATOMIC_INIT(0); +-static atomic_t g_rt_frame_num = ATOMIC_INIT(0); +-static int g_frame_max_util = DEFAULT_MAX_UTIL; +-static int g_max_rt_frames = DEFAULT_MAX_RT_FRAME; +-typedef long (*rtg_ctrl_func)(int abi, void __user *arg); +- +-static long ctrl_set_enable(int abi, void __user *uarg); +-static long ctrl_set_rtg(int abi, void __user *uarg); +-static long ctrl_set_rtg_attr(int abi, void __user *uarg); +-static long ctrl_begin_frame(int abi, void __user *uarg); +-static long ctrl_end_frame(int abi, void __user *uarg); +-static long ctrl_end_scene(int abi, void __user *uarg); +-static long ctrl_set_min_util(int abi, void __user *uarg); +-static long ctrl_set_margin(int abi, void __user *uarg); +-static long ctrl_search_rtg(int abi, void __user *uarg); +-static long ctrl_get_enable(int abi, void __user *uarg); +- +-static rtg_ctrl_func g_func_array[RTG_CTRL_MAX_NR] = { +- NULL, /* reserved */ +- ctrl_set_enable, // 1 +- ctrl_set_rtg, +- NULL, +- ctrl_set_rtg_attr, +- ctrl_begin_frame, // 5 +- ctrl_end_frame, +- ctrl_end_scene, +- ctrl_set_min_util, +- ctrl_set_margin, +- NULL, +- NULL, +- ctrl_search_rtg, +- ctrl_get_enable +-}; +- +-static int init_proc_state(const int *config, int len); +-static void deinit_proc_state(void); +- +-static int set_enable_config(char *config_str) +-{ +- char *p = NULL; +- char *tmp = NULL; +- int value; +- int config[RTG_CONFIG_NUM]; +- int i; +- int ret = 0; +- +- for (i = 0; i < RTG_CONFIG_NUM; i++) +- config[i] = INVALID_VALUE; +- /* eg: key1:value1;key2:value2;key3:value3 */ +- for (p = strsep(&config_str, ";"); p != NULL; +- p = strsep(&config_str, ";")) { +- tmp = strsep(&p, ":"); +- if ((tmp == NULL) || (p == NULL)) +- continue; +- if (kstrtoint((const char *)p, DECIMAL, &value)) +- return -INVALID_ARG; +- +- if (!strcmp(tmp, "sched_cycle")) +- config[RTG_FREQ_CYCLE] = value; +- else if (!strcmp(tmp, "frame_max_util")) +- config[RTG_FRAME_MAX_UTIL] = value; +- else if (!strcmp(tmp, "invalid_interval")) +- config[RTG_INVALID_INTERVAL] = value; +- else +- continue; +- } +- +- for (i = 0; i < RTG_CONFIG_NUM; i++) +- pr_info("[SCHED_RTG] config[%d] = %d\n", i, config[i]); +- +- ret = init_proc_state(config, RTG_CONFIG_NUM); +- +- return ret; +-} +- +-static void rtg_enable(int abi, const struct rtg_enable_data *data) +-{ +- char temp[MAX_DATA_LEN]; +- int ret = -1; +- +- if (atomic_read(&g_rtg_enable) == 1) { +- pr_info("[SCHED_RTG] already enabled!\n"); +- return; +- } +- +- if ((data->len <= 0) || (data->len >= MAX_DATA_LEN)) { +- pr_err("[SCHED_RTG] %s data len invalid\n", __func__); +- return; +- } +- +-#pragma GCC diagnostic push +-#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" +- switch (abi) { +- case IOCTL_ABI_ARM32: +- ret = copy_from_user(&temp, +- (void __user *)compat_ptr((compat_uptr_t)data->data), data->len); +- break; +- case IOCTL_ABI_AARCH64: +- ret = copy_from_user(&temp, (void __user *)data->data, data->len); +- break; +- default: +- pr_err("[SCHED_RTG] abi format error\n"); +- break; +- } +- if (ret) { +- pr_err("[SCHED_RTG] %s copy user data failed\n", __func__); +- return; +- } +-#pragma GCC diagnostic pop +- +-#pragma GCC diagnostic push +-#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" +- temp[data->len] = '\0'; +- +- if (set_enable_config(&temp) != SUCC) { +- pr_err("[SCHED_RTG] %s failed!\n", __func__); +- return; 
+- } +-#pragma GCC diagnostic pop +- +- atomic_set(&g_rtg_enable, 1); +- pr_info("[SCHED_RTG] enabled!\n"); +-} +- +-static void rtg_disable(void) +-{ +- if (atomic_read(&g_rtg_enable) == 0) { +- pr_info("[SCHED_RTG] already disabled!\n"); +- return; +- } +- pr_info("[SCHED_RTG] disabled!\n"); +- atomic_set(&g_rtg_enable, 0); +- deinit_proc_state(); +-} +- +-static inline bool is_rt_type(int type) +-{ +- return (type >= VIP && type < NORMAL_TASK); +-} +- +-static int do_update_rt_frame_num(struct frame_info *frame_info, int new_type) +-{ +- int old_type; +- int ret = SUCC; +- +- mutex_lock(&frame_info->lock); +- old_type = frame_info->prio - DEFAULT_RT_PRIO; +- if (is_rt_type(new_type) == is_rt_type(old_type)) +- goto out; +- +- if (is_rt_type(old_type)) { +- if (atomic_read(&g_rt_frame_num) > 0) +- atomic_dec(&g_rt_frame_num); +- } else if (is_rt_type(new_type)) { +- if (atomic_read(&g_rt_frame_num) < g_max_rt_frames) { +- atomic_inc(&g_rt_frame_num); +- } else { +- pr_err("[SCHED_RTG]: %s g_max_rt_frames is %d\n", +- __func__, g_max_rt_frames); +- ret = -INVALID_ARG; +- } +- } +-out: +- mutex_unlock(&frame_info->lock); +- +- return ret; +-} +- +-static int update_rt_frame_num(struct frame_info *frame_info, int new_type, int cmd) +-{ +- int ret = SUCC; +- +- switch (cmd) { +- case UPDATE_RTG_FRAME: +- ret = do_update_rt_frame_num(frame_info, new_type); +- break; +- case ADD_RTG_FRAME: +- if (is_rt_type(new_type)) { +- if (atomic_read(&g_rt_frame_num) >= g_max_rt_frames) { +- pr_err("[SCHED_RTG] g_max_rt_frames is %d!\n", g_max_rt_frames); +- ret = -INVALID_ARG; +- } else { +- atomic_inc(&g_rt_frame_num); +- } +- } +- break; +- case CLEAR_RTG_FRAME: +- if ((atomic_read(&g_rt_frame_num) > 0) && is_rt_type(new_type)) +- atomic_dec(&g_rt_frame_num); +- break; +- default: +- return -INVALID_ARG; +- } +- trace_rtg_frame_sched(frame_info->rtg->id, "g_rt_frame_num", atomic_read(&g_rt_frame_num)); +- trace_rtg_frame_sched(frame_info->rtg->id, "g_max_rt_frames", g_max_rt_frames); +- +- return ret; +-} +- +-static long ctrl_set_enable(int abi, void __user *uarg) +-{ +- struct rtg_enable_data rs_enable; +- +- if (copy_from_user(&rs_enable, uarg, sizeof(rs_enable))) { +- pr_err("[SCHED_RTG] CMD_ID_SET_ENABLE copy data failed\n"); +- return -INVALID_ARG; +- } +- if (rs_enable.enable == 1) +- rtg_enable(abi, &rs_enable); +- else +- rtg_disable(); +- +- return SUCC; +-} +- +-static long ctrl_get_enable(int abi, void __user *uarg) +-{ +- return atomic_read(&g_rtg_enable); +-} +- +-static inline bool is_valid_type(int type) +-{ +- return (type >= VIP && type < RTG_TYPE_MAX); +-} +- +-static int parse_rtg_attr(const struct rtg_str_data *rs_data) +-{ +- char *p = NULL; +- char *tmp = NULL; +- char *data = NULL; +- int value; +- struct frame_info *frame_info = NULL; +- int rate = -1; +- int type = -1; +- int ret; +- +- if (rs_data == NULL) { +- pr_err("[SCHED_RTG] rtg attr: rs_data is null!\n"); +- return -INVALID_ARG; +- } +- +- data = rs_data->data; +- if ((data == NULL) || (rs_data->len <= 0) || +- (rs_data->len > MAX_DATA_LEN)) { +- pr_err("[SCHED_RTG] rtg attr: rs_data len err!\n"); +- return -INVALID_ARG; +- } +- +- // eg: rtgId:xx;rate:xx;type:xx; +- for (p = strsep(&data, ";"); p != NULL; p = strsep(&data, ";")) { +- tmp = strsep(&p, ":"); +- if ((tmp == NULL) || (p == NULL)) +- continue; +- if (kstrtoint((const char *)p, DECIMAL, &value)) { +- pr_err("[SCHED_RTG] rtg attr: rs_data format err!\n"); +- return -INVALID_ARG; +- } +- if (!strcmp(tmp, "rtgId")) { +- frame_info = rtg_frame_info(value); +- } 
else if (!strcmp(tmp, "rate")) { +- rate = value; +- } else if (!strcmp(tmp, "type")) { +- if (is_valid_type(value)) { +- type = value; +- } else { +- pr_err("[SCHED_RTG] invalid type : %d\n", value); +- return -INVALID_ARG; +- } +- } else { +- pr_err("[SCHED_RTG] parse rtg attr failed!\n"); +- return -INVALID_ARG; +- } +- } +- +- if (!frame_info) { +- pr_err("[SCHED_RTG] rtg attr: invalid args!\n"); +- return -INVALID_ARG; +- } +- +- ret = set_frame_rate(frame_info, rate); +- if (ret) +- return ret; +- +- if (is_valid_type(type)) { +- if (update_rt_frame_num(frame_info, type, UPDATE_RTG_FRAME)) { +- pr_err("[SCHED_RTG] set rtg attr failed!\n"); +- return -INVALID_ARG; +- } +- +- set_frame_prio(frame_info, (type == NORMAL_TASK ? +- NOT_RT_PRIO : (type + DEFAULT_RT_PRIO))); +- } +- +- return SUCC; +-} +- +-static long ctrl_set_rtg_attr(int abi, void __user *uarg) +-{ +- struct rtg_str_data rs; +- char temp[MAX_DATA_LEN]; +- int ret; +- +- if (uarg == NULL) +- return -INVALID_ARG; +- +- if (copy_from_user(&rs, uarg, sizeof(rs))) { +- pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR copy data failed\n"); +- return -INVALID_ARG; +- } +- if ((rs.len <= 0) || (rs.len >= MAX_DATA_LEN)) { +- pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR data len invalid\n"); +- return -INVALID_ARG; +- } +- +-#pragma GCC diagnostic push +-#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" +- switch (abi) { +- case IOCTL_ABI_ARM32: +- ret = copy_from_user(&temp, +- (void __user *)compat_ptr((compat_uptr_t)rs.data), rs.len); +- break; +- case IOCTL_ABI_AARCH64: +- ret = copy_from_user(&temp, (void __user *)rs.data, rs.len); +- break; +- default: +- pr_err("[SCHED_RTG] abi format error\n"); +- return -INVALID_ARG; +- } +-#pragma GCC diagnostic pop +- +- if (ret) { +- pr_err("[SCHED_RTG] CMD_ID_SET_RTG_ATTR copy rs.data failed with ret %d\n", ret); +- return -INVALID_ARG; +- } +- +-#pragma GCC diagnostic push +-#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" +- temp[rs.len] = '\0'; +- rs.data = &temp; +-#pragma GCC diagnostic pop +- +- return parse_rtg_attr(&rs); +-} +- +-static void start_frame_freq(struct frame_info *frame_info) +-{ +- if (!frame_info) +- return; +- +- if (atomic_read(&frame_info->start_frame_freq) == 0) { +- atomic_set(&frame_info->start_frame_freq, 1); +- set_frame_sched_state(frame_info, true); +- } +-} +- +-static int set_frame(struct frame_info *frame_info, int margin) +-{ +- int ret; +- if (!frame_info) +- return -INVALID_RTG_ID; +- +- atomic_set(&frame_info->frame_state, FRAME_DRAWING); +- ret = set_frame_margin(frame_info, margin); +- if (ret) +- goto out; +- +- ret = set_frame_timestamp(frame_info, FRAME_START); +- if (ret) +- goto out; +- +-out: +- return ret; +-} +- +-static int reset_frame(struct frame_info *frame_info) +-{ +- if (!frame_info) +- return -INVALID_RTG_ID; +- +- if (atomic_read(&frame_info->frame_state) == FRAME_END_STATE) { +- pr_debug("[SCHED_RTG]: Frame state is already reset\n"); +- return -INVALID_PROC_STATE; +- } +- +- atomic_set(&frame_info->frame_state, FRAME_END_STATE); +- return set_frame_timestamp(frame_info, FRAME_END); +-} +- +-int update_frame_state(int grp_id, int margin, bool in_frame) +-{ +- int ret; +- struct frame_info *frame_info = NULL; +- +- frame_info = lookup_frame_info_by_grp_id(grp_id); +- if (!frame_info || !frame_info->rtg) +- return -INVALID_RTG_ID; +- +- if (in_frame) { +- start_frame_freq(frame_info); +- ret = set_frame(frame_info, margin); +- trace_rtg_frame_sched(grp_id, "margin", margin); +- } else { +- ret = reset_frame(frame_info); +- } 
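parse_rtg_attr() above consumes a flat "rtgId:xx;rate:xx;type:xx;" string (see the eg: comment in the deleted code), splitting on ';' and then on ':' before handing each value to kstrtoint(). The same tokenization as a self-contained userspace demo, with illustrative values:

/* "key:value;" wire-format parse, same strsep() scheme. Compile: gcc -std=gnu11 attr.c */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char buf[] = "rtgId:3;rate:60;type:1;";   /* format from the deleted eg: comment */
    char *data = buf, *p, *key;

    for (p = strsep(&data, ";"); p != NULL; p = strsep(&data, ";")) {
        key = strsep(&p, ":");
        if (key == NULL || p == NULL || *p == '\0')
            continue;   /* the kernel loop also skips malformed tokens */
        printf("%s = %ld\n", key, strtol(p, NULL, 10));   /* kstrtoint(..., 10, ...) */
    }
    return 0;
}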
+- +- return ret; +-} +- +-static inline int curr_grp_id() +-{ +- return sched_get_group_id(current); +-} +- +-static long ctrl_frame_state(void __user *uarg, bool is_enter) +-{ +- struct proc_state_data state_data; +- +- if (uarg == NULL) +- return -INVALID_ARG; +- +- if (copy_from_user(&state_data, uarg, sizeof(state_data))) { +- pr_err("[SCHED_RTG] CMD_ID_FRAME_FREQ copy data failed\n"); +- return -INVALID_ARG; +- } +- +- return update_frame_state(curr_grp_id(), state_data.state_param, is_enter); +-} +- +-static long ctrl_begin_frame(int abi, void __user *uarg) +-{ +- return ctrl_frame_state(uarg, true); +-} +- +-static long ctrl_end_frame(int abi, void __user *uarg) +-{ +- return ctrl_frame_state(uarg, false); +-} +- +-static int stop_frame_freq(int gid) +-{ +- struct frame_info *frame_info = NULL; +- +- frame_info = lookup_frame_info_by_grp_id(gid); +- if (!frame_info) +- return -INVALID_RTG_ID; +- +- atomic_set(&frame_info->start_frame_freq, 0); +- set_frame_sched_state(frame_info, false); +- +- return 0; +-} +- +-static long ctrl_end_scene(int abi, void __user *uarg) +-{ +- int rtg_id; +- +- if (uarg == NULL) +- return -INVALID_ARG; +- +- if (copy_from_user(&rtg_id, uarg, sizeof(int))) { +- pr_err("[SCHED_RTG] CMD_ID_END_SCENE copy data failed\n"); +- return -INVALID_ARG; +- } +- +- return stop_frame_freq(rtg_id); +-} +- +-static int set_min_util(int gid, int min_util) +-{ +- struct frame_info *frame_info = NULL; +- +- frame_info = lookup_frame_info_by_grp_id(gid); +- if (!frame_info) +- return -FRAME_ERR_PID; +- +- return set_frame_min_util(frame_info, min_util, false); +-} +- +-static long ctrl_set_min_util(int abi, void __user *uarg) +-{ +- struct proc_state_data state_data; +- +- if (uarg == NULL) +- return -INVALID_ARG; +- +- if (copy_from_user(&state_data, uarg, sizeof(state_data))) { +- pr_err("[SCHED_RTG] CMD_ID_SET_MIN_UTIL copy data failed\n"); +- return -INVALID_ARG; +- } +- +- return set_min_util(curr_grp_id(), state_data.state_param); +-} +- +-static int set_margin(int grp_id, int margin) +-{ +- struct frame_info *frame_info = NULL; +- +- frame_info = lookup_frame_info_by_grp_id(grp_id); +- if (!frame_info) +- return -FRAME_ERR_PID; +- +- set_frame_margin(frame_info, margin); +- +- return SUCC; +-} +- +-static long ctrl_set_margin(int abi, void __user *uarg) +-{ +- struct proc_state_data state_data; +- +- if (uarg == NULL) +- return -INVALID_ARG; +- +- if (copy_from_user(&state_data, uarg, sizeof(state_data))) { +- pr_err("[SCHED_RTG] CMD_ID_SET_MARGIN copy data failed\n"); +- return -INVALID_ARG; +- } +- +- return set_margin(curr_grp_id(), state_data.state_param); +-} +- +-static void clear_rtg_frame_thread(struct frame_info *frame_info, bool reset) +-{ +- struct frame_thread_info frame_thread_info; +- int i; +- +- if (!reset && frame_info) +- frame_thread_info.prio = frame_info->prio; +- else +- frame_thread_info.prio = NOT_RT_PRIO; +- for (i = 0; i < MAX_TID_NUM; i++) +- frame_thread_info.thread[i] = -1; +- frame_thread_info.thread_num = MAX_TID_NUM; +- update_frame_thread_info(frame_info, &frame_thread_info); +- if (reset) { +- atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD); +- atomic_set(&frame_info->frame_sched_state, 0); +- trace_rtg_frame_sched(frame_info->rtg->id, "FRAME_SCHED_ENABLE", 0); +- } +-} +- +-static void copy_proc_from_rsdata(struct rtg_proc_data *proc_info, +- const struct rtg_grp_data *rs_data) +-{ +- memset(proc_info, 0, sizeof(struct rtg_proc_data)); +- proc_info->type = VIP; +- proc_info->rtcnt = DEFAULT_MAX_RT_THREAD; +- if 
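ctrl_begin_frame() and ctrl_end_frame() above are the per-frame entry points: both copy a proc_state_data from userspace and funnel into update_frame_state() for the calling task's group. A hypothetical userspace frame loop against the /dev/sched_rtg_ctrl device this file registers; the device name and struct layout come from the deleted code, but the ioctl request numbers below are placeholders, since the real CMD_ID_* uapi macros are not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct proc_state_data { int state_param; };   /* as in the deleted rtg_ctrl code */

#define RTG_MAGIC 0xAB                                                    /* assumption */
#define CMD_ID_BEGIN_FRAME_FREQ _IOWR(RTG_MAGIC, 5, struct proc_state_data) /* assumption */
#define CMD_ID_END_FRAME_FREQ   _IOWR(RTG_MAGIC, 6, struct proc_state_data) /* assumption */

int main(void)
{
    int fd = open("/dev/sched_rtg_ctrl", O_RDWR);
    struct proc_state_data d = { .state_param = 16 };   /* margin, illustrative */

    if (fd < 0)
        return 1;
    for (int frame = 0; frame < 3; frame++) {
        ioctl(fd, CMD_ID_BEGIN_FRAME_FREQ, &d);   /* update_frame_state(..., true) */
        /* ... render one frame ... */
        ioctl(fd, CMD_ID_END_FRAME_FREQ, &d);     /* update_frame_state(..., false) */
    }
    close(fd);
    return 0;
}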
((rs_data->grp_type > 0) && (rs_data->grp_type < RTG_TYPE_MAX)) +- proc_info->type = rs_data->grp_type; +- if ((rs_data->rt_cnt > 0) && (rs_data->rt_cnt < DEFAULT_MAX_RT_THREAD)) +- proc_info->rtcnt = rs_data->rt_cnt; +-} +- +-static void init_frame_thread_info(struct frame_thread_info *frame_thread_info, +- const struct rtg_proc_data *proc_info) +-{ +- int i; +- int type = proc_info->type; +- +- frame_thread_info->prio = (type == NORMAL_TASK ? NOT_RT_PRIO : (type + DEFAULT_RT_PRIO)); +- for (i = 0; i < MAX_TID_NUM; i++) +- frame_thread_info->thread[i] = proc_info->thread[i]; +- frame_thread_info->thread_num = MAX_TID_NUM; +-} +- +-static int parse_create_rtg_grp(const struct rtg_grp_data *rs_data) +-{ +- struct rtg_proc_data proc_info; +- struct frame_info *frame_info; +- struct frame_thread_info frame_thread_info; +- +- copy_proc_from_rsdata(&proc_info, rs_data); +- proc_info.rtgid = alloc_multi_frame_info(); +- frame_info = rtg_frame_info(proc_info.rtgid); +- if (!frame_info) { +- pr_err("[SCHED_RTG] no free multi frame.\n"); +- return -NO_FREE_MULTI_FRAME; +- } +- atomic_set(&frame_info->max_rt_thread_num, proc_info.rtcnt); +- if (update_rt_frame_num(frame_info, rs_data->grp_type, ADD_RTG_FRAME)) { +- release_multi_frame_info(proc_info.rtgid); +- return -NO_RT_FRAME; +- } +- init_frame_thread_info(&frame_thread_info, &proc_info); +- update_frame_thread_info(frame_info, &frame_thread_info); +- atomic_set(&frame_info->frame_sched_state, 1); +- pr_info("[SCHED_RTG] %s rtgid=%d, type=%d, prio=%d, threadnum=%d, rtnum=%d\n", +- __func__, proc_info.rtgid, rs_data->grp_type, +- frame_thread_info.prio, frame_thread_info.thread_num, proc_info.rtcnt); +- +- return proc_info.rtgid; +-} +- +-static int parse_add_rtg_thread(const struct rtg_grp_data *rs_data) +-{ +- struct rtg_proc_data proc_info; +- struct frame_info *frame_info; +- int add_index; +- int add_num; +- int prio; +- int fail_num = 0; +- int i; +- +- if ((rs_data->grp_id <= 0) || (rs_data->grp_id >= MAX_NUM_CGROUP_COLOC_ID)) +- return -INVALID_ARG; +- copy_proc_from_rsdata(&proc_info, rs_data); +- frame_info = lookup_frame_info_by_grp_id(rs_data->grp_id); +- if (!frame_info) { +- pr_err("[SCHED_RTG] grp not created yet.\n"); +- return -INVALID_ARG; +- } +- mutex_lock(&frame_info->lock); +- add_num = rs_data->tid_num; +- if ((frame_info->thread_num < 0) || (add_num < 0)) { +- mutex_unlock(&frame_info->lock); +- pr_err("[SCHED_RTG] Unexpected err: frame_info num < 0.\n"); +- return -INVALID_RTG_ID; +- } +- if (frame_info->thread_num + add_num > MAX_TID_NUM) { +- mutex_unlock(&frame_info->lock); +- return -INVALID_RTG_ID; +- } +- add_index = frame_info->thread_num; +- prio = (proc_info.type == NORMAL_TASK) ?
NOT_RT_PRIO : frame_info->prio; +- for (i = 0; i < add_num; i++) { +- frame_info->thread[add_index] = update_frame_thread(frame_info, prio, prio, +- rs_data->tids[i], +- frame_info->thread[add_index]); +- if (frame_info->thread[add_index]) { +- atomic_set(&frame_info->thread_prio[add_index], prio); +- frame_info->thread_num++; +- add_index = frame_info->thread_num; +- } else { +- fail_num++; +- } +- } +- mutex_unlock(&frame_info->lock); +- +- return fail_num; +-} +- +-static int parse_remove_thread(const struct rtg_grp_data *rs_data) +-{ +- pr_err("[SCHED_RTG] frame rtg not support remove single yet.\n"); +- +- return -INVALID_ARG; +-} +- +-static int do_clear_or_destroy_grp(const struct rtg_grp_data *rs_data, bool destroy) +-{ +- struct frame_info *frame_info; +- int type; +- int id = rs_data->grp_id; +- +- if (!is_frame_rtg(id)) { +- pr_err("[SCHED_RTG] Failed to destroy rtg group %d!\n", id); +- return -INVALID_ARG; +- } +- +- frame_info = rtg_frame_info(id); +- if (!frame_info) { +- pr_err("[SCHED_RTG] Failed to destroy rtg group %d: grp not exist.\n", id); +- return -INVALID_ARG; +- } +- +- type = frame_info->prio - DEFAULT_RT_PRIO; +- if (destroy) { +- clear_rtg_frame_thread(frame_info, true); +- release_multi_frame_info(id); +- update_rt_frame_num(frame_info, type, CLEAR_RTG_FRAME); +- } else { +- clear_rtg_frame_thread(frame_info, false); +- } +- pr_info("[SCHED_RTG] %s clear frame(id=%d)\n", __func__, id); +- +- return SUCC; +-} +- +-static int parse_destroy_grp(const struct rtg_grp_data *rs_data) +-{ +- return do_clear_or_destroy_grp(rs_data, true); +-} +- +-long ctrl_set_rtg(int abi, void __user *uarg) +-{ +- struct rtg_grp_data rs_data; +- long ret; +- +- if (copy_from_user(&rs_data, uarg, sizeof(rs_data))) { +- pr_err("[SCHED_RTG] CMD_ID_SET_RTG copy data failed\n"); +- return -INVALID_ARG; +- } +- +- switch (rs_data.rtg_cmd) { +- case CMD_CREATE_RTG_GRP: +- ret = parse_create_rtg_grp(&rs_data); +- break; +- case CMD_ADD_RTG_THREAD: +- ret = parse_add_rtg_thread(&rs_data); +- break; +- case CMD_REMOVE_RTG_THREAD: +- ret = parse_remove_thread(&rs_data); +- break; +- case CMD_CLEAR_RTG_GRP: +- ret = -INVALID_ARG; +- break; +- case CMD_DESTROY_RTG_GRP: +- ret = parse_destroy_grp(&rs_data); +- break; +- default: +- return -INVALID_ARG; +- } +- +- return ret; +-} +- +-static long ctrl_search_rtg(int abi, void __user *uarg) +-{ +- struct proc_state_data search_data; +- +- if (copy_from_user(&search_data, uarg, sizeof(search_data))) { +- pr_err("[SCHED_RTG] CMD_ID_SEARCH_RTG copy data failed\n"); +- return -INVALID_ARG; +- } +- +- return search_rtg(search_data.state_param); +-} +- +-static long do_proc_rtg_ioctl(int abi, struct file *file, unsigned int cmd, unsigned long arg) +-{ +- void __user *uarg = (void __user *)(uintptr_t)arg; +- unsigned int func_id = _IOC_NR(cmd); +-#ifdef CONFIG_RTG_AUTHORITY +- bool authorized = true; +-#endif +- +- if (uarg == NULL) { +- pr_err("[SCHED_RTG] %s: invalid user uarg\n", __func__); +- return -EINVAL; +- } +- +- if (_IOC_TYPE(cmd) != RTG_SCHED_IPC_MAGIC) { +- pr_err("[SCHED_RTG] %s: RTG_SCHED_IPC_MAGIC fail, TYPE=%d\n", +- __func__, _IOC_TYPE(cmd)); +- return -INVALID_MAGIC; +- } +- +- if (!atomic_read(&g_rtg_enable) && (func_id != SET_ENABLE) && (func_id != GET_ENABLE)) { +- pr_err("[SCHED_RTG] CMD_ID %x error: Rtg not enabled yet.\n", cmd); +- return -RTG_DISABLED; +- } +- +- if (func_id >= RTG_CTRL_MAX_NR) { +- pr_err("[SCHED_RTG] %s: RTG_MAX_NR fail, _IOC_NR(cmd)=%d, MAX_NR=%d\n", +- __func__, _IOC_NR(cmd), RTG_CTRL_MAX_NR); +- return 
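ctrl_set_rtg() above dispatches on rtg_grp_data.rtg_cmd for the whole group lifecycle: create returns the new rtgid, add returns the number of tids that failed to attach, and destroy releases the slot along with its RT quota. A sketch of the structures a caller would fill; the field names and command names come from this patch, while MAX_TID_NUM's value and the enum ordering are assumptions:

#include <stdio.h>

#define MAX_TID_NUM 8   /* assumption: value not visible in this patch */

struct rtg_grp_data {
    int rtg_cmd;
    int grp_id;
    int grp_type;
    int rt_cnt;
    int tid_num;
    int tids[MAX_TID_NUM];
};

/* command ordering is an assumption; only the names appear in the patch */
enum { CMD_CREATE_RTG_GRP = 1, CMD_ADD_RTG_THREAD, CMD_REMOVE_RTG_THREAD,
       CMD_CLEAR_RTG_GRP, CMD_DESTROY_RTG_GRP };

int main(void)
{
    /* 1. create: parse_create_rtg_grp() returns the new rtgid */
    struct rtg_grp_data create = { .rtg_cmd = CMD_CREATE_RTG_GRP,
                                   .grp_type = 1,   /* VIP..RTG_TYPE_MAX range */
                                   .rt_cnt = 2 };   /* < DEFAULT_MAX_RT_THREAD */
    int rtgid = 3;   /* stand-in for the ioctl return value */

    /* 2. add threads: the fail count comes back from parse_add_rtg_thread() */
    struct rtg_grp_data add = { .rtg_cmd = CMD_ADD_RTG_THREAD,
                                .grp_id = rtgid, .tid_num = 2,
                                .tids = { 1234, 1235 } };

    /* 3. destroy: do_clear_or_destroy_grp(..., true) releases the slot */
    struct rtg_grp_data destroy = { .rtg_cmd = CMD_DESTROY_RTG_GRP,
                                    .grp_id = rtgid };

    printf("%d %d %d\n", create.rtg_cmd, add.tid_num, destroy.grp_id);
    return 0;
}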
-INVALID_CMD; +- } +- +-#ifdef CONFIG_RTG_AUTHORITY +- authorized = check_authorized(func_id, RTG_AUTH_FLAG); +- if (!authorized) { +- pr_err("[SCHED_RTG] %s: uid not authorized.\n", __func__); +- return -INVALID_CMD; +- } +-#endif +- if (g_func_array[func_id] != NULL) +- return (*g_func_array[func_id])(abi, uarg); +- +- return -EINVAL; +-} +- +-static void reset_frame_info(struct frame_info *frame_info) +-{ +- int i; +- clear_rtg_frame_thread(frame_info, true); +- atomic_set(&frame_info->frame_state, -1); +- atomic_set(&frame_info->curr_rt_thread_num, 0); +- atomic_set(&frame_info->max_rt_thread_num, DEFAULT_MAX_RT_THREAD); +- for (i = 0; i < MAX_TID_NUM; i++) +- atomic_set(&frame_info->thread_prio[i], 0); +-} +- +-static int do_init_proc_state(int rtgid, const int *config, int len) +-{ +- struct related_thread_group *grp = NULL; +- struct frame_info *frame_info = NULL; +- +- grp = lookup_related_thread_group(rtgid); +- if (unlikely(!grp)) +- return -EINVAL; +- +- frame_info = (struct frame_info *)grp->private_data; +- if (!frame_info) +- return -EINVAL; +- +- reset_frame_info(frame_info); +- +- if ((config[RTG_FREQ_CYCLE] >= MIN_FREQ_CYCLE) && +- (config[RTG_FREQ_CYCLE] <= MAX_FREQ_CYCLE)) +- sched_set_group_freq_update_interval(rtgid, +- (unsigned int)config[RTG_FREQ_CYCLE]); +- else +- sched_set_group_freq_update_interval(rtgid, +- DEFAULT_FREQ_CYCLE); +- +- if (config[RTG_INVALID_INTERVAL] != INVALID_VALUE) +- sched_set_group_util_invalid_interval(rtgid, +- config[RTG_INVALID_INTERVAL]); +- else +- sched_set_group_util_invalid_interval(rtgid, +- DEFAULT_INVALID_INTERVAL); +- +- set_frame_max_util(frame_info, g_frame_max_util); +- +- return SUCC; +-} +- +-static int init_proc_state(const int *config, int len) +-{ +- int ret; +- int id; +- +- if ((config == NULL) || (len != RTG_CONFIG_NUM)) +- return -INVALID_ARG; +- +- if ((config[RTG_FRAME_MAX_UTIL] > 0) && +- (config[RTG_FRAME_MAX_UTIL] < DEFAULT_MAX_UTIL)) +- g_frame_max_util = config[RTG_FRAME_MAX_UTIL]; +- +- for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) { +- ret = do_init_proc_state(id, config, len); +- if (ret) { +- pr_err("[SCHED_RTG] init proc state for FRAME_ID=%d failed, ret=%d\n", +- id, ret); +- return ret; +- } +- } +- atomic_set(&g_rt_frame_num, 0); +- +- return SUCC; +-} +- +-static void deinit_proc_state(void) +-{ +- int id; +- struct frame_info *frame_info = NULL; +- struct related_thread_group *grp = NULL; +- +- for (id = MULTI_FRAME_ID; id < (MULTI_FRAME_ID + MULTI_FRAME_NUM); id++) { +- grp = lookup_related_thread_group(id); +- if (unlikely(!grp)) +- return; +- +- frame_info = (struct frame_info *)grp->private_data; +- if (frame_info) +- reset_frame_info(frame_info); +- } +- clear_multi_frame_info(); +- atomic_set(&g_rt_frame_num, 0); +-} +- +-int proc_rtg_open(struct inode *inode, struct file *filp) +-{ +- return SUCC; +-} +- +-static int proc_rtg_release(struct inode *inode, struct file *filp) +-{ +- return SUCC; +-} +- +-long proc_rtg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +-{ +- return do_proc_rtg_ioctl(IOCTL_ABI_AARCH64, file, cmd, arg); +-} +- +-#ifdef CONFIG_COMPAT +-long proc_rtg_compat_ioctl(struct file *file, +- unsigned int cmd, unsigned long arg) +-{ +- return do_proc_rtg_ioctl(IOCTL_ABI_ARM32, file, cmd, +- (unsigned long)(compat_ptr((compat_uptr_t)arg))); +-} +-#endif +- +-static const struct file_operations rtg_ctrl_fops = { +- .open = proc_rtg_open, +- .release = proc_rtg_release, +- .unlocked_ioctl = proc_rtg_ioctl, +-#ifdef CONFIG_COMPAT +- 
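The tail of this file wires the whole interface up as a misc character device: proc_rtg_ioctl() tags callers as IOCTL_ABI_AARCH64, while proc_rtg_compat_ioctl() widens 32-bit pointers with compat_ptr() and tags them IOCTL_ABI_ARM32 before both land in do_proc_rtg_ioctl(). The same registration pattern, stripped to a minimal module skeleton for illustration:

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>

enum { IOCTL_ABI_ARM32, IOCTL_ABI_AARCH64 };

static long demo_do_ioctl(int abi, struct file *file, unsigned int cmd,
                          unsigned long arg)
{
    pr_info("demo_rtg: cmd=%u abi=%d\n", cmd, abi);
    return 0;
}

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    return demo_do_ioctl(IOCTL_ABI_AARCH64, file, cmd, arg);
}

#ifdef CONFIG_COMPAT
static long demo_compat_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
{
    /* 32-bit callers pass 32-bit pointers: widen via compat_ptr() */
    return demo_do_ioctl(IOCTL_ABI_ARM32, file, cmd,
                         (unsigned long)compat_ptr((compat_uptr_t)arg));
}
#endif

static const struct file_operations demo_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = demo_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = demo_compat_ioctl,
#endif
};

static struct miscdevice demo_dev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "demo_rtg_ctrl",
    .fops  = &demo_fops,
};
module_misc_device(demo_dev);
MODULE_LICENSE("GPL");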
.compat_ioctl = proc_rtg_compat_ioctl, +-#endif +-}; +- +-static struct miscdevice rtg_ctrl_device = { +- .minor = MISC_DYNAMIC_MINOR, +- .name = "sched_rtg_ctrl", +- .fops = &rtg_ctrl_fops, +- .mode = 0666, +-}; +- +-static int __init rtg_ctrl_dev_init(void) +-{ +- return misc_register(&rtg_ctrl_device); +-} +- +-static void __exit rtg_ctrl_dev_exit(void) +-{ +- misc_deregister(&rtg_ctrl_device); +-} +- +-module_init(rtg_ctrl_dev_init); +-module_exit(rtg_ctrl_dev_exit); +diff --git a/kernel/sched/rtg/rtg_ctrl.h b/kernel/sched/rtg/rtg_ctrl.h +deleted file mode 100755 +index 5a5a4e5dd..000000000 +--- a/kernel/sched/rtg/rtg_ctrl.h ++++ /dev/null +@@ -1,90 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * rtg control interface +- * +- * Copyright (c) 2022-2023 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef __RTG_CTL_H +-#define __RTG_CTL_H +- +-#include +-#include +-#include +-#include +- +-#include "frame_rtg.h" +- +-/* set rtg */ +-#define INVALID_VALUE 0xffff +-#define DEFAULT_RT_PRIO 97 +- +-#define MAX_DATA_LEN 256 +-#define DECIMAL 10 +-#define DEFAULT_MAX_UTIL 1024 +-#define MAX_SUBPROCESS_NUM 8 +- +-#define RTG_ID_INVALID (-1) +-/* fit for FFRT, original DEFAULT_MAX_RT_FRAME is 3 */ +-#define DEFAULT_MAX_RT_FRAME 10 +-#define MAX_RT_THREAD (MAX_TID_NUM + 2) +-#define INIT_VALUE (-1) +-#define UPDATE_RTG_FRAME (1 << 0) +-#define ADD_RTG_FRAME (1 << 1) +-#define CLEAR_RTG_FRAME (1 << 2) +- +-#define DEFAULT_FREQ_CYCLE 4 +-#define MIN_FREQ_CYCLE 1 +-#define MAX_FREQ_CYCLE 16 +-#define DEFAULT_INVALID_INTERVAL 50 +- +-/* proc_state */ +-enum proc_state { +- STATE_MIN = 0, +- FRAME_DRAWING, +- FRAME_RME_MAX = 19, +- /* rme end */ +- FRAME_END_STATE = FRAME_RME_MAX + 1, +- +- FRAME_CLICK = 100, +- STATE_MAX, +-}; +- +-enum rtg_config { +- RTG_FREQ_CYCLE, +- RTG_FRAME_MAX_UTIL, +- RTG_INVALID_INTERVAL, +- RTG_CONFIG_NUM, +-}; +- +-enum rtg_err_no { +- SUCC = 0, +- RTG_DISABLED = 1, +- INVALID_ARG, +- INVALID_MAGIC, +- INVALID_CMD, +- FRAME_ERR_PID = 100, +- NO_FREE_MULTI_FRAME, +- NOT_MULTI_FRAME, +- INVALID_RTG_ID, +- NO_RT_FRAME, +- INVALID_PROC_STATE, +-}; +- +-struct rtg_grp_data { +- int rtg_cmd; +- int grp_id; +- int grp_type; +- int rt_cnt; +- int tid_num; +- int tids[MAX_TID_NUM]; +-}; +- +-struct rtg_proc_data { +- int rtgid; +- int type; +- int thread[MAX_TID_NUM]; +- int rtcnt; +-}; +- +-#endif +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index e079a51cc..d48c6a292 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -8,7 +8,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -84,10 +83,6 @@ + # include + #endif + +-#ifdef CONFIG_SCHED_RTG +-#include +-#endif +- + #ifdef CONFIG_PARAVIRT + # include + # include +@@ -107,51 +102,6 @@ + struct rq; + struct cpuidle_state; + +-#ifdef CONFIG_SCHED_RT_CAS +-extern unsigned long uclamp_task_util(struct task_struct *p, +- unsigned long uclamp_min, +- unsigned long uclamp_max); +-#endif +- +-#ifdef CONFIG_SCHED_WALT +-extern unsigned int sched_ravg_window; +-extern unsigned int walt_cpu_util_freq_divisor; +- +-struct walt_sched_stats { +- u64 cumulative_runnable_avg_scaled; +-}; +- +-struct load_subtractions { +- u64 window_start; +- u64 subs; +- u64 new_subs; +-}; +- +-#define NUM_TRACKED_WINDOWS 2 +- +-struct sched_cluster { +- raw_spinlock_t load_lock; +- struct list_head list; +- struct cpumask cpus; +- int id; +- int max_power_cost; +- int min_power_cost; +- int max_possible_capacity; +- int capacity; +- int efficiency; /* Differentiate cpus with different IPC 
capability */ +- int load_scale_factor; +- unsigned int exec_scale_factor; +- /* +- * max_freq = user maximum +- * max_possible_freq = maximum supported by hardware +- */ +- unsigned int cur_freq, max_freq, min_freq; +- unsigned int max_possible_freq; +- bool freq_init_done; +-}; +- +-extern unsigned int sched_disable_window_stats; +-#endif /* CONFIG_SCHED_WALT */ + /* task_struct::on_rq states: */ + #define TASK_ON_RQ_QUEUED 1 + #define TASK_ON_RQ_MIGRATING 2 +@@ -166,10 +116,6 @@ extern unsigned int sysctl_sched_child_runs_first; + extern void calc_global_load_tick(struct rq *this_rq); + extern long calc_load_fold_active(struct rq *this_rq, long adjust); + +-#ifdef CONFIG_SMP +-extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd); +-#endif +- + extern void call_trace_sched_update_nr_running(struct rq *rq, int count); + + extern unsigned int sysctl_sched_rt_period; +@@ -181,37 +127,6 @@ extern int sched_rr_timeslice; + */ + #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) + +-#ifdef CONFIG_SCHED_LATENCY_NICE +-/* +- * Latency nice is meant to provide scheduler hints about the relative +- * latency requirements of a task with respect to other tasks. +- * Thus a task with latency_nice == 19 can be hinted as the task with no +- * latency requirements, in contrast to the task with latency_nice == -20 +- * which should be given priority in terms of lower latency. +- */ +-#define MAX_LATENCY_NICE 19 +-#define MIN_LATENCY_NICE -20 +- +-#define LATENCY_NICE_WIDTH \ +- (MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1) +- +-/* +- * Default tasks should be treated as a task with latency_nice = 0. +- */ +-#define DEFAULT_LATENCY_NICE 0 +-#define DEFAULT_LATENCY_PRIO (DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2) +- +-/* +- * Convert user-nice values [ -20 ... 0 ... 19 ] +- * to static latency [ 0..39 ], +- * and back. +- */ +-#define NICE_TO_LATENCY(nice) ((nice) + DEFAULT_LATENCY_PRIO) +-#define LATENCY_TO_NICE(prio) ((prio) - DEFAULT_LATENCY_PRIO) +-#define NICE_LATENCY_SHIFT (SCHED_FIXEDPOINT_SHIFT) +-#define NICE_LATENCY_WEIGHT_MAX (1L << NICE_LATENCY_SHIFT) +-#endif /* CONFIG_SCHED_LATENCY_NICE */ +- + /* + * Increase resolution of nice-level calculations for 64-bit architectures. + * The extra resolution improves shares distribution and load balancing of +@@ -497,16 +412,6 @@ struct task_group { + struct uclamp_se uclamp[UCLAMP_CNT]; + #endif + +-#ifdef CONFIG_SCHED_RTG_CGROUP +- /* +- * Controls whether tasks of this cgroup should be colocated with each +- * other and tasks of other cgroups that have the same flag turned on. +- */ +- bool colocate; +- +- /* Controls whether further updates are allowed to the colocate flag */ +- bool colocate_update_disabled; +-#endif + }; + + #ifdef CONFIG_FAIR_GROUP_SCHED +@@ -720,9 +625,6 @@ struct cfs_rq { + struct list_head leaf_cfs_rq_list; + struct task_group *tg; /* group that "owns" this runqueue */ + +-#ifdef CONFIG_SCHED_WALT +- struct walt_sched_stats walt_stats; +-#endif + /* Locally cached copy of our task_group's idle value */ + int idle; + +@@ -745,9 +647,6 @@ struct cfs_rq { + #ifdef CONFIG_SMP + struct list_head throttled_csd_list; + #endif +-#ifdef CONFIG_SCHED_WALT +- u64 cumulative_runnable_avg; +-#endif + #endif /* CONFIG_CFS_BANDWIDTH */ + #endif /* CONFIG_FAIR_GROUP_SCHED */ + }; +@@ -990,9 +889,6 @@ struct root_domain { + * CPUs of the rd. Protected by RCU. 
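The removed latency-nice block maps user values onto a zero-based static range so they can index weight tables; plugging in the shown constants makes the round trip concrete:

/* Verbatim macros from the deleted block, checked with C11 static asserts. */
#include <assert.h>

#define MAX_LATENCY_NICE      19
#define MIN_LATENCY_NICE      -20
#define LATENCY_NICE_WIDTH    (MAX_LATENCY_NICE - MIN_LATENCY_NICE + 1)   /* 40 */
#define DEFAULT_LATENCY_NICE  0
#define DEFAULT_LATENCY_PRIO  (DEFAULT_LATENCY_NICE + LATENCY_NICE_WIDTH/2) /* 20 */
#define NICE_TO_LATENCY(nice) ((nice) + DEFAULT_LATENCY_PRIO)
#define LATENCY_TO_NICE(prio) ((prio) - DEFAULT_LATENCY_PRIO)

static_assert(NICE_TO_LATENCY(MIN_LATENCY_NICE) == 0,  "low end of [0..39]");
static_assert(NICE_TO_LATENCY(MAX_LATENCY_NICE) == 39, "high end of [0..39]");
static_assert(LATENCY_TO_NICE(NICE_TO_LATENCY(-7)) == -7, "conversion inverts");

int main(void) { return 0; }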
+ */ + struct perf_domain __rcu *pd; +-#ifdef CONFIG_SCHED_RT_CAS +- int max_cap_orig_cpu; +-#endif + }; + + extern void init_defrootdomain(void); +@@ -1164,18 +1060,8 @@ struct rq { + /* For active balancing */ + int active_balance; + int push_cpu; +-#ifdef CONFIG_SCHED_EAS +- struct task_struct *push_task; +-#endif + struct cpu_stop_work active_balance_work; + +- /* For rt active balancing */ +-#ifdef CONFIG_SCHED_RT_ACTIVE_LB +- int rt_active_balance; +- struct task_struct *rt_push_task; +- struct cpu_stop_work rt_active_balance_work; +-#endif +- + /* CPU of this runqueue: */ + int cpu; + int online; +@@ -1198,27 +1084,6 @@ struct rq { + + /* This is used to determine avg_idle's max value */ + u64 max_idle_balance_cost; +-#ifdef CONFIG_SCHED_WALT +- struct sched_cluster *cluster; +- struct cpumask freq_domain_cpumask; +- struct walt_sched_stats walt_stats; +- +- u64 window_start; +- unsigned long walt_flags; +- +- u64 cur_irqload; +- u64 avg_irqload; +- u64 irqload_ts; +- u64 curr_runnable_sum; +- u64 prev_runnable_sum; +- u64 nt_curr_runnable_sum; +- u64 nt_prev_runnable_sum; +- u64 cum_window_demand_scaled; +- struct load_subtractions load_subs[NUM_TRACKED_WINDOWS]; +-#ifdef CONFIG_SCHED_RTG +- struct group_cpu_time grp_time; +-#endif +-#endif /* CONFIG_SCHED_WALT */ + + #ifdef CONFIG_HOTPLUG_CPU + struct rcuwait hotplug_wait; +@@ -1818,14 +1683,6 @@ rq_lock(struct rq *rq, struct rq_flags *rf) + rq_pin_lock(rq, rf); + } + +-static inline void +-rq_relock(struct rq *rq, struct rq_flags *rf) +- __acquires(rq->lock) +-{ +- raw_spin_rq_lock(rq); +- rq_repin_lock(rq, rf); +-} +- + static inline void + rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) + __releases(rq->lock) +@@ -2075,15 +1932,6 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg) + return to_cpumask(sg->sgc->cpumask); + } + +-/** +- * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. +- * @group: The group whose first CPU is to be returned. 
+- */ +-static inline unsigned int group_first_cpu(struct sched_group *group) +-{ +- return cpumask_first(sched_group_span(group)); +-} +- + extern int group_balance_cpu(struct sched_group *sg); + + #ifdef CONFIG_SCHED_DEBUG +@@ -2334,9 +2182,6 @@ static_assert(WF_TTWU == SD_BALANCE_WAKE); + + extern const int sched_prio_to_weight[40]; + extern const u32 sched_prio_to_wmult[40]; +-#ifdef CONFIG_SCHED_LATENCY_NICE +-extern const int sched_latency_to_weight[40]; +-#endif + + /* + * {de,en}queue flags: +@@ -2450,13 +2295,7 @@ struct sched_class { + #ifdef CONFIG_FAIR_GROUP_SCHED + void (*task_change_group)(struct task_struct *p); + #endif +-#ifdef CONFIG_SCHED_WALT +- void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, +- u16 updated_demand_scaled); +-#endif +-#ifdef CONFIG_SCHED_EAS +- void (*check_for_migration)(struct rq *rq, struct task_struct *p); +-#endif ++ + #ifdef CONFIG_SCHED_CORE + int (*task_is_throttled)(struct task_struct *p, int cpu); + #endif +@@ -2757,15 +2596,6 @@ static inline int hrtick_enabled(struct rq *rq) + + #endif /* CONFIG_SCHED_HRTICK */ + +-#ifdef CONFIG_SCHED_WALT +-u64 sched_ktime_clock(void); +-#else +-static inline u64 sched_ktime_clock(void) +-{ +- return sched_clock(); +-} +-#endif +- + #ifndef arch_scale_freq_tick + static __always_inline + void arch_scale_freq_tick(void) +@@ -2846,11 +2676,6 @@ static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) + + extern void double_rq_lock(struct rq *rq1, struct rq *rq2); + +-#ifdef CONFIG_SCHED_WALT +-extern unsigned int sysctl_sched_use_walt_cpu_util; +-extern unsigned int walt_disabled; +-#endif +- + #ifdef CONFIG_PREEMPTION + + /* +@@ -3141,20 +2966,11 @@ DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); + static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) + { + struct update_util_data *data; +- u64 clock; +- +-#ifdef CONFIG_SCHED_WALT +- if (!(flags & SCHED_CPUFREQ_WALT)) +- return; + +- clock = sched_ktime_clock(); +-#else +- clock = rq_clock(rq); +-#endif + data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, + cpu_of(rq))); + if (data) +- data->func(data, clock, flags); ++ data->func(data, rq_clock(rq), flags); + } + #else + static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} +@@ -3301,11 +3117,6 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, + return clamp(util, min_util, max_util); + } + +-static inline bool uclamp_boosted(struct task_struct *p) +-{ +- return uclamp_eff_value(p, UCLAMP_MIN) > 0; +-} +- + /* Is the rq being capped/throttled by uclamp_max? 
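With the WALT branch gone, cpufreq_update_util() above no longer gates on SCHED_CPUFREQ_WALT or calls sched_ktime_clock(); the governor callback is always driven by rq_clock(). The post-patch shape of the hook, condensed for illustration (not a standalone program):

static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
    struct update_util_data *data;

    data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
                                              cpu_of(rq)));
    if (data)
        data->func(data, rq_clock(rq), flags);   /* single clock source */
}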
*/ + static inline bool uclamp_rq_is_capped(struct rq *rq) + { +@@ -3350,11 +3161,6 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, + return util; + } + +-static inline bool uclamp_boosted(struct task_struct *p) +-{ +- return false; +-} +- + static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } + + static inline bool uclamp_is_used(void) +@@ -3474,14 +3280,6 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) + extern void swake_up_all_locked(struct swait_queue_head *q); + extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); + +-#ifdef CONFIG_SCHED_RTG +-extern bool task_fits_max(struct task_struct *p, int cpu); +-extern unsigned long capacity_spare_without(int cpu, struct task_struct *p); +-extern int update_preferred_cluster(struct related_thread_group *grp, +- struct task_struct *p, u32 old_load, bool from_tick); +-extern struct cpumask *find_rtg_target(struct task_struct *p); +-#endif +- + extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); + + #ifdef CONFIG_PREEMPT_DYNAMIC +@@ -3734,274 +3532,4 @@ static inline void init_sched_mm_cid(struct task_struct *t) { } + extern u64 avg_vruntime(struct cfs_rq *cfs_rq); + extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); + +-#ifdef CONFIG_SCHED_WALT +-static inline int cluster_first_cpu(struct sched_cluster *cluster) +-{ +- return cpumask_first(&cluster->cpus); +-} +- +-extern struct list_head cluster_head; +-extern struct sched_cluster *sched_cluster[NR_CPUS]; +-unsigned long capacity_curr_of(int cpu); +-unsigned long cpu_util_cfs(int cpu); +- +-#define for_each_sched_cluster(cluster) \ +- list_for_each_entry_rcu(cluster, &cluster_head, list) +- +-extern struct mutex policy_mutex; +-extern unsigned int sched_disable_window_stats; +-extern unsigned int max_possible_freq; +-extern unsigned int min_max_freq; +-extern unsigned int max_possible_efficiency; +-extern unsigned int min_possible_efficiency; +-extern unsigned int max_capacity; +-extern unsigned int min_capacity; +-extern unsigned int max_load_scale_factor; +-extern unsigned int max_possible_capacity; +-extern unsigned int min_max_possible_capacity; +-extern unsigned int max_power_cost; +-extern unsigned int __read_mostly sched_init_task_load_windows; +-extern unsigned int sysctl_sched_restrict_cluster_spill; +-extern unsigned int sched_pred_alert_load; +-extern struct sched_cluster init_cluster; +- +-static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) +-{ +- rq->cum_window_demand_scaled += scaled_delta; +- if (unlikely((s64)rq->cum_window_demand_scaled < 0)) +- rq->cum_window_demand_scaled = 0; +-} +- +-/* Is frequency of two cpus synchronized with each other? 
*/ +-static inline int same_freq_domain(int src_cpu, int dst_cpu) +-{ +- struct rq *rq = cpu_rq(src_cpu); +- +- if (src_cpu == dst_cpu) +- return 1; +- +- return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); +-} +- +-extern void reset_task_stats(struct task_struct *p); +- +-#define CPU_RESERVED 1 +-static inline int is_reserved(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- +- return test_bit(CPU_RESERVED, &rq->walt_flags); +-} +- +-static inline int mark_reserved(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- +- return test_and_set_bit(CPU_RESERVED, &rq->walt_flags); +-} +- +-static inline void clear_reserved(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- +- clear_bit(CPU_RESERVED, &rq->walt_flags); +-} +- +-static inline int cpu_capacity(int cpu) +-{ +- return cpu_rq(cpu)->cluster->capacity; +-} +- +-static inline int cpu_max_possible_capacity(int cpu) +-{ +- return cpu_rq(cpu)->cluster->max_possible_capacity; +-} +- +-static inline int cpu_load_scale_factor(int cpu) +-{ +- return cpu_rq(cpu)->cluster->load_scale_factor; +-} +- +-static inline unsigned int cluster_max_freq(struct sched_cluster *cluster) +-{ +- /* +- * Governor and thermal driver don't know the other party's mitigation +- * voting. So struct cluster saves both and return min() for current +- * cluster fmax. +- */ +- return cluster->max_freq; +-} +- +-/* Keep track of max/min capacity possible across CPUs "currently" */ +-static inline void __update_min_max_capacity(void) +-{ +- int i; +- int max_cap = 0, min_cap = INT_MAX; +- +- for_each_possible_cpu(i) { +- if (!cpu_active(i)) +- continue; +- +- max_cap = max(max_cap, cpu_capacity(i)); +- min_cap = min(min_cap, cpu_capacity(i)); +- } +- +- max_capacity = max_cap; +- min_capacity = min_cap; +-} +- +-/* +- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so +- * that "most" efficient cpu gets a load_scale_factor of 1 +- */ +-static inline unsigned long +-load_scale_cpu_efficiency(struct sched_cluster *cluster) +-{ +- return DIV_ROUND_UP(1024 * max_possible_efficiency, +- cluster->efficiency); +-} +- +-/* +- * Return load_scale_factor of a cpu in reference to cpu with best max_freq +- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor +- * of 1. +- */ +-static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster) +-{ +- return DIV_ROUND_UP(1024 * max_possible_freq, +- cluster_max_freq(cluster)); +-} +- +-static inline int compute_load_scale_factor(struct sched_cluster *cluster) +-{ +- int load_scale = 1024; +- +- /* +- * load_scale_factor accounts for the fact that task load +- * is in reference to "best" performing cpu. Task's load will need to be +- * scaled (up) by a factor to determine suitability to be placed on a +- * (little) cpu. 
+- */ +- load_scale *= load_scale_cpu_efficiency(cluster); +- load_scale >>= 10; +- +- load_scale *= load_scale_cpu_freq(cluster); +- load_scale >>= 10; +- +- return load_scale; +-} +- +-static inline bool is_max_capacity_cpu(int cpu) +-{ +- return cpu_max_possible_capacity(cpu) == max_possible_capacity; +-} +- +-static inline bool is_min_capacity_cpu(int cpu) +-{ +- return cpu_max_possible_capacity(cpu) == min_max_possible_capacity; +-} +- +-/* +- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that +- * least efficient cpu gets capacity of 1024 +- */ +-static unsigned long +-capacity_scale_cpu_efficiency(struct sched_cluster *cluster) +-{ +- return (1024 * cluster->efficiency) / min_possible_efficiency; +-} +- +-/* +- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq +- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024. +- */ +-static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster) +-{ +- return (1024 * cluster_max_freq(cluster)) / min_max_freq; +-} +- +-static inline int compute_capacity(struct sched_cluster *cluster) +-{ +- int capacity = 1024; +- +- capacity *= capacity_scale_cpu_efficiency(cluster); +- capacity >>= 10; +- +- capacity *= capacity_scale_cpu_freq(cluster); +- capacity >>= 10; +- +- return capacity; +-} +- +-static inline unsigned int power_cost(int cpu, u64 demand) +-{ +- return cpu_max_possible_capacity(cpu); +-} +- +-static inline unsigned long cpu_util_freq_walt(int cpu) +-{ +- u64 util; +- struct rq *rq = cpu_rq(cpu); +- unsigned long capacity = capacity_orig_of(cpu); +- +- if (unlikely(walt_disabled || !sysctl_sched_use_walt_cpu_util)) +- return cpu_util_cfs(cpu); +- +- util = rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT; +- util = div_u64(util, sched_ravg_window); +- +- return (util >= capacity) ? capacity : util; +-} +- +-static inline bool hmp_capable(void) +-{ +- return max_possible_capacity != min_max_possible_capacity; +-} +-#else /* CONFIG_SCHED_WALT */ +-static inline void walt_fixup_cum_window_demand(struct rq *rq, +- s64 scaled_delta) { } +- +-static inline int same_freq_domain(int src_cpu, int dst_cpu) +-{ +- return 1; +-} +- +-static inline int is_reserved(int cpu) +-{ +- return 0; +-} +- +-static inline void clear_reserved(int cpu) { } +- +-static inline bool hmp_capable(void) +-{ +- return false; +-} +-#endif /* CONFIG_SCHED_WALT */ +- +-struct sched_avg_stats { +- int nr; +- int nr_misfit; +- int nr_max; +- int nr_scaled; +-}; +-#ifdef CONFIG_SCHED_RUNNING_AVG +-extern void sched_get_nr_running_avg(struct sched_avg_stats *stats); +-#else +-static inline void sched_get_nr_running_avg(struct sched_avg_stats *stats) { } +-#endif +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-extern int group_balance_cpu_not_isolated(struct sched_group *sg); +-#else +-static inline int group_balance_cpu_not_isolated(struct sched_group *sg) +-{ +- return group_balance_cpu(sg); +-} +-#endif /* CONFIG_CPU_ISOLATION_OPT */ +- +-#ifdef CONFIG_HOTPLUG_CPU +-extern void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, +- bool migrate_pinned_tasks); +-#endif + #endif /* _KERNEL_SCHED_SCHED_H */ +diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c +deleted file mode 100755 +index 538004868..000000000 +--- a/kernel/sched/sched_avg.c ++++ /dev/null +@@ -1,186 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * Copyright (c) 2012, 2015-2021, The Linux Foundation. All rights reserved. 
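compute_capacity() and compute_load_scale_factor() above are mirror images: capacity is normalized so the least efficient, lowest-fmax CPU reads 1024, while load_scale_factor is normalized so the best CPU reads 1024, each applying a 10-bit fixed-point multiply per dimension. A standalone worked example with illustrative numbers (DIV_ROUND_UP spelled out as (a + b - 1) / b):

#include <stdio.h>

int main(void)
{
    unsigned int min_possible_efficiency = 512, max_possible_efficiency = 1024;
    unsigned int min_max_freq = 1200000, max_possible_freq = 2400000;

    /* a little cluster: half the efficiency, half the fmax of the big one */
    unsigned int efficiency = 512, max_freq = 1200000;

    unsigned int capacity = 1024;
    capacity = capacity * ((1024 * efficiency) / min_possible_efficiency) >> 10;
    capacity = capacity * ((1024 * max_freq) / min_max_freq) >> 10;

    unsigned int lsf = 1024;
    lsf = lsf * ((1024 * max_possible_efficiency + efficiency - 1) / efficiency) >> 10;
    lsf = lsf * ((1024 * max_possible_freq + max_freq - 1) / max_freq) >> 10;

    /* little cpu: capacity 1024 (it is the reference for capacity), but
     * load_scale_factor 4096: task load is scaled 4x up when judging
     * whether it fits on this cpu */
    printf("capacity=%u load_scale_factor=%u\n", capacity, lsf);
    return 0;
}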
+- */ +-/* +- * Scheduler hook for average runqueue determination +- */ +-#include +-#include +-#include +-#include +-#include +- +-#include "sched.h" +-#include "walt.h" +-#include +- +-static DEFINE_PER_CPU(u64, nr_prod_sum); +-static DEFINE_PER_CPU(u64, last_time); +-static DEFINE_PER_CPU(u64, nr_big_prod_sum); +-static DEFINE_PER_CPU(u64, nr); +-static DEFINE_PER_CPU(u64, nr_max); +- +-static DEFINE_PER_CPU(unsigned long, iowait_prod_sum); +-static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock); +-static s64 last_get_time; +- +-static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0); +- +-#define NR_THRESHOLD_PCT 15 +- +-/** +- * sched_get_nr_running_avg +- * @return: Average nr_running, iowait and nr_big_tasks value since last poll. +- * Returns the avg * 100 to return up to two decimal points +- * of accuracy. +- * +- * Obtains the average nr_running value since the last poll. +- * This function may not be called concurrently with itself +- */ +-void sched_get_nr_running_avg(struct sched_avg_stats *stats) +-{ +- int cpu; +- u64 curr_time = sched_clock(); +- u64 period = curr_time - last_get_time; +- u64 tmp_nr, tmp_misfit; +- +- if (!period) +- return; +- +- /* read and reset nr_running counts */ +- for_each_possible_cpu(cpu) { +- unsigned long flags; +- u64 diff; +- +- spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); +- curr_time = sched_clock(); +- diff = curr_time - per_cpu(last_time, cpu); +- BUG_ON((s64)diff < 0); +- +- tmp_nr = per_cpu(nr_prod_sum, cpu); +- tmp_nr += per_cpu(nr, cpu) * diff; +- tmp_nr = div64_u64((tmp_nr * 100), period); +- +- tmp_misfit = per_cpu(nr_big_prod_sum, cpu); +- tmp_misfit = div64_u64((tmp_misfit * 100), period); +- +- /* +- * NR_THRESHOLD_PCT is to make sure that the task ran +- * at least 85% in the last window to compensate any +- * over estimating being done. +- */ +- stats[cpu].nr = (int)div64_u64((tmp_nr + NR_THRESHOLD_PCT), +- 100); +- stats[cpu].nr_misfit = (int)div64_u64((tmp_misfit + +- NR_THRESHOLD_PCT), 100); +- stats[cpu].nr_max = per_cpu(nr_max, cpu); +- +- trace_sched_get_nr_running_avg(cpu, stats[cpu].nr, +- stats[cpu].nr_misfit, stats[cpu].nr_max); +- +- per_cpu(last_time, cpu) = curr_time; +- per_cpu(nr_prod_sum, cpu) = 0; +- per_cpu(nr_big_prod_sum, cpu) = 0; +- per_cpu(iowait_prod_sum, cpu) = 0; +- per_cpu(nr_max, cpu) = per_cpu(nr, cpu); +- +- spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags); +- } +- +- last_get_time = curr_time; +- +-} +-EXPORT_SYMBOL(sched_get_nr_running_avg); +- +-#define BUSY_NR_RUN 3 +-#define BUSY_LOAD_FACTOR 10 +-static inline void update_last_busy_time(int cpu, bool dequeue, +- unsigned long prev_nr_run, u64 curr_time) +-{ +- bool nr_run_trigger = false, load_trigger = false; +- +- if (!hmp_capable() || is_min_capacity_cpu(cpu)) +- return; +- +- if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN) +- nr_run_trigger = true; +- +- if (dequeue && (cpu_util_cfs(cpu) * BUSY_LOAD_FACTOR) > +- capacity_orig_of(cpu)) +- load_trigger = true; +- +- if (nr_run_trigger || load_trigger) +- atomic64_set(&per_cpu(last_busy_time, cpu), curr_time); +-} +- +-/** +- * sched_update_nr_prod +- * @cpu: The core id of the nr running driver. 
+- * @delta: Adjust nr by 'delta' amount +- * @inc: Whether we are increasing or decreasing the count +- * @return: N/A +- * +- * Update average with latest nr_running value for CPU +- */ +-void sched_update_nr_prod(int cpu, long delta, bool inc) +-{ +- u64 diff; +- u64 curr_time; +- unsigned long flags, nr_running; +- +- spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); +- nr_running = per_cpu(nr, cpu); +- curr_time = sched_clock(); +- diff = curr_time - per_cpu(last_time, cpu); +- BUG_ON((s64)diff < 0); +- per_cpu(last_time, cpu) = curr_time; +- per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta); +- +- BUG_ON((s64)per_cpu(nr, cpu) < 0); +- +- if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu)) +- per_cpu(nr_max, cpu) = per_cpu(nr, cpu); +- +- update_last_busy_time(cpu, !inc, nr_running, curr_time); +- +- per_cpu(nr_prod_sum, cpu) += nr_running * diff; +- per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff; +- spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags); +-} +-EXPORT_SYMBOL(sched_update_nr_prod); +- +-/* +- * Returns the CPU utilization % in the last window. +- * +- */ +-unsigned int sched_get_cpu_util(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- u64 util; +- unsigned long capacity, flags; +- unsigned int busy; +- +- raw_spin_lock_irqsave(&rq->__lock, flags); +- +- util = rq->cfs.avg.util_avg; +- capacity = capacity_orig_of(cpu); +- +-#ifdef CONFIG_SCHED_WALT +- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { +- util = rq->prev_runnable_sum; +- util = div64_u64(util, +- sched_ravg_window >> SCHED_CAPACITY_SHIFT); +- } +-#endif +- raw_spin_unlock_irqrestore(&rq->__lock, flags); +- +- util = (util >= capacity) ? capacity : util; +- busy = div64_ul((util * 100), capacity); +- return busy; +-} +- +-u64 sched_get_cpu_last_busy_time(int cpu) +-{ +- return atomic64_read(&per_cpu(last_busy_time, cpu)); +-} +diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c +index a6465bda8..b1b8fe61c 100644 +--- a/kernel/sched/stop_task.c ++++ b/kernel/sched/stop_task.c +@@ -7,7 +7,6 @@ + * + * See kernel/stop_machine.c + */ +-#include "walt.h" + + #ifdef CONFIG_SMP + static int +@@ -56,14 +55,12 @@ static void + enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) + { + add_nr_running(rq, 1); +- walt_inc_cumulative_runnable_avg(rq, p); + } + + static void + dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) + { + sub_nr_running(rq, 1); +- walt_dec_cumulative_runnable_avg(rq, p); + } + + static void yield_task_stop(struct rq *rq) +@@ -130,7 +127,4 @@ DEFINE_SCHED_CLASS(stop) = { + .prio_changed = prio_changed_stop, + .switched_to = switched_to_stop, + .update_curr = update_curr_stop, +-#ifdef CONFIG_SCHED_WALT +- .fixup_walt_sched_stats = fixup_walt_sched_stats_common, +-#endif + }; +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index f76f0a576..2ed884bb3 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -558,10 +558,6 @@ static int init_rootdomain(struct root_domain *rd) + + if (cpupri_init(&rd->cpupri) != 0) + goto free_cpudl; +- +-#ifdef CONFIG_SCHED_RT_CAS +- rd->max_cap_orig_cpu = -1; +-#endif + return 0; + + free_cpudl: +@@ -1275,12 +1271,9 @@ build_sched_groups(struct sched_domain *sd, int cpu) + * group having more cpu_capacity will pickup more load compared to the + * group having less cpu_capacity. 
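The deleted sched_avg.c machinery is a classic time-weighted accumulator: sched_update_nr_prod() adds nr * delta on every change, and sched_get_nr_running_avg() divides by the polling period, reporting avg * 100 to keep two decimal digits. The same bookkeeping as a standalone model:

#include <stdint.h>
#include <stdio.h>

static uint64_t nr_prod_sum, last_time, nr;

static void update_nr(uint64_t now, uint64_t delta, int inc)
{
    nr_prod_sum += nr * (now - last_time);   /* old value, weighted by hold time */
    last_time = now;
    nr += inc ? delta : -delta;
}

int main(void)
{
    /* 2 tasks for 30ms, then 5 tasks for 70ms (times in ns) */
    update_nr(0, 2, 1);
    update_nr(30000000, 3, 1);

    uint64_t now = 100000000, period = now;
    uint64_t avg100 = (nr_prod_sum + nr * (now - last_time)) * 100 / period;

    printf("avg nr_running * 100 = %llu\n", (unsigned long long)avg100); /* 410 */
    return 0;
}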
+ */ +-void init_sched_groups_capacity(int cpu, struct sched_domain *sd) ++static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) + { + struct sched_group *sg = sd->groups; +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_t avail_mask; +-#endif + struct cpumask *mask = sched_domains_tmpmask2; + + WARN_ON(!sg); +@@ -1288,13 +1281,8 @@ void init_sched_groups_capacity(int cpu, struct sched_domain *sd) + do { + int cpu, cores = 0, max_cpu = -1; + +-#ifdef CONFIG_CPU_ISOLATION_OPT +- cpumask_andnot(&avail_mask, sched_group_span(sg), +- cpu_isolated_mask); +- sg->group_weight = cpumask_weight(&avail_mask); +-#else + sg->group_weight = cpumask_weight(sched_group_span(sg)); +-#endif ++ + cpumask_copy(mask, sched_group_span(sg)); + for_each_cpu(cpu, mask) { + cores++; +@@ -2498,19 +2486,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att + /* Attach the domains */ + rcu_read_lock(); + for_each_cpu(i, cpu_map) { +-#ifdef CONFIG_SCHED_RT_CAS +- int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu); +-#endif +- + rq = cpu_rq(i); + sd = *per_cpu_ptr(d.sd, i); + +-#ifdef CONFIG_SCHED_RT_CAS +- if (max_cpu < 0 || arch_scale_cpu_capacity(i) > +- arch_scale_cpu_capacity(max_cpu)) +- WRITE_ONCE(d.rd->max_cap_orig_cpu, i); +-#endif +- + /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ + if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) + WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); +diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c +deleted file mode 100755 +index 4391bf669..000000000 +--- a/kernel/sched/walt.c ++++ /dev/null +@@ -1,1862 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * walt.c +- * +- * Window Assistant Load Tracking +- * +- * This software is licensed under the terms of the GNU General Public +- * License version 2, as published by the Free Software Foundation, and +- * may be copied, distributed, and modified under those terms. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. 
+- * +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include "sched.h" +-#include "walt.h" +-#include "core_ctl.h" +-#include "rtg/rtg.h" +-#define CREATE_TRACE_POINTS +-#include +-#undef CREATE_TRACE_POINTS +- +-const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK", +- "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", +- "IRQ_UPDATE"}; +-const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP", +- "RQ_TO_RQ", "GROUP_TO_GROUP"}; +- +-#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0 +-#define SCHED_ACCOUNT_WAIT_TIME 1 +- +-static ktime_t ktime_last; +-static bool sched_ktime_suspended; +-DEFINE_MUTEX(cluster_lock); +-static atomic64_t walt_irq_work_lastq_ws; +-u64 walt_load_reported_window; +- +-static struct irq_work walt_cpufreq_irq_work; +-static struct irq_work walt_migration_irq_work; +- +-u64 sched_ktime_clock(void) +-{ +- if (unlikely(sched_ktime_suspended)) +- return ktime_to_ns(ktime_last); +- return ktime_get_ns(); +-} +- +-static void sched_resume(void) +-{ +- sched_ktime_suspended = false; +-} +- +-static int sched_suspend(void) +-{ +- ktime_last = ktime_get(); +- sched_ktime_suspended = true; +- return 0; +-} +- +-static struct syscore_ops sched_syscore_ops = { +- .resume = sched_resume, +- .suspend = sched_suspend +-}; +- +-static int __init sched_init_ops(void) +-{ +- register_syscore_ops(&sched_syscore_ops); +- return 0; +-} +-late_initcall(sched_init_ops); +- +-static void acquire_rq_locks_irqsave(const cpumask_t *cpus, +- unsigned long *flags) +-{ +- int cpu; +- int level = 0; +- +- local_irq_save(*flags); +- for_each_cpu(cpu, cpus) { +- if (level == 0) +- raw_spin_lock(&cpu_rq(cpu)->__lock); +- else +- raw_spin_lock_nested(&cpu_rq(cpu)->__lock, level); +- level++; +- } +-} +- +-static void release_rq_locks_irqrestore(const cpumask_t *cpus, +- unsigned long *flags) +-{ +- int cpu; +- +- for_each_cpu(cpu, cpus) +- raw_spin_unlock(&cpu_rq(cpu)->__lock); +- local_irq_restore(*flags); +-} +- +-#ifdef CONFIG_HZ_300 +-/* +- * Tick interval becomes to 3333333 due to +- * rounding error when HZ=300. +- */ +-#define MIN_SCHED_RAVG_WINDOW (3333333 * 6) +-#else +-/* Min window size (in ns) = 20ms */ +-#define MIN_SCHED_RAVG_WINDOW 20000000 +-#endif +- +-/* Max window size (in ns) = 1s */ +-#define MAX_SCHED_RAVG_WINDOW 1000000000 +- +-/* 1 -> use PELT based load stats, 0 -> use window-based load stats */ +-unsigned int __read_mostly walt_disabled; +- +-__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC); +- +-/* +- * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy +- * associated with them. This is required for atomic update of those variables +- * when being modifed via sysctl interface. +- * +- * IMPORTANT: Initialize both copies to same value!! 
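The HZ_300 special case above exists because the tick length does not divide evenly: one tick is 10^9 / 300 ns, truncated to 3333333 ns, so six ticks land just under the generic 20 ms minimum window. Checked in two lines:

#include <assert.h>

static_assert(1000000000 / 300 == 3333333, "tick length at HZ=300, truncated");
static_assert(3333333 * 6 == 19999998, "six ticks sit just under the 20ms min");

int main(void) { return 0; }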
+- */ +- +-__read_mostly unsigned int sched_ravg_hist_size = 5; +-__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5; +- +-__read_mostly unsigned int sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG; +-__read_mostly unsigned int sysctl_sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG; +- +-static __read_mostly unsigned int sched_io_is_busy = 1; +- +-unsigned int sysctl_sched_use_walt_cpu_util = 1; +-unsigned int sysctl_sched_use_walt_task_util = 1; +-unsigned int sysctl_sched_walt_init_task_load_pct = 15; +-__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload = (10 * NSEC_PER_MSEC); +- +-/* Window size (in ns) */ +-__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW; +- +-/* +- * A after-boot constant divisor for cpu_util_freq_walt() to apply the load +- * boost. +- */ +-__read_mostly unsigned int walt_cpu_util_freq_divisor; +- +-/* Initial task load. Newly created tasks are assigned this load. */ +-unsigned int __read_mostly sched_init_task_load_windows; +-unsigned int __read_mostly sched_init_task_load_windows_scaled; +-unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15; +- +-/* +- * Maximum possible frequency across all cpus. Task demand and cpu +- * capacity (cpu_power) metrics are scaled in reference to it. +- */ +-unsigned int max_possible_freq = 1; +- +-/* +- * Minimum possible max_freq across all cpus. This will be same as +- * max_possible_freq on homogeneous systems and could be different from +- * max_possible_freq on heterogenous systems. min_max_freq is used to derive +- */ +-unsigned int min_max_freq = 1; +- +-unsigned int max_capacity = 1024; /* max(rq->capacity) */ +-unsigned int min_capacity = 1024; /* min(rq->capacity) */ +-unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */ +-unsigned int +-min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */ +- +-/* Temporarily disable window-stats activity on all cpus */ +-unsigned int __read_mostly sched_disable_window_stats; +- +-/* +- * This governs what load needs to be used when reporting CPU busy time +- * to the cpufreq governor. 
+- */ +-__read_mostly unsigned int sysctl_sched_freq_reporting_policy; +- +-static int __init set_sched_ravg_window(char *str) +-{ +- unsigned int window_size; +- +- get_option(&str, &window_size); +- +- if (window_size < MIN_SCHED_RAVG_WINDOW || +- window_size > MAX_SCHED_RAVG_WINDOW) { +- WARN_ON(1); +- return -EINVAL; +- } +- +- sched_ravg_window = window_size; +- return 0; +-} +-early_param("sched_ravg_window", set_sched_ravg_window); +- +-__read_mostly unsigned int walt_scale_demand_divisor; +-#define scale_demand(d) ((d)/walt_scale_demand_divisor) +- +-void inc_rq_walt_stats(struct rq *rq, struct task_struct *p) +-{ +- walt_inc_cumulative_runnable_avg(rq, p); +-} +- +-void dec_rq_walt_stats(struct rq *rq, struct task_struct *p) +-{ +- walt_dec_cumulative_runnable_avg(rq, p); +-} +- +-void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p, +- u16 updated_demand_scaled) +-{ +- s64 task_load_delta = (s64)updated_demand_scaled - +- p->ravg.demand_scaled; +- +- fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta); +- +- walt_fixup_cum_window_demand(rq, task_load_delta); +-} +- +-static u64 +-update_window_start(struct rq *rq, u64 wallclock, int event) +-{ +- s64 delta; +- int nr_windows; +- u64 old_window_start = rq->window_start; +- +- delta = wallclock - rq->window_start; +- BUG_ON(delta < 0); +- if (delta < sched_ravg_window) +- return old_window_start; +- +- nr_windows = div64_u64(delta, sched_ravg_window); +- rq->window_start += (u64)nr_windows * (u64)sched_ravg_window; +- +- rq->cum_window_demand_scaled = +- rq->walt_stats.cumulative_runnable_avg_scaled; +- +- return old_window_start; +-} +- +-void sched_account_irqtime(int cpu, struct task_struct *curr, +- u64 delta, u64 wallclock) +-{ +- struct rq *rq = cpu_rq(cpu); +- unsigned long flags, nr_windows; +- u64 cur_jiffies_ts; +- +- raw_spin_lock_irqsave(&rq->__lock, flags); +- +- /* +- * cputime (wallclock) uses sched_clock so use the same here for +- * consistency. +- */ +- delta += sched_clock() - wallclock; +- cur_jiffies_ts = get_jiffies_64(); +- +- if (is_idle_task(curr)) +- update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(), +- delta); +- +- nr_windows = cur_jiffies_ts - rq->irqload_ts; +- +- if (nr_windows) { +- if (nr_windows < 10) { +- /* Decay CPU's irqload by 3/4 for each window. */ +- rq->avg_irqload *= (3 * nr_windows); +- rq->avg_irqload = div64_u64(rq->avg_irqload, +- 4 * nr_windows); +- } else { +- rq->avg_irqload = 0; +- } +- rq->avg_irqload += rq->cur_irqload; +- rq->cur_irqload = 0; +- } +- +- rq->cur_irqload += delta; +- rq->irqload_ts = cur_jiffies_ts; +- raw_spin_unlock_irqrestore(&rq->__lock, flags); +-} +- +-static int +-account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event) +-{ +- /* +- * No need to bother updating task demand for exiting tasks +- * or the idle task. +- */ +- if (exiting_task(p) || is_idle_task(p)) +- return 0; +- +- /* +- * When a task is waking up it is completing a segment of non-busy +- * time. Likewise, if wait time is not treated as busy time, then +- * when a task begins to run or is migrated, it is not running and +- * is completing a segment of non-busy time. +- */ +- if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME && +- (event == PICK_NEXT_TASK || event == TASK_MIGRATE))) +- return 0; +- +- /* +- * The idle exit time is not accounted for the first task _picked_ up to +- * run on the idle CPU. 
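update_window_start() above only ever advances rq->window_start by whole windows, so window boundaries stay aligned no matter how long the CPU sat idle. The rollover arithmetic in isolation, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t sched_ravg_window = 20000000;   /* 20ms minimum window */
    uint64_t window_start = 40000000;
    uint64_t wallclock = 115000000;

    uint64_t delta = wallclock - window_start;     /* 75ms elapsed */
    if (delta >= sched_ravg_window) {
        uint64_t nr_windows = delta / sched_ravg_window;   /* 3 whole windows */
        window_start += nr_windows * sched_ravg_window;    /* -> 100ms, aligned */
    }
    printf("window_start=%llu\n", (unsigned long long)window_start);
    return 0;
}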
+- */ +- if (event == PICK_NEXT_TASK && rq->curr == rq->idle) +- return 0; +- +- /* +- * TASK_UPDATE can be called on sleeping task, when its moved between +- * related groups +- */ +- if (event == TASK_UPDATE) { +- if (rq->curr == p) +- return 1; +- +- return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0; +- } +- +- return 1; +-} +- +-/* +- * In this function we match the accumulated subtractions with the current +- * and previous windows we are operating with. Ignore any entries where +- * the window start in the load_subtraction struct does not match either +- * the curent or the previous window. This could happen whenever CPUs +- * become idle or busy with interrupts disabled for an extended period. +- */ +-static inline void account_load_subtractions(struct rq *rq) +-{ +- u64 ws = rq->window_start; +- u64 prev_ws = ws - sched_ravg_window; +- struct load_subtractions *ls = rq->load_subs; +- int i; +- +- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) { +- if (ls[i].window_start == ws) { +- rq->curr_runnable_sum -= ls[i].subs; +- rq->nt_curr_runnable_sum -= ls[i].new_subs; +- } else if (ls[i].window_start == prev_ws) { +- rq->prev_runnable_sum -= ls[i].subs; +- rq->nt_prev_runnable_sum -= ls[i].new_subs; +- } +- +- ls[i].subs = 0; +- ls[i].new_subs = 0; +- } +- +- BUG_ON((s64)rq->prev_runnable_sum < 0); +- BUG_ON((s64)rq->curr_runnable_sum < 0); +- BUG_ON((s64)rq->nt_prev_runnable_sum < 0); +- BUG_ON((s64)rq->nt_curr_runnable_sum < 0); +-} +- +-static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index) +-{ +- rq->load_subs[index].window_start = ws; +- rq->load_subs[index].subs = 0; +- rq->load_subs[index].new_subs = 0; +-} +- +-static bool get_subtraction_index(struct rq *rq, u64 ws) +-{ +- int i; +- u64 oldest = ULLONG_MAX; +- int oldest_index = 0; +- +- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) { +- u64 entry_ws = rq->load_subs[i].window_start; +- +- if (ws == entry_ws) +- return i; +- +- if (entry_ws < oldest) { +- oldest = entry_ws; +- oldest_index = i; +- } +- } +- +- create_subtraction_entry(rq, ws, oldest_index); +- return oldest_index; +-} +- +-static void update_rq_load_subtractions(int index, struct rq *rq, +- u32 sub_load, bool new_task) +-{ +- rq->load_subs[index].subs += sub_load; +- if (new_task) +- rq->load_subs[index].new_subs += sub_load; +-} +- +-void update_cluster_load_subtractions(struct task_struct *p, +- int cpu, u64 ws, bool new_task) +-{ +- struct sched_cluster *cluster = cpu_cluster(cpu); +- struct cpumask cluster_cpus = cluster->cpus; +- u64 prev_ws = ws - sched_ravg_window; +- int i; +- +- cpumask_clear_cpu(cpu, &cluster_cpus); +- raw_spin_lock(&cluster->load_lock); +- +- for_each_cpu(i, &cluster_cpus) { +- struct rq *rq = cpu_rq(i); +- int index; +- +- if (p->ravg.curr_window_cpu[i]) { +- index = get_subtraction_index(rq, ws); +- update_rq_load_subtractions(index, rq, +- p->ravg.curr_window_cpu[i], new_task); +- p->ravg.curr_window_cpu[i] = 0; +- } +- +- if (p->ravg.prev_window_cpu[i]) { +- index = get_subtraction_index(rq, prev_ws); +- update_rq_load_subtractions(index, rq, +- p->ravg.prev_window_cpu[i], new_task); +- p->ravg.prev_window_cpu[i] = 0; +- } +- } +- +- raw_spin_unlock(&cluster->load_lock); +-} +- +-static inline void inter_cluster_migration_fixup +- (struct task_struct *p, int new_cpu, int task_cpu, bool new_task) +-{ +- struct rq *dest_rq = cpu_rq(new_cpu); +- struct rq *src_rq = cpu_rq(task_cpu); +- +- if (same_freq_domain(new_cpu, task_cpu)) +- return; +- +- p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window; +- 
+-	p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
+-
+-	dest_rq->curr_runnable_sum += p->ravg.curr_window;
+-	dest_rq->prev_runnable_sum += p->ravg.prev_window;
+-
+-	src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
+-	src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
+-
+-	if (new_task) {
+-		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+-		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+-
+-		src_rq->nt_curr_runnable_sum -=
+-			p->ravg.curr_window_cpu[task_cpu];
+-		src_rq->nt_prev_runnable_sum -=
+-			p->ravg.prev_window_cpu[task_cpu];
+-	}
+-
+-	p->ravg.curr_window_cpu[task_cpu] = 0;
+-	p->ravg.prev_window_cpu[task_cpu] = 0;
+-
+-	update_cluster_load_subtractions(p, task_cpu,
+-			src_rq->window_start, new_task);
+-
+-	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
+-	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
+-	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
+-	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
+-}
+-
+-void fixup_busy_time(struct task_struct *p, int new_cpu)
+-{
+-	struct rq *src_rq = task_rq(p);
+-	struct rq *dest_rq = cpu_rq(new_cpu);
+-	u64 wallclock;
+-	bool new_task;
+-#ifdef CONFIG_SCHED_RTG
+-	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+-	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+-	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+-	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+-	struct related_thread_group *grp;
+-#endif
+-
+-	if (!p->on_rq && p->__state != TASK_WAKING)
+-		return;
+-
+-	if (exiting_task(p))
+-		return;
+-
+-	if (p->__state == TASK_WAKING)
+-		double_rq_lock(src_rq, dest_rq);
+-
+-	if (sched_disable_window_stats)
+-		goto done;
+-
+-	wallclock = sched_ktime_clock();
+-
+-	update_task_ravg(task_rq(p)->curr, task_rq(p),
+-			 TASK_UPDATE,
+-			 wallclock, 0);
+-	update_task_ravg(dest_rq->curr, dest_rq,
+-			 TASK_UPDATE, wallclock, 0);
+-
+-	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+-			 wallclock, 0);
+-
+-	/*
+-	 * When a task is migrating during the wakeup, adjust
+-	 * the task's contribution towards cumulative window
+-	 * demand.
+-	 */
+-	if (p->__state == TASK_WAKING && p->last_sleep_ts >=
+-				       src_rq->window_start) {
+-		walt_fixup_cum_window_demand(src_rq,
+-					     -(s64)p->ravg.demand_scaled);
+-		walt_fixup_cum_window_demand(dest_rq, p->ravg.demand_scaled);
+-	}
+-
+-	new_task = is_new_task(p);
+-#ifdef CONFIG_SCHED_RTG
+-	/* Protected by rq_lock */
+-	grp = task_related_thread_group(p);
+-
+-	/*
+-	 * For frequency aggregation, we continue to do migration fixups
+-	 * even for intra cluster migrations. This is because the aggregated
+-	 * load has to be reported on a single CPU regardless.
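The bookkeeping above is easy to lose track of; this self-contained toy model (struct and function names are illustrative, only the field names mirror the kernel structs) shows the invariant inter_cluster_migration_fixup() preserves: a task's window contribution is subtracted from the source runqueue and added to the destination, so the totals across CPUs stay constant:

#include <stdint.h>
#include <stdio.h>

struct toy_rq { uint64_t curr_runnable_sum, prev_runnable_sum; };
struct toy_task { uint64_t curr_window, prev_window; };

static void migrate_busy_time(struct toy_task *p, struct toy_rq *src,
                              struct toy_rq *dst)
{
        /* The destination gains the task's window contributions... */
        dst->curr_runnable_sum += p->curr_window;
        dst->prev_runnable_sum += p->prev_window;
        /* ...and the source drops them, keeping the global sum unchanged. */
        src->curr_runnable_sum -= p->curr_window;
        src->prev_runnable_sum -= p->prev_window;
}

int main(void)
{
        struct toy_rq src = { 5000, 7000 }, dst = { 100, 200 };
        struct toy_task p = { 1500, 2000 };

        migrate_busy_time(&p, &src, &dst);
        printf("src %llu/%llu dst %llu/%llu\n",
               (unsigned long long)src.curr_runnable_sum,
               (unsigned long long)src.prev_runnable_sum,
               (unsigned long long)dst.curr_runnable_sum,
               (unsigned long long)dst.prev_runnable_sum);
        return 0;       /* src 3500/5000 dst 1600/2200 */
}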
+- */ +- if (grp) { +- struct group_cpu_time *cpu_time; +- +- cpu_time = &src_rq->grp_time; +- src_curr_runnable_sum = &cpu_time->curr_runnable_sum; +- src_prev_runnable_sum = &cpu_time->prev_runnable_sum; +- src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; +- src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; +- +- cpu_time = &dest_rq->grp_time; +- dst_curr_runnable_sum = &cpu_time->curr_runnable_sum; +- dst_prev_runnable_sum = &cpu_time->prev_runnable_sum; +- dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; +- dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; +- +- if (p->ravg.curr_window) { +- *src_curr_runnable_sum -= p->ravg.curr_window; +- *dst_curr_runnable_sum += p->ravg.curr_window; +- if (new_task) { +- *src_nt_curr_runnable_sum -= +- p->ravg.curr_window; +- *dst_nt_curr_runnable_sum += +- p->ravg.curr_window; +- } +- } +- +- if (p->ravg.prev_window) { +- *src_prev_runnable_sum -= p->ravg.prev_window; +- *dst_prev_runnable_sum += p->ravg.prev_window; +- if (new_task) { +- *src_nt_prev_runnable_sum -= +- p->ravg.prev_window; +- *dst_nt_prev_runnable_sum += +- p->ravg.prev_window; +- } +- } +- } else { +-#endif +- inter_cluster_migration_fixup(p, new_cpu, +- task_cpu(p), new_task); +-#ifdef CONFIG_SCHED_RTG +- } +-#endif +- +- if (!same_freq_domain(new_cpu, task_cpu(p))) +- irq_work_queue(&walt_migration_irq_work); +- +-done: +- if (p->__state == TASK_WAKING) +- double_rq_unlock(src_rq, dest_rq); +-} +- +-void set_window_start(struct rq *rq) +-{ +- static int sync_cpu_available; +- +- if (likely(rq->window_start)) +- return; +- +- if (!sync_cpu_available) { +- rq->window_start = 1; +- sync_cpu_available = 1; +- atomic64_set(&walt_irq_work_lastq_ws, rq->window_start); +- walt_load_reported_window = +- atomic64_read(&walt_irq_work_lastq_ws); +- +- } else { +- struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask)); +- +- raw_spin_unlock(&rq->__lock); +- double_rq_lock(rq, sync_rq); +- rq->window_start = sync_rq->window_start; +- rq->curr_runnable_sum = rq->prev_runnable_sum = 0; +- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0; +- raw_spin_unlock(&sync_rq->__lock); +- } +- +- rq->curr->ravg.mark_start = rq->window_start; +-} +- +-/* +- * Called when new window is starting for a task, to record cpu usage over +- * recently concluded window(s). Normally 'samples' should be 1. It can be > 1 +- * when, say, a real-time task runs without preemption for several windows at a +- * stretch. 
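The demand derived from that history depends on sched_window_stats_policy; the sketch below (userspace, illustrative names, with hist[0] assumed to already hold the newest sample, as update_history() arranges) mirrors the four policies implemented below:

#include <stdint.h>
#include <stdio.h>

#define HIST_SIZE 5     /* stands in for sched_ravg_hist_size */

enum { POLICY_RECENT, POLICY_MAX, POLICY_AVG, POLICY_MAX_RECENT_AVG };

static uint32_t pick_demand(const uint32_t hist[HIST_SIZE], uint32_t runtime,
                            int policy)
{
        uint64_t sum = 0;
        uint32_t i, max = 0, avg;

        for (i = 0; i < HIST_SIZE; i++) {
                sum += hist[i];
                if (hist[i] > max)
                        max = hist[i];
        }
        avg = (uint32_t)(sum / HIST_SIZE);

        switch (policy) {
        case POLICY_RECENT:     return runtime;  /* newest window only */
        case POLICY_MAX:        return max;      /* worst recent window */
        case POLICY_AVG:        return avg;      /* smooth but sluggish */
        default:                return avg > runtime ? avg : runtime;
        }
}

int main(void)
{
        uint32_t hist[HIST_SIZE] = { 4000, 9000, 3000, 2000, 1000 };

        /* the default max(avg, recent) policy reacts fast to new load
         * while still remembering a recent spike through the average */
        printf("%u\n", pick_demand(hist, hist[0], POLICY_MAX_RECENT_AVG));
        return 0;       /* avg = 3800, runtime = 4000 -> prints 4000 */
}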
+- */
+-static void update_history(struct rq *rq, struct task_struct *p,
+-			   u32 runtime, int samples, int event)
+-{
+-	u32 *hist = &p->ravg.sum_history[0];
+-	int ridx, widx;
+-	u32 max = 0, avg, demand;
+-	u64 sum = 0;
+-	u16 demand_scaled;
+-
+-	/* Ignore windows where task had no activity */
+-	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+-		goto done;
+-
+-	/* Push new 'runtime' value onto stack */
+-	widx = sched_ravg_hist_size - 1;
+-	ridx = widx - samples;
+-	for (; ridx >= 0; --widx, --ridx) {
+-		hist[widx] = hist[ridx];
+-		sum += hist[widx];
+-		if (hist[widx] > max)
+-			max = hist[widx];
+-	}
+-
+-	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
+-		hist[widx] = runtime;
+-		sum += hist[widx];
+-		if (hist[widx] > max)
+-			max = hist[widx];
+-	}
+-
+-	p->ravg.sum = 0;
+-
+-	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+-		demand = runtime;
+-	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+-		demand = max;
+-	} else {
+-		avg = div64_u64(sum, sched_ravg_hist_size);
+-		if (sched_window_stats_policy == WINDOW_STATS_AVG)
+-			demand = avg;
+-		else
+-			demand = max(avg, runtime);
+-	}
+-	demand_scaled = scale_demand(demand);
+-
+-	/*
+-	 * A throttled deadline sched class task gets dequeued without
+-	 * changing p->on_rq. Since the dequeue decrements walt stats,
+-	 * avoid decrementing it here again.
+-	 *
+-	 * When the window is rolled over, the cumulative window demand
+-	 * is reset to the cumulative runnable average (contribution from
+-	 * the tasks on the runqueue). If the current task is dequeued
+-	 * already, its demand is not included in the cumulative runnable
+-	 * average. So add the task demand separately to cumulative window
+-	 * demand.
+-	 */
+-	if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
+-		if (task_on_rq_queued(p)
+-				&& p->sched_class->fixup_walt_sched_stats)
+-			p->sched_class->fixup_walt_sched_stats(rq, p,
+-					demand_scaled);
+-		else if (rq->curr == p)
+-			walt_fixup_cum_window_demand(rq, demand_scaled);
+-	}
+-
+-	p->ravg.demand = demand;
+-	p->ravg.demand_scaled = demand_scaled;
+-
+-done:
+-	trace_sched_update_history(rq, p, runtime, samples, event);
+-}
+-
+-#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+-
+-static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+-{
+-	delta = scale_exec_time(delta, rq);
+-	p->ravg.sum += delta;
+-	if (unlikely(p->ravg.sum > sched_ravg_window))
+-		p->ravg.sum = sched_ravg_window;
+-
+-	return delta;
+-}
+-
+-/*
+- * Account cpu demand of task and/or update task's cpu demand history
+- *
+- * ms = p->ravg.mark_start;
+- * wc = wallclock
+- * ws = rq->window_start
+- *
+- * Three possibilities:
+- *
+- *	a) Task event is contained within one window.
+- *		window_start < mark_start < wallclock
+- *
+- *		ws   ms  wc
+- *		|    |   |
+- *		V    V   V
+- *		|---------------|
+- *
+- *	In this case, p->ravg.sum is updated *iff* event is appropriate
+- *	(ex: event == PUT_PREV_TASK)
+- *
+- *	b) Task event spans two windows.
+- *		mark_start < window_start < wallclock
+- *
+- *		ms   ws   wc
+- *		|    |    |
+- *		V    V    V
+- *		-----|-------------------
+- *
+- *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+- *	is appropriate, then a new window sample is recorded followed
+- *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+- *
+- *	c) Task event spans more than two windows.
+- * +- * ms ws_tmp ws wc +- * | | | | +- * V V V V +- * ---|-------|-------|-------|-------|------ +- * | | +- * |<------ nr_full_windows ------>| +- * +- * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff* +- * event is appropriate, window sample of p->ravg.sum is recorded, +- * 'nr_full_window' samples of window_size is also recorded *iff* +- * event is appropriate and finally p->ravg.sum is set to (wc - ws) +- * *iff* event is appropriate. +- * +- * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time() +- * depends on it! +- */ +-static u64 update_task_demand(struct task_struct *p, struct rq *rq, +- int event, u64 wallclock) +-{ +- u64 mark_start = p->ravg.mark_start; +- u64 delta, window_start = rq->window_start; +- int new_window, nr_full_windows; +- u32 window_size = sched_ravg_window; +- u64 runtime; +- +-#ifdef CONFIG_SCHED_RTG +- update_group_demand(p, rq, event, wallclock); +-#endif +- +- new_window = mark_start < window_start; +- if (!account_busy_for_task_demand(rq, p, event)) { +- if (new_window) +- /* +- * If the time accounted isn't being accounted as +- * busy time, and a new window started, only the +- * previous window need be closed out with the +- * pre-existing demand. Multiple windows may have +- * elapsed, but since empty windows are dropped, +- * it is not necessary to account those. +- */ +- update_history(rq, p, p->ravg.sum, 1, event); +- return 0; +- } +- +- if (!new_window) { +- /* +- * The simple case - busy time contained within the existing +- * window. +- */ +- return add_to_task_demand(rq, p, wallclock - mark_start); +- } +- +- /* +- * Busy time spans at least two windows. Temporarily rewind +- * window_start to first window boundary after mark_start. +- */ +- delta = window_start - mark_start; +- nr_full_windows = div64_u64(delta, window_size); +- window_start -= (u64)nr_full_windows * (u64)window_size; +- +- /* Process (window_start - mark_start) first */ +- runtime = add_to_task_demand(rq, p, window_start - mark_start); +- +- /* Push new sample(s) into task's demand history */ +- update_history(rq, p, p->ravg.sum, 1, event); +- if (nr_full_windows) { +- u64 scaled_window = scale_exec_time(window_size, rq); +- +- update_history(rq, p, scaled_window, nr_full_windows, event); +- runtime += nr_full_windows * scaled_window; +- } +- +- /* +- * Roll window_start back to current to process any remainder +- * in current window. 
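Case (c) is the subtle one; a self-contained sketch of the same split (names follow the comment's shorthand above, the function itself is illustrative) makes the arithmetic concrete:

#include <stdint.h>
#include <stdio.h>

/* Cut the busy span [ms, wc) against window boundaries, the way
 * update_task_demand() below does by temporarily rewinding window_start. */
static void split_busy_time(uint64_t ms, uint64_t wc, uint64_t ws,
                            uint64_t window, uint64_t *head,
                            uint64_t *n_full, uint64_t *tail)
{
        if (ms >= ws) {                 /* case a: within one window */
                *head = wc - ms;
                *n_full = 0;
                *tail = 0;
                return;
        }

        *n_full = (ws - ms) / window;   /* full windows wholly elapsed */
        ws -= *n_full * window;         /* ws_tmp: first boundary after ms */
        *head = ws - ms;                /* partial piece before ws_tmp */
        *tail = wc - (ws + *n_full * window);   /* remainder, current window */
}

int main(void)
{
        uint64_t head, n_full, tail;

        /* ms = 7, wc = 38, window_start = 35, window size = 10 */
        split_busy_time(7, 38, 35, 10, &head, &n_full, &tail);
        printf("head=%llu full=%llu tail=%llu\n", (unsigned long long)head,
               (unsigned long long)n_full, (unsigned long long)tail);
        return 0;       /* head=8, full=2, tail=3: 8 + 2*10 + 3 == 38 - 7 */
}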
+- */
+-	window_start += (u64)nr_full_windows * (u64)window_size;
+-
+-	/* Process (wallclock - window_start) next */
+-	mark_start = window_start;
+-	runtime += add_to_task_demand(rq, p, wallclock - mark_start);
+-
+-	return runtime;
+-}
+-
+-static u32 empty_windows[NR_CPUS];
+-
+-static void rollover_task_window(struct task_struct *p, bool full_window)
+-{
+-	u32 *curr_cpu_windows = empty_windows;
+-	u32 curr_window;
+-	int i;
+-
+-	/* Rollover the sum */
+-	curr_window = 0;
+-
+-	if (!full_window) {
+-		curr_window = p->ravg.curr_window;
+-		curr_cpu_windows = p->ravg.curr_window_cpu;
+-	}
+-
+-	p->ravg.prev_window = curr_window;
+-	p->ravg.curr_window = 0;
+-
+-	/* Roll over individual CPU contributions */
+-	for (i = 0; i < nr_cpu_ids; i++) {
+-		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
+-		p->ravg.curr_window_cpu[i] = 0;
+-	}
+-}
+-
+-static void rollover_cpu_window(struct rq *rq, bool full_window)
+-{
+-	u64 curr_sum = rq->curr_runnable_sum;
+-	u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+-
+-	if (unlikely(full_window)) {
+-		curr_sum = 0;
+-		nt_curr_sum = 0;
+-	}
+-
+-	rq->prev_runnable_sum = curr_sum;
+-	rq->nt_prev_runnable_sum = nt_curr_sum;
+-
+-	rq->curr_runnable_sum = 0;
+-	rq->nt_curr_runnable_sum = 0;
+-}
+-
+-static inline int cpu_is_waiting_on_io(struct rq *rq)
+-{
+-	if (!sched_io_is_busy)
+-		return 0;
+-
+-	return atomic_read(&rq->nr_iowait);
+-}
+-
+-static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+-				     u64 irqtime, int event)
+-{
+-	if (is_idle_task(p)) {
+-		/* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
+-		if (event == PICK_NEXT_TASK)
+-			return 0;
+-
+-		/* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
+-		return irqtime || cpu_is_waiting_on_io(rq);
+-	}
+-
+-	if (event == TASK_WAKE)
+-		return 0;
+-
+-	if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
+-		return 1;
+-
+-	/*
+-	 * TASK_UPDATE can be called on a sleeping task, when it's moved
+-	 * between related groups.
+-	 */
+-	if (event == TASK_UPDATE) {
+-		if (rq->curr == p)
+-			return 1;
+-
+-		return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+-	}
+-
+-	/* TASK_MIGRATE, PICK_NEXT_TASK left */
+-	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+-}
+-
+-/*
+- * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+- */
+-static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+-				 int event, u64 wallclock, u64 irqtime)
+-{
+-	int new_window, full_window = 0;
+-	int p_is_curr_task = (p == rq->curr);
+-	u64 mark_start = p->ravg.mark_start;
+-	u64 window_start = rq->window_start;
+-	u32 window_size = sched_ravg_window;
+-	u64 delta;
+-	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+-	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+-	u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+-	u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+-	bool new_task;
+-	int cpu = rq->cpu;
+-#ifdef CONFIG_SCHED_RTG
+-	struct group_cpu_time *cpu_time;
+-	struct related_thread_group *grp;
+-#endif
+-
+-	new_window = mark_start < window_start;
+-	if (new_window) {
+-		full_window = (window_start - mark_start) >= window_size;
+-		if (p->ravg.active_windows < USHRT_MAX)
+-			p->ravg.active_windows++;
+-	}
+-
+-	new_task = is_new_task(p);
+-
+-	/*
+-	 * Handle per-task window rollover. We don't care about the idle
+-	 * task or exiting tasks.
+- */ +- if (!is_idle_task(p) && !exiting_task(p)) { +- if (new_window) +- rollover_task_window(p, full_window); +- } +- +- if (p_is_curr_task && new_window) +- rollover_cpu_window(rq, full_window); +- +- if (!account_busy_for_cpu_time(rq, p, irqtime, event)) +- goto done; +- +-#ifdef CONFIG_SCHED_RTG +- grp = task_related_thread_group(p); +- if (grp) { +- cpu_time = &rq->grp_time; +- +- curr_runnable_sum = &cpu_time->curr_runnable_sum; +- prev_runnable_sum = &cpu_time->prev_runnable_sum; +- +- nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; +- nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; +- } +-#endif +- +- if (!new_window) { +- /* +- * account_busy_for_cpu_time() = 1 so busy time needs +- * to be accounted to the current window. No rollover +- * since we didn't start a new window. An example of this is +- * when a task starts execution and then sleeps within the +- * same window. +- */ +- +- if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) +- delta = wallclock - mark_start; +- else +- delta = irqtime; +- delta = scale_exec_time(delta, rq); +- *curr_runnable_sum += delta; +- if (new_task) +- *nt_curr_runnable_sum += delta; +- +- if (!is_idle_task(p) && !exiting_task(p)) { +- p->ravg.curr_window += delta; +- p->ravg.curr_window_cpu[cpu] += delta; +- } +- +- goto done; +- } +- +- if (!p_is_curr_task) { +- /* +- * account_busy_for_cpu_time() = 1 so busy time needs +- * to be accounted to the current window. A new window +- * has also started, but p is not the current task, so the +- * window is not rolled over - just split up and account +- * as necessary into curr and prev. The window is only +- * rolled over when a new window is processed for the current +- * task. +- * +- * Irqtime can't be accounted by a task that isn't the +- * currently running task. +- */ +- +- if (!full_window) { +- /* +- * A full window hasn't elapsed, account partial +- * contribution to previous completed window. +- */ +- delta = scale_exec_time(window_start - mark_start, rq); +- if (!exiting_task(p)) { +- p->ravg.prev_window += delta; +- p->ravg.prev_window_cpu[cpu] += delta; +- } +- } else { +- /* +- * Since at least one full window has elapsed, +- * the contribution to the previous window is the +- * full window (window_size). +- */ +- delta = scale_exec_time(window_size, rq); +- if (!exiting_task(p)) { +- p->ravg.prev_window = delta; +- p->ravg.prev_window_cpu[cpu] = delta; +- } +- } +- +- *prev_runnable_sum += delta; +- if (new_task) +- *nt_prev_runnable_sum += delta; +- +- /* Account piece of busy time in the current window. */ +- delta = scale_exec_time(wallclock - window_start, rq); +- *curr_runnable_sum += delta; +- if (new_task) +- *nt_curr_runnable_sum += delta; +- +- if (!exiting_task(p)) { +- p->ravg.curr_window = delta; +- p->ravg.curr_window_cpu[cpu] = delta; +- } +- +- goto done; +- } +- +- if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) { +- /* +- * account_busy_for_cpu_time() = 1 so busy time needs +- * to be accounted to the current window. A new window +- * has started and p is the current task so rollover is +- * needed. If any of these three above conditions are true +- * then this busy time can't be accounted as irqtime. +- * +- * Busy time for the idle task or exiting tasks need not +- * be accounted. +- * +- * An example of this would be a task that starts execution +- * and then sleeps once a new window has begun. 
+- */
+-
+-		if (!full_window) {
+-			/*
+-			 * A full window hasn't elapsed, account partial
+-			 * contribution to previous completed window.
+-			 */
+-			delta = scale_exec_time(window_start - mark_start, rq);
+-			if (!is_idle_task(p) && !exiting_task(p)) {
+-				p->ravg.prev_window += delta;
+-				p->ravg.prev_window_cpu[cpu] += delta;
+-			}
+-		} else {
+-			/*
+-			 * Since at least one full window has elapsed,
+-			 * the contribution to the previous window is the
+-			 * full window (window_size).
+-			 */
+-			delta = scale_exec_time(window_size, rq);
+-			if (!is_idle_task(p) && !exiting_task(p)) {
+-				p->ravg.prev_window = delta;
+-				p->ravg.prev_window_cpu[cpu] = delta;
+-			}
+-		}
+-
+-		/*
+-		 * Rollover is done here by overwriting the values in
+-		 * prev_runnable_sum and curr_runnable_sum.
+-		 */
+-		*prev_runnable_sum += delta;
+-		if (new_task)
+-			*nt_prev_runnable_sum += delta;
+-
+-		/* Account piece of busy time in the current window. */
+-		delta = scale_exec_time(wallclock - window_start, rq);
+-		*curr_runnable_sum += delta;
+-		if (new_task)
+-			*nt_curr_runnable_sum += delta;
+-
+-		if (!is_idle_task(p) && !exiting_task(p)) {
+-			p->ravg.curr_window = delta;
+-			p->ravg.curr_window_cpu[cpu] = delta;
+-		}
+-
+-		goto done;
+-	}
+-
+-	if (irqtime) {
+-		/*
+-		 * account_busy_for_cpu_time() = 1 so busy time needs
+-		 * to be accounted to the current window. A new window
+-		 * has started and p is the current task so rollover is
+-		 * needed. The current task must be the idle task because
+-		 * irqtime is not accounted for any other task.
+-		 *
+-		 * Irqtime will be accounted each time we process IRQ activity
+-		 * after a period of idleness, so we know the IRQ busy time
+-		 * started at wallclock - irqtime.
+-		 */
+-
+-		BUG_ON(!is_idle_task(p));
+-		mark_start = wallclock - irqtime;
+-
+-		/*
+-		 * Roll window over. If IRQ busy time was just in the current
+-		 * window then that is all that need be accounted.
+-		 */
+-		if (mark_start > window_start) {
+-			*curr_runnable_sum = scale_exec_time(irqtime, rq);
+-			return;
+-		}
+-
+-		/*
+-		 * The IRQ busy time spanned multiple windows. Account the
+-		 * busy time preceding the current window start first.
+-		 */
+-		delta = window_start - mark_start;
+-		if (delta > window_size)
+-			delta = window_size;
+-		delta = scale_exec_time(delta, rq);
+-		*prev_runnable_sum += delta;
+-
+-		/* Process the remaining IRQ busy time in the current window.
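A worked example of this idle-IRQ split, with illustrative timestamps (plain integers standing in for nanoseconds; the frequency scaling done by scale_exec_time() is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t wallclock = 105, irqtime = 12;
        uint64_t window_start = 100, window_size = 20;
        uint64_t mark_start = wallclock - irqtime; /* IRQ busy time began here */
        uint64_t prev = 0, curr;

        if (mark_start > window_start) {
                curr = irqtime;         /* entirely inside current window */
        } else {
                prev = window_start - mark_start;
                if (prev > window_size) /* cap at one full previous window */
                        prev = window_size;
                curr = wallclock - window_start;
        }
        printf("prev=%llu curr=%llu\n", (unsigned long long)prev,
               (unsigned long long)curr);      /* prev=7 curr=5 */
        return 0;
}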
*/ +- delta = wallclock - window_start; +- rq->curr_runnable_sum = scale_exec_time(delta, rq); +- +- return; +- } +- +-done: +- return; +-} +- +-static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq) +-{ +- u64 result; +- +- if (old_window_start == rq->window_start) +- return; +- +- result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start, +- rq->window_start); +- if (result == old_window_start) +- irq_work_queue(&walt_cpufreq_irq_work); +-} +- +-/* Reflect task activity on its demand and cpu's busy time statistics */ +-void update_task_ravg(struct task_struct *p, struct rq *rq, int event, +- u64 wallclock, u64 irqtime) +-{ +- u64 old_window_start; +- +- if (!rq->window_start || sched_disable_window_stats || +- p->ravg.mark_start == wallclock) +- return; +- +- lockdep_assert_held(&rq->__lock); +- +- old_window_start = update_window_start(rq, wallclock, event); +- +-#ifdef CONFIG_SCHED_RTG +- update_group_nr_running(p, event, wallclock); +-#endif +- if (!p->ravg.mark_start) +- goto done; +- +- update_task_demand(p, rq, event, wallclock); +- update_cpu_busy_time(p, rq, event, wallclock, irqtime); +- +- if (exiting_task(p)) +- goto done; +- +- trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime); +-done: +- p->ravg.mark_start = wallclock; +- +- run_walt_irq_work(old_window_start, rq); +-} +- +-int sysctl_sched_walt_init_task_load_pct_sysctl_handler(struct ctl_table *table, +- int write, void __user *buffer, size_t *length, loff_t *ppos) +-{ +- int rc; +- +- rc = proc_dointvec(table, write, buffer, length, ppos); +- if (rc) +- return rc; +- +- sysctl_sched_init_task_load_pct = sysctl_sched_walt_init_task_load_pct; +- +- return 0; +-} +- +-u32 sched_get_init_task_load(struct task_struct *p) +-{ +- return p->init_load_pct; +-} +- +-int sched_set_init_task_load(struct task_struct *p, int init_load_pct) +-{ +- if (init_load_pct < 0 || init_load_pct > 100) +- return -EINVAL; +- +- p->init_load_pct = init_load_pct; +- +- return 0; +-} +- +-void init_new_task_load(struct task_struct *p) +-{ +- int i; +- u32 init_load_windows = sched_init_task_load_windows; +- u32 init_load_windows_scaled = sched_init_task_load_windows_scaled; +- u32 init_load_pct = current->init_load_pct; +- +-#ifdef CONFIG_SCHED_RTG +- init_task_rtg(p); +-#endif +- +- p->last_sleep_ts = 0; +- p->init_load_pct = 0; +- memset(&p->ravg, 0, sizeof(struct ravg)); +- +- p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), +- GFP_KERNEL | __GFP_NOFAIL); +- p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), +- GFP_KERNEL | __GFP_NOFAIL); +- +- if (init_load_pct) { +- init_load_windows = div64_u64((u64)init_load_pct * +- (u64)sched_ravg_window, 100); +- init_load_windows_scaled = scale_demand(init_load_windows); +- } +- +- p->ravg.demand = init_load_windows; +- p->ravg.demand_scaled = init_load_windows_scaled; +- for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i) +- p->ravg.sum_history[i] = init_load_windows; +-} +- +-void free_task_load_ptrs(struct task_struct *p) +-{ +- kfree(p->ravg.curr_window_cpu); +- kfree(p->ravg.prev_window_cpu); +- +- /* +- * update_task_ravg() can be called for exiting tasks. While the +- * function itself ensures correct behavior, the corresponding +- * trace event requires that these pointers be NULL. 
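The initial-demand arithmetic in init_new_task_load() and walt_init_once() above reduces to two divisions; a sketch with an assumed 20ms window (the actual sched_ravg_window is board configuration, and the variable names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
        uint64_t window = 20000000;                /* sched_ravg_window, ns */
        uint64_t divisor = window >> SCHED_CAPACITY_SHIFT;
        uint64_t pct = 15;                         /* init task load percent */
        uint64_t demand = pct * window / 100;      /* busy ns per window */
        uint64_t demand_scaled = demand / divisor; /* 0..1024 capacity units */

        /* 15% of a window lands near 15% of SCHED_CAPACITY_SCALE (1024). */
        printf("demand=%llu ns, scaled=%llu\n",
               (unsigned long long)demand,
               (unsigned long long)demand_scaled); /* 3000000 ns, 153 */
        return 0;
}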
+- */ +- p->ravg.curr_window_cpu = NULL; +- p->ravg.prev_window_cpu = NULL; +-} +- +-void reset_task_stats(struct task_struct *p) +-{ +- u32 sum = 0; +- u32 *curr_window_ptr = NULL; +- u32 *prev_window_ptr = NULL; +- +- if (exiting_task(p)) { +- sum = EXITING_TASK_MARKER; +- } else { +- curr_window_ptr = p->ravg.curr_window_cpu; +- prev_window_ptr = p->ravg.prev_window_cpu; +- memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids); +- memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids); +- } +- +- memset(&p->ravg, 0, sizeof(struct ravg)); +- +- p->ravg.curr_window_cpu = curr_window_ptr; +- p->ravg.prev_window_cpu = prev_window_ptr; +- +- /* Retain EXITING_TASK marker */ +- p->ravg.sum_history[0] = sum; +-} +- +-void mark_task_starting(struct task_struct *p) +-{ +- u64 wallclock; +- struct rq *rq = task_rq(p); +- +- if (!rq->window_start || sched_disable_window_stats) { +- reset_task_stats(p); +- return; +- } +- +- wallclock = sched_ktime_clock(); +- p->ravg.mark_start = wallclock; +-} +- +-unsigned int max_possible_efficiency = 1; +-unsigned int min_possible_efficiency = UINT_MAX; +-unsigned int max_power_cost = 1; +- +-static cpumask_t all_cluster_cpus = CPU_MASK_NONE; +-DECLARE_BITMAP(all_cluster_ids, NR_CPUS); +-struct sched_cluster *sched_cluster[NR_CPUS]; +-int num_clusters; +- +-struct list_head cluster_head; +- +-static void +-insert_cluster(struct sched_cluster *cluster, struct list_head *head) +-{ +- struct sched_cluster *tmp; +- struct list_head *iter = head; +- +- list_for_each_entry(tmp, head, list) { +- if (cluster->max_power_cost < tmp->max_power_cost) +- break; +- iter = &tmp->list; +- } +- +- list_add(&cluster->list, iter); +-} +- +-static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus) +-{ +- struct sched_cluster *cluster = NULL; +- +- cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC); +- if (!cluster) { +- pr_warn("Cluster allocation failed. 
Possible bad scheduling\n"); +- return NULL; +- } +- +- INIT_LIST_HEAD(&cluster->list); +- cluster->max_power_cost = 1; +- cluster->min_power_cost = 1; +- cluster->capacity = 1024; +- cluster->max_possible_capacity = 1024; +- cluster->efficiency = 1; +- cluster->load_scale_factor = 1024; +- cluster->cur_freq = 1; +- cluster->max_freq = 1; +- cluster->min_freq = 1; +- cluster->max_possible_freq = 1; +- cluster->freq_init_done = false; +- +- raw_spin_lock_init(&cluster->load_lock); +- cluster->cpus = *cpus; +- cluster->efficiency = topology_get_cpu_scale(cpumask_first(cpus)); +- +- if (cluster->efficiency > max_possible_efficiency) +- max_possible_efficiency = cluster->efficiency; +- if (cluster->efficiency < min_possible_efficiency) +- min_possible_efficiency = cluster->efficiency; +- +- return cluster; +-} +- +-static void add_cluster(const struct cpumask *cpus, struct list_head *head) +-{ +- struct sched_cluster *cluster = alloc_new_cluster(cpus); +- int i; +- +- if (!cluster) +- return; +- +- for_each_cpu(i, cpus) +- cpu_rq(i)->cluster = cluster; +- +- insert_cluster(cluster, head); +- set_bit(num_clusters, all_cluster_ids); +- num_clusters++; +-} +- +-static int compute_max_possible_capacity(struct sched_cluster *cluster) +-{ +- int capacity = 1024; +- +- capacity *= capacity_scale_cpu_efficiency(cluster); +- capacity >>= 10; +- +- capacity *= (1024 * cluster->max_possible_freq) / min_max_freq; +- capacity >>= 10; +- +- return capacity; +-} +- +-void walt_update_min_max_capacity(void) +-{ +- unsigned long flags; +- +- acquire_rq_locks_irqsave(cpu_possible_mask, &flags); +- __update_min_max_capacity(); +- release_rq_locks_irqrestore(cpu_possible_mask, &flags); +-} +- +-static int +-compare_clusters(void *priv, const struct list_head *a, const struct list_head *b) +-{ +- struct sched_cluster *cluster1, *cluster2; +- int ret; +- +- cluster1 = container_of(a, struct sched_cluster, list); +- cluster2 = container_of(b, struct sched_cluster, list); +- +- /* +- * Don't assume higher capacity means higher power. If the +- * power cost is same, sort the higher capacity cluster before +- * the lower capacity cluster to start placing the tasks +- * on the higher capacity cluster. +- */ +- ret = cluster1->max_power_cost > cluster2->max_power_cost || +- (cluster1->max_power_cost == cluster2->max_power_cost && +- cluster1->max_possible_capacity < +- cluster2->max_possible_capacity); +- +- return ret; +-} +- +-void sort_clusters(void) +-{ +- struct sched_cluster *cluster; +- struct list_head new_head; +- unsigned int tmp_max = 1; +- +- INIT_LIST_HEAD(&new_head); +- +- for_each_sched_cluster(cluster) { +- cluster->max_power_cost = power_cost(cluster_first_cpu(cluster), +- max_task_load()); +- cluster->min_power_cost = power_cost(cluster_first_cpu(cluster), +- 0); +- +- if (cluster->max_power_cost > tmp_max) +- tmp_max = cluster->max_power_cost; +- } +- max_power_cost = tmp_max; +- +- move_list(&new_head, &cluster_head, true); +- +- list_sort(NULL, &new_head, compare_clusters); +- assign_cluster_ids(&new_head); +- +- /* +- * Ensure cluster ids are visible to all CPUs before making +- * cluster_head visible. 
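compare_clusters() above orders the list ascending by power cost, breaking ties in favour of the higher-capacity cluster so tasks are placed there first. The same ordering modelled with qsort() in userspace (toy struct and values, illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct toy_cluster { int max_power_cost, max_possible_capacity; };

static int cmp(const void *a, const void *b)
{
        const struct toy_cluster *c1 = a, *c2 = b;

        if (c1->max_power_cost != c2->max_power_cost)
                return c1->max_power_cost - c2->max_power_cost;
        /* equal cost: bigger cluster sorts first */
        return c2->max_possible_capacity - c1->max_possible_capacity;
}

int main(void)
{
        struct toy_cluster c[] = { {300, 1024}, {100, 512}, {100, 1024} };
        int i;

        qsort(c, 3, sizeof(c[0]), cmp);
        for (i = 0; i < 3; i++) /* {100,1024}, {100,512}, {300,1024} */
                printf("cost=%d cap=%d\n", c[i].max_power_cost,
                       c[i].max_possible_capacity);
        return 0;
}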
+- */ +- move_list(&cluster_head, &new_head, false); +-} +- +-static void update_all_clusters_stats(void) +-{ +- struct sched_cluster *cluster; +- u64 highest_mpc = 0, lowest_mpc = U64_MAX; +- unsigned long flags; +- +- acquire_rq_locks_irqsave(cpu_possible_mask, &flags); +- +- for_each_sched_cluster(cluster) { +- u64 mpc; +- +- cluster->capacity = compute_capacity(cluster); +- mpc = cluster->max_possible_capacity = +- compute_max_possible_capacity(cluster); +- cluster->load_scale_factor = compute_load_scale_factor(cluster); +- +- cluster->exec_scale_factor = +- DIV_ROUND_UP(cluster->efficiency * 1024, +- max_possible_efficiency); +- +- if (mpc > highest_mpc) +- highest_mpc = mpc; +- +- if (mpc < lowest_mpc) +- lowest_mpc = mpc; +- } +- +- max_possible_capacity = highest_mpc; +- min_max_possible_capacity = lowest_mpc; +- +- __update_min_max_capacity(); +- release_rq_locks_irqrestore(cpu_possible_mask, &flags); +-} +- +-void update_cluster_topology(void) +-{ +- struct cpumask cpus = *cpu_possible_mask; +- const struct cpumask *cluster_cpus; +- struct list_head new_head; +- int i; +- +- INIT_LIST_HEAD(&new_head); +- +- for_each_cpu(i, &cpus) { +- cluster_cpus = cpu_coregroup_mask(i); +- cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus); +- cpumask_andnot(&cpus, &cpus, cluster_cpus); +- add_cluster(cluster_cpus, &new_head); +- } +- +- assign_cluster_ids(&new_head); +- +- /* +- * Ensure cluster ids are visible to all CPUs before making +- * cluster_head visible. +- */ +- move_list(&cluster_head, &new_head, false); +- update_all_clusters_stats(); +-} +- +-struct sched_cluster init_cluster = { +- .list = LIST_HEAD_INIT(init_cluster.list), +- .id = 0, +- .max_power_cost = 1, +- .min_power_cost = 1, +- .capacity = 1024, +- .max_possible_capacity = 1024, +- .efficiency = 1, +- .load_scale_factor = 1024, +- .cur_freq = 1, +- .max_freq = 1, +- .min_freq = 1, +- .max_possible_freq = 1, +- .exec_scale_factor = 1024, +-}; +- +-void init_clusters(void) +-{ +- bitmap_clear(all_cluster_ids, 0, NR_CPUS); +- init_cluster.cpus = *cpu_possible_mask; +- raw_spin_lock_init(&init_cluster.load_lock); +- INIT_LIST_HEAD(&cluster_head); +-} +- +-static unsigned long cpu_max_table_freq[NR_CPUS]; +- +-void update_cpu_cluster_capacity(const cpumask_t *cpus) +-{ +- int i; +- struct sched_cluster *cluster; +- struct cpumask cpumask; +- unsigned long flags; +- +- cpumask_copy(&cpumask, cpus); +- acquire_rq_locks_irqsave(cpu_possible_mask, &flags); +- +- for_each_cpu(i, &cpumask) { +- cluster = cpu_rq(i)->cluster; +- cpumask_andnot(&cpumask, &cpumask, &cluster->cpus); +- +- cluster->capacity = compute_capacity(cluster); +- cluster->load_scale_factor = compute_load_scale_factor(cluster); +- } +- +- __update_min_max_capacity(); +- +- release_rq_locks_irqrestore(cpu_possible_mask, &flags); +-} +- +-static int cpufreq_notifier_policy(struct notifier_block *nb, +- unsigned long val, void *data) +-{ +- struct cpufreq_policy *policy = (struct cpufreq_policy *)data; +- struct sched_cluster *cluster = NULL; +- struct cpumask policy_cluster = *policy->related_cpus; +- unsigned int orig_max_freq = 0; +- int i, j, update_capacity = 0; +- +- if (val != CPUFREQ_CREATE_POLICY) +- return 0; +- +- walt_update_min_max_capacity(); +- +- max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq); +- if (min_max_freq == 1) +- min_max_freq = UINT_MAX; +- min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq); +- BUG_ON(!min_max_freq); +- BUG_ON(!policy->max); +- +- for_each_cpu(i, &policy_cluster) +- 
cpu_max_table_freq[i] = policy->cpuinfo.max_freq; +- +- for_each_cpu(i, &policy_cluster) { +- cluster = cpu_rq(i)->cluster; +- cpumask_andnot(&policy_cluster, &policy_cluster, +- &cluster->cpus); +- +- orig_max_freq = cluster->max_freq; +- cluster->min_freq = policy->min; +- cluster->max_freq = policy->max; +- cluster->cur_freq = policy->cur; +- +- if (!cluster->freq_init_done) { +- mutex_lock(&cluster_lock); +- for_each_cpu(j, &cluster->cpus) +- cpumask_copy(&cpu_rq(j)->freq_domain_cpumask, +- policy->related_cpus); +- cluster->max_possible_freq = policy->cpuinfo.max_freq; +- cluster->max_possible_capacity = +- compute_max_possible_capacity(cluster); +- cluster->freq_init_done = true; +- +- sort_clusters(); +- update_all_clusters_stats(); +- mutex_unlock(&cluster_lock); +- continue; +- } +- +- update_capacity += (orig_max_freq != cluster->max_freq); +- } +- +- if (update_capacity) +- update_cpu_cluster_capacity(policy->related_cpus); +- +- return 0; +-} +- +-static struct notifier_block notifier_policy_block = { +- .notifier_call = cpufreq_notifier_policy +-}; +- +-static int cpufreq_notifier_trans(struct notifier_block *nb, +- unsigned long val, void *data) +-{ +- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data; +- unsigned int cpu = freq->policy->cpu, new_freq = freq->new; +- unsigned long flags; +- struct sched_cluster *cluster; +- struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask; +- int i, j; +- +- if (val != CPUFREQ_POSTCHANGE) +- return NOTIFY_DONE; +- +- if (cpu_cur_freq(cpu) == new_freq) +- return NOTIFY_OK; +- +- for_each_cpu(i, &policy_cpus) { +- cluster = cpu_rq(i)->cluster; +- +- for_each_cpu(j, &cluster->cpus) { +- struct rq *rq = cpu_rq(j); +- +- raw_spin_lock_irqsave(&rq->__lock, flags); +- update_task_ravg(rq->curr, rq, TASK_UPDATE, +- sched_ktime_clock(), 0); +- raw_spin_unlock_irqrestore(&rq->__lock, flags); +- } +- +- cluster->cur_freq = new_freq; +- cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus); +- } +- +- return NOTIFY_OK; +-} +- +-static struct notifier_block notifier_trans_block = { +- .notifier_call = cpufreq_notifier_trans +-}; +- +-static int register_walt_callback(void) +-{ +- int ret; +- +- ret = cpufreq_register_notifier(¬ifier_policy_block, +- CPUFREQ_POLICY_NOTIFIER); +- if (!ret) +- ret = cpufreq_register_notifier(¬ifier_trans_block, +- CPUFREQ_TRANSITION_NOTIFIER); +- +- return ret; +-} +-/* +- * cpufreq callbacks can be registered at core_initcall or later time. +- * Any registration done prior to that is "forgotten" by cpufreq. See +- * initialization of variable init_cpufreq_transition_notifier_list_called +- * for further information. +- */ +-core_initcall(register_walt_callback); +- +-/* +- * Runs in hard-irq context. This should ideally run just after the latest +- * window roll-over. +- */ +-void walt_irq_work(struct irq_work *irq_work) +-{ +- struct sched_cluster *cluster; +- struct rq *rq; +- int cpu; +- u64 wc; +- bool is_migration = false; +- int level = 0; +- +- /* Am I the window rollover work or the migration work? 
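The reason the transition notifier above flushes update_task_ravg() before overwriting cluster->cur_freq is that busy time is weighted by the capacity the CPU had while that time accrued. A sketch of the scaling (the helper mirrors scale_exec_time() from walt.h further below; the values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

/* delta ns of wall time, weighted by current capacity (0..1024). */
static uint64_t scale_exec_time(uint64_t delta, uint64_t capacity_curr)
{
        return (delta * capacity_curr) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
        /* 1ms of wall time at half capacity counts as ~0.5ms of work, so
         * accounting it after the frequency change would skew the demand. */
        printf("%llu\n", (unsigned long long)scale_exec_time(1000000, 512));
        return 0;       /* prints 500000 */
}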
*/ +- if (irq_work == &walt_migration_irq_work) +- is_migration = true; +- +- for_each_cpu(cpu, cpu_possible_mask) { +- if (level == 0) +- raw_spin_lock(&cpu_rq(cpu)->__lock); +- else +- raw_spin_lock_nested(&cpu_rq(cpu)->__lock, level); +- level++; +- } +- +- wc = sched_ktime_clock(); +- walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws); +- for_each_sched_cluster(cluster) { +- raw_spin_lock(&cluster->load_lock); +- +- for_each_cpu(cpu, &cluster->cpus) { +- rq = cpu_rq(cpu); +- if (rq->curr) { +- update_task_ravg(rq->curr, rq, +- TASK_UPDATE, wc, 0); +- account_load_subtractions(rq); +- } +- } +- +- raw_spin_unlock(&cluster->load_lock); +- } +- +- for_each_sched_cluster(cluster) { +- cpumask_t cluster_online_cpus; +- unsigned int num_cpus, i = 1; +- +- cpumask_and(&cluster_online_cpus, &cluster->cpus, +- cpu_online_mask); +- num_cpus = cpumask_weight(&cluster_online_cpus); +- for_each_cpu(cpu, &cluster_online_cpus) { +- int flag = SCHED_CPUFREQ_WALT; +- +- rq = cpu_rq(cpu); +- +- if (i == num_cpus) +- cpufreq_update_util(cpu_rq(cpu), flag); +- else +- cpufreq_update_util(cpu_rq(cpu), flag | +- SCHED_CPUFREQ_CONTINUE); +- i++; +- } +- } +- +- for_each_cpu(cpu, cpu_possible_mask) +- raw_spin_unlock(&cpu_rq(cpu)->__lock); +- +- if (!is_migration) +- core_ctl_check(this_rq()->window_start); +-} +- +-static void walt_init_once(void) +-{ +- init_irq_work(&walt_migration_irq_work, walt_irq_work); +- init_irq_work(&walt_cpufreq_irq_work, walt_irq_work); +- +- walt_cpu_util_freq_divisor = +- (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100; +- walt_scale_demand_divisor = sched_ravg_window >> SCHED_CAPACITY_SHIFT; +- +- sched_init_task_load_windows = +- div64_u64((u64)sysctl_sched_init_task_load_pct * +- (u64)sched_ravg_window, 100); +- sched_init_task_load_windows_scaled = +- scale_demand(sched_init_task_load_windows); +-} +- +-void walt_sched_init_rq(struct rq *rq) +-{ +- static bool init; +- int j; +- +- if (!init) { +- walt_init_once(); +- init = true; +- } +- +- cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask); +- +- rq->walt_stats.cumulative_runnable_avg_scaled = 0; +- rq->window_start = 0; +- rq->walt_flags = 0; +- rq->cur_irqload = 0; +- rq->avg_irqload = 0; +- rq->irqload_ts = 0; +- +- /* +- * All cpus part of same cluster by default. 
This avoids the
+-	 * need to check for rq->cluster being non-NULL in hot-paths
+-	 * like select_best_cpu()
+-	 */
+-	rq->cluster = &init_cluster;
+-	rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+-	rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+-	rq->cum_window_demand_scaled = 0;
+-
+-	for (j = 0; j < NUM_TRACKED_WINDOWS; j++)
+-		memset(&rq->load_subs[j], 0, sizeof(struct load_subtractions));
+-}
+-
+-#define min_cap_cluster()	\
+-	list_first_entry(&cluster_head, struct sched_cluster, list)
+-#define max_cap_cluster()	\
+-	list_last_entry(&cluster_head, struct sched_cluster, list)
+-static int sched_cluster_debug_show(struct seq_file *file, void *param)
+-{
+-	struct sched_cluster *cluster = NULL;
+-
+-	seq_printf(file, "min_id:%d, max_id:%d\n",
+-		   min_cap_cluster()->id,
+-		   max_cap_cluster()->id);
+-
+-	for_each_sched_cluster(cluster) {
+-		seq_printf(file, "id:%d, cpumask:%d(%*pbl)\n",
+-			   cluster->id,
+-			   cpumask_first(&cluster->cpus),
+-			   cpumask_pr_args(&cluster->cpus));
+-	}
+-
+-	return 0;
+-}
+-
+-static int sched_cluster_debug_open(struct inode *inode, struct file *filp)
+-{
+-	return single_open(filp, sched_cluster_debug_show, NULL);
+-}
+-
+-static const struct proc_ops sched_cluster_fops = {
+-	.proc_open = sched_cluster_debug_open,
+-	.proc_read = seq_read,
+-	.proc_lseek = seq_lseek,
+-	.proc_release = seq_release,
+-};
+-
+-static int __init init_sched_cluster_debug_procfs(void)
+-{
+-	struct proc_dir_entry *pe = NULL;
+-
+-	pe = proc_create("sched_cluster",
+-			 0444, NULL, &sched_cluster_fops);
+-	if (!pe)
+-		return -ENOMEM;
+-	return 0;
+-}
+-late_initcall(init_sched_cluster_debug_procfs);
+diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
+deleted file mode 100755
+index c5d6e2410..000000000
+--- a/kernel/sched/walt.h
++++ /dev/null
+@@ -1,255 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * walt.h
+- *
+- * header file for Window Assisted Load Tracking (WALT)
+- *
+- * This software is licensed under the terms of the GNU General Public
+- * License version 2, as published by the Free Software Foundation, and
+- * may be copied, distributed, and modified under those terms.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- * +- */ +- +-#ifndef __WALT_H +-#define __WALT_H +- +-#ifdef CONFIG_SCHED_WALT +- +-#include +- +-#define WINDOW_STATS_RECENT 0 +-#define WINDOW_STATS_MAX 1 +-#define WINDOW_STATS_MAX_RECENT_AVG 2 +-#define WINDOW_STATS_AVG 3 +-#define WINDOW_STATS_INVALID_POLICY 4 +- +-#define EXITING_TASK_MARKER 0xdeaddead +- +-#define SCHED_NEW_TASK_WINDOWS 5 +- +-extern unsigned int sched_ravg_window; +-extern unsigned int sysctl_sched_walt_init_task_load_pct; +-unsigned long capacity_curr_of(int cpu); +- +-static inline int exiting_task(struct task_struct *p) +-{ +- return (p->ravg.sum_history[0] == EXITING_TASK_MARKER); +-} +- +-static inline struct sched_cluster *cpu_cluster(int cpu) +-{ +- return cpu_rq(cpu)->cluster; +-} +- +-static inline int same_cluster(int src_cpu, int dst_cpu) +-{ +- return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster; +-} +- +-static inline u64 scale_exec_time(u64 delta, struct rq *rq) +-{ +- unsigned long capcurr = capacity_curr_of(cpu_of(rq)); +- +- delta = (delta * capcurr) >> SCHED_CAPACITY_SHIFT; +- +- return delta; +-} +- +-static inline bool is_new_task(struct task_struct *p) +-{ +- return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS; +-} +- +-static inline unsigned int max_task_load(void) +-{ +- return sched_ravg_window; +-} +- +-static inline void +-move_list(struct list_head *dst, struct list_head *src, bool sync_rcu) +-{ +- struct list_head *first, *last; +- +- first = src->next; +- last = src->prev; +- +- if (sync_rcu) { +- INIT_LIST_HEAD_RCU(src); +- synchronize_rcu(); +- } +- +- first->prev = dst; +- dst->prev = last; +- last->next = dst; +- +- /* Ensure list sanity before making the head visible to all CPUs. */ +- smp_mb(); +- dst->next = first; +-} +- +-extern void reset_task_stats(struct task_struct *p); +-extern void update_cluster_topology(void); +-extern void init_clusters(void); +-extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event, +- u64 wallclock, u64 irqtime); +- +-static inline void +-fixup_cumulative_runnable_avg(struct walt_sched_stats *stats, +- s64 demand_scaled_delta) +-{ +- if (sched_disable_window_stats) +- return; +- +- stats->cumulative_runnable_avg_scaled += demand_scaled_delta; +- BUG_ON((s64)stats->cumulative_runnable_avg_scaled < 0); +-} +- +-static inline void +-walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) +-{ +- if (sched_disable_window_stats) +- return; +- +- fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled); +- +- /* +- * Add a task's contribution to the cumulative window demand when +- * +- * (1) task is enqueued with on_rq = 1 i.e migration, +- * prio/cgroup/class change. +- * (2) task is waking for the first time in this window. +- */ +- if (p->on_rq || (p->last_sleep_ts < rq->window_start)) +- walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled); +-} +- +-static inline void +-walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) +-{ +- if (sched_disable_window_stats) +- return; +- +- fixup_cumulative_runnable_avg(&rq->walt_stats, +- -(s64)p->ravg.demand_scaled); +- +- /* +- * on_rq will be 1 for sleeping tasks. So check if the task +- * is migrating or dequeuing in RUNNING state to change the +- * prio/cgroup/class. 
+- */ +- if (task_on_rq_migrating(p) || p->__state == TASK_RUNNING) +- walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled); +-} +-extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p, +- u16 updated_demand_scaled); +-extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p); +-extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p); +-extern void fixup_busy_time(struct task_struct *p, int new_cpu); +-extern void init_new_task_load(struct task_struct *p); +-extern void mark_task_starting(struct task_struct *p); +-extern void set_window_start(struct rq *rq); +-void account_irqtime(int cpu, struct task_struct *curr, u64 delta, u64 wallclock); +- +-void walt_irq_work(struct irq_work *irq_work); +- +-void walt_sched_init_rq(struct rq *rq); +- +-extern void sched_account_irqtime(int cpu, struct task_struct *curr, +- u64 delta, u64 wallclock); +- +-#define SCHED_HIGH_IRQ_TIMEOUT 3 +-static inline u64 sched_irqload(int cpu) +-{ +- struct rq *rq = cpu_rq(cpu); +- s64 delta; +- +- delta = get_jiffies_64() - rq->irqload_ts; +- /* +- * Current context can be preempted by irq and rq->irqload_ts can be +- * updated by irq context so that delta can be negative. +- * But this is okay and we can safely return as this means there +- * was recent irq occurrence. +- */ +- +- if (delta < SCHED_HIGH_IRQ_TIMEOUT) +- return rq->avg_irqload; +- else +- return 0; +-} +- +-static inline int sched_cpu_high_irqload(int cpu) +-{ +- return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload; +-} +- +-extern int +-sysctl_sched_walt_init_task_load_pct_sysctl_handler(struct ctl_table *table, +- int write, void __user *buffer, size_t *length, loff_t *ppos); +- +-static inline unsigned int cpu_cur_freq(int cpu) +-{ +- return cpu_rq(cpu)->cluster->cur_freq; +-} +- +-static inline void assign_cluster_ids(struct list_head *head) +-{ +- struct sched_cluster *cluster; +- int pos = 0; +- +- list_for_each_entry(cluster, head, list) { +- cluster->id = pos; +- sched_cluster[pos++] = cluster; +- } +-} +- +-extern void update_cluster_load_subtractions(struct task_struct *p, +- int cpu, u64 ws, bool new_task); +-#else /* CONFIG_SCHED_WALT */ +-static inline void walt_sched_init_rq(struct rq *rq) { } +- +-static inline void update_task_ravg(struct task_struct *p, struct rq *rq, +- int event, u64 wallclock, u64 irqtime) { } +- +-static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, +- struct task_struct *p) { } +- +-static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, +- struct task_struct *p) { } +- +-static inline void +-inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { } +- +-static inline void +-dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { } +- +-static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } +-static inline void init_new_task_load(struct task_struct *p) { } +-static inline void mark_task_starting(struct task_struct *p) { } +-static inline void set_window_start(struct rq *rq) { } +-static inline void update_cluster_topology(void) { } +-static inline void init_clusters(void) { } +- +-static inline void +-fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p, +- u16 updated_demand_scaled) { } +- +-static inline void sched_account_irqtime(int cpu, struct task_struct *curr, +- u64 delta, u64 wallclock) { } +- +-static inline u64 sched_irqload(int cpu) +-{ +- return 0; +-} +-static inline int sched_cpu_high_irqload(int cpu) +-{ +- return 0; +-} +-static inline int same_cluster(int src_cpu, int 
dst_cpu) { return 1; } +-#endif /* CONFIG_SCHED_WALT */ +- +-#endif /* __WALT_H */ +diff --git a/kernel/smp.c b/kernel/smp.c +index 578a848b9..3eeffeaf5 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -1060,10 +1060,8 @@ void wake_up_all_idle_cpus(void) + + for_each_possible_cpu(cpu) { + preempt_disable(); +- if (cpu != smp_processor_id() && cpu_online(cpu)){ +- if (!cpu_isolated(cpu)) ++ if (cpu != smp_processor_id() && cpu_online(cpu)) + wake_up_if_idle(cpu); +- } + preempt_enable(); + } + } +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c +index 40b00eec5..cedb17ba1 100644 +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -460,11 +460,7 @@ static int __stop_cpus(const struct cpumask *cpumask, + * @cpumask were offline; otherwise, 0 if all executions of @fn + * returned 0, any non zero return value if any returned non zero. + */ +-#ifdef CONFIG_CPU_ISOLATION_OPT +-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) +-#else + static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) +-#endif + { + int ret; + +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 3cacf8cfa..354a2d294 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -1623,58 +1623,6 @@ static struct ctl_table kern_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec, + }, +-#ifdef CONFIG_SCHED_RT_CAS +- { +- .procname = "sched_enable_rt_cas", +- .data = &sysctl_sched_enable_rt_cas, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = proc_dointvec, +- }, +-#endif +-#ifdef CONFIG_SCHED_RT_ACTIVE_LB +- { +- .procname = "sched_enable_rt_active_lb", +- .data = &sysctl_sched_enable_rt_active_lb, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = proc_dointvec, +- }, +-#endif +-#ifdef CONFIG_SCHED_WALT +- { +- .procname = "sched_use_walt_cpu_util", +- .data = &sysctl_sched_use_walt_cpu_util, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = proc_dointvec_minmax, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, +- { +- .procname = "sched_use_walt_task_util", +- .data = &sysctl_sched_use_walt_task_util, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = proc_dointvec_minmax, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, +- { +- .procname = "sched_walt_init_task_load_pct", +- .data = &sysctl_sched_walt_init_task_load_pct, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = sysctl_sched_walt_init_task_load_pct_sysctl_handler, +- }, +- { +- .procname = "sched_cpu_high_irqload", +- .data = &sysctl_sched_cpu_high_irqload, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = proc_dointvec, +- }, +-#endif + #ifdef CONFIG_PROC_SYSCTL + { + .procname = "tainted", +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 1bf77aaa2..63a8ce717 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -2214,65 +2214,6 @@ signed long __sched schedule_timeout_idle(signed long timeout) + EXPORT_SYMBOL(schedule_timeout_idle); + + #ifdef CONFIG_HOTPLUG_CPU +- +-#ifdef CONFIG_CPU_ISOLATION_OPT +-static void migrate_timer_list(struct timer_base *new_base, +- struct hlist_head *head, bool remove_pinned) +-{ +- struct timer_list *timer; +- int cpu = new_base->cpu; +- struct hlist_node *n; +- int is_pinned; +- +- hlist_for_each_entry_safe(timer, n, head, entry) { +- is_pinned = timer->flags & TIMER_PINNED; +- if (!remove_pinned && is_pinned) +- continue; +- +- detach_if_pending(timer, get_timer_base(timer->flags), false); +- 
timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; +- internal_add_timer(new_base, timer); +- } +-} +- +-static void __migrate_timers(unsigned int cpu, bool remove_pinned) +-{ +- struct timer_base *old_base; +- struct timer_base *new_base; +- unsigned long flags; +- int b, i; +- +- for (b = 0; b < NR_BASES; b++) { +- old_base = per_cpu_ptr(&timer_bases[b], cpu); +- new_base = get_cpu_ptr(&timer_bases[b]); +- /* +- * The caller is globally serialized and nobody else +- * takes two locks at once, deadlock is not possible. +- */ +- raw_spin_lock_irqsave(&new_base->lock, flags); +- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); +- +- /* +- * The current CPUs base clock might be stale. Update it +- * before moving the timers over. +- */ +- forward_timer_base(new_base); +- +- if (!cpu_online(cpu)) +- BUG_ON(old_base->running_timer); +- +- for (i = 0; i < WHEEL_SIZE; i++) +- migrate_timer_list(new_base, old_base->vectors + i, +- remove_pinned); +- +- raw_spin_unlock(&old_base->lock); +- raw_spin_unlock_irqrestore(&new_base->lock, flags); +- put_cpu_ptr(&timer_bases); +- } +-} +- +-#else +- + static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) + { + struct timer_list *timer; +@@ -2286,8 +2227,6 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h + } + } + +-#endif /* CONFIG_CPU_ISOLATION_OPT */ +- + int timers_prepare_cpu(unsigned int cpu) + { + struct timer_base *base; +@@ -2304,20 +2243,6 @@ int timers_prepare_cpu(unsigned int cpu) + return 0; + } + +-#ifdef CONFIG_CPU_ISOLATION_OPT +-int timers_dead_cpu(unsigned int cpu) +-{ +- BUG_ON(cpu_online(cpu)); +- __migrate_timers(cpu, true); +- return 0; +-} +- +-void timer_quiesce_cpu(void *cpup) +-{ +- __migrate_timers(*(unsigned int *)cpup, false); +-} +- +-#else + int timers_dead_cpu(unsigned int cpu) + { + struct timer_base *old_base; +@@ -2353,8 +2278,6 @@ int timers_dead_cpu(unsigned int cpu) + return 0; + } + +-#endif /* CONFIG_CPU_ISOLATION_OPT */ +- + #endif /* CONFIG_HOTPLUG_CPU */ + + static void __init init_timer_cpu(int cpu) +diff --git a/kernel/watchdog.c b/kernel/watchdog.c +index c904872be..5cd6d4e26 100644 +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -14,7 +14,6 @@ + + #include + #include +-#include + #include + #include + #include +@@ -538,20 +537,16 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) + return HRTIMER_RESTART; + } + +-void watchdog_enable(unsigned int cpu) ++static void watchdog_enable(unsigned int cpu) + { + struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); + struct completion *done = this_cpu_ptr(&softlockup_completion); +- unsigned int *enabled = this_cpu_ptr(&watchdog_en); + + WARN_ON_ONCE(cpu != smp_processor_id()); + + init_completion(done); + complete(done); + +- if (*enabled) +- return; +- + /* + * Start the timer first to prevent the hardlockup watchdog triggering + * before the timer has a chance to fire. +@@ -566,24 +561,11 @@ void watchdog_enable(unsigned int cpu) + /* Enable the hardlockup detector */ + if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) + watchdog_hardlockup_enable(cpu); +- +- /* +- * Need to ensure above operations are observed by other CPUs before +- * indicating that timer is enabled. This is to synchronize core +- * isolation and hotplug. Core isolation will wait for this flag to be +- * set. 
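The mb() being deleted here paired the watchdog set-up stores with the publish of the per-CPU enabled flag; in portable C11 terms the pattern is a release store matched by an acquire load, e.g. (userspace sketch, names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int watchdog_ready;
static int timer_armed;         /* stands in for the hrtimer/perf state */

static void enable(void)
{
        timer_armed = 1;        /* all set-up stores happen first... */
        atomic_store_explicit(&watchdog_ready, 1, memory_order_release);
}

static int poll(void)
{
        if (!atomic_load_explicit(&watchdog_ready, memory_order_acquire))
                return 0;
        return timer_armed;     /* ...and are guaranteed visible here */
}

int main(void)
{
        enable();
        printf("%d\n", poll()); /* prints 1 */
        return 0;
}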
+- */ +- mb(); +- *enabled = 1; + } + +-void watchdog_disable(unsigned int cpu) ++static void watchdog_disable(unsigned int cpu) + { +- struct hrtimer *hrtimer = per_cpu_ptr(&watchdog_hrtimer, cpu); +- unsigned int *enabled = per_cpu_ptr(&watchdog_en, cpu); +- +- if (!*enabled) +- return; ++ struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); + + WARN_ON_ONCE(cpu != smp_processor_id()); + +@@ -594,18 +576,7 @@ void watchdog_disable(unsigned int cpu) + */ + watchdog_hardlockup_disable(cpu); + hrtimer_cancel(hrtimer); +- wait_for_completion(per_cpu_ptr(&softlockup_completion, cpu)); +- +- /* +- * No need for barrier here since disabling the watchdog is +- * synchronized with hotplug lock +- */ +- *enabled = 0; +-} +- +-bool watchdog_configured(unsigned int cpu) +-{ +- return *per_cpu_ptr(&watchdog_en, cpu); ++ wait_for_completion(this_cpu_ptr(&softlockup_completion)); + } + + static int softlockup_stop_fn(void *data) +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 2dcb380a4..e809b6d8b 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -1189,13 +1189,6 @@ config BOOTPARAM_HUNG_TASK_PANIC + + Say N if unsure. + +-config BOOTPARAM_HUNG_TASK_PANIC_VALUE +- int +- depends on DETECT_HUNG_TASK +- range 0 1 +- default 0 if !BOOTPARAM_HUNG_TASK_PANIC +- default 1 if BOOTPARAM_HUNG_TASK_PANIC +- + config WQ_WATCHDOG + bool "Detect Workqueue Stalls" + depends on DEBUG_KERNEL +diff --git a/lib/Makefile b/lib/Makefile +index 740109b6e..c774c3307 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -149,7 +149,7 @@ obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o + CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) + + obj-y += math/ crypto/ +- ++obj-y += securec/src/ + obj-$(CONFIG_GENERIC_IOMAP) += iomap.o + obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o + obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o +diff --git a/lib/securec/LICENSE b/lib/securec/LICENSE +new file mode 100644 +index 000000000..42f2a8367 +--- /dev/null ++++ b/lib/securec/LICENSE +@@ -0,0 +1,124 @@ ++木兰宽松许可证, 第2版 ++ ++2020年1月 http://license.coscl.org.cn/MulanPSL2 ++ ++您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束: ++ ++0. 定义 ++ ++“软件” 是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。 ++ ++“贡献” 是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。 ++ ++“贡献者” 是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。 ++ ++“法人实体” 是指提交贡献的机构及其“关联实体”。 ++ ++“关联实体” 是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。 ++ ++1. 授予版权许可 ++ ++每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可以复制、使用、修改、分发其“贡献”,不论修改与否。 ++ ++2. 授予专利许可 ++ ++每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权行动之日终止。 ++ ++3. 无商标许可 ++ ++“本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定的声明义务而必须使用除外。 ++ ++4. 分发限制 ++ ++您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。 ++ ++5. 免责声明与责任限制 ++ ++“软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于何种法律理论,即使其曾被建议有此种损失的可能性。 ++ ++6. 
语言 ++ ++“本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文版为准。 ++ ++条款结束 ++ ++如何将木兰宽松许可证,第2版,应用到您的软件 ++ ++如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步: ++ ++1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字; ++ ++2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中; ++ ++3, 请将如下声明文本放入每个源文件的头部注释中。 ++ ++Copyright (c) [Year] [name of copyright holder] ++[Software Name] is licensed under Mulan PSL v2. ++You can use this software according to the terms and conditions of the Mulan PSL v2. ++You may obtain a copy of Mulan PSL v2 at: ++ http://license.coscl.org.cn/MulanPSL2 ++THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++See the Mulan PSL v2 for more details. ++Mulan Permissive Software License,Version 2 ++Mulan Permissive Software License,Version 2 (Mulan PSL v2) ++ ++January 2020 http://license.coscl.org.cn/MulanPSL2 ++ ++Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v2 (this License) with the following terms and conditions: ++ ++0. Definition ++ ++Software means the program and related documents which are licensed under this License and comprise all Contribution(s). ++ ++Contribution means the copyrightable work licensed by a particular Contributor under this License. ++ ++Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. ++ ++Legal Entity means the entity making a Contribution and all its Affiliates. ++ ++Affiliates means entities that control, are controlled by, or are under common control with the acting entity under this License, 'control' means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. ++ ++1. Grant of Copyright License ++ ++Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. ++ ++2. Grant of Patent License ++ ++Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution, where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed. The patent license shall not apply to any modification of the Contribution, and any other combination which includes the Contribution. If you or your Affiliates directly or indirectly institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken. ++ ++3. 
No Trademark License
++
++No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in section 4.
++
++4. Distribution Restriction
++
++You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software.
++
++5. Disclaimer of Warranty and Limitation of Liability
++
++THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. IN NO EVENT SHALL ANY CONTRIBUTOR OR COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO MATTER HOW IT'S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
++
++6. Language
++
++THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION SHALL PREVAIL.
++
++END OF THE TERMS AND CONDITIONS
++
++How to Apply the Mulan Permissive Software License,Version 2 (Mulan PSL v2) to Your Software
++
++To apply the Mulan PSL v2 to your work, for easy identification by recipients, you are suggested to complete following three steps:
++
++Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner;
++Create a file named "LICENSE" which contains the whole context of this License in the first directory of your software package;
++Attach the statement to the appropriate annotated syntax at the beginning of each source file.
++Copyright (c) [Year] [name of copyright holder]
++[Software Name] is licensed under Mulan PSL v2.
++You can use this software according to the terms and conditions of the Mulan PSL v2.
++You may obtain a copy of Mulan PSL v2 at:
++         http://license.coscl.org.cn/MulanPSL2
++THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++See the Mulan PSL v2 for more details.
+\ No newline at end of file
+diff --git a/lib/securec/Makefile b/lib/securec/Makefile
+new file mode 100644
+index 000000000..9d804337a
+--- /dev/null
++++ b/lib/securec/Makefile
+@@ -0,0 +1 @@
++obj-y += src/
+diff --git a/lib/securec/README.en.md b/lib/securec/README.en.md
+new file mode 100644
+index 000000000..60c477fe8
+--- /dev/null
++++ b/lib/securec/README.en.md
+@@ -0,0 +1,59 @@
++# libboundscheck
++
++#### Description
++
++- Following the C11 Annex K (bounds-checking interfaces) standard, common memory/string operation functions such as memcpy_s and strcpy_s are selected and implemented (see the usage sketch below).
++
++- Other standard functions in C11 Annex K will be analyzed in the future and implemented in this organization if necessary.
++
++- This organization handles the release, update, and maintenance of bounds_checking_function.
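++
++For example, a minimal usage sketch (assumes the library's public securec.h header and the EOK success code it defines):
++
++```
++#include <stdio.h>
++#include "securec.h"
++
++int main(void)
++{
++    char dst[8];
++    /* count exceeds destMax, so the call fails instead of overflowing dst */
++    if (memcpy_s(dst, sizeof(dst), "0123456789", 10) != EOK) {
++        printf("copy rejected: destination too small\n");
++    }
++    return 0;
++}
++```
++
++Build it against the library as described under "How to use" below.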
++ ++#### Function List ++ ++- memcpy_s ++- wmemcpy_s ++- memmove_s ++- wmemmove_s ++- memset_s ++- strcpy_s ++- wcscpy_s ++- strncpy_s ++- wcsncpy_s ++- strcat_s ++- wcscat_s ++- strncat_s ++- wcsncat_s ++- strtok_s ++- wcstok_s ++- sprintf_s ++- swprintf_s ++- vsprintf_s ++- vswprintf_s ++- snprintf_s ++- vsnprintf_s ++- scanf_s ++- wscanf_s ++- vscanf_s ++- vwscanf_s ++- fscanf_s ++- fwscanf_s ++- vfscanf_s ++- vfwscanf_s ++- sscanf_s ++- swscanf_s ++- vsscanf_s ++- vswscanf_s ++- gets_s ++ ++ ++#### Build ++ ++``` ++CC=gcc make ++``` ++The generated Dynamic library libboundscheck.so is stored in the newly created directory lib. ++ ++#### How to use ++1. Copy the libboundscheck.so to the library file directory, for example: "/usr/local/lib/". ++ ++2. To use the libboundscheck, add the “-lboundscheck” parameters to the compiler, for example: “gcc -g -o test test.c -lboundscheck”. +\ No newline at end of file +diff --git a/lib/securec/README.md b/lib/securec/README.md +new file mode 100644 +index 000000000..c16cbb176 +--- /dev/null ++++ b/lib/securec/README.md +@@ -0,0 +1,56 @@ ++# libboundscheck ++ ++#### 介绍 ++- 遵循C11 Annex K (Bounds-checking interfaces)的标准,选取并实现了常见的内存/字符串操作类的函数,如memcpy_s、strcpy_s等函数。 ++- 未来将分析C11 Annex K中的其他标准函数,如果有必要,将在该组织中实现。 ++- 处理边界检查函数的版本发布、更新以及维护。 ++ ++#### 函数清单 ++ ++- memcpy_s ++- wmemcpy_s ++- memmove_s ++- wmemmove_s ++- memset_s ++- strcpy_s ++- wcscpy_s ++- strncpy_s ++- wcsncpy_s ++- strcat_s ++- wcscat_s ++- strncat_s ++- wcsncat_s ++- strtok_s ++- wcstok_s ++- sprintf_s ++- swprintf_s ++- vsprintf_s ++- vswprintf_s ++- snprintf_s ++- vsnprintf_s ++- scanf_s ++- wscanf_s ++- vscanf_s ++- vwscanf_s ++- fscanf_s ++- fwscanf_s ++- vfscanf_s ++- vfwscanf_s ++- sscanf_s ++- swscanf_s ++- vsscanf_s ++- vswscanf_s ++- gets_s ++ ++#### 构建方法 ++ ++运行命令 ++``` ++make CC=gcc ++``` ++生成的动态库libboundscheck.so存放在新创建的lib目录下。 ++ ++#### 使用方法 ++1. 将构建生成的动态库libboundscheck.so放到库文件目录下,例如:"/usr/local/lib/"。 ++ ++2. 为使用libboundscheck,编译程序时需增加编译参数"-lboundscheck",例如:"gcc -g -o test test.c -lboundscheck"。 +\ No newline at end of file +diff --git a/lib/securec/src/Makefile b/lib/securec/src/Makefile +new file mode 100644 +index 000000000..42554f880 +--- /dev/null ++++ b/lib/securec/src/Makefile +@@ -0,0 +1,17 @@ ++obj-y += securecutil.o ++obj-y += strncpy_s.o ++obj-y += vsprintf_s.o ++obj-y += snprintf_s.o ++obj-y += memcpy_s.o ++obj-y += memmove_s.o ++obj-y += strcat_s.o ++obj-y += secureprintoutput_a.o ++obj-y += memset_s.o ++obj-y += strtok_s.o ++obj-y += sprintf_s.o ++obj-y += strncat_s.o ++obj-y += strcpy_s.o ++obj-y += vsnprintf_s.o ++obj-y += secureinput_a.o ++obj-y += vsscanf_s.o ++obj-y += sscanf_s.o +diff --git a/lib/securec/src/input.inl b/lib/securec/src/input.inl +new file mode 100644 +index 000000000..5880d45df +--- /dev/null ++++ b/lib/securec/src/input.inl +@@ -0,0 +1,2229 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Used by secureinput_a.c and secureinput_w.c to include. 
++ * This file provides a template function for ANSI and UNICODE compiling by
++ * different type definitions. The functions SecInputS and
++ * SecInputSW provide the internal implementation for the scanf family APIs, such as sscanf_s and fscanf_s.
++ * Create: 2014-02-25
++ * Notes: The formatted input processing results of integers on different platforms are different.
++ */
++/*
++ * [Standardize-exceptions] Use unsafe function: Performance-sensitive
++ * [reason] Always used in the performance critical path,
++ * and sufficient input validation is performed before calling
++ */
++#ifndef INPUT_INL_5D13A042_DC3F_4ED9_A8D1_882811274C27
++#define INPUT_INL_5D13A042_DC3F_4ED9_A8D1_882811274C27
++
++#if SECUREC_IN_KERNEL
++#if !defined(SECUREC_CTYPE_MACRO_ADAPT)
++#include <linux/ctype.h>
++#endif
++#else
++#if !defined(SECUREC_SYSAPI4VXWORKS) && !defined(SECUREC_CTYPE_MACRO_ADAPT)
++//#include <ctype.h>
++#ifdef SECUREC_FOR_WCHAR
++//#include <wctype.h> /* For iswspace */
++#endif
++#endif
++#endif
++
++#ifndef EOF
++#define EOF (-1)
++#endif
++
++#define SECUREC_NUM_WIDTH_SHORT 0
++#define SECUREC_NUM_WIDTH_INT 1
++#define SECUREC_NUM_WIDTH_LONG 2
++#define SECUREC_NUM_WIDTH_LONG_LONG 3 /* Also long double */
++
++#define SECUREC_BUFFERED_BLOK_SIZE 1024U
++
++#if defined(SECUREC_VXWORKS_PLATFORM) && !defined(va_copy) && !defined(__va_copy)
++/* The name is the same as the system macro. */
++#define __va_copy(dest, src) do { \
++    size_t destSize_ = (size_t)sizeof(dest); \
++    size_t srcSize_ = (size_t)sizeof(src); \
++    if (destSize_ != srcSize_) { \
++        SECUREC_MEMCPY_WARP_OPT((dest), (src), sizeof(va_list)); \
++    } else { \
++        SECUREC_MEMCPY_WARP_OPT(&(dest), &(src), sizeof(va_list)); \
++    } \
++} SECUREC_WHILE_ZERO
++#endif
++
++#define SECUREC_MULTI_BYTE_MAX_LEN 6
++
++/* Compatibility macro name cannot be modified */
++#ifndef UNALIGNED
++#if !(defined(_M_IA64)) && !(defined(_M_AMD64))
++#define UNALIGNED
++#else
++#define UNALIGNED __unaligned
++#endif
++#endif
++
++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))
++/* Max 64bit value is 0xffffffffffffffff */
++#define SECUREC_MAX_64BITS_VALUE 18446744073709551615ULL
++#define SECUREC_MAX_64BITS_VALUE_DIV_TEN 1844674407370955161ULL
++#define SECUREC_MAX_64BITS_VALUE_CUT_LAST_DIGIT 18446744073709551610ULL
++#define SECUREC_MIN_64BITS_NEG_VALUE 9223372036854775808ULL
++#define SECUREC_MAX_64BITS_POS_VALUE 9223372036854775807ULL
++#define SECUREC_MIN_32BITS_NEG_VALUE 2147483648UL
++#define SECUREC_MAX_32BITS_POS_VALUE 2147483647UL
++#define SECUREC_MAX_32BITS_VALUE 4294967295UL
++#define SECUREC_MAX_32BITS_VALUE_INC 4294967296UL
++#define SECUREC_MAX_32BITS_VALUE_DIV_TEN 429496729UL
++#define SECUREC_LONG_BIT_NUM ((unsigned int)(sizeof(long) << 3U))
++/* Use ULL to clean up cl6x compilation alerts */
++#define SECUREC_MAX_LONG_POS_VALUE ((unsigned long)(1ULL << (SECUREC_LONG_BIT_NUM - 1)) - 1)
++#define SECUREC_MIN_LONG_NEG_VALUE ((unsigned long)(1ULL << (SECUREC_LONG_BIT_NUM - 1)))
++
++/* Convert to long long to clean up cl6x compilation alerts */
++#define SECUREC_LONG_HEX_BEYOND_MAX(number) (((unsigned long long)(number) >> (SECUREC_LONG_BIT_NUM - 4U)) > 0)
++#define SECUREC_LONG_OCTAL_BEYOND_MAX(number) (((unsigned long long)(number) >> (SECUREC_LONG_BIT_NUM - 3U)) > 0)
++
++#define SECUREC_QWORD_HEX_BEYOND_MAX(number) (((number) >> (64U - 4U)) > 0)
++#define SECUREC_QWORD_OCTAL_BEYOND_MAX(number) (((number) >> (64U - 3U)) > 0)
++
++#define SECUREC_LP64_BIT_WIDTH 64
++#define SECUREC_LP32_BIT_WIDTH 32
++
++#define SECUREC_CONVERT_IS_SIGNED(conv) ((conv) == 'd' || (conv) == 'i')
++#endif
++
++#define SECUREC_BRACE '{' /* [ to { */
++#define SECUREC_FILED_WIDTH_ENOUGH(spec) ((spec)->widthSet == 0 || (spec)->width > 0)
++#define SECUREC_FILED_WIDTH_DEC(spec) do { \
++    if ((spec)->widthSet != 0) { \
++        --(spec)->width; \
++    } \
++} SECUREC_WHILE_ZERO
++
++#ifdef SECUREC_FOR_WCHAR
++/* Bits for all wchar, size is 65536/8, only supports wide characters with a maximum length of two bytes */
++#define SECUREC_BRACKET_TABLE_SIZE 8192
++#define SECUREC_EOF WEOF
++#define SECUREC_MB_LEN 16 /* Max. bytes in a multibyte char, see MB_LEN_MAX */
++#else
++/* Bits for all char, size is 256/8 */
++#define SECUREC_BRACKET_TABLE_SIZE 32
++#define SECUREC_EOF EOF
++#endif
++
++#if SECUREC_HAVE_WCHART
++#define SECUREC_ARRAY_WIDTH_IS_WRONG(spec) ((spec).arrayWidth == 0 || \
++    ((spec).isWCharOrLong <= 0 && (spec).arrayWidth > SECUREC_STRING_MAX_LEN) || \
++    ((spec).isWCharOrLong > 0 && (spec).arrayWidth > SECUREC_WCHAR_STRING_MAX_LEN))
++#else
++#define SECUREC_ARRAY_WIDTH_IS_WRONG(spec) ((spec).arrayWidth == 0 || (spec).arrayWidth > SECUREC_STRING_MAX_LEN)
++#endif
++
++#ifdef SECUREC_ON_64BITS
++/* Use 0xffffffffUL mask to pass integer as array length */
++#define SECUREC_GET_ARRAYWIDTH(argList) (((size_t)va_arg((argList), size_t)) & 0xffffffffUL)
++#else /* !SECUREC_ON_64BITS */
++#define SECUREC_GET_ARRAYWIDTH(argList) ((size_t)va_arg((argList), size_t))
++#endif
++
++typedef struct {
++#ifdef SECUREC_FOR_WCHAR
++    unsigned char *table; /* Default NULL */
++#else
++    unsigned char table[SECUREC_BRACKET_TABLE_SIZE]; /* Array length is large enough in application scenarios */
++#endif
++    unsigned char mask; /* Default 0 */
++} SecBracketTable;
++
++#ifdef SECUREC_FOR_WCHAR
++#define SECUREC_INIT_BRACKET_TABLE { NULL, 0 }
++#else
++#define SECUREC_INIT_BRACKET_TABLE { {0}, 0 }
++#endif
++
++#if SECUREC_ENABLE_SCANF_FLOAT
++typedef struct {
++    size_t floatStrTotalLen; /* Initialization must be the length of the buffer in characters */
++    size_t floatStrUsedLen; /* Stores the float string length */
++    SecChar *floatStr; /* Initialization must point to the buffer */
++    SecChar *allocatedFloatStr; /* Initialization must be NULL; stores the allocated pointer */
++    SecChar buffer[SECUREC_FLOAT_BUFSIZE + 1];
++} SecFloatSpec;
++#endif
++
++#define SECUREC_NUMBER_STATE_DEFAULT 0U
++#define SECUREC_NUMBER_STATE_STARTED 1U
++
++typedef struct {
++    SecInt ch; /* Char read from input */
++    int charCount; /* Number of characters processed */
++    void *argPtr; /* Variable parameter pointer, points to the end of the string */
++    size_t arrayWidth; /* Length of the pointer variable parameter, in characters */
++    SecUnsignedInt64 number64; /* Stores the input number64 value */
++    unsigned long number; /* Stores the input number32 value */
++    int numberWidth; /* 0 = SHORT, 1 = int, > 1 long or L_DOUBLE */
++    int numberArgType; /* 1 for 64-bit integer, 0 otherwise; used as the decode function index */
++    unsigned int negative; /* 0 is positive */
++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX)))
++    unsigned int beyondMax; /* Non-zero means beyond */
++#endif
++    unsigned int numberState; /* Identifies whether number processing has started; 1 means a number can be input */
++    int width; /* Width number in format */
++    int widthSet; /* 0 means no width was set in the format */
++    int convChr; /* Lowercase format conversion characters */
++    int oriConvChr; /* Stores the original format conversion; convChr may change when parsing integers */
++    signed char isWCharOrLong; /* -1/0 not wchar or long, 1 for wchar or long */
++    unsigned char suppress; /* 0 means no %* in the format */
++} SecScanSpec;
++
++#ifdef SECUREC_FOR_WCHAR
++#define SECUREC_GETC fgetwc
++#define SECUREC_UN_GETC ungetwc
++/* Only supports wide characters with a maximum length of two bytes in format string */
++#define SECUREC_BRACKET_CHAR_MASK 0xffffU
++#else
++#define SECUREC_GETC fgetc
++#define SECUREC_UN_GETC ungetc
++#define SECUREC_BRACKET_CHAR_MASK 0xffU
++#endif
++
++#define SECUREC_CHAR_SIZE ((unsigned int)(sizeof(SecChar)))
++/* To avoid 648, mask high bit: 0x00ffffff 0x0000ffff or 0x00000000 */
++#define SECUREC_CHAR_MASK_HIGH (((((((((unsigned int)(-1) >> SECUREC_CHAR_SIZE) >> SECUREC_CHAR_SIZE) >> \
++    SECUREC_CHAR_SIZE) >> SECUREC_CHAR_SIZE) >> \
++    SECUREC_CHAR_SIZE) >> SECUREC_CHAR_SIZE) >> \
++    SECUREC_CHAR_SIZE) >> SECUREC_CHAR_SIZE)
++
++/* For char the mask is 0xff; for wchar_t it is 0xffff or 0xffffffff. */
++#define SECUREC_CHAR_MASK (~((((((((((unsigned int)(-1) & SECUREC_CHAR_MASK_HIGH) << \
++    SECUREC_CHAR_SIZE) << SECUREC_CHAR_SIZE) << \
++    SECUREC_CHAR_SIZE) << SECUREC_CHAR_SIZE) << \
++    SECUREC_CHAR_SIZE) << SECUREC_CHAR_SIZE) << \
++    SECUREC_CHAR_SIZE) << SECUREC_CHAR_SIZE))
++
++/* Since wchar_t has multiple bytes, use sizeof */
++#define SECUREC_GET_CHAR(stream, outCh) do { \
++    if ((stream)->count >= sizeof(SecChar)) { \
++        *(outCh) = (SecInt)(SECUREC_CHAR_MASK & \
++            (unsigned int)(int)(*((const SecChar *)(const void *)(stream)->cur))); \
++        (stream)->cur += sizeof(SecChar); \
++        (stream)->count -= sizeof(SecChar); \
++    } else { \
++        *(outCh) = SECUREC_EOF; \
++    } \
++} SECUREC_WHILE_ZERO
++
++#define SECUREC_UN_GET_CHAR(stream) do { \
++    if ((stream)->cur > (stream)->base) { \
++        (stream)->cur -= sizeof(SecChar); \
++        (stream)->count += sizeof(SecChar); \
++    } \
++} SECUREC_WHILE_ZERO
++
++/* Convert wchar_t to int and then to unsigned int to clear the data conversion warning */
++#define SECUREC_TO_LOWERCASE(chr) ((int)((unsigned int)(int)(chr) | (unsigned int)('a' - 'A')))
++
++/* Record a flag for each bit */
++#define SECUREC_BRACKET_INDEX(x) ((unsigned int)(x) >> 3U)
++#define SECUREC_BRACKET_VALUE(x) ((unsigned char)(1U << ((unsigned int)(x) & 7U)))
++#if SECUREC_IN_KERNEL
++#define SECUREC_CONVERT_IS_UNSIGNED(conv) ((conv) == 'x' || (conv) == 'o' || (conv) == 'u')
++#endif
++
++/*
++ * Set char in %[xxx] into table, only supports wide characters with a maximum length of two bytes
++ */
++SECUREC_INLINE void SecBracketSetBit(unsigned char *table, SecUnsignedChar ch)
++{
++    unsigned int tableIndex = SECUREC_BRACKET_INDEX(((unsigned int)(int)ch & SECUREC_BRACKET_CHAR_MASK));
++    unsigned int tableValue = SECUREC_BRACKET_VALUE(((unsigned int)(int)ch & SECUREC_BRACKET_CHAR_MASK));
++    /* Do not use |= to optimize this code; it would cause a compiler warning */
++    table[tableIndex] = (unsigned char)(table[tableIndex] | tableValue);
++}
++
++SECUREC_INLINE void
SecBracketSetBitRange(unsigned char *table, SecUnsignedChar startCh, SecUnsignedChar endCh)
++{
++    SecUnsignedChar expCh;
++    /* %[a-z] %[a-a] For the format %[a-\xff] the end is 0xFF; the condition (expCh <= endChar) would cause a dead loop */
++    for (expCh = startCh; expCh < endCh; ++expCh) {
++        SecBracketSetBit(table, expCh);
++    }
++    SecBracketSetBit(table, endCh);
++}
++/*
++ * Determine whether the expression can be satisfied
++ */
++SECUREC_INLINE int SecCanInputForBracket(int convChr, SecInt ch, const SecBracketTable *bracketTable)
++{
++    unsigned int tableIndex = SECUREC_BRACKET_INDEX(((unsigned int)(int)ch & SECUREC_BRACKET_CHAR_MASK));
++    unsigned int tableValue = SECUREC_BRACKET_VALUE(((unsigned int)(int)ch & SECUREC_BRACKET_CHAR_MASK));
++#ifdef SECUREC_FOR_WCHAR
++    if (((unsigned int)(int)ch & (~(SECUREC_BRACKET_CHAR_MASK))) != 0) {
++        /* The value of the wide character exceeds the size of two bytes */
++        return 0;
++    }
++    return (int)(convChr == SECUREC_BRACE &&
++        (((unsigned int)bracketTable->table[tableIndex] ^ (unsigned int)bracketTable->mask) & tableValue) != 0);
++#else
++    return (int)(convChr == SECUREC_BRACE &&
++        (((unsigned int)bracketTable->table[tableIndex] ^ (unsigned int)bracketTable->mask) & tableValue) != 0);
++#endif
++}
++
++/*
++ * String input ends when a blank character is encountered
++ */
++SECUREC_INLINE int SecCanInputString(int convChr, SecInt ch)
++{
++    return (int)(convChr == 's' &&
++        (!(ch >= SECUREC_CHAR('\t') && ch <= SECUREC_CHAR('\r')) && ch != SECUREC_CHAR(' ')));
++}
++
++/*
++ * Can input a character when format is %c
++ */
++SECUREC_INLINE int SecCanInputCharacter(int convChr)
++{
++    return (int)(convChr == 'c');
++}
++
++/*
++ * Determine if it is a 64-bit pointer function.
++ * Return 0 if not, 1 for a 64-bit pointer
++ */
++SECUREC_INLINE int SecNumberArgType(size_t sizeOfVoidStar)
++{
++    /* Pointer size is 4 or 8; on a 64-bit system the value is not 0 */
++    /* To clear e778 */
++    if ((sizeOfVoidStar & sizeof(SecInt64)) != 0) {
++        return 1;
++    }
++    return 0;
++}
++SECUREC_INLINE int SecIsDigit(SecInt ch);
++SECUREC_INLINE int SecIsXdigit(SecInt ch);
++SECUREC_INLINE int SecIsSpace(SecInt ch);
++SECUREC_INLINE SecInt SecSkipSpaceChar(SecFileStream *stream, int *counter);
++SECUREC_INLINE SecInt SecGetChar(SecFileStream *stream, int *counter);
++SECUREC_INLINE void SecUnGetChar(SecInt ch, SecFileStream *stream, int *counter);
++
++#if SECUREC_ENABLE_SCANF_FLOAT
++
++/*
++ * Convert a floating point string to a floating point number
++ */
++SECUREC_INLINE int SecAssignNarrowFloat(const char *floatStr, const SecScanSpec *spec)
++{
++    char *endPtr = NULL;
++    double d;
++#if SECUREC_SUPPORT_STRTOLD
++    if (spec->numberWidth == SECUREC_NUM_WIDTH_LONG_LONG) {
++        long double d2 = strtold(floatStr, &endPtr);
++        if (endPtr == floatStr) {
++            return -1;
++        }
++        *(long double UNALIGNED *)(spec->argPtr) = d2;
++        return 0;
++    }
++#endif
++    d = strtod(floatStr, &endPtr);
++    /* Cannot detect whether endPtr points to the end of floatStr, because strtod handles only two characters for 1.E */
++    if (endPtr == floatStr) {
++        return -1;
++    }
++    if (spec->numberWidth > SECUREC_NUM_WIDTH_INT) {
++        *(double UNALIGNED *)(spec->argPtr) = (double)d;
++    } else {
++        *(float UNALIGNED *)(spec->argPtr) = (float)d;
++    }
++    return 0;
++}
++
++#ifdef SECUREC_FOR_WCHAR
++/*
++ * Convert a floating point wchar string to a floating point number
++ * Success returns 0
++ */
++SECUREC_INLINE int SecAssignWideFloat(const SecFloatSpec *floatSpec, const SecScanSpec *spec)
++{
++    int retVal;
++    /* Convert float string */
++    size_t mbsLen;
++    size_t tempFloatStrLen = (size_t)(floatSpec->floatStrUsedLen + 1) * sizeof(wchar_t);
++    char *tempFloatStr = (char *)SECUREC_MALLOC(tempFloatStrLen);
++    if (tempFloatStr == NULL) {
++        return -1;
++    }
++    tempFloatStr[0] = '\0';
++    SECUREC_MASK_MSVC_CRT_WARNING
++    mbsLen = wcstombs(tempFloatStr, floatSpec->floatStr, tempFloatStrLen - 1);
++    SECUREC_END_MASK_MSVC_CRT_WARNING
++    /* This condition requires that mbsLen is not -1 */
++    if (mbsLen >= tempFloatStrLen) {
++        SECUREC_FREE(tempFloatStr);
++        return -1;
++    }
++    tempFloatStr[mbsLen] = '\0';
++    retVal = SecAssignNarrowFloat(tempFloatStr, spec);
++    SECUREC_FREE(tempFloatStr);
++    return retVal;
++}
++#endif
++
++SECUREC_INLINE int SecAssignFloat(const SecFloatSpec *floatSpec, const SecScanSpec *spec)
++{
++#ifdef SECUREC_FOR_WCHAR
++    return SecAssignWideFloat(floatSpec, spec);
++#else
++    return SecAssignNarrowFloat(floatSpec->floatStr, spec);
++#endif
++}
++
++/*
++ * Init SecFloatSpec before parsing the format
++ */
++SECUREC_INLINE void SecInitFloatSpec(SecFloatSpec *floatSpec)
++{
++    floatSpec->floatStr = floatSpec->buffer;
++    floatSpec->allocatedFloatStr = NULL;
++    floatSpec->floatStrTotalLen = sizeof(floatSpec->buffer) / sizeof(floatSpec->buffer[0]);
++    floatSpec->floatStrUsedLen = 0;
++}
++
++SECUREC_INLINE void SecFreeFloatSpec(SecFloatSpec *floatSpec, int *doneCount)
++{
++    /* 2014.3.6 add, clear the stack data */
++    if (memset_s(floatSpec->buffer, sizeof(floatSpec->buffer), 0, sizeof(floatSpec->buffer)) != EOK) {
++        *doneCount = 0; /* This code is just to meet the coding requirements */
++    }
++    /* The pFloatStr may have been allocated in the SecExtendFloatLen function; clear and free it */
++    if (floatSpec->allocatedFloatStr != NULL) {
++        size_t bufferSize = floatSpec->floatStrTotalLen * sizeof(SecChar);
++        if (memset_s(floatSpec->allocatedFloatStr, bufferSize, 0, bufferSize) != EOK) {
++            *doneCount = 0; /* This code is just to meet the coding requirements */
++        }
++        SECUREC_FREE(floatSpec->allocatedFloatStr);
++        floatSpec->allocatedFloatStr = NULL;
++        floatSpec->floatStr = NULL;
++    }
++}
++
++/*
++ * Splice floating point string
++ * Return 0 OK
++ */
++SECUREC_INLINE int SecExtendFloatLen(SecFloatSpec *floatSpec)
++{
++    if (floatSpec->floatStrUsedLen >= floatSpec->floatStrTotalLen) {
++        /* Buffer size is len x sizeof(SecChar) */
++        size_t oriSize = floatSpec->floatStrTotalLen * sizeof(SecChar);
++        /* Add one character to clear tool warning */
++        size_t nextSize = (oriSize * 2) + sizeof(SecChar); /* Multiply by 2 to extend the buffer size */
++
++        /* Prevents integer overflow; the maximum length of SECUREC_MAX_WIDTH_LEN is enough */
++        if (nextSize <= (size_t)SECUREC_MAX_WIDTH_LEN) {
++            void *nextBuffer = (void *)SECUREC_MALLOC(nextSize);
++            if (nextBuffer == NULL) {
++                return -1;
++            }
++            if (memcpy_s(nextBuffer, nextSize, floatSpec->floatStr, oriSize) != EOK) {
++                SECUREC_FREE(nextBuffer); /* This is dead code, just to meet the coding requirements */
++                return -1;
++            }
++            /* Clear old buffer memory */
++            if (memset_s(floatSpec->floatStr, oriSize, 0, oriSize) != EOK) {
++                SECUREC_FREE(nextBuffer); /* This is dead code, just to meet the coding requirements */
++                return -1;
++            }
++            /* Free old allocated buffer */
++            if (floatSpec->allocatedFloatStr != NULL) {
++                SECUREC_FREE(floatSpec->allocatedFloatStr);
++            }
++            floatSpec->allocatedFloatStr = (SecChar *)(nextBuffer); /* Used to clear the free-on-stack warning */
++            floatSpec->floatStr = (SecChar *)(nextBuffer);
++            floatSpec->floatStrTotalLen = nextSize / sizeof(SecChar); /* Get buffer total length in characters */
++            return 0;
++        }
++        return -1; /* Next size is beyond max */
++    }
++    return 0;
++}
++
++/* Do not use localeconv()->decimal_point if only '.' is supported */
++SECUREC_INLINE int SecIsFloatDecimal(SecChar ch)
++{
++    return (int)(ch == SECUREC_CHAR('.'));
++}
++
++SECUREC_INLINE int SecInputFloatSign(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec)
++{
++    if (!SECUREC_FILED_WIDTH_ENOUGH(spec)) {
++        return 0;
++    }
++    spec->ch = SecGetChar(stream, &(spec->charCount));
++    if (spec->ch == SECUREC_CHAR('+') || spec->ch == SECUREC_CHAR('-')) {
++        SECUREC_FILED_WIDTH_DEC(spec); /* Make sure the count after un-get char is correct */
++        if (spec->ch == SECUREC_CHAR('-')) {
++            floatSpec->floatStr[floatSpec->floatStrUsedLen] = SECUREC_CHAR('-');
++            ++floatSpec->floatStrUsedLen;
++            if (SecExtendFloatLen(floatSpec) != 0) {
++                return -1;
++            }
++        }
++    } else {
++        SecUnGetChar(spec->ch, stream, &(spec->charCount));
++    }
++    return 0;
++}
++
++SECUREC_INLINE int SecInputFloatDigit(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec)
++{
++    /* Now get integral part */
++    while (SECUREC_FILED_WIDTH_ENOUGH(spec)) {
++        spec->ch = SecGetChar(stream, &(spec->charCount));
++        if (SecIsDigit(spec->ch) == 0) {
++            SecUnGetChar(spec->ch, stream, &(spec->charCount));
++            return 0;
++        }
++        SECUREC_FILED_WIDTH_DEC(spec); /* Must be behind un-get char, otherwise the logic is incorrect */
++        spec->numberState = SECUREC_NUMBER_STATE_STARTED;
++        floatSpec->floatStr[floatSpec->floatStrUsedLen] = (SecChar)spec->ch;
++        ++floatSpec->floatStrUsedLen;
++        if (SecExtendFloatLen(floatSpec) != 0) {
++            return -1;
++        }
++    }
++    return 0;
++}
++
++/*
++* Scan value of exponent.
++* Return 0 OK
++*/
++SECUREC_INLINE int SecInputFloatE(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec)
++{
++    if (SecInputFloatSign(stream, spec, floatSpec) == -1) {
++        return -1;
++    }
++    if (SecInputFloatDigit(stream, spec, floatSpec) != 0) {
++        return -1;
++    }
++    return 0;
++}
++
++SECUREC_INLINE int SecInputFloatFractional(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec)
++{
++    if (SECUREC_FILED_WIDTH_ENOUGH(spec)) {
++        spec->ch = SecGetChar(stream, &(spec->charCount));
++        if (SecIsFloatDecimal((SecChar)spec->ch) == 0) {
++            SecUnGetChar(spec->ch, stream, &(spec->charCount));
++            return 0;
++        }
++        SECUREC_FILED_WIDTH_DEC(spec); /* Must be behind un-get char, otherwise the logic is incorrect */
++        /* Now check for decimal */
++        floatSpec->floatStr[floatSpec->floatStrUsedLen] = (SecChar)spec->ch;
++        ++floatSpec->floatStrUsedLen;
++        if (SecExtendFloatLen(floatSpec) != 0) {
++            return -1;
++        }
++        if (SecInputFloatDigit(stream, spec, floatSpec) != 0) {
++            return -1;
++        }
++    }
++    return 0;
++}
++
++SECUREC_INLINE int SecInputFloatExponent(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec)
++{
++    /* Now get exponent part */
++    if (spec->numberState == SECUREC_NUMBER_STATE_STARTED && SECUREC_FILED_WIDTH_ENOUGH(spec)) {
++        spec->ch = SecGetChar(stream, &(spec->charCount));
++        if (spec->ch != SECUREC_CHAR('e') && spec->ch != SECUREC_CHAR('E')) {
++            SecUnGetChar(spec->ch, stream, &(spec->charCount));
++            return 0;
++        }
++        SECUREC_FILED_WIDTH_DEC(spec); /* Must be behind un-get char, otherwise the logic is incorrect */
++        floatSpec->floatStr[floatSpec->floatStrUsedLen] = SECUREC_CHAR('e');
++        ++floatSpec->floatStrUsedLen;
++        if (SecExtendFloatLen(floatSpec) != 0) {
++            return -1;
++        }
++        if (SecInputFloatE(stream, spec, floatSpec) != 0) {
++            return -1;
++        }
++    }
++    return 0;
++}
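++
++/*
++ * Note: SecInputFloat below consumes the strict grammar
++ * [sign] digits [ '.' digits ] [ ('e'|'E') [sign] digits ]
++ * by chaining the helpers above in order; each helper pushes back
++ * (SecUnGetChar) the first character that does not belong to its part,
++ * so the next stage re-reads the stream from that character.
++ */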
++ ++/* ++* Scan %f. ++* Return 0 OK ++*/ ++SECUREC_INLINE int SecInputFloat(SecFileStream *stream, SecScanSpec *spec, SecFloatSpec *floatSpec) ++{ ++ floatSpec->floatStrUsedLen = 0; ++ ++ /* The following code sequence is strict */ ++ if (SecInputFloatSign(stream, spec, floatSpec) != 0) { ++ return -1; ++ } ++ if (SecInputFloatDigit(stream, spec, floatSpec) != 0) { ++ return -1; ++ } ++ if (SecInputFloatFractional(stream, spec, floatSpec) != 0) { ++ return -1; ++ } ++ if (SecInputFloatExponent(stream, spec, floatSpec) != 0) { ++ return -1; ++ } ++ ++ /* Make sure have a string terminator, buffer is large enough */ ++ floatSpec->floatStr[floatSpec->floatStrUsedLen] = SECUREC_CHAR('\0'); ++ if (spec->numberState == SECUREC_NUMBER_STATE_STARTED) { ++ return 0; ++ } ++ return -1; ++} ++#endif ++ ++#if (!defined(SECUREC_FOR_WCHAR) && SECUREC_HAVE_WCHART && SECUREC_HAVE_MBTOWC) || \ ++ (!defined(SECUREC_FOR_WCHAR) && defined(SECUREC_COMPATIBLE_VERSION)) ++/* only multi-bytes string need isleadbyte() function */ ++SECUREC_INLINE int SecIsLeadByte(SecInt ch) ++{ ++ unsigned int c = (unsigned int)ch; ++#if !(defined(_MSC_VER) || defined(_INC_WCTYPE)) ++ return (int)(c & 0x80U); /* Use bitwise operation to check if the most significant bit is 1 */ ++#else ++ return (int)isleadbyte((int)(c & 0xffU)); /* Use bitwise operations to limit character values to valid ranges */ ++#endif ++} ++#endif ++ ++/* ++ * Parsing whether it is a wide character ++ */ ++SECUREC_INLINE void SecUpdateWcharFlagByType(SecUnsignedChar ch, SecScanSpec *spec) ++{ ++ if (spec->isWCharOrLong != 0) { ++ /* Wide character identifiers have been explicitly set by l or h flag */ ++ return; ++ } ++ ++ /* Set default flag */ ++#if defined(SECUREC_FOR_WCHAR) && defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ spec->isWCharOrLong = 1; /* On windows wide char version %c %s %[ is wide char */ ++#else ++ spec->isWCharOrLong = -1; /* On linux all version %c %s %[ is multi char */ ++#endif ++ ++ if (ch == SECUREC_CHAR('C') || ch == SECUREC_CHAR('S')) { ++#if defined(SECUREC_FOR_WCHAR) && defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ spec->isWCharOrLong = -1; /* On windows wide char version %C %S is multi char */ ++#else ++ spec->isWCharOrLong = 1; /* On linux all version %C %S is wide char */ ++#endif ++ } ++ ++ return; ++} ++/* ++ * Decode %l %ll ++ */ ++SECUREC_INLINE void SecDecodeScanQualifierL(const SecUnsignedChar **format, SecScanSpec *spec) ++{ ++ const SecUnsignedChar *fmt = *format; ++ if (*(fmt + 1) == SECUREC_CHAR('l')) { ++ spec->numberArgType = 1; ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG; ++ ++fmt; ++ } else { ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG; ++#if defined(SECUREC_ON_64BITS) && !(defined(SECUREC_COMPATIBLE_WIN_FORMAT)) ++ /* On window 64 system sizeof long is 32bit */ ++ spec->numberArgType = 1; ++#endif ++ spec->isWCharOrLong = 1; ++ } ++ *format = fmt; ++} ++ ++/* ++ * Decode %I %I43 %I64 %Id %Ii %Io ... 
++ * Set finishFlag to 1 finish Flag ++ */ ++SECUREC_INLINE void SecDecodeScanQualifierI(const SecUnsignedChar **format, SecScanSpec *spec, int *finishFlag) ++{ ++ const SecUnsignedChar *fmt = *format; ++ if ((*(fmt + 1) == SECUREC_CHAR('6')) && ++ (*(fmt + 2) == SECUREC_CHAR('4'))) { /* Offset 2 for I64 */ ++ spec->numberArgType = 1; ++ *format = *format + 2; /* Add 2 to skip I64 point to '4' next loop will inc */ ++ } else if ((*(fmt + 1) == SECUREC_CHAR('3')) && ++ (*(fmt + 2) == SECUREC_CHAR('2'))) { /* Offset 2 for I32 */ ++ *format = *format + 2; /* Add 2 to skip I32 point to '2' next loop will inc */ ++ } else if ((*(fmt + 1) == SECUREC_CHAR('d')) || ++ (*(fmt + 1) == SECUREC_CHAR('i')) || ++ (*(fmt + 1) == SECUREC_CHAR('o')) || ++ (*(fmt + 1) == SECUREC_CHAR('x')) || ++ (*(fmt + 1) == SECUREC_CHAR('X'))) { ++ spec->numberArgType = SecNumberArgType(sizeof(void *)); ++ } else { ++ /* For %I */ ++ spec->numberArgType = SecNumberArgType(sizeof(void *)); ++ *finishFlag = 1; ++ } ++} ++ ++SECUREC_INLINE int SecDecodeScanWidth(const SecUnsignedChar **format, SecScanSpec *spec) ++{ ++ const SecUnsignedChar *fmt = *format; ++ while (SecIsDigit((SecInt)(int)(*fmt)) != 0) { ++ spec->widthSet = 1; ++ if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(spec->width)) { ++ return -1; ++ } ++ spec->width = (int)SECUREC_MUL_TEN((unsigned int)spec->width) + (unsigned char)(*fmt - SECUREC_CHAR('0')); ++ ++fmt; ++ } ++ *format = fmt; ++ return 0; ++} ++ ++/* ++ * Init default flags for each format. do not init ch this variable is context-dependent ++ */ ++SECUREC_INLINE void SecSetDefaultScanSpec(SecScanSpec *spec) ++{ ++ /* The ch and charCount member variables cannot be initialized here */ ++ spec->argPtr = NULL; ++ spec->arrayWidth = 0; ++ spec->number64 = 0; ++ spec->number = 0; ++ spec->numberWidth = SECUREC_NUM_WIDTH_INT; /* 0 = SHORT, 1 = int, > 1 long or L_DOUBLE */ ++ spec->numberArgType = 0; /* 1 for 64-bit integer, 0 otherwise */ ++ spec->width = 0; ++ spec->widthSet = 0; ++ spec->convChr = 0; ++ spec->oriConvChr = 0; ++ spec->isWCharOrLong = 0; ++ spec->suppress = 0; ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ spec->beyondMax = 0; ++#endif ++ spec->negative = 0; ++ spec->numberState = SECUREC_NUMBER_STATE_DEFAULT; ++} ++ ++/* ++ * Decode qualifier %I %L %h ... 
++ * Set finishFlag to 1 finish Flag ++ */ ++SECUREC_INLINE void SecDecodeScanQualifier(const SecUnsignedChar **format, SecScanSpec *spec, int *finishFlag) ++{ ++ switch (**format) { ++ case SECUREC_CHAR('F'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('N'): ++ break; ++ case SECUREC_CHAR('h'): ++ --spec->numberWidth; /* The h for SHORT , hh for CHAR */ ++ spec->isWCharOrLong = -1; ++ break; ++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT ++ case SECUREC_CHAR('j'): ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG; /* For intmax_t or uintmax_t */ ++ spec->numberArgType = 1; ++ break; ++ case SECUREC_CHAR('t'): /* fall-through */ /* FALLTHRU */ ++#endif ++#if SECUREC_IN_KERNEL ++ case SECUREC_CHAR('Z'): /* fall-through */ /* FALLTHRU */ ++#endif ++ case SECUREC_CHAR('z'): ++#ifdef SECUREC_ON_64BITS ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG; ++ spec->numberArgType = 1; ++#else ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG; ++#endif ++ break; ++ case SECUREC_CHAR('L'): /* For long double */ /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('q'): ++ spec->numberWidth = SECUREC_NUM_WIDTH_LONG_LONG; ++ spec->numberArgType = 1; ++ break; ++ case SECUREC_CHAR('l'): ++ SecDecodeScanQualifierL(format, spec); ++ break; ++ case SECUREC_CHAR('w'): ++ spec->isWCharOrLong = 1; ++ break; ++ case SECUREC_CHAR('*'): ++ spec->suppress = 1; ++ break; ++ case SECUREC_CHAR('I'): ++ SecDecodeScanQualifierI(format, spec, finishFlag); ++ break; ++ default: ++ *finishFlag = 1; ++ break; ++ } ++} ++/* ++ * Decode width and qualifier in format ++ */ ++SECUREC_INLINE int SecDecodeScanFlag(const SecUnsignedChar **format, SecScanSpec *spec) ++{ ++ const SecUnsignedChar *fmt = *format; ++ int finishFlag = 0; ++ ++ do { ++ ++fmt; /* First skip % , next seek fmt */ ++ /* May %*6d , so put it inside the loop */ ++ if (SecDecodeScanWidth(&fmt, spec) != 0) { ++ return -1; ++ } ++ SecDecodeScanQualifier(&fmt, spec, &finishFlag); ++ } while (finishFlag == 0); ++ *format = fmt; ++ return 0; ++} ++ ++/* ++ * Judging whether a zeroing buffer is needed according to different formats ++ */ ++SECUREC_INLINE int SecDecodeClearFormat(const SecUnsignedChar *format, int *convChr) ++{ ++ const SecUnsignedChar *fmt = format; ++ /* To lowercase */ ++ int ch = SECUREC_TO_LOWERCASE(*fmt); ++ if (!(ch == 'c' || ch == 's' || ch == SECUREC_BRACE)) { ++ return -1; /* First argument is not a string type */ ++ } ++ if (ch == SECUREC_BRACE) { ++#if !(defined(SECUREC_COMPATIBLE_WIN_FORMAT)) ++ if (*fmt == SECUREC_CHAR('{')) { ++ return -1; ++ } ++#endif ++ ++fmt; ++ if (*fmt == SECUREC_CHAR('^')) { ++ ++fmt; ++ } ++ if (*fmt == SECUREC_CHAR(']')) { ++ ++fmt; ++ } ++ while (*fmt != SECUREC_CHAR('\0') && *fmt != SECUREC_CHAR(']')) { ++ ++fmt; ++ } ++ if (*fmt == SECUREC_CHAR('\0')) { ++ return -1; /* Trunc'd format string */ ++ } ++ } ++ *convChr = ch; ++ return 0; ++} ++ ++/* ++ * Add L'\0' for wchar string , add '\0' for char string ++ */ ++SECUREC_INLINE void SecAddEndingZero(void *ptr, const SecScanSpec *spec) ++{ ++ if (spec->suppress == 0) { ++ *(char *)ptr = '\0'; ++#if SECUREC_HAVE_WCHART ++ if (spec->isWCharOrLong > 0) { ++ *(wchar_t UNALIGNED *)ptr = L'\0'; ++ } ++#endif ++ } ++} ++ ++SECUREC_INLINE void SecDecodeClearArg(SecScanSpec *spec, va_list argList) ++{ ++ va_list argListSave; /* Backup for argList value, this variable don't need initialized */ ++ (void)SECUREC_MEMSET_FUNC_OPT(&argListSave, 0, sizeof(va_list)); /* To clear e530 argListSave not initialized */ ++#if defined(va_copy) ++ va_copy(argListSave, argList); 
++#elif defined(__va_copy) /* For vxworks */ ++ __va_copy(argListSave, argList); ++#else ++ argListSave = argList; ++#endif ++ spec->argPtr = (void *)va_arg(argListSave, void *); ++ /* Get the next argument, size of the array in characters */ ++ /* Use 0xffffffffUL mask to Support pass integer as array length */ ++ spec->arrayWidth = ((size_t)(va_arg(argListSave, size_t))) & 0xffffffffUL; ++ va_end(argListSave); ++ /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ (void)argListSave; ++} ++ ++#ifdef SECUREC_FOR_WCHAR ++/* ++ * Clean up the first %s %c buffer to zero for wchar version ++ */ ++void SecClearDestBufW(const wchar_t *buffer, const wchar_t *format, va_list argList) ++#else ++/* ++ * Clean up the first %s %c buffer to zero for char version ++ */ ++void SecClearDestBuf(const char *buffer, const char *format, va_list argList) ++#endif ++{ ++ SecScanSpec spec; ++ int convChr = 0; ++ const SecUnsignedChar *fmt = (const SecUnsignedChar *)format; ++ ++ /* Find first % */ ++ while (*fmt != SECUREC_CHAR('\0') && *fmt != SECUREC_CHAR('%')) { ++ ++fmt; ++ } ++ if (*fmt == SECUREC_CHAR('\0')) { ++ return; ++ } ++ ++ SecSetDefaultScanSpec(&spec); ++ if (SecDecodeScanFlag(&fmt, &spec) != 0) { ++ return; ++ } ++ ++ /* Update wchar flag for %S %C */ ++ SecUpdateWcharFlagByType(*fmt, &spec); ++ if (spec.suppress != 0) { ++ return; ++ } ++ ++ if (SecDecodeClearFormat(fmt, &convChr) != 0) { ++ return; ++ } ++ ++ if (*buffer != SECUREC_CHAR('\0') && convChr != 's') { ++ /* ++ * When buffer not empty just clear %s. ++ * Example call sscanf by argment of (" \n", "%s", s, sizeof(s)) ++ */ ++ return; ++ } ++ ++ SecDecodeClearArg(&spec, argList); ++ /* There is no need to judge the upper limit */ ++ if (spec.arrayWidth == 0 || spec.argPtr == NULL) { ++ return; ++ } ++ /* Clear one char */ ++ SecAddEndingZero(spec.argPtr, &spec); ++ return; ++} ++ ++/* ++ * Assign number to output buffer ++ */ ++SECUREC_INLINE void SecAssignNumber(const SecScanSpec *spec) ++{ ++ void *argPtr = spec->argPtr; ++ if (spec->numberArgType != 0) { ++#if defined(SECUREC_VXWORKS_PLATFORM) ++#if defined(SECUREC_VXWORKS_PLATFORM_COMP) ++ *(SecInt64 UNALIGNED *)argPtr = (SecInt64)(spec->number64); ++#else ++ /* Take number64 as unsigned number unsigned to int clear Compile warning */ ++ *(SecInt64 UNALIGNED *)argPtr = *(SecUnsignedInt64 *)(&(spec->number64)); ++#endif ++#else ++ /* Take number64 as unsigned number */ ++ *(SecInt64 UNALIGNED *)argPtr = (SecInt64)(spec->number64); ++#endif ++ return; ++ } ++ if (spec->numberWidth > SECUREC_NUM_WIDTH_INT) { ++ /* Take number as unsigned number */ ++ *(long UNALIGNED *)argPtr = (long)(spec->number); ++ } else if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) { ++ *(int UNALIGNED *)argPtr = (int)(spec->number); ++ } else if (spec->numberWidth == SECUREC_NUM_WIDTH_SHORT) { ++ /* Take number as unsigned number */ ++ *(short UNALIGNED *)argPtr = (short)(spec->number); ++ } else { /* < 0 for hh format modifier */ ++ /* Take number as unsigned number */ ++ *(char UNALIGNED *)argPtr = (char)(spec->number); ++ } ++} ++ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++/* ++ * Judge the long bit width ++ */ ++SECUREC_INLINE int SecIsLongBitEqual(int bitNum) ++{ ++ return (int)((unsigned int)bitNum == SECUREC_LONG_BIT_NUM); ++} ++#endif ++ ++/* ++ * Convert hexadecimal characters to decimal value ++ */ ++SECUREC_INLINE int SecHexValueOfChar(SecInt ch) ++{ ++ /* Use isdigt Causing tool false alarms */ ++ return (int)((ch >= '0' 
&& ch <= '9') ? ((unsigned char)ch - '0') : ++ ((((unsigned char)ch | (unsigned char)('a' - 'A')) - ('a')) + 10)); /* Adding 10 is to hex value */ ++} ++ ++/* ++ * Parse decimal character to integer for 32bit . ++ */ ++static void SecDecodeNumberDecimal(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ unsigned long decimalEdge = SECUREC_MAX_32BITS_VALUE_DIV_TEN; ++#ifdef SECUREC_ON_64BITS ++ if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH) != 0) { ++ decimalEdge = (unsigned long)SECUREC_MAX_64BITS_VALUE_DIV_TEN; ++ } ++#endif ++ if (spec->number > decimalEdge) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number = SECUREC_MUL_TEN(spec->number); ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (spec->number == SECUREC_MUL_TEN(decimalEdge)) { ++ /* This code is specially converted to unsigned long type for compatibility */ ++ SecUnsignedInt64 number64As = (unsigned long)SECUREC_MAX_64BITS_VALUE - spec->number; ++ if (number64As < (SecUnsignedInt64)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')) { ++ spec->beyondMax = 1; ++ } ++ } ++#endif ++ spec->number += ((unsigned long)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')); ++} ++ ++/* ++ * Parse Hex character to integer for 32bit . ++ */ ++static void SecDecodeNumberHex(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (SECUREC_LONG_HEX_BEYOND_MAX(spec->number)) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number = SECUREC_MUL_SIXTEEN(spec->number); ++ spec->number += (unsigned long)(unsigned int)SecHexValueOfChar(spec->ch); ++} ++ ++/* ++ * Parse Octal character to integer for 32bit . ++ */ ++static void SecDecodeNumberOctal(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (SECUREC_LONG_OCTAL_BEYOND_MAX(spec->number)) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number = SECUREC_MUL_EIGHT(spec->number); ++ spec->number += ((unsigned long)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')); ++} ++ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++/* Compatible with integer negative values other than int */ ++SECUREC_INLINE void SecFinishNumberNegativeOther(SecScanSpec *spec) ++{ ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++ if (spec->number > SECUREC_MIN_LONG_NEG_VALUE) { ++ spec->number = SECUREC_MIN_LONG_NEG_VALUE; ++ } else { ++ spec->number = (unsigned long)(0U - spec->number); /* Wrap with unsigned long numbers */ ++ } ++ if (spec->beyondMax != 0) { ++ if (spec->numberWidth < SECUREC_NUM_WIDTH_INT) { ++ spec->number = 0; ++ } ++ if (spec->numberWidth == SECUREC_NUM_WIDTH_LONG) { ++ spec->number = SECUREC_MIN_LONG_NEG_VALUE; ++ } ++ } ++ } else { /* For o, u, x, X, p */ ++ spec->number = (unsigned long)(0U - spec->number); /* Wrap with unsigned long numbers */ ++ if (spec->beyondMax != 0) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++} ++/* Compatible processing of integer negative numbers */ ++SECUREC_INLINE void SecFinishNumberNegativeInt(SecScanSpec *spec) ++{ ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++#ifdef SECUREC_ON_64BITS ++ if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH) != 0) { ++ if ((spec->number > SECUREC_MIN_64BITS_NEG_VALUE)) { ++ spec->number = 0; ++ } else { ++ spec->number = (unsigned int)(0U - (unsigned int)spec->number); /* Wrap with unsigned int numbers */ ++ } ++ } ++#else ++ if 
(SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH) != 0) { ++ if ((spec->number > SECUREC_MIN_32BITS_NEG_VALUE)) { ++ spec->number = SECUREC_MIN_32BITS_NEG_VALUE; ++ } else { ++ spec->number = (unsigned int)(0U - (unsigned int)spec->number); /* Wrap with unsigned int numbers */ ++ } ++ } ++#endif ++ if (spec->beyondMax != 0) { ++#ifdef SECUREC_ON_64BITS ++ if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH) != 0) { ++ spec->number = 0; ++ } ++#else ++ if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH) != 0) { ++ spec->number = SECUREC_MIN_32BITS_NEG_VALUE; ++ } ++#endif ++ } ++ } else { /* For o, u, x, X ,p */ ++#ifdef SECUREC_ON_64BITS ++ if (spec->number > SECUREC_MAX_32BITS_VALUE_INC) { ++ spec->number = SECUREC_MAX_32BITS_VALUE; ++ } else { ++ spec->number = (unsigned int)(0U - (unsigned int)spec->number); /* Wrap with unsigned int numbers */ ++ } ++#else ++ spec->number = (unsigned int)(0U - (unsigned int)spec->number); /* Wrap with unsigned int numbers */ ++#endif ++ if (spec->beyondMax != 0) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++} ++ ++/* Compatible with integer positive values other than int */ ++SECUREC_INLINE void SecFinishNumberPositiveOther(SecScanSpec *spec) ++{ ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++ if (spec->number > SECUREC_MAX_LONG_POS_VALUE) { ++ spec->number = SECUREC_MAX_LONG_POS_VALUE; ++ } ++ if ((spec->beyondMax != 0 && spec->numberWidth < SECUREC_NUM_WIDTH_INT)) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++ if (spec->beyondMax != 0 && spec->numberWidth == SECUREC_NUM_WIDTH_LONG) { ++ spec->number = SECUREC_MAX_LONG_POS_VALUE; ++ } ++ } else { ++ if (spec->beyondMax != 0) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++} ++ ++/* Compatible processing of integer positive numbers */ ++SECUREC_INLINE void SecFinishNumberPositiveInt(SecScanSpec *spec) ++{ ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++#ifdef SECUREC_ON_64BITS ++ if (SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH) != 0) { ++ if (spec->number > SECUREC_MAX_64BITS_POS_VALUE) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++ if (spec->beyondMax != 0 && SecIsLongBitEqual(SECUREC_LP64_BIT_WIDTH) != 0) { ++ spec->number = (unsigned long)SECUREC_MAX_64BITS_VALUE; ++ } ++#else ++ if (SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH) != 0) { ++ if (spec->number > SECUREC_MAX_32BITS_POS_VALUE) { ++ spec->number = SECUREC_MAX_32BITS_POS_VALUE; ++ } ++ } ++ if (spec->beyondMax != 0 && SecIsLongBitEqual(SECUREC_LP32_BIT_WIDTH) != 0) { ++ spec->number = SECUREC_MAX_32BITS_POS_VALUE; ++ } ++#endif ++ } else { /* For o,u,x,X,p */ ++ if (spec->beyondMax != 0) { ++ spec->number = SECUREC_MAX_32BITS_VALUE; ++ } ++ } ++} ++ ++#endif ++ ++/* ++ * Parse decimal character to integer for 64bit . 
++ */ ++static void SecDecodeNumber64Decimal(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (spec->number64 > SECUREC_MAX_64BITS_VALUE_DIV_TEN) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number64 = SECUREC_MUL_TEN(spec->number64); ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (spec->number64 == SECUREC_MAX_64BITS_VALUE_CUT_LAST_DIGIT) { ++ SecUnsignedInt64 number64As = (SecUnsignedInt64)SECUREC_MAX_64BITS_VALUE - spec->number64; ++ if (number64As < (SecUnsignedInt64)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')) { ++ spec->beyondMax = 1; ++ } ++ } ++#endif ++ spec->number64 += ((SecUnsignedInt64)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')); ++} ++ ++/* ++ * Parse Hex character to integer for 64bit . ++ */ ++static void SecDecodeNumber64Hex(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (SECUREC_QWORD_HEX_BEYOND_MAX(spec->number64)) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number64 = SECUREC_MUL_SIXTEEN(spec->number64); ++ spec->number64 += (SecUnsignedInt64)(unsigned int)SecHexValueOfChar(spec->ch); ++} ++ ++/* ++ * Parse Octal character to integer for 64bit . ++ */ ++static void SecDecodeNumber64Octal(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (SECUREC_QWORD_OCTAL_BEYOND_MAX(spec->number64)) { ++ spec->beyondMax = 1; ++ } ++#endif ++ spec->number64 = SECUREC_MUL_EIGHT(spec->number64); ++ spec->number64 += ((SecUnsignedInt64)(SecUnsignedInt)spec->ch - (SecUnsignedInt)SECUREC_CHAR('0')); ++} ++ ++#define SECUREC_DECODE_NUMBER_FUNC_NUM 2 ++ ++/* ++ * Parse 64-bit integer formatted input, return 0 when ch is a number. 
++ */ ++SECUREC_INLINE int SecDecodeNumber(SecScanSpec *spec) ++{ ++ /* Function name cannot add address symbol, causing 546 alarm */ ++ static void (* const secDecodeNumberHex[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecScanSpec *spec) = { ++ SecDecodeNumberHex, SecDecodeNumber64Hex ++ }; ++ static void (* const secDecodeNumberOctal[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecScanSpec *spec) = { ++ SecDecodeNumberOctal, SecDecodeNumber64Octal ++ }; ++ static void (* const secDecodeNumberDecimal[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecScanSpec *spec) = { ++ SecDecodeNumberDecimal, SecDecodeNumber64Decimal ++ }; ++ if (spec->convChr == 'x' || spec->convChr == 'p') { ++ if (SecIsXdigit(spec->ch) != 0) { ++ (*secDecodeNumberHex[spec->numberArgType])(spec); ++ } else { ++ return -1; ++ } ++ return 0; ++ } ++ if (SecIsDigit(spec->ch) == 0) { ++ return -1; ++ } ++ if (spec->convChr == 'o') { ++ if (spec->ch < SECUREC_CHAR('8')) { /* Octal maximum limit '8' */ ++ (*secDecodeNumberOctal[spec->numberArgType])(spec); ++ } else { ++ return -1; ++ } ++ } else { /* The convChr is 'd' */ ++ (*secDecodeNumberDecimal[spec->numberArgType])(spec); ++ } ++ return 0; ++} ++ ++/* ++ * Complete the final 32-bit integer formatted input ++ */ ++static void SecFinishNumber(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (spec->negative != 0) { ++ if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) { ++ SecFinishNumberNegativeInt(spec); ++ } else { ++ SecFinishNumberNegativeOther(spec); ++ } ++ } else { ++ if (spec->numberWidth == SECUREC_NUM_WIDTH_INT) { ++ SecFinishNumberPositiveInt(spec); ++ } else { ++ SecFinishNumberPositiveOther(spec); ++ } ++ } ++#else ++ if (spec->negative != 0) { ++#if defined(__hpux) ++ if (spec->oriConvChr != 'p') { ++ spec->number = (unsigned long)(0U - spec->number); /* Wrap with unsigned long numbers */ ++ } ++#else ++ spec->number = (unsigned long)(0U - spec->number); /* Wrap with unsigned long numbers */ ++#endif ++ } ++#endif ++ return; ++} ++ ++/* ++ * Complete the final 64-bit integer formatted input ++ */ ++static void SecFinishNumber64(SecScanSpec *spec) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && !(defined(SECUREC_ON_UNIX))) ++ if (spec->negative != 0) { ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++ if (spec->number64 > SECUREC_MIN_64BITS_NEG_VALUE) { ++ spec->number64 = SECUREC_MIN_64BITS_NEG_VALUE; ++ } else { ++ spec->number64 = (SecUnsignedInt64)(0U - spec->number64); /* Wrap with unsigned int64 numbers */ ++ } ++ if (spec->beyondMax != 0) { ++ spec->number64 = SECUREC_MIN_64BITS_NEG_VALUE; ++ } ++ } else { /* For o, u, x, X, p */ ++ spec->number64 = (SecUnsignedInt64)(0U - spec->number64); /* Wrap with unsigned int64 numbers */ ++ if (spec->beyondMax != 0) { ++ spec->number64 = SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++ } else { ++ if (SECUREC_CONVERT_IS_SIGNED(spec->oriConvChr)) { ++ if (spec->number64 > SECUREC_MAX_64BITS_POS_VALUE) { ++ spec->number64 = SECUREC_MAX_64BITS_POS_VALUE; ++ } ++ if (spec->beyondMax != 0) { ++ spec->number64 = SECUREC_MAX_64BITS_POS_VALUE; ++ } ++ } else { ++ if (spec->beyondMax != 0) { ++ spec->number64 = SECUREC_MAX_64BITS_VALUE; ++ } ++ } ++ } ++#else ++ if (spec->negative != 0) { ++#if defined(__hpux) ++ if (spec->oriConvChr != 'p') { ++ spec->number64 = (SecUnsignedInt64)(0U - spec->number64); /* Wrap with unsigned int64 numbers */ ++ } ++#else ++ spec->number64 = (SecUnsignedInt64)(0U - spec->number64); /* Wrap with unsigned int64 numbers */ ++#endif ++ } ++#endif ++ return; ++} ++ 
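++
++/*
++ * The decode and finish helpers above come in 32-bit/64-bit pairs kept in
++ * two-entry function-pointer tables indexed by spec->numberArgType
++ * (0: 32-bit path accumulating in spec->number, 1: 64-bit path using
++ * spec->number64). SecDecodeNumber above and SecInputNumberDigital below
++ * dispatch through these tables.
++ */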
++#if SECUREC_ENABLE_SCANF_FILE ++ ++/* ++ * Adjust the pointer position of the file stream ++ */ ++SECUREC_INLINE void SecSeekStream(SecFileStream *stream) ++{ ++ if (stream->count == 0) { ++ if (feof(stream->pf) != 0) { ++ /* File pointer at the end of file, don't need to seek back */ ++ stream->base[0] = '\0'; ++ return; ++ } ++ } ++ /* Seek to original position, for file read, but nothing to input */ ++ if (fseek(stream->pf, stream->oriFilePos, SEEK_SET) != 0) { ++ /* Seek failed, ignore it */ ++ stream->oriFilePos = 0; ++ return; ++ } ++ ++ if (stream->fileRealRead > 0) { /* Do not seek without input data */ ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ size_t residue = stream->fileRealRead % SECUREC_BUFFERED_BLOK_SIZE; ++ size_t loops; ++ for (loops = 0; loops < (stream->fileRealRead / SECUREC_BUFFERED_BLOK_SIZE); ++loops) { ++ if (fread(stream->base, (size_t)SECUREC_BUFFERED_BLOK_SIZE, (size_t)1, stream->pf) != (size_t)1) { ++ break; ++ } ++ } ++ if (residue != 0) { ++ long curFilePos; ++ if (fread(stream->base, residue, (size_t)1, stream->pf) != (size_t)1) { ++ return; ++ } ++ curFilePos = ftell(stream->pf); ++ if (curFilePos < stream->oriFilePos || ++ (size_t)(unsigned long)(curFilePos - stream->oriFilePos) < stream->fileRealRead) { ++ /* Try to remedy the problem */ ++ long adjustNum = (long)(stream->fileRealRead - (size_t)(unsigned long)(curFilePos - stream->oriFilePos)); ++ (void)fseek(stream->pf, adjustNum, SEEK_CUR); ++ } ++ } ++#else ++ /* Seek from oriFilePos. Regardless of the integer sign problem, call scanf will not read very large data */ ++ if (fseek(stream->pf, (long)stream->fileRealRead, SEEK_CUR) != 0) { ++ /* Seek failed, ignore it */ ++ stream->oriFilePos = 0; ++ return; ++ } ++#endif ++ } ++ return; ++} ++ ++/* ++ * Adjust the pointer position of the file stream and free memory ++ */ ++SECUREC_INLINE void SecAdjustStream(SecFileStream *stream) ++{ ++ if ((stream->flag & SECUREC_FILE_STREAM_FLAG) != 0 && stream->base != NULL) { ++ SecSeekStream(stream); ++ SECUREC_FREE(stream->base); ++ stream->base = NULL; ++ } ++ return; ++} ++#endif ++ ++SECUREC_INLINE void SecSkipSpaceFormat(const SecUnsignedChar **format) ++{ ++ const SecUnsignedChar *fmt = *format; ++ while (SecIsSpace((SecInt)(int)(*fmt)) != 0) { ++ ++fmt; ++ } ++ *format = fmt; ++} ++ ++#if !defined(SECUREC_FOR_WCHAR) && defined(SECUREC_COMPATIBLE_VERSION) ++/* ++ * Handling multi-character characters ++ */ ++SECUREC_INLINE int SecDecodeLeadByte(SecScanSpec *spec, const SecUnsignedChar **format, SecFileStream *stream) ++{ ++#if SECUREC_HAVE_MBTOWC ++ const SecUnsignedChar *fmt = *format; ++ int ch1 = (int)spec->ch; ++ int ch2 = SecGetChar(stream, &(spec->charCount)); ++ spec->ch = (SecInt)ch2; ++ if (*fmt == SECUREC_CHAR('\0') || (int)(*fmt) != ch2) { ++ /* in console mode, ungetc twice may cause problem */ ++ SecUnGetChar(ch2, stream, &(spec->charCount)); ++ SecUnGetChar(ch1, stream, &(spec->charCount)); ++ return -1; ++ } ++ ++fmt; ++ if ((unsigned int)MB_CUR_MAX >= SECUREC_UTF8_BOM_HEADER_SIZE && ++ (((unsigned char)ch1 & SECUREC_UTF8_LEAD_1ST) == SECUREC_UTF8_LEAD_1ST) && ++ (((unsigned char)ch2 & SECUREC_UTF8_LEAD_2ND) == SECUREC_UTF8_LEAD_2ND)) { ++ /* This char is very likely to be a UTF-8 char */ ++ wchar_t tempWChar; ++ char temp[SECUREC_MULTI_BYTE_MAX_LEN]; ++ int ch3 = (int)SecGetChar(stream, &(spec->charCount)); ++ spec->ch = (SecInt)ch3; ++ if (*fmt == SECUREC_CHAR('\0') || (int)(*fmt) != ch3) { ++ SecUnGetChar(ch3, stream, &(spec->charCount)); ++ return -1; ++ } ++ temp[0] = (char)ch1; ++ 
temp[1] = (char)ch2; /* 1 index of second character */ ++ temp[2] = (char)ch3; /* 2 index of third character */ ++ temp[3] = '\0'; /* 3 of string terminator position */ ++ if (mbtowc(&tempWChar, temp, sizeof(temp)) > 0) { ++ /* Succeed */ ++ ++fmt; ++ --spec->charCount; ++ } else { ++ SecUnGetChar(ch3, stream, &(spec->charCount)); ++ } ++ } ++ --spec->charCount; /* Only count as one character read */ ++ *format = fmt; ++ return 0; ++#else ++ SecUnGetChar(spec->ch, stream, &(spec->charCount)); ++ (void)format; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ return -1; ++#endif ++} ++ ++SECUREC_INLINE int SecFilterWcharInFormat(SecScanSpec *spec, const SecUnsignedChar **format, SecFileStream *stream) ++{ ++ if (SecIsLeadByte(spec->ch) != 0) { ++ if (SecDecodeLeadByte(spec, format, stream) != 0) { ++ return -1; ++ } ++ } ++ return 0; ++} ++#endif ++ ++/* ++ * Resolving sequence of characters from %[ format, format wile point to ']' ++ */ ++SECUREC_INLINE int SecSetupBracketTable(const SecUnsignedChar **format, SecBracketTable *bracketTable) ++{ ++ const SecUnsignedChar *fmt = *format; ++ SecUnsignedChar prevChar = 0; ++#if !(defined(SECUREC_COMPATIBLE_WIN_FORMAT)) ++ if (*fmt == SECUREC_CHAR('{')) { ++ return -1; ++ } ++#endif ++ /* For building "table" data */ ++ ++fmt; /* Skip [ */ ++ bracketTable->mask = 0; /* Set all bits to 0 */ ++ if (*fmt == SECUREC_CHAR('^')) { ++ ++fmt; ++ bracketTable->mask = (unsigned char)0xffU; /* Use 0xffU to set all bits to 1 */ ++ } ++ if (*fmt == SECUREC_CHAR(']')) { ++ prevChar = SECUREC_CHAR(']'); ++ ++fmt; ++ SecBracketSetBit(bracketTable->table, SECUREC_CHAR(']')); ++ } ++ while (*fmt != SECUREC_CHAR('\0') && *fmt != SECUREC_CHAR(']')) { ++ SecUnsignedChar expCh = *fmt; ++ ++fmt; ++ if (expCh != SECUREC_CHAR('-') || prevChar == 0 || *fmt == SECUREC_CHAR(']')) { ++ /* Normal character */ ++ prevChar = expCh; ++ SecBracketSetBit(bracketTable->table, expCh); ++ } else { ++ /* For %[a-z] */ ++ expCh = *fmt; /* Get end of range */ ++ ++fmt; ++ if (prevChar <= expCh) { /* %[a-z] %[a-a] */ ++ SecBracketSetBitRange(bracketTable->table, prevChar, expCh); ++ } else { ++ /* For %[z-a] */ ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ /* Swap start and end characters */ ++ SecBracketSetBitRange(bracketTable->table, expCh, prevChar); ++#else ++ SecBracketSetBit(bracketTable->table, SECUREC_CHAR('-')); ++ SecBracketSetBit(bracketTable->table, expCh); ++#endif ++ } ++ prevChar = 0; ++ } ++ } ++ *format = fmt; ++ return 0; ++} ++ ++#ifdef SECUREC_FOR_WCHAR ++SECUREC_INLINE int SecInputForWchar(SecScanSpec *spec) ++{ ++ void *endPtr = spec->argPtr; ++ if (spec->isWCharOrLong > 0) { ++ *(wchar_t UNALIGNED *)endPtr = (wchar_t)spec->ch; ++ endPtr = (wchar_t *)endPtr + 1; ++ --spec->arrayWidth; ++ } else { ++#if SECUREC_HAVE_WCTOMB ++ int temp; ++ char tmpBuf[SECUREC_MB_LEN + 1]; ++ SECUREC_MASK_MSVC_CRT_WARNING temp = wctomb(tmpBuf, (wchar_t)spec->ch); ++ SECUREC_END_MASK_MSVC_CRT_WARNING ++ if (temp <= 0 || (size_t)(unsigned int)temp > sizeof(tmpBuf)) { ++ /* If wctomb error, then ignore character */ ++ return 0; ++ } ++ if (((size_t)(unsigned int)temp) > spec->arrayWidth) { ++ return -1; ++ } ++ if (memcpy_s(endPtr, spec->arrayWidth, tmpBuf, (size_t)(unsigned int)temp) != EOK) { ++ return -1; ++ } ++ endPtr = (char *)endPtr + temp; ++ spec->arrayWidth -= (size_t)(unsigned int)temp; ++#else ++ return -1; ++#endif ++ } ++ spec->argPtr = endPtr; ++ return 0; ++} ++#endif ++ ++#ifndef SECUREC_FOR_WCHAR ++#if SECUREC_HAVE_WCHART 
++SECUREC_INLINE wchar_t SecConvertInputCharToWchar(SecScanSpec *spec, SecFileStream *stream) ++{ ++ wchar_t tempWChar = L'?'; /* Set default char is ? */ ++#if SECUREC_HAVE_MBTOWC ++ char temp[SECUREC_MULTI_BYTE_MAX_LEN + 1]; ++ temp[0] = (char)spec->ch; ++ temp[1] = '\0'; ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ if (SecIsLeadByte(spec->ch) != 0) { ++ spec->ch = SecGetChar(stream, &(spec->charCount)); ++ temp[1] = (char)spec->ch; ++ temp[2] = '\0'; /* 2 of string terminator position */ ++ } ++ if (mbtowc(&tempWChar, temp, sizeof(temp)) <= 0) { ++ /* No string termination error for tool */ ++ tempWChar = L'?'; ++ } ++#else ++ if (SecIsLeadByte(spec->ch) != 0) { ++ int convRes = 0; ++ int di = 1; ++ /* On Linux like system, the string is encoded in UTF-8 */ ++ while (convRes <= 0 && di < (int)MB_CUR_MAX && di < SECUREC_MULTI_BYTE_MAX_LEN) { ++ spec->ch = SecGetChar(stream, &(spec->charCount)); ++ temp[di] = (char)spec->ch; ++ ++di; ++ temp[di] = '\0'; ++ convRes = mbtowc(&tempWChar, temp, sizeof(temp)); ++ } ++ if (convRes <= 0) { ++ tempWChar = L'?'; ++ } ++ } else { ++ if (mbtowc(&tempWChar, temp, sizeof(temp)) <= 0) { ++ tempWChar = L'?'; ++ } ++ } ++#endif ++#else ++ (void)spec; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ (void)stream; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++#endif /* SECUREC_HAVE_MBTOWC */ ++ ++ return tempWChar; ++} ++#endif /* SECUREC_HAVE_WCHART */ ++ ++SECUREC_INLINE int SecInputForChar(SecScanSpec *spec, SecFileStream *stream) ++{ ++ void *endPtr = spec->argPtr; ++ if (spec->isWCharOrLong > 0) { ++#if SECUREC_HAVE_WCHART ++ *(wchar_t UNALIGNED *)endPtr = SecConvertInputCharToWchar(spec, stream); ++ endPtr = (wchar_t *)endPtr + 1; ++ --spec->arrayWidth; ++#else ++ (void)stream; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ return -1; ++#endif ++ } else { ++ *(char *)endPtr = (char)spec->ch; ++ endPtr = (char *)endPtr + 1; ++ --spec->arrayWidth; ++ } ++ spec->argPtr = endPtr; ++ return 0; ++} ++#endif ++ ++/* ++ * Scan digital part of %d %i %o %u %x %p. ++ * Return 0 OK ++ */ ++SECUREC_INLINE int SecInputNumberDigital(SecFileStream *stream, SecScanSpec *spec) ++{ ++ static void (* const secFinishNumber[SECUREC_DECODE_NUMBER_FUNC_NUM])(SecScanSpec *spec) = { ++ SecFinishNumber, SecFinishNumber64 ++ }; ++ while (SECUREC_FILED_WIDTH_ENOUGH(spec)) { ++ spec->ch = SecGetChar(stream, &(spec->charCount)); ++ /* Decode ch to number */ ++ if (SecDecodeNumber(spec) != 0) { ++ SecUnGetChar(spec->ch, stream, &(spec->charCount)); ++ break; ++ } ++ SECUREC_FILED_WIDTH_DEC(spec); /* Must be behind un get char, otherwise the logic is incorrect */ ++ spec->numberState = SECUREC_NUMBER_STATE_STARTED; ++ } ++ /* Handling integer negative numbers and beyond max */ ++ (*secFinishNumber[spec->numberArgType])(spec); ++ if (spec->numberState == SECUREC_NUMBER_STATE_STARTED) { ++ return 0; ++ } ++ return -1; ++} ++ ++/* ++ * Scan %d %i %o %u %x %p. 
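++ * Consumes an optional sign first, then inspects a leading 0/0x prefix so
++ * that %i can fall back to octal or hexadecimal before the remaining digits
++ * are decoded by SecInputNumberDigital.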
++ * Return 0 OK ++ */ ++SECUREC_INLINE int SecInputNumber(SecFileStream *stream, SecScanSpec *spec) ++{ ++ /* Character already read */ ++ if (spec->ch == SECUREC_CHAR('+') || spec->ch == SECUREC_CHAR('-')) { ++ if (spec->ch == SECUREC_CHAR('-')) { ++ spec->negative = 1; ++#if SECUREC_IN_KERNEL ++ /* In kernel Refuse to enter negative number */ ++ if (SECUREC_CONVERT_IS_UNSIGNED(spec->oriConvChr)) { ++ return -1; ++ } ++#endif ++ } ++ SECUREC_FILED_WIDTH_DEC(spec); /* Do not need to check width here, must be greater than 0 */ ++ spec->ch = SecGetChar(stream, &(spec->charCount)); /* Eat + or - */ ++ spec->ch = SecGetChar(stream, &(spec->charCount)); /* Get next character, used for the '0' judgments */ ++ SecUnGetChar(spec->ch, stream, &(spec->charCount)); /* Not sure if it was actually read, so push back */ ++ } ++ ++ if (spec->oriConvChr == 'i') { ++ spec->convChr = 'd'; /* The i could be d, o, or x, use d as default */ ++ } ++ ++ if (spec->ch == SECUREC_CHAR('0') && (spec->oriConvChr == 'x' || spec->oriConvChr == 'i') && ++ SECUREC_FILED_WIDTH_ENOUGH(spec)) { ++ /* Input string begin with 0, may be 0x123 0X123 0123 0x 01 0yy 09 0 0ab 00 */ ++ SECUREC_FILED_WIDTH_DEC(spec); ++ spec->ch = SecGetChar(stream, &(spec->charCount)); /* ch is '0' */ ++ ++ /* Read only '0' due to width limitation */ ++ if (!SECUREC_FILED_WIDTH_ENOUGH(spec)) { ++ /* The number or number64 in spec has been set 0 */ ++ return 0; ++ } ++ ++ spec->ch = SecGetChar(stream, &(spec->charCount)); /* Get next char to check x or X, do not dec width */ ++ if ((SecChar)spec->ch == SECUREC_CHAR('x') || (SecChar)spec->ch == SECUREC_CHAR('X')) { ++ spec->convChr = 'x'; ++ SECUREC_FILED_WIDTH_DEC(spec); /* Make incorrect width for x or X */ ++ } else { ++ if (spec->oriConvChr == 'i') { ++ spec->convChr = 'o'; ++ } ++ /* For "0y" "08" "01" "0a" ... ,push the 'y' '8' '1' 'a' back */ ++ SecUnGetChar(spec->ch, stream, &(spec->charCount)); ++ /* Since 0 has been read, it indicates that a valid character has been read */ ++ spec->numberState = SECUREC_NUMBER_STATE_STARTED; ++ } ++ } ++ return SecInputNumberDigital(stream, spec); ++} ++ ++/* ++ * Scan %c %s %[ ++ * Return 0 OK ++ */ ++SECUREC_INLINE int SecInputString(SecFileStream *stream, SecScanSpec *spec, ++ const SecBracketTable *bracketTable, int *doneCount) ++{ ++ void *startPtr = spec->argPtr; ++ int suppressed = 0; ++ int errNoMem = 0; ++ ++ while (SECUREC_FILED_WIDTH_ENOUGH(spec)) { ++ SECUREC_FILED_WIDTH_DEC(spec); ++ spec->ch = SecGetChar(stream, &(spec->charCount)); ++ /* ++ * The char condition or string condition and bracket condition. 
++         * Only supports wide characters with a maximum length of two bytes
++         */
++        if (spec->ch != SECUREC_EOF && (SecCanInputCharacter(spec->convChr) != 0 ||
++            SecCanInputString(spec->convChr, spec->ch) != 0 ||
++            SecCanInputForBracket(spec->convChr, spec->ch, bracketTable) != 0)) {
++            if (spec->suppress != 0) {
++                /* Mark that %* consumed data; using argPtr as the marker would raise lint 613, so use suppressed */
++                suppressed = 1;
++                continue;
++            }
++            /* Now suppress is not set */
++            if (spec->arrayWidth == 0) {
++                errNoMem = 1; /* We have exhausted the user's buffer */
++                break;
++            }
++#ifdef SECUREC_FOR_WCHAR
++            errNoMem = SecInputForWchar(spec);
++#else
++            errNoMem = SecInputForChar(spec, stream);
++#endif
++            if (errNoMem != 0) {
++                break;
++            }
++        } else {
++            SecUnGetChar(spec->ch, stream, &(spec->charCount));
++            break;
++        }
++    }
++
++    if (errNoMem != 0) {
++        /* In case of error, blank out the input buffer */
++        SecAddEndingZero(startPtr, spec);
++        return -1;
++    }
++    if ((spec->suppress != 0 && suppressed == 0) ||
++        (spec->suppress == 0 && startPtr == spec->argPtr)) {
++        /* No input was scanned */
++        return -1;
++    }
++    if (spec->convChr != 'c') {
++        /* Add the null terminator for strings */
++        SecAddEndingZero(spec->argPtr, spec);
++    }
++    if (spec->suppress == 0) {
++        *doneCount = *doneCount + 1;
++    }
++    return 0;
++}
++
++#ifdef SECUREC_FOR_WCHAR
++/*
++ * Allocate the buffer for the wchar version of %[.
++ * Return 0 OK
++ */
++SECUREC_INLINE int SecAllocBracketTable(SecBracketTable *bracketTable)
++{
++    if (bracketTable->table == NULL) {
++        /* Table should be freed after use */
++        bracketTable->table = (unsigned char *)SECUREC_MALLOC(SECUREC_BRACKET_TABLE_SIZE);
++        if (bracketTable->table == NULL) {
++            return -1;
++        }
++    }
++    return 0;
++}
++
++/*
++ * Free the buffer for the wchar version of %[
++ */
++SECUREC_INLINE void SecFreeBracketTable(SecBracketTable *bracketTable)
++{
++    if (bracketTable->table != NULL) {
++        SECUREC_FREE(bracketTable->table);
++        bracketTable->table = NULL;
++    }
++}
++#endif
++
++#ifdef SECUREC_FOR_WCHAR
++/*
++ * Formatting input core function for the wchar version. Called by functions such as vswscanf_s
++ */
++int SecInputSW(SecFileStream *stream, const wchar_t *cFormat, va_list argList)
++#else
++/*
++ * Formatting input core function for the char version. Called by functions such as vsscanf_s
++ */
++int SecInputS(SecFileStream *stream, const char *cFormat, va_list argList)
++#endif
++{
++    const SecUnsignedChar *format = (const SecUnsignedChar *)cFormat;
++    SecBracketTable bracketTable = SECUREC_INIT_BRACKET_TABLE;
++    SecScanSpec spec;
++    int doneCount = 0;
++    int formatError = 0;
++    int paraIsNull = 0;
++    int match = 0; /* Incremented each time a % directive is matched */
++    int errRet = 0;
++#if SECUREC_ENABLE_SCANF_FLOAT
++    SecFloatSpec floatSpec;
++    SecInitFloatSpec(&floatSpec);
++#endif
++    spec.ch = 0; /* Need to initialize to 0 */
++    spec.charCount = 0; /* Need to initialize to 0 */
++
++    /* Format must not be NULL; use errRet < 1 to clear lint 845 */
++    while (errRet < 1 && *format != SECUREC_CHAR('\0')) {
++        /* Skip space in format and space in input */
++        if (SecIsSpace((SecInt)(int)(*format)) != 0) {
++            /* Read the first non-space char */
++            spec.ch = SecSkipSpaceChar(stream, &(spec.charCount));
++            /* The EOF read cannot be returned directly here, because the case of " %n" needs to be handled */
++            /* Put the first non-space char back;
put EOF back is also OK, and to modify the character count */ ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++ SecSkipSpaceFormat(&format); ++ continue; ++ } ++ ++ if (*format != SECUREC_CHAR('%')) { ++ spec.ch = SecGetChar(stream, &(spec.charCount)); ++ if ((int)(*format) != (int)(spec.ch)) { ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++ break; ++ } ++ ++format; ++#if !defined(SECUREC_FOR_WCHAR) && defined(SECUREC_COMPATIBLE_VERSION) ++ if (SecFilterWcharInFormat(&spec, &format, stream) != 0) { ++ break; ++ } ++#endif ++ continue; ++ } ++ ++ /* Now *format is % */ ++ /* Set default value for each % */ ++ SecSetDefaultScanSpec(&spec); ++ if (SecDecodeScanFlag(&format, &spec) != 0) { ++ formatError = 1; ++ ++errRet; ++ continue; ++ } ++ if (!SECUREC_FILED_WIDTH_ENOUGH(&spec)) { ++ /* 0 width in format */ ++ ++errRet; ++ continue; ++ } ++ ++ /* Update wchar flag for %S %C */ ++ SecUpdateWcharFlagByType(*format, &spec); ++ ++ spec.convChr = SECUREC_TO_LOWERCASE(*format); ++ spec.oriConvChr = spec.convChr; /* convChr may be modified to handle integer logic */ ++ if (spec.convChr != 'n') { ++ if (spec.convChr != 'c' && spec.convChr != SECUREC_BRACE) { ++ spec.ch = SecSkipSpaceChar(stream, &(spec.charCount)); ++ } else { ++ spec.ch = SecGetChar(stream, &(spec.charCount)); ++ } ++ if (spec.ch == SECUREC_EOF) { ++ ++errRet; ++ continue; ++ } ++ } ++ ++ /* Now no 0 width in format and get one char from input */ ++ switch (spec.oriConvChr) { ++ case 'c': /* Also 'C' */ ++ if (spec.widthSet == 0) { ++ spec.widthSet = 1; ++ spec.width = 1; ++ } ++ /* fall-through */ /* FALLTHRU */ ++ case 's': /* Also 'S': */ ++ /* fall-through */ /* FALLTHRU */ ++ case SECUREC_BRACE: ++ /* Unset last char to stream */ ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++ /* Check dest buffer and size */ ++ if (spec.suppress == 0) { ++ spec.argPtr = (void *)va_arg(argList, void *); ++ if (spec.argPtr == NULL) { ++ paraIsNull = 1; ++ ++errRet; ++ continue; ++ } ++ /* Get the next argument, size of the array in characters */ ++ spec.arrayWidth = SECUREC_GET_ARRAYWIDTH(argList); ++ if (SECUREC_ARRAY_WIDTH_IS_WRONG(spec)) { ++ /* Do not clear buffer just go error */ ++ ++errRet; ++ continue; ++ } ++ /* One element is needed for '\0' for %s and %[ */ ++ if (spec.convChr != 'c') { ++ --spec.arrayWidth; ++ } ++ } else { ++ /* Set argPtr to NULL is necessary, in supress mode we don't use argPtr to store data */ ++ spec.argPtr = NULL; ++ } ++ ++ if (spec.convChr == SECUREC_BRACE) { ++ /* Malloc when first %[ is meet for wchar version */ ++#ifdef SECUREC_FOR_WCHAR ++ if (SecAllocBracketTable(&bracketTable) != 0) { ++ ++errRet; ++ continue; ++ } ++#endif ++ (void)SECUREC_MEMSET_FUNC_OPT(bracketTable.table, 0, (size_t)SECUREC_BRACKET_TABLE_SIZE); ++ if (SecSetupBracketTable(&format, &bracketTable) != 0) { ++ ++errRet; ++ continue; ++ } ++ ++ if (*format == SECUREC_CHAR('\0')) { ++ /* Default add string terminator */ ++ SecAddEndingZero(spec.argPtr, &spec); ++ ++errRet; ++ /* Truncated format */ ++ continue; ++ } ++ } ++ ++ /* Set completed. 
Now read string or character */ ++ if (SecInputString(stream, &spec, &bracketTable, &doneCount) != 0) { ++ ++errRet; ++ continue; ++ } ++ break; ++ case 'p': ++ /* Make %hp same as %p */ ++ spec.numberWidth = SECUREC_NUM_WIDTH_INT; ++#ifdef SECUREC_ON_64BITS ++ spec.numberArgType = 1; ++#endif ++ /* fall-through */ /* FALLTHRU */ ++ case 'o': /* fall-through */ /* FALLTHRU */ ++ case 'u': /* fall-through */ /* FALLTHRU */ ++ case 'd': /* fall-through */ /* FALLTHRU */ ++ case 'i': /* fall-through */ /* FALLTHRU */ ++ case 'x': ++ /* Unset last char to stream */ ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++ if (SecInputNumber(stream, &spec) != 0) { ++ ++errRet; ++ continue; ++ } ++ if (spec.suppress == 0) { ++ spec.argPtr = (void *)va_arg(argList, void *); ++ if (spec.argPtr == NULL) { ++ paraIsNull = 1; ++ ++errRet; ++ continue; ++ } ++ SecAssignNumber(&spec); ++ ++doneCount; ++ } ++ break; ++ case 'n': /* Char count */ ++ if (spec.suppress == 0) { ++ spec.argPtr = (void *)va_arg(argList, void *); ++ if (spec.argPtr == NULL) { ++ paraIsNull = 1; ++ ++errRet; ++ continue; ++ } ++ spec.number = (unsigned long)(unsigned int)(spec.charCount); ++ spec.numberArgType = 0; ++ SecAssignNumber(&spec); ++ } ++ break; ++ case 'e': /* fall-through */ /* FALLTHRU */ ++ case 'f': /* fall-through */ /* FALLTHRU */ ++ case 'g': /* Scan a float */ ++ /* Unset last char to stream */ ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++#if SECUREC_ENABLE_SCANF_FLOAT ++ if (SecInputFloat(stream, &spec, &floatSpec) != 0) { ++ ++errRet; ++ continue; ++ } ++ if (spec.suppress == 0) { ++ spec.argPtr = (void *)va_arg(argList, void *); ++ if (spec.argPtr == NULL) { ++ ++errRet; ++ paraIsNull = 1; ++ continue; ++ } ++ if (SecAssignFloat(&floatSpec, &spec) != 0) { ++ ++errRet; ++ continue; ++ } ++ ++doneCount; ++ } ++ break; ++#else /* SECUREC_ENABLE_SCANF_FLOAT */ ++ ++errRet; ++ continue; ++#endif ++ default: ++ if ((int)(*format) != (int)spec.ch) { ++ SecUnGetChar(spec.ch, stream, &(spec.charCount)); ++ formatError = 1; ++ ++errRet; ++ continue; ++ } else { ++ --match; /* Compensate for the self-increment of the following code */ ++ } ++ break; ++ } ++ ++match; ++ ++format; ++ } ++ ++#ifdef SECUREC_FOR_WCHAR ++ SecFreeBracketTable(&bracketTable); ++#endif ++ ++#if SECUREC_ENABLE_SCANF_FLOAT ++ SecFreeFloatSpec(&floatSpec, &doneCount); ++#endif ++ ++#if SECUREC_ENABLE_SCANF_FILE ++ SecAdjustStream(stream); ++#endif ++ ++ if (spec.ch == SECUREC_EOF) { ++ return ((doneCount != 0 || match != 0) ? 
doneCount : SECUREC_SCANF_EINVAL);
++    }
++    if (formatError != 0 || paraIsNull != 0) {
++        /* Invalid input format or parameter, but EOF not reached */
++        return SECUREC_SCANF_ERROR_PARA;
++    }
++    return doneCount;
++}
++
++#if SECUREC_ENABLE_SCANF_FILE
++/*
++ * Get a char from the stream using the standard library function
++ */
++SECUREC_INLINE SecInt SecGetCharFromStream(const SecFileStream *stream)
++{
++    SecInt ch;
++    ch = SECUREC_GETC(stream->pf);
++    return ch;
++}
++
++/*
++ * Try to read the BOM header; when one is found, discard it so the data stays aligned to base
++ */
++SECUREC_INLINE void SecReadAndSkipBomHeader(SecFileStream *stream)
++{
++    /* Use size_t type conversion to clean e747 */
++    stream->count = fread(stream->base, (size_t)1, (size_t)SECUREC_BOM_HEADER_SIZE, stream->pf);
++    if (stream->count > SECUREC_BOM_HEADER_SIZE) {
++        stream->count = 0;
++    }
++    if (SECUREC_BEGIN_WITH_BOM(stream->base, stream->count)) {
++        /* It's a BOM header, discard it */
++        stream->count = 0;
++    }
++}
++
++/*
++ * Get a char from the file stream or the buffer
++ */
++SECUREC_INLINE SecInt SecGetCharFromFile(SecFileStream *stream)
++{
++    SecInt ch;
++    if (stream->count < sizeof(SecChar)) {
++        /* Load file to buffer */
++        size_t len;
++        if (stream->base != NULL) {
++            /* Put the last unread data at the head of the buffer */
++            for (len = 0; len < stream->count; ++len) {
++                stream->base[len] = stream->cur[len];
++            }
++        } else {
++            stream->oriFilePos = ftell(stream->pf); /* Save original file read position */
++            if (stream->oriFilePos == -1) {
++                /* It may be a pipe stream */
++                stream->flag = SECUREC_PIPE_STREAM_FLAG;
++                return SecGetCharFromStream(stream);
++            }
++            /* Reserve the length of the BOM header */
++            stream->base = (char *)SECUREC_MALLOC(SECUREC_BUFFERED_BLOK_SIZE +
++                SECUREC_BOM_HEADER_SIZE + sizeof(SecChar)); /* To store '\0' and align to wide char */
++            if (stream->base == NULL) {
++                return SECUREC_EOF;
++            }
++            /* First read of the file */
++            if (stream->oriFilePos == 0) {
++                /* Make sure the data is aligned to base */
++                SecReadAndSkipBomHeader(stream);
++            }
++        }
++
++        /* Skip existing data and read new data */
++        len = fread(stream->base + stream->count, (size_t)1, (size_t)SECUREC_BUFFERED_BLOK_SIZE, stream->pf);
++        if (len > SECUREC_BUFFERED_BLOK_SIZE) { /* It should not happen */
++            len = 0;
++        }
++        stream->count += len;
++        stream->cur = stream->base;
++        stream->flag |= SECUREC_LOAD_FILE_TO_MEM_FLAG;
++        stream->base[stream->count] = '\0'; /* Null-terminate to quiet tool warnings about unterminated strings */
++    }
++
++    SECUREC_GET_CHAR(stream, &ch);
++    if (ch != SECUREC_EOF) {
++        stream->fileRealRead += sizeof(SecChar);
++    }
++    return ch;
++}
++#endif
++
++/*
++ * Get one char; shared by the wchar and char versions
++ */
++SECUREC_INLINE SecInt SecGetChar(SecFileStream *stream, int *counter)
++{
++    *counter = *counter + 1; /* Always plus 1 */
++    /* The main scenario is scanning an in-memory string */
++    if ((stream->flag & SECUREC_MEM_STR_FLAG) != 0) {
++        SecInt ch;
++        SECUREC_GET_CHAR(stream, &ch);
++        return ch;
++    }
++#if SECUREC_ENABLE_SCANF_FILE
++    if ((stream->flag & SECUREC_FILE_STREAM_FLAG) != 0) {
++        return SecGetCharFromFile(stream);
++    }
++    if ((stream->flag & SECUREC_PIPE_STREAM_FLAG) != 0) {
++        return SecGetCharFromStream(stream);
++    }
++#endif
++    return SECUREC_EOF;
++}
++
++/*
++ * Unget one char; public implementation shared by the wchar and char versions
++ */
++SECUREC_INLINE void SecUnGetCharImpl(SecInt ch, SecFileStream *stream)
++{
++    if ((stream->flag & SECUREC_MEM_STR_FLAG) != 0) {
++        SECUREC_UN_GET_CHAR(stream);
++        return;
++    }
++#if SECUREC_ENABLE_SCANF_FILE
++    if ((stream->flag & SECUREC_LOAD_FILE_TO_MEM_FLAG) != 0) {
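++        /* Data came from the in-memory file buffer: step the cursor back and
++         * undo the accounting of bytes actually consumed from the file */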
++        SECUREC_UN_GET_CHAR(stream);
++        if (stream->fileRealRead > 0) {
++            stream->fileRealRead -= sizeof(SecChar);
++        }
++        return;
++    }
++    if ((stream->flag & SECUREC_PIPE_STREAM_FLAG) != 0) {
++        (void)SECUREC_UN_GETC(ch, stream->pf);
++        return;
++    }
++#else
++    (void)ch; /* To clear lint e438 (last value assigned not used); the compiler will optimize this code away */
++#endif
++}
++
++/*
++ * Unget one char and decrement the character counter
++ */
++SECUREC_INLINE void SecUnGetChar(SecInt ch, SecFileStream *stream, int *counter)
++{
++    *counter = *counter - 1; /* Always minus 1 */
++    if (ch != SECUREC_EOF) {
++        SecUnGetCharImpl(ch, stream);
++    }
++}
++
++/*
++ * Skip space chars, as classified by SecIsSpace
++ */
++SECUREC_INLINE SecInt SecSkipSpaceChar(SecFileStream *stream, int *counter)
++{
++    SecInt ch;
++    do {
++        ch = SecGetChar(stream, counter);
++        if (ch == SECUREC_EOF) {
++            break;
++        }
++    } while (SecIsSpace(ch) != 0);
++    return ch;
++}
++#endif /* INPUT_INL_5D13A042_DC3F_4ED9_A8D1_882811274C27 */
++
+diff --git a/lib/securec/src/memcpy_s.c b/lib/securec/src/memcpy_s.c
+new file mode 100644
+index 000000000..a7fd48748
+--- /dev/null
++++ b/lib/securec/src/memcpy_s.c
+@@ -0,0 +1,555 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: memcpy_s function
++ * Create: 2014-02-25
++ */
++/*
++ * [Standardize-exceptions] Use unsafe function: Portability
++ * [reason] Use unsafe function to implement security function to maintain platform compatibility.
++ * And sufficient input validation is performed before calling ++ */ ++ ++#include "securecutil.h" ++ ++#if SECUREC_WITH_PERFORMANCE_ADDONS ++#ifndef SECUREC_MEMCOPY_THRESHOLD_SIZE ++#define SECUREC_MEMCOPY_THRESHOLD_SIZE 64UL ++#endif ++ ++#define SECUREC_SMALL_MEM_COPY(dest, src, count) do { \ ++ if (SECUREC_ADDR_ALIGNED_8(dest) && SECUREC_ADDR_ALIGNED_8(src)) { \ ++ /* Use struct assignment */ \ ++ switch (count) { \ ++ case 1: \ ++ *(unsigned char *)(dest) = *(const unsigned char *)(src); \ ++ break; \ ++ case 2: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 2); \ ++ break; \ ++ case 3: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 3); \ ++ break; \ ++ case 4: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 4); \ ++ break; \ ++ case 5: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 5); \ ++ break; \ ++ case 6: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 6); \ ++ break; \ ++ case 7: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 7); \ ++ break; \ ++ case 8: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 8); \ ++ break; \ ++ case 9: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 9); \ ++ break; \ ++ case 10: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 10); \ ++ break; \ ++ case 11: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 11); \ ++ break; \ ++ case 12: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 12); \ ++ break; \ ++ case 13: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 13); \ ++ break; \ ++ case 14: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 14); \ ++ break; \ ++ case 15: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 15); \ ++ break; \ ++ case 16: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 16); \ ++ break; \ ++ case 17: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 17); \ ++ break; \ ++ case 18: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 18); \ ++ break; \ ++ case 19: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 19); \ ++ break; \ ++ case 20: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 20); \ ++ break; \ ++ case 21: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 21); \ ++ break; \ ++ case 22: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 22); \ ++ break; \ ++ case 23: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 23); \ ++ break; \ ++ case 24: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 24); \ ++ break; \ ++ case 25: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 25); \ ++ break; \ ++ case 26: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 26); \ ++ break; \ ++ case 27: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 27); \ ++ break; \ ++ case 28: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 28); \ ++ break; \ ++ case 29: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 29); \ ++ break; \ ++ case 30: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 30); \ ++ break; \ ++ case 31: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 31); \ ++ break; \ ++ case 32: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 32); \ ++ break; \ ++ case 33: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 33); \ ++ break; \ ++ case 34: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 34); \ ++ break; \ ++ case 35: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 35); \ ++ break; \ ++ case 36: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 36); \ ++ break; \ ++ case 37: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 37); \ ++ break; \ ++ case 38: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 38); \ ++ break; \ ++ case 39: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 39); \ ++ break; \ ++ case 40: \ ++ 
SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 40); \ ++ break; \ ++ case 41: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 41); \ ++ break; \ ++ case 42: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 42); \ ++ break; \ ++ case 43: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 43); \ ++ break; \ ++ case 44: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 44); \ ++ break; \ ++ case 45: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 45); \ ++ break; \ ++ case 46: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 46); \ ++ break; \ ++ case 47: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 47); \ ++ break; \ ++ case 48: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 48); \ ++ break; \ ++ case 49: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 49); \ ++ break; \ ++ case 50: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 50); \ ++ break; \ ++ case 51: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 51); \ ++ break; \ ++ case 52: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 52); \ ++ break; \ ++ case 53: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 53); \ ++ break; \ ++ case 54: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 54); \ ++ break; \ ++ case 55: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 55); \ ++ break; \ ++ case 56: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 56); \ ++ break; \ ++ case 57: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 57); \ ++ break; \ ++ case 58: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 58); \ ++ break; \ ++ case 59: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 59); \ ++ break; \ ++ case 60: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 60); \ ++ break; \ ++ case 61: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 61); \ ++ break; \ ++ case 62: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 62); \ ++ break; \ ++ case 63: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 63); \ ++ break; \ ++ case 64: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((dest), (src), 64); \ ++ break; \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } /* END switch */ \ ++ } else { \ ++ unsigned char *tmpDest_ = (unsigned char *)(dest); \ ++ const unsigned char *tmpSrc_ = (const unsigned char *)(src); \ ++ switch (count) { \ ++ case 64: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 63: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 62: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 61: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 60: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 59: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 58: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 57: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 56: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 55: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 54: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 53: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 52: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 51: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 50: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 49: \ ++ 
*(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 48: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 47: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 46: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 45: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 44: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 43: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 42: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 41: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 40: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 39: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 38: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 37: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 36: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 35: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 34: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 33: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 32: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 31: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 30: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 29: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 28: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 27: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 26: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 25: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 24: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 23: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 22: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 21: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 20: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 19: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 18: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 17: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 16: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 15: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 14: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 13: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 12: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 11: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 10: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 9: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ 
/* fall-through */ /* FALLTHRU */ \ ++ case 8: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 7: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 6: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 5: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 4: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 3: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 2: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 1: \ ++ *(tmpDest_++) = *(tmpSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } \ ++ } \ ++} SECUREC_WHILE_ZERO ++ ++/* ++ * Performance optimization ++ */ ++#define SECUREC_MEMCPY_OPT(dest, src, count) do { \ ++ if ((count) > SECUREC_MEMCOPY_THRESHOLD_SIZE) { \ ++ SECUREC_MEMCPY_WARP_OPT((dest), (src), (count)); \ ++ } else { \ ++ SECUREC_SMALL_MEM_COPY((dest), (src), (count)); \ ++ } \ ++} SECUREC_WHILE_ZERO ++#endif ++ ++/* ++ * Handling errors ++ */ ++SECUREC_INLINE errno_t SecMemcpyError(void *dest, size_t destMax, const void *src, size_t count) ++{ ++ if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) { ++ SECUREC_ERROR_INVALID_RANGE("memcpy_s"); ++ return ERANGE; ++ } ++ if (dest == NULL || src == NULL) { ++ SECUREC_ERROR_INVALID_PARAMTER("memcpy_s"); ++ if (dest != NULL) { ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, 0, destMax); ++ return EINVAL_AND_RESET; ++ } ++ return EINVAL; ++ } ++ if (count > destMax) { ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, 0, destMax); ++ SECUREC_ERROR_INVALID_RANGE("memcpy_s"); ++ return ERANGE_AND_RESET; ++ } ++ if (SECUREC_MEMORY_IS_OVERLAP(dest, src, count)) { ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, 0, destMax); ++ SECUREC_ERROR_BUFFER_OVERLAP("memcpy_s"); ++ return EOVERLAP_AND_RESET; ++ } ++ /* Count is 0 or dest equal src also ret EOK */ ++ return EOK; ++} ++ ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ /* ++ * The fread API in windows will call memcpy_s and pass 0xffffffff to destMax. ++ * To avoid the failure of fread, we don't check desMax limit. ++ */ ++#define SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count) (SECUREC_LIKELY((count) <= (destMax) && \ ++ (dest) != NULL && (src) != NULL && \ ++ (count) > 0 && SECUREC_MEMORY_NO_OVERLAP((dest), (src), (count)))) ++#else ++#define SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count) (SECUREC_LIKELY((count) <= (destMax) && \ ++ (dest) != NULL && (src) != NULL && (destMax) <= SECUREC_MEM_MAX_LEN && \ ++ (count) > 0 && SECUREC_MEMORY_NO_OVERLAP((dest), (src), (count)))) ++#endif ++ ++/* ++ * ++ * The memcpy_s function copies n characters from the object pointed to by src into the object pointed to by dest ++ * ++ * ++ * dest Destination buffer. ++ * destMax Size of the destination buffer. ++ * src Buffer to copy from. ++ * count Number of characters to copy ++ * ++ * ++ * dest buffer is updated. 
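++ *
++ * A minimal caller-side sketch (illustrative only; buf, src and n are
++ * hypothetical names, not part of this file):
++ *     char buf[32];
++ *     if (memcpy_s(buf, sizeof(buf), src, n) != EOK) {
++ *         handle the failure; on most errors buf has already been zero-filled
++ *     }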
++ * ++ * ++ * EOK Success ++ * EINVAL dest is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * EINVAL_AND_RESET dest != NULL and src is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * ERANGE destMax > SECUREC_MEM_MAX_LEN or destMax is 0 ++ * ERANGE_AND_RESET count > destMax and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * and dest != NULL and src != NULL ++ * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped and ++ * count <= destMax destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN and dest != NULL ++ * and src != NULL and dest != src ++ * ++ * if an error occurred, dest will be filled with 0. ++ * If the source and destination overlap, the behavior of memcpy_s is undefined. ++ * Use memmove_s to handle overlapping regions. ++ */ ++errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count) ++{ ++ if (SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count)) { ++ SECUREC_MEMCPY_WARP_OPT(dest, src, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemcpyError(dest, destMax, src, count); ++} ++ ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(memcpy_s); ++#endif ++ ++#if SECUREC_WITH_PERFORMANCE_ADDONS ++/* ++ * Performance optimization ++ */ ++errno_t memcpy_sOptAsm(void *dest, size_t destMax, const void *src, size_t count) ++{ ++ if (SECUREC_MEMCPY_PARAM_OK(dest, destMax, src, count)) { ++ SECUREC_MEMCPY_OPT(dest, src, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemcpyError(dest, destMax, src, count); ++} ++ ++/* Trim judgement on "destMax <= SECUREC_MEM_MAX_LEN" */ ++errno_t memcpy_sOptTc(void *dest, size_t destMax, const void *src, size_t count) ++{ ++ if (SECUREC_LIKELY(count <= destMax && dest != NULL && src != NULL && \ ++ count > 0 && SECUREC_MEMORY_NO_OVERLAP((dest), (src), (count)))) { ++ SECUREC_MEMCPY_OPT(dest, src, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemcpyError(dest, destMax, src, count); ++} ++#endif ++ +diff --git a/lib/securec/src/memmove_s.c b/lib/securec/src/memmove_s.c +new file mode 100644 +index 000000000..f231f05da +--- /dev/null ++++ b/lib/securec/src/memmove_s.c +@@ -0,0 +1,123 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: memmove_s function ++ * Create: 2014-02-25 ++ */ ++/* ++ * [Standardize-exceptions] Use unsafe function: Portability ++ * [reason] Use unsafe function to implement security function to maintain platform compatibility. 
++ * And sufficient input validation is performed before calling ++ */ ++ ++#include "securecutil.h" ++ ++#ifdef SECUREC_NOT_CALL_LIBC_CORE_API ++/* ++ * Implementing memory data movement ++ */ ++SECUREC_INLINE void SecUtilMemmove(void *dst, const void *src, size_t count) ++{ ++ unsigned char *pDest = (unsigned char *)dst; ++ const unsigned char *pSrc = (const unsigned char *)src; ++ size_t maxCount = count; ++ ++ if (dst <= src || pDest >= (pSrc + maxCount)) { ++ /* ++ * Non-Overlapping Buffers ++ * Copy from lower addresses to higher addresses ++ */ ++ while (maxCount > 0) { ++ --maxCount; ++ *pDest = *pSrc; ++ ++pDest; ++ ++pSrc; ++ } ++ } else { ++ /* ++ * Overlapping Buffers ++ * Copy from higher addresses to lower addresses ++ */ ++ pDest = pDest + maxCount - 1; ++ pSrc = pSrc + maxCount - 1; ++ while (maxCount > 0) { ++ --maxCount; ++ *pDest = *pSrc; ++ --pDest; ++ --pSrc; ++ } ++ } ++} ++#endif ++ ++/* ++ * ++ * The memmove_s function copies count bytes of characters from src to dest. ++ * This function can be assigned correctly when memory overlaps. ++ * ++ * dest Destination object. ++ * destMax Size of the destination buffer. ++ * src Source object. ++ * count Number of characters to copy. ++ * ++ * ++ * dest buffer is updated. ++ * ++ * ++ * EOK Success ++ * EINVAL dest is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * EINVAL_AND_RESET dest != NULL and src is NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * ERANGE destMax > SECUREC_MEM_MAX_LEN or destMax is 0 ++ * ERANGE_AND_RESET count > destMax and dest != NULL and src != NULL and destMax != 0 ++ * and destMax <= SECUREC_MEM_MAX_LEN ++ * ++ * If an error occurred, dest will be filled with 0 when dest and destMax valid. ++ * If some regions of the source area and the destination overlap, memmove_s ++ * ensures that the original source bytes in the overlapping region are copied ++ * before being overwritten. ++ */ ++errno_t memmove_s(void *dest, size_t destMax, const void *src, size_t count) ++{ ++ if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) { ++ SECUREC_ERROR_INVALID_RANGE("memmove_s"); ++ return ERANGE; ++ } ++ if (dest == NULL || src == NULL) { ++ SECUREC_ERROR_INVALID_PARAMTER("memmove_s"); ++ if (dest != NULL) { ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, 0, destMax); ++ return EINVAL_AND_RESET; ++ } ++ return EINVAL; ++ } ++ if (count > destMax) { ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, 0, destMax); ++ SECUREC_ERROR_INVALID_RANGE("memmove_s"); ++ return ERANGE_AND_RESET; ++ } ++ if (dest == src) { ++ return EOK; ++ } ++ ++ if (count > 0) { ++#ifdef SECUREC_NOT_CALL_LIBC_CORE_API ++ SecUtilMemmove(dest, src, count); ++#else ++ /* Use underlying memmove for performance consideration */ ++ (void)memmove(dest, src, count); ++#endif ++ } ++ return EOK; ++} ++ ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(memmove_s); ++#endif ++ +diff --git a/lib/securec/src/memset_s.c b/lib/securec/src/memset_s.c +new file mode 100644 +index 000000000..d9a657fd3 +--- /dev/null ++++ b/lib/securec/src/memset_s.c +@@ -0,0 +1,510 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. 
++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: memset_s function ++ * Create: 2014-02-25 ++ */ ++/* ++ * [Standardize-exceptions] Use unsafe function: Portability ++ * [reason] Use unsafe function to implement security function to maintain platform compatibility. ++ * And sufficient input validation is performed before calling ++ */ ++ ++#include "securecutil.h" ++ ++#define SECUREC_MEMSET_PARAM_OK(dest, destMax, count) (SECUREC_LIKELY((destMax) <= SECUREC_MEM_MAX_LEN && \ ++ (dest) != NULL && (count) <= (destMax))) ++ ++#if SECUREC_WITH_PERFORMANCE_ADDONS ++ ++/* Use union to clear strict-aliasing warning */ ++typedef union { ++ SecStrBuf32 buf32; ++ SecStrBuf31 buf31; ++ SecStrBuf30 buf30; ++ SecStrBuf29 buf29; ++ SecStrBuf28 buf28; ++ SecStrBuf27 buf27; ++ SecStrBuf26 buf26; ++ SecStrBuf25 buf25; ++ SecStrBuf24 buf24; ++ SecStrBuf23 buf23; ++ SecStrBuf22 buf22; ++ SecStrBuf21 buf21; ++ SecStrBuf20 buf20; ++ SecStrBuf19 buf19; ++ SecStrBuf18 buf18; ++ SecStrBuf17 buf17; ++ SecStrBuf16 buf16; ++ SecStrBuf15 buf15; ++ SecStrBuf14 buf14; ++ SecStrBuf13 buf13; ++ SecStrBuf12 buf12; ++ SecStrBuf11 buf11; ++ SecStrBuf10 buf10; ++ SecStrBuf9 buf9; ++ SecStrBuf8 buf8; ++ SecStrBuf7 buf7; ++ SecStrBuf6 buf6; ++ SecStrBuf5 buf5; ++ SecStrBuf4 buf4; ++ SecStrBuf3 buf3; ++ SecStrBuf2 buf2; ++} SecStrBuf32Union; ++/* C standard initializes the first member of the consortium. */ ++static const SecStrBuf32 g_allZero = {{ ++ 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, ++ 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, ++ 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, ++ 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U ++}}; ++static const SecStrBuf32 g_allFF = {{ ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, ++ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF ++}}; ++ ++/* Clear conversion warning strict aliasing" */ ++SECUREC_INLINE const SecStrBuf32Union *SecStrictAliasingCast(const SecStrBuf32 *buf) ++{ ++ return (const SecStrBuf32Union *)buf; ++} ++ ++#ifndef SECUREC_MEMSET_THRESHOLD_SIZE ++#define SECUREC_MEMSET_THRESHOLD_SIZE 32UL ++#endif ++ ++#define SECUREC_UNALIGNED_SET(dest, c, count) do { \ ++ unsigned char *pDest_ = (unsigned char *)(dest); \ ++ switch (count) { \ ++ case 32: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 31: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 30: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 29: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 28: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 27: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 26: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 25: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 24: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 23: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 22: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* 
fall-through */ /* FALLTHRU */ \ ++ case 21: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 20: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 19: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 18: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 17: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 16: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 15: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 14: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 13: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 12: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 11: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 10: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 9: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 8: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 7: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 6: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 5: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 4: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 3: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 2: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 1: \ ++ *(pDest_++) = (unsigned char)(c); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } \ ++} SECUREC_WHILE_ZERO ++ ++#define SECUREC_SET_VALUE_BY_STRUCT(dest, dataName, n) do { \ ++ *(SecStrBuf##n *)(dest) = *(const SecStrBuf##n *)(&((SecStrictAliasingCast(&(dataName)))->buf##n)); \ ++} SECUREC_WHILE_ZERO ++ ++#define SECUREC_ALIGNED_SET_OPT_ZERO_FF(dest, c, count) do { \ ++ switch (c) { \ ++ case 0: \ ++ switch (count) { \ ++ case 1: \ ++ *(unsigned char *)(dest) = (unsigned char)0; \ ++ break; \ ++ case 2: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 2); \ ++ break; \ ++ case 3: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 3); \ ++ break; \ ++ case 4: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 4); \ ++ break; \ ++ case 5: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 5); \ ++ break; \ ++ case 6: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 6); \ ++ break; \ ++ case 7: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 7); \ ++ break; \ ++ case 8: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 8); \ ++ break; \ ++ case 9: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 9); \ ++ break; \ ++ case 10: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 10); \ ++ break; \ ++ case 11: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 11); \ ++ break; \ ++ case 12: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 12); \ ++ break; \ ++ case 13: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 13); \ ++ break; \ ++ case 14: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 14); \ ++ break; \ ++ case 15: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 
15); \ ++ break; \ ++ case 16: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 16); \ ++ break; \ ++ case 17: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 17); \ ++ break; \ ++ case 18: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 18); \ ++ break; \ ++ case 19: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 19); \ ++ break; \ ++ case 20: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 20); \ ++ break; \ ++ case 21: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 21); \ ++ break; \ ++ case 22: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 22); \ ++ break; \ ++ case 23: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 23); \ ++ break; \ ++ case 24: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 24); \ ++ break; \ ++ case 25: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 25); \ ++ break; \ ++ case 26: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 26); \ ++ break; \ ++ case 27: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 27); \ ++ break; \ ++ case 28: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 28); \ ++ break; \ ++ case 29: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 29); \ ++ break; \ ++ case 30: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 30); \ ++ break; \ ++ case 31: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 31); \ ++ break; \ ++ case 32: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allZero, 32); \ ++ break; \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } \ ++ break; \ ++ case 0xFF: \ ++ switch (count) { \ ++ case 1: \ ++ *(unsigned char *)(dest) = (unsigned char)0xffU; \ ++ break; \ ++ case 2: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 2); \ ++ break; \ ++ case 3: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 3); \ ++ break; \ ++ case 4: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 4); \ ++ break; \ ++ case 5: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 5); \ ++ break; \ ++ case 6: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 6); \ ++ break; \ ++ case 7: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 7); \ ++ break; \ ++ case 8: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 8); \ ++ break; \ ++ case 9: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 9); \ ++ break; \ ++ case 10: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 10); \ ++ break; \ ++ case 11: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 11); \ ++ break; \ ++ case 12: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 12); \ ++ break; \ ++ case 13: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 13); \ ++ break; \ ++ case 14: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 14); \ ++ break; \ ++ case 15: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 15); \ ++ break; \ ++ case 16: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 16); \ ++ break; \ ++ case 17: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 17); \ ++ break; \ ++ case 18: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 18); \ ++ break; \ ++ case 19: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 19); \ ++ break; \ ++ case 20: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 20); \ ++ break; \ ++ case 21: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 21); \ ++ break; \ ++ case 22: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 22); \ ++ break; \ ++ case 23: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 23); \ ++ break; \ ++ case 24: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 24); \ ++ break; \ ++ case 25: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 25); \ ++ break; \ ++ case 26: 
\ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 26); \ ++ break; \ ++ case 27: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 27); \ ++ break; \ ++ case 28: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 28); \ ++ break; \ ++ case 29: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 29); \ ++ break; \ ++ case 30: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 30); \ ++ break; \ ++ case 31: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 31); \ ++ break; \ ++ case 32: \ ++ SECUREC_SET_VALUE_BY_STRUCT((dest), g_allFF, 32); \ ++ break; \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } \ ++ break; \ ++ default: \ ++ SECUREC_UNALIGNED_SET((dest), (c), (count)); \ ++ break; \ ++ } /* END switch */ \ ++} SECUREC_WHILE_ZERO ++ ++#define SECUREC_SMALL_MEM_SET(dest, c, count) do { \ ++ if (SECUREC_ADDR_ALIGNED_8((dest))) { \ ++ SECUREC_ALIGNED_SET_OPT_ZERO_FF((dest), (c), (count)); \ ++ } else { \ ++ SECUREC_UNALIGNED_SET((dest), (c), (count)); \ ++ } \ ++} SECUREC_WHILE_ZERO ++ ++/* ++ * Performance optimization ++ */ ++#define SECUREC_MEMSET_OPT(dest, c, count) do { \ ++ if ((count) > SECUREC_MEMSET_THRESHOLD_SIZE) { \ ++ SECUREC_MEMSET_PREVENT_DSE((dest), (c), (count)); \ ++ } else { \ ++ SECUREC_SMALL_MEM_SET((dest), (c), (count)); \ ++ } \ ++} SECUREC_WHILE_ZERO ++#endif ++ ++/* ++ * Handling errors ++ */ ++SECUREC_INLINE errno_t SecMemsetError(void *dest, size_t destMax, int c) ++{ ++ /* Check destMax is 0 compatible with _sp macro */ ++ if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN) { ++ SECUREC_ERROR_INVALID_RANGE("memset_s"); ++ return ERANGE; ++ } ++ if (dest == NULL) { ++ SECUREC_ERROR_INVALID_PARAMTER("memset_s"); ++ return EINVAL; ++ } ++ SECUREC_MEMSET_PREVENT_DSE(dest, c, destMax); /* Set entire buffer to value c */ ++ SECUREC_ERROR_INVALID_RANGE("memset_s"); ++ return ERANGE_AND_RESET; ++} ++ ++/* ++ * ++ * The memset_s function copies the value of c (converted to an unsigned char) ++ * into each of the first count characters of the object pointed to by dest. ++ * ++ * ++ * dest Pointer to destination. ++ * destMax The size of the buffer. ++ * c Character to set. ++ * count Number of characters. ++ * ++ * ++ * dest buffer is updated. 
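++ *
++ * A minimal caller-side sketch (illustrative only; key is a hypothetical
++ * buffer), e.g. scrubbing sensitive data before releasing it:
++ *     unsigned char key[16];
++ *     (void)memset_s(key, sizeof(key), 0, sizeof(key));
++ * The SECUREC_MEMSET_PREVENT_DSE wrapper used internally is intended to keep
++ * such a final write from being optimized away as a dead store.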
++ * ++ * ++ * EOK Success ++ * EINVAL dest == NULL and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN ++ * ERANGE destMax > SECUREC_MEM_MAX_LEN or (destMax is 0 and count > destMax) ++ * ERANGE_AND_RESET count > destMax and destMax != 0 and destMax <= SECUREC_MEM_MAX_LEN and dest != NULL ++ * ++ * if return ERANGE_AND_RESET then fill dest to c ,fill length is destMax ++ */ ++errno_t memset_s(void *dest, size_t destMax, int c, size_t count) ++{ ++ if (SECUREC_MEMSET_PARAM_OK(dest, destMax, count)) { ++ SECUREC_MEMSET_PREVENT_DSE(dest, c, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemsetError(dest, destMax, c); ++} ++ ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(memset_s); ++#endif ++ ++#if SECUREC_WITH_PERFORMANCE_ADDONS ++/* ++ * Performance optimization ++ */ ++errno_t memset_sOptAsm(void *dest, size_t destMax, int c, size_t count) ++{ ++ if (SECUREC_MEMSET_PARAM_OK(dest, destMax, count)) { ++ SECUREC_MEMSET_OPT(dest, c, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemsetError(dest, destMax, c); ++} ++ ++/* ++ * Performance optimization, trim judgement on "destMax <= SECUREC_MEM_MAX_LEN" ++ */ ++errno_t memset_sOptTc(void *dest, size_t destMax, int c, size_t count) ++{ ++ if (SECUREC_LIKELY(count <= destMax && dest != NULL)) { ++ SECUREC_MEMSET_OPT(dest, c, count); ++ return EOK; ++ } ++ /* Meet some runtime violation, return error code */ ++ return SecMemsetError(dest, destMax, c); ++} ++#endif ++ +diff --git a/lib/securec/src/output.inl b/lib/securec/src/output.inl +new file mode 100644 +index 000000000..9392efaaf +--- /dev/null ++++ b/lib/securec/src/output.inl +@@ -0,0 +1,1720 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Used by secureprintoutput_a.c and secureprintoutput_w.c to include. ++ * This file provides a template function for ANSI and UNICODE compiling ++ * by different type definition. The functions of SecOutputS or ++ * SecOutputSW provides internal implementation for printf family API, such as sprintf, swprintf_s. ++ * Create: 2014-02-25 ++ * Notes: see www.cplusplus.com/reference/cstdio/printf/ ++ */ ++/* ++ * [Standardize-exceptions] Use unsafe function: Portability ++ * [reason] Use unsafe function to implement security function to maintain platform compatibility. 
++ * And sufficient input validation is performed before calling ++ */ ++#ifndef OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 ++#define OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 ++ ++#ifndef SECUREC_ENABLE_SPRINTF_LONG_DOUBLE ++/* Some compilers do not support long double */ ++#define SECUREC_ENABLE_SPRINTF_LONG_DOUBLE 1 ++#endif ++ ++#define SECUREC_NULL_STRING_SIZE 8 ++#define SECUREC_STATE_TABLE_SIZE 337 ++ ++#if defined(SECUREC_VXWORKS_VERSION_5_4) && !defined(SECUREC_ON_64BITS) ++#define SECUREC_DIV_QUOTIENT_OCTAL(val64) ((val64) >> 3ULL) ++#define SECUREC_DIV_RESIDUE_OCTAL(val64) ((val64) & 7ULL) ++ ++#define SECUREC_DIV_QUOTIENT_HEX(val64) ((val64) >> 4ULL) ++#define SECUREC_DIV_RESIDUE_HEX(val64) ((val64) & 0xfULL) ++#endif ++ ++#define SECUREC_RADIX_OCTAL 8U ++#define SECUREC_RADIX_DECIMAL 10U ++#define SECUREC_RADIX_HEX 16U ++#define SECUREC_PREFIX_LEN 2 ++/* Size include '+' and '\0' */ ++#define SECUREC_FLOAT_BUF_EXT 2 ++ ++/* Sign extend or Zero-extend */ ++#define SECUREC_GET_LONG_FROM_ARG(attr) ((((attr).flags & SECUREC_FLAG_SIGNED) != 0) ? \ ++ (SecInt64)(long)va_arg(argList, long) : \ ++ (SecInt64)(unsigned long)va_arg(argList, long)) ++ ++/* Sign extend or Zero-extend */ ++#define SECUREC_GET_CHAR_FROM_ARG(attr) ((((attr).flags & SECUREC_FLAG_SIGNED) != 0) ? \ ++ SecUpdateNegativeChar(&(attr), ((char)va_arg(argList, int))) : \ ++ (SecInt64)(unsigned char)va_arg(argList, int)) ++ ++/* Sign extend or Zero-extend */ ++#define SECUREC_GET_SHORT_FROM_ARG(attr) ((((attr).flags & SECUREC_FLAG_SIGNED) != 0) ? \ ++ (SecInt64)(short)va_arg(argList, int) : \ ++ (SecInt64)(unsigned short)va_arg(argList, int)) ++ ++/* Sign extend or Zero-extend */ ++#define SECUREC_GET_INT_FROM_ARG(attr) ((((attr).flags & SECUREC_FLAG_SIGNED) != 0) ? \ ++ (SecInt64)(int)va_arg(argList, int) : \ ++ (SecInt64)(unsigned int)va_arg(argList, int)) ++ ++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT ++/* Sign extend or Zero-extend. No suitable macros were found to handle the branch */ ++#define SECUREC_GET_SIZE_FROM_ARG(attr) ((((attr).flags & SECUREC_FLAG_SIGNED) != 0) ? \ ++ ((SecIsSameSize(sizeof(size_t), sizeof(long)) != 0) ? (SecInt64)(long)va_arg(argList, long) : \ ++ ((SecIsSameSize(sizeof(size_t), sizeof(long long)) != 0) ? 
(SecInt64)(long long)va_arg(argList, long long) : \ ++ (SecInt64)(int)va_arg(argList, int))) : \ ++ (SecInt64)(size_t)va_arg(argList, size_t)) ++#endif ++ ++/* Format output buffer pointer and available size */ ++typedef struct { ++ int count; ++ SecChar *cur; ++} SecPrintfStream; ++ ++typedef union { ++ /* Integer formatting refers to the end of the buffer, plus 1 to prevent tool alarms */ ++ char str[SECUREC_BUFFER_SIZE + 1]; ++#if SECUREC_HAVE_WCHART ++ wchar_t wStr[SECUREC_WCHAR_BUFFER_SIZE]; /* Just for %lc */ ++#endif ++} SecBuffer; ++ ++typedef union { ++ char *str; /* Not a null terminated string */ ++#if SECUREC_HAVE_WCHART ++ wchar_t *wStr; ++#endif ++} SecFormatBuf; ++ ++typedef struct { ++ const char *digits; /* Point to the hexadecimal subset */ ++ SecFormatBuf text; /* Point to formatted string */ ++ int textLen; /* Length of the text */ ++ int textIsWide; /* Flag for text is wide chars ; 0 is not wide char */ ++ unsigned int radix; /* Use for output number , default set to 10 */ ++ unsigned int flags; ++ int fldWidth; ++ int precision; ++ int dynWidth; /* %* 1 width from variable parameter ;0 not */ ++ int dynPrecision; /* %.* 1 precision from variable parameter ;0 not */ ++ int padding; /* Padding len */ ++ int prefixLen; /* Length of prefix, 0 or 1 or 2 */ ++ SecChar prefix[SECUREC_PREFIX_LEN]; /* Prefix is 0 or 0x */ ++ SecBuffer buffer; ++} SecFormatAttr; ++ ++#if SECUREC_ENABLE_SPRINTF_FLOAT ++#ifdef SECUREC_STACK_SIZE_LESS_THAN_1K ++#define SECUREC_FMT_STR_LEN 8 ++#else ++#define SECUREC_FMT_STR_LEN 16 ++#endif ++typedef struct { ++ char buffer[SECUREC_FMT_STR_LEN]; ++ char *fmtStr; /* Initialization must point to buffer */ ++ char *allocatedFmtStr; /* Initialization must be NULL to store allocated point */ ++ char *floatBuffer; /* Use heap memory if the SecFormatAttr.buffer is not enough */ ++ int bufferSize; /* The size of floatBuffer */ ++} SecFloatAdapt; ++#endif ++ ++/* Use 20 to Align the data */ ++#define SECUREC_DIGITS_BUF_SIZE 20 ++/* The serial number of 'x' or 'X' is 16 */ ++#define SECUREC_NUMBER_OF_X 16 ++/* Some systems can not use pointers to point to string literals, but can use string arrays. */ ++/* For example, when handling code under uboot, there is a problem with the pointer */ ++static const char g_itoaUpperDigits[SECUREC_DIGITS_BUF_SIZE] = "0123456789ABCDEFX"; ++static const char g_itoaLowerDigits[SECUREC_DIGITS_BUF_SIZE] = "0123456789abcdefx"; ++ ++#if SECUREC_ENABLE_SPRINTF_FLOAT ++/* Call system sprintf to format float value */ ++SECUREC_INLINE int SecFormatFloat(char *strDest, const char *format, ...) ++{ ++ int ret; /* If initialization causes e838 */ ++ va_list argList; ++ ++ va_start(argList, format); ++ SECUREC_MASK_VSPRINTF_WARNING ++ ret = vsprintf(strDest, format, argList); ++ SECUREC_END_MASK_VSPRINTF_WARNING ++ va_end(argList); ++ (void)argList; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ ++ return ret; ++} ++ ++#if defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && SECUREC_ENABLE_SPRINTF_LONG_DOUBLE ++/* Out put long double value to dest */ ++SECUREC_INLINE void SecFormatLongDouble(SecFormatAttr *attr, const SecFloatAdapt *floatAdapt, long double ldValue) ++{ ++ int fldWidth = (((attr->flags & SECUREC_FLAG_LEFT) != 0) ? 
(-attr->fldWidth) : attr->fldWidth); ++ if (attr->dynWidth != 0 && attr->dynPrecision != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, fldWidth, attr->precision, ldValue); ++ } else if (attr->dynWidth != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, fldWidth, ldValue); ++ } else if (attr->dynPrecision != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, attr->precision, ldValue); ++ } else { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, ldValue); ++ } ++ if (attr->textLen < 0 || attr->textLen >= floatAdapt->bufferSize) { ++ attr->textLen = 0; ++ } ++} ++#endif ++ ++/* Out put double value to dest */ ++SECUREC_INLINE void SecFormatDouble(SecFormatAttr *attr, const SecFloatAdapt *floatAdapt, double dValue) ++{ ++ int fldWidth = (((attr->flags & SECUREC_FLAG_LEFT) != 0) ? (-attr->fldWidth) : attr->fldWidth); ++ if (attr->dynWidth != 0 && attr->dynPrecision != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, fldWidth, attr->precision, dValue); ++ } else if (attr->dynWidth != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, fldWidth, dValue); ++ } else if (attr->dynPrecision != 0) { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, attr->precision, dValue); ++ } else { ++ attr->textLen = SecFormatFloat(attr->text.str, floatAdapt->fmtStr, dValue); ++ } ++ if (attr->textLen < 0 || attr->textLen >= floatAdapt->bufferSize) { ++ attr->textLen = 0; ++ } ++} ++#endif ++ ++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT ++/* To clear e506 warning */ ++SECUREC_INLINE int SecIsSameSize(size_t sizeA, size_t sizeB) ++{ ++ return (int)(sizeA == sizeB); ++} ++#endif ++ ++#ifndef SECUREC_ON_64BITS ++/* ++ * Compiler Optimized Division 8. ++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber32ToOctalString(SecUnsignedInt32 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt32 val32 = number; ++ do { ++ --attr->text.str; ++ /* Just use lowerDigits for 0 - 9 */ ++ *(attr->text.str) = g_itoaLowerDigits[val32 % SECUREC_RADIX_OCTAL]; ++ val32 /= SECUREC_RADIX_OCTAL; ++ } while (val32 != 0); ++} ++ ++#ifdef _AIX ++/* ++ * Compiler Optimized Division 10. ++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber32ToDecString(SecUnsignedInt32 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt32 val32 = number; ++ do { ++ --attr->text.str; ++ /* Just use lowerDigits for 0 - 9 */ ++ *(attr->text.str) = g_itoaLowerDigits[val32 % SECUREC_RADIX_DECIMAL]; ++ val32 /= SECUREC_RADIX_DECIMAL; ++ } while (val32 != 0); ++} ++#endif ++/* ++ * Compiler Optimized Division 16. 
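++ * For example, val32 = 0x2f stores 'f' first and then '2' (assuming the lowercase
++ * digit table is selected), so text.str is left pointing at the two characters
++ * "2f" just below the buffer end; the buffer is filled backwards.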
++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber32ToHexString(SecUnsignedInt32 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt32 val32 = number; ++ do { ++ --attr->text.str; ++ *(attr->text.str) = attr->digits[val32 % SECUREC_RADIX_HEX]; ++ val32 /= SECUREC_RADIX_HEX; ++ } while (val32 != 0); ++} ++ ++#ifndef _AIX ++/* Use fast div 10 */ ++SECUREC_INLINE void SecNumber32ToDecStringFast(SecUnsignedInt32 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt32 val32 = number; ++ do { ++ SecUnsignedInt32 quotient; ++ SecUnsignedInt32 remain; ++ --attr->text.str; ++ *(attr->text.str) = g_itoaLowerDigits[val32 % SECUREC_RADIX_DECIMAL]; ++ quotient = (val32 >> 1U) + (val32 >> 2U); /* Fast div magic 2 */ ++ quotient = quotient + (quotient >> 4U); /* Fast div magic 4 */ ++ quotient = quotient + (quotient >> 8U); /* Fast div magic 8 */ ++ quotient = quotient + (quotient >> 16U); /* Fast div magic 16 */ ++ quotient = quotient >> 3U; /* Fast div magic 3 */ ++ remain = val32 - SECUREC_MUL_TEN(quotient); ++ val32 = (remain > 9U) ? (quotient + 1U) : quotient; /* Fast div magic 9 */ ++ } while (val32 != 0); ++} ++#endif ++ ++SECUREC_INLINE void SecNumber32ToString(SecUnsignedInt32 number, SecFormatAttr *attr) ++{ ++ switch (attr->radix) { ++ case SECUREC_RADIX_HEX: ++ SecNumber32ToHexString(number, attr); ++ break; ++ case SECUREC_RADIX_OCTAL: ++ SecNumber32ToOctalString(number, attr); ++ break; ++ case SECUREC_RADIX_DECIMAL: ++#ifdef _AIX ++ /* The compiler will optimize div 10 */ ++ SecNumber32ToDecString(number, attr); ++#else ++ SecNumber32ToDecStringFast(number, attr); ++#endif ++ break; ++ default: ++ /* Do nothing */ ++ break; ++ } ++} ++#endif ++ ++#if defined(SECUREC_USE_SPECIAL_DIV64) || (defined(SECUREC_VXWORKS_VERSION_5_4) && !defined(SECUREC_ON_64BITS)) ++/* ++ * This function just to clear warning, on sume vxworks compiler shift 32 bit make warnings ++ */ ++SECUREC_INLINE SecUnsignedInt64 SecU64Shr32(SecUnsignedInt64 number) ++{ ++ return (((number) >> 16U) >> 16U); /* Two shifts of 16 bits to realize shifts of 32 bits */ ++} ++/* ++ * Fast divide by 10 algorithm. 
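++ * A sketch of why the magic constant works: 0xcccccccccccccccd == ceil(2^67 / 10),
++ * so for any 64-bit n, floor(n / 10) == (high 64 bits of n * 0xcccccccccccccccd) >> 3.
++ * E.g. n = 42: high64(42 * 0xcccccccccccccccd) == 33, and 33 >> 3 == 4 == 42 / 10.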
++ * Calculation divisor multiply 0xcccccccccccccccdULL, resultHi64 >> 3 as quotient ++ */ ++SECUREC_INLINE void SecU64Div10(SecUnsignedInt64 divisor, SecUnsignedInt64 *quotient, SecUnsignedInt32 *residue) ++{ ++ SecUnsignedInt64 mask = 0xffffffffULL; /* Use 0xffffffffULL as 32 bit mask */ ++ SecUnsignedInt64 magicHi = 0xccccccccULL; /* Fast divide 10 magic numbers high 32bit 0xccccccccULL */ ++ SecUnsignedInt64 magicLow = 0xcccccccdULL; /* Fast divide 10 magic numbers low 32bit 0xcccccccdULL */ ++ SecUnsignedInt64 divisorHi = (SecUnsignedInt64)(SecU64Shr32(divisor)); /* High 32 bit use */ ++ SecUnsignedInt64 divisorLow = (SecUnsignedInt64)(divisor & mask); /* Low 32 bit mask */ ++ SecUnsignedInt64 factorHi = divisorHi * magicHi; ++ SecUnsignedInt64 factorLow1 = divisorHi * magicLow; ++ SecUnsignedInt64 factorLow2 = divisorLow * magicHi; ++ SecUnsignedInt64 factorLow3 = divisorLow * magicLow; ++ SecUnsignedInt64 carry = (factorLow1 & mask) + (factorLow2 & mask) + SecU64Shr32(factorLow3); ++ SecUnsignedInt64 resultHi64 = factorHi + SecU64Shr32(factorLow1) + SecU64Shr32(factorLow2) + SecU64Shr32(carry); ++ ++ *quotient = resultHi64 >> 3U; /* Fast divide 10 magic numbers 3 */ ++ *residue = (SecUnsignedInt32)(divisor - ((*quotient) * 10)); /* Quotient mul 10 */ ++ return; ++} ++#if defined(SECUREC_VXWORKS_VERSION_5_4) && !defined(SECUREC_ON_64BITS) ++/* ++ * Divide function for VXWORKS ++ */ ++SECUREC_INLINE int SecU64Div32(SecUnsignedInt64 divisor, SecUnsignedInt32 radix, ++ SecUnsignedInt64 *quotient, SecUnsignedInt32 *residue) ++{ ++ switch (radix) { ++ case SECUREC_RADIX_DECIMAL: ++ SecU64Div10(divisor, quotient, residue); ++ break; ++ case SECUREC_RADIX_HEX: ++ *quotient = SECUREC_DIV_QUOTIENT_HEX(divisor); ++ *residue = (SecUnsignedInt32)SECUREC_DIV_RESIDUE_HEX(divisor); ++ break; ++ case SECUREC_RADIX_OCTAL: ++ *quotient = SECUREC_DIV_QUOTIENT_OCTAL(divisor); ++ *residue = (SecUnsignedInt32)SECUREC_DIV_RESIDUE_OCTAL(divisor); ++ break; ++ default: ++ return -1; /* This does not happen in the current file */ ++ } ++ return 0; ++} ++SECUREC_INLINE void SecNumber64ToStringSpecial(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt64 val64 = number; ++ do { ++ SecUnsignedInt32 digit = 0; /* Ascii value of digit */ ++ SecUnsignedInt64 quotient = 0; ++ if (SecU64Div32(val64, (SecUnsignedInt32)attr->radix, "ient, &digit) != 0) { ++ /* Just break, when enter this function, no error is returned */ ++ break; ++ } ++ --attr->text.str; ++ *(attr->text.str) = attr->digits[digit]; ++ val64 = quotient; ++ } while (val64 != 0); ++} ++#endif ++#endif ++ ++#if defined(SECUREC_ON_64BITS) || !defined(SECUREC_VXWORKS_VERSION_5_4) ++#if defined(SECUREC_USE_SPECIAL_DIV64) ++/* The compiler does not provide 64 bit division problems */ ++SECUREC_INLINE void SecNumber64ToDecString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt64 val64 = number; ++ do { ++ SecUnsignedInt64 quotient = 0; ++ SecUnsignedInt32 digit = 0; ++ SecU64Div10(val64, "ient, &digit); ++ --attr->text.str; ++ /* Just use lowerDigits for 0 - 9 */ ++ *(attr->text.str) = g_itoaLowerDigits[digit]; ++ val64 = quotient; ++ } while (val64 != 0); ++} ++#else ++/* ++ * Compiler Optimized Division 10. 
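++ * ("Compiler optimized" because the divisor is a constant: a typical 64-bit compiler
++ * strength-reduces val64 / 10 and val64 % 10 into the same kind of multiply-high-and-shift
++ * sequence that SecU64Div10 above codes by hand, so no runtime divide instruction is needed.)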
++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber64ToDecString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt64 val64 = number; ++ do { ++ --attr->text.str; ++ /* Just use lowerDigits for 0 - 9 */ ++ *(attr->text.str) = g_itoaLowerDigits[val64 % SECUREC_RADIX_DECIMAL]; ++ val64 /= SECUREC_RADIX_DECIMAL; ++ } while (val64 != 0); ++} ++#endif ++ ++/* ++ * Compiler Optimized Division 8. ++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber64ToOctalString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt64 val64 = number; ++ do { ++ --attr->text.str; ++ /* Just use lowerDigits for 0 - 9 */ ++ *(attr->text.str) = g_itoaLowerDigits[val64 % SECUREC_RADIX_OCTAL]; ++ val64 /= SECUREC_RADIX_OCTAL; ++ } while (val64 != 0); ++} ++/* ++ * Compiler Optimized Division 16. ++ * The text.str point to buffer end, must be Large enough ++ */ ++SECUREC_INLINE void SecNumber64ToHexString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ SecUnsignedInt64 val64 = number; ++ do { ++ --attr->text.str; ++ *(attr->text.str) = attr->digits[val64 % SECUREC_RADIX_HEX]; ++ val64 /= SECUREC_RADIX_HEX; ++ } while (val64 != 0); ++} ++ ++SECUREC_INLINE void SecNumber64ToString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++ switch (attr->radix) { ++ /* The compiler will optimize div 10 */ ++ case SECUREC_RADIX_DECIMAL: ++ SecNumber64ToDecString(number, attr); ++ break; ++ case SECUREC_RADIX_OCTAL: ++ SecNumber64ToOctalString(number, attr); ++ break; ++ case SECUREC_RADIX_HEX: ++ SecNumber64ToHexString(number, attr); ++ break; ++ default: ++ /* Do nothing */ ++ break; ++ } ++} ++#endif ++ ++/* ++ * Converting integers to string ++ */ ++SECUREC_INLINE void SecNumberToString(SecUnsignedInt64 number, SecFormatAttr *attr) ++{ ++#ifdef SECUREC_ON_64BITS ++ SecNumber64ToString(number, attr); ++#else /* For 32 bits system */ ++ if (number <= 0xffffffffUL) { /* Use 0xffffffffUL to check if the value is in the 32-bit range */ ++ /* In most case, the value to be converted is small value */ ++ SecUnsignedInt32 n32Tmp = (SecUnsignedInt32)number; ++ SecNumber32ToString(n32Tmp, attr); ++ } else { ++ /* The value to be converted is greater than 4G */ ++#if defined(SECUREC_VXWORKS_VERSION_5_4) ++ SecNumber64ToStringSpecial(number, attr); ++#else ++ SecNumber64ToString(number, attr); ++#endif ++ } ++#endif ++} ++ ++SECUREC_INLINE int SecIsNumberNeedTo32Bit(const SecFormatAttr *attr) ++{ ++ return (int)(((attr->flags & SECUREC_FLAG_I64) == 0) && ++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT ++ ((attr->flags & SECUREC_FLAG_INTMAX) == 0) && ++#endif ++#ifdef SECUREC_ON_64BITS ++ ((attr->flags & SECUREC_FLAG_PTRDIFF) == 0) && ++ ((attr->flags & SECUREC_FLAG_SIZE) == 0) && ++#if !defined(SECUREC_COMPATIBLE_WIN_FORMAT) /* on window 64 system sizeof long is 32bit */ ++ ((attr->flags & SECUREC_FLAG_LONG) == 0) && ++#endif ++#endif ++ ((attr->flags & SECUREC_FLAG_LONGLONG) == 0)); ++} ++ ++SECUREC_INLINE void SecNumberToBuffer(SecFormatAttr *attr, SecInt64 num64) ++{ ++ SecUnsignedInt64 number; ++ /* Check for negative; copy into number */ ++ if ((attr->flags & SECUREC_FLAG_SIGNED) != 0 && num64 < 0) { ++ number = (SecUnsignedInt64)(0 - (SecUnsignedInt64)num64); /* Wrap with unsigned int64 numbers */ ++ attr->flags |= SECUREC_FLAG_NEGATIVE; ++ } else { ++ number = (SecUnsignedInt64)num64; ++ } ++ if (SecIsNumberNeedTo32Bit(attr) != 0) { ++ number = (number & (SecUnsignedInt64)0xffffffffUL); /* Use 0xffffffff as 32 bit 
mask */
++
++    /* The text.str must point to buffer.str; this pointer is used outside the function */
++    attr->text.str = &attr->buffer.str[SECUREC_BUFFER_SIZE];
++
++    if (number == 0) {
++        /* Turn off hex prefix default, and textLen is zero */
++        attr->prefixLen = 0;
++        attr->textLen = 0;
++        return;
++    }
++
++    /* Convert integer to string. It must be invoked when number > 0, otherwise the following logic is incorrect */
++    SecNumberToString(number, attr);
++    /* Compute length of number, text.str must be in buffer.str */
++    attr->textLen = (int)(size_t)((char *)&attr->buffer.str[SECUREC_BUFFER_SIZE] - attr->text.str);
++}
++
++/*
++ * Write one character to dest buffer
++ */
++SECUREC_INLINE void SecWriteChar(SecPrintfStream *stream, SecChar ch, int *charsOut)
++{
++    /* Count must be reduced first, in order to identify insufficient length */
++    --stream->count;
++    if (stream->count >= 0) {
++        *(stream->cur) = ch;
++        ++stream->cur;
++        *charsOut = *charsOut + 1;
++        return;
++    }
++    /* Not enough length */
++    *charsOut = -1;
++}
++
++/*
++* Write multiple identical characters.
++*/
++SECUREC_INLINE void SecWriteMultiChar(SecPrintfStream *stream, SecChar ch, int num, int *charsOut)
++{
++    int count;
++    for (count = num; count > 0; --count) {
++        --stream->count; /* count may be negative, indicating insufficient space */
++        if (stream->count < 0) {
++            *charsOut = -1;
++            return;
++        }
++        *(stream->cur) = ch;
++        ++stream->cur;
++    }
++    *charsOut = *charsOut + num;
++}
++
++/*
++* Write string function; where this function is called, make sure that len is greater than 0
++*/
++SECUREC_INLINE void SecWriteString(SecPrintfStream *stream, const SecChar *str, int len, int *charsOut)
++{
++    const SecChar *tmp = str;
++    int count;
++    for (count = len; count > 0; --count) {
++        --stream->count; /* count may be negative, indicating insufficient space */
++        if (stream->count < 0) {
++            *charsOut = -1;
++            return;
++        }
++        *(stream->cur) = *tmp;
++        ++stream->cur;
++        ++tmp;
++    }
++    *charsOut = *charsOut + len;
++}
++
++/* Use a loop to copy a char or wchar_t string */
++SECUREC_INLINE void SecWriteStringByLoop(SecPrintfStream *stream, const SecChar *str, int len)
++{
++    int i;
++    const SecChar *tmp = str;
++    for (i = 0; i < len; ++i) {
++        *stream->cur = *tmp;
++        ++stream->cur;
++        ++tmp;
++    }
++    stream->count -= len;
++}
++
++SECUREC_INLINE void SecWriteStringOpt(SecPrintfStream *stream, const SecChar *str, int len)
++{
++    if (len < 12) { /* Performance optimization for mobile number length 12 */
++        SecWriteStringByLoop(stream, str, len);
++    } else {
++        size_t count = (size_t)(unsigned int)len * sizeof(SecChar);
++        SECUREC_MEMCPY_WARP_OPT(stream->cur, str, count);
++        stream->cur += len;
++        stream->count -= len;
++    }
++}
++
++/*
++ * Return whether the buffer length is enough
++ * The count variable can be reduced to 0, and the external function complements the \0 terminator.
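++ * For example, count == 3 with needLen == 3 reports enough; the fast path then leaves
++ * count == 0, SecPutZeroChar later fails to append '\0', and the caller reports truncation.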
++ */ ++SECUREC_INLINE int SecIsStreamBufEnough(const SecPrintfStream *stream, int needLen) ++{ ++ return (int)(stream->count >= needLen); ++} ++ ++/* Write text string */ ++SECUREC_INLINE void SecWriteTextOpt(SecPrintfStream *stream, const SecChar *str, int len, int *charsOut) ++{ ++ if (SecIsStreamBufEnough(stream, len) != 0) { ++ SecWriteStringOpt(stream, str, len); ++ *charsOut += len; ++ } else { ++ SecWriteString(stream, str, len, charsOut); ++ } ++} ++ ++/* Write left padding */ ++SECUREC_INLINE void SecWriteLeftPadding(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ if ((attr->flags & (SECUREC_FLAG_LEFT | SECUREC_FLAG_LEADZERO)) == 0 && attr->padding > 0) { ++ /* Pad on left with blanks */ ++ SecWriteMultiChar(stream, SECUREC_CHAR(' '), attr->padding, charsOut); ++ } ++} ++ ++/* Write prefix */ ++SECUREC_INLINE void SecWritePrefix(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ if (attr->prefixLen > 0) { ++ SecWriteString(stream, attr->prefix, attr->prefixLen, charsOut); ++ } ++} ++ ++/* Write leading zeros */ ++SECUREC_INLINE void SecWriteLeadingZero(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ if ((attr->flags & SECUREC_FLAG_LEADZERO) != 0 && (attr->flags & SECUREC_FLAG_LEFT) == 0 && ++ attr->padding > 0) { ++ SecWriteMultiChar(stream, SECUREC_CHAR('0'), attr->padding, charsOut); ++ } ++} ++ ++/* Write right padding */ ++SECUREC_INLINE void SecWriteRightPadding(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ if (*charsOut >= 0 && (attr->flags & SECUREC_FLAG_LEFT) != 0 && attr->padding > 0) { ++ /* Pad on right with blanks */ ++ SecWriteMultiChar(stream, SECUREC_CHAR(' '), attr->padding, charsOut); ++ } ++} ++ ++#ifdef SECUREC_FOR_WCHAR ++#define SECUREC_TEXT_CHAR_PTR(text) ((text).wStr) ++#define SECUREC_NEED_CONVERT_TEXT(attr) ((attr)->textIsWide == 0) ++#if SECUREC_HAVE_MBTOWC ++#define SECUREC_WRITE_TEXT_AFTER_CONVERT(stream, attr, charsOut) SecWriteTextAfterMbtowc((stream), (attr), (charsOut)) ++#else ++#define SECUREC_WRITE_TEXT_AFTER_CONVERT(stream, attr, charsOut) (*(charsOut) = -1) ++#endif ++#else ++#define SECUREC_TEXT_CHAR_PTR(text) ((text).str) ++#define SECUREC_NEED_CONVERT_TEXT(attr) ((attr)->textIsWide != 0) ++#if SECUREC_HAVE_WCTOMB ++#define SECUREC_WRITE_TEXT_AFTER_CONVERT(stream, attr, charsOut) SecWriteTextAfterWctomb((stream), (attr), (charsOut)) ++#else ++#define SECUREC_WRITE_TEXT_AFTER_CONVERT(stream, attr, charsOut) (*(charsOut) = -1) ++#endif ++#endif ++ ++#ifdef SECUREC_FOR_WCHAR ++#if SECUREC_HAVE_MBTOWC ++SECUREC_INLINE void SecWriteTextAfterMbtowc(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ const char *p = attr->text.str; ++ int count = attr->textLen; ++ while (count > 0) { ++ wchar_t wChar = L'\0'; ++ int retVal = mbtowc(&wChar, p, (size_t)MB_CUR_MAX); ++ if (retVal <= 0) { ++ *charsOut = -1; ++ break; ++ } ++ SecWriteChar(stream, wChar, charsOut); ++ if (*charsOut == -1) { ++ break; ++ } ++ p += retVal; ++ count -= retVal; ++ } ++} ++#endif ++#else /* Not SECUREC_FOR_WCHAR */ ++#if SECUREC_HAVE_WCTOMB ++SECUREC_INLINE void SecWriteTextAfterWctomb(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ const wchar_t *p = attr->text.wStr; ++ int count = attr->textLen; ++ while (count > 0) { ++ char tmpBuf[SECUREC_MB_LEN + 1]; ++ SECUREC_MASK_MSVC_CRT_WARNING ++ int retVal = wctomb(tmpBuf, *p); ++ SECUREC_END_MASK_MSVC_CRT_WARNING ++ if (retVal <= 0) { ++ *charsOut = -1; ++ break; ++ } ++ 
SecWriteString(stream, tmpBuf, retVal, charsOut); ++ if (*charsOut == -1) { ++ break; ++ } ++ --count; ++ ++p; ++ } ++} ++#endif ++#endif ++ ++#if SECUREC_ENABLE_SPRINTF_FLOAT ++/* ++ * Write text of float ++ * Using independent functions to optimize the expansion of inline functions by the compiler ++ */ ++SECUREC_INLINE void SecWriteFloatText(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++#ifdef SECUREC_FOR_WCHAR ++#if SECUREC_HAVE_MBTOWC ++ SecWriteTextAfterMbtowc(stream, attr, charsOut); ++#else ++ *charsOut = -1; ++ (void)stream; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ (void)attr; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++#endif ++#else /* Not SECUREC_FOR_WCHAR */ ++ SecWriteString(stream, attr->text.str, attr->textLen, charsOut); ++#endif ++} ++#endif ++ ++/* Write text of integer or string ... */ ++SECUREC_INLINE void SecWriteText(SecPrintfStream *stream, const SecFormatAttr *attr, int *charsOut) ++{ ++ if (SECUREC_NEED_CONVERT_TEXT(attr)) { ++ SECUREC_WRITE_TEXT_AFTER_CONVERT(stream, attr, charsOut); ++ } else { ++ SecWriteTextOpt(stream, SECUREC_TEXT_CHAR_PTR(attr->text), attr->textLen, charsOut); ++ } ++} ++ ++#define SECUREC_FMT_STATE_OFFSET 256 ++ ++SECUREC_INLINE SecFmtState SecDecodeState(SecChar ch, SecFmtState lastState) ++{ ++ static const unsigned char stateTable[SECUREC_STATE_TABLE_SIZE] = { ++ /* ++ * Type ++ * 0: nospecial meaning; ++ * 1: '%' ++ * 2: '.' ++ * 3: '*' ++ * 4: '0' ++ * 5: '1' ... '9' ++ * 6: ' ', '+', '-', '#' ++ * 7: 'h', 'l', 'L', 'w' , 'N', 'z', 'q', 't', 'j' ++ * 8: 'd', 'o', 'u', 'i', 'x', 'X', 'e', 'f', 'g', 'E', 'F', 'G', 's', 'c', '[', 'p' ++ */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x06, 0x00, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x00, 0x06, 0x02, 0x00, ++ 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x08, 0x00, 0x08, 0x08, 0x08, 0x00, 0x07, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00, ++ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x07, 0x08, 0x07, 0x00, 0x07, 0x00, 0x00, 0x08, ++ 0x08, 0x07, 0x00, 0x08, 0x07, 0x08, 0x00, 0x07, 0x08, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, ++ /* Fill zero for normal char 128 byte for 0x80 - 0xff */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ /* ++ * State ++ * 0: normal ++ * 1: percent ++ * 2: flag ++ * 3: width ++ * 4: dot ++ * 5: precis ++ * 6: size ++ * 7: type ++ * 8: invalid ++ */ ++ 0x00, 0x08, 
0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x01, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08,
++        0x01, 0x00, 0x00, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x03, 0x03, 0x08, 0x05,
++        0x08, 0x08, 0x00, 0x00, 0x00, 0x02, 0x02, 0x03, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x03, 0x03,
++        0x03, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
++        0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00,
++        0x00
++    };
++
++#ifdef SECUREC_FOR_WCHAR
++    /* Convert to unsigned char to clear gcc 4.3.4 warning */
++    unsigned char fmtType = (unsigned char)((((unsigned int)(int)(ch)) <= (unsigned int)(int)(L'~')) ? \
++        (stateTable[(unsigned char)(ch)]) : 0);
++    return (SecFmtState)(stateTable[fmtType * ((unsigned char)STAT_INVALID + 1) +
++        (unsigned char)(lastState) + SECUREC_FMT_STATE_OFFSET]);
++#else
++    unsigned char fmtType = stateTable[(unsigned char)(ch)];
++    return (SecFmtState)(stateTable[fmtType * ((unsigned char)STAT_INVALID + 1) +
++        (unsigned char)(lastState) + SECUREC_FMT_STATE_OFFSET]);
++#endif
++}
++
++SECUREC_INLINE void SecDecodeFlags(SecChar ch, SecFormatAttr *attr)
++{
++    switch (ch) {
++        case SECUREC_CHAR(' '):
++            attr->flags |= SECUREC_FLAG_SIGN_SPACE;
++            break;
++        case SECUREC_CHAR('+'):
++            attr->flags |= SECUREC_FLAG_SIGN;
++            break;
++        case SECUREC_CHAR('-'):
++            attr->flags |= SECUREC_FLAG_LEFT;
++            break;
++        case SECUREC_CHAR('0'):
++            attr->flags |= SECUREC_FLAG_LEADZERO; /* Add zero to the front */
++            break;
++        case SECUREC_CHAR('#'):
++            attr->flags |= SECUREC_FLAG_ALTERNATE; /* Output %x with 0x */
++            break;
++        default:
++            /* Do nothing */
++            break;
++    }
++    return;
++}
++
++/*
++ * Decode the size identifier of "%I" in the format string; split out to reduce the number of lines of function code
++ */
++SECUREC_INLINE int SecDecodeSizeI(SecFormatAttr *attr, const SecChar **format)
++{
++#ifdef SECUREC_ON_64BITS
++    attr->flags |= SECUREC_FLAG_I64; /* %I to INT64 */
++#endif
++    if ((**format == SECUREC_CHAR('6')) && (*((*format) + 1) == SECUREC_CHAR('4'))) {
++        (*format) += 2; /* Add 2 to skip I64 */
++        attr->flags |= SECUREC_FLAG_I64; /* %I64 to INT64 */
++    } else if ((**format == SECUREC_CHAR('3')) && (*((*format) + 1) == SECUREC_CHAR('2'))) {
++        (*format) += 2; /* Add 2 to skip I32 */
++        attr->flags &= ~SECUREC_FLAG_I64; /* %I32 to INT32 */
++    } else if ((**format == SECUREC_CHAR('d')) || (**format == SECUREC_CHAR('i')) ||
++        (**format == SECUREC_CHAR('o')) || (**format == SECUREC_CHAR('u')) ||
++        (**format == SECUREC_CHAR('x')) || (**format == SECUREC_CHAR('X'))) {
++        /* Do nothing */
++    } else {
++        /* Compatibility code for "%I" just print I */
++        return -1;
++    }
++    return 0;
++}
++
++/*
++ * Decode the size identifier in the format string, and skip format to the next character
++ */
++SECUREC_INLINE int SecDecodeSize(SecChar ch, SecFormatAttr *attr, const SecChar **format)
++{
++    switch (ch) {
++        case SECUREC_CHAR('l'):
++            if (**format == SECUREC_CHAR('l')) {
++                *format = *format + 1;
++                attr->flags |= SECUREC_FLAG_LONGLONG; /* For long long */
++            } else {
++                attr->flags |= SECUREC_FLAG_LONG; /* For long int or wchar_t */
++            }
++            break;
++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT
++        case SECUREC_CHAR('z'): /* fall-through */ /* FALLTHRU */
++        case SECUREC_CHAR('Z'):
++            attr->flags |= SECUREC_FLAG_SIZE;
++            break;
++        case SECUREC_CHAR('j'):
++            attr->flags |= SECUREC_FLAG_INTMAX;
++            break;
++#endif
++        case SECUREC_CHAR('t'):
++            attr->flags |= SECUREC_FLAG_PTRDIFF;
++            break;
++        case SECUREC_CHAR('q'): /* fall-through */ /* FALLTHRU */
++
case SECUREC_CHAR('L'): ++ attr->flags |= (SECUREC_FLAG_LONGLONG | SECUREC_FLAG_LONG_DOUBLE); ++ break; ++ case SECUREC_CHAR('I'): ++ if (SecDecodeSizeI(attr, format) != 0) { ++ /* Compatibility code for "%I" just print I */ ++ return -1; ++ } ++ break; ++ case SECUREC_CHAR('h'): ++ if (**format == SECUREC_CHAR('h')) { ++ *format = *format + 1; ++ attr->flags |= SECUREC_FLAG_CHAR; /* For char */ ++ } else { ++ attr->flags |= SECUREC_FLAG_SHORT; /* For short int */ ++ } ++ break; ++ case SECUREC_CHAR('w'): ++ attr->flags |= SECUREC_FLAG_WIDECHAR; /* For wide char */ ++ break; ++ default: ++ /* Do nothing */ ++ break; ++ } ++ return 0; ++} ++ ++/* ++ * Decoded char type identifier ++ */ ++SECUREC_INLINE void SecDecodeTypeC(SecFormatAttr *attr, unsigned int c) ++{ ++ attr->textLen = 1; /* Only 1 wide character */ ++ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT)) && !(defined(__hpux)) && !(defined(SECUREC_ON_SOLARIS)) ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++#endif ++ ++#ifdef SECUREC_FOR_WCHAR ++ if ((attr->flags & SECUREC_FLAG_SHORT) != 0) { ++ /* Get multibyte character from argument */ ++ attr->buffer.str[0] = (char)c; ++ attr->text.str = attr->buffer.str; ++ attr->textIsWide = 0; ++ } else { ++ attr->buffer.wStr[0] = (wchar_t)c; ++ attr->text.wStr = attr->buffer.wStr; ++ attr->textIsWide = 1; ++ } ++#else /* Not SECUREC_FOR_WCHAR */ ++ if ((attr->flags & (SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR)) != 0) { ++#if SECUREC_HAVE_WCHART ++ attr->buffer.wStr[0] = (wchar_t)c; ++ attr->text.wStr = attr->buffer.wStr; ++ attr->textIsWide = 1; ++#else ++ attr->textLen = 0; /* Ignore unsupported characters */ ++ attr->fldWidth = 0; /* No paddings */ ++#endif ++ } else { ++ /* Get multibyte character from argument */ ++ attr->buffer.str[0] = (char)c; ++ attr->text.str = attr->buffer.str; ++ attr->textIsWide = 0; ++ } ++#endif ++} ++ ++#ifdef SECUREC_FOR_WCHAR ++#define SECUREC_IS_NARROW_STRING(attr) (((attr)->flags & SECUREC_FLAG_SHORT) != 0) ++#else ++#define SECUREC_IS_NARROW_STRING(attr) (((attr)->flags & (SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR)) == 0) ++#endif ++ ++SECUREC_INLINE void SecDecodeTypeSchar(SecFormatAttr *attr) ++{ ++ size_t textLen; ++ if (attr->text.str == NULL) { ++ /* ++ * Literal string to print null ptr, define it as array rather than const text area ++ * To avoid gcc warning with pointing const text with variable ++ */ ++ static char strNullString[SECUREC_NULL_STRING_SIZE] = "(null)"; ++ attr->text.str = strNullString; ++ } ++ if (attr->precision == -1) { ++ /* Precision NOT assigned */ ++ /* The strlen performance is high when the string length is greater than 32 */ ++ textLen = strlen(attr->text.str); ++ if (textLen > SECUREC_STRING_MAX_LEN) { ++ textLen = 0; ++ } ++ } else { ++ /* Precision assigned */ ++ SECUREC_CALC_STR_LEN(attr->text.str, (size_t)(unsigned int)attr->precision, &textLen); ++ } ++ attr->textLen = (int)textLen; ++} ++ ++SECUREC_INLINE void SecDecodeTypeSwchar(SecFormatAttr *attr) ++{ ++#if SECUREC_HAVE_WCHART ++ size_t textLen; ++ attr->textIsWide = 1; ++ if (attr->text.wStr == NULL) { ++ /* ++ * Literal string to print null ptr, define it as array rather than const text area ++ * To avoid gcc warning with pointing const text with variable ++ */ ++ static wchar_t wStrNullString[SECUREC_NULL_STRING_SIZE] = { L'(', L'n', L'u', L'l', L'l', L')', L'\0', L'\0' }; ++ attr->text.wStr = wStrNullString; ++ } ++ /* The textLen in wchar_t,when precision is -1, it is unlimited */ ++ SECUREC_CALC_WSTR_LEN(attr->text.wStr, (size_t)(unsigned int)attr->precision, 
&textLen); ++ if (textLen > SECUREC_WCHAR_STRING_MAX_LEN) { ++ textLen = 0; ++ } ++ attr->textLen = (int)textLen; ++#else ++ attr->textLen = 0; ++#endif ++} ++ ++/* ++ * Decoded string identifier ++ */ ++SECUREC_INLINE void SecDecodeTypeS(SecFormatAttr *attr, char *argPtr) ++{ ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT)) ++#if (!defined(SECUREC_ON_UNIX)) ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++#endif ++#if (defined(SECUREC_FOR_WCHAR)) ++ if ((attr->flags & SECUREC_FLAG_LONG) == 0) { ++ attr->flags |= SECUREC_FLAG_SHORT; ++ } ++#endif ++#endif ++ attr->text.str = argPtr; ++ if (SECUREC_IS_NARROW_STRING(attr)) { ++ /* The textLen now contains length in multibyte chars */ ++ SecDecodeTypeSchar(attr); ++ } else { ++ /* The textLen now contains length in wide chars */ ++ SecDecodeTypeSwchar(attr); ++ } ++} ++ ++/* ++ * Check precision in format ++ */ ++SECUREC_INLINE int SecDecodePrecision(SecChar ch, SecFormatAttr *attr) ++{ ++ if (attr->dynPrecision == 0) { ++ /* Add digit to current precision */ ++ if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(attr->precision)) { ++ return -1; ++ } ++ attr->precision = (int)SECUREC_MUL_TEN((unsigned int)attr->precision) + ++ (unsigned char)(ch - SECUREC_CHAR('0')); ++ } else { ++ if (attr->precision < 0) { ++ attr->precision = -1; ++ } ++ if (attr->precision > SECUREC_MAX_WIDTH_LEN) { ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++/* ++ * Check width in format ++ */ ++SECUREC_INLINE int SecDecodeWidth(SecChar ch, SecFormatAttr *attr, SecFmtState lastState) ++{ ++ if (attr->dynWidth == 0) { ++ if (lastState != STAT_WIDTH) { ++ attr->fldWidth = 0; ++ } ++ if (SECUREC_MUL_TEN_ADD_BEYOND_MAX(attr->fldWidth)) { ++ return -1; ++ } ++ attr->fldWidth = (int)SECUREC_MUL_TEN((unsigned int)attr->fldWidth) + ++ (unsigned char)(ch - SECUREC_CHAR('0')); ++ } else { ++ if (attr->fldWidth < 0) { ++ attr->flags |= SECUREC_FLAG_LEFT; ++ attr->fldWidth = (-attr->fldWidth); ++ } ++ if (attr->fldWidth > SECUREC_MAX_WIDTH_LEN) { ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++/* ++ * The sprintf_s function processes the wide character as a parameter for %C ++ * The swprintf_s function processes the multiple character as a parameter for %C ++ */ ++SECUREC_INLINE void SecUpdateWcharFlags(SecFormatAttr *attr) ++{ ++ if ((attr->flags & (SECUREC_FLAG_SHORT | SECUREC_FLAG_LONG | SECUREC_FLAG_WIDECHAR)) == 0) { ++#ifdef SECUREC_FOR_WCHAR ++ attr->flags |= SECUREC_FLAG_SHORT; ++#else ++ attr->flags |= SECUREC_FLAG_WIDECHAR; ++#endif ++ } ++} ++/* ++ * When encountering %S, current just same as %C ++ */ ++SECUREC_INLINE void SecUpdateWstringFlags(SecFormatAttr *attr) ++{ ++ SecUpdateWcharFlags(attr); ++} ++ ++#if SECUREC_IN_KERNEL ++SECUREC_INLINE void SecUpdatePointFlagsForKernel(SecFormatAttr *attr) ++{ ++ /* Width is not set */ ++ if (attr->fldWidth <= 0) { ++ attr->flags |= SECUREC_FLAG_LEADZERO; ++ attr->fldWidth = 2 * sizeof(void *); /* 2 x byte number is the length of hex */ ++ } ++ if ((attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Alternate form means '0x' prefix */ ++ attr->prefix[0] = SECUREC_CHAR('0'); ++ attr->prefix[1] = SECUREC_CHAR('x'); ++ attr->prefixLen = SECUREC_PREFIX_LEN; ++ } ++ attr->flags |= SECUREC_FLAG_LONG; /* Converting a long */ ++} ++#endif ++ ++SECUREC_INLINE void SecUpdatePointFlags(SecFormatAttr *attr) ++{ ++ attr->flags |= SECUREC_FLAG_POINTER; ++#if SECUREC_IN_KERNEL ++ SecUpdatePointFlagsForKernel(attr); ++#else ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) && (!defined(SECUREC_ON_UNIX)) ++#if 
defined(SECUREC_VXWORKS_PLATFORM) ++ attr->precision = 1; ++#else ++ attr->precision = 0; ++#endif ++ attr->flags |= SECUREC_FLAG_ALTERNATE; /* "0x" is not default prefix in UNIX */ ++ attr->digits = g_itoaLowerDigits; ++#else /* On unix or win */ ++#if defined(_AIX) || defined(SECUREC_ON_SOLARIS) ++ attr->precision = 1; ++#else ++ attr->precision = 2 * sizeof(void *); /* 2 x byte number is the length of hex */ ++#endif ++#if defined(SECUREC_ON_UNIX) ++ attr->digits = g_itoaLowerDigits; ++#else ++ attr->digits = g_itoaUpperDigits; ++#endif ++#endif ++ ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++#endif ++ ++#ifdef SECUREC_ON_64BITS ++ attr->flags |= SECUREC_FLAG_I64; /* Converting an int64 */ ++#else ++ attr->flags |= SECUREC_FLAG_LONG; /* Converting a long */ ++#endif ++ /* Set up for %#p on different system */ ++ if ((attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Alternate form means '0x' prefix */ ++ attr->prefix[0] = SECUREC_CHAR('0'); ++#if (defined(SECUREC_COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) ++ attr->prefix[1] = SECUREC_CHAR('x'); ++#else ++ attr->prefix[1] = (SecChar)(attr->digits[SECUREC_NUMBER_OF_X]); ++#endif ++#if defined(_AIX) || defined(SECUREC_ON_SOLARIS) ++ attr->prefixLen = 0; ++#else ++ attr->prefixLen = SECUREC_PREFIX_LEN; ++#endif ++ } ++#endif ++} ++ ++SECUREC_INLINE void SecUpdateXpxFlags(SecFormatAttr *attr, SecChar ch) ++{ ++ /* Use unsigned lower hex output for 'x' */ ++ attr->digits = g_itoaLowerDigits; ++ attr->radix = SECUREC_RADIX_HEX; ++ switch (ch) { ++ case SECUREC_CHAR('p'): ++ /* Print a pointer */ ++ SecUpdatePointFlags(attr); ++ break; ++ case SECUREC_CHAR('X'): /* fall-through */ /* FALLTHRU */ ++ /* Unsigned upper hex output */ ++ attr->digits = g_itoaUpperDigits; ++ /* fall-through */ /* FALLTHRU */ ++ default: ++ /* For %#x or %#X */ ++ if ((attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Alternate form means '0x' prefix */ ++ attr->prefix[0] = SECUREC_CHAR('0'); ++ attr->prefix[1] = (SecChar)(attr->digits[SECUREC_NUMBER_OF_X]); ++ attr->prefixLen = SECUREC_PREFIX_LEN; ++ } ++ break; ++ } ++} ++ ++SECUREC_INLINE void SecUpdateOudiFlags(SecFormatAttr *attr, SecChar ch) ++{ ++ /* Do not set digits here */ ++ switch (ch) { ++ case SECUREC_CHAR('i'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('d'): /* fall-through */ /* FALLTHRU */ ++ /* For signed decimal output */ ++ attr->flags |= SECUREC_FLAG_SIGNED; ++ /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('u'): ++ attr->radix = SECUREC_RADIX_DECIMAL; ++ attr->digits = g_itoaLowerDigits; ++ break; ++ case SECUREC_CHAR('o'): ++ /* For unsigned octal output */ ++ attr->radix = SECUREC_RADIX_OCTAL; ++ attr->digits = g_itoaLowerDigits; ++ if ((attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Alternate form means force a leading 0 */ ++ attr->flags |= SECUREC_FLAG_FORCE_OCTAL; ++ } ++ break; ++ default: ++ /* Do nothing */ ++ break; ++ } ++} ++ ++#if SECUREC_ENABLE_SPRINTF_FLOAT ++SECUREC_INLINE void SecFreeFloatBuffer(SecFloatAdapt *floatAdapt) ++{ ++ if (floatAdapt->floatBuffer != NULL) { ++ SECUREC_FREE(floatAdapt->floatBuffer); ++ } ++ if (floatAdapt->allocatedFmtStr != NULL) { ++ SECUREC_FREE(floatAdapt->allocatedFmtStr); ++ } ++ floatAdapt->floatBuffer = NULL; ++ floatAdapt->allocatedFmtStr = NULL; ++ floatAdapt->fmtStr = NULL; ++ floatAdapt->bufferSize = 0; ++} ++ ++SECUREC_INLINE void SecSeekToFrontPercent(const SecChar **format) ++{ ++ const SecChar *fmt = *format; ++ while (*fmt != SECUREC_CHAR('%')) 
{ /* Must meet '%' */ ++ --fmt; ++ } ++ *format = fmt; ++} ++ ++/* Init float format, return 0 is OK */ ++SECUREC_INLINE int SecInitFloatFmt(SecFloatAdapt *floatFmt, const SecChar *format) ++{ ++ const SecChar *fmt = format - 2; /* Sub 2 to the position before 'f' or 'g' */ ++ int fmtStrLen; ++ int i; ++ ++ SecSeekToFrontPercent(&fmt); ++ /* Now fmt point to '%' */ ++ fmtStrLen = (int)(size_t)(format - fmt) + 1; /* With ending terminator */ ++ if (fmtStrLen > (int)sizeof(floatFmt->buffer)) { ++ /* When buffer is NOT enough, alloc a new buffer */ ++ floatFmt->allocatedFmtStr = (char *)SECUREC_MALLOC((size_t)((unsigned int)fmtStrLen)); ++ if (floatFmt->allocatedFmtStr == NULL) { ++ return -1; ++ } ++ floatFmt->fmtStr = floatFmt->allocatedFmtStr; ++ } else { ++ floatFmt->fmtStr = floatFmt->buffer; ++ floatFmt->allocatedFmtStr = NULL; /* Must set to NULL, later code free memory based on this identity */ ++ } ++ ++ for (i = 0; i < fmtStrLen - 1; ++i) { ++ /* Convert wchar to char */ ++ floatFmt->fmtStr[i] = (char)(fmt[i]); /* Copy the format string */ ++ } ++ floatFmt->fmtStr[fmtStrLen - 1] = '\0'; ++ ++ return 0; ++} ++ ++/* Init float buffer and format, return 0 is OK */ ++SECUREC_INLINE int SecInitFloatBuffer(SecFloatAdapt *floatAdapt, const SecChar *format, SecFormatAttr *attr) ++{ ++ floatAdapt->allocatedFmtStr = NULL; ++ floatAdapt->fmtStr = NULL; ++ floatAdapt->floatBuffer = NULL; ++ /* Compute the precision value */ ++ if (attr->precision < 0) { ++ attr->precision = SECUREC_FLOAT_DEFAULT_PRECISION; ++ } ++ /* ++ * Calc buffer size to store double value ++ * The maximum length of SECUREC_MAX_WIDTH_LEN is enough ++ */ ++ if ((attr->flags & SECUREC_FLAG_LONG_DOUBLE) != 0) { ++ if (attr->precision > (SECUREC_MAX_WIDTH_LEN - SECUREC_FLOAT_BUFSIZE_LB)) { ++ return -1; ++ } ++ /* Long double needs to meet the basic print length */ ++ floatAdapt->bufferSize = SECUREC_FLOAT_BUFSIZE_LB + attr->precision + SECUREC_FLOAT_BUF_EXT; ++ } else { ++ if (attr->precision > (SECUREC_MAX_WIDTH_LEN - SECUREC_FLOAT_BUFSIZE)) { ++ return -1; ++ } ++ /* Double needs to meet the basic print length */ ++ floatAdapt->bufferSize = SECUREC_FLOAT_BUFSIZE + attr->precision + SECUREC_FLOAT_BUF_EXT; ++ } ++ if (attr->fldWidth > floatAdapt->bufferSize) { ++ floatAdapt->bufferSize = attr->fldWidth + SECUREC_FLOAT_BUF_EXT; ++ } ++ ++ if (floatAdapt->bufferSize > SECUREC_BUFFER_SIZE) { ++ /* The current value of SECUREC_BUFFER_SIZE could not store the formatted float string */ ++ floatAdapt->floatBuffer = (char *)SECUREC_MALLOC(((size_t)(unsigned int)floatAdapt->bufferSize)); ++ if (floatAdapt->floatBuffer == NULL) { ++ return -1; ++ } ++ attr->text.str = floatAdapt->floatBuffer; ++ } else { ++ attr->text.str = attr->buffer.str; /* Output buffer for float string with default size */ ++ } ++ ++ if (SecInitFloatFmt(floatAdapt, format) != 0) { ++ if (floatAdapt->floatBuffer != NULL) { ++ SECUREC_FREE(floatAdapt->floatBuffer); ++ floatAdapt->floatBuffer = NULL; ++ } ++ return -1; ++ } ++ return 0; ++} ++#endif ++ ++SECUREC_INLINE SecInt64 SecUpdateNegativeChar(SecFormatAttr *attr, char ch) ++{ ++ SecInt64 num64 = ch; /* Sign extend */ ++ if (num64 >= 128) { /* 128 on some platform, char is always unsigned */ ++ unsigned char tmp = (unsigned char)(~((unsigned char)ch)); ++ num64 = tmp + 1; ++ attr->flags |= SECUREC_FLAG_NEGATIVE; ++ } ++ return num64; ++} ++ ++/* ++ * If the precision is not satisfied, zero is added before the string ++ */ ++SECUREC_INLINE void SecNumberSatisfyPrecision(SecFormatAttr *attr) ++{ ++ int precision; 
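++    /*
++     * For example, "%.5d" with the value 42 first yields the text "42" (textLen 2);
++     * the loop below then prepends '0' until textLen reaches 5, giving "00042".
++     */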
++ if (attr->precision < 0) { ++ precision = 1; /* Default precision 1 */ ++ } else { ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++#else ++ if ((attr->flags & SECUREC_FLAG_POINTER) == 0) { ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++ } ++#endif ++ if (attr->precision > SECUREC_MAX_PRECISION) { ++ attr->precision = SECUREC_MAX_PRECISION; ++ } ++ precision = attr->precision; ++ } ++ while (attr->textLen < precision) { ++ --attr->text.str; ++ *(attr->text.str) = '0'; ++ ++attr->textLen; ++ } ++} ++ ++/* ++ * Add leading zero for %#o ++ */ ++SECUREC_INLINE void SecNumberForceOctal(SecFormatAttr *attr) ++{ ++ /* Force a leading zero if FORCEOCTAL flag set */ ++ if ((attr->flags & SECUREC_FLAG_FORCE_OCTAL) != 0 && ++ (attr->textLen == 0 || attr->text.str[0] != '0')) { ++ --attr->text.str; ++ *(attr->text.str) = '0'; ++ ++attr->textLen; ++ } ++} ++ ++SECUREC_INLINE void SecUpdateSignedNumberPrefix(SecFormatAttr *attr) ++{ ++ if ((attr->flags & SECUREC_FLAG_SIGNED) == 0) { ++ return; ++ } ++ if ((attr->flags & SECUREC_FLAG_NEGATIVE) != 0) { ++ /* Prefix is '-' */ ++ attr->prefix[0] = SECUREC_CHAR('-'); ++ attr->prefixLen = 1; ++ return; ++ } ++ if ((attr->flags & SECUREC_FLAG_SIGN) != 0) { ++ /* Prefix is '+' */ ++ attr->prefix[0] = SECUREC_CHAR('+'); ++ attr->prefixLen = 1; ++ return; ++ } ++ if ((attr->flags & SECUREC_FLAG_SIGN_SPACE) != 0) { ++ /* Prefix is ' ' */ ++ attr->prefix[0] = SECUREC_CHAR(' '); ++ attr->prefixLen = 1; ++ return; ++ } ++ return; ++} ++ ++SECUREC_INLINE void SecNumberCompatZero(SecFormatAttr *attr) ++{ ++#if SECUREC_IN_KERNEL ++ if ((attr->flags & SECUREC_FLAG_POINTER) != 0) { ++ static char strNullPointer[SECUREC_NULL_STRING_SIZE] = "(null)"; ++ attr->text.str = strNullPointer; ++ attr->textLen = 6; /* Length of (null) is 6 */ ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++ attr->prefixLen = 0; ++ if (attr->precision >= 0 && attr->precision < attr->textLen) { ++ attr->textLen = attr->precision; ++ } ++ } ++ if ((attr->flags & SECUREC_FLAG_POINTER) == 0 && attr->radix == SECUREC_RADIX_HEX && ++ (attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Add 0x prefix for %x or %X, the prefix string has been set before */ ++ attr->prefixLen = SECUREC_PREFIX_LEN; ++ } ++#elif defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && (!defined(SECUREC_ON_UNIX)) ++ if ((attr->flags & SECUREC_FLAG_POINTER) != 0) { ++ static char strNullPointer[SECUREC_NULL_STRING_SIZE] = "(nil)"; ++ attr->text.str = strNullPointer; ++ attr->textLen = 5; /* Length of (nil) is 5 */ ++ attr->flags &= ~SECUREC_FLAG_LEADZERO; ++ } ++#elif defined(SECUREC_VXWORKS_PLATFORM) || defined(__hpux) ++ if ((attr->flags & SECUREC_FLAG_POINTER) != 0 && (attr->flags & SECUREC_FLAG_ALTERNATE) != 0) { ++ /* Add 0x prefix for %p, the prefix string has been set before */ ++ attr->prefixLen = SECUREC_PREFIX_LEN; ++ } ++#endif ++ (void)attr; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++} ++ ++/* ++ * Formatting output core function ++ */ ++SECUREC_INLINE int SecOutput(SecPrintfStream *stream, const SecChar *cFormat, va_list argList) ++{ ++ const SecChar *format = cFormat; ++ int charsOut; /* Characters written */ ++ int noOutput = 0; /* Must be initialized or compiler alerts */ ++ SecFmtState state; ++ SecFormatAttr formatAttr; ++ ++ formatAttr.flags = 0; ++ formatAttr.textIsWide = 0; /* Flag for buffer contains wide chars */ ++ formatAttr.fldWidth = 0; ++ formatAttr.precision = 0; ++ formatAttr.dynWidth = 0; ++ formatAttr.dynPrecision = 0; ++ 
formatAttr.digits = g_itoaUpperDigits; ++ formatAttr.radix = SECUREC_RADIX_DECIMAL; ++ formatAttr.padding = 0; ++ formatAttr.textLen = 0; ++ formatAttr.text.str = NULL; ++ formatAttr.prefixLen = 0; ++ formatAttr.prefix[0] = SECUREC_CHAR('\0'); ++ formatAttr.prefix[1] = SECUREC_CHAR('\0'); ++ charsOut = 0; ++ state = STAT_NORMAL; /* Starting state */ ++ ++ /* Loop each format character */ ++ while (*format != SECUREC_CHAR('\0') && charsOut >= 0) { ++ SecFmtState lastState = state; ++ SecChar ch = *format; /* Currently read character */ ++ ++format; ++ state = SecDecodeState(ch, lastState); ++ switch (state) { ++ case STAT_NORMAL: ++ SecWriteChar(stream, ch, &charsOut); ++ continue; ++ case STAT_PERCENT: ++ /* Set default values */ ++ noOutput = 0; ++ formatAttr.prefixLen = 0; ++ formatAttr.textLen = 0; ++ formatAttr.flags = 0; ++ formatAttr.fldWidth = 0; ++ formatAttr.precision = -1; ++ formatAttr.textIsWide = 0; ++ formatAttr.dynWidth = 0; ++ formatAttr.dynPrecision = 0; ++ break; ++ case STAT_FLAG: ++ /* Set flag based on which flag character */ ++ SecDecodeFlags(ch, &formatAttr); ++ break; ++ case STAT_WIDTH: ++ /* Update width value */ ++ if (ch == SECUREC_CHAR('*')) { ++ /* get width from arg list */ ++ formatAttr.fldWidth = (int)va_arg(argList, int); ++ formatAttr.dynWidth = 1; ++ } ++ if (SecDecodeWidth(ch, &formatAttr, lastState) != 0) { ++ return -1; ++ } ++ break; ++ case STAT_DOT: ++ formatAttr.precision = 0; ++ break; ++ case STAT_PRECIS: ++ /* Update precision value */ ++ if (ch == SECUREC_CHAR('*')) { ++ /* Get precision from arg list */ ++ formatAttr.precision = (int)va_arg(argList, int); ++ formatAttr.dynPrecision = 1; ++ } ++ if (SecDecodePrecision(ch, &formatAttr) != 0) { ++ return -1; ++ } ++ break; ++ case STAT_SIZE: ++ /* Read a size specifier, set the formatAttr.flags based on it, and skip format to next character */ ++ if (SecDecodeSize(ch, &formatAttr, &format) != 0) { ++ /* Compatibility code for "%I" just print I */ ++ SecWriteChar(stream, ch, &charsOut); ++ state = STAT_NORMAL; ++ continue; ++ } ++ break; ++ case STAT_TYPE: ++ switch (ch) { ++ case SECUREC_CHAR('C'): /* Wide char */ ++ SecUpdateWcharFlags(&formatAttr); ++ /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('c'): { ++ unsigned int cValue = (unsigned int)va_arg(argList, int); ++ SecDecodeTypeC(&formatAttr, cValue); ++ break; ++ } ++ case SECUREC_CHAR('S'): /* Wide char string */ ++ SecUpdateWstringFlags(&formatAttr); ++ /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('s'): { ++ char *argPtr = (char *)va_arg(argList, char *); ++ SecDecodeTypeS(&formatAttr, argPtr); ++ break; ++ } ++ case SECUREC_CHAR('G'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('g'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('E'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('F'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('e'): /* fall-through */ /* FALLTHRU */ ++ case SECUREC_CHAR('f'): { ++#if SECUREC_ENABLE_SPRINTF_FLOAT ++ /* Add following code to call system sprintf API for float number */ ++ SecFloatAdapt floatAdapt; ++ noOutput = 1; /* It's no more data needs to be written */ ++ ++ /* Now format is pointer to the next character of 'f' */ ++ if (SecInitFloatBuffer(&floatAdapt, format, &formatAttr) != 0) { ++ break; ++ } ++ ++ if ((formatAttr.flags & SECUREC_FLAG_LONG_DOUBLE) != 0) { ++#if defined(SECUREC_COMPATIBLE_LINUX_FORMAT) && SECUREC_ENABLE_SPRINTF_LONG_DOUBLE ++ long double tmp = (long double)va_arg(argList, long double); ++ SecFormatLongDouble(&formatAttr, 
&floatAdapt, tmp);
++#else
++                        double tmp = (double)va_arg(argList, double);
++                        SecFormatDouble(&formatAttr, &floatAdapt, tmp);
++#endif
++                    } else {
++                        double tmp = (double)va_arg(argList, double);
++                        SecFormatDouble(&formatAttr, &floatAdapt, tmp);
++                    }
++
++                    /* Only need write formatted float string */
++                    SecWriteFloatText(stream, &formatAttr, &charsOut);
++                    SecFreeFloatBuffer(&floatAdapt);
++                    break;
++#else
++                    return -1;
++#endif
++                }
++                case SECUREC_CHAR('X'): /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('p'): /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('x'): /* fall-through */ /* FALLTHRU */
++                    SecUpdateXpxFlags(&formatAttr, ch);
++                    /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('i'): /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('d'): /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('u'): /* fall-through */ /* FALLTHRU */
++                case SECUREC_CHAR('o'): {
++                    SecInt64 num64;
++                    SecUpdateOudiFlags(&formatAttr, ch);
++                    /* Read argument into variable num64. Be careful, the order of these checks matters */
++                    if ((formatAttr.flags & SECUREC_FLAG_I64) != 0 ||
++                        (formatAttr.flags & SECUREC_FLAG_LONGLONG) != 0) {
++                        num64 = (SecInt64)va_arg(argList, SecInt64); /* Maximum Bit Width sign bit unchanged */
++                    } else if ((formatAttr.flags & SECUREC_FLAG_LONG) != 0) {
++                        num64 = SECUREC_GET_LONG_FROM_ARG(formatAttr);
++                    } else if ((formatAttr.flags & SECUREC_FLAG_CHAR) != 0) {
++                        num64 = SECUREC_GET_CHAR_FROM_ARG(formatAttr);
++                    } else if ((formatAttr.flags & SECUREC_FLAG_SHORT) != 0) {
++                        num64 = SECUREC_GET_SHORT_FROM_ARG(formatAttr);
++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT
++                    } else if ((formatAttr.flags & SECUREC_FLAG_PTRDIFF) != 0) {
++                        num64 = (ptrdiff_t)va_arg(argList, ptrdiff_t); /* Sign extend */
++                    } else if ((formatAttr.flags & SECUREC_FLAG_SIZE) != 0) {
++                        num64 = SECUREC_GET_SIZE_FROM_ARG(formatAttr);
++                    } else if ((formatAttr.flags & SECUREC_FLAG_INTMAX) != 0) {
++                        num64 = (SecInt64)va_arg(argList, SecInt64);
++#endif
++                    } else {
++                        num64 = SECUREC_GET_INT_FROM_ARG(formatAttr);
++                    }
++
++                    /* The order of the following calls must be correct */
++                    SecNumberToBuffer(&formatAttr, num64);
++                    SecNumberSatisfyPrecision(&formatAttr);
++                    SecNumberForceOctal(&formatAttr);
++                    SecUpdateSignedNumberPrefix(&formatAttr);
++                    if (num64 == 0) {
++                        SecNumberCompatZero(&formatAttr);
++                    }
++                    break;
++                }
++                default:
++                    /* Do nothing */
++                    break;
++            }
++
++            if (noOutput == 0) {
++                /* Calculate amount of padding */
++                formatAttr.padding = (formatAttr.fldWidth - formatAttr.textLen) - formatAttr.prefixLen;
++
++                /* Put out the padding, prefix, and text, in the correct order */
++                SecWriteLeftPadding(stream, &formatAttr, &charsOut);
++                SecWritePrefix(stream, &formatAttr, &charsOut);
++                SecWriteLeadingZero(stream, &formatAttr, &charsOut);
++                SecWriteText(stream, &formatAttr, &charsOut);
++                SecWriteRightPadding(stream, &formatAttr, &charsOut);
++            }
++            break;
++        case STAT_INVALID: /* fall-through */ /* FALLTHRU */
++        default:
++            return -1; /* Input format is wrong (STAT_INVALID), directly return */
++        }
++    }
++
++    if (state != STAT_NORMAL && state != STAT_TYPE) {
++        return -1;
++    }
++
++    return charsOut; /* The number of characters written */
++}
++
++/*
++ * Output one zero character into the SecPrintfStream structure
++ * If there is not enough space, make sure stream->count is less than 0
++ */
++SECUREC_INLINE int SecPutZeroChar(SecPrintfStream *stream)
++{
++    --stream->count;
++    if (stream->count >= 0) {
++        *(stream->cur) = SECUREC_CHAR('\0');
++        ++stream->cur;
++        return 0;
++    }
++    return -1;
++}
++
++/*
++ * Multi character formatted output implementation
++ */
++#ifdef SECUREC_FOR_WCHAR
++int SecVswprintfImpl(wchar_t *string, size_t count, const wchar_t *format, va_list argList)
++#else
++int SecVsnprintfImpl(char *string, size_t count, const char *format, va_list argList)
++#endif
++{
++    SecPrintfStream stream;
++    int retVal;
++
++    stream.count = (int)count; /* The count includes the \0 character, must be greater than zero */
++    stream.cur = string;
++
++    retVal = SecOutput(&stream, format, argList);
++    if (retVal >= 0) {
++        if (SecPutZeroChar(&stream) == 0) {
++            return retVal;
++        }
++    }
++    if (stream.count < 0) {
++        /* The buffer was too small, then truncate */
++        string[count - 1] = SECUREC_CHAR('\0');
++        return SECUREC_PRINTF_TRUNCATE;
++    }
++    string[0] = SECUREC_CHAR('\0'); /* Empty the dest string */
++    return -1;
++}
++#endif /* OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 */
++
+diff --git a/lib/securec/src/scanf_s.c b/lib/securec/src/scanf_s.c
+new file mode 100644
+index 000000000..dc575714e
+--- /dev/null
++++ b/lib/securec/src/scanf_s.c
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ *          http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: scanf_s function
++ * Create: 2014-02-25
++ */
++
++#include "linux/securec.h"
++
++/*
++ * <FUNCTION DESCRIPTION>
++ *    The scanf_s function is equivalent to fscanf_s with the argument stdin interposed before the arguments to scanf_s
++ *    The scanf_s function reads data from the standard input stream stdin and
++ *    writes the data into the location that's given by argument. Each argument
++ *    must be a pointer to a variable of a type that corresponds to a type specifier
++ *    in format. If copying occurs between strings that overlap, the behavior is
++ *    undefined.
++ *
++ * <INPUT PARAMETERS>
++ *    format                   Format control string.
++ *    ...                      Optional arguments.
++ *
++ * <OUTPUT PARAMETERS>
++ *    ...                      The converted value stored in user assigned address
++ *
++ * <RETURN VALUE>
++ *    Returns the number of fields successfully converted and assigned;
++ *    the return value does not include fields that were read but not assigned.
++ *    A return value of 0 indicates that no fields were assigned.
++ *    return -1 if an error occurs.
++ */
++int scanf_s(const char *format, ...)
++{
++    int ret; /* If initialization causes e838 */
++    va_list argList;
++
++    va_start(argList, format);
++    ret = vscanf_s(format, argList);
++    va_end(argList);
++    (void)argList; /* To clear e438 last value assigned not used, the compiler will optimize this code */
++
++    return ret;
++}
++
+diff --git a/lib/securec/src/secinput.h b/lib/securec/src/secinput.h
+new file mode 100644
+index 000000000..176ee05d9
+--- /dev/null
++++ b/lib/securec/src/secinput.h
+@@ -0,0 +1,181 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Define macro, data struct, and declare function prototype, ++ * which is used by input.inl, secureinput_a.c and secureinput_w.c. ++ * Create: 2014-02-25 ++ */ ++ ++#ifndef SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C ++#define SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C ++#include "securecutil.h" ++ ++#define SECUREC_SCANF_EINVAL (-1) ++#define SECUREC_SCANF_ERROR_PARA (-2) ++ ++/* For internal stream flag */ ++#define SECUREC_MEM_STR_FLAG 0x01U ++#define SECUREC_FILE_STREAM_FLAG 0x02U ++#define SECUREC_PIPE_STREAM_FLAG 0x04U ++#define SECUREC_LOAD_FILE_TO_MEM_FLAG 0x08U ++ ++#define SECUREC_UCS_BOM_HEADER_SIZE 2U ++#define SECUREC_UCS_BOM_HEADER_BE_1ST 0xfeU ++#define SECUREC_UCS_BOM_HEADER_BE_2ST 0xffU ++#define SECUREC_UCS_BOM_HEADER_LE_1ST 0xffU ++#define SECUREC_UCS_BOM_HEADER_LE_2ST 0xfeU ++#define SECUREC_UTF8_BOM_HEADER_SIZE 3U ++#define SECUREC_UTF8_BOM_HEADER_1ST 0xefU ++#define SECUREC_UTF8_BOM_HEADER_2ND 0xbbU ++#define SECUREC_UTF8_BOM_HEADER_3RD 0xbfU ++#define SECUREC_UTF8_LEAD_1ST 0xe0U ++#define SECUREC_UTF8_LEAD_2ND 0x80U ++ ++#define SECUREC_BEGIN_WITH_UCS_BOM(s, len) ((len) == SECUREC_UCS_BOM_HEADER_SIZE && \ ++ (((unsigned char)((s)[0]) == SECUREC_UCS_BOM_HEADER_LE_1ST && \ ++ (unsigned char)((s)[1]) == SECUREC_UCS_BOM_HEADER_LE_2ST) || \ ++ ((unsigned char)((s)[0]) == SECUREC_UCS_BOM_HEADER_BE_1ST && \ ++ (unsigned char)((s)[1]) == SECUREC_UCS_BOM_HEADER_BE_2ST))) ++ ++#define SECUREC_BEGIN_WITH_UTF8_BOM(s, len) ((len) == SECUREC_UTF8_BOM_HEADER_SIZE && \ ++ (unsigned char)((s)[0]) == SECUREC_UTF8_BOM_HEADER_1ST && \ ++ (unsigned char)((s)[1]) == SECUREC_UTF8_BOM_HEADER_2ND && \ ++ (unsigned char)((s)[2]) == SECUREC_UTF8_BOM_HEADER_3RD) ++ ++#ifdef SECUREC_FOR_WCHAR ++#define SECUREC_BOM_HEADER_SIZE SECUREC_UCS_BOM_HEADER_SIZE ++#define SECUREC_BEGIN_WITH_BOM(s, len) SECUREC_BEGIN_WITH_UCS_BOM((s), (len)) ++#else ++#define SECUREC_BOM_HEADER_SIZE SECUREC_UTF8_BOM_HEADER_SIZE ++#define SECUREC_BEGIN_WITH_BOM(s, len) SECUREC_BEGIN_WITH_UTF8_BOM((s), (len)) ++#endif ++ ++typedef struct { ++ unsigned int flag; /* Mark the properties of input stream */ ++ char *base; /* The pointer to the header of buffered string */ ++ const char *cur; /* The pointer to next read position */ ++ size_t count; /* The size of buffered string in bytes */ ++#if SECUREC_ENABLE_SCANF_FILE ++ FILE *pf; /* The file pointer */ ++ size_t fileRealRead; ++ long oriFilePos; /* The original position of file offset when fscanf is called */ ++#endif ++} SecFileStream; ++ ++#if SECUREC_ENABLE_SCANF_FILE ++#define SECUREC_FILE_STREAM_INIT_FILE(stream, fp) do { \ ++ (stream)->pf = (fp); \ ++ (stream)->fileRealRead = 0; \ ++ (stream)->oriFilePos = 0; \ ++} SECUREC_WHILE_ZERO ++#else ++/* Disable file */ ++#define SECUREC_FILE_STREAM_INIT_FILE(stream, fp) ++#endif ++ ++/* This initialization for eliminating redundant initialization. 
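++ * Each SECUREC_FILE_STREAM_FROM_* macro below assigns every SecFileStream field,
++ * so callers can use a stream without first calling memset on it.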
*/ ++#define SECUREC_FILE_STREAM_FROM_STRING(stream, buf, cnt) do { \ ++ (stream)->flag = SECUREC_MEM_STR_FLAG; \ ++ (stream)->base = NULL; \ ++ (stream)->cur = (buf); \ ++ (stream)->count = (cnt); \ ++ SECUREC_FILE_STREAM_INIT_FILE((stream), NULL); \ ++} SECUREC_WHILE_ZERO ++ ++/* This initialization for eliminating redundant initialization. */ ++#define SECUREC_FILE_STREAM_FROM_FILE(stream, fp) do { \ ++ (stream)->flag = SECUREC_FILE_STREAM_FLAG; \ ++ (stream)->base = NULL; \ ++ (stream)->cur = NULL; \ ++ (stream)->count = 0; \ ++ SECUREC_FILE_STREAM_INIT_FILE((stream), (fp)); \ ++} SECUREC_WHILE_ZERO ++ ++/* This initialization for eliminating redundant initialization. */ ++#define SECUREC_FILE_STREAM_FROM_STDIN(stream) do { \ ++ (stream)->flag = SECUREC_PIPE_STREAM_FLAG; \ ++ (stream)->base = NULL; \ ++ (stream)->cur = NULL; \ ++ (stream)->count = 0; \ ++ SECUREC_FILE_STREAM_INIT_FILE((stream), SECUREC_STREAM_STDIN); \ ++} SECUREC_WHILE_ZERO ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++int SecInputS(SecFileStream *stream, const char *cFormat, va_list argList); ++void SecClearDestBuf(const char *buffer, const char *format, va_list argList); ++#ifdef SECUREC_FOR_WCHAR ++int SecInputSW(SecFileStream *stream, const wchar_t *cFormat, va_list argList); ++void SecClearDestBufW(const wchar_t *buffer, const wchar_t *format, va_list argList); ++#endif ++ ++/* 20150105 For software and hardware decoupling,such as UMG */ ++#ifdef SECUREC_SYSAPI4VXWORKS ++#ifdef feof ++#undef feof ++#endif ++extern int feof(FILE *stream); ++#endif ++ ++#if defined(SECUREC_SYSAPI4VXWORKS) || defined(SECUREC_CTYPE_MACRO_ADAPT) ++#ifndef isspace ++#define isspace(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\r') || ((c) == '\n')) ++#endif ++#ifndef iswspace ++#define iswspace(c) (((c) == L' ') || ((c) == L'\t') || ((c) == L'\r') || ((c) == L'\n')) ++#endif ++#ifndef isascii ++#define isascii(c) (((unsigned char)(c)) <= 0x7f) ++#endif ++#ifndef isupper ++#define isupper(c) ((c) >= 'A' && (c) <= 'Z') ++#endif ++#ifndef islower ++#define islower(c) ((c) >= 'a' && (c) <= 'z') ++#endif ++#ifndef isalpha ++#define isalpha(c) (isupper(c) || (islower(c))) ++#endif ++#ifndef isdigit ++#define isdigit(c) ((c) >= '0' && (c) <= '9') ++#endif ++#ifndef isxupper ++#define isxupper(c) ((c) >= 'A' && (c) <= 'F') ++#endif ++#ifndef isxlower ++#define isxlower(c) ((c) >= 'a' && (c) <= 'f') ++#endif ++#ifndef isxdigit ++#define isxdigit(c) (isdigit(c) || isxupper(c) || isxlower(c)) ++#endif ++#endif ++ ++#ifdef __cplusplus ++} ++#endif ++/* Reserved file operation macro interface, s is FILE *, i is fileno zero. */ ++#ifndef SECUREC_LOCK_FILE ++#define SECUREC_LOCK_FILE(s) ++#endif ++ ++#ifndef SECUREC_UNLOCK_FILE ++#define SECUREC_UNLOCK_FILE(s) ++#endif ++ ++#ifndef SECUREC_LOCK_STDIN ++#define SECUREC_LOCK_STDIN(i, s) ++#endif ++ ++#ifndef SECUREC_UNLOCK_STDIN ++#define SECUREC_UNLOCK_STDIN(i, s) ++#endif ++#endif ++ +diff --git a/lib/securec/src/securecutil.c b/lib/securec/src/securecutil.c +new file mode 100644 +index 000000000..7518eb300 +--- /dev/null ++++ b/lib/securec/src/securecutil.c +@@ -0,0 +1,81 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. 
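To make the stream setup above concrete, here is how a string-backed stream would be prepared before handing it to SecInputS. This mirrors what a vsscanf_s-style wrapper is expected to do; the helper name and the exact wiring are illustrative assumptions, not the library's shipped code, which also validates the buffer before scanning:

    #include <stdarg.h>
    #include <string.h>
    #include "secinput.h"

    /* Hypothetical wrapper: scan from a string via the internal stream API */
    static int MyVsscanf(const char *buffer, const char *format, va_list argList)
    {
        SecFileStream fStr;
        SECUREC_FILE_STREAM_FROM_STRING(&fStr, buffer, strlen(buffer));
        return SecInputS(&fStr, format, argList);
    }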
++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Provides internal functions used by this library, such as memory ++ * copy and memory move. It also provides some helper functions for the ++ * printf family API, such as SecVsnprintfImpl ++ * Create: 2014-02-25 ++ */ ++ ++/* To avoid duplicate header files, include only securecutil.h */ ++#include "securecutil.h" ++ ++#if defined(ANDROID) && !defined(SECUREC_CLOSE_ANDROID_HANDLE) && (SECUREC_HAVE_WCTOMB || SECUREC_HAVE_MBTOWC) ++#include <wchar.h> /* For wcrtomb and mbrtowc */ ++#if SECUREC_HAVE_WCTOMB ++/* ++ * Convert a wide character to narrow multi-bytes ++ */ ++int wctomb(char *s, wchar_t wc) ++{ ++ return (int)wcrtomb(s, wc, NULL); ++} ++#endif ++ ++#if SECUREC_HAVE_MBTOWC ++/* ++ * Convert narrow multi-byte characters to a wide character. ++ * mbrtowc returns -1 or -2 upon failure, unlike mbtowc, which only returns -1. ++ * When the return value is less than zero, we treat it as a failure ++ */ ++int mbtowc(wchar_t *pwc, const char *s, size_t n) ++{ ++ return (int)mbrtowc(pwc, s, n, NULL); ++} ++#endif ++#endif ++ ++/* The V100R001C01 version num is 0x5 (High 8 bits) */ ++#define SECUREC_C_VERSION 0x500U ++#define SECUREC_SPC_VERSION 0xbU ++#define SECUREC_VERSION_STR "V100R001C01SPC011B003" ++ ++/* ++ * Get version string and version number. ++ * The rules for version number are as follows: ++ * 1) SPC verNumber<->verStr like: ++ * 0x201<->C01 ++ * 0x202<->C01SPC001 Redefine numbers after this version ++ * 0x502<->C01SPC002 ++ * 0x503<->C01SPC003 ++ * ... ++ * 0X50a<->SPC010 ++ * 0X50b<->SPC011 ++ * ... ++ * 0x700<->C02 ++ * 0x701<->C02SPC001 ++ * 0x702<->C02SPC002 ++ * ... ++ * 2) CP verNumber<->verStr like: ++ * 0X601<->CP0001 ++ * 0X602<->CP0002 ++ * ... ++ */ ++const char *GetHwSecureCVersion(unsigned short *verNumber) ++{ ++ if (verNumber != NULL) { ++ *verNumber = (unsigned short)(SECUREC_C_VERSION | SECUREC_SPC_VERSION); ++ } ++ return SECUREC_VERSION_STR; ++} ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(GetHwSecureCVersion); ++#endif ++ +diff --git a/lib/securec/src/securecutil.h b/lib/securec/src/securecutil.h +new file mode 100644 +index 000000000..35112a248 +--- /dev/null ++++ b/lib/securec/src/securecutil.h +@@ -0,0 +1,574 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Define macros and data structs, and declare internally used function prototypes, ++ * which are used by the secure functions.
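Per the mapping above, SECUREC_C_VERSION | SECUREC_SPC_VERSION yields 0x50b for SPC011, so a caller sees, for example:

    #include <stdio.h>

    /* Exported by securecutil.c above */
    const char *GetHwSecureCVersion(unsigned short *verNumber);

    int main(void)
    {
        unsigned short ver = 0;
        const char *str = GetHwSecureCVersion(&ver);
        printf("%s (0x%x)\n", str, (unsigned int)ver); /* V100R001C01SPC011B003 (0x50b) */
        return 0;
    }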
++ * Create: 2014-02-25 ++ */ ++ ++#ifndef SECURECUTIL_H_46C86578_F8FF_4E49_8E64_9B175241761F ++#define SECURECUTIL_H_46C86578_F8FF_4E49_8E64_9B175241761F ++#include "linux/securec.h" ++ ++#if (defined(_MSC_VER)) && (_MSC_VER >= 1400) ++/* Shield compilation alerts using discarded functions and Constant expression to maximize code compatibility */ ++#define SECUREC_MASK_MSVC_CRT_WARNING __pragma(warning(push)) \ ++ __pragma(warning(disable : 4996 4127)) ++#define SECUREC_END_MASK_MSVC_CRT_WARNING __pragma(warning(pop)) ++#else ++#define SECUREC_MASK_MSVC_CRT_WARNING ++#define SECUREC_END_MASK_MSVC_CRT_WARNING ++#endif ++#define SECUREC_WHILE_ZERO SECUREC_MASK_MSVC_CRT_WARNING while (0) SECUREC_END_MASK_MSVC_CRT_WARNING ++ ++/* Automatically identify the platform that supports strnlen function, and use this function to improve performance */ ++#ifndef SECUREC_HAVE_STRNLEN ++#if (defined(_XOPEN_SOURCE) && _XOPEN_SOURCE >= 700) || (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200809L) ++#if SECUREC_IN_KERNEL ++#define SECUREC_HAVE_STRNLEN 0 ++#else ++#if defined(__GLIBC__) && __GLIBC__ >= 2 && defined(__GLIBC_MINOR__) && __GLIBC_MINOR__ >= 10 ++#define SECUREC_HAVE_STRNLEN 1 ++#else ++#define SECUREC_HAVE_STRNLEN 0 ++#endif ++#endif ++#else ++#define SECUREC_HAVE_STRNLEN 0 ++#endif ++#endif ++ ++#if SECUREC_IN_KERNEL ++/* In kernel disable functions */ ++#ifndef SECUREC_ENABLE_SCANF_FILE ++#define SECUREC_ENABLE_SCANF_FILE 0 ++#endif ++#ifndef SECUREC_ENABLE_SCANF_FLOAT ++#define SECUREC_ENABLE_SCANF_FLOAT 0 ++#endif ++#ifndef SECUREC_ENABLE_SPRINTF_FLOAT ++#define SECUREC_ENABLE_SPRINTF_FLOAT 0 ++#endif ++#ifndef SECUREC_HAVE_MBTOWC ++#define SECUREC_HAVE_MBTOWC 0 ++#endif ++#ifndef SECUREC_HAVE_WCTOMB ++#define SECUREC_HAVE_WCTOMB 0 ++#endif ++#ifndef SECUREC_HAVE_WCHART ++#define SECUREC_HAVE_WCHART 0 ++#endif ++#else /* Not in kernel */ ++/* Systems that do not support file, can define this macro to 0. */ ++#ifndef SECUREC_ENABLE_SCANF_FILE ++#define SECUREC_ENABLE_SCANF_FILE 1 ++#endif ++#ifndef SECUREC_ENABLE_SCANF_FLOAT ++#define SECUREC_ENABLE_SCANF_FLOAT 0 ++#endif ++/* Systems that do not support float, can define this macro to 0. 
*/ ++#ifndef SECUREC_ENABLE_SPRINTF_FLOAT ++#define SECUREC_ENABLE_SPRINTF_FLOAT 1 ++#endif ++#ifndef SECUREC_HAVE_MBTOWC ++#define SECUREC_HAVE_MBTOWC 0 ++#endif ++#ifndef SECUREC_HAVE_WCTOMB ++#define SECUREC_HAVE_WCTOMB 0 ++#endif ++#ifndef SECUREC_HAVE_WCHART ++#define SECUREC_HAVE_WCHART 1 ++#endif ++#endif ++ ++#ifndef SECUREC_ENABLE_INLINE ++#define SECUREC_ENABLE_INLINE 0 ++#endif ++ ++#ifndef SECUREC_INLINE ++#if SECUREC_ENABLE_INLINE ++#define SECUREC_INLINE static inline ++#else ++#define SECUREC_INLINE static ++#endif ++#endif ++ ++#ifndef SECUREC_WARP_OUTPUT ++#if SECUREC_IN_KERNEL ++#define SECUREC_WARP_OUTPUT 1 ++#else ++#define SECUREC_WARP_OUTPUT 0 ++#endif ++#endif ++ ++#ifndef SECUREC_STREAM_STDIN ++#define SECUREC_STREAM_STDIN stdin ++#endif ++ ++#define SECUREC_MUL_SIXTEEN(x) ((x) << 4U) ++#define SECUREC_MUL_EIGHT(x) ((x) << 3U) ++#define SECUREC_MUL_TEN(x) ((((x) << 2U) + (x)) << 1U) ++/* Limit on the format input and output width; use a signed integer */ ++#define SECUREC_MAX_WIDTH_LEN_DIV_TEN 21474836 ++#define SECUREC_MAX_WIDTH_LEN (SECUREC_MAX_WIDTH_LEN_DIV_TEN * 10) ++/* Check whether x multiplied by 10 would exceed the maximum width */ ++#define SECUREC_MUL_TEN_ADD_BEYOND_MAX(x) (((x) > SECUREC_MAX_WIDTH_LEN_DIV_TEN)) ++ ++#define SECUREC_FLOAT_BUFSIZE (309 + 40) /* Max length of double value */ ++#define SECUREC_FLOAT_BUFSIZE_LB (4932 + 40) /* Max length of long double value */ ++#define SECUREC_FLOAT_DEFAULT_PRECISION 6 ++ ++/* This macro does not handle pointer equality or integer overflow */ ++#define SECUREC_MEMORY_NO_OVERLAP(dest, src, count) \ ++ (((src) < (dest) && ((const char *)(src) + (count)) <= (char *)(dest)) || \ ++ ((dest) < (src) && ((char *)(dest) + (count)) <= (const char *)(src))) ++ ++#define SECUREC_MEMORY_IS_OVERLAP(dest, src, count) \ ++ (((src) < (dest) && ((const char *)(src) + (count)) > (char *)(dest)) || \ ++ ((dest) < (src) && ((char *)(dest) + (count)) > (const char *)(src))) ++ ++/* ++ * Check whether the strings overlap; len is the length of the string, not including the terminator ++ * Length is related to the data type, char or wchar; do not force conversion of types ++ */ ++#define SECUREC_STRING_NO_OVERLAP(dest, src, len) \ ++ (((src) < (dest) && ((src) + (len)) < (dest)) || \ ++ ((dest) < (src) && ((dest) + (len)) < (src))) ++ ++/* ++ * Check whether the strings overlap for the strcpy and wcscpy functions; dest len and src len do not include the terminator ++ * Length is related to the data type, char or wchar; do not force conversion of types ++ */ ++#define SECUREC_STRING_IS_OVERLAP(dest, src, len) \ ++ (((src) < (dest) && ((src) + (len)) >= (dest)) || \ ++ ((dest) < (src) && ((dest) + (len)) >= (src))) ++ ++/* ++ * Check whether the strings overlap for the strcat and wcscat functions; destLen and srcLen do not include the terminator ++ * Length is related to the data type, char or wchar; do not force conversion of types ++ */ ++#define SECUREC_CAT_STRING_IS_OVERLAP(dest, destLen, src, srcLen) \ ++ (((dest) < (src) && ((dest) + (destLen) + (srcLen)) >= (src)) || \ ++ ((src) < (dest) && ((src) + (srcLen)) >= (dest))) ++ ++#if SECUREC_HAVE_STRNLEN ++#define SECUREC_CALC_STR_LEN(str, maxLen, outLen) do { \ ++ *(outLen) = strnlen((str), (maxLen)); \ ++} SECUREC_WHILE_ZERO ++#define SECUREC_CALC_STR_LEN_OPT(str, maxLen, outLen) do { \ ++ if ((maxLen) > 8) { \ ++ /* Optimization for len less than 8 */ \ ++ if (*((str) + 0) == '\0') { \ ++ *(outLen) = 0; \ ++ } else if (*((str) + 1) == '\0') { \ ++ *(outLen) = 1; \ ++ } else if (*((str) + 2) == '\0') { \ ++ *(outLen) = 2; \ ++ } else if (*((str) + 3)
== '\0') { \ ++ *(outLen) = 3; \ ++ } else if (*((str) + 4) == '\0') { \ ++ *(outLen) = 4; \ ++ } else if (*((str) + 5) == '\0') { \ ++ *(outLen) = 5; \ ++ } else if (*((str) + 6) == '\0') { \ ++ *(outLen) = 6; \ ++ } else if (*((str) + 7) == '\0') { \ ++ *(outLen) = 7; \ ++ } else if (*((str) + 8) == '\0') { \ ++ /* Optimization with a length of 8 */ \ ++ *(outLen) = 8; \ ++ } else { \ ++ /* The offset is 8 because the performance of 8 byte alignment is high */ \ ++ *(outLen) = 8 + strnlen((str) + 8, (maxLen) - 8); \ ++ } \ ++ } else { \ ++ SECUREC_CALC_STR_LEN((str), (maxLen), (outLen)); \ ++ } \ ++} SECUREC_WHILE_ZERO ++#else ++#define SECUREC_CALC_STR_LEN(str, maxLen, outLen) do { \ ++ const char *strEnd_ = (const char *)(str); \ ++ size_t availableSize_ = (size_t)(maxLen); \ ++ while (availableSize_ > 0 && *strEnd_ != '\0') { \ ++ --availableSize_; \ ++ ++strEnd_; \ ++ } \ ++ *(outLen) = (size_t)(strEnd_ - (str)); \ ++} SECUREC_WHILE_ZERO ++#define SECUREC_CALC_STR_LEN_OPT SECUREC_CALC_STR_LEN ++#endif ++ ++#define SECUREC_CALC_WSTR_LEN(str, maxLen, outLen) do { \ ++ const wchar_t *strEnd_ = (const wchar_t *)(str); \ ++ size_t len_ = 0; \ ++ while (len_ < (maxLen) && *strEnd_ != L'\0') { \ ++ ++len_; \ ++ ++strEnd_; \ ++ } \ ++ *(outLen) = len_; \ ++} SECUREC_WHILE_ZERO ++ ++/* ++ * Performance optimization, product may disable inline function. ++ * Using function pointer for MEMSET to prevent compiler optimization when cleaning up memory. ++ */ ++#ifdef SECUREC_USE_ASM ++#define SECUREC_MEMSET_FUNC_OPT memset_opt ++#define SECUREC_MEMCPY_FUNC_OPT memcpy_opt ++#else ++#define SECUREC_MEMSET_FUNC_OPT memset ++#define SECUREC_MEMCPY_FUNC_OPT memcpy ++#endif ++ ++#define SECUREC_MEMCPY_WARP_OPT(dest, src, count) (void)SECUREC_MEMCPY_FUNC_OPT((dest), (src), (count)) ++ ++#ifndef SECUREC_MEMSET_BARRIER ++#if defined(__GNUC__) ++/* Can be turned off for scenarios that do not use memory barrier */ ++#define SECUREC_MEMSET_BARRIER 1 ++#else ++#define SECUREC_MEMSET_BARRIER 0 ++#endif ++#endif ++ ++#ifndef SECUREC_MEMSET_INDIRECT_USE ++/* Can be turned off for scenarios that do not allow pointer calls */ ++#define SECUREC_MEMSET_INDIRECT_USE 1 ++#endif ++ ++#if SECUREC_MEMSET_BARRIER ++#define SECUREC_MEMORY_BARRIER(dest) __asm__ __volatile__("": : "r"(dest) : "memory") ++#else ++#define SECUREC_MEMORY_BARRIER(dest) ++#endif ++ ++#if SECUREC_MEMSET_BARRIER ++#define SECUREC_MEMSET_PREVENT_DSE(dest, value, count) do { \ ++ (void)SECUREC_MEMSET_FUNC_OPT(dest, value, count); \ ++ SECUREC_MEMORY_BARRIER(dest); \ ++} SECUREC_WHILE_ZERO ++#elif SECUREC_MEMSET_INDIRECT_USE ++#define SECUREC_MEMSET_PREVENT_DSE(dest, value, count) do { \ ++ void *(* const volatile fn_)(void *s_, int c_, size_t n_) = SECUREC_MEMSET_FUNC_OPT; \ ++ (void)(*fn_)((dest), (value), (count)); \ ++} SECUREC_WHILE_ZERO ++#else ++#define SECUREC_MEMSET_PREVENT_DSE(dest, value, count) (void)SECUREC_MEMSET_FUNC_OPT((dest), (value), (count)) ++#endif ++ ++#ifdef SECUREC_FORMAT_OUTPUT_INPUT ++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT) || defined(__ARMCC_VERSION) ++typedef __int64 SecInt64; ++typedef unsigned __int64 SecUnsignedInt64; ++#if defined(__ARMCC_VERSION) ++typedef unsigned int SecUnsignedInt32; ++#else ++typedef unsigned __int32 SecUnsignedInt32; ++#endif ++#else ++typedef unsigned int SecUnsignedInt32; ++typedef long long SecInt64; ++typedef unsigned long long SecUnsignedInt64; ++#endif ++ ++#ifdef SECUREC_FOR_WCHAR ++#if 1//defined(SECUREC_VXWORKS_PLATFORM) && !defined(__WINT_TYPE__) ++typedef wchar_t wint_t; ++#endif 
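The SECUREC_CALC_STR_LEN_OPT macro above is easier to follow as a plain function. The sketch below (a hypothetical equivalent, assuming a POSIX strnlen is available) shows the intent: probe the first nine bytes individually, then let strnlen continue from offset 8 so the libc routine works on an 8-byte-aligned tail:

    #include <string.h>

    /* Hypothetical equivalent of SECUREC_CALC_STR_LEN_OPT */
    static size_t CalcStrLenOpt(const char *str, size_t maxLen)
    {
        if (maxLen > 8) {
            size_t i;
            for (i = 0; i <= 8; ++i) {   /* unrolled in the real macro */
                if (str[i] == '\0') {
                    return i;
                }
            }
            /* The offset is 8 because 8-byte aligned scanning is fast */
            return 8 + strnlen(str + 8, maxLen - 8);
        }
        return strnlen(str, maxLen);
    }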
++#ifndef WEOF ++#define WEOF ((wchar_t)(-1)) ++#endif ++#define SECUREC_CHAR(x) L ## x ++typedef wchar_t SecChar; ++typedef wchar_t SecUnsignedChar; ++typedef wint_t SecInt; ++typedef wint_t SecUnsignedInt; ++#else /* no SECUREC_FOR_WCHAR */ ++#define SECUREC_CHAR(x) (x) ++typedef char SecChar; ++typedef unsigned char SecUnsignedChar; ++typedef int SecInt; ++typedef unsigned int SecUnsignedInt; ++#endif ++#endif ++ ++/* ++ * Determine whether the address is 8-byte aligned ++ * Some systems do not have uintptr_t type, so use NULL to clear tool alarm 507 ++ */ ++#define SECUREC_ADDR_ALIGNED_8(addr) ((((size_t)(addr)) & 7U) == 0) /* Use 7 to check aligned 8 */ ++ ++/* ++ * If you define the memory allocation function, you need to define the function prototype. ++ * You can define this macro as a header file. ++ */ ++#if defined(SECUREC_MALLOC_PROTOTYPE) ++SECUREC_MALLOC_PROTOTYPE ++#endif ++ ++#ifndef SECUREC_MALLOC ++#define SECUREC_MALLOC(x) malloc((size_t)(x)) ++#endif ++ ++#ifndef SECUREC_FREE ++#define SECUREC_FREE(x) free((void *)(x)) ++#endif ++ ++/* Improve performance with struct assignment, buf1 is not defined to avoid tool false positive */ ++#define SECUREC_COPY_VALUE_BY_STRUCT(dest, src, n) do { \ ++ *(SecStrBuf##n *)(void *)(dest) = *(const SecStrBuf##n *)(const void *)(src); \ ++} SECUREC_WHILE_ZERO ++ ++typedef struct { ++ unsigned char buf[2]; /* Performance optimization code structure assignment length 2 bytes */ ++} SecStrBuf2; ++typedef struct { ++ unsigned char buf[3]; /* Performance optimization code structure assignment length 3 bytes */ ++} SecStrBuf3; ++typedef struct { ++ unsigned char buf[4]; /* Performance optimization code structure assignment length 4 bytes */ ++} SecStrBuf4; ++typedef struct { ++ unsigned char buf[5]; /* Performance optimization code structure assignment length 5 bytes */ ++} SecStrBuf5; ++typedef struct { ++ unsigned char buf[6]; /* Performance optimization code structure assignment length 6 bytes */ ++} SecStrBuf6; ++typedef struct { ++ unsigned char buf[7]; /* Performance optimization code structure assignment length 7 bytes */ ++} SecStrBuf7; ++typedef struct { ++ unsigned char buf[8]; /* Performance optimization code structure assignment length 8 bytes */ ++} SecStrBuf8; ++typedef struct { ++ unsigned char buf[9]; /* Performance optimization code structure assignment length 9 bytes */ ++} SecStrBuf9; ++typedef struct { ++ unsigned char buf[10]; /* Performance optimization code structure assignment length 10 bytes */ ++} SecStrBuf10; ++typedef struct { ++ unsigned char buf[11]; /* Performance optimization code structure assignment length 11 bytes */ ++} SecStrBuf11; ++typedef struct { ++ unsigned char buf[12]; /* Performance optimization code structure assignment length 12 bytes */ ++} SecStrBuf12; ++typedef struct { ++ unsigned char buf[13]; /* Performance optimization code structure assignment length 13 bytes */ ++} SecStrBuf13; ++typedef struct { ++ unsigned char buf[14]; /* Performance optimization code structure assignment length 14 bytes */ ++} SecStrBuf14; ++typedef struct { ++ unsigned char buf[15]; /* Performance optimization code structure assignment length 15 bytes */ ++} SecStrBuf15; ++typedef struct { ++ unsigned char buf[16]; /* Performance optimization code structure assignment length 16 bytes */ ++} SecStrBuf16; ++typedef struct { ++ unsigned char buf[17]; /* Performance optimization code structure assignment length 17 bytes */ ++} SecStrBuf17; ++typedef struct { ++ unsigned char buf[18]; /* Performance optimization code 
structure assignment length 18 bytes */ ++} SecStrBuf18; ++typedef struct { ++ unsigned char buf[19]; /* Performance optimization code structure assignment length 19 bytes */ ++} SecStrBuf19; ++typedef struct { ++ unsigned char buf[20]; /* Performance optimization code structure assignment length 20 bytes */ ++} SecStrBuf20; ++typedef struct { ++ unsigned char buf[21]; /* Performance optimization code structure assignment length 21 bytes */ ++} SecStrBuf21; ++typedef struct { ++ unsigned char buf[22]; /* Performance optimization code structure assignment length 22 bytes */ ++} SecStrBuf22; ++typedef struct { ++ unsigned char buf[23]; /* Performance optimization code structure assignment length 23 bytes */ ++} SecStrBuf23; ++typedef struct { ++ unsigned char buf[24]; /* Performance optimization code structure assignment length 24 bytes */ ++} SecStrBuf24; ++typedef struct { ++ unsigned char buf[25]; /* Performance optimization code structure assignment length 25 bytes */ ++} SecStrBuf25; ++typedef struct { ++ unsigned char buf[26]; /* Performance optimization code structure assignment length 26 bytes */ ++} SecStrBuf26; ++typedef struct { ++ unsigned char buf[27]; /* Performance optimization code structure assignment length 27 bytes */ ++} SecStrBuf27; ++typedef struct { ++ unsigned char buf[28]; /* Performance optimization code structure assignment length 28 bytes */ ++} SecStrBuf28; ++typedef struct { ++ unsigned char buf[29]; /* Performance optimization code structure assignment length 29 bytes */ ++} SecStrBuf29; ++typedef struct { ++ unsigned char buf[30]; /* Performance optimization code structure assignment length 30 bytes */ ++} SecStrBuf30; ++typedef struct { ++ unsigned char buf[31]; /* Performance optimization code structure assignment length 31 bytes */ ++} SecStrBuf31; ++typedef struct { ++ unsigned char buf[32]; /* Performance optimization code structure assignment length 32 bytes */ ++} SecStrBuf32; ++typedef struct { ++ unsigned char buf[33]; /* Performance optimization code structure assignment length 33 bytes */ ++} SecStrBuf33; ++typedef struct { ++ unsigned char buf[34]; /* Performance optimization code structure assignment length 34 bytes */ ++} SecStrBuf34; ++typedef struct { ++ unsigned char buf[35]; /* Performance optimization code structure assignment length 35 bytes */ ++} SecStrBuf35; ++typedef struct { ++ unsigned char buf[36]; /* Performance optimization code structure assignment length 36 bytes */ ++} SecStrBuf36; ++typedef struct { ++ unsigned char buf[37]; /* Performance optimization code structure assignment length 37 bytes */ ++} SecStrBuf37; ++typedef struct { ++ unsigned char buf[38]; /* Performance optimization code structure assignment length 38 bytes */ ++} SecStrBuf38; ++typedef struct { ++ unsigned char buf[39]; /* Performance optimization code structure assignment length 39 bytes */ ++} SecStrBuf39; ++typedef struct { ++ unsigned char buf[40]; /* Performance optimization code structure assignment length 40 bytes */ ++} SecStrBuf40; ++typedef struct { ++ unsigned char buf[41]; /* Performance optimization code structure assignment length 41 bytes */ ++} SecStrBuf41; ++typedef struct { ++ unsigned char buf[42]; /* Performance optimization code structure assignment length 42 bytes */ ++} SecStrBuf42; ++typedef struct { ++ unsigned char buf[43]; /* Performance optimization code structure assignment length 43 bytes */ ++} SecStrBuf43; ++typedef struct { ++ unsigned char buf[44]; /* Performance optimization code structure assignment length 44 bytes */ ++} 
SecStrBuf44; ++typedef struct { ++ unsigned char buf[45]; /* Performance optimization code structure assignment length 45 bytes */ ++} SecStrBuf45; ++typedef struct { ++ unsigned char buf[46]; /* Performance optimization code structure assignment length 46 bytes */ ++} SecStrBuf46; ++typedef struct { ++ unsigned char buf[47]; /* Performance optimization code structure assignment length 47 bytes */ ++} SecStrBuf47; ++typedef struct { ++ unsigned char buf[48]; /* Performance optimization code structure assignment length 48 bytes */ ++} SecStrBuf48; ++typedef struct { ++ unsigned char buf[49]; /* Performance optimization code structure assignment length 49 bytes */ ++} SecStrBuf49; ++typedef struct { ++ unsigned char buf[50]; /* Performance optimization code structure assignment length 50 bytes */ ++} SecStrBuf50; ++typedef struct { ++ unsigned char buf[51]; /* Performance optimization code structure assignment length 51 bytes */ ++} SecStrBuf51; ++typedef struct { ++ unsigned char buf[52]; /* Performance optimization code structure assignment length 52 bytes */ ++} SecStrBuf52; ++typedef struct { ++ unsigned char buf[53]; /* Performance optimization code structure assignment length 53 bytes */ ++} SecStrBuf53; ++typedef struct { ++ unsigned char buf[54]; /* Performance optimization code structure assignment length 54 bytes */ ++} SecStrBuf54; ++typedef struct { ++ unsigned char buf[55]; /* Performance optimization code structure assignment length 55 bytes */ ++} SecStrBuf55; ++typedef struct { ++ unsigned char buf[56]; /* Performance optimization code structure assignment length 56 bytes */ ++} SecStrBuf56; ++typedef struct { ++ unsigned char buf[57]; /* Performance optimization code structure assignment length 57 bytes */ ++} SecStrBuf57; ++typedef struct { ++ unsigned char buf[58]; /* Performance optimization code structure assignment length 58 bytes */ ++} SecStrBuf58; ++typedef struct { ++ unsigned char buf[59]; /* Performance optimization code structure assignment length 59 bytes */ ++} SecStrBuf59; ++typedef struct { ++ unsigned char buf[60]; /* Performance optimization code structure assignment length 60 bytes */ ++} SecStrBuf60; ++typedef struct { ++ unsigned char buf[61]; /* Performance optimization code structure assignment length 61 bytes */ ++} SecStrBuf61; ++typedef struct { ++ unsigned char buf[62]; /* Performance optimization code structure assignment length 62 bytes */ ++} SecStrBuf62; ++typedef struct { ++ unsigned char buf[63]; /* Performance optimization code structure assignment length 63 bytes */ ++} SecStrBuf63; ++typedef struct { ++ unsigned char buf[64]; /* Performance optimization code structure assignment length 64 bytes */ ++} SecStrBuf64; ++ ++/* ++ * User can change the error handler by modify the following definition, ++ * such as logging the detail error in file. 
++ */ ++#if defined(_DEBUG) || defined(DEBUG) ++#if defined(SECUREC_ERROR_HANDLER_BY_ASSERT) ++#define SECUREC_ERROR_INVALID_PARAMTER(msg) assert(msg "invalid argument" == NULL) ++#define SECUREC_ERROR_INVALID_RANGE(msg) assert(msg "invalid dest buffer size" == NULL) ++#define SECUREC_ERROR_BUFFER_OVERLAP(msg) assert(msg "buffer overlap" == NULL) ++#elif defined(SECUREC_ERROR_HANDLER_BY_PRINTF) ++#if SECUREC_IN_KERNEL ++#define SECUREC_ERROR_INVALID_PARAMTER(msg) printk("%s invalid argument\n", msg) ++#define SECUREC_ERROR_INVALID_RANGE(msg) printk("%s invalid dest buffer size\n", msg) ++#define SECUREC_ERROR_BUFFER_OVERLAP(msg) printk("%s buffer overlap\n", msg) ++#else ++#define SECUREC_ERROR_INVALID_PARAMTER(msg) printf("%s invalid argument\n", msg) ++#define SECUREC_ERROR_INVALID_RANGE(msg) printf("%s invalid dest buffer size\n", msg) ++#define SECUREC_ERROR_BUFFER_OVERLAP(msg) printf("%s buffer overlap\n", msg) ++#endif ++#elif defined(SECUREC_ERROR_HANDLER_BY_FILE_LOG) ++#define SECUREC_ERROR_INVALID_PARAMTER(msg) LogSecureCRuntimeError(msg " EINVAL\n") ++#define SECUREC_ERROR_INVALID_RANGE(msg) LogSecureCRuntimeError(msg " ERANGE\n") ++#define SECUREC_ERROR_BUFFER_OVERLAP(msg) LogSecureCRuntimeError(msg " EOVERLAP\n") ++#endif ++#endif ++ ++/* Default handler is none */ ++#ifndef SECUREC_ERROR_INVALID_PARAMTER ++#define SECUREC_ERROR_INVALID_PARAMTER(msg) ++#endif ++#ifndef SECUREC_ERROR_INVALID_RANGE ++#define SECUREC_ERROR_INVALID_RANGE(msg) ++#endif ++#ifndef SECUREC_ERROR_BUFFER_OVERLAP ++#define SECUREC_ERROR_BUFFER_OVERLAP(msg) ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* Assembly language memory copy and memory set for X86 or MIPS ... */ ++#ifdef SECUREC_USE_ASM ++void *memcpy_opt(void *dest, const void *src, size_t n); ++void *memset_opt(void *s, int c, size_t n); ++#endif ++ ++#if defined(SECUREC_ERROR_HANDLER_BY_FILE_LOG) ++void LogSecureCRuntimeError(const char *errDetail); ++#endif ++ ++#ifdef __cplusplus ++} ++#endif /* __cplusplus */ ++#endif ++ +diff --git a/lib/securec/src/secureinput_a.c b/lib/securec/src/secureinput_a.c +new file mode 100644 +index 000000000..e79868f45 +--- /dev/null ++++ b/lib/securec/src/secureinput_a.c +@@ -0,0 +1,38 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: By defining data type for ANSI string and including "input.inl", ++ * this file generates real underlying function used by scanf family API. 
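The error-handler hooks above are compile-time choices. For instance, a debug build configured with SECUREC_ERROR_HANDLER_BY_FILE_LOG must supply LogSecureCRuntimeError itself; a minimal sketch follows (the log path here is made up for illustration):

    #include <stdio.h>

    /* Satisfies the declaration in securecutil.h when
     * SECUREC_ERROR_HANDLER_BY_FILE_LOG is selected in a DEBUG build. */
    void LogSecureCRuntimeError(const char *errDetail)
    {
        FILE *fp = fopen("/tmp/securec.log", "a"); /* hypothetical path */
        if (fp != NULL) {
            (void)fputs(errDetail, fp);
            (void)fclose(fp);
        }
    }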
++ * Create: 2014-02-25 ++ */ ++ ++#define SECUREC_FORMAT_OUTPUT_INPUT 1 ++#ifdef SECUREC_FOR_WCHAR ++#undef SECUREC_FOR_WCHAR ++#endif ++ ++#include "secinput.h" ++ ++#include "input.inl" ++ ++SECUREC_INLINE int SecIsDigit(SecInt ch) ++{ ++ /* SecInt to unsigned char clear 571, use bit mask to clear negative return of ch */ ++ return isdigit((int)((unsigned int)(unsigned char)(ch) & 0xffU)); ++} ++SECUREC_INLINE int SecIsXdigit(SecInt ch) ++{ ++ return isxdigit((int)((unsigned int)(unsigned char)(ch) & 0xffU)); ++} ++SECUREC_INLINE int SecIsSpace(SecInt ch) ++{ ++ return isspace((int)((unsigned int)(unsigned char)(ch) & 0xffU)); ++} ++ +diff --git a/lib/securec/src/secureprintoutput.h b/lib/securec/src/secureprintoutput.h +new file mode 100644 +index 000000000..a00b10dff +--- /dev/null ++++ b/lib/securec/src/secureprintoutput.h +@@ -0,0 +1,146 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: Define macro, enum, data struct, and declare internal used function ++ * prototype, which is used by output.inl, secureprintoutput_w.c and ++ * secureprintoutput_a.c. ++ * Create: 2014-02-25 ++ */ ++ ++#ifndef SECUREPRINTOUTPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C ++#define SECUREPRINTOUTPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C ++#include "securecutil.h" ++ ++/* Shield compilation alerts about using sprintf without format attribute to format float value. */ ++#ifndef SECUREC_HANDLE_WFORMAT ++#define SECUREC_HANDLE_WFORMAT 1 ++#endif ++ ++#if SECUREC_HANDLE_WFORMAT && defined(__GNUC__) && ((__GNUC__ >= 5) || \ ++ (defined(__GNUC_MINOR__) && (__GNUC__ == 4 && __GNUC_MINOR__ > 7))) ++#if defined(__clang__) ++#define SECUREC_MASK_WFORMAT_WARNING _Pragma("GCC diagnostic push") \ ++ _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"") ++#else ++#define SECUREC_MASK_WFORMAT_WARNING _Pragma("GCC diagnostic push") \ ++ _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"") \ ++ _Pragma("GCC diagnostic ignored \"-Wmissing-format-attribute\"") \ ++ _Pragma("GCC diagnostic ignored \"-Wsuggest-attribute=format\"") ++#endif ++#define SECUREC_END_MASK_WFORMAT_WARNING _Pragma("GCC diagnostic pop") ++#else ++#define SECUREC_MASK_WFORMAT_WARNING ++#define SECUREC_END_MASK_WFORMAT_WARNING ++#endif ++ ++#define SECUREC_MASK_VSPRINTF_WARNING SECUREC_MASK_WFORMAT_WARNING \ ++ SECUREC_MASK_MSVC_CRT_WARNING ++ ++#define SECUREC_END_MASK_VSPRINTF_WARNING SECUREC_END_MASK_WFORMAT_WARNING \ ++ SECUREC_END_MASK_MSVC_CRT_WARNING ++ ++/* ++ * Flag definitions. ++ * Using macros instead of enumerations is because some of the enumerated types under the compiler are 16bit. 
++ */ ++#define SECUREC_FLAG_SIGN 0x00001U ++#define SECUREC_FLAG_SIGN_SPACE 0x00002U ++#define SECUREC_FLAG_LEFT 0x00004U ++#define SECUREC_FLAG_LEADZERO 0x00008U ++#define SECUREC_FLAG_LONG 0x00010U ++#define SECUREC_FLAG_SHORT 0x00020U ++#define SECUREC_FLAG_SIGNED 0x00040U ++#define SECUREC_FLAG_ALTERNATE 0x00080U ++#define SECUREC_FLAG_NEGATIVE 0x00100U ++#define SECUREC_FLAG_FORCE_OCTAL 0x00200U ++#define SECUREC_FLAG_LONG_DOUBLE 0x00400U ++#define SECUREC_FLAG_WIDECHAR 0x00800U ++#define SECUREC_FLAG_LONGLONG 0x01000U ++#define SECUREC_FLAG_CHAR 0x02000U ++#define SECUREC_FLAG_POINTER 0x04000U ++#define SECUREC_FLAG_I64 0x08000U ++#define SECUREC_FLAG_PTRDIFF 0x10000U ++#define SECUREC_FLAG_SIZE 0x20000U ++#ifdef SECUREC_COMPATIBLE_LINUX_FORMAT ++#define SECUREC_FLAG_INTMAX 0x40000U ++#endif ++ ++/* State definitions. Identify the status of the current format */ ++typedef enum { ++ STAT_NORMAL, ++ STAT_PERCENT, ++ STAT_FLAG, ++ STAT_WIDTH, ++ STAT_DOT, ++ STAT_PRECIS, ++ STAT_SIZE, ++ STAT_TYPE, ++ STAT_INVALID ++} SecFmtState; ++ ++#ifndef SECUREC_BUFFER_SIZE ++#if SECUREC_IN_KERNEL ++#define SECUREC_BUFFER_SIZE 32 ++#elif defined(SECUREC_STACK_SIZE_LESS_THAN_1K) ++/* ++ * SECUREC BUFFER SIZE Can not be less than 23 ++ * The length of the octal representation of 64-bit integers with zero lead ++ */ ++#define SECUREC_BUFFER_SIZE 256 ++#else ++#define SECUREC_BUFFER_SIZE 512 ++#endif ++#endif ++#if SECUREC_BUFFER_SIZE < 23 ++#error SECUREC_BUFFER_SIZE Can not be less than 23 ++#endif ++/* Buffer size for wchar, use 4 to make the compiler aligns as 8 bytes as possible */ ++#define SECUREC_WCHAR_BUFFER_SIZE 4 ++ ++#define SECUREC_MAX_PRECISION SECUREC_BUFFER_SIZE ++/* Max. # bytes in multibyte char,see MB_LEN_MAX */ ++#define SECUREC_MB_LEN 16 ++/* The return value of the internal function, which is returned when truncated */ ++#define SECUREC_PRINTF_TRUNCATE (-2) ++ ++#define SECUREC_VSPRINTF_PARAM_ERROR(format, strDest, destMax, maxLimit) \ ++ ((format) == NULL || (strDest) == NULL || (destMax) == 0 || (destMax) > (maxLimit)) ++ ++#define SECUREC_VSPRINTF_CLEAR_DEST(strDest, destMax, maxLimit) do { \ ++ if ((strDest) != NULL && (destMax) > 0 && (destMax) <= (maxLimit)) { \ ++ *(strDest) = '\0'; \ ++ } \ ++} SECUREC_WHILE_ZERO ++ ++#ifdef SECUREC_COMPATIBLE_WIN_FORMAT ++#define SECUREC_VSNPRINTF_PARAM_ERROR(format, strDest, destMax, count, maxLimit) \ ++ (((format) == NULL || (strDest) == NULL || (destMax) == 0 || (destMax) > (maxLimit)) || \ ++ ((count) > (SECUREC_STRING_MAX_LEN - 1) && (count) != (size_t)(-1))) ++ ++#else ++#define SECUREC_VSNPRINTF_PARAM_ERROR(format, strDest, destMax, count, maxLimit) \ ++ (((format) == NULL || (strDest) == NULL || (destMax) == 0 || (destMax) > (maxLimit)) || \ ++ ((count) > (SECUREC_STRING_MAX_LEN - 1))) ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++#ifdef SECUREC_FOR_WCHAR ++int SecVswprintfImpl(wchar_t *string, size_t count, const wchar_t *format, va_list argList); ++#else ++int SecVsnprintfImpl(char *string, size_t count, const char *format, va_list argList); ++#endif ++#ifdef __cplusplus ++} ++#endif ++ ++#endif ++ +diff --git a/lib/securec/src/secureprintoutput_a.c b/lib/securec/src/secureprintoutput_a.c +new file mode 100644 +index 000000000..b2b4b6a65 +--- /dev/null ++++ b/lib/securec/src/secureprintoutput_a.c +@@ -0,0 +1,112 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. 
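The floor of 23 for SECUREC_BUFFER_SIZE comes from the comment above: a 64-bit value rendered in octal with a leading zero occupies 22 digits, plus one byte for the terminator. A quick standalone check of the digit count:

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        /* 2^64 - 1 in octal is 1777777777777777777777: 22 digits */
        int n = snprintf(buf, sizeof(buf), "%llo", 0xffffffffffffffffULL);
        printf("%d octal digits: %s\n", n, buf);
        return 0;
    }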
++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: By defining corresponding macro for ANSI string and including "output.inl", ++ * this file generates real underlying function used by printf family API. ++ * Create: 2014-02-25 ++ */ ++ ++#define SECUREC_FORMAT_OUTPUT_INPUT 1 ++ ++#ifdef SECUREC_FOR_WCHAR ++#undef SECUREC_FOR_WCHAR ++#endif ++ ++#include "secureprintoutput.h" ++#if SECUREC_WARP_OUTPUT ++#define SECUREC_FORMAT_FLAG_TABLE_SIZE 128 ++SECUREC_INLINE const char *SecSkipKnownFlags(const char *format) ++{ ++ static const unsigned char flagTable[SECUREC_FORMAT_FLAG_TABLE_SIZE] = { ++ /* ++ * Known flag is "0123456789 +-#hlLwZzjqt*I$" ++ */ ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x00, ++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, ++ 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 ++ }; ++ const char *fmt = format; ++ while (*fmt != '\0') { ++ char fmtChar = *fmt; ++ if ((unsigned char)fmtChar > 0x7f) { /* 0x7f is upper limit of format char value */ ++ break; ++ } ++ if (flagTable[(unsigned char)fmtChar] == 0) { ++ break; ++ } ++ ++fmt; ++ } ++ return fmt; ++} ++ ++SECUREC_INLINE int SecFormatContainN(const char *format) ++{ ++ const char *fmt = format; ++ while (*fmt != '\0') { ++ ++fmt; ++ /* Skip normal char */ ++ if (*(fmt - 1) != '%') { ++ continue; ++ } ++ /* Meet %% */ ++ if (*fmt == '%') { ++ ++fmt; /* Point to the character after the %. 
Correct handling %%xx */ ++ continue; ++ } ++ /* Now parse %..., fmt point to the character after the % */ ++ fmt = SecSkipKnownFlags(fmt); ++ if (*fmt == 'n') { ++ return 1; ++ } ++ } ++ return 0; ++} ++/* ++ * Multi character formatted output implementation, the count include \0 character, must be greater than zero ++ */ ++int SecVsnprintfImpl(char *string, size_t count, const char *format, va_list argList) ++{ ++ int retVal; ++ if (SecFormatContainN(format) != 0) { ++ string[0] = '\0'; ++ return -1; ++ } ++ SECUREC_MASK_VSPRINTF_WARNING ++ retVal = vsnprintf(string, count, format, argList); ++ SECUREC_END_MASK_VSPRINTF_WARNING ++ if (retVal >= (int)count) { /* The size_t to int is ok, count max is SECUREC_STRING_MAX_LEN */ ++ /* The buffer was too small; we return truncation */ ++ string[count - 1] = '\0'; ++ return SECUREC_PRINTF_TRUNCATE; ++ } ++ if (retVal < 0) { ++ string[0] = '\0'; /* Empty the dest strDest */ ++ return -1; ++ } ++ return retVal; ++} ++#else ++#if SECUREC_IN_KERNEL ++#include ++#endif ++ ++#ifndef EOF ++#define EOF (-1) ++#endif ++ ++#include "output.inl" ++ ++#endif ++ +diff --git a/lib/securec/src/snprintf_s.c b/lib/securec/src/snprintf_s.c +new file mode 100644 +index 000000000..ec18328e3 +--- /dev/null ++++ b/lib/securec/src/snprintf_s.c +@@ -0,0 +1,110 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: snprintf_s function ++ * Create: 2014-02-25 ++ */ ++ ++#include "linux/securec.h" ++ ++#if SECUREC_ENABLE_SNPRINTF ++/* ++ * ++ * The snprintf_s function is equivalent to the snprintf function ++ * except for the parameter destMax/count and the explicit runtime-constraints violation ++ * The snprintf_s function formats and stores count or fewer characters in ++ * strDest and appends a terminating null. Each argument (if any) is converted ++ * and output according to the corresponding format specification in format. ++ * The formatting is consistent with the printf family of functions; If copying ++ * occurs between strings that overlap, the behavior is undefined. ++ * ++ * ++ * strDest Storage location for the output. ++ * destMax The size of the storage location for output. Size ++ * in bytes for snprintf_s or size in words for snwprintf_s. ++ * count Maximum number of character to store. ++ * format Format-control string. ++ * ... Optional arguments. ++ * ++ * ++ * strDest is updated ++ * ++ * ++ * return the number of characters written, not including the terminating null ++ * return -1 if an error occurs. ++ * return -1 if count < destMax and the output string has been truncated ++ * ++ * If there is a runtime-constraint violation, strDest[0] will be set to the '\0' when strDest and destMax valid ++ * ++ */ ++int snprintf_s(char *strDest, size_t destMax, size_t count, const char *format, ...) 
++{ ++ int ret; /* If initialization causes e838 */ ++ va_list argList; ++ ++ va_start(argList, format); ++ ret = vsnprintf_s(strDest, destMax, count, format, argList); ++ va_end(argList); ++ (void)argList; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ ++ return ret; ++} ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(snprintf_s); ++#endif ++#endif ++ ++#if SECUREC_SNPRINTF_TRUNCATED ++/* ++ * ++ * The snprintf_truncated_s function is equivalent to the snprintf function ++ * except for the parameter destMax/count and the explicit runtime-constraints violation ++ * The snprintf_truncated_s function formats and stores count or fewer characters in ++ * strDest and appends a terminating null. Each argument (if any) is converted ++ * and output according to the corresponding format specification in format. ++ * The formatting is consistent with the printf family of functions; If copying ++ * occurs between strings that overlap, the behavior is undefined. ++ * ++ * ++ * strDest Storage location for the output. ++ * destMax The size of the storage location for output. Size ++ * in bytes for snprintf_truncated_s or size in words for snwprintf_s. ++ * format Format-control string. ++ * ... Optional arguments. ++ * ++ * ++ * strDest is updated ++ * ++ * ++ * return the number of characters written, not including the terminating null ++ * return -1 if an error occurs. ++ * return destMax-1 if output string has been truncated ++ * ++ * If there is a runtime-constraint violation, strDest[0] will be set to the '\0' when strDest and destMax valid ++ * ++ */ ++int snprintf_truncated_s(char *strDest, size_t destMax, const char *format, ...) ++{ ++ int ret; /* If initialization causes e838 */ ++ va_list argList; ++ ++ va_start(argList, format); ++ ret = vsnprintf_truncated_s(strDest, destMax, format, argList); ++ va_end(argList); ++ (void)argList; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ ++ return ret; ++} ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(snprintf_truncated_s); ++#endif ++ ++#endif ++ +diff --git a/lib/securec/src/sprintf_s.c b/lib/securec/src/sprintf_s.c +new file mode 100644 +index 000000000..1f25f8399 +--- /dev/null ++++ b/lib/securec/src/sprintf_s.c +@@ -0,0 +1,58 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: sprintf_s function ++ * Create: 2014-02-25 ++ */ ++ ++#include "linux/securec.h" ++ ++/* ++ * ++ * The sprintf_s function is equivalent to the sprintf function ++ * except for the parameter destMax and the explicit runtime-constraints violation ++ * The sprintf_s function formats and stores a series of characters and values ++ * in strDest. Each argument (if any) is converted and output according to ++ * the corresponding format specification in format. The format consists of ++ * ordinary characters and has the same form and function as the format argument ++ * for printf. A null character is appended after the last character written. 
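The two return conventions documented above can be seen side by side. A minimal sketch, assuming a userspace build with the header available as securec.h:

    #include <stdio.h>
    #include "securec.h"

    int main(void)
    {
        char buf[8];

        /* snprintf_s reports truncation as -1 when count < destMax */
        int a = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "%s", "0123456789");
        printf("snprintf_s: %d \"%s\"\n", a, buf);           /* -1 "0123456" */

        /* snprintf_truncated_s reports the same case as destMax - 1 */
        int b = snprintf_truncated_s(buf, sizeof(buf), "%s", "0123456789");
        printf("snprintf_truncated_s: %d \"%s\"\n", b, buf); /* 7 "0123456" */
        return 0;
    }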
++ * If copying occurs between strings that overlap, the behavior is undefined. ++ * ++ * ++ * strDest Storage location for output. ++ * destMax Maximum number of characters to store. ++ * format Format-control string. ++ * ... Optional arguments ++ * ++ * ++ * strDest is updated ++ * ++ * ++ * return the number of bytes stored in strDest, not counting the terminating null character. ++ * return -1 if an error occurred. ++ * ++ * If there is a runtime-constraint violation, strDest[0] will be set to the '\0' when strDest and destMax valid ++ */ ++int sprintf_s(char *strDest, size_t destMax, const char *format, ...) ++{ ++ int ret; /* If initialization causes e838 */ ++ va_list argList; ++ ++ va_start(argList, format); ++ ret = vsprintf_s(strDest, destMax, format, argList); ++ va_end(argList); ++ (void)argList; /* To clear e438 last value assigned not used , the compiler will optimize this code */ ++ ++ return ret; ++} ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(sprintf_s); ++#endif ++ +diff --git a/lib/securec/src/sscanf_s.c b/lib/securec/src/sscanf_s.c +new file mode 100644 +index 000000000..a8141ed25 +--- /dev/null ++++ b/lib/securec/src/sscanf_s.c +@@ -0,0 +1,58 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: sscanf_s function ++ * Create: 2014-02-25 ++ */ ++ ++#include "linux/securec.h" ++ ++/* ++ * ++ * The sscanf_s function is equivalent to fscanf_s, ++ * except that input is obtained from a string (specified by the argument buffer) rather than from a stream ++ * The sscanf function reads data from buffer into the location given by each ++ * argument. Every argument must be a pointer to a variable with a type that ++ * corresponds to a type specifier in format. The format argument controls the ++ * interpretation of the input fields and has the same form and function as ++ * the format argument for the scanf function. ++ * If copying takes place between strings that overlap, the behavior is undefined. ++ * ++ * ++ * buffer Stored data. ++ * format Format control string, see Format Specifications. ++ * ... Optional arguments. ++ * ++ * ++ * ... The converted value stored in user assigned address ++ * ++ * ++ * Each of these functions returns the number of fields successfully converted ++ * and assigned; the return value does not include fields that were read but ++ * not assigned. ++ * A return value of 0 indicates that no fields were assigned. ++ * return -1 if an error occurs. ++ */ ++int sscanf_s(const char *buffer, const char *format, ...) 
++{ ++ int ret; /* If initialization causes e838 */ ++ va_list argList; ++ ++ va_start(argList, format); ++ ret = vsscanf_s(buffer, format, argList); ++ va_end(argList); ++ (void)argList; /* To clear e438 (last value assigned not used); the compiler will optimize this code */ ++ ++ return ret; ++} ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(sscanf_s); ++#endif ++ +diff --git a/lib/securec/src/strcat_s.c b/lib/securec/src/strcat_s.c +new file mode 100644 +index 000000000..f835e7bc9 +--- /dev/null ++++ b/lib/securec/src/strcat_s.c +@@ -0,0 +1,101 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details. ++ * Description: strcat_s function ++ * Create: 2014-02-25 ++ */ ++ ++#include "securecutil.h" ++ ++/* ++ * Before this function is called, the basic parameter checking has been done ++ */ ++SECUREC_INLINE errno_t SecDoCat(char *strDest, size_t destMax, const char *strSrc) ++{ ++ size_t destLen; ++ size_t srcLen; ++ size_t maxSrcLen; ++ SECUREC_CALC_STR_LEN(strDest, destMax, &destLen); ++ /* Only optimize strSrc, do not apply this function to strDest */ ++ maxSrcLen = destMax - destLen; ++ SECUREC_CALC_STR_LEN_OPT(strSrc, maxSrcLen, &srcLen); ++ ++ if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) { ++ strDest[0] = '\0'; ++ if (strDest + destLen <= strSrc && destLen == destMax) { ++ SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); ++ return EINVAL_AND_RESET; ++ } ++ SECUREC_ERROR_BUFFER_OVERLAP("strcat_s"); ++ return EOVERLAP_AND_RESET; ++ } ++ if (srcLen + destLen >= destMax || strDest == strSrc) { ++ strDest[0] = '\0'; ++ if (destLen == destMax) { ++ SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); ++ return EINVAL_AND_RESET; ++ } ++ SECUREC_ERROR_INVALID_RANGE("strcat_s"); ++ return ERANGE_AND_RESET; ++ } ++ SECUREC_MEMCPY_WARP_OPT(strDest + destLen, strSrc, srcLen + 1); /* srcLen + 1 includes the \0 terminator */ ++ return EOK; ++} ++ ++/* ++ * ++ * The strcat_s function appends a copy of the string pointed to by strSrc (including the terminating null character) ++ * to the end of the string pointed to by strDest. ++ * The initial character of strSrc overwrites the terminating null character of strDest. ++ * strcat_s will return EOVERLAP_AND_RESET if the source and destination strings overlap. ++ * ++ * Note that the second parameter is the total size of the buffer, not the ++ * remaining size. ++ * ++ * ++ * strDest Null-terminated destination string buffer. ++ * destMax Size of the destination string buffer. ++ * strSrc Null-terminated source string buffer.
++ * ++ * ++ * strDest is updated ++ * ++ * ++ * EOK Success ++ * EINVAL strDest is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN ++ * EINVAL_AND_RESET (strDest unterminated and all other parameters are valid) or ++ * (strDest != NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN) ++ * ERANGE destMax is 0 or destMax > SECUREC_STRING_MAX_LEN ++ * ERANGE_AND_RESET strDest does not have enough space and all other parameters are valid and do not overlap ++ * EOVERLAP_AND_RESET dest buffer and source buffer overlap and all parameters are valid ++ * ++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid ++ */ ++errno_t strcat_s(char *strDest, size_t destMax, const char *strSrc) ++{ ++ if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) { ++ SECUREC_ERROR_INVALID_RANGE("strcat_s"); ++ return ERANGE; ++ } ++ if (strDest == NULL || strSrc == NULL) { ++ SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); ++ if (strDest != NULL) { ++ strDest[0] = '\0'; ++ return EINVAL_AND_RESET; ++ } ++ return EINVAL; ++ } ++ return SecDoCat(strDest, destMax, strSrc); ++} ++ ++#if SECUREC_EXPORT_KERNEL_SYMBOL ++EXPORT_SYMBOL(strcat_s); ++#endif ++ +diff --git a/lib/securec/src/strcpy_s.c b/lib/securec/src/strcpy_s.c +new file mode 100644 +index 000000000..ca1b2ddb1 +--- /dev/null ++++ b/lib/securec/src/strcpy_s.c +@@ -0,0 +1,353 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved. ++ * Licensed under Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, ++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, ++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. ++ * See the Mulan PSL v2 for more details.
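Putting the strcat_s contract above into practice, a small sketch (error-code values such as EOK and ERANGE_AND_RESET come from securec.h; this assumes a userspace build):

    #include <stdio.h>
    #include "securec.h"

    int main(void)
    {
        char path[16] = "/tmp";
        char tiny[6] = "/tmp";
        errno_t rc;

        rc = strcat_s(path, sizeof(path), "/demo.txt");
        printf("rc=%d path=\"%s\"\n", rc, path);  /* rc=EOK, path="/tmp/demo.txt" */

        /* Not enough room: tiny is reset to "" and ERANGE_AND_RESET is returned */
        rc = strcat_s(tiny, sizeof(tiny), "/demo.txt");
        printf("rc=%d tiny=\"%s\"\n", rc, tiny);
        return 0;
    }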
++ * Description: strcpy_s function ++ * Create: 2014-02-25 ++ */ ++/* ++ * [Standardize-exceptions] Use unsafe function: Performance-sensitive ++ * [reason] Always used in the performance critical path, ++ * and sufficient input validation is performed before calling ++ */ ++ ++#include "securecutil.h" ++ ++#ifndef SECUREC_STRCPY_WITH_PERFORMANCE ++#define SECUREC_STRCPY_WITH_PERFORMANCE 1 ++#endif ++ ++#define SECUREC_STRCPY_PARAM_OK(strDest, destMax, strSrc) ((destMax) > 0 && \ ++ (destMax) <= SECUREC_STRING_MAX_LEN && (strDest) != NULL && (strSrc) != NULL && (strDest) != (strSrc)) ++ ++#if (!SECUREC_IN_KERNEL) && SECUREC_STRCPY_WITH_PERFORMANCE ++#ifndef SECUREC_STRCOPY_THRESHOLD_SIZE ++#define SECUREC_STRCOPY_THRESHOLD_SIZE 32UL ++#endif ++/* The purpose of converting to void is to clean up the alarm */ ++#define SECUREC_SMALL_STR_COPY(strDest, strSrc, lenWithTerm) do { \ ++ if (SECUREC_ADDR_ALIGNED_8(strDest) && SECUREC_ADDR_ALIGNED_8(strSrc)) { \ ++ /* Use struct assignment */ \ ++ switch (lenWithTerm) { \ ++ case 1: \ ++ *(strDest) = *(strSrc); \ ++ break; \ ++ case 2: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 2); \ ++ break; \ ++ case 3: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 3); \ ++ break; \ ++ case 4: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 4); \ ++ break; \ ++ case 5: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 5); \ ++ break; \ ++ case 6: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 6); \ ++ break; \ ++ case 7: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 7); \ ++ break; \ ++ case 8: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 8); \ ++ break; \ ++ case 9: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 9); \ ++ break; \ ++ case 10: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 10); \ ++ break; \ ++ case 11: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 11); \ ++ break; \ ++ case 12: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 12); \ ++ break; \ ++ case 13: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 13); \ ++ break; \ ++ case 14: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 14); \ ++ break; \ ++ case 15: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 15); \ ++ break; \ ++ case 16: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 16); \ ++ break; \ ++ case 17: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 17); \ ++ break; \ ++ case 18: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 18); \ ++ break; \ ++ case 19: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 19); \ ++ break; \ ++ case 20: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 20); \ ++ break; \ ++ case 21: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 21); \ ++ break; \ ++ case 22: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 22); \ ++ break; \ ++ case 23: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 23); \ ++ break; \ ++ case 24: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 24); \ ++ break; \ ++ case 25: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 25); \ ++ break; \ ++ case 26: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 26); \ ++ break; \ ++ case 27: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 27); \ ++ break; \ ++ case 28: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 28); \ ++ break; \ ++ case 29: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 29); \ ++ break; \ ++ case 30: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 30); \ ++ 
break; \ ++ case 31: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 31); \ ++ break; \ ++ case 32: \ ++ SECUREC_COPY_VALUE_BY_STRUCT((strDest), (strSrc), 32); \ ++ break; \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } /* END switch */ \ ++ } else { \ ++ char *tmpStrDest_ = (char *)(strDest); \ ++ const char *tmpStrSrc_ = (const char *)(strSrc); \ ++ switch (lenWithTerm) { \ ++ case 32: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 31: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 30: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 29: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 28: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 27: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 26: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 25: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 24: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 23: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 22: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 21: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 20: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 19: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 18: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 17: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 16: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 15: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 14: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 13: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 12: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 11: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 10: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 9: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 8: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 7: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 6: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 5: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 4: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 3: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 2: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ case 1: \ ++ *(tmpStrDest_++) = *(tmpStrSrc_++); \ ++ /* fall-through */ /* FALLTHRU */ \ ++ default: \ ++ /* Do nothing */ \ ++ break; \ ++ } \ ++ } \ ++} SECUREC_WHILE_ZERO ++#endif ++ ++#if SECUREC_IN_KERNEL || (!SECUREC_STRCPY_WITH_PERFORMANCE) 
++#define SECUREC_STRCPY_OPT(dest, src, lenWithTerm) SECUREC_MEMCPY_WARP_OPT((dest), (src), (lenWithTerm))
++#else
++/*
++ * Performance optimization. lenWithTerm includes '\0'
++ */
++#define SECUREC_STRCPY_OPT(dest, src, lenWithTerm) do { \
++    if ((lenWithTerm) > SECUREC_STRCOPY_THRESHOLD_SIZE) { \
++        SECUREC_MEMCPY_WARP_OPT((dest), (src), (lenWithTerm)); \
++    } else { \
++        SECUREC_SMALL_STR_COPY((dest), (src), (lenWithTerm)); \
++    } \
++} SECUREC_WHILE_ZERO
++#endif
++
++/*
++ * Check Src Range
++ */
++SECUREC_INLINE errno_t CheckSrcRange(char *strDest, size_t destMax, const char *strSrc)
++{
++    size_t tmpDestMax = destMax;
++    const char *tmpSrc = strSrc;
++    /* Use destMax as boundary checker and destMax must be greater than zero */
++    while (*tmpSrc != '\0' && tmpDestMax > 0) {
++        ++tmpSrc;
++        --tmpDestMax;
++    }
++    if (tmpDestMax == 0) {
++        strDest[0] = '\0';
++        SECUREC_ERROR_INVALID_RANGE("strcpy_s");
++        return ERANGE_AND_RESET;
++    }
++    return EOK;
++}
++
++/*
++ * Handling errors
++ */
++errno_t strcpy_error(char *strDest, size_t destMax, const char *strSrc)
++{
++    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {
++        SECUREC_ERROR_INVALID_RANGE("strcpy_s");
++        return ERANGE;
++    }
++    if (strDest == NULL || strSrc == NULL) {
++        SECUREC_ERROR_INVALID_PARAMTER("strcpy_s");
++        if (strDest != NULL) {
++            strDest[0] = '\0';
++            return EINVAL_AND_RESET;
++        }
++        return EINVAL;
++    }
++    return CheckSrcRange(strDest, destMax, strSrc);
++}
++
++/*
++ *
++ * The strcpy_s function copies the string pointed to by strSrc
++ * (including the terminating null character) into the array pointed to by strDest.
++ * The destination string must be large enough to hold the source string,
++ * including the terminating null character. strcpy_s will return EOVERLAP_AND_RESET
++ * if the source and destination strings overlap.
++ *
++ *
++ * strDest Location of destination string buffer
++ * destMax Size of the destination string buffer.
++ * strSrc Null-terminated source string buffer.
++ *
++ *
++ * strDest is updated.
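++ *
++ * Usage sketch (editorial illustration only; the buffer name and size are
++ * hypothetical, and errors are signalled via the return table below):
++ *     char path[64];
++ *     if (strcpy_s(path, sizeof(path), "/tmp/fb0") != EOK) {
++ *         ... dest too small, overlap, or invalid parameters ...
++ *     }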
++ *
++ *
++ * EOK Success
++ * EINVAL strDest is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN
++ * EINVAL_AND_RESET strDest != NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN
++ * ERANGE destMax is 0 or destMax > SECUREC_STRING_MAX_LEN
++ * ERANGE_AND_RESET strDest does not have enough space, all other parameters are valid, and there is no overlap
++ * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped and all parameters are valid
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
++errno_t strcpy_s(char *strDest, size_t destMax, const char *strSrc)
++{
++    if (SECUREC_STRCPY_PARAM_OK(strDest, destMax, strSrc)) {
++        size_t srcStrLen;
++        SECUREC_CALC_STR_LEN(strSrc, destMax, &srcStrLen);
++        ++srcStrLen; /* The length includes '\0' */
++
++        if (srcStrLen <= destMax) {
++            /* The memory overlap check includes '\0' */
++            if (SECUREC_MEMORY_NO_OVERLAP(strDest, strSrc, srcStrLen)) {
++                /* Performance optimization, srcStrLen includes '\0' */
++                SECUREC_STRCPY_OPT(strDest, strSrc, srcStrLen);
++                return EOK;
++            } else {
++                strDest[0] = '\0';
++                SECUREC_ERROR_BUFFER_OVERLAP("strcpy_s");
++                return EOVERLAP_AND_RESET;
++            }
++        }
++    }
++    return strcpy_error(strDest, destMax, strSrc);
++}
++
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(strcpy_s);
++#endif
++
+diff --git a/lib/securec/src/strncat_s.c b/lib/securec/src/strncat_s.c
+new file mode 100644
+index 000000000..6686d2994
+--- /dev/null
++++ b/lib/securec/src/strncat_s.c
+@@ -0,0 +1,119 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: strncat_s function
++ * Create: 2014-02-25
++ */
++
++#include "securecutil.h"
++
++/*
++ * Before this function is called, the basic parameter checking has already been done
++ */
++SECUREC_INLINE errno_t SecDoCatLimit(char *strDest, size_t destMax, const char *strSrc, size_t count)
++{
++    size_t destLen;
++    size_t srcLen;
++    SECUREC_CALC_STR_LEN(strDest, destMax, &destLen);
++    /*
++     * The strSrc is no longer optimized. The reason is that when count is small,
++     * the efficiency of strnlen is higher than that of a self-implemented scan.
++     */
++    SECUREC_CALC_STR_LEN(strSrc, count, &srcLen);
++
++    if (SECUREC_CAT_STRING_IS_OVERLAP(strDest, destLen, strSrc, srcLen)) {
++        strDest[0] = '\0';
++        if (strDest + destLen <= strSrc && destLen == destMax) {
++            SECUREC_ERROR_INVALID_PARAMTER("strncat_s");
++            return EINVAL_AND_RESET;
++        }
++        SECUREC_ERROR_BUFFER_OVERLAP("strncat_s");
++        return EOVERLAP_AND_RESET;
++    }
++    if (srcLen + destLen >= destMax || strDest == strSrc) {
++        strDest[0] = '\0';
++        if (destLen == destMax) {
++            SECUREC_ERROR_INVALID_PARAMTER("strncat_s");
++            return EINVAL_AND_RESET;
++        }
++        SECUREC_ERROR_INVALID_RANGE("strncat_s");
++        return ERANGE_AND_RESET;
++    }
++    SECUREC_MEMCPY_WARP_OPT(strDest + destLen, strSrc, srcLen); /* No terminator */
++    *(strDest + destLen + srcLen) = '\0';
++    return EOK;
++}
++
++/*
++ *
++ * The strncat_s function appends not more than n successive characters
++ * (not including the terminating null character)
++ * from the array pointed to by strSrc to the end of the string pointed to by strDest.
++ * The strncat_s function tries to append the first D characters of strSrc to
++ * the end of strDest, where D is the lesser of count and the length of strSrc.
++ * If appending those D characters will fit within strDest (whose size is given
++ * as destMax) and still leave room for a null terminator, then those characters
++ * are appended, starting at the original terminating null of strDest, and a
++ * new terminating null is appended; otherwise, strDest[0] is set to the null
++ * character.
++ *
++ *
++ * strDest Null-terminated destination string.
++ * destMax Size of the destination buffer.
++ * strSrc Null-terminated source string.
++ * count Number of characters to append, or truncate.
++ *
++ *
++ * strDest is updated
++ *
++ *
++ * EOK Success
++ * EINVAL strDest is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN
++ * EINVAL_AND_RESET (strDest unterminated and all other parameters are valid) or
++ * (strDest != NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN)
++ * ERANGE destMax is 0 or destMax > SECUREC_STRING_MAX_LEN
++ * ERANGE_AND_RESET strDest does not have enough space, all other parameters are valid, and there is no overlap
++ * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped and all parameters are valid
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
++errno_t strncat_s(char *strDest, size_t destMax, const char *strSrc, size_t count)
++{
++    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {
++        SECUREC_ERROR_INVALID_RANGE("strncat_s");
++        return ERANGE;
++    }
++
++    if (strDest == NULL || strSrc == NULL) {
++        SECUREC_ERROR_INVALID_PARAMTER("strncat_s");
++        if (strDest != NULL) {
++            strDest[0] = '\0';
++            return EINVAL_AND_RESET;
++        }
++        return EINVAL;
++    }
++    if (count > SECUREC_STRING_MAX_LEN) {
++#ifdef SECUREC_COMPATIBLE_WIN_FORMAT
++        if (count == (size_t)(-1)) {
++            /* Windows internal functions may pass in -1 when calling this function */
++            return SecDoCatLimit(strDest, destMax, strSrc, destMax);
++        }
++#endif
++        strDest[0] = '\0';
++        SECUREC_ERROR_INVALID_RANGE("strncat_s");
++        return ERANGE_AND_RESET;
++    }
++    return SecDoCatLimit(strDest, destMax, strSrc, count);
++}
++
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(strncat_s);
++#endif
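++
++/*
++ * Usage sketch (editorial illustration only, not part of the original sources;
++ * 'name' stands for any null-terminated source string):
++ *     char msg[32] = "fb:";
++ *     if (strncat_s(msg, sizeof(msg), name, 8) != EOK) {
++ *         ... unterminated dest, overlap, or insufficient space ...
++ *     }
++ */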
+diff --git a/lib/securec/src/strncpy_s.c b/lib/securec/src/strncpy_s.c
+new file mode 100644
+index 000000000..5f4c5b709
+--- /dev/null
++++ b/lib/securec/src/strncpy_s.c
+@@ -0,0 +1,145 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: strncpy_s function
++ * Create: 2014-02-25
++ */
++/*
++ * [Standardize-exceptions] Use unsafe function: Performance-sensitive
++ * [reason] Always used in the performance critical path,
++ *     and sufficient input validation is performed before calling
++ */
++
++#include "securecutil.h"
++
++#if defined(SECUREC_COMPATIBLE_WIN_FORMAT)
++#define SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count) \
++    (((destMax) > 0 && (destMax) <= SECUREC_STRING_MAX_LEN && (strDest) != NULL && (strSrc) != NULL && \
++    ((count) <= SECUREC_STRING_MAX_LEN || (count) == ((size_t)(-1))) && (count) > 0))
++#else
++#define SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count) \
++    (((destMax) > 0 && (destMax) <= SECUREC_STRING_MAX_LEN && (strDest) != NULL && (strSrc) != NULL && \
++    (count) <= SECUREC_STRING_MAX_LEN && (count) > 0))
++#endif
++
++/*
++ * Check Src Count Range
++ */
++SECUREC_INLINE errno_t CheckSrcCountRange(char *strDest, size_t destMax, const char *strSrc, size_t count)
++{
++    size_t tmpDestMax = destMax;
++    size_t tmpCount = count;
++    const char *endPos = strSrc;
++
++    /* Use destMax and count as boundary checker and destMax must be greater than zero */
++    while (*(endPos) != '\0' && tmpDestMax > 0 && tmpCount > 0) {
++        ++endPos;
++        --tmpCount;
++        --tmpDestMax;
++    }
++    if (tmpDestMax == 0) {
++        strDest[0] = '\0';
++        SECUREC_ERROR_INVALID_RANGE("strncpy_s");
++        return ERANGE_AND_RESET;
++    }
++    return EOK;
++}
++
++/*
++ * Handling errors; when dest equals src, return EOK
++ */
++errno_t strncpy_error(char *strDest, size_t destMax, const char *strSrc, size_t count)
++{
++    if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) {
++        SECUREC_ERROR_INVALID_RANGE("strncpy_s");
++        return ERANGE;
++    }
++    if (strDest == NULL || strSrc == NULL) {
++        SECUREC_ERROR_INVALID_PARAMTER("strncpy_s");
++        if (strDest != NULL) {
++            strDest[0] = '\0';
++            return EINVAL_AND_RESET;
++        }
++        return EINVAL;
++    }
++    if (count > SECUREC_STRING_MAX_LEN) {
++        strDest[0] = '\0'; /* Clear dest string */
++        SECUREC_ERROR_INVALID_RANGE("strncpy_s");
++        return ERANGE_AND_RESET;
++    }
++    if (count == 0) {
++        strDest[0] = '\0';
++        return EOK;
++    }
++    return CheckSrcCountRange(strDest, destMax, strSrc, count);
++}
++
++/*
++ *
++ * The strncpy_s function copies not more than n successive characters (not including the terminating null character)
++ * from the array pointed to by strSrc to the array pointed to by strDest.
++ *
++ *
++ * strDest Destination string.
++ * destMax The size of the destination string, in characters.
++ * strSrc Source string.
++ * count Number of characters to be copied.
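++ *
++ * Usage sketch (editorial illustration only; 'src' stands for any
++ * null-terminated source string):
++ *     char head[16];
++ *     if (strncpy_s(head, sizeof(head), src, 8) != EOK) {
++ *         ... range, overlap, or invalid-parameter failure ...
++ *     }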
++ *
++ *
++ * strDest is updated
++ *
++ *
++ * EOK Success
++ * EINVAL strDest is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN
++ * EINVAL_AND_RESET strDest != NULL and strSrc is NULL and destMax != 0 and destMax <= SECUREC_STRING_MAX_LEN
++ * ERANGE destMax is 0 or destMax > SECUREC_STRING_MAX_LEN
++ * ERANGE_AND_RESET strDest does not have enough space, all other parameters are valid, and there is no overlap
++ * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped and all parameters are valid
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
++errno_t strncpy_s(char *strDest, size_t destMax, const char *strSrc, size_t count)
++{
++    if (SECUREC_STRNCPY_PARAM_OK(strDest, destMax, strSrc, count)) {
++        size_t minCpLen; /* Use it to store the maximum length limit */
++        if (count < destMax) {
++            SECUREC_CALC_STR_LEN(strSrc, count, &minCpLen); /* No ending terminator */
++        } else {
++            size_t tmpCount = destMax;
++#ifdef SECUREC_COMPATIBLE_WIN_FORMAT
++            if (count == ((size_t)(-1))) {
++                tmpCount = destMax - 1;
++            }
++#endif
++            SECUREC_CALC_STR_LEN(strSrc, tmpCount, &minCpLen); /* No ending terminator */
++            if (minCpLen == destMax) {
++                strDest[0] = '\0';
++                SECUREC_ERROR_INVALID_RANGE("strncpy_s");
++                return ERANGE_AND_RESET;
++            }
++        }
++        if (SECUREC_STRING_NO_OVERLAP(strDest, strSrc, minCpLen) || strDest == strSrc) {
++            /* Not overlap */
++            SECUREC_MEMCPY_WARP_OPT(strDest, strSrc, minCpLen); /* Copy string without terminator */
++            strDest[minCpLen] = '\0';
++            return EOK;
++        } else {
++            strDest[0] = '\0';
++            SECUREC_ERROR_BUFFER_OVERLAP("strncpy_s");
++            return EOVERLAP_AND_RESET;
++        }
++    }
++    return strncpy_error(strDest, destMax, strSrc, count);
++}
++
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(strncpy_s);
++#endif
++
+diff --git a/lib/securec/src/strtok_s.c b/lib/securec/src/strtok_s.c
+new file mode 100644
+index 000000000..cd5dcd2cd
+--- /dev/null
++++ b/lib/securec/src/strtok_s.c
+@@ -0,0 +1,116 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: strtok_s function
++ * Create: 2014-02-25
++ */
++
++#include "securecutil.h"
++
++SECUREC_INLINE int SecIsInDelimit(char ch, const char *strDelimit)
++{
++    const char *ctl = strDelimit;
++    while (*ctl != '\0' && *ctl != ch) {
++        ++ctl;
++    }
++    return (int)(*ctl != '\0');
++}
++
++/*
++ * Find beginning of token (skip over leading delimiters).
++ * Note that there is no token if this loop sets string to point to the terminal null.
++ */
++SECUREC_INLINE char *SecFindBegin(char *strToken, const char *strDelimit)
++{
++    char *token = strToken;
++    while (*token != '\0') {
++        if (SecIsInDelimit(*token, strDelimit) != 0) {
++            ++token;
++            continue;
++        }
++        /* No delimiter found at the head of the string, break the loop */
++        break;
++    }
++    return token;
++}
++
++/*
++ * Find rest of token
++ */
++SECUREC_INLINE char *SecFindRest(char *strToken, const char *strDelimit)
++{
++    /* Find the rest of the token. If it is not the end of the string, put a null there */
++    char *token = strToken;
++    while (*token != '\0') {
++        if (SecIsInDelimit(*token, strDelimit) != 0) {
++            /* Find a delimiter, set string terminator */
++            *token = '\0';
++            ++token;
++            break;
++        }
++        ++token;
++    }
++    return token;
++}
++
++/*
++ * Find the final position pointer
++ */
++SECUREC_INLINE char *SecUpdateToken(char *strToken, const char *strDelimit, char **context)
++{
++    /* Point to updated position. Record string position for next search in the context */
++    *context = SecFindRest(strToken, strDelimit);
++    /* Determine if a token has been found. */
++    if (*context == strToken) {
++        return NULL;
++    }
++    return strToken;
++}
++
++/*
++ *
++ * The strtok_s function parses a string into a sequence of tokens,
++ * replacing the characters in the strToken string that match the strDelimit set with '\0'.
++ * On the first call to strtok_s the string to be parsed should be specified in strToken.
++ * In each subsequent call that should parse the same string, strToken should be NULL
++ *
++ * strToken String containing token or tokens.
++ * strDelimit Set of delimiter characters.
++ * context Used to store position information between calls
++ * to strtok_s
++ *
++ * context is updated
++ *
++ * On the first call returns the address of the first non '\0' character, otherwise NULL is returned.
++ * In subsequent calls, strToken is set to NULL and the context is the same as in the previous call;
++ * NULL is returned if the *context string length equals 0, otherwise *context is returned.
++ */
++char *strtok_s(char *strToken, const char *strDelimit, char **context)
++{
++    char *orgToken = strToken;
++    /* Validate delimiter and string context */
++    if (context == NULL || strDelimit == NULL) {
++        return NULL;
++    }
++    /* Validate the input string and the string pointer from which to search */
++    if (orgToken == NULL && *context == NULL) {
++        return NULL;
++    }
++    /* If the string is NULL, continue searching from the previous position stored in context */
++    if (orgToken == NULL) {
++        orgToken = *context;
++    }
++    orgToken = SecFindBegin(orgToken, strDelimit);
++    return SecUpdateToken(orgToken, strDelimit, context);
++}
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(strtok_s);
++#endif
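++
++/*
++ * Usage sketch (editorial illustration only, not part of the original sources):
++ *     char line[] = "a,b;c";
++ *     char *next = NULL;
++ *     char *tok = strtok_s(line, ",;", &next);
++ *     while (tok != NULL) {
++ *         ... consume tok ...
++ *         tok = strtok_s(NULL, ",;", &next);
++ *     }
++ */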
+diff --git a/lib/securec/src/vscanf_s.c b/lib/securec/src/vscanf_s.c
+new file mode 100644
+index 000000000..61480a697
+--- /dev/null
++++ b/lib/securec/src/vscanf_s.c
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: vscanf_s function
++ * Create: 2014-02-25
++ */
++
++#include "secinput.h"
++
++/*
++ *
++ * The vscanf_s function is equivalent to scanf_s, with the variable argument list replaced by argList.
++ * The vscanf_s function reads data from the standard input stream stdin and
++ * writes the data into the location given by each argument. Each argument
++ * must be a pointer to a variable of a type that corresponds to a type specifier
++ * in format. If copying occurs between strings that overlap, the behavior is
++ * undefined.
++ *
++ *
++ * format Format control string.
++ * argList pointer to list of arguments
++ *
++ *
++ * argList the converted values are stored at the user-assigned addresses
++ *
++ *
++ * Returns the number of fields successfully converted and assigned;
++ * the return value does not include fields that were read but not assigned.
++ * A return value of 0 indicates that no fields were assigned.
++ * return -1 if an error occurs.
++ */
++int vscanf_s(const char *format, va_list argList)
++{
++    int retVal; /* If initialization causes e838 */
++    SecFileStream fStr;
++    SECUREC_FILE_STREAM_FROM_STDIN(&fStr);
++    /*
++     * The "va_list" has different definitions on different platforms, so we can't use argList == NULL
++     * to determine whether it's invalid. If you have a fixed platform, you can check some fields to validate it,
++     * such as "argList == NULL" or argList.xxx != NULL or *(size_t *)&argList != 0.
++     */
++    if (format == NULL || fStr.pf == NULL) {
++        SECUREC_ERROR_INVALID_PARAMTER("vscanf_s");
++        return SECUREC_SCANF_EINVAL;
++    }
++
++    SECUREC_LOCK_STDIN(0, fStr.pf);
++    retVal = SecInputS(&fStr, format, argList);
++    SECUREC_UNLOCK_STDIN(0, fStr.pf);
++    if (retVal < 0) {
++        SECUREC_ERROR_INVALID_PARAMTER("vscanf_s");
++        return SECUREC_SCANF_EINVAL;
++    }
++    return retVal;
++}
++
+diff --git a/lib/securec/src/vsnprintf_s.c b/lib/securec/src/vsnprintf_s.c
+new file mode 100644
+index 000000000..35caaa220
+--- /dev/null
++++ b/lib/securec/src/vsnprintf_s.c
+@@ -0,0 +1,138 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: vsnprintf_s function
++ * Create: 2014-02-25
++ */
++
++#include "secureprintoutput.h"
++
++#if SECUREC_ENABLE_VSNPRINTF
++/*
++ *
++ * The vsnprintf_s function is equivalent to the vsnprintf function
++ * except for the parameters destMax/count and the explicit runtime-constraints violation.
++ * The vsnprintf_s function takes a pointer to an argument list, then formats
++ * and writes up to count characters of the given data to the memory pointed
++ * to by strDest and appends a terminating null.
++ *
++ *
++ * strDest Storage location for the output.
++ * destMax The size of the strDest for output.
++ * count Maximum number of characters to write (not including
++ * the terminating NULL)
++ * format Format-control string.
++ * argList pointer to list of arguments.
++ *
++ *
++ * strDest is updated
++ *
++ *
++ * return the number of characters written, not including the terminating null
++ * return -1 if an error occurs.
++ * return -1 if count < destMax and the output string has been truncated
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
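++/*
++ * Usage sketch (editorial illustration only; a variadic wrapper such as the
++ * hypothetical log_fmt below is the typical caller):
++ *     void log_fmt(const char *fmt, ...)
++ *     {
++ *         char buf[128];
++ *         va_list ap;
++ *         va_start(ap, fmt);
++ *         if (vsnprintf_s(buf, sizeof(buf), sizeof(buf) - 1, fmt, ap) < 0) {
++ *             ... truncated output or invalid parameters ...
++ *         }
++ *         va_end(ap);
++ *     }
++ */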
++int vsnprintf_s(char *strDest, size_t destMax, size_t count, const char *format, va_list argList)
++{
++    int retVal;
++
++    if (SECUREC_VSNPRINTF_PARAM_ERROR(format, strDest, destMax, count, SECUREC_STRING_MAX_LEN)) {
++        SECUREC_VSPRINTF_CLEAR_DEST(strDest, destMax, SECUREC_STRING_MAX_LEN);
++        SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_s");
++        return -1;
++    }
++
++    if (destMax > count) {
++        retVal = SecVsnprintfImpl(strDest, count + 1, format, argList);
++        if (retVal == SECUREC_PRINTF_TRUNCATE) { /* To keep dest buffer not destroyed 2014.2.18 */
++            /* The string has been truncated, return -1 */
++            return -1; /* To skip error handler, return strlen(strDest) or -1 */
++        }
++    } else {
++        retVal = SecVsnprintfImpl(strDest, destMax, format, argList);
++#ifdef SECUREC_COMPATIBLE_WIN_FORMAT
++        if (retVal == SECUREC_PRINTF_TRUNCATE && count == (size_t)(-1)) {
++            return -1;
++        }
++#endif
++    }
++
++    if (retVal < 0) {
++        strDest[0] = '\0'; /* Empty the dest strDest */
++        if (retVal == SECUREC_PRINTF_TRUNCATE) {
++            /* Buffer too small */
++            SECUREC_ERROR_INVALID_RANGE("vsnprintf_s");
++        }
++        SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_s");
++        return -1;
++    }
++
++    return retVal;
++}
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(vsnprintf_s);
++#endif
++#endif
++
++#if SECUREC_SNPRINTF_TRUNCATED
++/*
++ *
++ * The vsnprintf_truncated_s function is equivalent to the vsnprintf function
++ * except for the parameter destMax and the explicit runtime-constraints violation.
++ * The vsnprintf_truncated_s function takes a pointer to an argument list, then formats
++ * and writes up to destMax - 1 characters of the given data to the memory pointed
++ * to by strDest and appends a terminating null.
++ *
++ *
++ * strDest Storage location for the output.
++ * destMax The size of the strDest for output (including
++ * the terminating NULL).
++ * format Format-control string.
++ * argList pointer to list of arguments.
++ *
++ *
++ * strDest is updated
++ *
++ *
++ * return the number of characters written, not including the terminating null
++ * return -1 if an error occurs.
++ * return destMax-1 if output string has been truncated
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
++int vsnprintf_truncated_s(char *strDest, size_t destMax, const char *format, va_list argList)
++{
++    int retVal;
++
++    if (SECUREC_VSPRINTF_PARAM_ERROR(format, strDest, destMax, SECUREC_STRING_MAX_LEN)) {
++        SECUREC_VSPRINTF_CLEAR_DEST(strDest, destMax, SECUREC_STRING_MAX_LEN);
++        SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_truncated_s");
++        return -1;
++    }
++
++    retVal = SecVsnprintfImpl(strDest, destMax, format, argList);
++    if (retVal < 0) {
++        if (retVal == SECUREC_PRINTF_TRUNCATE) {
++            return (int)(destMax - 1); /* To skip error handler, return strlen(strDest) */
++        }
++        strDest[0] = '\0'; /* Empty the dest strDest */
++        SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_truncated_s");
++        return -1;
++    }
++
++    return retVal;
++}
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(vsnprintf_truncated_s);
++#endif
++#endif
++
+diff --git a/lib/securec/src/vsprintf_s.c b/lib/securec/src/vsprintf_s.c
+new file mode 100644
+index 000000000..f50fa4a98
+--- /dev/null
++++ b/lib/securec/src/vsprintf_s.c
+@@ -0,0 +1,67 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: vsprintf_s function
++ * Create: 2014-02-25
++ */
++
++#include "secureprintoutput.h"
++
++/*
++ *
++ * The vsprintf_s function is equivalent to the vsprintf function
++ * except for the parameter destMax and the explicit runtime-constraints violation.
++ * The vsprintf_s function takes a pointer to an argument list, and then formats
++ * and writes the given data to the memory pointed to by strDest.
++ * The function differs from the non-secure version only in that the secure
++ * version supports positional parameters.
++ *
++ *
++ * strDest Storage location for the output.
++ * destMax Size of strDest
++ * format Format specification.
++ * argList pointer to list of arguments
++ *
++ *
++ * strDest is updated
++ *
++ *
++ * return the number of characters written, not including the terminating null character,
++ * return -1 if an error occurs.
++ *
++ * If there is a runtime-constraint violation, strDest[0] will be set to '\0' when strDest and destMax are valid
++ */
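++/*
++ * Usage sketch (editorial illustration only; the hypothetical wrapper below
++ * forwards its variable arguments):
++ *     void warn(const char *fmt, ...)
++ *     {
++ *         char msg[64];
++ *         va_list ap;
++ *         va_start(ap, fmt);
++ *         if (vsprintf_s(msg, sizeof(msg), fmt, ap) == -1) {
++ *             ... msg[0] has already been reset to '\0' on failure ...
++ *         }
++ *         va_end(ap);
++ *     }
++ */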
++int vsprintf_s(char *strDest, size_t destMax, const char *format, va_list argList)
++{
++    int retVal; /* If initialization causes e838 */
++
++    if (SECUREC_VSPRINTF_PARAM_ERROR(format, strDest, destMax, SECUREC_STRING_MAX_LEN)) {
++        SECUREC_VSPRINTF_CLEAR_DEST(strDest, destMax, SECUREC_STRING_MAX_LEN);
++        SECUREC_ERROR_INVALID_PARAMTER("vsprintf_s");
++        return -1;
++    }
++
++    retVal = SecVsnprintfImpl(strDest, destMax, format, argList);
++    if (retVal < 0) {
++        strDest[0] = '\0';
++        if (retVal == SECUREC_PRINTF_TRUNCATE) {
++            /* Buffer is too small */
++            SECUREC_ERROR_INVALID_RANGE("vsprintf_s");
++        }
++        SECUREC_ERROR_INVALID_PARAMTER("vsprintf_s");
++        return -1;
++    }
++
++    return retVal;
++}
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(vsprintf_s);
++#endif
++
+diff --git a/lib/securec/src/vsscanf_s.c b/lib/securec/src/vsscanf_s.c
+new file mode 100644
+index 000000000..a19abe2b9
+--- /dev/null
++++ b/lib/securec/src/vsscanf_s.c
+@@ -0,0 +1,87 @@
++/*
++ * Copyright (c) Huawei Technologies Co., Ltd. 2014-2021. All rights reserved.
++ * Licensed under Mulan PSL v2.
++ * You can use this software according to the terms and conditions of the Mulan PSL v2.
++ * You may obtain a copy of Mulan PSL v2 at:
++ * http://license.coscl.org.cn/MulanPSL2
++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
++ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
++ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
++ * See the Mulan PSL v2 for more details.
++ * Description: vsscanf_s function
++ * Create: 2014-02-25
++ */
++
++#include "secinput.h"
++#if defined(SECUREC_VXWORKS_PLATFORM) && !SECUREC_IN_KERNEL && \
++    (!defined(SECUREC_SYSAPI4VXWORKS) && !defined(SECUREC_CTYPE_MACRO_ADAPT))
++#include <ctype.h>
++#endif
++
++/*
++ *
++ * vsscanf_s
++ *
++ *
++ *
++ * The vsscanf_s function is equivalent to sscanf_s, with the variable argument list replaced by argList.
++ * The vsscanf_s function reads data from buffer into the location given by
++ * each argument. Every argument must be a pointer to a variable with a type
++ * that corresponds to a type specifier in format. The format argument controls
++ * the interpretation of the input fields and has the same form and function
++ * as the format argument for the scanf function.
++ * If copying takes place between strings that overlap, the behavior is undefined.
++ *
++ *
++ * buffer Stored data
++ * format Format control string, see Format Specifications.
++ * argList pointer to list of arguments
++ *
++ *
++ * argList the converted values are stored at the user-assigned addresses
++ *
++ *
++ * Each of these functions returns the number of fields successfully converted
++ * and assigned; the return value does not include fields that were read but
++ * not assigned. A return value of 0 indicates that no fields were assigned.
++ * return -1 if an error occurs.
++ */
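++/*
++ * Usage sketch (editorial illustration only; a hypothetical sscanf_s-style
++ * wrapper that forwards its variable arguments):
++ *     int parse(const char *buf, const char *fmt, ...)
++ *     {
++ *         va_list ap;
++ *         int assigned;
++ *         va_start(ap, fmt);
++ *         assigned = vsscanf_s(buf, fmt, ap);
++ *         va_end(ap);
++ *         return assigned;
++ *     }
++ */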
++int vsscanf_s(const char *buffer, const char *format, va_list argList)
++{
++    size_t count; /* If initialization causes e838 */
++    int retVal;
++    SecFileStream fStr;
++
++    /* Validation section */
++    if (buffer == NULL || format == NULL) {
++        SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s");
++        return SECUREC_SCANF_EINVAL;
++    }
++    count = strlen(buffer);
++    if (count == 0 || count > SECUREC_STRING_MAX_LEN) {
++        SecClearDestBuf(buffer, format, argList);
++        SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s");
++        return SECUREC_SCANF_EINVAL;
++    }
++#if defined(SECUREC_VXWORKS_PLATFORM) && !SECUREC_IN_KERNEL
++    /*
++     * On the vxworks platform, when the buffer is a whitespace-only string, the first %s argument
++     * will be set to zero, as in the following usage:
++     * " \v\f\t\r\n", "%s", str, strSize
++     * Do not check every character; if the first and last characters are whitespace,
++     * consider it a whitespace-only string
++     */
++    if (isspace((int)(unsigned char)buffer[0]) != 0 && isspace((int)(unsigned char)buffer[count - 1]) != 0) {
++        SecClearDestBuf(buffer, format, argList);
++    }
++#endif
++    SECUREC_FILE_STREAM_FROM_STRING(&fStr, buffer, count);
++    retVal = SecInputS(&fStr, format, argList);
++    if (retVal < 0) {
++        SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s");
++        return SECUREC_SCANF_EINVAL;
++    }
++    return retVal;
++}
++#if SECUREC_EXPORT_KERNEL_SYMBOL
++EXPORT_SYMBOL(vsscanf_s);
++#endif
++
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 38bc0ee5e..c11cd0116 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -35,16 +35,6 @@ config ZSWAP
+ 	  in the case where decompressing from RAM is faster than swap device
+ 	  reads, can also improve workload performance.
+ 
+-config CMA_REUSE
+-	bool "CMA reuse feature"
+-	depends on CMA
+-	help
+-	  If enabled, it will add MIGRATE_CMA to pcp lists and movable
+-	  allocations with __GFP_CMA flag will use cma areas prior to
+-	  movable areas.
+-
+-	  It improves the utilization ratio of cma areas.
+-
+ config ZSWAP_DEFAULT_ON
+ 	bool "Enable the compressed cache for swap pages by default"
+ 	depends on ZSWAP
+@@ -481,41 +471,6 @@ config SPARSEMEM_MANUAL
+ 
+ endchoice
+ 
+-config MEMORY_MONITOR
+-	bool "ENABLE MEMORY_MONITOR"
+-	depends on PROC_FS
+-	default n
+-	help
+-	  MEMORY_MONITOR is a monitor of some memory reclaim method.
+-	  Now, kswapd wake up monitor use it.
+-
+-config HYPERHOLD_FILE_LRU
+-	bool "Enable HyperHold FILE LRU"
+-	depends on HYPERHOLD && MEMCG
+-	select HYPERHOLD_MEMCG
+-	default n
+-	help
+-	  File-LRU is a mechanism that put file page in global lru list,
+-	  and anon page in memcg lru list(if MEMCG is enable), what's
+-	  more, recliam of anonymous pages and file page are separated.
+- +-config HYPERHOLD_MEMCG +- bool "Enable Memcg Management in HyperHold" +- depends on HYPERHOLD && MEMCG +- help +- Add more attributes in memory cgroup, these attribute is used +- to show information, shrink memory, swapin page and so on. +- +-config HYPERHOLD_ZSWAPD +- bool "Enable zswapd thread to reclaim anon pages in background" +- depends on HYPERHOLD && ZRAM +- default n +- help +- zswapd is a kernel thread that reclaim anonymous pages in the +- background. When the use of swap pages reaches the watermark +- and the refault of anonymous pages is high, the content of +- zram will exchanged to eswap by a certain percentage. +- + config SPARSEMEM + def_bool y + depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL +@@ -1327,29 +1282,6 @@ config LOCK_MM_AND_FIND_VMA + bool + depends on !STACK_GROWSUP + +- +-config MEM_PURGEABLE +- bool "Purgeable memory feature" +- default n +- depends on 64BIT +- select ARCH_USES_HIGH_VMA_FLAGS +- help +- Support purgeable pages for process +- +-config MEM_PURGEABLE_DEBUG +- bool "Purgeable memory debug" +- default n +- depends on MEM_PURGEABLE +- help +- Debug info for purgeable memory +- +-config PURGEABLE_ASHMEM +- bool "Purgeable memory feature for ashmem" +- default n +- depends on MEM_PURGEABLE +- help +- Support purgeable ashmem for process +- + source "mm/damon/Kconfig" + + endmenu +diff --git a/mm/Makefile b/mm/Makefile +index f84d4b0f5..ec65984e2 100644 +--- a/mm/Makefile ++++ b/mm/Makefile +@@ -138,9 +138,3 @@ obj-$(CONFIG_IO_MAPPING) += io-mapping.o + obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o + obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o + obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o +-obj-$(CONFIG_HYPERHOLD_FILE_LRU) += memcg_reclaim.o +-obj-$(CONFIG_HYPERHOLD_MEMCG) += memcg_control.o +-obj-$(CONFIG_HYPERHOLD_ZSWAPD) += zswapd.o zswapd_control.o +-obj-$(CONFIG_MEM_PURGEABLE) += purgeable.o +-obj-$(CONFIG_PURGEABLE_ASHMEM) += purgeable_ashmem_trigger.o +-obj-$(CONFIG_MEMORY_MONITOR) += memory_monitor.o +diff --git a/mm/compaction.c b/mm/compaction.c +index 55aba56da..8b889bee2 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -2235,7 +2235,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) + + #ifdef CONFIG_CMA + /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ +- if (migratetype == get_cma_migratetype() && ++ if (migratetype == MIGRATE_MOVABLE && + !free_area_empty(area, MIGRATE_CMA)) + return COMPACT_SUCCESS; + #endif +diff --git a/mm/internal.h b/mm/internal.h +index 848d33206..f773db493 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -10,11 +10,8 @@ + #include + #include + #include +-#include + #include + #include +-#include +-#include + + struct folio_batch; + +@@ -38,130 +35,6 @@ struct folio_batch; + /* Do not use these with a slab allocator */ + #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) + +-enum reclaim_invoker { +- ALL, +- KSWAPD, +- ZSWAPD, +- DIRECT_RECLAIM, +- NODE_RECLAIM, +- SOFT_LIMIT, +- RCC_RECLAIM, +- FILE_RECLAIM, +- ANON_RECLAIM +-}; +- +-struct scan_control { +- /* How many pages shrink_list() should reclaim */ +- unsigned long nr_to_reclaim; +- +- /* +- * Nodemask of nodes allowed by the caller. If NULL, all nodes +- * are scanned. +- */ +- nodemask_t *nodemask; +- +- /* +- * The memory cgroup that hit its limit and as a result is the +- * primary target of this reclaim invocation. 
+- */ +- struct mem_cgroup *target_mem_cgroup; +- +- /* +- * Scan pressure balancing between anon and file LRUs +- */ +- unsigned long anon_cost; +- unsigned long file_cost; +- +- /* Can active folios be deactivated as part of reclaim? */ +-#define DEACTIVATE_ANON 1 +-#define DEACTIVATE_FILE 2 +- unsigned int may_deactivate:2; +- unsigned int force_deactivate:1; +- unsigned int skipped_deactivate:1; +- +- /* Writepage batching in laptop mode; RECLAIM_WRITE */ +- unsigned int may_writepage:1; +- +- /* Can mapped folios be reclaimed? */ +- unsigned int may_unmap:1; +- +- /* Can folios be swapped as part of reclaim? */ +- unsigned int may_swap:1; +- +- /* Proactive reclaim invoked by userspace through memory.reclaim */ +- unsigned int proactive:1; +- +- /* +- * Cgroup memory below memory.low is protected as long as we +- * don't threaten to OOM. If any cgroup is reclaimed at +- * reduced force or passed over entirely due to its memory.low +- * setting (memcg_low_skipped), and nothing is reclaimed as a +- * result, then go back for one more cycle that reclaims the protected +- * memory (memcg_low_reclaim) to avert OOM. +- */ +- unsigned int memcg_low_reclaim:1; +- unsigned int memcg_low_skipped:1; +- +- unsigned int hibernation_mode:1; +- +- /* One of the zones is ready for compaction */ +- unsigned int compaction_ready:1; +- +- /* There is easily reclaimable cold cache in the current node */ +- unsigned int cache_trim_mode:1; +- +- /* The file folios on the current node are dangerously low */ +- unsigned int file_is_tiny:1; +- +- /* Always discard instead of demoting to lower tier memory */ +- unsigned int no_demotion:1; +- +- /* Allocation order */ +- s8 order; +- +- /* Scan (total_size >> priority) pages at once */ +- s8 priority; +- +- /* The highest zone to isolate folios for reclaim from */ +- s8 reclaim_idx; +- +- /* This context's GFP mask */ +- gfp_t gfp_mask; +- +- /* Incremented by the number of inactive pages that were scanned */ +- unsigned long nr_scanned; +- +- /* Number of pages freed so far during a call to shrink_zones() */ +- unsigned long nr_reclaimed; +- +- struct { +- unsigned int dirty; +- unsigned int unqueued_dirty; +- unsigned int congested; +- unsigned int writeback; +- unsigned int immediate; +- unsigned int file_taken; +- unsigned int taken; +- } nr; +- +- enum reclaim_invoker invoker; +- u32 isolate_count; +- unsigned long nr_scanned_anon; +- unsigned long nr_scanned_file; +- unsigned long nr_reclaimed_anon; +- unsigned long nr_reclaimed_file; +- +- /* for recording the reclaimed slab by now */ +- struct reclaim_state reclaim_state; +-}; +- +-enum scan_balance { +- SCAN_EQUAL, +- SCAN_FRACT, +- SCAN_ANON, +- SCAN_FILE, +-}; +- + /* + * Different from WARN_ON_ONCE(), no warning will be issued + * when we specify __GFP_NOWARN. 
+@@ -370,25 +243,11 @@ extern unsigned long highest_memmap_pfn; + /* + * in mm/vmscan.c: + */ +-#ifdef CONFIG_MEMORY_MONITOR +-extern void kswapd_monitor_wake_up_queue(void); +-#endif + bool isolate_lru_page(struct page *page); + bool folio_isolate_lru(struct folio *folio); + void putback_lru_page(struct page *page); + void folio_putback_lru(struct folio *folio); + extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason); +-extern unsigned int shrink_folio_list(struct list_head *page_list, struct pglist_data *pgdat, +- struct scan_control *sc, struct reclaim_stat *stat, bool ignore_references); +-extern unsigned long isolate_lru_folios(unsigned long nr_to_scan, struct lruvec *lruvec, +- struct list_head *dst, unsigned long *nr_scanned, struct scan_control *sc, +- enum lru_list lru); +-extern unsigned move_folios_to_lru(struct lruvec *lruvec, struct list_head *list); +-extern void shrink_active_list(unsigned long nr_to_scan, struct lruvec *lruvec, +- struct scan_control *sc, enum lru_list lru); +-extern unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, +- struct scan_control *sc, enum lru_list lru); +-extern void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc); + + /* + * in mm/rmap.c: +diff --git a/mm/memcg_control.c b/mm/memcg_control.c +deleted file mode 100644 +index 4ca565174..000000000 +--- a/mm/memcg_control.c ++++ /dev/null +@@ -1,488 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * mm/memcg_control.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +-#include +-#include +-#include +-#include +-#include +-#include "internal.h" +- +-#include "zswapd_internal.h" +- +-#ifdef CONFIG_HYPERHOLD_MEMCG +- +-struct list_head score_head; +-bool score_head_inited; +-DEFINE_RWLOCK(score_list_lock); +-DEFINE_MUTEX(reclaim_para_lock); +- +-/** +- * get_next_memcg - iterate over memory cgroup score_list +- * @prev: previously returned memcg, NULL on first invocation +- * +- * Returns references to the next memg on score_list of @prev, +- * or %NULL after a full round-trip. +- * +- * Caller must pass the return value in @prev on subsequent +- * invocations for reference counting, or use get_next_memcg_break() +- * to cancel a walk before the round-trip is complete. 
+- */ +-struct mem_cgroup *get_next_memcg(struct mem_cgroup *prev) +-{ +- struct mem_cgroup *memcg = NULL; +- struct list_head *pos = NULL; +- unsigned long flags; +- +- if (unlikely(!score_head_inited)) +- return NULL; +- +- read_lock_irqsave(&score_list_lock, flags); +- +- if (unlikely(!prev)) +- pos = &score_head; +- else +- pos = &(prev->score_node); +- +- if (list_empty(pos)) /* deleted node */ +- goto unlock; +- +- if (pos->next == &score_head) +- goto unlock; +- +- memcg = list_entry(pos->next, +- struct mem_cgroup, score_node); +- +- if (!css_tryget(&memcg->css)) +- memcg = NULL; +- +-unlock: +- read_unlock_irqrestore(&score_list_lock, flags); +- +- if (prev) +- css_put(&prev->css); +- +- return memcg; +-} +- +-void get_next_memcg_break(struct mem_cgroup *memcg) +-{ +- if (memcg) +- css_put(&memcg->css); +-} +- +-struct mem_cgroup *get_prev_memcg(struct mem_cgroup *next) +-{ +- struct mem_cgroup *memcg = NULL; +- struct list_head *pos = NULL; +- unsigned long flags; +- +- if (unlikely(!score_head_inited)) +- return NULL; +- +- read_lock_irqsave(&score_list_lock, flags); +- +- if (unlikely(!next)) +- pos = &score_head; +- else +- pos = &next->score_node; +- +- if (list_empty(pos)) /* deleted node */ +- goto unlock; +- +- if (pos->prev == &score_head) +- goto unlock; +- +- memcg = list_entry(pos->prev, +- struct mem_cgroup, score_node); +- +- if (unlikely(!memcg)) +- goto unlock; +- +- if (!css_tryget(&memcg->css)) +- memcg = NULL; +- +-unlock: +- read_unlock_irqrestore(&score_list_lock, flags); +- +- if (next) +- css_put(&next->css); +- return memcg; +-} +- +-void get_prev_memcg_break(struct mem_cgroup *memcg) +-{ +- if (memcg) +- css_put(&memcg->css); +-} +- +-void memcg_app_score_update(struct mem_cgroup *target) +-{ +- struct list_head *pos = NULL; +- struct list_head *tmp; +- unsigned long flags; +- +- write_lock_irqsave(&score_list_lock, flags); +- list_for_each_prev_safe(pos, tmp, &score_head) { +- struct mem_cgroup *memcg = list_entry(pos, +- struct mem_cgroup, score_node); +- if (atomic64_read(&memcg->memcg_reclaimed.app_score) < +- atomic64_read(&target->memcg_reclaimed.app_score)) +- break; +- } +- list_move_tail(&target->score_node, pos); +- write_unlock_irqrestore(&score_list_lock, flags); +-} +- +-static u64 mem_cgroup_app_score_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- +- return atomic64_read(&memcg->memcg_reclaimed.app_score); +-} +- +-static int mem_cgroup_app_score_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- +- if (val > MAX_APP_SCORE) +- return -EINVAL; +- +- if (atomic64_read(&memcg->memcg_reclaimed.app_score) != val) { +- atomic64_set(&memcg->memcg_reclaimed.app_score, val); +- memcg_app_score_update(memcg); +- } +- +- return 0; +-} +- +-static unsigned long move_pages_to_page_list(struct lruvec *lruvec, enum lru_list lru, +- struct list_head *page_list) +-{ +- struct list_head *src = &lruvec->lists[lru]; +- unsigned long nr_isolated = 0; +- struct page *page; +- +- while (!list_empty(src)) { +- page = lru_to_page(src); +- +- if (PageUnevictable(page)) +- continue; +- +- if (likely(get_page_unless_zero(page))) { +- if (isolate_lru_page(page)) { +- put_page(page); +- continue; +- } +- put_page(page); +- +- } else { +- continue; +- } +- +- +- if (PageUnevictable(page)) { +- putback_lru_page(page); +- continue; +- } +- +- if (PageAnon(page) && !PageSwapBacked(page)) { +- putback_lru_page(page); +- 
continue; +- } +- +- list_add(&page->lru, page_list); +- nr_isolated++; +- } +- +- return nr_isolated; +-} +- +- +-unsigned long reclaim_all_anon_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg) +-{ +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); +- unsigned long nr_reclaimed; +- LIST_HEAD(page_list); +- struct page *page; +- struct reclaim_stat stat = {}; +- struct scan_control sc = { +- .gfp_mask = GFP_KERNEL, +- .may_writepage = 1, +- .may_unmap = 1, +- .may_swap = 1, +- }; +- +-#ifdef CONFIG_RECLAIM_ACCT +- reclaimacct_substage_start(RA_SHRINKANON); +-#endif +- count_vm_event(FREEZE_RECLAIME_COUNT); +- move_pages_to_page_list(lruvec, LRU_INACTIVE_ANON, &page_list); +- +- nr_reclaimed = shrink_folio_list(&page_list, pgdat, &sc, &stat, true); +- count_vm_event(FREEZE_RECLAIMED); +- +- while (!list_empty(&page_list)) { +- page = lru_to_page(&page_list); +- list_del(&page->lru); +- putback_lru_page(page); +- } +- +-#ifdef CONFIG_RECLAIM_ACCT +- reclaimacct_substage_end(RA_SHRINKANON, nr_reclaimed, NULL); +-#endif +- +- return nr_reclaimed; +-} +- +-static ssize_t memcg_force_shrink_anon(struct kernfs_open_file *of, +- char *buf, size_t nbytes, +- loff_t off) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); +- struct pglist_data *pgdat; +- int nid; +- +- for_each_online_node(nid) { +- pgdat = NODE_DATA(nid); +- reclaim_all_anon_memcg(pgdat, memcg); +- } +- +- return nbytes; +-} +- +-static int memcg_name_show(struct seq_file *m, void *v) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); +- +- seq_printf(m, "%s\n", memcg->name); +- return 0; +-} +- +-static ssize_t memcg_name_write(struct kernfs_open_file *of, char *buf, +- size_t nbytes, loff_t off) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); +- +- buf = strstrip(buf); +- if (nbytes >= MEM_CGROUP_NAME_MAX_LEN) +- return -EINVAL; +- +- mutex_lock(&reclaim_para_lock); +- if (memcg) +- strcpy(memcg->name, buf); +- mutex_unlock(&reclaim_para_lock); +- +- return nbytes; +-} +- +-static int memcg_total_info_per_app_show(struct seq_file *m, void *v) +-{ +- struct mem_cgroup *memcg = NULL; +- struct mem_cgroup_per_node *mz = NULL; +- struct lruvec *lruvec = NULL; +- unsigned long anon_size; +- unsigned long zram_compress_size; +- unsigned long eswap_compress_size; +- +- +- while ((memcg = get_next_memcg(memcg))) { +- mz = mem_cgroup_nodeinfo(memcg, 0); +- if (!mz) { +- get_next_memcg_break(memcg); +- return 0; +- } +- +- lruvec = &mz->lruvec; +- if (!lruvec) { +- get_next_memcg_break(memcg); +- return 0; +- } +- +- anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); +- zram_compress_size = memcg_data_size(memcg, CACHE_SIZE); +- eswap_compress_size = memcg_data_size(memcg, SWAP_SIZE); +- anon_size *= PAGE_SIZE / SZ_1K; +- zram_compress_size /= SZ_1K; +- eswap_compress_size /= SZ_1K; +- +- if (!strlen(memcg->name)) +- continue; +- +- seq_printf(m, "%s %lu %lu %lu\n", memcg->name, anon_size, +- zram_compress_size, eswap_compress_size); +- } +- +- return 0; +-} +- +-static int memcg_ub_ufs2zram_ratio_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- const unsigned int ratio = 100; +- +- if (val > ratio) +- return -EINVAL; +- +- atomic64_set(&memcg->memcg_reclaimed.ub_ufs2zram_ratio, val); +- +- return 0; +-} +- +-static u64 memcg_ub_ufs2zram_ratio_read(struct cgroup_subsys_state *css, struct cftype *cft) +-{ +- 
struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- +- return atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio); +-} +- +-static int memcg_force_swapin_write(struct cgroup_subsys_state *css, struct cftype *cft, u64 val) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- u64 size; +- const unsigned int ratio = 100; +- +- size = memcg_data_size(memcg, SWAP_SIZE); +- size = div_u64(atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio) * size, ratio); +- +- swapin_memcg(memcg, size); +- +- return 0; +-} +- +-#ifdef CONFIG_MEM_PURGEABLE +-static unsigned long purgeable_memcg_node(pg_data_t *pgdata, +- struct scan_control *sc, struct mem_cgroup *memcg) +-{ +- unsigned long nr = 0; +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata); +- if (!lruvec) +- return 0; +- +- shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc); +- nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc); +- +- pr_info("reclaim %lu purgeable pages \n", nr); +- return nr; +-} +- +-static int memcg_force_shrink_purgeable_bysize(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 reclaim_size) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(css); +- if (!memcg) +- return 0; +- +- if (reclaim_size == 0) { +- pr_err("reclaim_size is zero, skip shrink\n"); +- return 0; +- } +- +- struct scan_control sc = { +- .gfp_mask = GFP_KERNEL, +- .order = 0, +- .priority = DEF_PRIORITY, +- .may_deactivate = DEACTIVATE_ANON, +- .may_writepage = 1, +- .may_unmap = 1, +- .may_swap = 1, +- .reclaim_idx = MAX_NR_ZONES -1, +- }; +- int nid = 0; +- sc.nr_to_reclaim = div_u64(reclaim_size, PAGE_SIZE); +- +- for_each_node_state(nid, N_MEMORY) +- purgeable_memcg_node(NODE_DATA(nid), &sc, memcg); +- return 0; +-} +-#endif +- +-static struct cftype memcg_policy_files[] = { +- { +- .name = "name", +- .write = memcg_name_write, +- .seq_show = memcg_name_show, +- }, +- { +- .name = "ub_ufs2zram_ratio", +- .write_u64 = memcg_ub_ufs2zram_ratio_write, +- .read_u64 = memcg_ub_ufs2zram_ratio_read, +- }, +- { +- .name = "total_info_per_app", +- .seq_show = memcg_total_info_per_app_show, +- }, +- { +- .name = "app_score", +- .write_u64 = mem_cgroup_app_score_write, +- .read_u64 = mem_cgroup_app_score_read, +- }, +- { +- .name = "force_shrink_anon", +- .write = memcg_force_shrink_anon +- }, +- { +- .name = "force_swapin", +- .write_u64 = memcg_force_swapin_write, +- }, +-#ifdef CONFIG_MEM_PURGEABLE +- { +- .name = "force_shrink_purgeable_bysize", +- .write_u64 = memcg_force_shrink_purgeable_bysize, +- }, +-#endif +- { }, /* terminate */ +-}; +- +-static int __init memcg_policy_init(void) +-{ +- if (!mem_cgroup_disabled()) +- WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, +- memcg_policy_files)); +- +- return 0; +-} +-subsys_initcall(memcg_policy_init); +-#else +-struct mem_cgroup *get_next_memcg(struct mem_cgroup *prev) +-{ +- return NULL; +-} +- +-void get_next_memcg_break(struct mem_cgroup *memcg) +-{ +-} +- +- +-struct mem_cgroup *get_prev_memcg(struct mem_cgroup *next) +-{ +- return NULL; +-} +- +-void get_prev_memcg_break(struct mem_cgroup *memcg) +-{ +-} +- +-static u64 mem_cgroup_app_score_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return 0; +-} +- +-static int mem_cgroup_app_score_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- return 0; +-} +- +-void memcg_app_score_update(struct mem_cgroup *target) +-{ +-} +-#endif +diff --git a/mm/memcg_reclaim.c b/mm/memcg_reclaim.c +deleted file mode 100644 +index 3b97fd968..000000000 +--- 
a/mm/memcg_reclaim.c ++++ /dev/null +@@ -1,536 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * mm/memcg_reclaim.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +-#include +-#include +-#include +-#include +-#include +- +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +-#include +-#include "internal.h" +-#endif +- +-static inline bool is_swap_not_allowed(struct scan_control *sc, int swappiness) +-{ +- return !sc->may_swap || !swappiness || !get_nr_swap_pages(); +-} +- +-/* +- * From 0 .. 100. Higher means more swappy. +- */ +-#define HYPERHOLD_SWAPPINESS 100 +- +-static int get_hyperhold_swappiness(void) +-{ +- return is_hyperhold_enable() ? HYPERHOLD_SWAPPINESS : vm_swappiness; +-} +- +-static void get_scan_count_hyperhold(struct pglist_data *pgdat, +- struct scan_control *sc, unsigned long *nr, +- unsigned long *lru_pages) +-{ +- int swappiness = get_hyperhold_swappiness(); +- struct lruvec *lruvec = node_lruvec(pgdat); +- u64 fraction[2]; +- u64 denominator; +- enum scan_balance scan_balance; +- unsigned long ap, fp; +- enum lru_list lru; +- unsigned long pgdatfile; +- unsigned long pgdatfree; +- int z; +- unsigned long anon_cost, file_cost, total_cost; +- unsigned long total_high_wmark = 0; +- +- +- if (cgroup_reclaim(sc) && !swappiness) { +- scan_balance = SCAN_FILE; +- goto out; +- } +- +- /* +- * Do not apply any pressure balancing cleverness when the +- * system is close to OOM, scan both anon and file equally +- * (unless the swappiness setting disagrees with swapping). +- */ +- if (!sc->priority && swappiness) { +- scan_balance = SCAN_EQUAL; +- goto out; +- } +- +- if (!cgroup_reclaim(sc)) { +- pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); +- pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) + +- node_page_state(pgdat, NR_INACTIVE_FILE); +- +- for (z = 0; z < MAX_NR_ZONES; z++) { +- struct zone *zone = &pgdat->node_zones[z]; +- +- if (!managed_zone(zone)) +- continue; +- +- total_high_wmark += high_wmark_pages(zone); +- } +- +- if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) { +- /* +- * Force SCAN_ANON if there are enough inactive +- * anonymous pages on the LRU in eligible zones. +- * Otherwise, the small LRU gets thrashed. +- */ +- if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON) && +- (lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, +- sc->reclaim_idx) >> +- (unsigned int)sc->priority)) { +- scan_balance = SCAN_ANON; +- goto out; +- } +- } +- } +- +- /* +- * If there is enough inactive page cache, i.e. if the size of the +- * inactive list is greater than that of the active list *and* the +- * inactive list actually has some pages to scan on this priority, we +- * do not reclaim anything from the anonymous working set right now. +- * Without the second condition we could end up never scanning an +- * lruvec even if it has plenty of old anonymous pages unless the +- * system is under heavy pressure. +- */ +- +- if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) && +- !inactive_is_low(lruvec, LRU_INACTIVE_FILE) && +- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { +- scan_balance = SCAN_FILE; +- goto out; +- } +- +- scan_balance = SCAN_FRACT; +- +- /* +- * Calculate the pressure balance between anon and file pages. 
+- * +- * The amount of pressure we put on each LRU is inversely +- * proportional to the cost of reclaiming each list, as +- * determined by the share of pages that are refaulting, times +- * the relative IO cost of bringing back a swapped out +- * anonymous page vs reloading a filesystem page (swappiness). +- * +- * Although we limit that influence to ensure no list gets +- * left behind completely: at least a third of the pressure is +- * applied, before swappiness. +- * +- * With swappiness at 100, anon and file have equal IO cost. +- */ +- total_cost = sc->anon_cost + sc->file_cost; +- anon_cost = total_cost + sc->anon_cost; +- file_cost = total_cost + sc->file_cost; +- total_cost = anon_cost + file_cost; +- +- ap = swappiness * (total_cost + 1); +- ap /= anon_cost + 1; +- +- fp = (200 - swappiness) * (total_cost + 1); +- fp /= file_cost + 1; +- +- fraction[0] = ap; +- fraction[1] = fp; +- denominator = ap + fp; +- +-out: +- *lru_pages = 0; +- for_each_evictable_lru(lru) { +- int file = is_file_lru(lru); +- unsigned long lruvec_size; +- unsigned long scan; +- +- lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); +- scan = lruvec_size; +- *lru_pages += scan; +- scan >>= sc->priority; +- +- switch (scan_balance) { +- case SCAN_EQUAL: +- /* Scan lists relative to size */ +- break; +- case SCAN_FRACT: +- /* +- * Scan types proportional to swappiness and +- * their relative recent reclaim efficiency. +- * Make sure we don't miss the last page on +- * the offlined memory cgroups because of a +- * round-off error. +- */ +- scan = DIV64_U64_ROUND_UP(scan * fraction[file], +- denominator); +- break; +- case SCAN_FILE: +- case SCAN_ANON: +- /* Scan one type exclusively */ +- if ((scan_balance == SCAN_FILE) != file) +- scan = 0; +- break; +- default: +- /* Look ma, no brain */ +- BUG(); +- } +- +- nr[lru] = scan; +- } +-} +- +-#define ISOLATE_LIMIT_CNT 5 +-void shrink_anon_memcg(struct pglist_data *pgdat, +- struct mem_cgroup *memcg, struct scan_control *sc, +- unsigned long *nr) +-{ +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); +- unsigned long nr_to_scan; +- enum lru_list lru; +- unsigned long nr_reclaimed = 0; +- struct blk_plug plug; +- +- blk_start_plug(&plug); +- +- while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_ANON]) { +- for (lru = 0; lru <= LRU_ACTIVE_ANON; lru++) { +- if (nr[lru]) { +- nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); +- nr[lru] -= nr_to_scan; +- nr_reclaimed += +- shrink_list(lru, nr_to_scan, +- lruvec, sc); +- } +- } +- if (sc->nr_reclaimed >= sc->nr_to_reclaim || +- (sc->isolate_count > ISOLATE_LIMIT_CNT && +- sc->invoker == DIRECT_RECLAIM)) +- break; +- } +- blk_finish_plug(&plug); +- sc->nr_reclaimed += nr_reclaimed; +- sc->nr_reclaimed_anon += nr_reclaimed; +-} +- +-static inline bool memcg_is_child_of(struct mem_cgroup *mcg, struct mem_cgroup *tmcg) +-{ +- if (tmcg == NULL) +- return true; +- +- while (!mem_cgroup_is_root(mcg)) { +- if (mcg == tmcg) +- break; +- +- mcg = parent_mem_cgroup(mcg); +- } +- +- return (mcg == tmcg); +-} +- +-static void shrink_anon(struct pglist_data *pgdat, +- struct scan_control *sc, unsigned long *nr) +-{ +- unsigned long reclaimed; +- unsigned long scanned; +- struct mem_cgroup *memcg = NULL; +- struct mem_cgroup *target_memcg = sc->target_mem_cgroup; +- unsigned long nr_memcg[NR_LRU_LISTS]; +- unsigned long nr_node_active = lruvec_lru_size( +- node_lruvec(pgdat), LRU_ACTIVE_ANON, MAX_NR_ZONES); +- unsigned long nr_node_inactive = lruvec_lru_size( +- node_lruvec(pgdat), LRU_INACTIVE_ANON, MAX_NR_ZONES); +- +- 
while ((memcg = get_next_memcg(memcg))) { +- struct lruvec *lruvec = NULL; +- +- if (!memcg_is_child_of(memcg, target_memcg)) +- continue; +- +- lruvec = mem_cgroup_lruvec(memcg, pgdat); +- +- reclaimed = sc->nr_reclaimed; +- scanned = sc->nr_scanned; +- +- nr_memcg[LRU_ACTIVE_ANON] = nr[LRU_ACTIVE_ANON] * +- lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, +- MAX_NR_ZONES) / (nr_node_active + 1); +- nr_memcg[LRU_INACTIVE_ANON] = nr[LRU_INACTIVE_ANON] * +- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, +- MAX_NR_ZONES) / (nr_node_inactive + 1); +- nr_memcg[LRU_ACTIVE_FILE] = 0; +- nr_memcg[LRU_INACTIVE_FILE] = 0; +- +- /* +- * This loop can become CPU-bound when target memcgs +- * aren't eligible for reclaim - either because they +- * don't have any reclaimable pages, or because their +- * memory is explicitly protected. Avoid soft lockups. +- */ +- cond_resched(); +- +- mem_cgroup_calculate_protection(target_memcg, memcg); +- +- if (mem_cgroup_below_min(target_memcg, memcg)) { +- /* +- * Hard protection. +- * If there is no reclaimable memory, OOM. +- */ +- continue; +- } else if (mem_cgroup_below_low(target_memcg, memcg)) { +- /* +- * Soft protection. +- * Respect the protection only as long as +- * there is an unprotected supply +- * of reclaimable memory from other cgroups. +- */ +- if (!sc->memcg_low_reclaim) { +- sc->memcg_low_skipped = 1; +- continue; +- } +- memcg_memory_event(memcg, MEMCG_LOW); +- } +- +- shrink_anon_memcg(pgdat, memcg, sc, nr_memcg); +- shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, +- sc->priority); +- +- vmpressure(sc->gfp_mask, memcg, false, +- sc->nr_scanned - scanned, +- sc->nr_reclaimed - reclaimed); +- +- if (sc->nr_reclaimed >= sc->nr_to_reclaim || +- (sc->isolate_count > ISOLATE_LIMIT_CNT && +- sc->invoker == DIRECT_RECLAIM)) { +- get_next_memcg_break(memcg); +- break; +- } +- } +-} +- +-static void shrink_file(struct pglist_data *pgdat, +- struct scan_control *sc, unsigned long *nr) +-{ +- struct lruvec *lruvec = node_lruvec(pgdat); +- unsigned long nr_to_scan; +- enum lru_list lru; +- unsigned long nr_reclaimed = 0; +- struct blk_plug plug; +- +- blk_start_plug(&plug); +- +- while (nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { +- for (lru = LRU_INACTIVE_FILE; lru <= LRU_ACTIVE_FILE; lru++) { +- if (nr[lru]) { +- nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); +- nr[lru] -= nr_to_scan; +- nr_reclaimed += shrink_list(lru, nr_to_scan, lruvec, sc); +- } +- } +- } +- blk_finish_plug(&plug); +- sc->nr_reclaimed += nr_reclaimed; +- sc->nr_reclaimed_file += nr_reclaimed; +-} +- +-bool shrink_node_hyperhold(struct pglist_data *pgdat, struct scan_control *sc) +-{ +- unsigned long nr_reclaimed; +- struct lruvec *target_lruvec; +- bool reclaimable = false; +- unsigned long file; +- +- target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); +- do { +- /* Get scan count for file and anon */ +- unsigned long node_lru_pages = 0; +- unsigned long nr[NR_LRU_LISTS] = {0}; +- +- memset(&sc->nr, 0, sizeof(sc->nr)); +- nr_reclaimed = sc->nr_reclaimed; +- +- /* +- * Determine the scan balance between anon and file LRUs. +- */ +- spin_lock_irq(&target_lruvec->lru_lock); +- sc->anon_cost = mem_cgroup_lruvec(NULL, pgdat)->anon_cost; +- sc->file_cost = node_lruvec(pgdat)->file_cost; +- spin_unlock_irq(&target_lruvec->lru_lock); +- +- /* +- * Target desirable inactive:active list ratios for the anon +- * and file LRU lists. 
+- */ +- if (!sc->force_deactivate) { +- unsigned long refaults; +- +- refaults = lruvec_page_state(target_lruvec, +- WORKINGSET_ACTIVATE_ANON); +- if (refaults != target_lruvec->refaults[0] || +- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) +- sc->may_deactivate |= DEACTIVATE_ANON; +- else +- sc->may_deactivate &= ~DEACTIVATE_ANON; +- +- /* +- * When refaults are being observed, it means a new +- * workingset is being established. Deactivate to get +- * rid of any stale active pages quickly. +- */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- refaults = lruvec_page_state(node_lruvec(pgdat), +- WORKINGSET_ACTIVATE_FILE); +- if (refaults != node_lruvec(pgdat)->refaults[1] || +- inactive_is_low(node_lruvec(pgdat), LRU_INACTIVE_FILE)) +- sc->may_deactivate |= DEACTIVATE_FILE; +-#else +- refaults = lruvec_page_state(target_lruvec, +- WORKINGSET_ACTIVATE_FILE); +- if (refaults != target_lruvec->refaults[1] || +- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) +- sc->may_deactivate |= DEACTIVATE_FILE; +-#endif +- else +- sc->may_deactivate &= ~DEACTIVATE_FILE; +- } else +- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; +- +- /* +- * If we have plenty of inactive file pages that aren't +- * thrashing, try to reclaim those first before touching +- * anonymous pages. +- */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- file = lruvec_page_state(node_lruvec(pgdat), NR_INACTIVE_FILE); +-#else +- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); +-#endif +- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) +- sc->cache_trim_mode = 1; +- else +- sc->cache_trim_mode = 0; +- +- /* +- * Prevent the reclaimer from falling into the cache trap: as +- * cache pages start out inactive, every cache fault will tip +- * the scan balance towards the file LRU. And as the file LRU +- * shrinks, so does the window for rotation from references. +- * This means we have a runaway feedback loop where a tiny +- * thrashing file LRU becomes infinitely more attractive than +- * anon pages. Try to detect this based on file LRU size. +- */ +- if (!cgroup_reclaim(sc)) { +- unsigned long total_high_wmark = 0; +- unsigned long free, anon; +- int z; +- +- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); +- file = node_page_state(pgdat, NR_ACTIVE_FILE) + +- node_page_state(pgdat, NR_INACTIVE_FILE); +- +- for (z = 0; z < MAX_NR_ZONES; z++) { +- struct zone *zone = &pgdat->node_zones[z]; +- +- if (!managed_zone(zone)) +- continue; +- +- total_high_wmark += high_wmark_pages(zone); +- } +- +- /* +- * Consider anon: if that's low too, this isn't a +- * runaway file reclaim problem, but rather just +- * extreme pressure. Reclaim as per usual then. +- */ +- anon = node_page_state(pgdat, NR_INACTIVE_ANON); +- +- sc->file_is_tiny = +- file + free <= total_high_wmark && +- !(sc->may_deactivate & DEACTIVATE_ANON) && +- anon >> sc->priority; +- } +- +- get_scan_count_hyperhold(pgdat, sc, nr, &node_lru_pages); +- +- if (!cgroup_reclaim(sc)) { +- /* Shrink the Total-File-LRU */ +- shrink_file(pgdat, sc, nr); +- } +- +- /* Shrink Anon by iterating score_list */ +- shrink_anon(pgdat, sc, nr); +- +- if (sc->nr_reclaimed - nr_reclaimed) +- reclaimable = true; +- +- if (current_is_kswapd()) { +- /* +- * If reclaim is isolating dirty pages under writeback, +- * it implies that the long-lived page allocation rate +- * is exceeding the page laundering rate. 
Either the +- * global limits are not being effective at throttling +- * processes due to the page distribution throughout +- * zones or there is heavy usage of a slow backing +- * device. The only option is to throttle from reclaim +- * context which is not ideal as there is no guarantee +- * the dirtying process is throttled in the same way +- * balance_dirty_pages() manages. +- * +- * Once a node is flagged PGDAT_WRITEBACK, kswapd will +- * count the number of pages under pages flagged for +- * immediate reclaim and stall if any are encountered +- * in the nr_immediate check below. +- */ +- if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) +- set_bit(PGDAT_WRITEBACK, &pgdat->flags); +- +- /* Allow kswapd to start writing pages during reclaim. */ +- if (sc->nr.unqueued_dirty == sc->nr.file_taken) +- set_bit(PGDAT_DIRTY, &pgdat->flags); +- +- /* +- * If kswapd scans pages marked for immediate +- * reclaim and under writeback (nr_immediate), it +- * implies that pages are cycling through the LRU +- * faster than they are written so also forcibly stall. +- */ +- if (sc->nr.immediate) +- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); +- } +- /* +- * Legacy memcg will stall in page writeback so avoid forcibly +- * stalling in reclaim_throttle(). +- */ +- if ((current_is_kswapd() || +- (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) && +- sc->nr.dirty && sc->nr.dirty == sc->nr.congested) +- set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); +- +- /* +- * Stall direct reclaim for IO completions if underlying BDIs +- * and node is congested. Allow kswapd to continue until it +- * starts encountering unqueued dirty pages or cycling through +- * the LRU too quickly. +- */ +- if (!current_is_kswapd() && current_may_throttle() && +- !sc->hibernation_mode && +- test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)) +- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); +- +- } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, +- sc)); +- /* +- * Kswapd gives up on balancing particular nodes after too +- * many failures to reclaim anything from them and goes to +- * sleep. On reclaim progress, reset the failure counter. A +- * successful direct reclaim run will revive a dormant kswapd. +- */ +- if (reclaimable) +- pgdat->kswapd_failures = 0; +- +- return reclaimable; +-} +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 31dd77076..9bf5a69e2 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -71,7 +71,6 @@ + #include "swap.h" + + #include +-#include + + #include + +@@ -88,7 +87,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); + static bool cgroup_memory_nosocket __ro_after_init; + + /* Kernel memory accounting disabled? */ +-static bool cgroup_memory_nokmem = true; ++static bool cgroup_memory_nokmem __ro_after_init; + + /* BPF memory accounting disabled? 
*/ + static bool cgroup_memory_nobpf __ro_after_init; +@@ -468,15 +467,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, + + static unsigned long soft_limit_excess(struct mem_cgroup *memcg) + { +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- struct mem_cgroup_per_node *mz = mem_cgroup_nodeinfo(memcg, 0); +- struct lruvec *lruvec = &mz->lruvec; +- unsigned long nr_pages = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, +- MAX_NR_ZONES) + lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, +- MAX_NR_ZONES); +-#else + unsigned long nr_pages = page_counter_read(&memcg->memory); +-#endif + unsigned long soft_limit = READ_ONCE(memcg->soft_limit); + unsigned long excess = 0; + +@@ -854,13 +845,8 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); + + /* Update memcg and lruvec */ +- if (!mem_cgroup_disabled()) { +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (is_node_lruvec(lruvec)) +- return; +-#endif ++ if (!mem_cgroup_disabled()) + __mod_memcg_lruvec_state(lruvec, idx, val); +- } + } + + void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, +@@ -871,13 +857,6 @@ void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, + pg_data_t *pgdat = page_pgdat(page); + struct lruvec *lruvec; + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (is_file_page(page) && !is_prot_page(page)) { +- __mod_node_page_state(pgdat, idx, val); +- return; +- } +-#endif +- + rcu_read_lock(); + memcg = page_memcg(head); + /* Untracked pages have no memcg, no lruvec. Update only the node */ +@@ -930,10 +909,6 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, + + if (mem_cgroup_disabled() || index < 0) + return; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!memcg) +- return; +-#endif + + memcg_stats_lock(); + __this_cpu_add(memcg->vmstats_percpu->events[index], count); +@@ -1421,11 +1396,6 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, + if (mem_cgroup_disabled()) + return; + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (is_node_lruvec(lruvec)) +- return; +-#endif +- + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + lru_size = &mz->lru_zone_size[zid][lru]; + +@@ -5261,10 +5231,6 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) + struct mem_cgroup *mem_cgroup_from_id(unsigned short id) + { + WARN_ON_ONCE(!rcu_read_lock_held()); +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (id == -1) +- return NULL; +-#endif + return idr_find(&mem_cgroup_idr, id); + } + +@@ -5307,9 +5273,6 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) + } + + lruvec_init(&pn->lruvec); +-#if defined(CONFIG_HYPERHOLD_FILE_LRU) && defined(CONFIG_MEMCG) +- pn->lruvec.pgdat = NODE_DATA(node); +-#endif + pn->memcg = memcg; + + memcg->nodeinfo[node] = pn; +@@ -5401,18 +5364,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); + memcg->deferred_split_queue.split_queue_len = 0; + #endif +- +-#ifdef CONFIG_HYPERHOLD_MEMCG +- if (unlikely(!score_head_inited)) { +- INIT_LIST_HEAD(&score_head); +- score_head_inited = true; +- } +-#endif +- +-#ifdef CONFIG_HYPERHOLD_MEMCG +- INIT_LIST_HEAD(&memcg->score_node); +-#endif +- + lru_gen_init_memcg(memcg); + return memcg; + fail: +@@ -5433,14 +5384,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + if (IS_ERR(memcg)) + return ERR_CAST(memcg); + +-#ifdef CONFIG_HYPERHOLD_MEMCG +- atomic64_set(&memcg->memcg_reclaimed.app_score, 300); 
+-#endif +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio, 10); +- atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio, 60); +- atomic_set(&memcg->memcg_reclaimed.refault_threshold, 50); +-#endif + page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); + WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); + #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) +@@ -5497,11 +5440,6 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) + FLUSH_TIME); + lru_gen_online_memcg(memcg); + +-#ifdef CONFIG_HYPERHOLD_MEMCG +- memcg_app_score_update(memcg); +- css_get(css); +-#endif +- + /* Online state pins memcg ID, memcg ID pins CSS */ + refcount_set(&memcg->id.ref, 1); + css_get(css); +@@ -5533,15 +5471,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup_event *event, *tmp; + +-#ifdef CONFIG_HYPERHOLD_MEMCG +- unsigned long flags; +- +- write_lock_irqsave(&score_list_lock, flags); +- list_del_init(&memcg->score_node); +- write_unlock_irqrestore(&score_list_lock, flags); +- css_put(css); +-#endif +- + /* + * Unregister events and notify userspace. + * Notify userspace about cgroup removing only after rmdir of cgroup +@@ -6740,9 +6669,6 @@ static int memory_stat_show(struct seq_file *m, void *v) + memory_stat_format(memcg, &s); + seq_puts(m, buf); + kfree(buf); +-#ifdef CONFIG_HYPERHOLD_DEBUG +- memcg_eswap_info_show(m); +-#endif + return 0; + } + +@@ -7490,8 +7416,6 @@ static int __init cgroup_memory(char *s) + cgroup_memory_nokmem = true; + if (!strcmp(token, "nobpf")) + cgroup_memory_nobpf = true; +- if (!strcmp(token, "kmem")) +- cgroup_memory_nokmem = false; + } + return 1; + } +diff --git a/mm/memory.c b/mm/memory.c +index bd63cef02..65f1865cb 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -77,9 +77,7 @@ + #include + #include + #include +-#ifdef CONFIG_MEM_PURGEABLE +-#include +-#endif ++ + #include + + #include +@@ -1428,10 +1426,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, + unsigned int delay_rmap; + + page = vm_normal_page(vma, addr, ptent); +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_USEREXPTE) +- page = NULL; +-#endif + if (unlikely(!should_zap_page(details, page))) + continue; + ptent = ptep_get_and_clear_full(mm, addr, pte, +@@ -1444,10 +1438,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, + ksm_might_unmap_zero_page(mm, ptent); + continue; + } +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_PURGEABLE) +- uxpte_clear_present(vma, addr); +-#endif ++ + delay_rmap = 0; + if (!PageAnon(page)) { + if (pte_dirty(ptent)) { +@@ -3172,13 +3163,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) + */ + ptep_clear_flush(vma, vmf->address, vmf->pte); + folio_add_new_anon_rmap(new_folio, vma, vmf->address); +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_PURGEABLE) { +- pr_info("set wp new folio %lx purgeable\n", folio_pfn(new_folio)); +- folio_set_purgeable(new_folio); +- uxpte_set_present(vma, vmf->address); +- } +-#endif + folio_add_lru_vma(new_folio, vma); + /* + * We call the notify macro here because, when using secondary +@@ -4138,23 +4122,11 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + if (pte_alloc(vma->vm_mm, vmf->pmd)) + return VM_FAULT_OOM; + +-#ifdef CONFIG_MEM_PURGEABLE +- /* use extra page table for userexpte */ +- if (vma->vm_flags & VM_USEREXPTE) { +- if (do_uxpte_page_fault(vmf, &entry)) +- goto oom; +- else +- goto got_page; +- } +-#endif + 
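The zero-page branch kept by this hunk is what makes untouched anonymous mappings free to read: a read fault installs the shared zero page, and real folios are allocated only on the first write. A small userspace probe of that behaviour; it assumes Linux's /proc/self/statm layout (second field = resident pages) and a 4 KiB page size:

#include <stdio.h>
#include <sys/mman.h>

static long rss_pages(void)
{
        long size = 0, rss = -1;
        FILE *f = fopen("/proc/self/statm", "r");

        if (f) {
                if (fscanf(f, "%ld %ld", &size, &rss) != 2)
                        rss = -1;
                fclose(f);
        }
        return rss;
}

int main(void)
{
        size_t len = 64UL << 20;        /* 64 MiB of anonymous memory */
        unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long sum = 0;

        if (p == MAP_FAILED)
                return 1;
        for (size_t i = 0; i < len; i += 4096)
                sum += p[i];            /* read faults: shared zero page */
        printf("after reads:  rss=%ld pages, sum=%lu\n", rss_pages(), sum);
        for (size_t i = 0; i < len; i += 4096)
                p[i] = 1;               /* write faults: real folios, RSS grows */
        printf("after writes: rss=%ld pages\n", rss_pages());
        return 0;
}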
/* Use the zero-page for reads */ + if (!(vmf->flags & FAULT_FLAG_WRITE) && + !mm_forbids_zeropage(vma->vm_mm)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), + vma->vm_page_prot)); +-#ifdef CONFIG_MEM_PURGEABLE +-got_page: +-#endif + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (!vmf->pte) +@@ -4219,16 +4191,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + + inc_mm_counter(vma->vm_mm, MM_ANONPAGES); + folio_add_new_anon_rmap(folio, vma, vmf->address); +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_PURGEABLE) +- folio_set_purgeable(folio); +-#endif + folio_add_lru_vma(folio, vma); + setpte: +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_PURGEABLE) +- uxpte_set_present(vma, vmf->address); +-#endif + if (uffd_wp) + entry = pte_mkuffd_wp(entry); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 0e2268dc7..9beed7c71 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -35,7 +35,6 @@ + #include + #include + #include +-#include + + #include + +@@ -1209,9 +1208,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, + + kswapd_run(nid); + kcompactd_run(nid); +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- zswapd_run(nid); +-#endif + + writeback_set_ratelimit(); + +@@ -2028,9 +2024,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, + if (arg.status_change_nid >= 0) { + kcompactd_stop(node); + kswapd_stop(node); +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- zswapd_stop(node); +-#endif + } + + writeback_set_ratelimit(); +diff --git a/mm/memory_monitor.c b/mm/memory_monitor.c +deleted file mode 100644 +index 88fb97466..000000000 +--- a/mm/memory_monitor.c ++++ /dev/null +@@ -1,58 +0,0 @@ +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "internal.h" +- +-static atomic_t kswapd_monitor = ATOMIC_INIT(0); +-static DECLARE_WAIT_QUEUE_HEAD(kswapd_poll_wait); +- +-void kswapd_monitor_wake_up_queue(void) +-{ +- atomic_inc(&kswapd_monitor); +- wake_up_interruptible(&kswapd_poll_wait); +-} +- +-static __poll_t kswapd_monitor_poll(struct file *file, struct poll_table_struct *wait) +-{ +- struct seq_file *seq = file->private_data; +- +- poll_wait(file, &kswapd_poll_wait, wait); +- +- if (seq->poll_event != atomic_read(&kswapd_monitor)) { +- seq->poll_event = atomic_read(&kswapd_monitor); +- return EPOLLPRI; +- } +- +- return EPOLLIN | EPOLLRDNORM; +-} +- +-static int kswapd_monitor_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "kswapd_monitor_show kswapd_monitor %d\n", atomic_read(&kswapd_monitor)); +- return 0; +-} +- +-static int kswapd_monitor_open(struct inode *inode, struct file *file) +-{ +- return single_open(file, kswapd_monitor_show, NULL); +-} +- +-static const struct proc_ops proc_kswapd_monitor_operations = { +- .proc_open = kswapd_monitor_open, +- .proc_poll = kswapd_monitor_poll, +- .proc_read = seq_read, +- .proc_lseek = seq_lseek, +- .proc_release = single_release, +-}; +- +-static int __init memory_monitor_init(void) +-{ +- proc_create("kswapd_monitor", 0, NULL, &proc_kswapd_monitor_operations); +- return 0; +-} +- +-__initcall(memory_monitor_init) +diff --git a/mm/mm_init.c b/mm/mm_init.c +index 8b31d6a43..77fd04c83 100644 +--- a/mm/mm_init.c ++++ b/mm/mm_init.c +@@ -1361,18 +1361,12 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat) + + init_waitqueue_head(&pgdat->kswapd_wait); + init_waitqueue_head(&pgdat->pfmemalloc_wait); +-#ifdef 
CONFIG_HYPERHOLD_ZSWAPD +- init_waitqueue_head(&pgdat->zswapd_wait); +-#endif + + for (i = 0; i < NR_VMSCAN_THROTTLE; i++) + init_waitqueue_head(&pgdat->reclaim_wait[i]); + + pgdat_page_ext_init(pgdat); + lruvec_init(&pgdat->__lruvec); +-#if defined(CONFIG_HYPERHOLD_FILE_LRU) && defined(CONFIG_MEMCG) +- pgdat->__lruvec.pgdat = pgdat; +-#endif + } + + static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, +diff --git a/mm/mmap.c b/mm/mmap.c +index 4b9f1b246..03a24cb39 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -52,18 +52,12 @@ + #include + #include + #include +-#include + + #define CREATE_TRACE_POINTS + #include + + #include "internal.h" + +-#ifdef CONFIG_MEM_PURGEABLE +-#define MAP_PURGEABLE 0x04 /* purgeable memory */ +-#define MAP_USEREXPTE 0x08 /* userspace extension page table */ +-#endif +- + #ifndef arch_mmap_check + #define arch_mmap_check(addr, len, flags) (0) + #endif +@@ -1365,14 +1359,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr, + */ + pgoff = addr >> PAGE_SHIFT; + break; +-#ifdef CONFIG_MEM_PURGEABLE +- case MAP_PURGEABLE: +- vm_flags |= VM_PURGEABLE; +- break; +- case MAP_USEREXPTE: +- vm_flags |= VM_USEREXPTE; +- break; +-#endif + default: + return -EINVAL; + } +@@ -1439,12 +1425,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, + } + + retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); +- +- if (!IS_ERR_VALUE(retval)) { +- CALL_HCK_LITE_HOOK(check_jit_memory_lhck, current, fd, prot, flags, PAGE_ALIGN(len), &retval); +- if (IS_ERR_VALUE(retval)) +- pr_info("JITINFO: jit request denied"); +- } + out_fput: + if (file) + fput(file); +@@ -2656,11 +2636,6 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, + if (end == start) + return -EINVAL; + +- int errno = 0; +- CALL_HCK_LITE_HOOK(delete_jit_memory_lhck, current, start, len, &errno); +- if (errno) +- return errno; +- + /* arch_unmap() might do unmaps itself. 
*/ + arch_unmap(mm, start, end); + +diff --git a/mm/mprotect.c b/mm/mprotect.c +index efe332f3c..7e870a8c9 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -38,7 +38,6 @@ + #include + + #include "internal.h" +-#include + + bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, + pte_t pte) +@@ -702,14 +701,6 @@ static int do_mprotect_pkey(unsigned long start, size_t len, + + start = untagged_addr(start); + +- if (prot & PROT_EXEC) { +- CALL_HCK_LITE_HOOK(find_jit_memory_lhck, current, start, len, &error); +- if (error) { +- pr_info("JITINFO: mprotect protection triggered"); +- return error; +- } +- } +- + prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); + if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ + return -EINVAL; +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 24a738de7..bc62bb2a3 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -51,10 +51,6 @@ + #include + #include + #include +-#include +-#ifdef CONFIG_RECLAIM_ACCT +-#include +-#endif + #include + #include + #include "internal.h" +@@ -279,11 +275,8 @@ const char * const migratetype_names[MIGRATE_TYPES] = { + "Unmovable", + "Movable", + "Reclaimable", +-#ifdef CONFIG_CMA_REUSE +- "CMA", +-#endif + "HighAtomic", +-#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE) ++#ifdef CONFIG_CMA + "CMA", + #endif + #ifdef CONFIG_MEMORY_ISOLATION +@@ -2103,27 +2096,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, + + } + +-static __always_inline struct page * +-__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, +- int migratetype, unsigned int alloc_flags) +-{ +- struct page *page = NULL; +-retry: +- page = __rmqueue_smallest(zone, order, migratetype); +- +- if (unlikely(!page) && is_migrate_cma(migratetype)) { +- migratetype = MIGRATE_MOVABLE; +- alloc_flags &= ~ALLOC_CMA; +- page = __rmqueue_smallest(zone, order, migratetype); +- } +- +- if (unlikely(!page) && +- __rmqueue_fallback(zone, order, migratetype, alloc_flags)) +- goto retry; +- +- return page; +-} +- + /* + * Do the hard work of removing an element from the buddy allocator. + * Call me with the zone->lock already held. +@@ -2134,12 +2106,6 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, + { + struct page *page; + +-#ifdef CONFIG_CMA_REUSE +- page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); +- if (page) +- return page; +-#endif +- + if (IS_ENABLED(CONFIG_CMA)) { + /* + * Balance movable allocations between regular and CMA areas by +@@ -3090,7 +3056,7 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, + unsigned int alloc_flags) + { + #ifdef CONFIG_CMA +- if (gfp_migratetype(gfp_mask) == get_cma_migratetype()) ++ if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; + #endif + return alloc_flags; +@@ -4253,11 +4219,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, + + might_alloc(gfp_mask); + +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- if (gfp_mask & __GFP_KSWAPD_RECLAIM) +- wake_all_zswapd(); +-#endif +- + if (should_fail_alloc_page(gfp_mask, order)) + return false; + +diff --git a/mm/purgeable.c b/mm/purgeable.c +deleted file mode 100644 +index 54bee931c..000000000 +--- a/mm/purgeable.c ++++ /dev/null +@@ -1,348 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (c) 2024 Huawei Device Co., Ltd. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include /* find_lock_task_mm */ +- +-#include +- +-struct uxpte_t { +- atomic64_t val; +-}; +- +-#define UXPTE_SIZE_SHIFT 3 +-#define UXPTE_SIZE (1 << UXPTE_SIZE_SHIFT) +- +-#define UXPTE_PER_PAGE_SHIFT (PAGE_SHIFT - UXPTE_SIZE_SHIFT) +-#define UXPTE_PER_PAGE (1 << UXPTE_PER_PAGE_SHIFT) +- +-#define UXPTE_PRESENT_BIT 1 +-#define UXPTE_PRESENT_MASK ((1 << UXPTE_PRESENT_BIT) - 1) +-#define UXPTE_REFCNT_ONE (1 << UXPTE_PRESENT_BIT) +-#define UXPTE_UNDER_RECLAIM (-UXPTE_REFCNT_ONE) +- +-#define vpn(vaddr) ((vaddr) >> PAGE_SHIFT) +-#define uxpte_pn(vaddr) (vpn(vaddr) >> UXPTE_PER_PAGE_SHIFT) +-#define uxpte_off(vaddr) (vpn(vaddr) & (UXPTE_PER_PAGE - 1)) +-#define uxpn2addr(uxpn) ((uxpn) << (UXPTE_PER_PAGE_SHIFT + PAGE_SHIFT)) +-#define uxpte_refcnt(uxpte) ((uxpte) >> UXPTE_PRESENT_BIT) +-#define uxpte_present(uxpte) ((uxpte) & UXPTE_PRESENT_MASK) +- +-static inline long uxpte_read(struct uxpte_t *uxpte) +-{ +- return atomic64_read(&uxpte->val); +-} +- +-static inline void uxpte_set(struct uxpte_t *uxpte, long val) +-{ +- atomic64_set(&uxpte->val, val); +-} +- +-static inline bool uxpte_cas(struct uxpte_t *uxpte, long old, long new) +-{ +- return atomic64_cmpxchg(&uxpte->val, old, new) == old; +-} +- +-void mm_init_uxpgd(struct mm_struct *mm) +-{ +- mm->uxpgd = NULL; +- spin_lock_init(&mm->uxpgd_lock); +-} +- +-void mm_clear_uxpgd(struct mm_struct *mm) +-{ +- struct page *page = NULL; +- void **slot = NULL; +- struct radix_tree_iter iter; +- +- spin_lock(&mm->uxpgd_lock); +- if (!mm->uxpgd) +- goto out; +- radix_tree_for_each_slot(slot, mm->uxpgd, &iter, 0) { +- page = radix_tree_delete(mm->uxpgd, iter.index); +- put_page(page); +- } +-out: +- kfree(mm->uxpgd); +- mm->uxpgd = NULL; +- spin_unlock(&mm->uxpgd_lock); +-} +- +-/* should hold uxpgd_lock before invoke */ +-static struct page *lookup_uxpte_page(struct vm_area_struct *vma, +- unsigned long addr, bool alloc) +-{ +- struct radix_tree_root *uxpgd = NULL; +- struct page *page = NULL; +- struct folio *new_folio = NULL; +- struct page *new_page = NULL; +- struct mm_struct *mm = vma->vm_mm; +- unsigned long uxpn = uxpte_pn(addr); +- +- if (mm->uxpgd) +- goto lookup; +- if (!alloc) +- goto out; +- spin_unlock(&mm->uxpgd_lock); +- uxpgd = kzalloc(sizeof(struct radix_tree_root), GFP_KERNEL); +- if (!uxpgd) { +- pr_err("uxpgd alloc failed.\n"); +- spin_lock(&mm->uxpgd_lock); +- goto out; +- } +- INIT_RADIX_TREE(uxpgd, GFP_KERNEL); +- spin_lock(&mm->uxpgd_lock); +- if (mm->uxpgd) +- kfree(uxpgd); +- else +- mm->uxpgd = uxpgd; +-lookup: +- page = radix_tree_lookup(mm->uxpgd, uxpn); +- if (page) +- goto out; +- if (!alloc) +- goto out; +- spin_unlock(&mm->uxpgd_lock); +- new_folio = vma_alloc_zeroed_movable_folio(vma, addr); +- if (!new_folio) { +- pr_err("uxpte page alloc fail.\n"); +- spin_lock(&mm->uxpgd_lock); +- goto out; +- } +- new_page = &new_folio->page; +- if (radix_tree_preload(GFP_KERNEL)) { +- put_page(new_page); +- pr_err("radix preload fail.\n"); +- spin_lock(&mm->uxpgd_lock); +- goto out; +- } +- spin_lock(&mm->uxpgd_lock); +- page = radix_tree_lookup(mm->uxpgd, uxpn); +- if (page) { +- put_page(new_page); +- } else { +- page = new_page; +- radix_tree_insert(mm->uxpgd, uxpn, page); +- } +- radix_tree_preload_end(); +-out: +- return page; +-} +- +-/* should hold uxpgd_lock before invoke */ +-static struct uxpte_t *lookup_uxpte(struct vm_area_struct *vma, +- unsigned long addr, bool alloc) +-{ +- struct uxpte_t *uxpte = NULL; +- struct page *page = NULL; +- +- 
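The UXPTE_* macros above pack the whole per-page purgeable state into one atomic long: bit 0 is the present bit, the upper bits form a signed pin refcount, and -UXPTE_REFCNT_ONE doubles as the under-reclaim lock value that lock_uxpte() below installs by cmpxchg. A standalone decoder for that word layout; decode() and the sample values are illustrative, and like the kernel code it assumes an arithmetic right shift on long:

#include <stdio.h>

#define UXPTE_PRESENT_BIT   1
#define UXPTE_PRESENT_MASK  ((1 << UXPTE_PRESENT_BIT) - 1)
#define UXPTE_REFCNT_ONE    (1 << UXPTE_PRESENT_BIT)
#define UXPTE_UNDER_RECLAIM (-UXPTE_REFCNT_ONE)

static void decode(long val)
{
        printf("raw=%3ld  present=%ld  refcnt=%2ld%s\n",
               val,
               val & UXPTE_PRESENT_MASK,     /* uxpte_present() */
               val >> UXPTE_PRESENT_BIT,     /* uxpte_refcnt()  */
               val == UXPTE_UNDER_RECLAIM ? "  (locked for reclaim)" : "");
}

int main(void)
{
        decode(0);                            /* absent, unpinned */
        decode(1);                            /* present, eligible for reclaim */
        decode(1 + 2 * UXPTE_REFCNT_ONE);     /* present, pinned twice by user */
        decode(UXPTE_UNDER_RECLAIM);          /* -2: reclaim owns the entry */
        return 0;
}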
page = lookup_uxpte_page(vma, addr, alloc); +- if (!page) +- return NULL; +- uxpte = page_to_virt(page); +- +- return uxpte + uxpte_off(addr); +-} +- +-bool lock_uxpte(struct vm_area_struct *vma, unsigned long addr) +-{ +- struct uxpte_t *uxpte = NULL; +- long val = 0; +- +- spin_lock(&vma->vm_mm->uxpgd_lock); +- uxpte = lookup_uxpte(vma, addr, true); +- if (!uxpte) +- goto unlock; +-retry: +- val = uxpte_read(uxpte); +- if (val >> 1) +- goto unlock; +- if (!uxpte_cas(uxpte, val, UXPTE_UNDER_RECLAIM)) +- goto retry; +- val = UXPTE_UNDER_RECLAIM; +-unlock: +- spin_unlock(&vma->vm_mm->uxpgd_lock); +- +- return val == UXPTE_UNDER_RECLAIM; +-} +- +-void unlock_uxpte(struct vm_area_struct *vma, unsigned long addr) +-{ +- struct uxpte_t *uxpte = NULL; +- +- spin_lock(&vma->vm_mm->uxpgd_lock); +- uxpte = lookup_uxpte(vma, addr, false); +- if (!uxpte) +- goto unlock; +- uxpte_set(uxpte, 0); +-unlock: +- spin_unlock(&vma->vm_mm->uxpgd_lock); +-} +- +-bool uxpte_set_present(struct vm_area_struct *vma, unsigned long addr) +-{ +- struct uxpte_t *uxpte = NULL; +- long val = 0; +- +- spin_lock(&vma->vm_mm->uxpgd_lock); +- uxpte = lookup_uxpte(vma, addr, true); +- if (!uxpte) +- goto unlock; +-retry: +- val = uxpte_read(uxpte); +- if (val & 1) +- goto unlock; +- if (!uxpte_cas(uxpte, val, val + 1)) +- goto retry; +- val++; +-unlock: +- spin_unlock(&vma->vm_mm->uxpgd_lock); +- +- return val & 1; +-} +- +-void uxpte_clear_present(struct vm_area_struct *vma, unsigned long addr) +-{ +- struct uxpte_t *uxpte = NULL; +- long val = 0; +- +- spin_lock(&vma->vm_mm->uxpgd_lock); +- uxpte = lookup_uxpte(vma, addr, false); +- if (!uxpte) +- goto unlock; +-retry: +- val = uxpte_read(uxpte); +- if (!(val & 1)) +- goto unlock; +- if (!uxpte_cas(uxpte, val, val - 1)) +- goto retry; +-unlock: +- spin_unlock(&vma->vm_mm->uxpgd_lock); +-} +- +-vm_fault_t do_uxpte_page_fault(struct vm_fault *vmf, pte_t *entry) +-{ +- struct vm_area_struct *vma = vmf->vma; +- unsigned long vma_uxpn = vma->vm_pgoff; +- unsigned long off_uxpn = vpn(vmf->address - vma->vm_start); +- unsigned long addr = uxpn2addr(vma_uxpn + off_uxpn); +- struct page *page = NULL; +- +- if (unlikely(anon_vma_prepare(vma))) +- return VM_FAULT_OOM; +- +- spin_lock(&vma->vm_mm->uxpgd_lock); +- page = lookup_uxpte_page(vma, addr, true); +- spin_unlock(&vma->vm_mm->uxpgd_lock); +- +- if (!page) +- return VM_FAULT_OOM; +- +- *entry = mk_pte(page, vma->vm_page_prot); +- *entry = pte_sw_mkyoung(*entry); +- if (vma->vm_flags & VM_WRITE) +- *entry = pte_mkwrite(pte_mkdirty(*entry), vma); +- return 0; +-} +- +-static void __mm_purg_pages_info(struct mm_struct *mm, unsigned long *total_purg_pages, +- unsigned long *pined_purg_pages) +-{ +- struct page *page = NULL; +- void **slot = NULL; +- struct radix_tree_iter iter; +- struct uxpte_t *uxpte = NULL; +- long pte_entry = 0; +- int index = 0; +- unsigned long nr_total = 0, nr_pined = 0; +- +- spin_lock(&mm->uxpgd_lock); +- if (!mm->uxpgd) +- goto out; +- radix_tree_for_each_slot(slot, mm->uxpgd, &iter, 0) { +- page = radix_tree_deref_slot(slot); +- if (unlikely(!page)) +- continue; +- uxpte = page_to_virt(page); +- for (index = 0; index < UXPTE_PER_PAGE; index++) { +- pte_entry = uxpte_read(&(uxpte[index])); +- if (uxpte_present(pte_entry) == 0) /* not present */ +- continue; +- nr_total++; +- if (uxpte_refcnt(pte_entry) > 0) /* pined by user */ +- nr_pined++; +- } +- } +-out: +- spin_unlock(&mm->uxpgd_lock); +- +- if (total_purg_pages) +- *total_purg_pages = nr_total; +- +- if (pined_purg_pages) +- *pined_purg_pages = 
nr_pined; +-} +- +-void mm_purg_pages_info(struct mm_struct *mm, unsigned long *total_purg_pages, +- unsigned long *pined_purg_pages) +-{ +- if (unlikely(!mm)) +- return; +- +- if (!total_purg_pages && !pined_purg_pages) +- return; +- +- __mm_purg_pages_info(mm, total_purg_pages, pined_purg_pages); +-} +- +-void purg_pages_info(unsigned long *total_purg_pages, unsigned long *pined_purg_pages) +-{ +- struct task_struct *p = NULL; +- struct task_struct *tsk = NULL; +- unsigned long mm_nr_purge = 0, mm_nr_pined = 0; +- unsigned long nr_total = 0, nr_pined = 0; +- +- if (!total_purg_pages && !pined_purg_pages) +- return; +- +- if (total_purg_pages) +- *total_purg_pages = 0; +- +- if (pined_purg_pages) +- *pined_purg_pages = 0; +- +- rcu_read_lock(); +- for_each_process(p) { +- tsk = find_lock_task_mm(p); +- if (!tsk) { +- /* +- * It is a kthread or all of p's threads have already +- * detached their mm's. +- */ +- continue; +- } +- __mm_purg_pages_info(tsk->mm, &mm_nr_purge, &mm_nr_pined); +- nr_total += mm_nr_purge; +- nr_pined += mm_nr_pined; +- task_unlock(tsk); +- +- if (mm_nr_purge > 0) { +- pr_info("purgemm: tsk: %s %lu pined in %lu pages\n", tsk->comm ?: "NULL", +- mm_nr_pined, mm_nr_purge); +- } +- } +- rcu_read_unlock(); +- if (total_purg_pages) +- *total_purg_pages = nr_total; +- +- if (pined_purg_pages) +- *pined_purg_pages = nr_pined; +- pr_info("purgemm: Sum: %lu pined in %lu pages\n", nr_pined, nr_total); +-} +diff --git a/mm/purgeable_ashmem_trigger.c b/mm/purgeable_ashmem_trigger.c +deleted file mode 100644 +index 73759333d..000000000 +--- a/mm/purgeable_ashmem_trigger.c ++++ /dev/null +@@ -1,134 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Copyright (c) 2024 Huawei Technologies Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include "../drivers/staging/android/ashmem.h" +- +-#define PURGEABLE_ASHMEM_SHRINKALL_ARG 0 +- +-struct purgeable_ashmem_trigger_args { +- struct seq_file *seq; +- struct task_struct *tsk; +-}; +- +-static int purgeable_ashmem_trigger_cb(const void *data, +- struct file *f, unsigned int fd) +-{ +- const struct purgeable_ashmem_trigger_args *args = data; +- struct task_struct *tsk = args->tsk; +- struct purgeable_ashmem_metadata pmdata; +- +- if (!is_ashmem_file(f)) +- return 0; +- if (!get_purgeable_ashmem_metadata(f, &pmdata)) +- return 0; +- if (pmdata.is_purgeable) { +- pmdata.name = pmdata.name == NULL ? 
"" : pmdata.name; +- seq_printf(args->seq, +- "%s,%u,%u,%ld,%s,%zu,%u,%u,%d,%d\n", +- tsk->comm, tsk->pid, fd, (long)tsk->signal->oom_score_adj, +- pmdata.name, pmdata.size, pmdata.id, pmdata.create_time, +- pmdata.refc, pmdata.purged); +- } +- return 0; +-} +- +-static ssize_t purgeable_ashmem_trigger_write(struct file *file, +- const char __user *buffer, size_t count, loff_t *ppos) +-{ +- char *buf; +- unsigned int ashmem_id = 0; +- unsigned int create_time = 0; +- const unsigned int params_num = 2; +- const struct cred *cred = current_cred(); +- +- if (!cred) +- return 0; +- +- if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && +- !uid_eq(cred->euid, GLOBAL_ROOT_UID)) { +- pr_err("no permission to shrink purgeable ashmem!\n"); +- return 0; +- } +- buf = memdup_user_nul(buffer, count); +- buf = strstrip(buf); +- if (sscanf(buf, "%u %u", &ashmem_id, &create_time) != params_num) +- return -EINVAL; +- if (ashmem_id == PURGEABLE_ASHMEM_SHRINKALL_ARG && +- create_time == PURGEABLE_ASHMEM_SHRINKALL_ARG) +- ashmem_shrinkall(); +- else +- ashmem_shrink_by_id(ashmem_id, create_time); +- return count; +-} +- +-static int purgeable_ashmem_trigger_show(struct seq_file *s, void *d) +-{ +- struct task_struct *tsk = NULL; +- struct purgeable_ashmem_trigger_args cb_args; +- const struct cred *cred = current_cred(); +- +- if (!cred) +- return -EINVAL; +- +- if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && +- !uid_eq(cred->euid, GLOBAL_ROOT_UID)) { +- pr_err("no permission to shrink purgeable ashmem!\n"); +- return -EINVAL; +- } +- seq_puts(s, "Process purgeable ashmem detail info:\n"); +- seq_puts(s, "----------------------------------------------------\n"); +- seq_printf(s, "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n", +- "process_name", "pid", "adj", "fd", +- "ashmem_name", "size", "id", "time", "ref_count", "purged"); +- +- ashmem_mutex_lock(); +- rcu_read_lock(); +- for_each_process(tsk) { +- if (tsk->flags & PF_KTHREAD) +- continue; +- cb_args.seq = s; +- cb_args.tsk = tsk; +- +- task_lock(tsk); +- iterate_fd(tsk->files, 0, +- purgeable_ashmem_trigger_cb, (void *)&cb_args); +- task_unlock(tsk); +- } +- rcu_read_unlock(); +- ashmem_mutex_unlock(); +- seq_puts(s, "----------------------------------------------------\n"); +- return 0; +-} +- +-static int purgeable_ashmem_trigger_open(struct inode *inode, +- struct file *file) +-{ +- return single_open(file, purgeable_ashmem_trigger_show, +- inode->i_private); +-} +- +-static const struct proc_ops purgeable_ashmem_trigger_fops = { +- .proc_open = purgeable_ashmem_trigger_open, +- .proc_write = purgeable_ashmem_trigger_write, +- .proc_read = seq_read, +- .proc_lseek = seq_lseek, +- .proc_release = single_release, +-}; +- +-void init_purgeable_ashmem_trigger(void) +-{ +- struct proc_dir_entry *entry = NULL; +- +- entry = proc_create_data("purgeable_ashmem_trigger", 0660, +- NULL, &purgeable_ashmem_trigger_fops, NULL); +- if (!entry) +- pr_err("Failed to create purgeable ashmem trigger\n"); +-} +diff --git a/mm/rmap.c b/mm/rmap.c +index d61242e91..9f795b93c 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -75,7 +75,6 @@ + #include + #include + #include +-#include + + #include + +@@ -812,10 +811,6 @@ static bool folio_referenced_one(struct folio *folio, + while (page_vma_mapped_walk(&pvmw)) { + address = pvmw.address; + +-#ifdef CONFIG_MEM_PURGEABLE +- if (!(vma->vm_flags & VM_PURGEABLE)) +- pra->vm_flags &= ~VM_PURGEABLE; +-#endif + if ((vma->vm_flags & VM_LOCKED) && + (!folio_test_large(folio) || !pvmw.pte)) { + /* Restore the mlock which got missed */ +@@ -855,9 +850,6 @@ 
static bool folio_referenced_one(struct folio *folio, + if (referenced) { + pra->referenced++; + pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; +-#ifdef CONFIG_MEM_PURGEABLE +- pra->vm_flags |= vma->vm_flags & ~VM_PURGEABLE; +-#endif + } + + if (!pra->mapcount) +@@ -909,9 +901,6 @@ int folio_referenced(struct folio *folio, int is_locked, + struct folio_referenced_arg pra = { + .mapcount = folio_mapcount(folio), + .memcg = memcg, +-#ifdef CONFIG_MEM_PURGEABLE +- .vm_flags = VM_PURGEABLE, +-#endif + }; + struct rmap_walk_control rwc = { + .rmap_one = folio_referenced_one, +@@ -1533,13 +1522,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, + /* Unexpected PMD-mapped THP? */ + VM_BUG_ON_FOLIO(!pvmw.pte, folio); + +-#ifdef CONFIG_MEM_PURGEABLE +- if ((vma->vm_flags & VM_PURGEABLE) && !lock_uxpte(vma, address)) { +- ret = false; +- page_vma_mapped_walk_done(&pvmw); +- break; +- } +-#endif + /* + * If the folio is in an mlock()d vma, we must not swap it out. + */ +@@ -1657,17 +1639,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, + set_pte_at(mm, address, pvmw.pte, pteval); + } + +-#ifdef CONFIG_MEM_PURGEABLE +- } else if ((vma->vm_flags & VM_PURGEABLE) || (pte_unused(pteval) && +- !userfaultfd_armed(vma))) { +-#else + } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { +-#endif +-#ifdef CONFIG_MEM_PURGEABLE +- if (vma->vm_flags & VM_PURGEABLE) +- unlock_uxpte(vma, address); +-#endif +- + /* + * The guest indicated that the page content is of no + * interest anymore. Simply discard the pte, vmscan +diff --git a/mm/slub.c b/mm/slub.c +index 9c6d8f285..d2544c88a 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -452,26 +452,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) + *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); + } + +-/* +- * See comment in calculate_sizes(). +- */ +-static inline bool freeptr_outside_object(struct kmem_cache *s) +-{ +- return s->offset >= s->inuse; +-} +- +-/* +- * Return offset of the end of info block which is inuse + free pointer if +- * not overlapping with object. +- */ +-static inline unsigned int get_info_end(struct kmem_cache *s) +-{ +- if (freeptr_outside_object(s)) +- return s->inuse + sizeof(void *); +- else +- return s->inuse; +-} +- + /* Loop over all objects in a slab */ + #define for_each_object(__p, __s, __addr, __objects) \ + for (__p = fixup_red_left(__s, __addr); \ +@@ -647,50 +627,6 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, + return false; + } + +-/* +- * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API +- * family will round up the real request size to these fixed ones, so +- * there could be an extra area than what is requested. Save the original +- * request size in the meta data area, for better debug and sanity check. +- */ +-static inline void set_orig_size(struct kmem_cache *s, +- void *object, unsigned int orig_size) +-{ +- void *p = kasan_reset_tag(object); +- +- if (!slub_debug_orig_size(s)) +- return; +- +-#ifdef CONFIG_KASAN_GENERIC +- /* +- * KASAN can save its free meta data inside of the object at offset 0. +- * If this meta data size is larger than 'orig_size', it will overlap +- * the data redzone in [orig_size+1, object_size]. Thus, we adjust +- * 'orig_size' to be as at least as big as KASAN's meta data. 
+- */ +- if (kasan_metadata_size(s, true) > orig_size) +- orig_size = kasan_meta_size; +-#endif +- +- p += get_info_end(s); +- p += sizeof(struct track) * 2; +- +- *(unsigned int *)p = orig_size; +-} +- +-static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) +-{ +- void *p = kasan_reset_tag(object); +- +- if (!slub_debug_orig_size(s)) +- return s->object_size; +- +- p += get_info_end(s); +- p += sizeof(struct track) * 2; +- +- return *(unsigned int *)p; +-} +- + #ifdef CONFIG_SLUB_DEBUG + static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; + static DEFINE_SPINLOCK(object_map_lock); +@@ -804,6 +740,26 @@ static void print_section(char *level, char *text, u8 *addr, + metadata_access_disable(); + } + ++/* ++ * See comment in calculate_sizes(). ++ */ ++static inline bool freeptr_outside_object(struct kmem_cache *s) ++{ ++ return s->offset >= s->inuse; ++} ++ ++/* ++ * Return offset of the end of info block which is inuse + free pointer if ++ * not overlapping with object. ++ */ ++static inline unsigned int get_info_end(struct kmem_cache *s) ++{ ++ if (freeptr_outside_object(s)) ++ return s->inuse + sizeof(void *); ++ else ++ return s->inuse; ++} ++ + static struct track *get_track(struct kmem_cache *s, void *object, + enum track_item alloc) + { +@@ -904,6 +860,50 @@ static void print_slab_info(const struct slab *slab) + folio_flags(folio, 0)); + } + ++/* ++ * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API ++ * family will round up the real request size to these fixed ones, so ++ * there could be an extra area than what is requested. Save the original ++ * request size in the meta data area, for better debug and sanity check. ++ */ ++static inline void set_orig_size(struct kmem_cache *s, ++ void *object, unsigned int orig_size) ++{ ++ void *p = kasan_reset_tag(object); ++ ++ if (!slub_debug_orig_size(s)) ++ return; ++ ++#ifdef CONFIG_KASAN_GENERIC ++ /* ++ * KASAN could save its free meta data in object's data area at ++ * offset 0, if the size is larger than 'orig_size', it will ++ * overlap the data redzone in [orig_size+1, object_size], and ++ * the check should be skipped. ++ */ ++ if (kasan_metadata_size(s, true) > orig_size) ++ orig_size = s->object_size; ++#endif ++ ++ p += get_info_end(s); ++ p += sizeof(struct track) * 2; ++ ++ *(unsigned int *)p = orig_size; ++} ++ ++static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) ++{ ++ void *p = kasan_reset_tag(object); ++ ++ if (!slub_debug_orig_size(s)) ++ return s->object_size; ++ ++ p += get_info_end(s); ++ p += sizeof(struct track) * 2; ++ ++ return *(unsigned int *)p; ++} ++ + void skip_orig_size_check(struct kmem_cache *s, const void *object) + { + set_orig_size(s, (void *)object, s->object_size); +@@ -1755,6 +1755,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, + int objects) {} + static inline void dec_slabs_node(struct kmem_cache *s, int node, + int objects) {} ++ + #ifndef CONFIG_SLUB_TINY + static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, + void **freelist, void *nextfree) +@@ -1794,19 +1795,12 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, + */ + if (init) { + int rsize; +- unsigned int orig_size; + +- orig_size = get_orig_size(s, x); + if (!kasan_has_integrated_init()) +- memset(kasan_reset_tag(x), 0, orig_size); ++ memset(kasan_reset_tag(x), 0, s->object_size); + rsize = (s->flags & SLAB_RED_ZONE) ? 
s->red_left_pad : 0; + memset((char *)kasan_reset_tag(x) + s->inuse, 0, + s->size - s->inuse - rsize); +- /* +- * Restore orig_size, otherwize kmalloc redzone overwritten +- * would be reported +- */ +- set_orig_size(s, x, orig_size); + } + /* KASAN might put x into memory quarantine, delaying its reuse. */ + return kasan_slab_free(s, x, init); +diff --git a/mm/swap.c b/mm/swap.c +index 03d239633..42082eba4 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -312,13 +312,6 @@ void lru_note_cost(struct lruvec *lruvec, bool file, + + void lru_note_cost_refault(struct folio *folio) + { +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (page_is_file_lru(folio_page(folio, 0))) { +- lru_note_cost(&(folio_pgdat(folio)->__lruvec), 1, folio_nr_pages(folio), 0); +- return; +- } +-#endif +- + lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio), + folio_nr_pages(folio), 0); + } +diff --git a/mm/swapfile.c b/mm/swapfile.c +index a1ba6f8a8..c856d6bb2 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -46,7 +46,6 @@ + #include + #include + #include +-#include + #include "internal.h" + #include "swap.h" + +@@ -3266,28 +3265,6 @@ void si_swapinfo(struct sysinfo *val) + spin_unlock(&swap_lock); + } + +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +-bool free_swap_is_low(void) +-{ +- unsigned int type; +- unsigned long long freeswap = 0; +- unsigned long nr_to_be_unused = 0; +- +- spin_lock(&swap_lock); +- for (type = 0; type < nr_swapfiles; type++) { +- struct swap_info_struct *si = swap_info[type]; +- +- if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) +- nr_to_be_unused += si->inuse_pages; +- } +- freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; +- spin_unlock(&swap_lock); +- +- return (freeswap < get_free_swap_threshold()); +-} +-EXPORT_SYMBOL(free_swap_is_low); +-#endif +- + /* + * Verify that a swap entry is valid and increment its swap map count. + * +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 941894990..49456b725 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -71,12 +71,103 @@ + #define CREATE_TRACE_POINTS + #include + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +-#include +-#endif +-#ifdef CONFIG_RECLAIM_ACCT +-#include +-#endif ++struct scan_control { ++ /* How many pages shrink_list() should reclaim */ ++ unsigned long nr_to_reclaim; ++ ++ /* ++ * Nodemask of nodes allowed by the caller. If NULL, all nodes ++ * are scanned. ++ */ ++ nodemask_t *nodemask; ++ ++ /* ++ * The memory cgroup that hit its limit and as a result is the ++ * primary target of this reclaim invocation. ++ */ ++ struct mem_cgroup *target_mem_cgroup; ++ ++ /* ++ * Scan pressure balancing between anon and file LRUs ++ */ ++ unsigned long anon_cost; ++ unsigned long file_cost; ++ ++ /* Can active folios be deactivated as part of reclaim? */ ++#define DEACTIVATE_ANON 1 ++#define DEACTIVATE_FILE 2 ++ unsigned int may_deactivate:2; ++ unsigned int force_deactivate:1; ++ unsigned int skipped_deactivate:1; ++ ++ /* Writepage batching in laptop mode; RECLAIM_WRITE */ ++ unsigned int may_writepage:1; ++ ++ /* Can mapped folios be reclaimed? */ ++ unsigned int may_unmap:1; ++ ++ /* Can folios be swapped as part of reclaim? */ ++ unsigned int may_swap:1; ++ ++ /* Proactive reclaim invoked by userspace through memory.reclaim */ ++ unsigned int proactive:1; ++ ++ /* ++ * Cgroup memory below memory.low is protected as long as we ++ * don't threaten to OOM. 
If any cgroup is reclaimed at ++ * reduced force or passed over entirely due to its memory.low ++ * setting (memcg_low_skipped), and nothing is reclaimed as a ++ * result, then go back for one more cycle that reclaims the protected ++ * memory (memcg_low_reclaim) to avert OOM. ++ */ ++ unsigned int memcg_low_reclaim:1; ++ unsigned int memcg_low_skipped:1; ++ ++ unsigned int hibernation_mode:1; ++ ++ /* One of the zones is ready for compaction */ ++ unsigned int compaction_ready:1; ++ ++ /* There is easily reclaimable cold cache in the current node */ ++ unsigned int cache_trim_mode:1; ++ ++ /* The file folios on the current node are dangerously low */ ++ unsigned int file_is_tiny:1; ++ ++ /* Always discard instead of demoting to lower tier memory */ ++ unsigned int no_demotion:1; ++ ++ /* Allocation order */ ++ s8 order; ++ ++ /* Scan (total_size >> priority) pages at once */ ++ s8 priority; ++ ++ /* The highest zone to isolate folios for reclaim from */ ++ s8 reclaim_idx; ++ ++ /* This context's GFP mask */ ++ gfp_t gfp_mask; ++ ++ /* Incremented by the number of inactive pages that were scanned */ ++ unsigned long nr_scanned; ++ ++ /* Number of pages freed so far during a call to shrink_zones() */ ++ unsigned long nr_reclaimed; ++ ++ struct { ++ unsigned int dirty; ++ unsigned int unqueued_dirty; ++ unsigned int congested; ++ unsigned int writeback; ++ unsigned int immediate; ++ unsigned int file_taken; ++ unsigned int taken; ++ } nr; ++ ++ /* for recording the reclaimed slab by now */ ++ struct reclaim_state reclaim_state; ++}; + + #ifdef ARCH_HAS_PREFETCHW + #define prefetchw_prev_lru_folio(_folio, _base, _field) \ +@@ -92,10 +183,6 @@ + #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) + #endif + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +-unsigned int enough_inactive_file = 1; +-#endif +- + /* + * From 0 .. 200. Higher means more swappy. + */ +@@ -343,7 +430,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) + } + + /* Returns true for reclaim through cgroup limits or cgroup interfaces. */ +-bool cgroup_reclaim(struct scan_control *sc) ++static bool cgroup_reclaim(struct scan_control *sc) + { + return sc->target_mem_cgroup; + } +@@ -370,7 +457,7 @@ static bool root_reclaim(struct scan_control *sc) + * This function tests whether the vmscan currently in progress can assume + * that the normal dirty throttling mechanism is operational. 
+ */ +-bool writeback_throttling_sane(struct scan_control *sc) ++static bool writeback_throttling_sane(struct scan_control *sc) + { + if (!cgroup_reclaim(sc)) + return true; +@@ -402,7 +489,7 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, + return 0; + } + +-bool cgroup_reclaim(struct scan_control *sc) ++static bool cgroup_reclaim(struct scan_control *sc) + { + return false; + } +@@ -412,7 +499,7 @@ static bool root_reclaim(struct scan_control *sc) + return true; + } + +-bool writeback_throttling_sane(struct scan_control *sc) ++static bool writeback_throttling_sane(struct scan_control *sc) + { + return true; + } +@@ -571,27 +658,12 @@ unsigned long zone_reclaimable_pages(struct zone *zone) + * @lru: lru to use + * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list) + */ +-unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, ++static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, + int zone_idx) + { + unsigned long size = 0; + int zid; + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!mem_cgroup_disabled() && is_node_lruvec(lruvec)) { +- for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) { +- struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; +- +- if (!managed_zone(zone)) +- continue; +- +- size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); +- } +- +- return size; +- } +-#endif +- + for (zid = 0; zid <= zone_idx; zid++) { + struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; + +@@ -965,7 +1037,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, + * + * Returns the number of reclaimed slab objects. + */ +-unsigned long shrink_slab(gfp_t gfp_mask, int nid, ++static unsigned long shrink_slab(gfp_t gfp_mask, int nid, + struct mem_cgroup *memcg, + int priority) + { +@@ -1452,7 +1524,6 @@ void folio_putback_lru(struct folio *folio) + enum folio_references { + FOLIOREF_RECLAIM, + FOLIOREF_RECLAIM_CLEAN, +- FOLIOREF_RECLAIM_PURGEABLE, + FOLIOREF_KEEP, + FOLIOREF_ACTIVATE, + }; +@@ -1474,16 +1545,10 @@ static enum folio_references folio_check_references(struct folio *folio, + if (vm_flags & VM_LOCKED) + return FOLIOREF_ACTIVATE; + +- + /* rmap lock contention: rotate */ + if (referenced_ptes == -1) + return FOLIOREF_KEEP; + +-#ifdef CONFIG_MEM_PURGEABLE +- if (vm_flags & VM_PURGEABLE) +- return FOLIOREF_RECLAIM_PURGEABLE; +-#endif +- + if (referenced_ptes) { + /* + * All mapped folios start out with page table +@@ -1644,7 +1709,7 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) + /* + * shrink_folio_list() returns the number of reclaimed pages + */ +-unsigned int shrink_folio_list(struct list_head *folio_list, ++static unsigned int shrink_folio_list(struct list_head *folio_list, + struct pglist_data *pgdat, struct scan_control *sc, + struct reclaim_stat *stat, bool ignore_references) + { +@@ -1810,7 +1875,6 @@ unsigned int shrink_folio_list(struct list_head *folio_list, + goto keep_locked; + case FOLIOREF_RECLAIM: + case FOLIOREF_RECLAIM_CLEAN: +- case FOLIOREF_RECLAIM_PURGEABLE: + ; /* try to reclaim the folio below */ + } + +@@ -1831,7 +1895,7 @@ unsigned int shrink_folio_list(struct list_head *folio_list, + * Lazyfree folio could be freed directly + */ + if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { +- if (!folio_test_swapcache(folio) && references != FOLIOREF_RECLAIM_PURGEABLE) { ++ if (!folio_test_swapcache(folio)) { + if (!(sc->gfp_mask & __GFP_IO)) + goto keep_locked; + if (folio_maybe_dma_pinned(folio)) +@@ -1913,7 
+1977,7 @@ unsigned int shrink_folio_list(struct list_head *folio_list, + goto activate_locked; + + mapping = folio_mapping(folio); +- if (folio_test_dirty(folio) && references != FOLIOREF_RECLAIM_PURGEABLE) { ++ if (folio_test_dirty(folio)) { + /* + * Only kswapd can writeback filesystem folios + * to avoid risk of stack overflow. But avoid +@@ -2028,11 +2092,10 @@ unsigned int shrink_folio_list(struct list_head *folio_list, + } + } + +- if (folio_test_anon(folio) && (!folio_test_swapbacked(folio) || references == FOLIOREF_RECLAIM_PURGEABLE)) { ++ if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { + /* follow __remove_mapping for reference */ + if (!folio_ref_freeze(folio, 1)) + goto keep_locked; +- + /* + * The folio has only one reference left, which is + * from the isolation. After the caller puts the +@@ -2226,7 +2289,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, + * + * returns how many pages were moved onto *@dst. + */ +-unsigned long isolate_lru_folios(unsigned long nr_to_scan, ++static unsigned long isolate_lru_folios(unsigned long nr_to_scan, + struct lruvec *lruvec, struct list_head *dst, + unsigned long *nr_scanned, struct scan_control *sc, + enum lru_list lru) +@@ -2411,15 +2474,11 @@ static int too_many_isolated(struct pglist_data *pgdat, int file, + * + * Returns the number of pages moved to the given lruvec. + */ +-unsigned int move_folios_to_lru(struct lruvec *lruvec, ++static unsigned int move_folios_to_lru(struct lruvec *lruvec, + struct list_head *list) + { + int nr_pages, nr_moved = 0; + LIST_HEAD(folios_to_free); +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- bool prot; +- bool file; +-#endif + + while (!list_empty(list)) { + struct folio *folio = lru_to_folio(list); +@@ -2467,23 +2526,8 @@ unsigned int move_folios_to_lru(struct lruvec *lruvec, + lruvec_add_folio(lruvec, folio); + nr_pages = folio_nr_pages(folio); + nr_moved += nr_pages; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (folio_test_active(folio)) { +- prot = is_prot_page(folio_page(folio, 0)); +- file = page_is_file_lru(folio_page(folio, 0)); +- if (!prot && file) { +- lruvec = folio_lruvec(folio); +- workingset_age_nonresident(lruvec, +- nr_pages); +- } else { +- workingset_age_nonresident(lruvec, +- nr_pages); +- } +- } +-#else + if (folio_test_active(folio)) + workingset_age_nonresident(lruvec, nr_pages); +-#endif + } + + /* +@@ -2499,7 +2543,7 @@ unsigned int move_folios_to_lru(struct lruvec *lruvec, + * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case + * we should not throttle. Otherwise it is safe to do so. + */ +-int current_may_throttle(void) ++static int current_may_throttle(void) + { + return !(current->flags & PF_LOCAL_THROTTLE); + } +@@ -2508,7 +2552,7 @@ int current_may_throttle(void) + * shrink_inactive_list() is a helper for shrink_node(). It returns the number + * of reclaimed pages + */ +-unsigned long shrink_inactive_list(unsigned long nr_to_scan, ++static unsigned long shrink_inactive_list(unsigned long nr_to_scan, + struct lruvec *lruvec, struct scan_control *sc, + enum lru_list lru) + { +@@ -2526,9 +2570,6 @@ unsigned long shrink_inactive_list(unsigned long nr_to_scan, + if (stalled) + return 0; + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- sc->isolate_count++; +-#endif + /* wait a bit for the reclaimer. 
*/ + stalled = true; + reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); +@@ -2570,14 +2611,7 @@ unsigned long shrink_inactive_list(unsigned long nr_to_scan, + __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); + spin_unlock_irq(&lruvec->lru_lock); + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (file) +- lru_note_cost(node_lruvec(pgdat), file, stat.nr_pageout, nr_scanned - nr_reclaimed); +- else +- lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); +-#else + lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); +-#endif + mem_cgroup_uncharge_list(&folio_list); + free_unref_page_list(&folio_list); + +@@ -2638,7 +2672,7 @@ unsigned long shrink_inactive_list(unsigned long nr_to_scan, + * The downside is that we have to touch folio->_refcount against each folio. + * But we had to alter folio->flags anyway. + */ +-void shrink_active_list(unsigned long nr_to_scan, ++static void shrink_active_list(unsigned long nr_to_scan, + struct lruvec *lruvec, + struct scan_control *sc, + enum lru_list lru) +@@ -2794,7 +2828,7 @@ unsigned long reclaim_pages(struct list_head *folio_list) + return nr_reclaimed; + } + +-unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, ++static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, + struct lruvec *lruvec, struct scan_control *sc) + { + if (is_active_lru(lru)) { +@@ -2836,7 +2870,7 @@ unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, + * 1TB 101 10GB + * 10TB 320 32GB + */ +-bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) ++static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) + { + enum lru_list active_lru = inactive_lru + LRU_ACTIVE; + unsigned long inactive, active; +@@ -2855,6 +2889,13 @@ bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) + return inactive * inactive_ratio < active; + } + ++enum scan_balance { ++ SCAN_EQUAL, ++ SCAN_FRACT, ++ SCAN_ANON, ++ SCAN_FILE, ++}; ++ + static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) + { + unsigned long file; +@@ -5496,7 +5537,6 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) + goto restart; + } + +-#ifndef CONFIG_HYPERHOLD_FILE_LRU + static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) + { + struct blk_plug plug; +@@ -5517,7 +5557,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc + + blk_finish_plug(&plug); + } +-#endif + + #else /* !CONFIG_MEMCG */ + +@@ -5526,12 +5565,10 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) + BUILD_BUG(); + } + +-#ifndef CONFIG_HYPERHOLD_FILE_LRU + static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) + { + BUILD_BUG(); + } +-#endif + + #endif + +@@ -6251,7 +6288,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control * + + #endif /* CONFIG_LRU_GEN */ + +-void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) ++static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) + { + unsigned long nr[NR_LRU_LISTS]; + unsigned long targets[NR_LRU_LISTS]; +@@ -6439,7 +6476,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, + return inactive_lru_pages > pages_for_compaction; + } + +-#ifndef CONFIG_HYPERHOLD_FILE_LRU + static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) + { + struct mem_cgroup *target_memcg = sc->target_mem_cgroup; +@@ -6608,7 +6644,6 @@ 
static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) + if (reclaimable) + pgdat->kswapd_failures = 0; + } +-#endif + + /* + * Returns true if compaction should go ahead for a costly-order request, or +@@ -6762,11 +6797,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) + if (zone->zone_pgdat == last_pgdat) + continue; + last_pgdat = zone->zone_pgdat; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- shrink_node_hyperhold(zone->zone_pgdat, sc); +-#else + shrink_node(zone->zone_pgdat, sc); +-#endif + } + + if (first_pgdat) +@@ -6783,19 +6814,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) + { + struct lruvec *target_lruvec; + unsigned long refaults; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- struct lruvec *lruvec; +-#endif + + if (lru_gen_enabled()) + return; + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- lruvec = node_lruvec(pgdat); +- lruvec->refaults[0] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_ANON); /* modified */ +- lruvec->refaults[1] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_FILE); /* modified */ +-#endif +- + target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); + refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); + target_lruvec->refaults[WORKINGSET_ANON] = refaults; +@@ -7097,9 +7119,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, + .reclaim_idx = MAX_NR_ZONES - 1, + .may_swap = !noswap, + }; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- unsigned long nr[NR_LRU_LISTS]; +-#endif + + WARN_ON_ONCE(!current->reclaim_state); + +@@ -7116,17 +7135,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, + * will pick up pages from other mem cgroup's as well. We hack + * the priority and make it zero. + */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- nr[LRU_ACTIVE_ANON] = lruvec_lru_size(lruvec, +- LRU_ACTIVE_ANON, MAX_NR_ZONES); +- nr[LRU_INACTIVE_ANON] = lruvec_lru_size(lruvec, +- LRU_INACTIVE_ANON, MAX_NR_ZONES); +- nr[LRU_ACTIVE_FILE] = 0; +- nr[LRU_INACTIVE_FILE] = 0; +- shrink_anon_memcg(pgdat, memcg, &sc, nr); +-#else + shrink_lruvec(lruvec, &sc); +-#endif + + trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); + +@@ -7341,11 +7350,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, + * Historically care was taken to put equal pressure on all zones but + * now pressure is applied based on node LRU order. + */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- shrink_node_hyperhold(pgdat, sc); +-#else + shrink_node(pgdat, sc); +-#endif + + /* + * Fragmentation may mean that the system cannot be rebalanced for +@@ -7796,9 +7801,6 @@ static int kswapd(void *p) + */ + trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, + alloc_order); +-#ifdef CONFIG_MEMORY_MONITOR +- kswapd_monitor_wake_up_queue(); +-#endif + reclaim_order = balance_pgdat(pgdat, alloc_order, + highest_zoneidx); + if (reclaim_order < alloc_order) +@@ -7939,10 +7941,6 @@ void __meminit kswapd_stop(int nid) + pgdat_kswapd_unlock(pgdat); + } + +-#ifdef CONFIG_MEM_PURGEABLE_DEBUG +-static void __init purgeable_debugfs_init(void); +-#endif +- + static int __init kswapd_init(void) + { + int nid; +@@ -7950,9 +7948,6 @@ static int __init kswapd_init(void) + swap_setup(); + for_each_node_state(nid, N_MEMORY) + kswapd_run(nid); +-#ifdef CONFIG_MEM_PURGEABLE_DEBUG +- purgeable_debugfs_init(); +-#endif + return 0; + } + +@@ -8068,11 +8063,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in + * priorities until we have enough memory freed. 
+ */ + do { +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- shrink_node_hyperhold(pgdat, &sc); +-#else + shrink_node(pgdat, &sc); +-#endif + } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); + } + +@@ -8178,75 +8169,3 @@ void check_move_unevictable_folios(struct folio_batch *fbatch) + } + } + EXPORT_SYMBOL_GPL(check_move_unevictable_folios); +- +-#ifdef CONFIG_MEM_PURGEABLE_DEBUG +-static unsigned long purgeable_node(pg_data_t *pgdata, struct scan_control *sc) +-{ +- struct mem_cgroup *memcg = NULL; +- unsigned long nr = 0; +-#ifdef CONFIG_MEMCG +- while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))) +-#endif +- { +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata); +- +- shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc); +- nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc); +- } +- +- pr_info("reclaim %lu purgeable pages.\n", nr); +- +- return nr; +-} +- +-static int purgeable(struct ctl_table *table, int write, void *buffer, +- size_t *lenp, loff_t *ppos) +-{ +- struct scan_control sc = { +- .gfp_mask = GFP_KERNEL, +- .order = 0, +- .priority = DEF_PRIORITY, +- .may_deactivate = DEACTIVATE_ANON, +- .may_writepage = 1, +- .may_unmap = 1, +- .may_swap = 1, +- .reclaim_idx = MAX_NR_ZONES - 1, +- }; +- int nid = 0; +- const struct cred *cred = current_cred(); +- if (!cred) +- return 0; +- +- if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && +- !uid_eq(cred->euid, GLOBAL_ROOT_UID)) { +- pr_err("no permission to shrink purgeable heap!\n"); +- return -EINVAL; +- } +- for_each_node_state(nid, N_MEMORY) +- purgeable_node(NODE_DATA(nid), &sc); +- return 0; +-} +- +-static struct ctl_table ker_tab[] = { +- { +- .procname = "purgeable", +- .mode = 0666, +- .proc_handler = purgeable, +- }, +- {}, +-}; +- +-static struct ctl_table_header *purgeable_header; +- +-static void __init purgeable_debugfs_init(void) +-{ +- purgeable_header = register_sysctl("kernel", ker_tab); +- if (!purgeable_header) +- pr_err("register purgeable sysctl table failed.\n"); +-} +- +-static void __exit purgeable_debugfs_exit(void) +-{ +- unregister_sysctl_table(purgeable_header); +-} +-#endif /* CONFIG_MEM_PURGEABLE_DEBUG */ +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 3d8ffcc40..578916978 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1172,10 +1172,6 @@ const char * const vmstat_text[] = { + "nr_zone_active_anon", + "nr_zone_inactive_file", + "nr_zone_active_file", +-#ifdef CONFIG_MEM_PURGEABLE +- "nr_zone_inactive_purgeable", +- "nr_zone_active_purgeable", +-#endif + "nr_zone_unevictable", + "nr_zone_write_pending", + "nr_mlock", +@@ -1203,10 +1199,6 @@ const char * const vmstat_text[] = { + "nr_active_anon", + "nr_inactive_file", + "nr_active_file", +-#ifdef CONFIG_MEM_PURGEABLE +- "nr_inactive_purgeable", +- "nr_active_purgeable", +-#endif + "nr_unevictable", + "nr_slab_reclaimable", + "nr_slab_unreclaimable", +@@ -1416,24 +1408,6 @@ const char * const vmstat_text[] = { + "vma_lock_retry", + "vma_lock_miss", + #endif +-#ifdef CONFIG_HYPERHOLD_ZSWAPD +- "zswapd_running", +- "zswapd_hit_refaults", +- "zswapd_medium_press", +- "zswapd_critical_press", +- "zswapd_memcg_ratio_skip", +- "zswapd_memcg_refault_skip", +- "zswapd_swapout", +- "zswapd_empty_round", +- "zswapd_empty_round_skip_times", +- "zswapd_snapshot_times", +- "zswapd_reclaimed", +- "zswapd_scanned", +-#endif +-#ifdef CONFIG_HYPERHOLD_MEMCG +- "freeze_reclaimed", +- "freeze_reclaim_count", +-#endif + #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ + }; + #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ +@@ -1967,7 
+1941,7 @@ int vmstat_refresh(struct ctl_table *table, int write, + + static void vmstat_update(struct work_struct *w) + { +- if (refresh_cpu_vm_stats(true) && !cpu_isolated(smp_processor_id())) { ++ if (refresh_cpu_vm_stats(true)) { + /* + * Counters were updated so we expect more updates + * to occur in the future. Keep on running the +@@ -2066,8 +2040,7 @@ static void vmstat_shepherd(struct work_struct *w) + if (cpu_is_isolated(cpu)) + continue; + +- if (!delayed_work_pending(dw) && need_update(cpu) && +- !cpu_isolated(cpu)) ++ if (!delayed_work_pending(dw) && need_update(cpu)) + queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); + + cond_resched(); +diff --git a/mm/workingset.c b/mm/workingset.c +index 34c1ccad6..9110957be 100644 +--- a/mm/workingset.c ++++ b/mm/workingset.c +@@ -398,16 +398,7 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) + memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); + eviction = atomic_long_read(&lruvec->nonresident_age); + eviction >>= bucket_order; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!is_prot_page(folio_page(folio, 0)) && page_is_file_lru(folio_page(folio, 0))) { +- lruvec = folio_lruvec(folio); +- workingset_age_nonresident(lruvec, folio_nr_pages(folio)); +- } else { +- workingset_age_nonresident(lruvec, folio_nr_pages(folio)); +- } +-#else + workingset_age_nonresident(lruvec, folio_nr_pages(folio)); +-#endif + return pack_shadow(memcgid, pgdat, eviction, + folio_test_workingset(folio)); + } +@@ -456,17 +447,9 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) + * would be better if the root_mem_cgroup existed in all + * configurations instead. + */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (memcgid != -1) { +- eviction_memcg = mem_cgroup_from_id(memcgid); +- if (!mem_cgroup_disabled() && !eviction_memcg) +- return false; +- } +-#else + eviction_memcg = mem_cgroup_from_id(memcgid); + if (!mem_cgroup_disabled() && !eviction_memcg) + return false; +-#endif + + eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); + refault = atomic_long_read(&eviction_lruvec->nonresident_age); +@@ -496,21 +479,10 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) + * workingset competition needs to consider anon or not depends + * on having free swap space. 
+ */ +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- workingset_size = lruvec_page_state(node_lruvec(pgdat), NR_ACTIVE_FILE); +-#else + workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); +-#endif +- + if (!file) { +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- workingset_size += lruvec_page_state(node_lruvec(pgdat), +- NR_INACTIVE_FILE); +-#else +- + workingset_size += lruvec_page_state(eviction_lruvec, + NR_INACTIVE_FILE); +-#endif + } + if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) { + workingset_size += lruvec_page_state(eviction_lruvec, +@@ -565,33 +537,14 @@ void workingset_refault(struct folio *folio, void *shadow) + pgdat = folio_pgdat(folio); + lruvec = mem_cgroup_lruvec(memcg, pgdat); + +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!is_prot_page(folio_page(folio, 0)) && file) +- mod_lruvec_state(node_lruvec(pgdat), +- WORKINGSET_REFAULT_BASE + file, folio_nr_pages(folio)); +- else +- mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); +-#else + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); +-#endif + + if (!workingset_test_recent(shadow, file, &workingset)) + goto out; + + folio_set_active(folio); +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!is_prot_page(folio_page(folio, 0)) && file) { +- workingset_age_nonresident(node_lruvec(pgdat), +- folio_nr_pages(folio)); +- mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, folio_nr_pages(folio)); +- } else { +- workingset_age_nonresident(lruvec, nr); +- mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr); +- } +-#else + workingset_age_nonresident(lruvec, nr); + mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr); +-#endif + + /* Folio was active prior to eviction */ + if (workingset) { +@@ -601,14 +554,7 @@ void workingset_refault(struct folio *folio, void *shadow) + * putback + */ + lru_note_cost_refault(folio); +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!is_prot_page(folio_page(folio, 0)) && file) +- mod_lruvec_state(node_lruvec(pgdat), WORKINGSET_RESTORE_BASE + file, folio_nr_pages(folio)); +- else +- mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr); +-#else + mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr); +-#endif + } + out: + rcu_read_unlock(); +@@ -621,7 +567,6 @@ void workingset_refault(struct folio *folio, void *shadow) + void workingset_activation(struct folio *folio) + { + struct mem_cgroup *memcg; +- struct lruvec *lruvec; + + rcu_read_lock(); + /* +@@ -634,16 +579,7 @@ void workingset_activation(struct folio *folio) + memcg = folio_memcg_rcu(folio); + if (!mem_cgroup_disabled() && !memcg) + goto out; +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- if (!is_prot_page(folio_page(folio, 0)) && page_is_file_lru(folio_page(folio, 0))) { +- lruvec = folio_lruvec(folio); +- workingset_age_nonresident(lruvec, folio_nr_pages(folio)); +- } else { +- workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio)); +- } +-#else + workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio)); +-#endif + out: + rcu_read_unlock(); + } +@@ -724,7 +660,6 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, + * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE + */ + #ifdef CONFIG_MEMCG +-#ifndef CONFIG_HYPERHOLD_FILE_LRU + if (sc->memcg) { + struct lruvec *lruvec; + int i; +@@ -739,7 +674,6 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, + pages += lruvec_page_state_local( + lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; + } else +-#endif + #endif + pages = node_present_pages(sc->nid); + +diff --git a/mm/zswapd.c 
b/mm/zswapd.c +deleted file mode 100644 +index d80a00d9f..000000000 +--- a/mm/zswapd.c ++++ /dev/null +@@ -1,911 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * mm/zswapd.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#ifdef CONFIG_RECLAIM_ACCT +-#include +-#endif +- +-#include "zswapd_internal.h" +-#include "internal.h" +- +-#define UNSET_ZRAM_WM_RATIO 0 +-#define ESWAP_PERCENT_CONSTANT 100 +-#define DEFAULT_ZRAM_WM_RATIO 37 +-#define SWAP_MORE_ZRAM (50 * (SZ_1M)) +- +-static wait_queue_head_t snapshotd_wait; +-static atomic_t snapshotd_wait_flag; +-static atomic_t snapshotd_init_flag = ATOMIC_INIT(0); +-static struct task_struct *snapshotd_task; +- +-static pid_t zswapd_pid = -1; +-static unsigned long long last_anon_pagefault; +-static unsigned long long anon_refault_ratio; +-static unsigned long long zswapd_skip_interval; +-static unsigned long last_zswapd_time; +-static unsigned long last_snapshot_time; +-bool last_round_is_empty; +- +- +-DECLARE_RWSEM(gs_lock); +-LIST_HEAD(gs_list); +- +-void unregister_group_swap(struct group_swap_device *gsdev) +-{ +- down_write(&gs_lock); +- list_del(&gsdev->list); +- up_write(&gs_lock); +- +- kfree(gsdev); +-} +-EXPORT_SYMBOL(unregister_group_swap); +- +-struct group_swap_device *register_group_swap(struct group_swap_ops *ops, void *priv) +-{ +- struct group_swap_device *gsdev = kzalloc(sizeof(struct group_swap_device), GFP_KERNEL); +- +- if (!gsdev) +- return NULL; +- +- gsdev->priv = priv; +- gsdev->ops = ops; +- +- down_write(&gs_lock); +- list_add(&gsdev->list, &gs_list); +- up_write(&gs_lock); +- +- return gsdev; +-} +-EXPORT_SYMBOL(register_group_swap); +- +-u64 memcg_data_size(struct mem_cgroup *memcg, int type) +-{ +- struct group_swap_device *gsdev = NULL; +- u64 size = 0; +- +- down_read(&gs_lock); +- list_for_each_entry(gsdev, &gs_list, list) +- size += gsdev->ops->group_data_size(memcg->id.id, type, gsdev->priv); +- up_read(&gs_lock); +- +- return size; +-} +- +-u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size) +-{ +- u64 swap_size = memcg_data_size(memcg, SWAP_SIZE); +- u64 read_size = 0; +- u64 ratio = atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio); +- struct group_swap_device *gsdev = NULL; +- +- if (req_size > div_u64(swap_size * ratio, ESWAP_PERCENT_CONSTANT)) +- req_size = div_u64(swap_size * ratio, ESWAP_PERCENT_CONSTANT); +- down_read(&gs_lock); +- list_for_each_entry(gsdev, &gs_list, list) { +- read_size += gsdev->ops->group_read(memcg->id.id, req_size - read_size, +- gsdev->priv); +- if (read_size >= req_size) +- break; +- } +- up_read(&gs_lock); +- +- return read_size; +-} +- +-static u64 swapout_memcg(struct mem_cgroup *memcg, u64 req_size) +-{ +- u64 cache_size = memcg_data_size(memcg, CACHE_SIZE); +- u64 swap_size = memcg_data_size(memcg, SWAP_SIZE); +- u64 all_size = cache_size + swap_size; +- u64 write_size = 0; +- u32 ratio = atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio); +- struct group_swap_device *gsdev = NULL; +- +- if (div_u64(all_size * ratio, ESWAP_PERCENT_CONSTANT) <= swap_size) +- return 0; +- if (req_size > div_u64(all_size * ratio, ESWAP_PERCENT_CONSTANT) - swap_size) +- req_size = div_u64(all_size * ratio, ESWAP_PERCENT_CONSTANT) - swap_size; +- down_read(&gs_lock); +- list_for_each_entry(gsdev, &gs_list, list) { +- write_size += gsdev->ops->group_write(memcg->id.id, req_size - write_size, +- gsdev->priv); +- if (write_size >= req_size) +- break; +- } +- up_read(&gs_lock); +- +- 
return write_size; +-} +- +-static u64 swapout(u64 req_size) +-{ +- struct mem_cgroup *memcg = NULL; +- u64 write_size = 0; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) { +- write_size += swapout_memcg(memcg, req_size - write_size); +- if (write_size >= req_size) +- break; +- } +- +- return write_size; +-} +- +-static unsigned long long get_zram_used_pages(void) +-{ +- struct mem_cgroup *memcg = NULL; +- unsigned long long zram_pages = 0; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) +- zram_pages += memcg_data_size(memcg, CACHE_PAGE); +- +- return zram_pages; +-} +- +-static unsigned long long get_eswap_used_pages(void) +-{ +- struct mem_cgroup *memcg = NULL; +- unsigned long long eswap_pages = 0; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) +- eswap_pages += memcg_data_size(memcg, SWAP_PAGE); +- +- return eswap_pages; +-} +- +-static unsigned long long get_zram_pagefault(void) +-{ +- struct mem_cgroup *memcg = NULL; +- unsigned long long cache_fault = 0; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) +- cache_fault += memcg_data_size(memcg, CACHE_FAULT); +- +- return cache_fault; +-} +- +-static unsigned int calc_sys_cur_avail_buffers(void) +-{ +- const unsigned int percent_constant = 100; +- unsigned long freemem; +- unsigned long active_file; +- unsigned long inactive_file; +- unsigned long buffers; +- +- freemem = global_zone_page_state(NR_FREE_PAGES) * PAGE_SIZE / SZ_1K; +- active_file = global_node_page_state(NR_ACTIVE_FILE) * PAGE_SIZE / SZ_1K; +- inactive_file = global_node_page_state(NR_INACTIVE_FILE) * PAGE_SIZE / SZ_1K; +- +- buffers = freemem + inactive_file * get_inactive_file_ratio() / percent_constant + +- active_file * get_active_file_ratio() / percent_constant; +- +- return (buffers * SZ_1K / SZ_1M); /* kb to mb */ +-} +- +-void zswapd_status_show(struct seq_file *m) +-{ +- unsigned int buffers = calc_sys_cur_avail_buffers(); +- +- seq_printf(m, "buffer_size:%u\n", buffers); +- seq_printf(m, "recent_refault:%llu\n", anon_refault_ratio); +-} +- +-pid_t get_zswapd_pid(void) +-{ +- return zswapd_pid; +-} +- +-static bool min_buffer_is_suitable(void) +-{ +- unsigned int buffers = calc_sys_cur_avail_buffers(); +- +- if (buffers >= get_min_avail_buffers()) +- return true; +- +- return false; +-} +- +-static bool buffer_is_suitable(void) +-{ +- unsigned int buffers = calc_sys_cur_avail_buffers(); +- +- if (buffers >= get_avail_buffers()) +- return true; +- +- return false; +-} +- +-static bool high_buffer_is_suitable(void) +-{ +- unsigned int buffers = calc_sys_cur_avail_buffers(); +- +- if (buffers >= get_high_avail_buffers()) +- return true; +- +- return false; +-} +- +-static void snapshot_anon_refaults(void) +-{ +- struct mem_cgroup *memcg = NULL; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) +- memcg->memcg_reclaimed.reclaimed_pagefault = memcg_data_size(memcg, CACHE_FAULT); +- +- last_anon_pagefault = get_zram_pagefault(); +- last_snapshot_time = jiffies; +-} +- +-/* +- * Return true if refault changes between two read operations. 
+- */ +-static bool get_memcg_anon_refault_status(struct mem_cgroup *memcg) +-{ +- const unsigned int percent_constant = 100; +- unsigned long long anon_pagefault; +- unsigned long long anon_total; +- unsigned long long ratio; +- struct mem_cgroup_per_node *mz = NULL; +- struct lruvec *lruvec = NULL; +- +- if (!memcg) +- return false; +- +- anon_pagefault = memcg_data_size(memcg, CACHE_FAULT); +- if (anon_pagefault == memcg->memcg_reclaimed.reclaimed_pagefault) +- return false; +- +- mz = mem_cgroup_nodeinfo(memcg, 0); +- if (!mz) +- return false; +- +- lruvec = &mz->lruvec; +- if (!lruvec) +- return false; +- +- anon_total = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES) + +- memcg_data_size(memcg, SWAP_PAGE) + memcg_data_size(memcg, CACHE_PAGE); +- +- ratio = div64_u64((anon_pagefault - memcg->memcg_reclaimed.reclaimed_pagefault) * +- percent_constant, (anon_total + 1)); +- if (ratio > atomic_read(&memcg->memcg_reclaimed.refault_threshold)) +- return true; +- +- return false; +-} +- +-static bool get_area_anon_refault_status(void) +-{ +- const unsigned int percent_constant = 1000; +- unsigned long long anon_pagefault; +- unsigned long long ratio; +- unsigned long long time; +- +- anon_pagefault = get_zram_pagefault(); +- time = jiffies; +- if (anon_pagefault == last_anon_pagefault || time == last_snapshot_time) +- return false; +- +- ratio = div_u64((anon_pagefault - last_anon_pagefault) * percent_constant, +- (jiffies_to_msecs(time - last_snapshot_time) + 1)); +- anon_refault_ratio = ratio; +- +- if (ratio > get_area_anon_refault_threshold()) +- return true; +- +- return false; +-} +- +-void wakeup_snapshotd(void) +-{ +- unsigned long snapshot_interval; +- +- snapshot_interval = jiffies_to_msecs(jiffies - last_snapshot_time); +- if (snapshot_interval >= get_anon_refault_snapshot_min_interval()) { +- atomic_set(&snapshotd_wait_flag, 1); +- wake_up_interruptible(&snapshotd_wait); +- } +-} +- +-static int snapshotd(void *p) +-{ +- int ret; +- +- while (!kthread_should_stop()) { +- ret = wait_event_interruptible(snapshotd_wait, atomic_read(&snapshotd_wait_flag)); +- if (ret) +- continue; +- +- atomic_set(&snapshotd_wait_flag, 0); +- +- snapshot_anon_refaults(); +- count_vm_event(ZSWAPD_SNAPSHOT_TIMES); +- } +- +- return 0; +-} +- +-void set_snapshotd_init_flag(unsigned int val) +-{ +- atomic_set(&snapshotd_init_flag, val); +-} +- +-/* +- * This snapshotd start function will be called by init. 
+- */ +-int snapshotd_run(void) +-{ +- atomic_set(&snapshotd_wait_flag, 0); +- init_waitqueue_head(&snapshotd_wait); +- +- snapshotd_task = kthread_run(snapshotd, NULL, "snapshotd"); +- if (IS_ERR(snapshotd_task)) { +- pr_err("Failed to start snapshotd\n"); +- return PTR_ERR(snapshotd_task); +- } +- +- return 0; +-} +- +-static int __init snapshotd_init(void) +-{ +- snapshotd_run(); +- +- return 0; +-} +-module_init(snapshotd_init); +- +-static int get_zswapd_eswap_policy(void) +-{ +- if (get_zram_wm_ratio() == UNSET_ZRAM_WM_RATIO) +- return CHECK_BUFFER_ONLY; +- else +- return CHECK_BUFFER_ZRAMRATIO_BOTH; +-} +- +-static unsigned int get_policy_zram_wm_ratio(void) +-{ +- enum zswapd_eswap_policy policy = get_zswapd_eswap_policy(); +- +- if (policy == CHECK_BUFFER_ONLY) +- return DEFAULT_ZRAM_WM_RATIO; +- else +- return get_zram_wm_ratio(); +-} +- +-int get_zram_current_watermark(void) +-{ +- long long diff_buffers; +- const unsigned int percent_constant = 10; +- u64 nr_total; +- unsigned int zram_wm_ratio = get_policy_zram_wm_ratio(); +- +- nr_total = totalram_pages(); +- /* B_target - B_current */ +- diff_buffers = get_avail_buffers() - calc_sys_cur_avail_buffers(); +- /* MB to page */ +- diff_buffers *= SZ_1M / PAGE_SIZE; +- /* after_comp to before_comp */ +- diff_buffers *= get_compress_ratio(); +- /* page to ratio */ +- diff_buffers = div64_s64(diff_buffers * percent_constant, nr_total); +- +- return min((long long)zram_wm_ratio, zram_wm_ratio - diff_buffers); +-} +- +-bool zram_watermark_ok(void) +-{ +- const unsigned int percent_constant = 100; +- u64 nr_zram_used; +- u64 nr_wm; +- u64 ratio; +- +- ratio = get_zram_current_watermark(); +- nr_zram_used = get_zram_used_pages(); +- nr_wm = div_u64(totalram_pages() * ratio, percent_constant); +- if (nr_zram_used > nr_wm) +- return true; +- +- return false; +-} +- +-bool zram_watermark_exceed(void) +-{ +- u64 nr_zram_used; +- const unsigned long long nr_wm = get_zram_critical_threshold() * (SZ_1M / PAGE_SIZE); +- +- if (!nr_wm) +- return false; +- +- nr_zram_used = get_zram_used_pages(); +- if (nr_zram_used > nr_wm) +- return true; +- return false; +-} +- +-void wakeup_zswapd(pg_data_t *pgdat) +-{ +- unsigned long interval; +- +- if (IS_ERR(pgdat->zswapd)) +- return; +- +- if (!wq_has_sleeper(&pgdat->zswapd_wait)) +- return; +- +- /* +- * make anon pagefault snapshots +- * wake up snapshotd +- */ +- if (atomic_read(&snapshotd_init_flag) == 1) +- wakeup_snapshotd(); +- +- /* wake up when the buffer is lower than min_avail_buffer */ +- if (min_buffer_is_suitable()) +- return; +- +- interval = jiffies_to_msecs(jiffies - last_zswapd_time); +- if (interval < zswapd_skip_interval) { +- count_vm_event(ZSWAPD_EMPTY_ROUND_SKIP_TIMES); +- return; +- } +- +- atomic_set(&pgdat->zswapd_wait_flag, 1); +- wake_up_interruptible(&pgdat->zswapd_wait); +-} +- +-void wake_all_zswapd(void) +-{ +- pg_data_t *pgdat = NULL; +- int nid; +- +- for_each_online_node(nid) { +- pgdat = NODE_DATA(nid); +- wakeup_zswapd(pgdat); +- } +-} +- +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +-static void zswapd_shrink_active_list(unsigned long nr_to_scan, +- struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) +-{ +- unsigned int nr_deactivate; +- unsigned long nr_scanned; +- unsigned long nr_taken; +- +- struct page *page = NULL; +- struct pglist_data *pgdat = lruvec_pgdat(lruvec); +- unsigned long *node_anon_cost = &pgdat->__lruvec.anon_cost; +- unsigned long *anon_cost = &lruvec->anon_cost; +- LIST_HEAD(l_inactive); +- LIST_HEAD(l_hold); +- +- lru_add_drain(); +- +- 
spin_lock_irq(&lruvec->lru_lock); +- nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, &nr_scanned, sc, lru); +- __mod_node_page_state(pgdat, NR_ISOLATED_ANON, nr_taken); +- *anon_cost += nr_taken; +- *node_anon_cost += nr_taken; +- __count_vm_events(PGREFILL, nr_scanned); +- count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); +- spin_unlock_irq(&lruvec->lru_lock); +- +- while (!list_empty(&l_hold)) { +- cond_resched(); +- page = lru_to_page(&l_hold); +- list_del(&page->lru); +- +- if (unlikely(!folio_evictable(page_folio(page)))) { +- putback_lru_page(page); +- continue; +- } +- +- ClearPageActive(page); +- SetPageWorkingset(page); +- list_add(&page->lru, &l_inactive); +- } +- +- spin_lock_irq(&lruvec->lru_lock); +- nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); +- __mod_node_page_state(pgdat, NR_ISOLATED_ANON, -nr_taken); +- spin_unlock_irq(&lruvec->lru_lock); +- +- mem_cgroup_uncharge_list(&l_inactive); +- free_unref_page_list(&l_inactive); +- +- trace_mm_vmscan_lru_zswapd_shrink_active(pgdat->node_id, nr_taken, +- nr_deactivate, sc->priority); +-} +- +-static unsigned long zswapd_shrink_list(enum lru_list lru, +- unsigned long nr_to_scan, struct lruvec *lruvec, +- struct scan_control *sc) +-{ +-#ifdef CONFIG_RECLAIM_ACCT +- unsigned long nr_reclaimed; +- +- reclaimacct_substage_start(RA_SHRINKANON); +-#endif +- if (is_active_lru(lru)) { +- if (sc->may_deactivate & (1 << is_file_lru(lru))) +- zswapd_shrink_active_list(nr_to_scan, lruvec, sc, lru); +- else +- sc->skipped_deactivate = 1; +-#ifdef CONFIG_RECLAIM_ACCT +- reclaimacct_substage_end(RA_SHRINKANON, 0, NULL); +-#endif +- return 0; +- } +- +-#ifdef CONFIG_RECLAIM_ACCT +- nr_reclaimed = shrink_inactive_list(nr_to_scan, lruvec, sc, lru); +- reclaimacct_substage_end(RA_SHRINKANON, nr_reclaimed, NULL); +- return nr_reclaimed; +-#else +- return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); +-#endif +-} +- +-static void zswapd_shrink_anon_memcg(struct pglist_data *pgdat, +- struct mem_cgroup *memcg, struct scan_control *sc, unsigned long *nr) +-{ +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); +- unsigned long nr_reclaimed = 0; +- unsigned long nr_to_scan; +- struct blk_plug plug; +- enum lru_list lru; +- +- blk_start_plug(&plug); +- +- while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_ANON]) { +- for (lru = 0; lru <= LRU_ACTIVE_ANON; lru++) { +- if (nr[lru]) { +- nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); +- nr[lru] -= nr_to_scan; +- nr_reclaimed += zswapd_shrink_list(lru, +- nr_to_scan, lruvec, sc); +- } +- } +- } +- +- blk_finish_plug(&plug); +- sc->nr_reclaimed += nr_reclaimed; +-} +-#endif +- +-static bool zswapd_shrink_anon(pg_data_t *pgdat, struct scan_control *sc) +-{ +- const unsigned int percent_constant = 100; +- struct mem_cgroup *memcg = NULL; +- unsigned long nr[NR_LRU_LISTS]; +- +- while ((memcg = get_next_memcg(memcg)) != NULL) { +- struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); +- u64 nr_active, nr_inactive, nr_zram, nr_eswap, zram_ratio; +- +- /* reclaim and try to meet the high buffer watermark */ +- if (high_buffer_is_suitable()) { +- get_next_memcg_break(memcg); +- break; +- } +- +- if (get_memcg_anon_refault_status(memcg)) { +- count_vm_event(ZSWAPD_MEMCG_REFAULT_SKIP); +- continue; +- } +- +- nr_active = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES); +- nr_inactive = lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); +- nr_zram = memcg_data_size(memcg, CACHE_PAGE); +- nr_eswap = memcg_data_size(memcg, SWAP_PAGE); +- +- zram_ratio = 
div64_u64((nr_zram + nr_eswap) * percent_constant, +- (nr_inactive + nr_active + nr_zram + nr_eswap + 1)); +- if (zram_ratio >= (u32)atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio)) { +- count_vm_event(ZSWAPD_MEMCG_RATIO_SKIP); +- continue; +- } +- +- nr[LRU_ACTIVE_ANON] = nr_active >> (unsigned int)sc->priority; +- nr[LRU_INACTIVE_ANON] = nr_inactive >> (unsigned int)sc->priority; +- nr[LRU_ACTIVE_FILE] = 0; +- nr[LRU_INACTIVE_FILE] = 0; +- +-#ifdef CONFIG_HYPERHOLD_FILE_LRU +- zswapd_shrink_anon_memcg(pgdat, memcg, sc, nr); +-#else +- shrink_lruvec(lruvec, sc); +-#endif +- shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); +- +- if (sc->nr_reclaimed >= sc->nr_to_reclaim) { +- get_next_memcg_break(memcg); +- break; +- } +- } +- +- return sc->nr_scanned >= sc->nr_to_reclaim; +-} +- +-static u64 __calc_nr_to_reclaim(void) +-{ +- unsigned int buffers; +- unsigned int high_buffers; +- unsigned int max_reclaim_size; +- u64 reclaim_size = 0; +- +- high_buffers = get_high_avail_buffers(); +- buffers = calc_sys_cur_avail_buffers(); +- max_reclaim_size = get_zswapd_max_reclaim_size(); +- if (buffers < high_buffers) +- reclaim_size = high_buffers - buffers; +- +- /* once max reclaim target is max_reclaim_size */ +- reclaim_size = min(reclaim_size, (u64)max_reclaim_size); +- +- /* MB to pages */ +- return div_u64(reclaim_size * SZ_1M, PAGE_SIZE); +-} +- +-static void zswapd_shrink_node(pg_data_t *pgdat) +-{ +- struct scan_control sc = { +- .gfp_mask = GFP_KERNEL, +- .order = 0, +- .priority = DEF_PRIORITY / 2, +- .may_writepage = !laptop_mode, +- .may_unmap = 1, +- .may_swap = 1, +- .reclaim_idx = MAX_NR_ZONES - 1, +- }; +- const unsigned int increase_rate = 2; +- +- do { +- unsigned long nr_reclaimed = sc.nr_reclaimed; +- bool raise_priority = true; +- +- /* reclaim and try to meet the high buffer watermark */ +- if (high_buffer_is_suitable()) +- break; +- +- sc.nr_scanned = 0; +- sc.nr_to_reclaim = __calc_nr_to_reclaim(); +- +- if (zswapd_shrink_anon(pgdat, &sc)) +- raise_priority = false; +- count_vm_events(ZSWAPD_SCANNED, sc.nr_scanned); +- count_vm_events(ZSWAPD_RECLAIMED, sc.nr_reclaimed); +- if (try_to_freeze() || kthread_should_stop()) +- break; +- +- nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; +- if (raise_priority || !nr_reclaimed) +- sc.priority--; +- } while (sc.priority >= 1); +- +- /* +- * When meets the first empty round, set the interval to t. +- * If the following round is still empty, set the intervall +- * to 2t. If the round is always empty, then 4t, 8t, and so on. +- * But make sure the interval is not more than the max_skip_interval. +- * Once a non-empty round occurs, reset the interval to 0. 
+- */ +- if (sc.nr_reclaimed < get_empty_round_check_threshold()) { +- count_vm_event(ZSWAPD_EMPTY_ROUND); +- if (last_round_is_empty) +- zswapd_skip_interval = min(zswapd_skip_interval * +- increase_rate, get_max_skip_interval()); +- else +- zswapd_skip_interval = get_empty_round_skip_interval(); +- last_round_is_empty = true; +- } else { +- zswapd_skip_interval = 0; +- last_round_is_empty = false; +- } +-} +- +-u64 zram_watermark_diff(void) +-{ +- const unsigned int percent_constant = 100; +- u64 nr_zram_used; +- u64 nr_wm; +- u64 ratio; +- +- ratio = get_zram_current_watermark(); +- nr_zram_used = get_zram_used_pages(); +- nr_wm = div_u64(totalram_pages() * ratio, percent_constant); +- if (nr_zram_used > nr_wm) +- return (nr_zram_used - nr_wm) * PAGE_SIZE + SWAP_MORE_ZRAM; +- +- return 0; +-} +- +-u64 zswapd_buffer_diff(void) +-{ +- u64 buffers; +- u64 avail; +- +- buffers = calc_sys_cur_avail_buffers(); +- avail = get_high_avail_buffers(); +- if (buffers < avail) +- return (avail - buffers) * SZ_1M; +- +- return 0; +-} +- +-u64 get_do_eswap_size(bool refault) +-{ +- u64 size = 0; +- enum zswapd_eswap_policy policy = get_zswapd_eswap_policy(); +- +- if (policy == CHECK_BUFFER_ZRAMRATIO_BOTH) +- size = max(zram_watermark_diff(), zswapd_buffer_diff()); +- else if (policy == CHECK_BUFFER_ONLY && (zram_watermark_ok() || refault)) +- size = zswapd_buffer_diff(); +- +- return size; +-} +- +-static int zswapd(void *p) +-{ +- struct task_struct *tsk = current; +- pg_data_t *pgdat = (pg_data_t *)p; +- const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); +-#ifdef CONFIG_RECLAIM_ACCT +- struct reclaim_acct ra = {0}; +-#endif +- +- /* save zswapd pid for schedule strategy */ +- zswapd_pid = tsk->pid; +- +- +- if (!cpumask_empty(cpumask)) +- set_cpus_allowed_ptr(tsk, cpumask); +- +- set_freezable(); +- +- while (!kthread_should_stop()) { +- bool refault = false; +- u64 size = 0; +- +- (void)wait_event_freezable(pgdat->zswapd_wait, +- atomic_read(&pgdat->zswapd_wait_flag)); +- atomic_set(&pgdat->zswapd_wait_flag, 0); +- count_vm_event(ZSWAPD_WAKEUP); +- zswapd_pressure_report(LEVEL_LOW); +- +- if (get_area_anon_refault_status()) { +- refault = true; +- count_vm_event(ZSWAPD_REFAULT); +- goto do_eswap; +- } +- +-#ifdef CONFIG_RECLAIM_ACCT +- reclaimacct_start(ZSWAPD_RECLAIM, &ra); +-#endif +- zswapd_shrink_node(pgdat); +-#ifdef CONFIG_RECLAIM_ACCT +- reclaimacct_end(ZSWAPD_RECLAIM); +-#endif +- last_zswapd_time = jiffies; +- +-do_eswap: +- size = get_do_eswap_size(refault); +- if (size >= SZ_1M) { +- count_vm_event(ZSWAPD_SWAPOUT); +- size = swapout(size); +- } +- +- if (!buffer_is_suitable()) { +- if (free_swap_is_low() || zram_watermark_exceed()) { +- zswapd_pressure_report(LEVEL_CRITICAL); +- count_vm_event(ZSWAPD_CRITICAL_PRESS); +- pr_info("%s:zrampages:%llu, eswappages:%llu\n", __func__, +- get_zram_used_pages(), get_eswap_used_pages()); +- } else { +- zswapd_pressure_report(LEVEL_MEDIUM); +- count_vm_event(ZSWAPD_MEDIUM_PRESS); +- } +- } +- } +- +- return 0; +-} +- +-/* +- * This zswapd start function will be called by init and node-hot-add. 
+- */ +-int zswapd_run(int nid) +-{ +- const unsigned int priority_less = 5; +- struct sched_param param = { +- .sched_priority = MAX_PRIO - priority_less, +- }; +- pg_data_t *pgdat = NODE_DATA(nid); +- +- if (pgdat->zswapd) +- return 0; +- +- atomic_set(&pgdat->zswapd_wait_flag, 0); +- pgdat->zswapd = kthread_create(zswapd, pgdat, "zswapd%d", nid); +- if (IS_ERR(pgdat->zswapd)) { +- pr_err("Failed to start zswapd on node %d\n", nid); +- return PTR_ERR(pgdat->zswapd); +- } +- +- sched_setscheduler_nocheck(pgdat->zswapd, SCHED_NORMAL, ¶m); +- set_user_nice(pgdat->zswapd, PRIO_TO_NICE(param.sched_priority)); +- wake_up_process(pgdat->zswapd); +- +- return 0; +-} +- +-/* +- * Called by memory hotplug when all memory in a node is offlined. Caller must +- * hold mem_hotplug_begin/end(). +- */ +-void zswapd_stop(int nid) +-{ +- struct task_struct *zswapd = NODE_DATA(nid)->zswapd; +- +- if (zswapd) { +- kthread_stop(zswapd); +- NODE_DATA(nid)->zswapd = NULL; +- } +- +- zswapd_pid = -1; +-} +- +-/* +- * It's optimal to keep kswapds on the same CPUs as their memory, but +- * not required for correctness. So if the last cpu in a node goes away, +- * we get changed to run anywhere: as the first one comes back, restore +- * their cpu bindings. +- */ +-static int zswapd_cpu_online(unsigned int cpu) +-{ +- int nid; +- +- for_each_node_state(nid, N_MEMORY) { +- pg_data_t *pgdat = NODE_DATA(nid); +- const struct cpumask *mask; +- +- mask = cpumask_of_node(pgdat->node_id); +- if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) +- /* One of our CPUs online: restore mask */ +- set_cpus_allowed_ptr(pgdat->zswapd, mask); +- } +- +- return 0; +-} +- +-static int __init zswapd_init(void) +-{ +- int nid; +- int ret; +- +- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/zswapd:online", +- zswapd_cpu_online, NULL); +- if (ret < 0) { +- pr_err("zswapd: failed to register hotplug callbacks.\n"); +- return ret; +- } +- +- for_each_node_state(nid, N_MEMORY) +- zswapd_run(nid); +- +- return 0; +-} +-module_init(zswapd_init) +diff --git a/mm/zswapd_control.c b/mm/zswapd_control.c +deleted file mode 100644 +index 340b68306..000000000 +--- a/mm/zswapd_control.c ++++ /dev/null +@@ -1,860 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * mm/zswapd_control.c +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "zswapd_internal.h" +- +-#define ANON_REFAULT_SNAPSHOT_MIN_INTERVAL 200 +-#define AREA_ANON_REFAULT_THRESHOLD 22000 +-#define EMPTY_ROUND_CHECK_THRESHOLD 10 +-#define EMPTY_ROUND_SKIP_INTERVAL 20 +-#define ZSWAPD_MAX_LEVEL_NUM 10 +-#define MAX_SKIP_INTERVAL 1000 +-#define MAX_RECLAIM_SIZE 100 +- +-#define INACTIVE_FILE_RATIO 90 +-#define ACTIVE_FILE_RATIO 70 +-#define COMPRESS_RATIO 30 +-#define ZRAM_WM_RATIO 0 +-#define MAX_RATIO 100 +- +-#define CHECK_BUFFER_VALID(var1, var2) (((var2) != 0) && ((var1) > (var2))) +- +-struct zswapd_param { +- unsigned int min_score; +- unsigned int max_score; +- unsigned int ub_mem2zram_ratio; +- unsigned int ub_zram2ufs_ratio; +- unsigned int refault_threshold; +-}; +- +-static struct zswapd_param zswap_param[ZSWAPD_MAX_LEVEL_NUM]; +-struct eventfd_ctx *zswapd_press_efd[LEVEL_COUNT]; +-static DEFINE_MUTEX(pressure_event_lock); +-static DEFINE_MUTEX(reclaim_para_lock); +- +-atomic_t avail_buffers = ATOMIC_INIT(0); +-atomic_t min_avail_buffers = ATOMIC_INIT(0); +-atomic_t high_avail_buffers = ATOMIC_INIT(0); +-atomic_t max_reclaim_size = ATOMIC_INIT(MAX_RECLAIM_SIZE); +- +-atomic_t inactive_file_ratio = ATOMIC_INIT(INACTIVE_FILE_RATIO); +-atomic_t active_file_ratio = ATOMIC_INIT(ACTIVE_FILE_RATIO); +-atomic_t zram_wm_ratio = ATOMIC_INIT(ZRAM_WM_RATIO); +-atomic_t compress_ratio = ATOMIC_INIT(COMPRESS_RATIO); +- +-atomic64_t zram_critical_threshold = ATOMIC_LONG_INIT(0); +-atomic64_t free_swap_threshold = ATOMIC_LONG_INIT(0); +-atomic64_t area_anon_refault_threshold = ATOMIC_LONG_INIT(AREA_ANON_REFAULT_THRESHOLD); +-atomic64_t anon_refault_snapshot_min_interval = +- ATOMIC_LONG_INIT(ANON_REFAULT_SNAPSHOT_MIN_INTERVAL); +-atomic64_t empty_round_skip_interval = ATOMIC_LONG_INIT(EMPTY_ROUND_SKIP_INTERVAL); +-atomic64_t max_skip_interval = ATOMIC_LONG_INIT(MAX_SKIP_INTERVAL); +-atomic64_t empty_round_check_threshold = ATOMIC_LONG_INIT(EMPTY_ROUND_CHECK_THRESHOLD); +- +-inline unsigned int get_zram_wm_ratio(void) +-{ +- return atomic_read(&zram_wm_ratio); +-} +- +-inline unsigned int get_compress_ratio(void) +-{ +- return atomic_read(&compress_ratio); +-} +- +-inline unsigned int get_inactive_file_ratio(void) +-{ +- return atomic_read(&inactive_file_ratio); +-} +- +-inline unsigned int get_active_file_ratio(void) +-{ +- return atomic_read(&active_file_ratio); +-} +- +-inline unsigned int get_avail_buffers(void) +-{ +- return atomic_read(&avail_buffers); +-} +- +-inline unsigned int get_min_avail_buffers(void) +-{ +- return atomic_read(&min_avail_buffers); +-} +- +-inline unsigned int get_high_avail_buffers(void) +-{ +- return atomic_read(&high_avail_buffers); +-} +- +-inline unsigned int get_zswapd_max_reclaim_size(void) +-{ +- return atomic_read(&max_reclaim_size); +-} +- +-inline unsigned long long get_free_swap_threshold(void) +-{ +- return atomic64_read(&free_swap_threshold); +-} +- +-inline unsigned long long get_area_anon_refault_threshold(void) +-{ +- return atomic64_read(&area_anon_refault_threshold); +-} +- +-inline unsigned long long get_anon_refault_snapshot_min_interval(void) +-{ +- return atomic64_read(&anon_refault_snapshot_min_interval); +-} +- +-inline unsigned long long get_empty_round_skip_interval(void) +-{ +- return atomic64_read(&empty_round_skip_interval); +-} +- +-inline unsigned long long get_max_skip_interval(void) +-{ +- return atomic64_read(&max_skip_interval); +-} +- +-inline unsigned long long get_empty_round_check_threshold(void) +-{ 
+- return atomic64_read(&empty_round_check_threshold); +-} +- +-inline unsigned long long get_zram_critical_threshold(void) +-{ +- return atomic64_read(&zram_critical_threshold); +-} +- +-static ssize_t avail_buffers_params_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- unsigned long long threshold; +- unsigned int high_buffers; +- unsigned int min_buffers; +- unsigned int buffers; +- +- buf = strstrip(buf); +- +- if (sscanf(buf, "%u %u %u %llu", &buffers, &min_buffers, &high_buffers, &threshold) != 4) +- return -EINVAL; +- +- if (CHECK_BUFFER_VALID(min_buffers, buffers) || +- CHECK_BUFFER_VALID(min_buffers, high_buffers) || +- CHECK_BUFFER_VALID(buffers, high_buffers)) +- return -EINVAL; +- +- atomic_set(&avail_buffers, buffers); +- atomic_set(&min_avail_buffers, min_buffers); +- atomic_set(&high_avail_buffers, high_buffers); +- atomic64_set(&free_swap_threshold, (threshold * (SZ_1M / PAGE_SIZE))); +- +- if (atomic_read(&min_avail_buffers) == 0) +- set_snapshotd_init_flag(0); +- else +- set_snapshotd_init_flag(1); +- +- wake_all_zswapd(); +- +- return nbytes; +-} +- +-static ssize_t zswapd_max_reclaim_size_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- u32 max; +- int ret; +- +- buf = strstrip(buf); +- ret = kstrtouint(buf, 10, &max); +- if (ret) +- return -EINVAL; +- +- atomic_set(&max_reclaim_size, max); +- +- return nbytes; +-} +- +-static ssize_t buffers_ratio_params_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- unsigned int inactive; +- unsigned int active; +- +- buf = strstrip(buf); +- +- if (sscanf(buf, "%u %u", &inactive, &active) != 2) +- return -EINVAL; +- +- if (inactive > MAX_RATIO || active > MAX_RATIO) +- return -EINVAL; +- +- atomic_set(&inactive_file_ratio, inactive); +- atomic_set(&active_file_ratio, active); +- +- return nbytes; +-} +- +-static int area_anon_refault_threshold_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&area_anon_refault_threshold, val); +- +- return 0; +-} +- +-static int empty_round_skip_interval_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&empty_round_skip_interval, val); +- +- return 0; +-} +- +-static int max_skip_interval_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&max_skip_interval, val); +- +- return 0; +-} +- +-static int empty_round_check_threshold_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&empty_round_check_threshold, val); +- +- return 0; +-} +- +-static int anon_refault_snapshot_min_interval_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&anon_refault_snapshot_min_interval, val); +- +- return 0; +-} +- +-static int zram_critical_thres_write(struct cgroup_subsys_state *css, +- struct cftype *cft, u64 val) +-{ +- atomic64_set(&zram_critical_threshold, val); +- +- return 0; +-} +- +-static ssize_t zswapd_pressure_event_control(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- unsigned int level; +- unsigned int efd; +- struct fd efile; +- int ret; +- +- buf = strstrip(buf); +- if (sscanf(buf, "%u %u", &efd, &level) != 2) +- return -EINVAL; +- +- if (level >= LEVEL_COUNT) +- return -EINVAL; +- +- mutex_lock(&pressure_event_lock); +- efile = fdget(efd); +- if (!efile.file) { +- ret = -EBADF; +- goto out; +- } +- +- zswapd_press_efd[level] = eventfd_ctx_fileget(efile.file); +- if 
(IS_ERR(zswapd_press_efd[level])) { +- ret = PTR_ERR(zswapd_press_efd[level]); +- goto out_put_efile; +- } +- fdput(efile); +- mutex_unlock(&pressure_event_lock); +- return nbytes; +- +-out_put_efile: +- fdput(efile); +-out: +- mutex_unlock(&pressure_event_lock); +- +- return ret; +-} +- +-void zswapd_pressure_report(enum zswapd_pressure_level level) +-{ +- int ret; +- +- if (zswapd_press_efd[level] == NULL) +- return; +- +- ret = eventfd_signal(zswapd_press_efd[level], 1); +- if (ret < 0) +- pr_err("SWAP-MM: %s : level:%u, ret:%d ", __func__, level, ret); +-} +- +-static u64 zswapd_pid_read(struct cgroup_subsys_state *css, struct cftype *cft) +-{ +- return get_zswapd_pid(); +-} +- +-static void zswapd_memcgs_param_parse(int level_num) +-{ +- struct mem_cgroup *memcg = NULL; +- u64 score; +- int i; +- +- while ((memcg = get_next_memcg(memcg))) { +- score = atomic64_read(&memcg->memcg_reclaimed.app_score); +- for (i = 0; i < level_num; ++i) +- if (score >= zswap_param[i].min_score && +- score <= zswap_param[i].max_score) +- break; +- +- atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio, +- zswap_param[i].ub_mem2zram_ratio); +- atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio, +- zswap_param[i].ub_zram2ufs_ratio); +- atomic_set(&memcg->memcg_reclaimed.refault_threshold, +- zswap_param[i].refault_threshold); +- } +-} +- +-static ssize_t zswapd_memcgs_param_write(struct kernfs_open_file *of, char *buf, +- size_t nbytes, loff_t off) +-{ +- char *token = NULL; +- int level_num; +- int i; +- +- buf = strstrip(buf); +- token = strsep(&buf, " "); +- +- if (!token) +- return -EINVAL; +- +- if (kstrtoint(token, 0, &level_num)) +- return -EINVAL; +- +- if (level_num > ZSWAPD_MAX_LEVEL_NUM) +- return -EINVAL; +- +- mutex_lock(&reclaim_para_lock); +- for (i = 0; i < level_num; ++i) { +- token = strsep(&buf, " "); +- if (!token) +- goto out; +- +- if (kstrtoint(token, 0, &zswap_param[i].min_score) || +- zswap_param[i].min_score > MAX_APP_SCORE) +- goto out; +- +- token = strsep(&buf, " "); +- if (!token) +- goto out; +- +- if (kstrtoint(token, 0, &zswap_param[i].max_score) || +- zswap_param[i].max_score > MAX_APP_SCORE) +- goto out; +- +- token = strsep(&buf, " "); +- if (!token) +- goto out; +- +- if (kstrtoint(token, 0, &zswap_param[i].ub_mem2zram_ratio) || +- zswap_param[i].ub_mem2zram_ratio > MAX_RATIO) +- goto out; +- +- token = strsep(&buf, " "); +- if (!token) +- goto out; +- +- if (kstrtoint(token, 0, &zswap_param[i].ub_zram2ufs_ratio) || +- zswap_param[i].ub_zram2ufs_ratio > MAX_RATIO) +- goto out; +- +- token = strsep(&buf, " "); +- if (!token) +- goto out; +- +- if (kstrtoint(token, 0, &zswap_param[i].refault_threshold)) +- goto out; +- } +- +- zswapd_memcgs_param_parse(level_num); +- mutex_unlock(&reclaim_para_lock); +- +- return nbytes; +- +-out: +- mutex_unlock(&reclaim_para_lock); +- return -EINVAL; +-} +- +-static ssize_t zswapd_single_memcg_param_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); +- unsigned int ub_mem2zram_ratio; +- unsigned int ub_zram2ufs_ratio; +- unsigned int refault_threshold; +- +- buf = strstrip(buf); +- +- if (sscanf(buf, "%u %u %u", &ub_mem2zram_ratio, &ub_zram2ufs_ratio, +- &refault_threshold) != 3) +- return -EINVAL; +- +- if (ub_mem2zram_ratio > MAX_RATIO || ub_zram2ufs_ratio > MAX_RATIO || +- refault_threshold > MAX_RATIO) +- return -EINVAL; +- +- atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio, +- ub_mem2zram_ratio); +- 
atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio, +- ub_zram2ufs_ratio); +- atomic_set(&memcg->memcg_reclaimed.refault_threshold, +- refault_threshold); +- +- return nbytes; +-} +- +-static ssize_t mem_cgroup_zram_wm_ratio_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- unsigned int ratio; +- int ret; +- +- buf = strstrip(buf); +- +- ret = kstrtouint(buf, 10, &ratio); +- if (ret) +- return -EINVAL; +- +- if (ratio > MAX_RATIO) +- return -EINVAL; +- +- atomic_set(&zram_wm_ratio, ratio); +- +- return nbytes; +-} +- +-static ssize_t mem_cgroup_compress_ratio_write(struct kernfs_open_file *of, +- char *buf, size_t nbytes, loff_t off) +-{ +- unsigned int ratio; +- int ret; +- +- buf = strstrip(buf); +- +- ret = kstrtouint(buf, 10, &ratio); +- if (ret) +- return -EINVAL; +- +- if (ratio > MAX_RATIO) +- return -EINVAL; +- +- atomic_set(&compress_ratio, ratio); +- +- return nbytes; +-} +- +-static int zswapd_pressure_show(struct seq_file *m, void *v) +-{ +- zswapd_status_show(m); +- +- return 0; +-} +- +-static int memcg_active_app_info_list_show(struct seq_file *m, void *v) +-{ +- struct mem_cgroup_per_node *mz = NULL; +- struct mem_cgroup *memcg = NULL; +- struct lruvec *lruvec = NULL; +- unsigned long eswap_size; +- unsigned long anon_size; +- unsigned long zram_size; +- +- while ((memcg = get_next_memcg(memcg))) { +- u64 score = atomic64_read(&memcg->memcg_reclaimed.app_score); +- +- mz = mem_cgroup_nodeinfo(memcg, 0); +- if (!mz) { +- get_next_memcg_break(memcg); +- return 0; +- } +- +- lruvec = &mz->lruvec; +- if (!lruvec) { +- get_next_memcg_break(memcg); +- return 0; +- } +- +- anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, +- MAX_NR_ZONES) + lruvec_lru_size(lruvec, +- LRU_INACTIVE_ANON, MAX_NR_ZONES); +- eswap_size = memcg_data_size(memcg, SWAP_SIZE); +- zram_size = memcg_data_size(memcg, CACHE_SIZE); +- +- if (anon_size + zram_size + eswap_size == 0) +- continue; +- +- if (!strlen(memcg->name)) +- continue; +- +- anon_size *= PAGE_SIZE / SZ_1K; +- zram_size *= PAGE_SIZE / SZ_1K; +- eswap_size *= PAGE_SIZE / SZ_1K; +- +- seq_printf(m, "%s %llu %lu %lu %lu %llu\n", memcg->name, score, +- anon_size, zram_size, eswap_size, +- memcg->memcg_reclaimed.reclaimed_pagefault); +- } +- return 0; +-} +- +-#ifdef CONFIG_HYPERHOLD_DEBUG +-static int avail_buffers_params_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "avail_buffers: %u\n", atomic_read(&avail_buffers)); +- seq_printf(m, "min_avail_buffers: %u\n", atomic_read(&min_avail_buffers)); +- seq_printf(m, "high_avail_buffers: %u\n", atomic_read(&high_avail_buffers)); +- seq_printf(m, "free_swap_threshold: %llu\n", +- atomic64_read(&free_swap_threshold) * PAGE_SIZE / SZ_1M); +- +- return 0; +-} +- +-static int zswapd_max_reclaim_size_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "zswapd_max_reclaim_size: %u\n", +- atomic_read(&max_reclaim_size)); +- +- return 0; +-} +- +-static int buffers_ratio_params_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "inactive_file_ratio: %u\n", atomic_read(&inactive_file_ratio)); +- seq_printf(m, "active_file_ratio: %u\n", atomic_read(&active_file_ratio)); +- +- return 0; +-} +- +-static u64 area_anon_refault_threshold_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return atomic64_read(&area_anon_refault_threshold); +-} +- +-static u64 empty_round_skip_interval_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return atomic64_read(&empty_round_skip_interval); +-} +- +-static u64 max_skip_interval_read(struct 
cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return atomic64_read(&max_skip_interval); +-} +- +-static u64 empty_round_check_threshold_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return atomic64_read(&empty_round_check_threshold); +-} +- +-static u64 anon_refault_snapshot_min_interval_read( +- struct cgroup_subsys_state *css, struct cftype *cft) +-{ +- return atomic64_read(&anon_refault_snapshot_min_interval); +-} +- +-static u64 zram_critical_threshold_read(struct cgroup_subsys_state *css, +- struct cftype *cft) +-{ +- return atomic64_read(&zram_critical_threshold); +-} +- +-static int zswapd_memcgs_param_show(struct seq_file *m, void *v) +-{ +- int i; +- +- for (i = 0; i < ZSWAPD_MAX_LEVEL_NUM; ++i) { +- seq_printf(m, "level %d min score: %u\n", i, +- zswap_param[i].min_score); +- seq_printf(m, "level %d max score: %u\n", i, +- zswap_param[i].max_score); +- seq_printf(m, "level %d ub_mem2zram_ratio: %u\n", i, +- zswap_param[i].ub_mem2zram_ratio); +- seq_printf(m, "level %d ub_zram2ufs_ratio: %u\n", i, +- zswap_param[i].ub_zram2ufs_ratio); +- seq_printf(m, "level %d refault_threshold: %u\n", i, +- zswap_param[i].refault_threshold); +- } +- +- return 0; +-} +- +-static int zswapd_single_memcg_param_show(struct seq_file *m, void *v) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); +- +- seq_printf(m, "memcg score: %llu\n", +- atomic64_read(&memcg->memcg_reclaimed.app_score)); +- seq_printf(m, "memcg ub_mem2zram_ratio: %u\n", +- atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio)); +- seq_printf(m, "memcg ub_zram2ufs_ratio: %u\n", +- atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio)); +- seq_printf(m, "memcg refault_threshold: %u\n", +- atomic_read(&memcg->memcg_reclaimed.refault_threshold)); +- +- return 0; +-} +- +-static int zram_wm_ratio_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "zram_wm_ratio: %u\n", atomic_read(&zram_wm_ratio)); +- +- return 0; +-} +- +-static int compress_ratio_show(struct seq_file *m, void *v) +-{ +- seq_printf(m, "compress_ratio: %u\n", atomic_read(&compress_ratio)); +- +- return 0; +-} +- +-static int zswapd_vmstat_show(struct seq_file *m, void *v) +-{ +-#ifdef CONFIG_VM_EVENT_COUNTERS +- unsigned long *vm_buf = NULL; +- +- vm_buf = kzalloc(sizeof(struct vm_event_state), GFP_KERNEL); +- if (!vm_buf) +- return -ENOMEM; +- all_vm_events(vm_buf); +- +- seq_printf(m, "zswapd_wake_up:%lu\n", vm_buf[ZSWAPD_WAKEUP]); +- seq_printf(m, "zswapd_area_refault:%lu\n", vm_buf[ZSWAPD_REFAULT]); +- seq_printf(m, "zswapd_medium_press:%lu\n", vm_buf[ZSWAPD_MEDIUM_PRESS]); +- seq_printf(m, "zswapd_critical_press:%lu\n", vm_buf[ZSWAPD_CRITICAL_PRESS]); +- seq_printf(m, "zswapd_memcg_ratio_skip:%lu\n", vm_buf[ZSWAPD_MEMCG_RATIO_SKIP]); +- seq_printf(m, "zswapd_memcg_refault_skip:%lu\n", vm_buf[ZSWAPD_MEMCG_REFAULT_SKIP]); +- seq_printf(m, "zswapd_swapout:%lu\n", vm_buf[ZSWAPD_SWAPOUT]); +- seq_printf(m, "zswapd_snapshot_times:%lu\n", vm_buf[ZSWAPD_SNAPSHOT_TIMES]); +- seq_printf(m, "zswapd_reclaimed:%lu\n", vm_buf[ZSWAPD_RECLAIMED]); +- seq_printf(m, "zswapd_scanned:%lu\n", vm_buf[ZSWAPD_SCANNED]); +- +- kfree(vm_buf); +-#endif +- +- return 0; +-} +- +-static int eswap_info_show(struct seq_file *m, void *v) +-{ +- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); +- unsigned long long eswap_size; +- +- eswap_size = memcg_data_size(memcg, WRITE_SIZE) / SZ_1K; +- seq_printf(m, "Total Swapout Size: %llu kB\n", eswap_size); +- +- return 0; +-} +- +-void memcg_eswap_info_show(struct seq_file *m) +-{ +- 
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); +- struct mem_cgroup_per_node *mz = NULL; +- struct lruvec *lruvec = NULL; +- unsigned long anon; +- unsigned long file; +- unsigned long zram; +- unsigned long eswap; +- +- mz = mem_cgroup_nodeinfo(memcg, 0); +- if (!mz) +- return; +- +- lruvec = &mz->lruvec; +- if (!lruvec) +- return; +- +- anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); +- file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) + +- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES); +- zram = memcg_data_size(memcg, CACHE_SIZE) / SZ_1K; +- eswap = memcg_data_size(memcg, SWAP_SIZE) / SZ_1K; +- anon *= PAGE_SIZE / SZ_1K; +- file *= PAGE_SIZE / SZ_1K; +- seq_printf(m, "Anon:\t%12lu kB\nFile:\t%12lu kB\nzram:\t%12lu kB\nEswap:\t%12lu kB\n", +- anon, file, zram, eswap); +-} +-#endif +- +-static struct cftype zswapd_policy_files[] = { +- { +- .name = "active_app_info_list", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .seq_show = memcg_active_app_info_list_show, +- }, +- { +- .name = "zram_wm_ratio", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = mem_cgroup_zram_wm_ratio_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = zram_wm_ratio_show, +-#endif +- }, +- { +- .name = "compress_ratio", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = mem_cgroup_compress_ratio_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = compress_ratio_show, +-#endif +- }, +- { +- .name = "zswapd_pressure", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = zswapd_pressure_event_control, +- }, +- { +- .name = "zswapd_pid", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .read_u64 = zswapd_pid_read, +- }, +- { +- .name = "avail_buffers", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = avail_buffers_params_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = avail_buffers_params_show, +-#endif +- }, +- { +- .name = "zswapd_max_reclaim_size", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = zswapd_max_reclaim_size_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = zswapd_max_reclaim_size_show, +-#endif +- }, +- { +- .name = "area_anon_refault_threshold", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = area_anon_refault_threshold_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = area_anon_refault_threshold_read, +-#endif +- }, +- { +- .name = "empty_round_skip_interval", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = empty_round_skip_interval_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = empty_round_skip_interval_read, +-#endif +- }, +- { +- .name = "max_skip_interval", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = max_skip_interval_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = max_skip_interval_read, +-#endif +- }, +- { +- .name = "empty_round_check_threshold", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = empty_round_check_threshold_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = empty_round_check_threshold_read, +-#endif +- }, +- { +- .name = "anon_refault_snapshot_min_interval", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = anon_refault_snapshot_min_interval_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = anon_refault_snapshot_min_interval_read, +-#endif +- }, +- { +- .name = "zswapd_memcgs_param", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = zswapd_memcgs_param_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = zswapd_memcgs_param_show, +-#endif +- }, +- { +- .name = "zswapd_single_memcg_param", +- .write = zswapd_single_memcg_param_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = 
zswapd_single_memcg_param_show, +-#endif +- }, +- { +- .name = "buffer_ratio_params", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write = buffers_ratio_params_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .seq_show = buffers_ratio_params_show, +-#endif +- }, +- { +- .name = "zswapd_pressure_show", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .seq_show = zswapd_pressure_show, +- }, +- { +- .name = "zram_critical_threshold", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .write_u64 = zram_critical_thres_write, +-#ifdef CONFIG_HYPERHOLD_DEBUG +- .read_u64 = zram_critical_threshold_read, +-#endif +- }, +- +-#ifdef CONFIG_HYPERHOLD_DEBUG +- { +- .name = "zswapd_vmstat_show", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .seq_show = zswapd_vmstat_show, +- }, +-#endif +- { +- .name = "eswap_info", +- .flags = CFTYPE_ONLY_ON_ROOT, +- .seq_show = eswap_info_show, +- }, +- +- { }, /* terminate */ +-}; +- +-static int __init zswapd_policy_init(void) +-{ +- if (!mem_cgroup_disabled()) +- WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, zswapd_policy_files)); +- +- return 0; +-} +-subsys_initcall(zswapd_policy_init); +diff --git a/mm/zswapd_internal.h b/mm/zswapd_internal.h +deleted file mode 100644 +index 1447882ae..000000000 +--- a/mm/zswapd_internal.h ++++ /dev/null +@@ -1,41 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ +-/* +- * mm/zswapd_internal.h +- * +- * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd. +- */ +- +-#ifndef _ZSWAPD_INTERNAL_H +-#define _ZSWAPD_INTERNAL_H +- +-enum zswapd_pressure_level { +- LEVEL_LOW = 0, +- LEVEL_MEDIUM, +- LEVEL_CRITICAL, +- LEVEL_COUNT +-}; +- +-enum zswapd_eswap_policy { +- CHECK_BUFFER_ONLY = 0, +- CHECK_BUFFER_ZRAMRATIO_BOTH +-}; +- +-void zswapd_pressure_report(enum zswapd_pressure_level level); +-inline unsigned int get_zram_wm_ratio(void); +-inline unsigned int get_compress_ratio(void); +-inline unsigned int get_avail_buffers(void); +-inline unsigned int get_min_avail_buffers(void); +-inline unsigned int get_high_avail_buffers(void); +-inline unsigned int get_zswapd_max_reclaim_size(void); +-inline unsigned int get_inactive_file_ratio(void); +-inline unsigned int get_active_file_ratio(void); +-inline unsigned long long get_area_anon_refault_threshold(void); +-inline unsigned long long get_anon_refault_snapshot_min_interval(void); +-inline unsigned long long get_empty_round_skip_interval(void); +-inline unsigned long long get_max_skip_interval(void); +-inline unsigned long long get_empty_round_check_threshold(void); +-inline unsigned long long get_zram_critical_threshold(void); +-u64 memcg_data_size(struct mem_cgroup *memcg, int type); +-u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size); +- +-#endif /* MM_ZSWAPD_INTERNAL_H */ +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index 2f866a325..f7404bc67 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -31,7 +31,6 @@ + #ifdef CONFIG_SYSFS + static const char fmt_hex[] = "%#x\n"; + static const char fmt_dec[] = "%d\n"; +-static const char fmt_uint[] = "%u\n"; + static const char fmt_ulong[] = "%lu\n"; + static const char fmt_u64[] = "%llu\n"; + +@@ -407,9 +406,6 @@ NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); + + static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val) + { +- if (val > S32_MAX) +- return -ERANGE; +- + WRITE_ONCE(dev->napi_defer_hard_irqs, val); + return 0; + } +@@ -423,7 +419,7 @@ static ssize_t napi_defer_hard_irqs_store(struct device *dev, + + return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs); + } 
+-NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint); ++NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec); + + static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 65297b9ed..70da78ab9 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -387,7 +387,6 @@ int l2tp_session_register(struct l2tp_session *session, + l2tp_tunnel_inc_refcount(tunnel); + } + +- WRITE_ONCE(session->tunnel, tunnel); + hlist_add_head_rcu(&session->hlist, head); + spin_unlock_bh(&tunnel->hlist_lock); + +@@ -699,8 +698,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, + if (!session->lns_mode && !session->send_seq) { + trace_session_seqnum_lns_enable(session); + session->send_seq = 1; +- l2tp_session_set_header_len(session, tunnel->version, +- tunnel->encap); ++ l2tp_session_set_header_len(session, tunnel->version); + } + } else { + /* No sequence numbers. +@@ -721,8 +719,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, + if (!session->lns_mode && session->send_seq) { + trace_session_seqnum_lns_disable(session); + session->send_seq = 0; +- l2tp_session_set_header_len(session, tunnel->version, +- tunnel->encap); ++ l2tp_session_set_header_len(session, tunnel->version); + } else if (session->send_seq) { + pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n", + session->name); +@@ -1577,8 +1574,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete); + /* We come here whenever a session's send_seq, cookie_len or + * l2specific_type parameters are set. + */ +-void l2tp_session_set_header_len(struct l2tp_session *session, int version, +- enum l2tp_encap_type encap) ++void l2tp_session_set_header_len(struct l2tp_session *session, int version) + { + if (version == L2TP_HDR_VER_2) { + session->hdr_len = 6; +@@ -1587,7 +1583,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version, + } else { + session->hdr_len = 4 + session->cookie_len; + session->hdr_len += l2tp_get_l2specific_len(session); +- if (encap == L2TP_ENCAPTYPE_UDP) ++ if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) + session->hdr_len += 4; + } + } +@@ -1601,6 +1597,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn + session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL); + if (session) { + session->magic = L2TP_SESSION_MAGIC; ++ session->tunnel = tunnel; + + session->session_id = session_id; + session->peer_session_id = peer_session_id; +@@ -1636,7 +1633,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn + memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); + } + +- l2tp_session_set_header_len(session, tunnel->version, tunnel->encap); ++ l2tp_session_set_header_len(session, tunnel->version); + + refcount_set(&session->ref_count, 1); + +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h +index 61f402c3e..91ebf0a3f 100644 +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -261,8 +261,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, + int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); + + /* Transmit path helpers for sending packets over the tunnel socket. 
*/ +-void l2tp_session_set_header_len(struct l2tp_session *session, int version, +- enum l2tp_encap_type encap); ++void l2tp_session_set_header_len(struct l2tp_session *session, int version); + int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb); + + /* Pseudowire management. +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c +index 05314419c..e27e00cb1 100644 +--- a/net/l2tp/l2tp_netlink.c ++++ b/net/l2tp/l2tp_netlink.c +@@ -690,10 +690,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf + session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); + + if (info->attrs[L2TP_ATTR_SEND_SEQ]) { +- struct l2tp_tunnel *tunnel = session->tunnel; +- + session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); +- l2tp_session_set_header_len(session, tunnel->version, tunnel->encap); ++ l2tp_session_set_header_len(session, session->tunnel->version); + } + + if (info->attrs[L2TP_ATTR_LNS_MODE]) +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c +index eebec10a1..6146e4e67 100644 +--- a/net/l2tp/l2tp_ppp.c ++++ b/net/l2tp/l2tp_ppp.c +@@ -1203,8 +1203,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, + po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : + PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; + } +- l2tp_session_set_header_len(session, session->tunnel->version, +- session->tunnel->encap); ++ l2tp_session_set_header_len(session, session->tunnel->version); + break; + + case PPPOL2TP_SO_LNSMODE: +diff --git a/samples/Kconfig b/samples/Kconfig +index 2d5861bf9..b0ddf5f36 100644 +--- a/samples/Kconfig ++++ b/samples/Kconfig +@@ -287,29 +287,6 @@ config SAMPLE_KMEMLEAK + + source "samples/rust/Kconfig" + +-config SAMPLE_HCK +- bool "HCK sample" +- help +- HCK sample +- +-config SAMPLE_HCK_CALL +- bool "HCK call sample" +- depends on SAMPLE_HCK +- help +- HCK call sample +- +-config SAMPLE_HCK_REGISTER +- bool "HCK register sample" +- depends on SAMPLE_HCK +- help +- HCK register sample +- +-config SAMPLE_HCK_REGISTER_ONE +- bool "HCK register one interface sample" +- depends on SAMPLE_HCK +- help +- HCK register sample +- + endif # SAMPLES + + config HAVE_SAMPLE_FTRACE_DIRECT +diff --git a/samples/Makefile b/samples/Makefile +index 485a0c503..0a551c2b3 100644 +--- a/samples/Makefile ++++ b/samples/Makefile +@@ -38,4 +38,3 @@ obj-$(CONFIG_SAMPLE_KMEMLEAK) += kmemleak/ + obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/ + obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/ + obj-$(CONFIG_SAMPLES_RUST) += rust/ +-obj-$(CONFIG_SAMPLE_HCK) += hck/ +diff --git a/samples/hck/Makefile b/samples/hck/Makefile +deleted file mode 100644 +index 1f24a99a4..000000000 +--- a/samples/hck/Makefile ++++ /dev/null +@@ -1,6 +0,0 @@ +-# SPDX-License-Identifier: GPL-2.0-only +-ccflags-y += -I$(src) +- +-obj-$(CONFIG_SAMPLE_HCK_CALL) += call.o +-obj-$(CONFIG_SAMPLE_HCK_REGISTER) += register.o +-obj-$(CONFIG_SAMPLE_HCK_REGISTER_ONE) += register_one.o +\ No newline at end of file +diff --git a/samples/hck/call.c b/samples/hck/call.c +deleted file mode 100644 +index 870d5611c..000000000 +--- a/samples/hck/call.c ++++ /dev/null +@@ -1,24 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Sample Call HCK +- * +- */ +-#include +-#include +-#include +- +-static int __init samplecallhck_init(void) +-{ +- int val = 0; +- +- pr_info("hck sample: call\n"); +- +- CALL_HCK_LITE_HOOK(get_boot_config_lhck, &val); +- pr_info("hck sample val changed: %d\n", val); +- +- CALL_HCK_LITE_HOOK(set_boot_stat_lhck, val); +- pr_info("hck sample val not changed: %d\n", val); +- +- 
return 0; +-} +-late_initcall(samplecallhck_init); +\ No newline at end of file +diff --git a/samples/hck/register.c b/samples/hck/register.c +deleted file mode 100644 +index 407d05f74..000000000 +--- a/samples/hck/register.c ++++ /dev/null +@@ -1,48 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Sample HCK +- * +- */ +-#include +-#include +-#include +-#include +- +-static struct sample_hck_data data = { +- .stat = 999, +- .name = "sample tesst", +-}; +- +-void get_boot_config(int* info) +-{ +- pr_info("hck sample: %s\n", __func__); +- *info = 1; +-} +- +-void set_boot_stat(void* data, int info) +-{ +- pr_info("hck sample: %s\n", __func__); +- info = 2; +- struct sample_hck_data *hdata = data; +- +- pr_info("hck data: stat = %d, name = %s\n", hdata->stat, hdata->name); +-} +- +-static int __init samplehck_init(void) +-{ +- pr_info("hck sample register\n"); +- +- REGISTER_HCK_LITE_HOOK(get_boot_config_lhck, get_boot_config); +- REGISTER_HCK_LITE_DATA_HOOK(set_boot_stat_lhck, set_boot_stat, &data); +- +- return 0; +-} +- +-static void __exit samplehck_exit(void) +-{ +-} +- +-module_init(samplehck_init); +-module_exit(samplehck_exit); +-MODULE_LICENSE("GPL v2"); +-MODULE_AUTHOR("zhujiaxin "); +diff --git a/samples/hck/register_one.c b/samples/hck/register_one.c +deleted file mode 100644 +index 9ea2c0250..000000000 +--- a/samples/hck/register_one.c ++++ /dev/null +@@ -1,31 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* +- * Sample HCK +- * +- */ +-#include +-#include +-#include +- +-void get_boot_power_config(int* info) +-{ +- pr_info("hck sample: intf-2 run\n"); +- *info = 2; +-} +- +-static int __init samplehckone_init(void) +-{ +- pr_info("hck sample register_one\n"); +- REGISTER_HCK_LITE_HOOK(get_boot_config_lhck, get_boot_power_config); +- +- return 0; +-} +- +-static void __exit samplehckone_exit(void) +-{ +-} +- +-module_init(samplehckone_init); +-module_exit(samplehckone_exit); +-MODULE_LICENSE("GPL v2"); +-MODULE_AUTHOR("zhujiaxin "); +diff --git a/security/Kconfig b/security/Kconfig +index a22124596..39af8b869 100644 +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -225,9 +225,7 @@ source "security/loadpin/Kconfig" + source "security/yama/Kconfig" + source "security/safesetid/Kconfig" + source "security/lockdown/Kconfig" +-source "security/xpm/Kconfig" + source "security/landlock/Kconfig" +-source "security/container_escape_detection/Kconfig" + + source "security/integrity/Kconfig" + +diff --git a/security/Makefile b/security/Makefile +index 1fbed3e27..18121f8f8 100644 +--- a/security/Makefile ++++ b/security/Makefile +@@ -4,7 +4,6 @@ + # + + obj-$(CONFIG_KEYS) += keys/ +-subdir-$(CONFIG_SECURITY_CONTAINER_ESCAPE_DETECTION) += container_escape_detection + + # always enable default capabilities + obj-y += commoncap.o +@@ -24,9 +23,7 @@ obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/ + obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/ + obj-$(CONFIG_CGROUPS) += device_cgroup.o + obj-$(CONFIG_BPF_LSM) += bpf/ +-obj-$(CONFIG_SECURITY_XPM) += xpm/ + obj-$(CONFIG_SECURITY_LANDLOCK) += landlock/ +-obj-$(CONFIG_SECURITY_CONTAINER_ESCAPE_DETECTION) += container_escape_detection/ + + # Object integrity file lists + obj-$(CONFIG_INTEGRITY) += integrity/ +diff --git a/security/security.c b/security/security.c +index 110fa188c..b6144833c 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -5351,21 +5351,6 @@ int security_perf_event_write(struct perf_event *event) + } + #endif /* CONFIG_PERF_EVENTS */ + +-/** +- * security_mmap_region() - Check if mmap region is allowed 
+- * @vma: vm area
+- *
+- * Mmap region if allowed.
+- *
+- * Return: Returns 0 if permission is granted.
+- */
+-#ifdef CONFIG_SECURITY_XPM
+-int security_mmap_region(struct vm_area_struct *vma)
+-{
+-	return call_int_hook(mmap_region, 0, vma);
+-}
+-#endif
+-
+ #ifdef CONFIG_IO_URING
+ /**
+  * security_uring_override_creds() - Check if overriding creds is allowed
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 834d0b242..d4a99d98e 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -92,7 +92,6 @@
+ #include
+ #include
+ #include
+-#include <linux/hck/lite_hck_ced.h>
+
+ #include "avc.h"
+ #include "objsec.h"
+@@ -6500,7 +6499,6 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
+ 	}
+
+ 	commit_creds(new);
+-	CALL_HCK_LITE_HOOK(ced_setattr_insert_lhck, current);
+ 	return size;
+
+ abort_change:
+diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
+index 2c4e9fb3c..a3c380775 100644
+--- a/security/selinux/include/classmap.h
++++ b/security/selinux/include/classmap.h
+@@ -256,16 +256,6 @@ const struct security_class_mapping secclass_map[] = {
+ 	  { "override_creds", "sqpoll", "cmd", NULL } },
+ 	{ "user_namespace",
+ 	  { "create", NULL } },
+-	{ "hideaddr",
+-	  { "hide_exec_anon_mem", "hide_exec_anon_mem_debug", NULL } },
+-	{ "jit_memory",
+-	  { "exec_mem_ctrl", NULL} },
+-	{ "ced",
+-	  { "container_escape_check", NULL} },
+-	{ "code_sign",
+-	  { "add_cert_chain", "remove_cert_chain", NULL } },
+-	{ "xpm",
+-	  { "exec_no_sign", "exec_anon_mem", NULL } },
+ 	{ NULL }
+ };
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 3693a6857..09c189761 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -1,8 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <unistd.h>
+ #include <test_progs.h>
+-#include "tailcall_freplace.skel.h"
+-#include "tc_bpf2bpf.skel.h"
+
+ /* test_tailcall_1 checks basic functionality by patching multiple locations
+  * in a single program for a single tail call slot with nop->jmp, jmp->nop
+@@ -886,156 +884,6 @@ static void test_tailcall_bpf2bpf_6(void)
+ 	tailcall_bpf2bpf6__destroy(obj);
+ }
+
+-/* test_tailcall_freplace checks that the freplace prog fails to update the
+- * prog_array map, no matter whether the freplace prog attaches to its target.
+- */ +-static void test_tailcall_freplace(void) +-{ +- struct tailcall_freplace *freplace_skel = NULL; +- struct bpf_link *freplace_link = NULL; +- struct bpf_program *freplace_prog; +- struct tc_bpf2bpf *tc_skel = NULL; +- int prog_fd, tc_prog_fd, map_fd; +- char buff[128] = {}; +- int err, key; +- +- LIBBPF_OPTS(bpf_test_run_opts, topts, +- .data_in = buff, +- .data_size_in = sizeof(buff), +- .repeat = 1, +- ); +- +- freplace_skel = tailcall_freplace__open(); +- if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) +- return; +- +- tc_skel = tc_bpf2bpf__open_and_load(); +- if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) +- goto out; +- +- tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); +- freplace_prog = freplace_skel->progs.entry_freplace; +- err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd, +- "subprog_tc"); +- if (!ASSERT_OK(err, "set_attach_target")) +- goto out; +- +- err = tailcall_freplace__load(freplace_skel); +- if (!ASSERT_OK(err, "tailcall_freplace__load")) +- goto out; +- +- map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); +- prog_fd = bpf_program__fd(freplace_prog); +- key = 0; +- err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); +- ASSERT_ERR(err, "update jmp_table failure"); +- +- freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd, +- "subprog_tc"); +- if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) +- goto out; +- +- err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); +- ASSERT_ERR(err, "update jmp_table failure"); +- +-out: +- bpf_link__destroy(freplace_link); +- tailcall_freplace__destroy(freplace_skel); +- tc_bpf2bpf__destroy(tc_skel); +-} +- +-/* test_tailcall_bpf2bpf_freplace checks the failure that fails to attach a tail +- * callee prog with freplace prog or fails to update an extended prog to +- * prog_array map. +- */ +-static void test_tailcall_bpf2bpf_freplace(void) +-{ +- struct tailcall_freplace *freplace_skel = NULL; +- struct bpf_link *freplace_link = NULL; +- struct tc_bpf2bpf *tc_skel = NULL; +- char buff[128] = {}; +- int prog_fd, map_fd; +- int err, key; +- +- LIBBPF_OPTS(bpf_test_run_opts, topts, +- .data_in = buff, +- .data_size_in = sizeof(buff), +- .repeat = 1, +- ); +- +- tc_skel = tc_bpf2bpf__open_and_load(); +- if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) +- goto out; +- +- prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); +- freplace_skel = tailcall_freplace__open(); +- if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) +- goto out; +- +- err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace, +- prog_fd, "subprog_tc"); +- if (!ASSERT_OK(err, "set_attach_target")) +- goto out; +- +- err = tailcall_freplace__load(freplace_skel); +- if (!ASSERT_OK(err, "tailcall_freplace__load")) +- goto out; +- +- /* OK to attach then detach freplace prog. */ +- +- freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, +- prog_fd, "subprog_tc"); +- if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) +- goto out; +- +- err = bpf_link__destroy(freplace_link); +- if (!ASSERT_OK(err, "destroy link")) +- goto out; +- +- /* OK to update prog_array map then delete element from the map. 
*/
+-
+-	key = 0;
+-	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+-	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+-	if (!ASSERT_OK(err, "update jmp_table"))
+-		goto out;
+-
+-	err = bpf_map_delete_elem(map_fd, &key);
+-	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+-		goto out;
+-
+-	/* Fail to attach a tail callee prog with freplace prog. */
+-
+-	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+-	if (!ASSERT_OK(err, "update jmp_table"))
+-		goto out;
+-
+-	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+-						     prog_fd, "subprog_tc");
+-	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
+-		goto out;
+-
+-	err = bpf_map_delete_elem(map_fd, &key);
+-	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+-		goto out;
+-
+-	/* Fail to update an extended prog to prog_array map. */
+-
+-	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+-						     prog_fd, "subprog_tc");
+-	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+-		goto out;
+-
+-	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+-	if (!ASSERT_ERR(err, "update jmp_table failure"))
+-		goto out;
+-
+-out:
+-	bpf_link__destroy(freplace_link);
+-	tailcall_freplace__destroy(freplace_skel);
+-	tc_bpf2bpf__destroy(tc_skel);
+-}
+-
+ void test_tailcalls(void)
+ {
+ 	if (test__start_subtest("tailcall_1"))
+@@ -1062,8 +910,4 @@ void test_tailcalls(void)
+ 		test_tailcall_bpf2bpf_4(true);
+ 	if (test__start_subtest("tailcall_bpf2bpf_6"))
+ 		test_tailcall_bpf2bpf_6();
+-	if (test__start_subtest("tailcall_freplace"))
+-		test_tailcall_freplace();
+-	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
+-		test_tailcall_bpf2bpf_freplace();
+ }
+diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
+deleted file mode 100644
+index 6713b809d..000000000
+--- a/tools/testing/selftests/bpf/progs/tailcall_freplace.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include <linux/bpf.h>
+-#include <bpf/bpf_helpers.h>
+-
+-struct {
+-	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+-	__uint(max_entries, 1);
+-	__uint(key_size, sizeof(__u32));
+-	__uint(value_size, sizeof(__u32));
+-} jmp_table SEC(".maps");
+-
+-int count = 0;
+-
+-SEC("freplace")
+-int entry_freplace(struct __sk_buff *skb)
+-{
+-	count++;
+-	bpf_tail_call_static(skb, &jmp_table, 0);
+-	return count;
+-}
+-
+-char __license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+deleted file mode 100644
+index d1a57f7d0..000000000
+--- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include <linux/bpf.h>
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_misc.h"
+-
+-__noinline
+-int subprog_tc(struct __sk_buff *skb)
+-{
+-	int ret = 1;
+-
+-	__sink(skb);
+-	__sink(ret);
+-	return ret;
+-}
+-
+-SEC("tc")
+-int entry_tc(struct __sk_buff *skb)
+-{
+-	return subprog_tc(skb);
+-}
+-
+-char __license[] SEC("license") = "GPL";
+-- 
+2.34.1
+
diff --git a/bsp/meta-hisilicon/recipes-kernel/linux/linux-hieulerpi1.inc b/bsp/meta-hisilicon/recipes-kernel/linux/linux-hieulerpi1.inc
index cbc52799b27184dc38fdfaf429b5cda8f75a9d7e..98741f761deb85b9dfb4b25040ed56c0de44bc3a 100644
--- a/bsp/meta-hisilicon/recipes-kernel/linux/linux-hieulerpi1.inc
+++ b/bsp/meta-hisilicon/recipes-kernel/linux/linux-hieulerpi1.inc
@@ -18,7 +18,7 @@ PV = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', '6.6-tag928', '5.10-tag
SRC_URI:append = " \ ${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', ' \ - file://HiEuler-driver/linux/0001-oh_ss928-oee.patch \ + file://patch/0001-oh_ss928-oee.patch \ file://patch/0002-oee-mod-compat-for-6.6.86.patch \ file://dtbs/ss928-pi-kernel6.dts \ file://dtbs/ss928-pi-kernel6_mcs.dts \ @@ -51,12 +51,6 @@ OPENEULER_KERNEL_CONFIG = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', 'f # KBUILD_DEFCONFIG = "${@bb.utils.contains('DISTRO_FEATURES', 'kernel6', '', 'hieulerpi1_defconfig', d)}" KBUILD_DEFCONFIG = "" -do_compile:prepend(){ - # avoid issue of '+' vermagic of module in-tree of yocto - sed -i '/echo "+"/d' ${STAGING_KERNEL_DIR}/scripts/setlocalversion -} - - MCS_SUFFIX = "${@bb.utils.contains('MCS_FEATURES', 'openamp', '_mcs', '', d)}" # add method to do_compile task to produce bootable Image do_compile:append(){