diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.c b/drivers/infiniband/hw/hns/hns_roce_bond.c
index f766673351895a87953859efda17613c2d84f10b..1f5eb0eca246e606cb69dbb0c8320122726be0c1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_bond.c
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.c
@@ -606,7 +606,7 @@ static struct hns_roce_die_info *alloc_die_info(int bus_num)
 static void dealloc_die_info(struct hns_roce_die_info *die_info, u8 bus_num)
 {
 	xa_erase(&roce_bond_xa, bus_num);
-	kvfree(die_info);
+	kfree(die_info);
 }
 
 static int alloc_bond_id(struct hns_roce_bond_group *bond_grp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 59f5abd868e59c8af491a7754f58c9e3196c797c..a18d379d401cee42b9f02d0fdf7a44441f805e19 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -190,7 +190,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	u64 mtts[MTT_MIN_COUNT] = {};
 	int ret;
 
-	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+	ret = hns_roce_mtr_find(hr_dev, hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
 	if (ret) {
 		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
 		return ret;
@@ -211,7 +211,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	}
 
 	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
-				  hns_roce_get_mtr_ba(&hr_cq->mtr));
+				  hns_roce_get_mtr_ba(hr_cq->mtr));
 	if (ret)
 		goto err_xa;
 
@@ -261,24 +261,19 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int ret;
-
-	hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL);
-	if (!hr_cq->mtr_node)
-		return -ENOMEM;
+	int ret = 0;
 
 	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
 	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
 	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
 	buf_attr.region_count = 1;
 
-	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
-				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
-				  udata, addr);
-	if (ret) {
+	hr_cq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
+					 hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
+					 udata, addr);
+	if (IS_ERR(hr_cq->mtr)) {
+		ret = PTR_ERR(hr_cq->mtr);
 		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
 	}
 
 	return ret;
@@ -286,13 +281,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 
 static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
-	if (hr_cq->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, &hr_cq->mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
-	}
+	if (hr_cq->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, hr_cq->mtr);
 }
 
 static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -346,7 +338,8 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 		hns_roce_db_unmap_user(uctx, &hr_cq->db,
 				       hr_cq->delayed_destroy_flag);
 	} else {
-		hns_roce_free_db(hr_dev, &hr_cq->db);
+		hns_roce_free_db(hr_dev, &hr_cq->db,
+				 hr_cq->delayed_destroy_flag);
 	}
 }
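The CQ conversion above (and the identical EQ/QP/SRQ/MR ones below) hinges on the kernel's ERR_PTR convention: hns_roce_mtr_create() now returns either a valid pointer or an errno encoded into the pointer value, so no output parameter is needed. A minimal standalone sketch of that encoding, with invented names (mtr_create, fail) and the usual -4095..-1 error window assumed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)  { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top MAX_ERRNO addresses are reserved for error codes */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct mtr { int dummy; };

/* allocator that reports failure through its return value alone */
static struct mtr *mtr_create(int fail)
{
	struct mtr *mtr;

	if (fail)
		return ERR_PTR(-ENOMEM);
	mtr = calloc(1, sizeof(*mtr));
	if (!mtr)
		return ERR_PTR(-ENOMEM);
	return mtr;
}

int main(void)
{
	struct mtr *mtr = mtr_create(1);

	if (IS_ERR(mtr))
		printf("create failed: %s\n", strerror((int)-PTR_ERR(mtr)));
	else
		free(mtr);
	return 0;
}

One payoff visible in alloc_cq_buf(): the failed case leaves nothing half-constructed behind, so the old kvfree(mtr_node)/NULL-ing cleanup disappears entirely.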
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 5adc2f1fa31920c42991d7e51316e218f0cf0b60..d9b217891b93f724c109d685c6d34b4f1a794ab2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,6 +12,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
 {
 	unsigned long page_addr = virt & PAGE_MASK;
 	struct hns_roce_user_db_page *page;
+	struct ib_umem *umem;
 	unsigned int offset;
 	int ret = 0;
 
@@ -29,32 +30,33 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
 
 	refcount_set(&page->refcount, 1);
 	page->user_virt = page_addr;
-	page->umem = ib_umem_get(context->ibucontext.device, page_addr,
-				 PAGE_SIZE, 0);
-	if (IS_ERR(page->umem)) {
-		ret = PTR_ERR(page->umem);
+	page->db_node = kvzalloc(sizeof(*page->db_node), GFP_KERNEL);
+	if (!page->db_node) {
+		ret = -ENOMEM;
 		goto err_page;
 	}
-	page->umem_node = kvmalloc(sizeof(*page->umem_node), GFP_KERNEL);
-	if (!page->umem_node) {
-		ret = -ENOMEM;
-		goto err_umem;
+
+	umem = ib_umem_get(context->ibucontext.device, page_addr, PAGE_SIZE, 0);
+	if (IS_ERR(umem)) {
+		ret = PTR_ERR(umem);
+		goto err_dbnode;
 	}
+	page->db_node->umem = umem;
 
 	list_add(&page->list, &context->page_list);
 
 found:
 	offset = virt - page_addr;
-	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
-	db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
+	db->dma = sg_dma_address(page->db_node->umem->sgt_append.sgt.sgl) + offset;
+	db->virt_addr = sg_virt(page->db_node->umem->sgt_append.sgt.sgl) + offset;
 	db->u.user_page = page;
 	refcount_inc(&page->refcount);
 
 	mutex_unlock(&context->page_mutex);
 
 	return 0;
 
-err_umem:
-	ib_umem_release(page->umem);
+err_dbnode:
+	kvfree(page->db_node);
 err_page:
 	kfree(page);
 err_out:
@@ -68,17 +70,20 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
 			    bool delayed_unmap_flag)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(context->ibucontext.device);
+	struct hns_roce_db_pg_node *db_node = db->u.user_page->db_node;
 
 	mutex_lock(&context->page_mutex);
 
+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
 	refcount_dec(&db->u.user_page->refcount);
 	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
 		list_del(&db->u.user_page->list);
-		if (delayed_unmap_flag) {
-			hns_roce_add_unfree_umem(db->u.user_page, hr_dev);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
 		} else {
-			ib_umem_release(db->u.user_page->umem);
-			kvfree(db->u.user_page->umem_node);
+			ib_umem_release(db_node->umem);
+			kvfree(db_node);
 		}
 		kfree(db->u.user_page);
 	}
@@ -90,6 +95,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
 					struct device *dma_device)
 {
 	struct hns_roce_db_pgdir *pgdir;
+	dma_addr_t db_dma;
+	u32 *page;
 
 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
 	if (!pgdir)
@@ -99,14 +106,24 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
 		       HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
 	pgdir->bits[0] = pgdir->order0;
 	pgdir->bits[1] = pgdir->order1;
-	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					 &pgdir->db_dma, GFP_KERNEL);
-	if (!pgdir->page) {
-		kfree(pgdir);
-		return NULL;
-	}
+	pgdir->db_node = kvzalloc(sizeof(*pgdir->db_node), GFP_KERNEL);
+	if (!pgdir->db_node)
+		goto err_node;
+
+	page = dma_alloc_coherent(dma_device, PAGE_SIZE, &db_dma, GFP_KERNEL);
+	if (!page)
+		goto err_dma;
+
+	pgdir->db_node->kdb.page = page;
+	pgdir->db_node->kdb.db_dma = db_dma;
 
 	return pgdir;
+
+err_dma:
+	kvfree(pgdir->db_node);
+err_node:
+	kfree(pgdir);
+	return NULL;
 }
 
 static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
@@ -133,8 +150,8 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
 	db->u.pgdir	= pgdir;
 	db->index	= i;
-	db->db_record	= pgdir->page + db->index;
-	db->dma		= pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
+	db->db_record	= pgdir->db_node->kdb.page + db->index;
+	db->dma		= pgdir->db_node->kdb.db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
 	db->order	= order;
 
 	return 0;
@@ -169,13 +186,17 @@ int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
 	return ret;
 }
 
-void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+		      bool delayed_unmap_flag)
 {
+	struct hns_roce_db_pg_node *db_node = db->u.pgdir->db_node;
 	unsigned long o;
 	unsigned long i;
 
 	mutex_lock(&hr_dev->pgdir_mutex);
 
+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
 	o = db->order;
 	i = db->index;
 
@@ -189,9 +210,15 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
 
 	if (bitmap_full(db->u.pgdir->order1,
 			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
-		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
-				  db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
+		} else {
+			dma_free_coherent(hr_dev->dev, PAGE_SIZE,
+					  db_node->kdb.page,
+					  db_node->kdb.db_dma);
+			kvfree(db_node);
+		}
 		kfree(db->u.pgdir);
 	}
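After this rework, one node type (hns_roce_db_pg_node) describes both a user doorbell page (umem-backed) and a kernel one (DMA-coherent page), and the free path dispatches on which backing is set. A small userspace analogue of that unified free, with free()/malloc() standing in for ib_umem_release() and dma_free_coherent():

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct db_pg_node {
	void *umem;			/* stand-in for struct ib_umem * */
	struct {
		unsigned int *page;	/* stand-in for the coherent page */
	} kdb;
	bool delayed_unmap_flag;
};

static void free_db_node(struct db_pg_node *node)
{
	if (node->umem)
		free(node->umem);	/* models ib_umem_release() */
	else
		free(node->kdb.page);	/* models dma_free_coherent() */
	free(node);
}

int main(void)
{
	struct db_pg_node *user = calloc(1, sizeof(*user));
	struct db_pg_node *kern = calloc(1, sizeof(*kern));

	user->umem = malloc(16);
	kern->kdb.page = malloc(4096);
	free_db_node(user);
	free_db_node(kern);
	printf("both node flavors freed through one path\n");
	return 0;
}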
diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c
index eb408130329bded5fd851132f5a0e7d2484680d4..53184e1c71b7567d3b793530efff5f53e44c1cd6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_dca.c
+++ b/drivers/infiniband/hw/hns/hns_roce_dca.c
@@ -327,7 +327,7 @@ int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
 	for (i = 0; i < page_count; i++)
 		pages[i] = hr_dev->dca_safe_page;
 
-	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
+	ret = hns_roce_mtr_map(hr_dev, hr_qp->mtr, pages, page_count);
 	if (ret)
 		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
 			  ret);
@@ -341,7 +341,7 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
 			  int page_count)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_mtr *mtr = &hr_qp->mtr;
+	struct hns_roce_mtr *mtr = hr_qp->mtr;
 	int ret;
 
 	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
@@ -701,7 +701,7 @@ static u32 alloc_buf_from_dca_mem(struct hns_roce_qp *hr_qp,
 	buf_id = HNS_DCA_TO_BUF_ID(hr_qp->qpn, hr_qp->dca_cfg.attach_count);
 	/* Assign pages from free pages */
-	unit_pages = hr_qp->mtr.hem_cfg.is_direct ? buf_pages : 1;
+	unit_pages = hr_qp->mtr->hem_cfg.is_direct ? buf_pages : 1;
 	alloc_pages = assign_dca_pages(ctx, buf_id, buf_pages, unit_pages);
 	if (buf_pages != alloc_pages) {
 		if (alloc_pages > 0)
@@ -899,7 +899,7 @@ static int free_buffer_pages_proc(struct dca_mem *mem, int index, void *param)
 	}
 
 	for (; changed && i < mem->page_count; i++)
-		if (dca_page_is_free(state))
+		if (dca_page_is_free(&mem->states[i]))
 			free_pages++;
 
 	if (changed && free_pages == mem->page_count)
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
index 7023c3cefaa7dc12fe9ddebcb5a2685d66276b5b..756e43cace431b6194bbcc8edf280ec27ab770d8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_debugfs.c
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
@@ -486,9 +486,14 @@ void hns_roce_register_uctx_debugfs(struct hns_roce_dev *hr_dev,
 			       hr_dev, uctx);
 }
 
-void hns_roce_unregister_uctx_debugfs(struct hns_roce_ucontext *uctx)
+void hns_roce_unregister_uctx_debugfs(struct hns_roce_dev *hr_dev,
+				      struct hns_roce_ucontext *uctx)
 {
-	debugfs_remove_recursive(uctx->dca_dbgfs.root);
+	struct hns_dca_debugfs *dca_dbgfs = &hr_dev->dbgfs.dca_root;
+	char name[DCA_CTX_PID_LEN];
+
+	snprintf(name, sizeof(name), "%d", uctx->pid);
+	debugfs_lookup_and_remove(name, dca_dbgfs->root);
 }
 
 /* debugfs for device */
@@ -508,6 +513,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev)
 void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev)
 {
 	debugfs_remove_recursive(hr_dev->dbgfs.root);
+	memset(&hr_dev->dbgfs, 0, sizeof(hr_dev->dbgfs));
 }
 
 /* debugfs for hns module */
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.h b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
index 7fff3aa98ad27589f66f47320e030761dd5be63c..f5bdc1e99e0c24c9d61274b2faf1897c57ae1d5b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_debugfs.h
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
@@ -47,6 +47,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev);
 void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev);
 void hns_roce_register_uctx_debugfs(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_ucontext *uctx);
-void hns_roce_unregister_uctx_debugfs(struct hns_roce_ucontext *uctx);
+void hns_roce_unregister_uctx_debugfs(struct hns_roce_dev *hr_dev,
+				      struct hns_roce_ucontext *uctx);
 
 #endif
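The debugfs change stops caching a dentry in the ucontext and instead re-derives the per-context directory by its PID-formatted name at teardown, using debugfs_lookup_and_remove(). A hedged, minimal module sketch of that create-by-name / remove-by-name pairing (the directory names and DCA_CTX_PID_LEN sizing are assumptions for illustration, not this driver's layout):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/sched.h>

static struct dentry *root;

static int __init demo_init(void)
{
	char name[16];	/* analogous to name[DCA_CTX_PID_LEN] above */

	root = debugfs_create_dir("demo_root", NULL);

	/* register: child directory named after the current pid */
	snprintf(name, sizeof(name), "%d", current->pid);
	debugfs_create_dir(name, root);

	/* unregister: re-derive the name, drop only that child */
	debugfs_lookup_and_remove(name, root);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The memset() of hr_dev->dbgfs after debugfs_remove_recursive() keeps stale dentry pointers from being reused if the device re-registers.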
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 765bfc3d55793a9e912a6db9d956e5712605f961..9b81f2974c82d2464e8c8c41e782ad6290280f6b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -368,6 +368,7 @@ struct hns_roce_mtr {
 	struct ib_umem		*umem; /* user space buffer */
 	struct hns_roce_buf	*kmem; /* kernel space buffer */
 	struct hns_roce_hem_cfg	hem_cfg; /* config for hardware addressing */
+	struct list_head	node; /* list node for delay-destruction */
 };
 
 /* DCA config */
@@ -393,11 +394,6 @@ struct hns_roce_mw {
 	u32			pbl_buf_pg_sz;
 };
 
-struct hns_roce_mtr_node {
-	struct hns_roce_mtr	mtr;
-	struct list_head	list;
-};
-
 struct hns_roce_mr {
 	struct ib_mr		ibmr;
 	u64			iova; /* MR's virtual original addr */
@@ -408,11 +404,10 @@ struct hns_roce_mr {
 	int			enabled; /* MR's active status */
 	int			type; /* MR's register type */
 	u32			pbl_hop_num; /* multi-hop number */
-	struct hns_roce_mtr	pbl_mtr;
+	struct hns_roce_mtr	*pbl_mtr;
 	u32			npages;
 	dma_addr_t		*page_list;
 	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node	*mtr_node;
 };
 
 struct hns_roce_mr_table {
@@ -470,26 +465,29 @@ struct hns_roce_buf {
 	unsigned int		page_shift;
 };
 
+struct hns_roce_db_pg_node {
+	struct list_head	list;
+	struct ib_umem		*umem;
+	struct {
+		u32		*page;
+		dma_addr_t	db_dma;
+	} kdb;
+	bool			delayed_unmap_flag;
+};
+
 struct hns_roce_db_pgdir {
 	struct list_head	list;
 	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
 	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
 	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
-	u32			*page;
-	dma_addr_t		db_dma;
-};
-
-struct hns_roce_umem_node {
-	struct ib_umem		*umem;
-	struct list_head	list;
+	struct hns_roce_db_pg_node *db_node;
 };
 
 struct hns_roce_user_db_page {
 	struct list_head	list;
-	struct ib_umem		*umem;
 	unsigned long		user_virt;
 	refcount_t		refcount;
-	struct hns_roce_umem_node *umem_node;
+	struct hns_roce_db_pg_node *db_node;
 };
 
 struct hns_roce_db {
@@ -506,7 +504,7 @@ struct hns_roce_db {
 
 struct hns_roce_cq {
 	struct ib_cq			ib_cq;
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
 	struct hns_roce_db		db;
 	u32				flags;
 	spinlock_t			lock;
@@ -525,16 +523,14 @@ struct hns_roce_cq {
 	int				is_armed; /* cq is armed */
 	struct list_head		node; /* all armed cqs are on a list */
 	bool				delayed_destroy_flag;
-	struct hns_roce_mtr_node	*mtr_node;
 };
 
 struct hns_roce_idx_que {
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
 	u32				entry_shift;
 	unsigned long			*bitmap;
 	u32				head;
 	u32				tail;
-	struct hns_roce_mtr_node	*mtr_node;
 };
 
 struct hns_roce_srq {
@@ -551,7 +547,7 @@ struct hns_roce_srq {
 	refcount_t		refcount;
 	struct completion	free;
 
-	struct hns_roce_mtr	buf_mtr;
+	struct hns_roce_mtr	*buf_mtr;
 
 	u64		       *wrid;
 	struct hns_roce_idx_que idx_que;
@@ -561,7 +557,6 @@ struct hns_roce_srq {
 	struct hns_roce_db	rdb;
 	u32			cap_flags;
 	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node	*mtr_node;
 };
 
 struct hns_roce_uar_table {
@@ -703,7 +698,7 @@ struct hns_roce_qp {
 	enum ib_sig_type	sq_signal_bits;
 	struct hns_roce_wq	sq;
 
-	struct hns_roce_mtr	mtr;
+	struct hns_roce_mtr	*mtr;
 	struct hns_roce_dca_cfg	dca_cfg;
 
 	u32			buff_size;
@@ -742,7 +737,6 @@ struct hns_roce_qp {
 	u8			priority;
 	enum hns_roce_cong_type	cong_type;
 	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node	*mtr_node;
 	spinlock_t		flush_lock;
 	struct hns_roce_dip	*dip;
 };
@@ -805,7 +799,7 @@ struct hns_roce_eq {
 	int				coalesce;
 	int				arm_st;
 	int				hop_num;
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
 	u16				eq_max_cnt;
 	u32				eq_period;
 	int				shift;
@@ -1162,8 +1156,8 @@ struct hns_roce_dev {
 	struct list_head	mtr_unfree_list; /* list of unfree mtr on this dev */
 	struct mutex		mtr_unfree_list_mutex; /* protect mtr_unfree_list */
-	struct list_head	umem_unfree_list; /* list of unfree umem on this dev */
-	struct mutex		umem_unfree_list_mutex; /* protect umem_unfree_list */
+	struct list_head	db_unfree_list; /* list of unfree db on this dev */
+	struct mutex		db_unfree_list_mutex; /* protect db_unfree_list */
 	void			*dca_safe_buf;
 	dma_addr_t		dca_safe_page;
@@ -1345,10 +1339,11 @@ static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
 
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		      u32 offset, u64 *mtt_buf, int mtt_max);
-int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			struct hns_roce_buf_attr *buf_attr,
-			unsigned int page_shift, struct ib_udata *udata,
-			unsigned long user_addr);
+struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev,
+					 struct hns_roce_buf_attr *buf_attr,
+					 unsigned int ba_page_shift,
+					 struct ib_udata *udata,
+					 unsigned long user_addr);
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_mtr *mtr);
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
@@ -1445,7 +1440,8 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
 			    bool delayed_unmap_flag);
 int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
 		      int order);
-void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+		      bool delayed_unmap_flag);
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
@@ -1468,11 +1464,10 @@ struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 				size_t length,
 				enum hns_roce_mmap_type mmap_type);
-void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page,
-			      struct hns_roce_dev *hr_dev);
-void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev);
-void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos,
-			     struct hns_roce_dev *hr_dev,
+void hns_roce_add_unfree_db(struct hns_roce_db_pg_node *db_node,
+			    struct hns_roce_dev *hr_dev);
+void hns_roce_free_unfree_db(struct hns_roce_dev *hr_dev);
+void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_mtr *mtr);
void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev);
 int hns_roce_alloc_scc_param(struct hns_roce_dev *hr_dev);
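Embedding `struct list_head node` directly in hns_roce_mtr is what lets the hns_roce_mtr_node wrapper (and its extra kvmalloc at every allocation site) disappear: list membership costs nothing until delayed destruction is actually requested, and container_of()-style arithmetic recovers the object from its node. A standalone sketch of that intrusive-list idea with a simplified singly linked node:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {
	struct list_node *next;
};

struct mtr {
	int id;
	struct list_node node;	/* embedded, like mtr->node in the patch */
};

int main(void)
{
	struct mtr a = { .id = 1 }, b = { .id = 2 };
	struct list_node head = { &a.node };
	struct list_node *it;

	a.node.next = &b.node;
	b.node.next = NULL;

	/* walk nodes, recover the enclosing object from each */
	for (it = head.next; it; it = it->next)
		printf("mtr id %d\n", container_of(it, struct mtr, node)->id);
	return 0;
}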
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index be15b709c59c1bdfec19941519e4a666e8b37d38..e4839faac8391743da673ca24447a4598844507d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -152,7 +152,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 	hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
 
 	/* Data structure reuse may lead to confusion */
-	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
+	pbl_ba = mr->pbl_mtr->hem_cfg.root_ba;
 	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
 	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
 
@@ -163,7 +163,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 	hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
 
 	hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
-		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));
 	hr_reg_clear(fseg, FRMR_BLK_MODE);
 }
 
@@ -972,12 +972,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 
 static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
 {
-	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
+	return hns_roce_buf_offset(srq->buf_mtr->kmem, n << srq->wqe_shift);
 }
 
 static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
 {
-	return hns_roce_buf_offset(idx_que->mtr.kmem,
+	return hns_roce_buf_offset(idx_que->mtr->kmem,
 				   n << idx_que->entry_shift);
 }
 
@@ -3487,7 +3487,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
 	int ret;
 	int i;
 
-	ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+	ret = hns_roce_mtr_find(hr_dev, mr->pbl_mtr, 0, pages,
 				min_t(int, ARRAY_SIZE(pages), mr->npages));
 	if (ret) {
 		ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
@@ -3498,7 +3498,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
 	for (i = 0; i < ARRAY_SIZE(pages); i++)
 		pages[i] >>= MPT_PBL_BUF_ADDR_S;
 
-	pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+	pbl_ba = hns_roce_get_mtr_ba(mr->pbl_mtr);
 
 	mpt_entry->pbl_size = cpu_to_le32(mr->npages);
 	mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
@@ -3511,7 +3511,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
 	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
 	hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
 	hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
-		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));
 
 	return 0;
 }
@@ -3554,7 +3554,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
 
 	hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
 	hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
-		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift));
 	hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
 
 	return set_mtpt_pbl(hr_dev, mpt_entry, mr);
@@ -3598,7 +3598,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
 {
-	dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+	dma_addr_t pbl_ba = hns_roce_get_mtr_ba(mr->pbl_mtr);
 	struct hns_roce_v2_mpt_entry *mpt_entry;
 
 	mpt_entry = mb_buf;
@@ -3617,9 +3617,9 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
 
 	hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
 	hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
-		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift));
 	hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
-		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));
 
 	mpt_entry->pbl_size = cpu_to_le32(mr->npages);
 
@@ -3757,7 +3757,7 @@ static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
 
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
+	return hns_roce_buf_offset(hr_cq->mtr->kmem, n * hr_cq->cqe_size);
 }
 
 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
@@ -3869,9 +3869,9 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 	hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
 		     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
 	hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
-		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.ba_pg_shift));
 	hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
-		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.buf_pg_shift));
 	hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S);
 	hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S);
 	hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
@@ -4727,7 +4727,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
 	int ret;
 
 	/* Search qp buf's mtts */
-	ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.wqe_offset, mtts,
+	ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->rq.wqe_offset, mtts,
 				ARRAY_SIZE(mtts));
 	if (hr_qp->rq.wqe_cnt && ret) {
 		ibdev_err(&hr_dev->ib_dev,
@@ -4736,7 +4736,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
 		return ret;
 	}
 
-	wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
+	wqe_sge_ba = hns_roce_get_mtr_ba(hr_qp->mtr);
 
 	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
 	qpc_mask->wqe_sge_ba = 0;
@@ -4767,11 +4767,11 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
 	hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
 
 	hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
-		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.ba_pg_shift));
 	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
 
 	hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
-		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.buf_pg_shift));
 	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
 
 	context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
@@ -4805,7 +4805,7 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
 	int ret;
 
 	/* search qp buf's mtts */
-	ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.wqe_offset,
+	ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->sq.wqe_offset,
 				&sq_cur_blk, 1);
 	if (ret) {
 		ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
@@ -4813,7 +4813,7 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
 		return ret;
 	}
 	if (hr_qp->sge.sge_cnt > 0) {
-		ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+		ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr,
 					hr_qp->sge.wqe_offset, &sge_cur_blk, 1);
 		if (ret) {
 			ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
@@ -6177,7 +6177,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
 	int ret;
 
 	/* Get physical address of idx que buf */
-	ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
+	ret = hns_roce_mtr_find(hr_dev, idx_que->mtr, 0, mtts_idx,
 				ARRAY_SIZE(mtts_idx));
 	if (ret) {
 		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
@@ -6185,7 +6185,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
 		return ret;
 	}
 
-	dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
+	dma_handle_idx = hns_roce_get_mtr_ba(idx_que->mtr);
 
 	hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
 		     to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@@ -6195,9 +6195,9 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
 		     upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
 
 	hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
-		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(idx_que->mtr->hem_cfg.ba_pg_shift));
 	hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
-		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(idx_que->mtr->hem_cfg.buf_pg_shift));
 
 	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
 		     to_hr_hw_page_addr(mtts_idx[0]));
@@ -6224,7 +6224,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
 	memset(ctx, 0, sizeof(*ctx));
 
 	/* Get the physical address of srq buf */
-	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+	ret = hns_roce_mtr_find(hr_dev, srq->buf_mtr, 0, mtts_wqe,
 				ARRAY_SIZE(mtts_wqe));
 	if (ret) {
 		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
@@ -6232,7 +6232,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
 		return ret;
 	}
 
-	dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
+	dma_handle_wqe = hns_roce_get_mtr_ba(srq->buf_mtr);
 
 	hr_reg_write(ctx, SRQC_SRQ_ST, 1);
 	hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
@@ -6254,9 +6254,9 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
 		     upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
 
 	hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
-		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.ba_pg_shift));
 	hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
-		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.buf_pg_shift));
 
 	if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
 		hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
@@ -6609,7 +6609,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
 {
 	struct hns_roce_aeqe *aeqe;
 
-	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
+	aeqe = hns_roce_buf_offset(eq->mtr->kmem,
 				   (eq->cons_index & (eq->entries - 1)) *
 				   eq->eqe_size);
 
@@ -6677,7 +6677,7 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
 {
 	struct hns_roce_ceqe *ceqe;
 
-	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
+	ceqe = hns_roce_buf_offset(eq->mtr->kmem,
 				   (eq->cons_index & (eq->entries - 1)) *
 				   eq->eqe_size);
 
@@ -6917,7 +6917,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
 
 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 {
-	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
+	hns_roce_mtr_destroy(hr_dev, eq->mtr);
 }
 
 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
@@ -6964,14 +6964,14 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
 	init_eq_config(hr_dev, eq);
 
 	/* if not multi-hop, eqe buffer only use one trunk */
-	ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
+	ret = hns_roce_mtr_find(hr_dev, eq->mtr, 0, eqe_ba,
 				ARRAY_SIZE(eqe_ba));
 	if (ret) {
 		dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
 		return ret;
 	}
 
-	bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
+	bt_ba = hns_roce_get_mtr_ba(eq->mtr);
 
 	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
 	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
@@ -6981,9 +6981,9 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
 	hr_reg_write(eqc, EQC_EQN, eq->eqn);
 	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
 	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
-		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
+		     to_hr_hw_page_shift(eq->mtr->hem_cfg.ba_pg_shift));
 	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
-		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
+		     to_hr_hw_page_shift(eq->mtr->hem_cfg.buf_pg_shift));
 	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
 	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
 
@@ -7016,7 +7016,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 {
 	struct hns_roce_buf_attr buf_attr = {};
-	int err;
+	int err = 0;
 
 	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
 		eq->hop_num = 0;
@@ -7028,11 +7028,12 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 	buf_attr.region[0].hopnum = eq->hop_num;
 	buf_attr.region_count = 1;
 
-	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
-				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
-				  0);
-	if (err)
+	eq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
+				      hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT,
+				      NULL, 0);
+	if (IS_ERR(eq->mtr)) {
+		err = PTR_ERR(eq->mtr);
 		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);
+	}
 
 	return err;
}
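Every hns_roce_hw_v2.c hunk above is the same mechanical edit: the mtr member became a pointer, so `.` accesses become `->` and `&foo->mtr` arguments become `foo->mtr`. The one semantic difference worth noting is that a not-yet-created translate region is now observable as NULL rather than as a zeroed embedded struct. A tiny standalone sketch of that refactor (names invented for illustration, not driver API):

#include <stdio.h>
#include <stdlib.h>

struct hem_cfg { unsigned int buf_pg_shift; };
struct mtr { struct hem_cfg hem_cfg; };

struct eq {
	struct mtr *mtr;	/* was: struct mtr mtr; */
};

static unsigned int eq_buf_pg_shift(const struct eq *eq)
{
	if (!eq->mtr)		/* an embedded member could never be absent */
		return 0;
	return eq->mtr->hem_cfg.buf_pg_shift;	/* was: eq->mtr.hem_cfg... */
}

int main(void)
{
	struct eq eq = { 0 };

	printf("before create: %u\n", eq_buf_pg_shift(&eq));
	eq.mtr = calloc(1, sizeof(*eq.mtr));
	eq.mtr->hem_cfg.buf_pg_shift = 12;
	printf("after create:  %u\n", eq_buf_pg_shift(&eq));
	free(eq.mtr);
	return 0;
}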
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8f110e64e601be7153418dfe658d9267fff3da14..a77dc1d70a1cad950e0e7a510cb4fb64008d37f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -632,21 +632,20 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
+	hns_roce_put_cq_bankid_for_uctx(context);
+	hns_roce_unregister_uctx_debugfs(hr_dev, context);
+
 	mutex_lock(&hr_dev->uctx_list_mutex);
 	list_del(&context->list);
 	mutex_unlock(&hr_dev->uctx_list_mutex);
 
+	hns_roce_unregister_udca(hr_dev, context);
+	hns_roce_dealloc_reset_entry(context);
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
 		mutex_destroy(&context->page_mutex);
 
-	hns_roce_put_cq_bankid_for_uctx(context);
-	hns_roce_unregister_uctx_debugfs(context);
-
-	hns_roce_unregister_udca(hr_dev, context);
-
 	hns_roce_dealloc_uar_entry(context);
-	hns_roce_dealloc_reset_entry(context);
 
 	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
@@ -1235,7 +1234,7 @@ static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
 	hns_roce_cleanup_dca(hr_dev);
 	hns_roce_cleanup_bitmap(hr_dev);
 
-	mutex_destroy(&hr_dev->umem_unfree_list_mutex);
+	mutex_destroy(&hr_dev->db_unfree_list_mutex);
 	mutex_destroy(&hr_dev->mtr_unfree_list_mutex);
 	mutex_destroy(&hr_dev->uctx_list_mutex);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
@@ -1264,8 +1263,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	INIT_LIST_HEAD(&hr_dev->mtr_unfree_list);
 	mutex_init(&hr_dev->mtr_unfree_list_mutex);
 
-	INIT_LIST_HEAD(&hr_dev->umem_unfree_list);
-	mutex_init(&hr_dev->umem_unfree_list_mutex);
+	INIT_LIST_HEAD(&hr_dev->db_unfree_list);
+	mutex_init(&hr_dev->db_unfree_list_mutex);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -1309,7 +1308,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
 		mutex_destroy(&hr_dev->pgdir_mutex);
-	mutex_destroy(&hr_dev->umem_unfree_list_mutex);
+	mutex_destroy(&hr_dev->db_unfree_list_mutex);
 	mutex_destroy(&hr_dev->mtr_unfree_list_mutex);
 	mutex_destroy(&hr_dev->uctx_list_mutex);
 
@@ -1495,15 +1494,15 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
 void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup)
 {
+	hns_roce_unregister_debugfs(hr_dev);
 	hns_roce_unregister_device(hr_dev, bond_cleanup);
 	hns_roce_dealloc_scc_param(hr_dev);
-	hns_roce_unregister_debugfs(hr_dev);
 	hns_roce_free_dca_safe_buf(hr_dev);
 
 	if (hr_dev->hw->hw_exit)
 		hr_dev->hw->hw_exit(hr_dev);
 
-	hns_roce_free_unfree_umem(hr_dev);
+	hns_roce_free_unfree_db(hr_dev);
 	hns_roce_free_unfree_mtr(hr_dev);
 	hns_roce_teardown_hca(hr_dev);
 	hns_roce_cleanup_hem(hr_dev);
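The hns_roce_exit() and dealloc_ucontext() reorderings both apply the usual discipline that teardown runs in reverse of setup: debugfs entries reference the device (and the per-context DCA state), so they must be torn down while those objects are still alive. A trivial sketch of the rule, with invented stage names:

#include <stdio.h>

static void setup(const char *what)    { printf("setup    %s\n", what); }
static void teardown(const char *what) { printf("teardown %s\n", what); }

int main(void)
{
	/* init order: device first, then debugfs on top of it */
	setup("device");
	setup("debugfs");

	/*
	 * exit order: debugfs first, while the device it references is
	 * still alive - mirroring hns_roce_unregister_debugfs() moving
	 * ahead of hns_roce_unregister_device() in this patch.
	 */
	teardown("debugfs");
	teardown("device");
	return 0;
}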
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 84d246fd13b618fbab085212e2bdc7cab9661b6e..b3a1e5b4cd8d15425531781e788fd4680fb5a3fd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -97,11 +97,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	bool is_fast = mr->type == MR_TYPE_FRMR;
 	struct hns_roce_buf_attr buf_attr = {};
-	int err;
-
-	mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL);
-	if (!mr->mtr_node)
-		return -ENOMEM;
+	int err = 0;
 
 	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
 	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
@@ -117,17 +113,15 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
 	buf_attr.adaptive = !is_fast;
 	buf_attr.type = MTR_PBL;
 
-	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
-				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
-				  udata, start);
-	if (err) {
+	mr->pbl_mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
+					  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
+					  udata, start);
+	if (IS_ERR(mr->pbl_mtr)) {
+		err = PTR_ERR(mr->pbl_mtr);
 		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
-		kvfree(mr->mtr_node);
-		mr->mtr_node = NULL;
 		return err;
 	}
 
-	mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+	mr->npages = mr->pbl_mtr->hem_cfg.buf_pg_count;
 	mr->pbl_hop_num = buf_attr.region[0].hopnum;
 
 	return err;
@@ -135,13 +129,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
 
 static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
 {
-	if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) {
-		hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, &mr->pbl_mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
-		kvfree(mr->mtr_node);
-		mr->mtr_node = NULL;
-	}
+	if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA)
+		hns_roce_add_unfree_mtr(hr_dev, mr->pbl_mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr);
 }
 
 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
@@ -214,18 +205,22 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
 	struct hns_roce_mr *mr;
-	int ret;
+	int ret = -ENOMEM;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
+	mr->pbl_mtr = kvzalloc(sizeof(*mr->pbl_mtr), GFP_KERNEL);
+	if (!mr->pbl_mtr)
+		goto err_mtr;
+
 	mr->type = MR_TYPE_DMA;
 	mr->pd = to_hr_pd(pd)->pdn;
 	mr->access = acc;
 
 	/* Allocate memory region key */
-	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
+	hns_roce_hem_list_init(&mr->pbl_mtr->hem_list);
 	ret = alloc_mr_key(hr_dev, mr);
 	if (ret)
 		goto err_free;
@@ -241,6 +236,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
 	free_mr_key(hr_dev, mr);
 
 err_free:
+	kvfree(mr->pbl_mtr);
+err_mtr:
 	kfree(mr);
 	return ERR_PTR(ret);
 }
@@ -444,7 +441,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 
-	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
+	if (likely(mr->npages < mr->pbl_mtr->hem_cfg.buf_pg_count)) {
 		mr->page_list[mr->npages++] = addr;
 		return 0;
 	}
@@ -459,7 +456,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
-	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+	struct hns_roce_mtr *mtr = mr->pbl_mtr;
 	int ret, sg_num = 0;
 
 	if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
@@ -468,7 +465,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		return sg_num;
 
 	mr->npages = 0;
-	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+	mr->page_list = kvcalloc(mr->pbl_mtr->hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
 		return sg_num;
@@ -476,7 +473,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
 	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+			  mr->npages, mr->pbl_mtr->hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
 
@@ -489,7 +486,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
 		sg_num = 0;
 	} else {
-		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
+		mr->pbl_mtr->hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
 	}
 
 err_page_list:
@@ -1146,20 +1143,25 @@ static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 /**
  * hns_roce_mtr_create - Create hns memory translate region.
  *
  * @hr_dev: RoCE device struct pointer
- * @mtr: memory translate region
  * @buf_attr: buffer attribute for creating mtr
  * @ba_page_shift: page shift for multi-hop base address table
  * @udata: user space context, if it's NULL, means kernel space
  * @user_addr: userspace virtual address to start at
  */
-int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			struct hns_roce_buf_attr *buf_attr,
-			unsigned int ba_page_shift, struct ib_udata *udata,
-			unsigned long user_addr)
+struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev,
+					 struct hns_roce_buf_attr *buf_attr,
+					 unsigned int ba_page_shift,
+					 struct ib_udata *udata,
+					 unsigned long user_addr)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_mtr *mtr;
 	int ret;
 
+	mtr = kvzalloc(sizeof(*mtr), GFP_KERNEL);
+	if (!mtr)
+		return ERR_PTR(-ENOMEM);
+
 	/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
 	 * to finish the MTT configuration.
 	 */
@@ -1171,7 +1173,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
-		return ret;
+		goto err_out;
 	}
 
 	ret = get_best_page_shift(hr_dev, mtr, buf_attr);
@@ -1194,7 +1196,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	}
 
 	if (buf_attr->mtt_only)
-		return 0;
+		return mtr;
 
 	/* Write buffer's dma address to MTT */
 	ret = mtr_map_bufs(hr_dev, mtr);
@@ -1203,14 +1205,15 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
 		goto err_alloc_mtt;
 	}
 
-	return 0;
+	return mtr;
 
 err_alloc_mtt:
 	mtr_free_mtt(hr_dev, mtr);
 err_init_buf:
 	mtr_free_bufs(hr_dev, mtr);
-
-	return ret;
+err_out:
+	kvfree(mtr);
+	return ERR_PTR(ret);
 }
 
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
@@ -1220,76 +1223,50 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 
 	/* free buffers */
 	mtr_free_bufs(hr_dev, mtr);
+
+	kvfree(mtr);
 }
 
-static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr)
-{
-	struct list_head *new_head, *old_head;
-	int i, j;
-
-	memcpy(new_mtr, old_mtr, sizeof(*old_mtr));
-
-	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
-		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) {
-			new_head = &new_mtr->hem_list.mid_bt[i][j];
-			old_head = &old_mtr->hem_list.mid_bt[i][j];
-			list_replace(old_head, new_head);
-		}
-
-	new_head = &new_mtr->hem_list.root_bt;
-	old_head = &old_mtr->hem_list.root_bt;
-	list_replace(old_head, new_head);
-
-	new_head = &new_mtr->hem_list.btm_bt;
-	old_head = &old_mtr->hem_list.btm_bt;
-	list_replace(old_head, new_head);
-}
-
-void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos,
-			     struct hns_roce_dev *hr_dev,
+void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_mtr *mtr)
 {
-	hns_roce_copy_mtr(&pos->mtr, mtr);
-
 	mutex_lock(&hr_dev->mtr_unfree_list_mutex);
-	list_add_tail(&pos->list, &hr_dev->mtr_unfree_list);
+	list_add_tail(&mtr->node, &hr_dev->mtr_unfree_list);
 	mutex_unlock(&hr_dev->mtr_unfree_list_mutex);
 }
 
 void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_mtr_node *pos, *next;
+	struct hns_roce_mtr *mtr, *next;
 
 	mutex_lock(&hr_dev->mtr_unfree_list_mutex);
-	list_for_each_entry_safe(pos, next, &hr_dev->mtr_unfree_list, list) {
-		list_del(&pos->list);
-		hns_roce_mtr_destroy(hr_dev, &pos->mtr);
-		kvfree(pos);
+	list_for_each_entry_safe(mtr, next, &hr_dev->mtr_unfree_list, node) {
+		list_del(&mtr->node);
+		hns_roce_mtr_destroy(hr_dev, mtr);
 	}
 	mutex_unlock(&hr_dev->mtr_unfree_list_mutex);
 }
 
-void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page,
-			      struct hns_roce_dev *hr_dev)
+void hns_roce_add_unfree_db(struct hns_roce_db_pg_node *db_node,
+			    struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_umem_node *pos = user_page->umem_node;
-
-	pos->umem = user_page->umem;
-
-	mutex_lock(&hr_dev->umem_unfree_list_mutex);
-	list_add_tail(&pos->list, &hr_dev->umem_unfree_list);
-	mutex_unlock(&hr_dev->umem_unfree_list_mutex);
+	mutex_lock(&hr_dev->db_unfree_list_mutex);
+	list_add_tail(&db_node->list, &hr_dev->db_unfree_list);
+	mutex_unlock(&hr_dev->db_unfree_list_mutex);
 }
 
-void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev)
+void hns_roce_free_unfree_db(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_umem_node *pos, *next;
+	struct hns_roce_db_pg_node *pos, *next;
 
-	mutex_lock(&hr_dev->umem_unfree_list_mutex);
-	list_for_each_entry_safe(pos, next, &hr_dev->umem_unfree_list, list) {
+	mutex_lock(&hr_dev->db_unfree_list_mutex);
+	list_for_each_entry_safe(pos, next, &hr_dev->db_unfree_list, list) {
 		list_del(&pos->list);
-		ib_umem_release(pos->umem);
+		if (pos->umem)
+			ib_umem_release(pos->umem);
+		else
+			dma_free_coherent(hr_dev->dev, PAGE_SIZE,
+					  pos->kdb.page, pos->kdb.db_dma);
 		kvfree(pos);
 	}
-	mutex_unlock(&hr_dev->umem_unfree_list_mutex);
+	mutex_unlock(&hr_dev->db_unfree_list_mutex);
 }
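With the mtr heap-allocated, the delayed-destruction machinery above reduces to parking the object on a mutex-protected list and draining that list once at device exit; the old hns_roce_copy_mtr() deep-copy (and its careful list_replace() of every hem_list head) is no longer needed. A userspace analogue of that deferred-free list, using pthread for the mutex and a simple singly linked list in place of list_head (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *unfree_list;
static pthread_mutex_t unfree_lock = PTHREAD_MUTEX_INITIALIZER;

/* like hns_roce_add_unfree_mtr(): park instead of freeing */
static void add_unfree(struct node *n)
{
	pthread_mutex_lock(&unfree_lock);
	n->next = unfree_list;
	unfree_list = n;
	pthread_mutex_unlock(&unfree_lock);
}

/* like hns_roce_free_unfree_mtr(): drain at device exit */
static void free_unfree(void)
{
	pthread_mutex_lock(&unfree_lock);
	while (unfree_list) {
		struct node *n = unfree_list;

		unfree_list = n->next;
		free(n);
	}
	pthread_mutex_unlock(&unfree_lock);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	add_unfree(n);	/* destroy requested, memory deliberately kept */
	free_unfree();	/* exit path: really release it */
	printf("deferred free drained\n");
	return 0;
}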
 */
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e0ba0ab891dd649c11a10b069f070f0cb702d1b4..90da0080a93e5d2ee60c9e6e44d3d56bc74707f1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -835,11 +835,7 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			 struct ib_udata *udata, unsigned long addr)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	int ret;
-
-	hr_qp->mtr_node = kvmalloc(sizeof(*hr_qp->mtr_node), GFP_KERNEL);
-	if (!hr_qp->mtr_node)
-		return -ENOMEM;
+	int ret = 0;
 
 	if (dca_en) {
 		/* DCA must be enabled after the buffer attr is configured. */
@@ -847,8 +843,6 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		if (ret) {
 			ibdev_err(ibdev, "failed to enable DCA, ret = %d.\n",
 				  ret);
-			kvfree(hr_qp->mtr_node);
-			hr_qp->mtr_node = NULL;
 			return ret;
 		}
 
@@ -862,32 +856,29 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
 	}
 
-	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, buf_attr,
-				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
-				  udata, addr);
-	if (ret) {
+	hr_qp->mtr = hns_roce_mtr_create(hr_dev, buf_attr,
+					 PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+					 udata, addr);
+	if (IS_ERR(hr_qp->mtr)) {
+		ret = PTR_ERR(hr_qp->mtr);
 		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
-		if (dca_en)
-			hns_roce_disable_dca(hr_dev, hr_qp, udata);
-		kvfree(hr_qp->mtr_node);
-		hr_qp->mtr_node = NULL;
 	} else if (dca_en) {
 		ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp);
 	}
 
+	if (ret && dca_en)
+		hns_roce_disable_dca(hr_dev, hr_qp, udata);
+
 	return ret;
 }
 
 static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			 struct ib_udata *udata)
 {
-	if (hr_qp->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, &hr_qp->mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
-		kvfree(hr_qp->mtr_node);
-		hr_qp->mtr_node = NULL;
-	}
+	if (hr_qp->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, hr_qp->mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, hr_qp->mtr);
 
 	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH)
 		hns_roce_disable_dca(hr_dev, hr_qp, udata);
@@ -1141,7 +1132,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		qp_user_mmap_entry_remove(hr_qp);
 	} else {
 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
-			hns_roce_free_db(hr_dev, &hr_qp->rdb);
+			hns_roce_free_db(hr_dev, &hr_qp->rdb,
+					 hr_qp->delayed_destroy_flag);
 	}
 }
 
@@ -1710,7 +1702,7 @@ static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
 	if (unlikely(hr_qp->dca_cfg.buf_list))
 		return dca_buf_offset(&hr_qp->dca_cfg, offset);
 	else
-		return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
+		return hns_roce_buf_offset(hr_qp->mtr->kmem, offset);
 }
 
 void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 34b8e4f859616700107dfc75ab315f0acd01d45e..d99658ddf689ffdaf62ea85a2bb4f2dbdbde12fe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -143,11 +143,11 @@ int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
 		goto err;
 
 	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
-				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
+				       hr_mr->pbl_mtr->hem_cfg.ba_pg_shift))
 		goto err;
 
 	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
-				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
+				       hr_mr->pbl_mtr->hem_cfg.buf_pg_shift))
 		goto err;
 
 	nla_nest_end(msg, table_attr);
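Both hns_roce_free_db() and hns_roce_db_unmap_user() now accumulate the delay request with `db_node->delayed_unmap_flag |= delayed_unmap_flag` rather than acting on the caller's flag directly: a doorbell page is shared, so if any user ever asked for delayed unmapping, the eventual last user must honor it even when its own flag is false. A compact standalone sketch of that sticky-flag accumulation:

#include <stdbool.h>
#include <stdio.h>

struct db_node {
	bool delayed_unmap_flag;
};

static void unmap_one(struct db_node *n, bool delayed, bool last_user)
{
	n->delayed_unmap_flag |= delayed;	/* sticky across users */

	if (!last_user)
		return;

	if (n->delayed_unmap_flag)
		printf("park on db_unfree_list\n");
	else
		printf("free immediately\n");
}

int main(void)
{
	struct db_node n = { false };

	unmap_one(&n, true, false);	/* an early user asked for delay */
	unmap_one(&n, false, true);	/* the last user still honors it */
	return 0;
}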
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 0ab99aa9f9d50e9d6c359b6cc5e85c508cb3792d..1262e9535c2290b680c3897de5be5c0d8073e1a8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -172,11 +172,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	struct hns_roce_idx_que *idx_que = &srq->idx_que;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int ret;
-
-	idx_que->mtr_node = kvmalloc(sizeof(*idx_que->mtr_node), GFP_KERNEL);
-	if (!idx_que->mtr_node)
-		return -ENOMEM;
+	int ret = 0;
 
 	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
 
@@ -186,13 +182,14 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
 	buf_attr.region_count = 1;
 
-	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
-				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
-				  udata, addr);
-	if (ret) {
+	idx_que->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
+					   hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
+					   udata, addr);
+	if (IS_ERR(idx_que->mtr)) {
+		ret = PTR_ERR(idx_que->mtr);
 		ibdev_err(ibdev, "failed to alloc SRQ idx mtr, ret = %d.\n",
 			  ret);
-		goto err_kvmalloc;
+		return ret;
 	}
 
 	if (!udata) {
@@ -209,10 +206,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	return 0;
 
 err_idx_mtr:
-	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
-err_kvmalloc:
-	kvfree(idx_que->mtr_node);
-	idx_que->mtr_node = NULL;
+	hns_roce_mtr_destroy(hr_dev, idx_que->mtr);
 
 	return ret;
 }
@@ -223,13 +217,10 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 
 	bitmap_free(idx_que->bitmap);
 	idx_que->bitmap = NULL;
 
-	if (srq->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, &idx_que->mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
-		kvfree(idx_que->mtr_node);
-		idx_que->mtr_node = NULL;
-	}
+	if (srq->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, idx_que->mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, idx_que->mtr);
 }
 
 static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
@@ -238,11 +229,7 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int ret;
-
-	srq->mtr_node = kvmalloc(sizeof(*srq->mtr_node), GFP_KERNEL);
-	if (!srq->mtr_node)
-		return -ENOMEM;
+	int ret = 0;
 
 	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
@@ -254,14 +241,13 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
 	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
 	buf_attr.region_count = 1;
 
-	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
-				  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
-				  udata, addr);
-	if (ret) {
+	srq->buf_mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
+					   hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
+					   udata, addr);
+	if (IS_ERR(srq->buf_mtr)) {
+		ret = PTR_ERR(srq->buf_mtr);
 		ibdev_err(ibdev, "failed to alloc SRQ buf mtr, ret = %d.\n",
 			  ret);
-		kvfree(srq->mtr_node);
-		srq->mtr_node = NULL;
 	}
 
 	return ret;
@@ -270,13 +256,10 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
 
 static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_srq *srq)
 {
-	if (srq->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, &srq->buf_mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
-		kvfree(srq->mtr_node);
-		srq->mtr_node = NULL;
-	}
+	if (srq->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, srq->buf_mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, srq->buf_mtr);
 }
 
 static int alloc_srq_wrid(struct hns_roce_srq *srq)
@@ -448,7 +431,7 @@ static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 		hns_roce_db_unmap_user(uctx, &srq->rdb,
 				       srq->delayed_destroy_flag);
 	} else {
-		hns_roce_free_db(hr_dev, &srq->rdb, srq->delayed_destroy_flag);
+		hns_roce_free_db(hr_dev, &srq->rdb, srq->delayed_destroy_flag);
 	}
 }
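Condensing the call sites across all the files above, the reworked allocation contract reads as follows; this is a kernel-style summary sketch assembled from the hunks in this patch, not a new code path:

/*
 *	mtr = hns_roce_mtr_create(hr_dev, &buf_attr, ba_pg_shift,
 *				  udata, addr);
 *	if (IS_ERR(mtr))
 *		return PTR_ERR(mtr);	// nothing half-built to undo
 *	...
 *	if (delayed_destroy_flag)
 *		hns_roce_add_unfree_mtr(hr_dev, mtr);	// drained at hns_roce_exit()
 *	else
 *		hns_roce_mtr_destroy(hr_dev, mtr);	// also kvfree()s mtr itself
 */

Because hns_roce_mtr_destroy() now owns the final kvfree(), every caller sheds both the companion mtr_node allocation and its error-path cleanup, which is where most of the diffstat reduction in this patch comes from.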