Diffstat (limited to 'drivers/block/ublk_drv.c')
-rw-r--r--	drivers/block/ublk_drv.c	407
1 files changed, 194 insertions, 213 deletions
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 0c74a41a6753..2c715df63f23 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -155,12 +155,13 @@ struct ublk_uring_cmd_pdu {
  */
 #define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
 
+union ublk_io_buf {
+	__u64 addr;
+	struct ublk_auto_buf_reg auto_reg;
+};
+
 struct ublk_io {
-	/* userspace buffer address from io cmd */
-	union {
-		__u64 addr;
-		struct ublk_auto_buf_reg buf;
-	};
+	union ublk_io_buf buf;
 
 	unsigned int flags;
 	int res;
@@ -203,15 +204,12 @@ struct ublk_queue {
 	bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
 	spinlock_t		cancel_lock;
 	struct ublk_device *dev;
-	struct ublk_io ios[];
+	struct ublk_io ios[] __counted_by(q_depth);
 };
 
 struct ublk_device {
 	struct gendisk		*ub_disk;
 
-	char	*__queues;
-
-	unsigned int	queue_size;
 	struct ublksrv_ctrl_dev_info	dev_info;
 
 	struct blk_mq_tag_set	tag_set;
@@ -239,6 +237,8 @@ struct ublk_device {
 	bool canceling;
 	pid_t 	ublksrv_tgid;
 	struct delayed_work	exit_work;
+
+	struct ublk_queue	*queues[];
 };
 
 /* header of ublk_params */
@@ -265,7 +265,7 @@ static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
 	return ub->dev_info.flags & UBLK_F_ZONED;
 }
 
-static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
+static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_ZONED;
 }
@@ -368,7 +368,7 @@ static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
 }
 
 static int ublk_report_zones(struct gendisk *disk, sector_t sector,
-		unsigned int nr_zones, report_zones_cb cb, void *data)
+		unsigned int nr_zones, struct blk_report_zones_args *args)
 {
 	struct ublk_device *ub = disk->private_data;
 	unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
@@ -431,7 +431,7 @@ free_req:
 			if (!zone->len)
 				break;
 
-			ret = cb(zone, i, data);
+			ret = disk_report_zone(disk, zone, i, args);
 			if (ret)
 				goto out;
 
@@ -499,7 +499,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
 	iod->op_flags = ublk_op | ublk_req_build_flags(req);
 	iod->nr_sectors = blk_rq_sectors(req);
 	iod->start_sector = blk_rq_pos(req);
-	iod->addr = io->addr;
+	iod->addr = io->buf.addr;
 
 	return BLK_STS_OK;
 }
@@ -781,7 +781,7 @@ static noinline void ublk_put_device(struct ublk_device *ub)
 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
 		int qid)
 {
-	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+	return dev->queues[qid];
 }
 
 static inline bool ublk_rq_has_data(const struct request *rq)
@@ -914,73 +914,6 @@ static const struct block_device_operations ub_fops = {
 	.report_zones	= ublk_report_zones,
 };
 
-#define UBLK_MAX_PIN_PAGES	32
-
-struct ublk_io_iter {
-	struct page *pages[UBLK_MAX_PIN_PAGES];
-	struct bio *bio;
-	struct bvec_iter iter;
-};
-
-/* return how many pages are copied */
-static void ublk_copy_io_pages(struct ublk_io_iter *data,
-		size_t total, size_t pg_off, int dir)
-{
-	unsigned done = 0;
-	unsigned pg_idx = 0;
-
-	while (done < total) {
-		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
-		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
-				(unsigned)(PAGE_SIZE - pg_off));
-		void *bv_buf = bvec_kmap_local(&bv);
-		void *pg_buf = kmap_local_page(data->pages[pg_idx]);
-
-		if (dir == ITER_DEST)
-			memcpy(pg_buf + pg_off, bv_buf, bytes);
-		else
-			memcpy(bv_buf, pg_buf + pg_off, bytes);
-
-		kunmap_local(pg_buf);
-		kunmap_local(bv_buf);
-
-		/* advance page array */
-		pg_off += bytes;
-		if (pg_off == PAGE_SIZE) {
-			pg_idx += 1;
-			pg_off = 0;
-		}
-
-		done += bytes;
-
-		/* advance bio */
-		bio_advance_iter_single(data->bio, &data->iter, bytes);
-		if (!data->iter.bi_size) {
-			data->bio = data->bio->bi_next;
-			if (data->bio == NULL)
-				break;
-			data->iter = data->bio->bi_iter;
-		}
-	}
-}
-
-static bool ublk_advance_io_iter(const struct request *req,
-		struct ublk_io_iter *iter, unsigned int offset)
-{
-	struct bio *bio = req->bio;
-
-	for_each_bio(bio) {
-		if (bio->bi_iter.bi_size > offset) {
-			iter->bio = bio;
-			iter->iter = bio->bi_iter;
-			bio_advance_iter(iter->bio, &iter->iter, offset);
-			return true;
-		}
-		offset -= bio->bi_iter.bi_size;
-	}
-	return false;
-}
-
 /*
  * Copy data between request pages and io_iter, and 'offset'
  * is the start point of linear offset of request.
@@ -988,34 +921,35 @@ static bool ublk_advance_io_iter(const struct request *req,
 static size_t ublk_copy_user_pages(const struct request *req,
 		unsigned offset, struct iov_iter *uiter, int dir)
 {
-	struct ublk_io_iter iter;
+	struct req_iterator iter;
+	struct bio_vec bv;
 	size_t done = 0;
 
-	if (!ublk_advance_io_iter(req, &iter, offset))
-		return 0;
+	rq_for_each_segment(bv, req, iter) {
+		void *bv_buf;
+		size_t copied;
 
-	while (iov_iter_count(uiter) && iter.bio) {
-		unsigned nr_pages;
-		ssize_t len;
-		size_t off;
-		int i;
-
-		len = iov_iter_get_pages2(uiter, iter.pages,
-				iov_iter_count(uiter),
-				UBLK_MAX_PIN_PAGES, &off);
-		if (len <= 0)
-			return done;
-
-		ublk_copy_io_pages(&iter, len, off, dir);
-		nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
-		for (i = 0; i < nr_pages; i++) {
-			if (dir == ITER_DEST)
-				set_page_dirty(iter.pages[i]);
-			put_page(iter.pages[i]);
+		if (offset >= bv.bv_len) {
+			offset -= bv.bv_len;
+			continue;
 		}
-		done += len;
-	}
+
+		bv.bv_offset += offset;
+		bv.bv_len -= offset;
+		bv_buf = bvec_kmap_local(&bv);
+
+		if (dir == ITER_DEST)
+			copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+		else
+			copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+
+		kunmap_local(bv_buf);
+
+		done += copied;
+		if (copied < bv.bv_len)
+			break;
+
+		offset = 0;
+	}
 
 	return done;
 }
@@ -1030,8 +964,9 @@ static inline bool ublk_need_unmap_req(const struct request *req)
 	       (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
 }
 
-static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
-		const struct ublk_io *io)
+static unsigned int ublk_map_io(const struct ublk_queue *ubq,
+				const struct request *req,
+				const struct ublk_io *io)
 {
 	const unsigned int rq_bytes = blk_rq_bytes(req);
 
@@ -1047,13 +982,13 @@ static unsigned int ublk_map_io(const struct ublk_queue *ubq,
 		struct iov_iter iter;
 		const int dir = ITER_DEST;
 
-		import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
+		import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
 		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
 }
 
-static int ublk_unmap_io(bool need_map,
+static unsigned int ublk_unmap_io(bool need_map,
 		const struct request *req,
 		const struct ublk_io *io)
 {
@@ -1068,7 +1003,7 @@ static int ublk_unmap_io(bool need_map,
 		WARN_ON_ONCE(io->res > rq_bytes);
 
-		import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
+		import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
 		return ublk_copy_user_pages(req, 0, &iter, dir);
 	}
 	return rq_bytes;
 }
@@ -1134,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
 	iod->op_flags = ublk_op | ublk_req_build_flags(req);
 	iod->nr_sectors = blk_rq_sectors(req);
 	iod->start_sector = blk_rq_pos(req);
-	iod->addr = io->addr;
+	iod->addr = io->buf.addr;
 
 	return BLK_STS_OK;
 }
@@ -1233,45 +1168,65 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
 }
 
 static void
-ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag)
 {
-	unsigned tag = io - ubq->ios;
 	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
 
 	iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
 }
 
-static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
-			      struct ublk_io *io, unsigned int issue_flags)
+enum auto_buf_reg_res {
+	AUTO_BUF_REG_FAIL,
+	AUTO_BUF_REG_FALLBACK,
+	AUTO_BUF_REG_OK,
+};
+
+static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq,
+				      struct request *req, struct ublk_io *io,
+				      struct io_uring_cmd *cmd,
+				      enum auto_buf_reg_res res)
+{
+	if (res == AUTO_BUF_REG_OK) {
+		io->task_registered_buffers = 1;
+		io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+		io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+	}
+	ublk_init_req_ref(ubq, io);
+	__ublk_prep_compl_io_cmd(io, req);
+}
+
+static enum auto_buf_reg_res
+__ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+		       struct ublk_io *io, struct io_uring_cmd *cmd,
+		       unsigned int issue_flags)
 {
 	int ret;
 
-	ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
-				      io->buf.index, issue_flags);
+	ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+				      io->buf.auto_reg.index, issue_flags);
 	if (ret) {
-		if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
-			ublk_auto_buf_reg_fallback(ubq, io);
-			return true;
+		if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+			ublk_auto_buf_reg_fallback(ubq, req->tag);
+			return AUTO_BUF_REG_FALLBACK;
 		}
 		blk_mq_end_request(req, BLK_STS_IOERR);
-		return false;
+		return AUTO_BUF_REG_FAIL;
 	}
 
-	io->task_registered_buffers = 1;
-	io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
-	io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
-	return true;
+	return AUTO_BUF_REG_OK;
 }
 
-static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
-				   struct request *req, struct ublk_io *io,
-				   unsigned int issue_flags)
+static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+				 struct ublk_io *io, struct io_uring_cmd *cmd,
+				 unsigned int issue_flags)
 {
-	ublk_init_req_ref(ubq, io);
-	if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
-		return ublk_auto_buf_reg(ubq, req, io, issue_flags);
+	enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd,
+							   issue_flags);
 
-	return true;
+	if (res != AUTO_BUF_REG_FAIL) {
+		ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res);
+		io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);
+	}
 }
 
 static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
@@ -1302,10 +1257,9 @@ static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
 	return true;
 }
 
-static void ublk_dispatch_req(struct ublk_queue *ubq,
-			      struct request *req,
-			      unsigned int issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req)
 {
+	unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
 	int tag = req->tag;
 	struct ublk_io *io = &ubq->ios[tag];
 
@@ -1344,17 +1298,21 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
 	if (!ublk_start_io(ubq, req, io))
 		return;
 
-	if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+	if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+		ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags);
+	} else {
+		ublk_init_req_ref(ubq, io);
 		ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+	}
 }
 
-static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
-			   unsigned int issue_flags)
+static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 	struct ublk_queue *ubq = pdu->ubq;
 
-	ublk_dispatch_req(ubq, pdu->req, issue_flags);
+	ublk_dispatch_req(ubq, pdu->req);
 }
 
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -1366,9 +1324,9 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 	io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
 }
 
-static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
-				unsigned int issue_flags)
+static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 	struct request *rq = pdu->req_list;
 	struct request *next;
@@ -1376,7 +1334,7 @@ static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
 	do {
 		next = rq->rq_next;
 		rq->rq_next = NULL;
-		ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
+		ublk_dispatch_req(rq->mq_hctx->driver_data, rq);
 		rq = next;
 	} while (rq);
 }
@@ -1537,7 +1495,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
 		 */
 		io->flags &= UBLK_IO_FLAG_CANCELED;
 		io->cmd = NULL;
-		io->addr = 0;
+		io->buf.addr = 0;
 
 		/*
 		 * old task is PF_EXITING, put it now
@@ -2098,13 +2056,16 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
 
 static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
 {
-	io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+	struct ublk_auto_buf_reg buf;
 
-	if (io->buf.reserved0 || io->buf.reserved1)
+	buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+	if (buf.reserved0 || buf.reserved1)
 		return -EINVAL;
 
-	if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+	if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
 		return -EINVAL;
+	io->buf.auto_reg = buf;
 	return 0;
 }
@@ -2126,7 +2087,7 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
 		 * this ublk request gets stuck.
 		 */
 		if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
-			*buf_idx = io->buf.index;
+			*buf_idx = io->buf.auto_reg.index;
 	}
 
 	return ublk_set_auto_buf_reg(io, cmd);
 }
@@ -2154,7 +2115,7 @@ ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
 	if (ublk_dev_support_auto_buf_reg(ub))
 		return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
 
-	io->addr = buf_addr;
+	io->buf.addr = buf_addr;
 	return 0;
 }
@@ -2272,39 +2233,41 @@ static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
 	return 0;
 }
 
-static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
-		struct ublk_io *io, __u64 buf_addr)
+static int __ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+			struct ublk_io *io)
 {
-	int ret = 0;
-
-	/*
-	 * When handling FETCH command for setting up ublk uring queue,
-	 * ub->mutex is the innermost lock, and we won't block for handling
-	 * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
-	 */
-	mutex_lock(&ub->mutex);
 	/* UBLK_IO_FETCH_REQ is only allowed before dev is setup */
-	if (ublk_dev_ready(ub)) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (ublk_dev_ready(ub))
+		return -EBUSY;
 
 	/* allow each command to be FETCHed at most once */
-	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (io->flags & UBLK_IO_FLAG_ACTIVE)
+		return -EINVAL;
 
 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
 
 	ublk_fill_io_cmd(io, cmd);
-	ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
-	if (ret)
-		goto out;
-
 	WRITE_ONCE(io->task, get_task_struct(current));
 	ublk_mark_io_ready(ub);
-out:
+
+	return 0;
+}
+
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+		      struct ublk_io *io, __u64 buf_addr)
+{
+	int ret;
+
+	/*
+	 * When handling FETCH command for setting up ublk uring queue,
+	 * ub->mutex is the innermost lock, and we won't block for handling
+	 * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+	 */
+	mutex_lock(&ub->mutex);
+	ret = __ublk_fetch(cmd, ub, io);
+	if (!ret)
+		ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
 	mutex_unlock(&ub->mutex);
 	return ret;
 }
@@ -2351,7 +2314,7 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
 	 */
 	io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
 	/* update iod->addr because ublksrv may have passed a new io buffer */
-	ublk_get_iod(ubq, req->tag)->addr = io->addr;
+	ublk_get_iod(ubq, req->tag)->addr = io->buf.addr;
 	pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
 			__func__, ubq->q_id, req->tag, io->flags,
 			ublk_get_iod(ubq, req->tag)->addr);
@@ -2367,7 +2330,7 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
 	u16 buf_idx = UBLK_INVALID_BUF_IDX;
 	struct ublk_device *ub = cmd->file->private_data;
 	struct ublk_queue *ubq;
-	struct ublk_io *io;
+	struct ublk_io *io = NULL;
 	u32 cmd_op = cmd->cmd_op;
 	u16 q_id = READ_ONCE(ub_src->q_id);
 	u16 tag = READ_ONCE(ub_src->tag);
@@ -2488,7 +2451,7 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
 
  out:
 	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
-			__func__, cmd_op, tag, ret, io->flags);
+			__func__, cmd_op, tag, ret, io ? io->flags : 0);
 	return ret;
 }
@@ -2523,9 +2486,10 @@ fail_put:
 	return NULL;
 }
 
-static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
-				 unsigned int issue_flags)
+static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+	struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
 	int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
 
 	if (ret != -EIOCBQUEUED)
@@ -2575,9 +2539,6 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
 	size_t buf_off;
 	u16 tag, q_id;
 
-	if (!ub)
-		return ERR_PTR(-EACCES);
-
 	if (!user_backed_iter(iter))
 		return ERR_PTR(-EACCES);
 
@@ -2603,9 +2564,6 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
 	if (!req)
 		return ERR_PTR(-EINVAL);
 
-	if (!req->mq_hctx || !req->mq_hctx->driver_data)
-		goto fail;
-
 	if (!ublk_check_ubuf_dir(req, dir))
 		goto fail;
 
@@ -2662,9 +2620,13 @@ static const struct file_operations ublk_ch_fops = {
 
 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
 {
-	int size = ublk_queue_cmd_buf_size(ub);
-	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-	int i;
+	struct ublk_queue *ubq = ub->queues[q_id];
+	int size, i;
+
+	if (!ubq)
+		return;
+
+	size = ublk_queue_cmd_buf_size(ub);
 
 	for (i = 0; i < ubq->q_depth; i++) {
 		struct ublk_io *io = &ubq->ios[i];
@@ -2676,57 +2638,76 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
 
 	if (ubq->io_cmd_buf)
 		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+
+	kvfree(ubq);
+	ub->queues[q_id] = NULL;
+}
+
+static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id)
+{
+	unsigned int cpu;
+
+	/* Find first CPU mapped to this queue */
+	for_each_possible_cpu(cpu) {
+		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
+			return cpu_to_node(cpu);
+	}
+
+	return NUMA_NO_NODE;
 }
 
 static int ublk_init_queue(struct ublk_device *ub, int q_id)
 {
-	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+	int depth = ub->dev_info.queue_depth;
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
-	void *ptr;
+	struct ublk_queue *ubq;
+	struct page *page;
+	int numa_node;
 	int size;
 
+	/* Determine NUMA node based on queue's CPU affinity */
+	numa_node = ublk_get_queue_numa_node(ub, q_id);
+
+	/* Allocate queue structure on local NUMA node */
+	ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL,
+			    numa_node);
+	if (!ubq)
+		return -ENOMEM;
+
 	spin_lock_init(&ubq->cancel_lock);
 	ubq->flags = ub->dev_info.flags;
 	ubq->q_id = q_id;
-	ubq->q_depth = ub->dev_info.queue_depth;
+	ubq->q_depth = depth;
 	size = ublk_queue_cmd_buf_size(ub);
 
-	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
-	if (!ptr)
+	/* Allocate I/O command buffer on local NUMA node */
+	page = alloc_pages_node(numa_node, gfp_flags, get_order(size));
+	if (!page) {
+		kvfree(ubq);
 		return -ENOMEM;
+	}
+	ubq->io_cmd_buf = page_address(page);
 
-	ubq->io_cmd_buf = ptr;
+	ub->queues[q_id] = ubq;
 	ubq->dev = ub;
 	return 0;
 }
 
 static void ublk_deinit_queues(struct ublk_device *ub)
 {
-	int nr_queues = ub->dev_info.nr_hw_queues;
 	int i;
 
-	if (!ub->__queues)
-		return;
-
-	for (i = 0; i < nr_queues; i++)
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
 		ublk_deinit_queue(ub, i);
-	kvfree(ub->__queues);
 }
 
 static int ublk_init_queues(struct ublk_device *ub)
 {
-	int nr_queues = ub->dev_info.nr_hw_queues;
-	int depth = ub->dev_info.queue_depth;
-	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
-	int i, ret = -ENOMEM;
+	int i, ret;
 
-	ub->queue_size = ubq_size;
-	ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
-	if (!ub->__queues)
-		return ret;
-
-	for (i = 0; i < nr_queues; i++) {
-		if (ublk_init_queue(ub, i))
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		ret = ublk_init_queue(ub, i);
+		if (ret)
 			goto fail;
 	}
 
@@ -3128,7 +3109,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 		goto out_unlock;
 
 	ret = -ENOMEM;
-	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+	ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL);
 	if (!ub)
 		goto out_unlock;
 	mutex_init(&ub->mutex);
@@ -3178,17 +3159,17 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 			ub->dev_info.nr_hw_queues, nr_cpu_ids);
 	ublk_align_max_io_size(ub);
 
-	ret = ublk_init_queues(ub);
+	ret = ublk_add_tag_set(ub);
 	if (ret)
 		goto out_free_dev_number;
 
-	ret = ublk_add_tag_set(ub);
+	ret = ublk_init_queues(ub);
 	if (ret)
-		goto out_deinit_queues;
+		goto out_free_tag_set;
 
 	ret = -EFAULT;
 	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
-		goto out_free_tag_set;
+		goto out_deinit_queues;
 
 	/*
 	 * Add the char dev so that ublksrv daemon can be setup.
@@ -3197,10 +3178,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 	ret = ublk_add_chdev(ub);
 	goto out_unlock;
 
-out_free_tag_set:
-	blk_mq_free_tag_set(&ub->tag_set);
 out_deinit_queues:
 	ublk_deinit_queues(ub);
+out_free_tag_set:
+	blk_mq_free_tag_set(&ub->tag_set);
 out_free_dev_number:
 	ublk_free_dev_number(ub);
 out_free_ub:
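
Two reference sketches for patterns this diff relies on. First, struct ublk_queue now ends in a flexible array member annotated __counted_by(q_depth) and is allocated in one shot with struct_size(); below is a minimal userspace analogue of that layout (plain C, with calloc standing in for kvzalloc_node — the struct and helper names are illustrative, not the driver's):

	#include <stdio.h>
	#include <stdlib.h>

	struct io_slot {
		unsigned int flags;
		int res;
	};

	struct queue {
		unsigned int q_id;
		unsigned int q_depth;
		struct io_slot ios[];	/* flexible array member, one slot per tag */
	};

	/* struct_size(q, ios, depth) equivalent: header plus 'depth' trailing slots */
	static struct queue *queue_alloc(unsigned int q_id, unsigned int depth)
	{
		struct queue *q = calloc(1, sizeof(*q) + depth * sizeof(q->ios[0]));

		if (!q)
			return NULL;
		q->q_id = q_id;
		q->q_depth = depth;	/* must match the __counted_by() bound */
		return q;
	}

	int main(void)
	{
		struct queue *q = queue_alloc(0, 128);

		if (!q)
			return 1;
		q->ios[q->q_depth - 1].res = -1;	/* valid indices: 0..q_depth-1 */
		printf("queue %u, depth %u\n", q->q_id, q->q_depth);
		free(q);
		return 0;
	}

Second, the rewritten ublk_copy_user_pages() drops the page-pinning helpers and instead walks request segments, skipping 'offset' bytes before copying. The same skip-then-copy shape as a runnable sketch over a plain array of buffers (again, names are illustrative):

	#include <stdio.h>
	#include <string.h>

	struct seg {
		const char *buf;
		size_t len;
	};

	/* Copy up to dst_len bytes from the segment list, starting at linear
	 * offset 'offset', mirroring the rq_for_each_segment() loop shape. */
	static size_t copy_segs(const struct seg *segs, int nr_segs, size_t offset,
				char *dst, size_t dst_len)
	{
		size_t done = 0;
		int i;

		for (i = 0; i < nr_segs; i++) {
			struct seg s = segs[i];
			size_t n;

			if (offset >= s.len) {	/* segment lies before the start point */
				offset -= s.len;
				continue;
			}
			s.buf += offset;	/* partial skip within this segment */
			s.len -= offset;

			n = s.len < dst_len - done ? s.len : dst_len - done;
			memcpy(dst + done, s.buf, n);
			done += n;
			if (n < s.len)		/* destination exhausted */
				break;
			offset = 0;		/* only the first segment is offset */
		}
		return done;
	}

	int main(void)
	{
		struct seg segs[] = { { "hello ", 6 }, { "ublk ", 5 }, { "world", 5 } };
		char out[16] = { 0 };
		size_t n = copy_segs(segs, 3, 6, out, sizeof(out) - 1);

		printf("copied %zu bytes: \"%s\"\n", n, out);	/* "ublk world" */
		return 0;
	}

A design note implied by the reordering at the tail of the patch: ublk_get_queue_numa_node() reads ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map, so the tag set must be registered before the queues are allocated. That is why ublk_add_tag_set() now runs ahead of ublk_init_queues() in ublk_ctrl_add_dev(), with the error-unwind labels swapped to match.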