author    Bernd Schubert <bschubert@ddn.com>    2025-01-20 02:29:07 +0100
committer Miklos Szeredi <mszeredi@redhat.com>  2025-01-27 18:01:22 +0100
commit    857b0263f30eebe13ab4b6a65156a0d6c8fc2210 (patch)
tree      2e3dc8ceb5862a520a51fb0631df25923926c5be /fs/fuse/dev_uring.c
parent    c2c9af9a0b13261c36909036057a116f2edb5e1a (diff)
fuse: Allow to queue bg requests through io-uring
This prepares queueing and sending background requests through io-uring.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com> # io_uring
Reviewed-by: Luis Henriques <luis@igalia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
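A note on the mechanism before the diff: the new fuse_uring_flush_bg() grants every ring queue one background slot regardless of the connection-wide max_background limit, so a single busy queue can neither starve the others nor require remote queue wake-ups. The standalone model below (plain C; the conn/queue structs and the pending counter are simplified stand-ins for the kernel's fuse_conn/fuse_ring_queue fields and list handling, not the kernel code itself) shows what that loop condition does once the global limit is already met. The completion side and the caller side are sketched after the diff.

/* Standalone model of the fuse_uring_flush_bg() credit rule
 * (illustrative only; types simplified). */
#include <stdio.h>

struct conn  { unsigned int active_background, max_background; };
struct queue { unsigned int active_background, pending; };

static void flush_bg(struct conn *fc, struct queue *q)
{
        /* Same condition as the kernel loop: start pending bg requests
         * while the global limit has room, OR while this queue has not
         * yet used its one guaranteed slot. */
        while ((fc->active_background < fc->max_background ||
                !q->active_background) && q->pending) {
                q->pending--;
                fc->active_background++;
                q->active_background++;
        }
}

int main(void)
{
        struct conn fc = { .active_background = 12, .max_background = 12 };
        struct queue q = { .active_background = 0, .pending = 3 };

        flush_bg(&fc, &q);
        /* Global limit already met, yet exactly one request starts. */
        printf("running on this queue: %u\n", q.active_background);
        return 0;
}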
Diffstat (limited to 'fs/fuse/dev_uring.c')
-rw-r--r--    fs/fuse/dev_uring.c    99
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 728000434589..27bc103c17c8 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
        return pdu->ent;
}

+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+        struct fuse_ring *ring = queue->ring;
+        struct fuse_conn *fc = ring->fc;
+
+        lockdep_assert_held(&queue->lock);
+        lockdep_assert_held(&fc->bg_lock);
+
+        /*
+         * Allow one bg request per queue, ignoring global fc limits.
+         * This prevents a single queue from consuming all resources and
+         * eliminates the need for remote queue wake-ups when global
+         * limits are met but this queue has no more waiting requests.
+         */
+        while ((fc->active_background < fc->max_background ||
+                !queue->active_background) &&
+               (!list_empty(&queue->fuse_req_bg_queue))) {
+                struct fuse_req *req;
+
+                req = list_first_entry(&queue->fuse_req_bg_queue,
+                                       struct fuse_req, list);
+                fc->active_background++;
+                queue->active_background++;
+
+                list_move_tail(&req->list, &queue->fuse_req_queue);
+        }
+}
+
static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
                               int error)
{
+        struct fuse_ring_queue *queue = ent->queue;
+        struct fuse_ring *ring = queue->ring;
+        struct fuse_conn *fc = ring->fc;
+
+        lockdep_assert_not_held(&queue->lock);
+        spin_lock(&queue->lock);
        ent->fuse_req = NULL;
+        if (test_bit(FR_BACKGROUND, &req->flags)) {
+                queue->active_background--;
+                spin_lock(&fc->bg_lock);
+                fuse_uring_flush_bg(queue);
+                spin_unlock(&fc->bg_lock);
+        }
+
+        spin_unlock(&queue->lock);
+
        if (error)
                req->out.h.error = error;
@@ -78,6 +121,7 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
{
        int qid;
        struct fuse_ring_queue *queue;
+        struct fuse_conn *fc = ring->fc;

        for (qid = 0; qid < ring->nr_queues; qid++) {
                queue = READ_ONCE(ring->queues[qid]);
@@ -85,6 +129,13 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
                        continue;

                queue->stopped = true;
+
+                WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+                spin_lock(&queue->lock);
+                spin_lock(&fc->bg_lock);
+                fuse_uring_flush_bg(queue);
+                spin_unlock(&fc->bg_lock);
+                spin_unlock(&queue->lock);
                fuse_uring_abort_end_queue_requests(queue);
        }
}
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
        INIT_LIST_HEAD(&queue->ent_w_req_queue);
        INIT_LIST_HEAD(&queue->ent_in_userspace);
        INIT_LIST_HEAD(&queue->fuse_req_queue);
+        INIT_LIST_HEAD(&queue->fuse_req_bg_queue);

        queue->fpq.processing = pq;
        fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ err:
        fuse_request_end(req);
}

+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+        struct fuse_conn *fc = req->fm->fc;
+        struct fuse_ring *ring = fc->ring;
+        struct fuse_ring_queue *queue;
+        struct fuse_ring_ent *ent = NULL;
+
+        queue = fuse_uring_task_to_queue(ring);
+        if (!queue)
+                return false;
+
+        spin_lock(&queue->lock);
+        if (unlikely(queue->stopped)) {
+                spin_unlock(&queue->lock);
+                return false;
+        }
+
+        list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+        ent = list_first_entry_or_null(&queue->ent_avail_queue,
+                                       struct fuse_ring_ent, list);
+        spin_lock(&fc->bg_lock);
+        fc->num_background++;
+        if (fc->num_background == fc->max_background)
+                fc->blocked = 1;
+        fuse_uring_flush_bg(queue);
+        spin_unlock(&fc->bg_lock);
+
+        /*
+         * Due to bg_queue flush limits there might be other bg requests
+         * in the queue that need to be handled first. Or no further req
+         * might be available.
+         */
+        req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+                                       list);
+        if (ent && req) {
+                fuse_uring_add_req_to_ring_ent(ent, req);
+                spin_unlock(&queue->lock);
+
+                fuse_uring_dispatch_ent(ent);
+        } else {
+                spin_unlock(&queue->lock);
+        }
+
+        return true;
+}
+
static const struct fuse_iqueue_ops fuse_io_uring_ops = {
        /* should be send over io-uring as enhancement */
        .send_forget = fuse_dev_queue_forget,
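Two remarks on the completion path in fuse_uring_req_end() above. It only drops queue->active_background and re-runs fuse_uring_flush_bg(); the matching fc->num_background/fc->active_background decrements are expected to happen in the generic background accounting of fuse_request_end(), as on the non-uring path. The WARN_ON_ONCE() in the abort path likewise documents an expectation: by the time queues are aborted, max_background has been raised to UINT_MAX, so the flush drains every still-pending request. Continuing the standalone model from the top of this page (illustrative only, same simplified types):

/* Completion step modeled on fuse_uring_req_end(): give back this
 * queue's credit, then immediately try to start the next pending bg
 * request. The global decrement lives here only for the model; in the
 * kernel it is part of the generic fuse_request_end() accounting. */
static void req_end_bg(struct conn *fc, struct queue *q)
{
        q->active_background--;
        fc->active_background--;
        flush_bg(fc, q);
}

Run after the main() above, one req_end_bg() call leaves the queue with one request running and one fewer pending: the three queued requests complete strictly one at a time while the connection as a whole stays at its global limit, with no cross-queue wakeup involved.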
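Finally, fuse_uring_queue_bq_req() returns false when it cannot find a usable queue for the current task or the queue is stopped, which lets the request-queueing code fall back to the classic /dev/fuse background path. That hook lives outside this file; the sketch below is only an assumption about its shape, and the fallback helper name is hypothetical:

/* Hypothetical caller-side dispatch, not part of this diff: prefer the
 * io-uring background path when a ring is set up, else fall back to
 * the legacy background queue. */
static void fuse_queue_background(struct fuse_req *req)
{
        struct fuse_conn *fc = req->fm->fc;

        if (fc->ring && fuse_uring_queue_bq_req(req))
                return;

        fuse_legacy_queue_background(fc, req);  /* hypothetical fallback */
}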