path: root/net/sunrpc/svc.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2025-05-28 12:21:12 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2025-05-28 12:21:12 -0700
commit    2c26b68cd5c51689b8cee9cb6a21abb5d2ab2d0f (patch)
tree      becf130707976d0104b33e66666aae0bf5de9a4d /net/sunrpc/svc.c
parent    d87d73895fcdbe6e45813efc473544433862364f (diff)
parent    425364dc49f050b6008b43408aa96d42105a9c1d (diff)
Merge tag 'nfsd-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux
Pull nfsd updates from Chuck Lever:
"The marquee feature for this release is that the limit on the maximum rsize and wsize has been raised to 4MB. The default remains at 1MB, but risk-seeking administrators now have the ability to try larger I/O sizes with NFS clients that support them. Eventually the default setting will be increased when we have confidence that this change will not have negative impact.

With v6.16, NFSD now has its own debugfs file system where we can add experimental features and make them available outside of our development community without impacting production deployments. The first experimental setting added is one that makes all NFS READ operations use vfs_iter_read() instead of the NFSD splice actor. The plan is to eventually retire the splice actor, as that will enable a number of new capabilities such as the use of struct bio_vec from the top to the bottom of the NFSD stack.

Jeff Layton contributed a number of observability improvements. The use of dprintk() in a number of high-traffic code paths has been replaced with static trace points.

This release sees the continuation of efforts to harden the NFSv4.2 COPY operation. Soon, the restriction on async COPY operations can be lifted.

Many thanks to the contributors, reviewers, testers, and bug reporters who participated during the v6.16 development cycle"

* tag 'nfsd-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (60 commits)
  xdrgen: Fix code generated for counted arrays
  SUNRPC: Bump the maximum payload size for the server
  NFSD: Add a "default" block size
  NFSD: Remove NFSSVC_MAXBLKSIZE_V2 macro
  NFSD: Remove NFSD_BUFSIZE
  sunrpc: Remove the RPCSVC_MAXPAGES macro
  svcrdma: Adjust the number of entries in svc_rdma_send_ctxt::sc_pages
  svcrdma: Adjust the number of entries in svc_rdma_recv_ctxt::rc_pages
  sunrpc: Adjust size of socket's receive page array dynamically
  SUNRPC: Remove svc_rqst :: rq_vec
  SUNRPC: Remove svc_fill_write_vector()
  NFSD: Use rqstp->rq_bvec in nfsd_iter_write()
  SUNRPC: Export xdr_buf_to_bvec()
  NFSD: De-duplicate the svc_fill_write_vector() call sites
  NFSD: Use rqstp->rq_bvec in nfsd_iter_read()
  sunrpc: Replace the rq_bvec array with dynamically-allocated memory
  sunrpc: Replace the rq_pages array with dynamically-allocated memory
  sunrpc: Remove backchannel check in svc_init_buffer()
  sunrpc: Add a helper to derive maxpages from sv_max_mesg
  svcrdma: Reduce the number of rdma_rw contexts per-QP
  ...
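The experimental READ path described above amounts to wrapping the reply pages in a bio_vec-backed iov_iter and letting the VFS fill it, rather than going through the NFSD splice actor. The fragment below is only a rough sketch of that pattern using the generic iov_iter_bvec()/vfs_iter_read() kernel APIs; the function name nfsd_read_example() and its parameters are hypothetical and are not code from this series.

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical sketch only: read into a caller-provided bio_vec array
 * with vfs_iter_read(), in the style the pull request describes.
 */
static ssize_t nfsd_read_example(struct file *file, struct bio_vec *bvec,
				 unsigned long nr_bvecs, size_t count,
				 loff_t *pos)
{
	struct iov_iter iter;

	/* Describe the destination pages as a bio_vec-backed iterator. */
	iov_iter_bvec(&iter, ITER_DEST, bvec, nr_bvecs, count);

	/* Let the VFS fill the iterator; no splice actor is involved. */
	return vfs_iter_read(file, &iter, pos, 0);
}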
Diffstat (limited to 'net/sunrpc/svc.c')
-rw-r--r--    net/sunrpc/svc.c    80
1 files changed, 23 insertions, 57 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e7f9c295d13c..939b6239df8a 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -636,24 +636,22 @@ svc_destroy(struct svc_serv **servp)
EXPORT_SYMBOL_GPL(svc_destroy);
static bool
-svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
+svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node)
{
- unsigned long pages, ret;
+ unsigned long ret;
- /* bc_xprt uses fore channel allocated buffers */
- if (svc_is_backchannel(rqstp))
- return true;
+ rqstp->rq_maxpages = svc_serv_maxpages(serv);
- pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
- * We assume one is at most one page
- */
- WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
- if (pages > RPCSVC_MAXPAGES)
- pages = RPCSVC_MAXPAGES;
+ /* rq_pages' last entry is NULL for historical reasons. */
+ rqstp->rq_pages = kcalloc_node(rqstp->rq_maxpages + 1,
+ sizeof(struct page *),
+ GFP_KERNEL, node);
+ if (!rqstp->rq_pages)
+ return false;
- ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages,
+ ret = alloc_pages_bulk_node(GFP_KERNEL, node, rqstp->rq_maxpages,
rqstp->rq_pages);
- return ret == pages;
+ return ret == rqstp->rq_maxpages;
}
/*
@@ -662,17 +660,19 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
- unsigned int i;
+ unsigned long i;
- for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
+ for (i = 0; i < rqstp->rq_maxpages; i++)
if (rqstp->rq_pages[i])
put_page(rqstp->rq_pages[i]);
+ kfree(rqstp->rq_pages);
}
static void
svc_rqst_free(struct svc_rqst *rqstp)
{
folio_batch_release(&rqstp->rq_fbatch);
+ kfree(rqstp->rq_bvec);
svc_release_buffer(rqstp);
if (rqstp->rq_scratch_page)
put_page(rqstp->rq_scratch_page);
@@ -708,7 +708,13 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
if (!rqstp->rq_resp)
goto out_enomem;
- if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
+ if (!svc_init_buffer(rqstp, serv, node))
+ goto out_enomem;
+
+ rqstp->rq_bvec = kcalloc_node(rqstp->rq_maxpages,
+ sizeof(struct bio_vec),
+ GFP_KERNEL, node);
+ if (!rqstp->rq_bvec)
goto out_enomem;
rqstp->rq_err = -EAGAIN; /* No error yet */
@@ -900,7 +906,7 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads);
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
struct page **begin = rqstp->rq_pages;
- struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];
+ struct page **end = &rqstp->rq_pages[rqstp->rq_maxpages];
if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
trace_svc_replace_page_err(rqstp);
@@ -1714,46 +1720,6 @@ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
EXPORT_SYMBOL_GPL(svc_encode_result_payload);
/**
- * svc_fill_write_vector - Construct data argument for VFS write call
- * @rqstp: svc_rqst to operate on
- * @payload: xdr_buf containing only the write data payload
- *
- * Fills in rqstp::rq_vec, and returns the number of elements.
- */
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct xdr_buf *payload)
-{
- struct page **pages = payload->pages;
- struct kvec *first = payload->head;
- struct kvec *vec = rqstp->rq_vec;
- size_t total = payload->len;
- unsigned int i;
-
- /* Some types of transport can present the write payload
- * entirely in rq_arg.pages. In this case, @first is empty.
- */
- i = 0;
- if (first->iov_len) {
- vec[i].iov_base = first->iov_base;
- vec[i].iov_len = min_t(size_t, total, first->iov_len);
- total -= vec[i].iov_len;
- ++i;
- }
-
- while (total) {
- vec[i].iov_base = page_address(*pages);
- vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
- total -= vec[i].iov_len;
- ++i;
- ++pages;
- }
-
- WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
- return i;
-}
-EXPORT_SYMBOL_GPL(svc_fill_write_vector);
-
-/**
* svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
* @rqstp: svc_rqst to operate on
* @first: buffer containing first section of pathname