author    Mark Brown <broonie@kernel.org>    2020-03-04 18:28:57 +0000
committer Mark Brown <broonie@kernel.org>    2020-03-04 18:28:57 +0000
commit    cb71d8efd74c588fc68cce2180a4861091e8fe8a (patch)
tree      2d7591ae4fe588e1a14e4d2d6101a54f5f27db40    /drivers/net/wireguard/send.c
parent    4709d86ca3c8f845ff653690b0a97ad19dc5ba18 (diff)
parent    50b62071deab48c1a69c471f9a7d0c8ff9ef23eb (diff)
Merge series "Compatible string consolidation for NXP DSPI driver" from Vladimir Oltean <olteanv@gmail.com>:
This series makes room in the driver for differentiation between the
controllers which currently operate in TCFQ mode. Most of these are actually
capable of a lot more in terms of throughput. This is in preparation of a
second series which will convert the remaining users of TCFQ mode altogether
to XSPI mode with command cycling.

Vladimir Oltean (6):
  doc: spi-fsl-dspi: Add specific compatibles for all Layerscape SoCs
  spi: spi-fsl-dspi: Use specific compatible strings for all SoC instantiations
  spi: spi-fsl-dspi: Parameterize the FIFO size and DMA buffer size
  spi: spi-fsl-dspi: LS2080A and LX2160A support XSPI mode
  spi: spi-fsl-dspi: Support SPI software timestamping in all non-DMA modes
  spi: spi-fsl-dspi: Convert the instantiations that support it to DMA

 .../devicetree/bindings/spi/spi-fsl-dspi.txt |  17 +-
 drivers/spi/spi-fsl-dspi.c                   | 162 +++++++++++++-----
 2 files changed, 128 insertions(+), 51 deletions(-)

--
2.17.1
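The differentiation described above typically relies on per-compatible
parameter blocks selected at probe time via the OF match table. The fragment
below is only a hedged sketch of that general pattern; the struct layout,
field names, and values are illustrative assumptions, not the actual
spi-fsl-dspi definitions added by the series.

/* Illustrative sketch of per-compatible devtype data; names and values
 * here are hypothetical, not taken from drivers/spi/spi-fsl-dspi.c.
 */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct dspi_devtype_data {
	unsigned int fifo_size;		/* FIFO depth, in words */
	unsigned int dma_bufsize;	/* DMA bounce buffer size, in bytes */
	bool has_xspi;			/* controller supports XSPI mode */
};

static const struct dspi_devtype_data ls2080a_data = {
	.fifo_size = 4, .dma_bufsize = 4096, .has_xspi = true,
};

static const struct dspi_devtype_data lx2160a_data = {
	.fifo_size = 4, .dma_bufsize = 4096, .has_xspi = true,
};

static const struct of_device_id dspi_dt_ids[] = {
	{ .compatible = "fsl,ls2080a-dspi", .data = &ls2080a_data },
	{ .compatible = "fsl,lx2160a-dspi", .data = &lx2160a_data },
	{ /* sentinel */ },
};

static int dspi_probe(struct platform_device *pdev)
{
	const struct dspi_devtype_data *data;

	/* Pick the parameter block matching the DT compatible string. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;
	/* ... size FIFO and DMA resources from *data ... */
	return 0;
}

With SoC-specific compatibles in place, adding a new instantiation becomes a
matter of adding one table entry rather than special-casing the driver code.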
Diffstat (limited to 'drivers/net/wireguard/send.c')
-rw-r--r--  drivers/net/wireguard/send.c  16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index c13260563446..7348c10cbae3 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_peer *peer)
 
 static unsigned int calculate_skb_padding(struct sk_buff *skb)
 {
+	unsigned int padded_size, last_unit = skb->len;
+
+	if (unlikely(!PACKET_CB(skb)->mtu))
+		return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
+
 	/* We do this modulo business with the MTU, just in case the networking
 	 * layer gives us a packet that's bigger than the MTU. In that case, we
 	 * wouldn't want the final subtraction to overflow in the case of the
-	 * padded_size being clamped.
+	 * padded_size being clamped. Fortunately, that's very rarely the case,
+	 * so we optimize for that not happening.
 	 */
-	unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
-	unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+	if (unlikely(last_unit > PACKET_CB(skb)->mtu))
+		last_unit %= PACKET_CB(skb)->mtu;
 
-	if (padded_size > PACKET_CB(skb)->mtu)
-		padded_size = PACKET_CB(skb)->mtu;
+	padded_size = min(PACKET_CB(skb)->mtu,
+			  ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
 	return padded_size - last_unit;
 }
 
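For readers following the hunk, here is a minimal user-space sketch of the
padding rule it implements. It assumes MESSAGE_PADDING_MULTIPLE is 16, as in
WireGuard's messages.h, and reimplements ALIGN()/min() locally; it is an
illustration of the computation, not kernel code.

/* Standalone sketch of calculate_skb_padding()'s arithmetic. */
#include <assert.h>

#define MESSAGE_PADDING_MULTIPLE 16u
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Padding bytes to append to a plaintext of length len, given the device
 * MTU; mtu == 0 (a device configured without an MTU) just rounds up to the
 * next multiple of 16.
 */
static unsigned int padding_bytes(unsigned int len, unsigned int mtu)
{
	unsigned int last_unit = len, padded_size;

	if (!mtu)
		return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

	/* Oversized packets: only the length of the final MTU-sized unit
	 * matters, and taking the modulo keeps the subtraction below from
	 * underflowing when padded_size gets clamped to the MTU.
	 */
	if (last_unit > mtu)
		last_unit %= mtu;

	padded_size = MIN(mtu, ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
	return padded_size - last_unit;
}

int main(void)
{
	assert(padding_bytes(0, 1420) == 0);	/* already aligned */
	assert(padding_bytes(1, 1420) == 15);	/* rounds up to 16 */
	assert(padding_bytes(1419, 1420) == 1);	/* clamped to the MTU */
	assert(padding_bytes(21, 0) == 11);	/* mtu == 0: align only */
	return 0;
}

The early return for mtu == 0 is what the patch adds: without it, the old
code's modulo and clamp against PACKET_CB(skb)->mtu would divide by zero.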