/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static struct workqueue_struct *destruct_wq __read_mostly;

static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static struct page *dummy_page;

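/* Release the TX and/or RX offload state hanging off a TLS context and free
 * the context itself; only called once nothing references the context.
 */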
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

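/* Work item run on destruct_wq: tell the driver to drop its TX offload
 * state (tls_dev_del), release the netdev reference and free the TLS
 * context, keeping the driver callback out of the atomic destruction path.
 */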
static void tls_device_tx_del_task(struct work_struct *work)
{
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
	struct net_device *netdev;

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
	ctx->netdev = NULL;
	tls_device_free_ctx(ctx);
}

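/* Drop the caller's reference on the TLS context. On the final put the
 * context is unlinked from the device lists; if a netdev still holds TX
 * offload state, teardown is deferred to destruct_wq, otherwise the context
 * is freed immediately.
 */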
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
		spin_unlock_irqrestore(&tls_device_lock, flags);
		return;
	}

	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	if (!async_cleanup)
		tls_device_free_ctx(ctx);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

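/* clean_acked_data callback: once TCP has been ACKed up to @acked_seq, drop
 * every fully acknowledged record from the retransmission list and advance
 * unacked_record_sn accordingly.
 */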
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

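/* Resynchronize the device TX state with the stack: mark the TCP write
 * queue tail with EOR, pass the resync point and the current record
 * sequence to the driver via tls_dev_resync, and clear TLS_TX_SYNC_SCHED
 * only if the driver call succeeded.
 */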
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

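/* Add @size bytes taken from @pfrag to the open record: extend the last
 * fragment when the new data is contiguous with it in the same page,
 * otherwise start a new fragment and take a page reference.
 */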
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
					size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

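/* Close the open record and transmit it: note its end sequence, add it to
 * the RCU-protected records list, advance the record sequence number, build
 * the scatterlist from the record's fragments and push it via tls_push_sg.
 */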
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

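/* Finalize an offloaded record: append a placeholder for the authentication
 * tag (the device fills in the real tag) and write the TLS header into the
 * first fragment.
 */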
static void tls_device_record_close(struct sock *sk,
				    struct tls_context *ctx,
				    struct tls_record_info *record,
				    struct page_frag *pfrag,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct page_frag dummy_tag_frag;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now use the dummy page
	 */
	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
		dummy_tag_frag.page = dummy_page;
		dummy_tag_frag.offset = 0;
		pfrag = &dummy_tag_frag;
	}
	tls_append_frag(record, pfrag, prot->tag_size);

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
}

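/* Open a new record whose first fragment reserves room for the TLS prepend
 * (header) in @pfrag.
 */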
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
				prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

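/* Ensure there is an open record and page-frag space to copy into; returns
 * -ENOMEM when memory cannot be obtained, so the caller can wait for memory
 * and retry.
 */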
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

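/* Copy @bytes from the iterator into @addr: align up to a cache-line
 * boundary with a regular copy, stream the aligned bulk with
 * copy_from_iter_nocache, then copy any remaining tail normally.
 */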
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

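/* Main device-offload TX path: consume @size bytes from @iter, filling the
 * open record either by referencing the caller's pages directly
 * (MSG_SPLICE_PAGES) or by copying into socket page frags, and closing and
 * pushing a record whenever it is full, the fragment limit is reached or
 * the data runs out. Returns the number of bytes consumed or a negative
 * error.
 */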
static int tls_push_data(struct sock *sk,
			 struct iov_iter *iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
	      MSG_SPLICE_PAGES | MSG_EOR))
		return -EOPNOTSUPP;

	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
		return -EINVAL;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_MORE;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && (flags & MSG_SPLICE_PAGES)) {
			struct page_frag zc_pfrag;
			struct page **pages = &zc_pfrag.page;
			size_t off;

			rc = iov_iter_extract_pages(iter, &pages,
						    copy, 1, 0, &off);
			if (rc <= 0) {
				if (rc == 0)
					rc = -EIO;
				goto handle_error;
			}
			copy = rc;

			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
				iov_iter_revert(iter, copy);
				rc = -EIO;
				goto handle_error;
			}

			zc_pfrag.offset = off;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & MSG_MORE) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			tls_device_record_close(sk, tls_ctx, record,
						pfrag, record_type);

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

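/* sendmsg() handler for a TLS_HW TX socket: parse an optional record-type
 * cmsg and feed the message iterator to tls_push_data under the TX lock.
 */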
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	if (!tls_ctx->zerocopy_sendfile)
		msg->msg_flags &= ~MSG_SPLICE_PAGES;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
			   record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

void tls_device_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter iter = {};

	if (!tls_is_partially_sent_record(tls_ctx))
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (tls_is_partially_sent_record(tls_ctx)) {
		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
	}

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

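/* Find the record covering TCP sequence number @seq for retransmission,
 * using (and updating) the retransmit hint, and return the record's
 * sequence number through @p_record_sn. Returns NULL when @seq does not
 * belong to any tracked record.
 */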
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

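/* Ask the device (under RCU, as the netdev may be going away) to
 * resynchronize RX decryption state at TCP sequence @seq with record
 * sequence @rcd_sn, and account the event in the MIB statistics.
 */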
static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

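/* Asynchronous RX resync (RESYNC_REQ_ASYNC) happens in two stages:
 *
 * 1. The driver indicates it wants to resync on some TLS record within the
 *    received packet, but does not yet know on which one; it queries the
 *    device for the exact TCP sequence without waiting for the response.
 *    While the request is pending, the sequence numbers of record headers
 *    in the requested range are logged and rcd_delta is advanced.
 *
 * 2. When the device responds, the logged entries are matched against the
 *    response; on a match the resync point and the record delta are
 *    reported back to the caller.
 */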
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

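/* Called for every new record header seen on an offloaded RX socket:
 * depending on the configured resync type (driver request or core
 * next-hint), decide whether the device needs to be resynchronized at this
 * record boundary.
 */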
2019-06-11 12:40:02 +08:00
|
|
|
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
|
2018-07-13 19:33:43 +08:00
|
|
|
{
|
|
|
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
|
|
|
struct tls_offload_context_rx *rx_ctx;
|
2019-06-11 12:40:02 +08:00
|
|
|
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
|
2020-06-08 17:42:52 +08:00
|
|
|
u32 sock_data, is_req_pending;
|
2019-06-11 12:40:02 +08:00
|
|
|
struct tls_prot_info *prot;
|
2018-07-13 19:33:43 +08:00
|
|
|
s64 resync_req;
|
2020-11-15 21:14:48 +08:00
|
|
|
u16 rcd_delta;
|
2018-07-13 19:33:43 +08:00
|
|
|
u32 req_seq;
|
|
|
|
|
|
|
|
if (tls_ctx->rx_conf != TLS_HW)
|
|
|
|
return;
|
net/tls: Fix use-after-free after the TLS device goes down and up
When a netdev with active TLS offload goes down, tls_device_down is
called to stop the offload and tear down the TLS context. However, the
socket stays alive, and it still points to the TLS context, which is now
deallocated. If a netdev goes up while the connection is still active,
and the data flow resumes after a number of TCP retransmissions, it will
lead to a use-after-free of the TLS context.
This commit addresses this bug by keeping the context alive until its
normal destruction, and implements the necessary fallbacks, so that the
connection can resume in software (non-offloaded) kTLS mode.
On the TX side tls_sw_fallback is used to encrypt all packets. The RX
side already has all the necessary fallbacks, because receiving
non-decrypted packets is supported. The thing needed on the RX side is
to block resync requests, which are normally produced after receiving
non-decrypted packets.
The necessary synchronization is implemented for a graceful teardown:
first the fallbacks are deployed, then the driver resources are released
(it used to be possible to have a tls_dev_resync after tls_dev_del).
A new flag called TLS_RX_DEV_DEGRADED is added to indicate the fallback
mode. It's used to skip the RX resync logic completely, as it becomes
useless, and some objects may be released (for example, resync_async,
which is allocated and freed by the driver).
Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-06-01 20:08:00 +08:00
|
|
|
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
|
|
|
|
return;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-06-11 12:40:02 +08:00
|
|
|
prot = &tls_ctx->prot_info;
|
2018-07-13 19:33:43 +08:00
|
|
|
rx_ctx = tls_offload_ctx_rx(tls_ctx);
|
2019-06-11 12:40:02 +08:00
|
|
|
memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
|
|
|
|
|
|
|
|
switch (rx_ctx->resync_type) {
|
|
|
|
case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
|
|
|
|
resync_req = atomic64_read(&rx_ctx->resync_req);
|
|
|
|
req_seq = resync_req >> 32;
|
|
|
|
seq += TLS_HEADER_SIZE - 1;
|
2020-06-08 17:42:52 +08:00
|
|
|
is_req_pending = resync_req;
|
2019-06-11 12:40:02 +08:00
|
|
|
|
2020-06-08 17:42:52 +08:00
|
|
|
if (likely(!is_req_pending) || req_seq != seq ||
|
2019-06-11 12:40:02 +08:00
|
|
|
!atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
|
|
|
|
return;
|
|
|
|
break;
|
|
|
|
case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
|
|
|
|
if (likely(!rx_ctx->resync_nh_do_now))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* head of next rec is already in, note that the sock_inq will
|
|
|
|
* include the currently parsed message when called from parser
|
|
|
|
*/
|
2019-10-05 07:19:22 +08:00
|
|
|
sock_data = tcp_inq(sk);
|
|
|
|
if (sock_data > rcd_len) {
|
|
|
|
trace_tls_device_rx_resync_nh_delay(sk, sock_data,
|
|
|
|
rcd_len);
|
2019-06-11 12:40:02 +08:00
|
|
|
return;
|
2019-10-05 07:19:22 +08:00
|
|
|
}
|
2019-06-11 12:40:02 +08:00
|
|
|
|
|
|
|
rx_ctx->resync_nh_do_now = 0;
|
|
|
|
seq += rcd_len;
|
|
|
|
tls_bigint_increment(rcd_sn, prot->rec_seq_size);
|
|
|
|
break;
|
2020-06-09 00:11:38 +08:00
|
|
|
case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
|
|
|
|
resync_req = atomic64_read(&rx_ctx->resync_async->req);
|
|
|
|
is_req_pending = resync_req;
|
|
|
|
if (likely(!is_req_pending))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!tls_device_rx_resync_async(rx_ctx->resync_async,
|
2020-11-15 21:14:48 +08:00
|
|
|
resync_req, &seq, &rcd_delta))
|
2020-06-09 00:11:38 +08:00
|
|
|
return;
|
2020-11-15 21:14:48 +08:00
|
|
|
tls_bigint_subtract(rcd_sn, rcd_delta);
|
2020-06-09 00:11:38 +08:00
|
|
|
break;
|
2019-06-11 12:40:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
|
|
|
|
}
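A concrete illustration of the sequence matching in the TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ branch above; the numbers are hypothetical and only restate what the code computes.

/* Illustration (hypothetical numbers):
 *
 *   TCP sequence of the first byte of the new record (seq on entry): 1000
 *   The TLS record header is TLS_HEADER_SIZE == 5 bytes, so the core compares
 *   req_seq against seq + TLS_HEADER_SIZE - 1 = 1004, i.e. the sequence of
 *   the last header byte.
 *
 *   A pending driver request with req_seq == 1004 therefore matches, the
 *   request word is cleared via atomic64_try_cmpxchg(), and
 *   tls_device_resync_rx() is invoked with seq == 1004 and the current rcd_sn.
 */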
|
|
|
|
|
|
|
|
static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
|
|
|
|
struct tls_offload_context_rx *ctx,
|
|
|
|
struct sock *sk, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct strp_msg *rxm;
|
|
|
|
|
|
|
|
/* device will request resyncs by itself based on stream scan */
|
|
|
|
if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
|
|
|
|
return;
|
|
|
|
/* already scheduled */
|
|
|
|
if (ctx->resync_nh_do_now)
|
|
|
|
return;
|
|
|
|
/* seen decrypted fragments since last fully-failed record */
|
|
|
|
if (ctx->resync_nh_reset) {
|
|
|
|
ctx->resync_nh_reset = 0;
|
|
|
|
ctx->resync_nh.decrypted_failed = 1;
|
|
|
|
ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* doing resync, bump the next target in case it fails */
|
|
|
|
if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
|
|
|
|
ctx->resync_nh.decrypted_tgt *= 2;
|
|
|
|
else
|
|
|
|
ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
|
|
|
|
|
|
|
|
rxm = strp_msg(skb);
|
|
|
|
|
|
|
|
/* head of next rec is already in, parser will sync for us */
|
|
|
|
if (tcp_inq(sk) > rxm->full_len) {
|
2019-10-05 07:19:22 +08:00
|
|
|
trace_tls_device_rx_resync_nh_schedule(sk);
|
2019-06-11 12:40:02 +08:00
|
|
|
ctx->resync_nh_do_now = 1;
|
|
|
|
} else {
|
|
|
|
struct tls_prot_info *prot = &tls_ctx->prot_info;
|
|
|
|
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
|
|
|
|
|
|
|
|
memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
|
|
|
|
tls_bigint_increment(rcd_sn, prot->rec_seq_size);
|
|
|
|
|
|
|
|
tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
|
|
|
|
rcd_sn);
|
|
|
|
}
|
2018-07-13 19:33:43 +08:00
|
|
|
}
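The decrypted_tgt back-off above raises the number of fully-failed records required before the next core-driven resync attempt. The progression below is illustrative only; it assumes a start interval of 2 and a cap (TLS_DEVICE_RESYNC_NH_MAX_IVAL) of 128, which are my recollection of the constants defined elsewhere and may differ.

/* Assumed START_IVAL = 2, MAX_IVAL = 128; decrypted_tgt after each attempt:
 *
 *   2 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256 -> 384 -> 512 -> ...
 *
 * i.e. exponential growth while below the cap, then fixed increments of
 * MAX_IVAL, so a stream that repeatedly fails to resync backs off steadily.
 */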
|
|
|
|
|
2022-07-15 13:22:30 +08:00
|
|
|
static int
|
2022-09-20 21:01:48 +08:00
|
|
|
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
|
2018-07-13 19:33:43 +08:00
|
|
|
{
|
2022-09-20 21:01:48 +08:00
|
|
|
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
|
|
|
|
const struct tls_cipher_size_desc *cipher_sz;
|
2022-07-23 07:50:32 +08:00
|
|
|
int err, offset, copy, data_len, pos;
|
|
|
|
struct sk_buff *skb, *skb_iter;
|
2018-07-13 19:33:43 +08:00
|
|
|
struct scatterlist sg[1];
|
2022-07-15 13:22:30 +08:00
|
|
|
struct strp_msg *rxm;
|
2018-07-13 19:33:43 +08:00
|
|
|
char *orig_buf, *buf;
|
|
|
|
|
2022-09-20 21:01:48 +08:00
|
|
|
switch (tls_ctx->crypto_recv.info.cipher_type) {
|
|
|
|
case TLS_CIPHER_AES_GCM_128:
|
2022-09-20 21:01:49 +08:00
|
|
|
case TLS_CIPHER_AES_GCM_256:
|
2022-09-20 21:01:48 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
|
|
|
|
|
2022-07-23 07:50:32 +08:00
|
|
|
rxm = strp_msg(tls_strp_msg(sw_ctx));
|
2022-09-20 21:01:48 +08:00
|
|
|
orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
|
|
|
|
sk->sk_allocation);
|
2018-07-13 19:33:43 +08:00
|
|
|
if (!orig_buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
buf = orig_buf;
|
|
|
|
|
2022-07-23 07:50:32 +08:00
|
|
|
err = tls_strp_msg_cow(sw_ctx);
|
|
|
|
if (unlikely(err))
|
2018-07-13 19:33:43 +08:00
|
|
|
goto free_buf;
|
2022-07-23 07:50:32 +08:00
|
|
|
|
|
|
|
skb = tls_strp_msg(sw_ctx);
|
|
|
|
rxm = strp_msg(skb);
|
|
|
|
offset = rxm->offset;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
|
|
|
sg_init_table(sg, 1);
|
|
|
|
sg_set_buf(&sg[0], buf,
|
2022-09-20 21:01:48 +08:00
|
|
|
rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
|
|
|
|
err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
|
2019-06-04 06:16:59 +08:00
|
|
|
if (err)
|
|
|
|
goto free_buf;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
|
|
|
/* We are interested only in the decrypted data not the auth */
|
2022-07-15 13:22:30 +08:00
|
|
|
err = decrypt_skb(sk, sg);
|
2018-07-13 19:33:43 +08:00
|
|
|
if (err != -EBADMSG)
|
|
|
|
goto free_buf;
|
|
|
|
else
|
|
|
|
err = 0;
|
|
|
|
|
2022-09-20 21:01:48 +08:00
|
|
|
data_len = rxm->full_len - cipher_sz->tag;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-04-26 08:35:09 +08:00
|
|
|
if (skb_pagelen(skb) > offset) {
|
2019-04-26 08:35:10 +08:00
|
|
|
copy = min_t(int, skb_pagelen(skb) - offset, data_len);
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-06-04 06:16:59 +08:00
|
|
|
if (skb->decrypted) {
|
|
|
|
err = skb_store_bits(skb, offset, buf, copy);
|
|
|
|
if (err)
|
|
|
|
goto free_buf;
|
|
|
|
}
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-04-26 08:35:09 +08:00
|
|
|
offset += copy;
|
|
|
|
buf += copy;
|
|
|
|
}
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-04-26 08:35:10 +08:00
|
|
|
pos = skb_pagelen(skb);
|
2018-07-13 19:33:43 +08:00
|
|
|
skb_walk_frags(skb, skb_iter) {
|
2019-04-26 08:35:10 +08:00
|
|
|
int frag_pos;
|
|
|
|
|
|
|
|
/* Practically all frags must belong to msg if reencrypt
|
|
|
|
* is needed with current strparser and coalescing logic,
|
|
|
|
* but strparser may "get optimized", so let's be safe.
|
|
|
|
*/
|
|
|
|
if (pos + skb_iter->len <= offset)
|
|
|
|
goto done_with_frag;
|
|
|
|
if (pos >= data_len + rxm->offset)
|
|
|
|
break;
|
|
|
|
|
|
|
|
frag_pos = offset - pos;
|
|
|
|
copy = min_t(int, skb_iter->len - frag_pos,
|
|
|
|
data_len + rxm->offset - offset);
|
2018-07-13 19:33:43 +08:00
|
|
|
|
2019-06-04 06:16:59 +08:00
|
|
|
if (skb_iter->decrypted) {
|
|
|
|
err = skb_store_bits(skb_iter, frag_pos, buf, copy);
|
|
|
|
if (err)
|
|
|
|
goto free_buf;
|
|
|
|
}
|
2018-07-13 19:33:43 +08:00
|
|
|
|
|
|
|
offset += copy;
|
|
|
|
buf += copy;
|
2019-04-26 08:35:10 +08:00
|
|
|
done_with_frag:
|
|
|
|
pos += skb_iter->len;
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
free_buf:
|
|
|
|
kfree(orig_buf);
|
|
|
|
return err;
|
|
|
|
}
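The note below summarizes how the copy-back in tls_device_reencrypt() ends up with a consistent record. It is my reading of the code above, added as an annotated sketch rather than text from the original sources.

/* Sketch of the reencrypt pass (as I read the code above):
 *
 *   skb (mixed record):      [hdr|IV| plaintext (HW-decrypted) | ciphertext | tag]
 *   orig_buf after
 *   decrypt_skb():           [hdr|IV| ciphertext (regenerated) | plaintext  |    ]
 *
 * AES-GCM payload processing is a CTR keystream XOR, so running the "decrypt"
 * direction over bytes that are already plaintext regenerates their original
 * ciphertext, and the -EBADMSG tag failure is expected and tolerated.
 * skb_store_bits() then copies buf back only over fragments marked
 * ->decrypted, leaving the record fully encrypted so the regular software
 * decryption path can process it as a whole.
 */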
|
|
|
|
|
2022-07-15 13:22:30 +08:00
|
|
|
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
|
2018-07-13 19:33:43 +08:00
|
|
|
{
|
|
|
|
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
|
2022-07-15 13:22:30 +08:00
|
|
|
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
|
|
|
|
struct sk_buff *skb = tls_strp_msg(sw_ctx);
|
|
|
|
struct strp_msg *rxm = strp_msg(skb);
|
2023-05-17 09:50:41 +08:00
|
|
|
int is_decrypted, is_encrypted;
|
|
|
|
|
|
|
|
if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
|
|
|
|
is_decrypted = skb->decrypted;
|
|
|
|
is_encrypted = !is_decrypted;
|
|
|
|
} else {
|
|
|
|
is_decrypted = 0;
|
|
|
|
is_encrypted = 0;
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
|
|
|
|
2019-10-05 07:19:23 +08:00
|
|
|
trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
|
|
|
|
tls_ctx->rx.rec_seq, rxm->full_len,
|
|
|
|
is_encrypted, is_decrypted);
|
|
|
|
|
2021-06-01 20:08:00 +08:00
|
|
|
if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
|
|
|
|
if (likely(is_encrypted || is_decrypted))
|
2022-04-08 11:38:23 +08:00
|
|
|
return is_decrypted;
|
2021-06-01 20:08:00 +08:00
|
|
|
|
|
|
|
/* After tls_device_down disables the offload, the next SKB will
|
|
|
|
* likely have initial fragments decrypted, and final ones not
|
|
|
|
* decrypted. We need to reencrypt that single SKB.
|
|
|
|
*/
|
2022-09-20 21:01:48 +08:00
|
|
|
return tls_device_reencrypt(sk, tls_ctx);
|
2021-06-01 20:08:00 +08:00
|
|
|
}
|
|
|
|
|
2019-06-11 12:40:02 +08:00
|
|
|
/* Return immediately if the record is either entirely plaintext or
|
2018-07-13 19:33:43 +08:00
|
|
|
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
|
|
|
|
* record.
|
|
|
|
*/
|
2019-06-11 12:40:02 +08:00
|
|
|
if (is_decrypted) {
|
|
|
|
ctx->resync_nh_reset = 1;
|
2022-04-08 11:38:23 +08:00
|
|
|
return is_decrypted;
|
2019-06-11 12:40:02 +08:00
|
|
|
}
|
|
|
|
if (is_encrypted) {
|
|
|
|
tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->resync_nh_reset = 1;
|
2022-09-20 21:01:48 +08:00
|
|
|
return tls_device_reencrypt(sk, tls_ctx);
|
2018-07-13 19:33:43 +08:00
|
|
|
}
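A hypothetical caller-side sketch of the return convention above; the real call site lives in the software RX path (tls_sw.c) and may look different, and software_decrypt_record() is an assumed placeholder.

/* Hypothetical caller sketch -- not the actual tls_sw.c call site. */
static int rx_one_record_sketch(struct sock *sk, struct tls_context *tls_ctx)
{
	int ret = tls_device_decrypted(sk, tls_ctx);

	if (ret < 0)
		return ret;	/* reencrypt failed, propagate the error */
	if (ret > 0)
		return 0;	/* NIC already decrypted the record, skip SW decryption */

	/* fully (or partially, after reencrypt) ciphertext: decrypt in software */
	return software_decrypt_record(sk, tls_ctx);	/* assumed helper */
}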
|
|
|
|
|
2019-04-26 03:32:02 +08:00
|
|
|
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
|
|
|
|
struct net_device *netdev)
|
|
|
|
{
|
|
|
|
if (sk->sk_destruct != tls_device_sk_destruct) {
|
|
|
|
refcount_set(&ctx->refcount, 1);
|
|
|
|
dev_hold(netdev);
|
net/tls: Use RCU API to access tls_ctx->netdev
Currently, tls_device_down synchronizes with tls_device_resync_rx using
RCU, however, the pointer to netdev is stored using WRITE_ONCE and
loaded using READ_ONCE.
Although such an approach is technically correct (rcu_dereference is
essentially a READ_ONCE, and rcu_assign_pointer uses WRITE_ONCE to store
NULL), using the dedicated RCU helpers for pointers is preferable, as they
include additional checks and may change the implementation
transparently to the callers.
Mark the netdev pointer as __rcu and use the correct RCU helpers to
access it. For non-concurrent access pass the right conditions that
guarantee safe access (locks taken, refcount value). Also use the
correct helper in mlx5e, where even READ_ONCE was missing.
The transition to RCU exposes existing issues, fixed by this commit:
1. bond_tls_device_xmit could read netdev twice, and it could become
NULL the second time, after the NULL check passed.
2. Drivers shouldn't stop processing the last packet if tls_device_down
just set netdev to NULL, before tls_dev_del was called. This prevents a
possible packet drop when transitioning to the fallback software mode.
Fixes: 89df6a810470 ("net/bonding: Implement TLS TX device offload")
Fixes: c55dcdd435aa ("net/tls: Fix use-after-free after the TLS device goes down and up")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Link: https://lore.kernel.org/r/20220810081602.1435800-1-maximmi@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-10 16:16:02 +08:00
|
|
|
RCU_INIT_POINTER(ctx->netdev, netdev);
|
2019-04-26 03:32:02 +08:00
|
|
|
spin_lock_irq(&tls_device_lock);
|
|
|
|
list_add_tail(&ctx->list, &tls_device_list);
|
|
|
|
spin_unlock_irq(&tls_device_lock);
|
|
|
|
|
|
|
|
ctx->sk_destruct = sk->sk_destruct;
|
2019-12-18 06:12:01 +08:00
|
|
|
smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
|
2019-04-26 03:32:02 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
|
|
|
|
{
|
2019-02-14 15:11:35 +08:00
|
|
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
|
|
|
struct tls_prot_info *prot = &tls_ctx->prot_info;
|
2022-09-20 21:01:48 +08:00
|
|
|
const struct tls_cipher_size_desc *cipher_sz;
|
2018-04-30 15:16:16 +08:00
|
|
|
struct tls_record_info *start_marker_record;
|
2018-07-13 19:33:39 +08:00
|
|
|
struct tls_offload_context_tx *offload_ctx;
|
2018-04-30 15:16:16 +08:00
|
|
|
struct tls_crypto_info *crypto_info;
|
|
|
|
struct net_device *netdev;
|
|
|
|
char *iv, *rec_seq;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
__be64 rcd_sn;
|
2019-09-03 12:31:03 +08:00
|
|
|
int rc;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
|
|
|
if (!ctx)
|
2019-09-03 12:31:03 +08:00
|
|
|
return -EINVAL;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2019-09-03 12:31:03 +08:00
|
|
|
if (ctx->priv_ctx_tx)
|
|
|
|
return -EEXIST;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2022-03-19 11:15:20 +08:00
|
|
|
netdev = get_netdev_for_sock(sk);
|
|
|
|
if (!netdev) {
|
|
|
|
pr_err_ratelimited("%s: netdev not found\n", __func__);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2022-03-19 11:15:20 +08:00
|
|
|
if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
|
|
|
|
rc = -EOPNOTSUPP;
|
|
|
|
goto release_netdev;
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
|
|
|
|
2018-09-12 23:44:42 +08:00
|
|
|
crypto_info = &ctx->crypto_send.info;
|
2019-06-29 07:07:59 +08:00
|
|
|
if (crypto_info->version != TLS_1_2_VERSION) {
|
|
|
|
rc = -EOPNOTSUPP;
|
2022-03-19 11:15:20 +08:00
|
|
|
goto release_netdev;
|
2019-06-29 07:07:59 +08:00
|
|
|
}
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
switch (crypto_info->cipher_type) {
|
|
|
|
case TLS_CIPHER_AES_GCM_128:
|
|
|
|
iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
|
|
|
|
rec_seq =
|
|
|
|
((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
|
|
|
|
break;
|
2022-09-20 21:01:49 +08:00
|
|
|
case TLS_CIPHER_AES_GCM_256:
|
|
|
|
iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
|
|
|
|
rec_seq =
|
|
|
|
((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
|
|
|
|
break;
|
2018-04-30 15:16:16 +08:00
|
|
|
default:
|
|
|
|
rc = -EINVAL;
|
2022-03-19 11:15:20 +08:00
|
|
|
goto release_netdev;
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
2022-09-20 21:01:48 +08:00
|
|
|
cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2019-06-11 12:40:00 +08:00
|
|
|
/* Sanity-check the rec_seq_size for stack allocations */
|
2022-09-20 21:01:48 +08:00
|
|
|
if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
|
2019-06-11 12:40:00 +08:00
|
|
|
rc = -EINVAL;
|
2022-03-19 11:15:20 +08:00
|
|
|
goto release_netdev;
|
2019-06-11 12:40:00 +08:00
|
|
|
}
|
|
|
|
|
2019-07-09 10:53:15 +08:00
|
|
|
prot->version = crypto_info->version;
|
|
|
|
prot->cipher_type = crypto_info->cipher_type;
|
2022-09-20 21:01:48 +08:00
|
|
|
prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
|
|
|
|
prot->tag_size = cipher_sz->tag;
|
2019-02-14 15:11:35 +08:00
|
|
|
prot->overhead_size = prot->prepend_size + prot->tag_size;
|
2022-09-20 21:01:48 +08:00
|
|
|
prot->iv_size = cipher_sz->iv;
|
|
|
|
prot->salt_size = cipher_sz->salt;
|
|
|
|
ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
|
2018-04-30 15:16:16 +08:00
|
|
|
if (!ctx->tx.iv) {
|
|
|
|
rc = -ENOMEM;
|
2022-03-19 11:15:20 +08:00
|
|
|
goto release_netdev;
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
|
|
|
|
2022-09-20 21:01:48 +08:00
|
|
|
memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2022-09-20 21:01:48 +08:00
|
|
|
prot->rec_seq_size = cipher_sz->rec_seq;
|
|
|
|
ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
|
2018-04-30 15:16:16 +08:00
|
|
|
if (!ctx->tx.rec_seq) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto free_iv;
|
|
|
|
}
|
|
|
|
|
2022-03-19 11:15:20 +08:00
|
|
|
start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
|
|
|
|
if (!start_marker_record) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto free_rec_seq;
|
|
|
|
}
|
|
|
|
|
|
|
|
offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
|
|
|
|
if (!offload_ctx) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto free_marker_record;
|
|
|
|
}
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
|
|
|
|
if (rc)
|
2022-03-19 11:15:20 +08:00
|
|
|
goto free_offload_ctx;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
|
|
|
/* start at rec_seq - 1 to account for the start marker record */
|
|
|
|
memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
|
|
|
|
offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
|
|
|
|
|
|
|
|
start_marker_record->end_seq = tcp_sk(sk)->write_seq;
|
|
|
|
start_marker_record->len = 0;
|
|
|
|
start_marker_record->num_frags = 0;
|
|
|
|
|
2022-07-27 17:43:42 +08:00
|
|
|
INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
|
|
|
|
offload_ctx->ctx = ctx;
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
INIT_LIST_HEAD(&offload_ctx->records_list);
|
|
|
|
list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
|
|
|
|
spin_lock_init(&offload_ctx->lock);
|
2018-05-10 21:27:25 +08:00
|
|
|
sg_init_table(offload_ctx->sg_tx_data,
|
|
|
|
ARRAY_SIZE(offload_ctx->sg_tx_data));
|
2018-04-30 15:16:16 +08:00
|
|
|
|
|
|
|
clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
|
|
|
|
ctx->push_pending_record = tls_device_push_pending_record;
|
|
|
|
|
|
|
|
/* TLS offload is greatly simplified if we don't send
|
|
|
|
* SKBs where only part of the payload needs to be encrypted.
|
|
|
|
* So mark the last skb in the write queue as end of record.
|
|
|
|
*/
|
|
|
|
skb = tcp_write_queue_tail(sk);
|
|
|
|
if (skb)
|
|
|
|
TCP_SKB_CB(skb)->eor = 1;
|
|
|
|
|
|
|
|
/* Avoid offloading if the device is down
|
|
|
|
* We don't want to offload new flows after
|
|
|
|
* the NETDEV_DOWN event
|
2019-09-03 12:31:04 +08:00
|
|
|
*
|
|
|
|
* device_offload_lock is taken in tls_device's NETDEV_DOWN
|
|
|
|
* handler thus protecting from the device going down before
|
|
|
|
* ctx was added to tls_device_list.
|
2018-04-30 15:16:16 +08:00
|
|
|
*/
|
2019-09-03 12:31:04 +08:00
|
|
|
down_read(&device_offload_lock);
|
2018-04-30 15:16:16 +08:00
|
|
|
if (!(netdev->flags & IFF_UP)) {
|
|
|
|
rc = -EINVAL;
|
2019-09-03 12:31:04 +08:00
|
|
|
goto release_lock;
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ctx->priv_ctx_tx = offload_ctx;
|
|
|
|
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
|
2018-09-12 23:44:42 +08:00
|
|
|
&ctx->crypto_send.info,
|
2018-04-30 15:16:16 +08:00
|
|
|
tcp_sk(sk)->write_seq);
|
2019-10-05 07:19:22 +08:00
|
|
|
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
|
|
|
|
tcp_sk(sk)->write_seq, rec_seq, rc);
|
2018-04-30 15:16:16 +08:00
|
|
|
if (rc)
|
2019-09-03 12:31:04 +08:00
|
|
|
goto release_lock;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2018-07-13 19:33:43 +08:00
|
|
|
tls_device_attach(ctx, sk, netdev);
|
2019-09-03 12:31:04 +08:00
|
|
|
up_read(&device_offload_lock);
|
2018-04-30 15:16:16 +08:00
|
|
|
|
2023-06-14 04:50:06 +08:00
|
|
|
/* following this assignment tls_is_skb_tx_device_offloaded
|
2018-04-30 15:16:16 +08:00
|
|
|
* will return true and the context might be accessed
|
|
|
|
* by the netdev's xmit function.
|
|
|
|
*/
|
2018-07-13 19:33:43 +08:00
|
|
|
smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
|
|
|
|
dev_put(netdev);
|
2019-09-03 12:31:03 +08:00
|
|
|
|
|
|
|
return 0;
|
2018-04-30 15:16:16 +08:00
|
|
|
|
|
|
|
release_lock:
|
|
|
|
up_read(&device_offload_lock);
|
|
|
|
clean_acked_data_disable(inet_csk(sk));
|
|
|
|
crypto_free_aead(offload_ctx->aead_send);
|
|
|
|
free_offload_ctx:
|
|
|
|
kfree(offload_ctx);
|
|
|
|
ctx->priv_ctx_tx = NULL;
|
|
|
|
free_marker_record:
|
|
|
|
kfree(start_marker_record);
|
2022-03-19 11:15:20 +08:00
|
|
|
free_rec_seq:
|
|
|
|
kfree(ctx->tx.rec_seq);
|
|
|
|
free_iv:
|
|
|
|
kfree(ctx->tx.iv);
|
|
|
|
release_netdev:
|
|
|
|
dev_put(netdev);
|
2018-04-30 15:16:16 +08:00
|
|
|
return rc;
|
|
|
|
}
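For completeness, the path above is reached through the regular kTLS socket API; device offload is selected transparently when the route's netdev advertises NETIF_F_HW_TLS_TX, and software kTLS is used otherwise. A minimal userspace sketch follows (error handling and the TLS handshake that produces the key material are omitted; the key/iv/salt/rec_seq buffers are placeholders supplied by the caller).

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_tx(int sock, const unsigned char *key,
			  const unsigned char *iv, const unsigned char *salt,
			  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	/* Attach the TLS ULP to the connected TCP socket. */
	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* This lands in tls_set_device_offload() when hardware TX offload
	 * is available on the egress netdev.
	 */
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}

The RX direction is enabled the same way with TLS_RX, which lands in tls_set_device_offload_rx() further down.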
|
|
|
|
|
2018-07-13 19:33:43 +08:00
|
|
|
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
|
|
|
|
{
|
2019-10-05 07:19:22 +08:00
|
|
|
struct tls12_crypto_info_aes_gcm_128 *info;
|
2018-07-13 19:33:43 +08:00
|
|
|
struct tls_offload_context_rx *context;
|
|
|
|
struct net_device *netdev;
|
|
|
|
int rc = 0;
|
|
|
|
|
2019-06-29 07:07:59 +08:00
|
|
|
if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2018-07-13 19:33:43 +08:00
|
|
|
netdev = get_netdev_for_sock(sk);
|
|
|
|
if (!netdev) {
|
|
|
|
pr_err_ratelimited("%s: netdev not found\n", __func__);
|
2019-09-03 12:31:04 +08:00
|
|
|
return -EINVAL;
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
|
2019-12-05 14:41:18 +08:00
|
|
|
rc = -EOPNOTSUPP;
|
2018-07-13 19:33:43 +08:00
|
|
|
goto release_netdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Avoid offloading if the device is down
|
|
|
|
* We don't want to offload new flows after
|
|
|
|
* the NETDEV_DOWN event
|
2019-09-03 12:31:04 +08:00
|
|
|
*
|
|
|
|
* device_offload_lock is taken in tls_device's NETDEV_DOWN
|
|
|
|
* handler thus protecting from the device going down before
|
|
|
|
* ctx was added to tls_device_list.
|
2018-07-13 19:33:43 +08:00
|
|
|
*/
|
2019-09-03 12:31:04 +08:00
|
|
|
down_read(&device_offload_lock);
|
2018-07-13 19:33:43 +08:00
|
|
|
if (!(netdev->flags & IFF_UP)) {
|
|
|
|
rc = -EINVAL;
|
2019-09-03 12:31:04 +08:00
|
|
|
goto release_lock;
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
|
|
|
|
if (!context) {
|
|
|
|
rc = -ENOMEM;
|
2019-09-03 12:31:04 +08:00
|
|
|
goto release_lock;
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
2019-06-11 12:40:02 +08:00
|
|
|
context->resync_nh_reset = 1;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
|
|
|
ctx->priv_ctx_rx = context;
|
|
|
|
rc = tls_set_sw_offload(sk, ctx, 0);
|
|
|
|
if (rc)
|
|
|
|
goto release_ctx;
|
|
|
|
|
|
|
|
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
|
2018-09-12 23:44:42 +08:00
|
|
|
&ctx->crypto_recv.info,
|
2018-07-13 19:33:43 +08:00
|
|
|
tcp_sk(sk)->copied_seq);
|
2019-10-05 07:19:22 +08:00
|
|
|
info = (void *)&ctx->crypto_recv.info;
|
|
|
|
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
|
|
|
|
tcp_sk(sk)->copied_seq, info->rec_seq, rc);
|
2019-04-26 03:32:01 +08:00
|
|
|
if (rc)
|
2018-07-13 19:33:43 +08:00
|
|
|
goto free_sw_resources;
|
|
|
|
|
|
|
|
tls_device_attach(ctx, sk, netdev);
|
2019-09-03 12:31:03 +08:00
|
|
|
up_read(&device_offload_lock);
|
|
|
|
|
|
|
|
dev_put(netdev);
|
|
|
|
|
|
|
|
return 0;
|
2018-07-13 19:33:43 +08:00
|
|
|
|
|
|
|
free_sw_resources:
|
2019-04-20 07:51:38 +08:00
|
|
|
up_read(&device_offload_lock);
|
2018-07-13 19:33:43 +08:00
|
|
|
tls_sw_free_resources_rx(sk);
|
2019-04-20 07:51:38 +08:00
|
|
|
down_read(&device_offload_lock);
|
2018-07-13 19:33:43 +08:00
|
|
|
release_ctx:
|
|
|
|
ctx->priv_ctx_rx = NULL;
|
|
|
|
release_lock:
|
|
|
|
up_read(&device_offload_lock);
|
2019-09-03 12:31:04 +08:00
|
|
|
release_netdev:
|
|
|
|
dev_put(netdev);
|
2018-07-13 19:33:43 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
void tls_device_offload_cleanup_rx(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct tls_context *tls_ctx = tls_get_ctx(sk);
|
|
|
|
struct net_device *netdev;
|
|
|
|
|
|
|
|
down_read(&device_offload_lock);
|
2022-08-10 16:16:02 +08:00
|
|
|
netdev = rcu_dereference_protected(tls_ctx->netdev,
|
|
|
|
lockdep_is_held(&device_offload_lock));
|
2018-07-13 19:33:43 +08:00
|
|
|
if (!netdev)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
|
|
|
|
TLS_OFFLOAD_CTX_DIR_RX);
|
|
|
|
|
|
|
|
if (tls_ctx->tx_conf != TLS_HW) {
|
|
|
|
dev_put(netdev);
|
2022-08-10 16:16:02 +08:00
|
|
|
rcu_assign_pointer(tls_ctx->netdev, NULL);
|
2020-11-26 06:18:10 +08:00
|
|
|
} else {
|
|
|
|
set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
|
2018-07-13 19:33:43 +08:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
up_read(&device_offload_lock);
|
|
|
|
tls_sw_release_resources_rx(sk);
|
|
|
|
}
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
static int tls_device_down(struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct tls_context *ctx, *tmp;
|
|
|
|
unsigned long flags;
|
|
|
|
LIST_HEAD(list);
|
|
|
|
|
|
|
|
/* Request a write lock to block new offload attempts */
|
|
|
|
down_write(&device_offload_lock);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&tls_device_lock, flags);
|
|
|
|
list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
|
2022-08-10 16:16:02 +08:00
|
|
|
struct net_device *ctx_netdev =
|
|
|
|
rcu_dereference_protected(ctx->netdev,
|
|
|
|
lockdep_is_held(&device_offload_lock));
|
|
|
|
|
|
|
|
if (ctx_netdev != netdev ||
|
2018-04-30 15:16:16 +08:00
|
|
|
!refcount_inc_not_zero(&ctx->refcount))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
list_move(&ctx->list, &list);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&tls_device_lock, flags);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(ctx, tmp, &list, list) {
|
2021-06-01 20:08:00 +08:00
|
|
|
/* Stop offloaded TX and switch to the fallback.
|
2023-06-14 04:50:06 +08:00
|
|
|
* tls_is_skb_tx_device_offloaded will return false.
|
2021-06-01 20:08:00 +08:00
|
|
|
*/
|
|
|
|
WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
|
|
|
|
|
|
|
|
/* Stop the RX and TX resync.
|
|
|
|
* tls_dev_resync must not be called after tls_dev_del.
|
|
|
|
*/
|
2022-08-10 16:16:02 +08:00
|
|
|
rcu_assign_pointer(ctx->netdev, NULL);
|
2021-06-01 20:08:00 +08:00
|
|
|
|
|
|
|
/* Start skipping the RX resync logic completely. */
|
|
|
|
set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
|
|
|
|
|
|
|
|
/* Sync with inflight packets. After this point:
|
|
|
|
* TX: no non-encrypted packets will be passed to the driver.
|
|
|
|
* RX: resync requests from the driver will be ignored.
|
|
|
|
*/
|
|
|
|
synchronize_net();
|
|
|
|
|
|
|
|
/* Release the offload context on the driver side. */
|
2018-07-13 19:33:43 +08:00
|
|
|
if (ctx->tx_conf == TLS_HW)
|
|
|
|
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
|
|
|
|
TLS_OFFLOAD_CTX_DIR_TX);
|
2020-11-26 06:18:10 +08:00
|
|
|
if (ctx->rx_conf == TLS_HW &&
|
|
|
|
!test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
|
2018-07-13 19:33:43 +08:00
|
|
|
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
|
|
|
|
TLS_OFFLOAD_CTX_DIR_RX);
|
2021-06-01 20:08:00 +08:00
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
dev_put(netdev);
|
|
|
|
|
2021-06-01 20:08:00 +08:00
|
|
|
/* Move the context to a separate list for two reasons:
|
|
|
|
* 1. When the context is deallocated, list_del is called.
|
|
|
|
* 2. It's no longer an offloaded context, so we don't want to
|
|
|
|
* run offload-specific code on this context.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&tls_device_lock, flags);
|
|
|
|
list_move_tail(&ctx->list, &tls_device_down_list);
|
|
|
|
spin_unlock_irqrestore(&tls_device_lock, flags);
|
|
|
|
|
|
|
|
/* Device contexts for RX and TX will be freed in sk_destruct
|
|
|
|
* by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
|
tls: Fix context leak on tls_device_down
The commit cited below claims to fix a use-after-free condition after
tls_device_down. Apparently, the description wasn't fully accurate. The
context stayed alive, but ctx->netdev became NULL, and the offload was
torn down without a proper fallback, so a bug was present, but a
different kind of bug.
Due to misunderstanding of the issue, the original patch dropped the
refcount_dec_and_test line for the context to avoid the alleged
premature deallocation. That line has to be restored, because it matches
the refcount_inc_not_zero from the same function, otherwise the contexts
that survived tls_device_down are leaked.
This patch fixes the described issue by restoring refcount_dec_and_test.
After this change, there is no leak anymore, and the fallback to
software kTLS still works.
Fixes: c55dcdd435aa ("net/tls: Fix use-after-free after the TLS device goes down and up")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20220512091830.678684-1-maximmi@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-05-12 17:18:30 +08:00
|
|
|
* Now release the ref taken above.
|
2021-06-01 20:08:00 +08:00
|
|
|
*/
|
2022-07-21 17:11:27 +08:00
|
|
|
if (refcount_dec_and_test(&ctx->refcount)) {
|
|
|
|
/* sk_destruct ran after tls_device_down took a ref, and
|
|
|
|
* it returned early. Complete the destruction here.
|
|
|
|
*/
|
|
|
|
list_del(&ctx->list);
|
2022-05-12 17:18:30 +08:00
|
|
|
tls_device_free_ctx(ctx);
|
2022-07-21 17:11:27 +08:00
|
|
|
}
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
up_write(&device_offload_lock);
|
|
|
|
|
2022-07-27 17:43:42 +08:00
|
|
|
flush_workqueue(destruct_wq);
|
2018-04-30 15:16:16 +08:00
|
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tls_dev_event(struct notifier_block *this, unsigned long event,
|
|
|
|
void *ptr)
|
|
|
|
{
|
|
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
|
|
|
|
2019-05-22 10:02:02 +08:00
|
|
|
if (!dev->tlsdev_ops &&
|
|
|
|
!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
|
2018-04-30 15:16:16 +08:00
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
case NETDEV_REGISTER:
|
|
|
|
case NETDEV_FEAT_CHANGE:
|
2021-01-17 22:59:49 +08:00
|
|
|
if (netif_is_bond_master(dev))
|
|
|
|
return NOTIFY_DONE;
|
2018-07-13 19:33:43 +08:00
|
|
|
if ((dev->features & NETIF_F_HW_TLS_RX) &&
|
2019-06-11 12:40:08 +08:00
|
|
|
!dev->tlsdev_ops->tls_dev_resync)
|
2018-07-13 19:33:43 +08:00
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
2018-04-30 15:16:16 +08:00
|
|
|
if (dev->tlsdev_ops &&
|
|
|
|
dev->tlsdev_ops->tls_dev_add &&
|
|
|
|
dev->tlsdev_ops->tls_dev_del)
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
else
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
case NETDEV_DOWN:
|
|
|
|
return tls_device_down(dev);
|
|
|
|
}
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
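The checks in tls_dev_event() above spell out what a driver must provide before offload is accepted: tlsdev_ops with tls_dev_add and tls_dev_del, plus tls_dev_resync whenever NETIF_F_HW_TLS_RX is advertised. A condensed sketch of that driver side follows; all my_nic_* names are made up, and the tls_dev_resync prototype is my recollection of include/net/tls.h and should be verified there.

/* Sketch of the driver side validated by tls_dev_event() above. */
#include <linux/netdevice.h>
#include <net/tls.h>

static int my_nic_tls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
			  u32 start_offload_tcp_sn)
{
	/* program the key material and starting TCP sequence into the HW */
	return 0;
}

static void my_nic_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
			   enum tls_offload_ctx_dir direction)
{
	/* release the HW state for this connection */
}

static int my_nic_tls_resync(struct net_device *netdev, struct sock *sk,
			     u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	/* hand the verified (TCP seq, record serial) pair back to the HW */
	return 0;
}

static const struct tlsdev_ops my_nic_tlsdev_ops = {
	.tls_dev_add	= my_nic_tls_add,
	.tls_dev_del	= my_nic_tls_del,
	.tls_dev_resync	= my_nic_tls_resync,	/* mandatory with NETIF_F_HW_TLS_RX */
};

static void my_nic_advertise_tls(struct net_device *netdev)
{
	netdev->tlsdev_ops = &my_nic_tlsdev_ops;
	netdev->features    |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
	netdev->hw_features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
}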
|
|
|
|
|
|
|
|
static struct notifier_block tls_dev_notifier = {
|
|
|
|
.notifier_call = tls_dev_event,
|
|
|
|
};
|
|
|
|
|
2022-07-14 15:07:54 +08:00
|
|
|
int __init tls_device_init(void)
|
2018-04-30 15:16:16 +08:00
|
|
|
{
|
2022-07-27 17:43:42 +08:00
|
|
|
int err;
|
|
|
|
|
2023-08-05 06:59:51 +08:00
|
|
|
dummy_page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!dummy_page)
|
2022-07-27 17:43:42 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2023-08-05 06:59:51 +08:00
|
|
|
destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
|
|
|
|
if (!destruct_wq) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_free_dummy;
|
|
|
|
}
|
|
|
|
|
2022-07-27 17:43:42 +08:00
|
|
|
err = register_netdevice_notifier(&tls_dev_notifier);
|
|
|
|
if (err)
|
2023-08-05 06:59:51 +08:00
|
|
|
goto err_destroy_wq;
|
2022-07-27 17:43:42 +08:00
|
|
|
|
2023-08-05 06:59:51 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_destroy_wq:
|
|
|
|
destroy_workqueue(destruct_wq);
|
|
|
|
err_free_dummy:
|
|
|
|
put_page(dummy_page);
|
2022-07-27 17:43:42 +08:00
|
|
|
return err;
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void __exit tls_device_cleanup(void)
|
|
|
|
{
|
|
|
|
unregister_netdevice_notifier(&tls_dev_notifier);
|
2022-07-27 17:43:42 +08:00
|
|
|
destroy_workqueue(destruct_wq);
|
2019-05-09 07:46:14 +08:00
|
|
|
clean_acked_data_flush();
|
2023-08-05 06:59:51 +08:00
|
|
|
put_page(dummy_page);
|
2018-04-30 15:16:16 +08:00
|
|
|
}
|