nvme-tcp: sanitize TLS key handling

There is a difference between TLS configured (i.e. the user has
provisioned/requested a key) and TLS enabled (i.e. the connection
is encrypted with TLS). This becomes important for secure concatenation,
where the initial authentication runs on an unencrypted connection
(i.e. TLS configured, but not enabled), and the queue is then reset to
run over TLS (i.e. TLS configured _and_ enabled).
So, to differentiate between those two states, store the generated
key in opts->tls_key (as we're using the same TLS key for all queues),
the key serial of the resulting TLS handshake in ctrl->tls_pskid
(to signal that TLS on the admin queue is enabled), and a simple
per-queue flag to indicate that TLS has been enabled.

Signed-off-by: Hannes Reinecke <hare@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
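
To make the configured-vs-enabled split concrete, here is a small stand-alone sketch (user-space C, not driver code). The field names tls_key, tls_pskid and tls_enabled mirror this patch; the structs, helper names and the main() flow are invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the real options/controller/queue structures;
 * only the TLS state fields described in the commit message are modelled. */
struct opts  { int tls_key; };                           /* user-provisioned PSK (0 = none) */
struct ctrl  { struct opts *opts; int tls_pskid; };      /* PSK serial from the admin-queue handshake */
struct queue { struct ctrl *ctrl; bool tls_enabled; };   /* per-queue "handshake succeeded" flag */

/* "TLS configured": the user asked for TLS by provisioning a key. */
static bool tls_configured(const struct ctrl *ctrl)
{
	return ctrl->opts->tls_key != 0;
}

/* "TLS enabled": this particular queue completed a TLS handshake. */
static bool queue_tls(const struct queue *queue)
{
	return queue->tls_enabled;
}

int main(void)
{
	struct opts opts = { .tls_key = 42 };
	struct ctrl ctrl = { .opts = &opts };
	struct queue admin = { .ctrl = &ctrl };

	/* Secure concatenation, step 1: authenticate on the plain connection.
	 * TLS is configured but not yet enabled on the queue. */
	printf("configured=%d enabled=%d\n",
	       tls_configured(&ctrl), queue_tls(&admin));

	/* Step 2: the queue is reset and the TLS handshake succeeds; the queue
	 * flips to "enabled" and the admin queue records the PSK serial. */
	admin.tls_enabled = true;
	ctrl.tls_pskid = opts.tls_key;
	printf("configured=%d enabled=%d pskid=%d\n",
	       tls_configured(&ctrl), queue_tls(&admin), ctrl.tls_pskid);
	return 0;
}

The patch below wires this distinction into the driver: nvme_tcp_tls_configured() checks the first state, nvme_tcp_queue_tls() the second.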
Hannes Reinecke 2024-07-22 14:02:19 +02:00 committed by Keith Busch
parent 79559c7533
commit 363895767f
4 changed files with 43 additions and 17 deletions

drivers/nvme/host/core.c

@@ -4715,7 +4715,6 @@ static void nvme_free_ctrl(struct device *dev)
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
-	key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
cleanup_srcu_struct(&ctrl->srcu);

drivers/nvme/host/nvme.h

@@ -373,7 +373,7 @@ struct nvme_ctrl {
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
#endif
-	struct key *tls_key;
+	key_serial_t tls_pskid;
/* Power saving configuration */
u64 ps_max_latency_us;

drivers/nvme/host/sysfs.c

@@ -670,9 +670,9 @@ static ssize_t tls_key_show(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-	if (!ctrl->tls_key)
+	if (!ctrl->tls_pskid)
return 0;
-	return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
+	return sysfs_emit(buf, "%08x", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);
#endif

drivers/nvme/host/tcp.c

@@ -165,6 +165,7 @@ struct nvme_tcp_queue {
bool hdr_digest;
bool data_digest;
+	bool tls_enabled;
struct ahash_request *rcv_hash;
struct ahash_request *snd_hash;
__le32 exp_ddgst;
@@ -213,7 +214,21 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
-static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
+/*
+ * Check if the queue is TLS encrypted
+ */
+static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue)
+{
+	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+		return 0;
+	return queue->tls_enabled;
+}
+/*
+ * Check if TLS is configured for the controller.
+ */
+static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
{
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
@@ -368,7 +383,7 @@ static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
-	return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+	return !nvme_tcp_queue_tls(queue) &&
nvme_tcp_queue_has_pending(queue);
}
@@ -1427,7 +1442,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
-	if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
+	if (nvme_tcp_queue_tls(queue)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
@@ -1439,7 +1454,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
-	if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
+	if (nvme_tcp_queue_tls(queue)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
@@ -1587,7 +1602,10 @@ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
qid, pskid);
queue->tls_err = -ENOKEY;
} else {
-		ctrl->ctrl.tls_key = tls_key;
+		queue->tls_enabled = true;
+		if (qid == 0)
+			ctrl->ctrl.tls_pskid = key_serial(tls_key);
+		key_put(tls_key);
queue->tls_err = 0;
}
@@ -1768,7 +1786,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
-	if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
+	if (nvme_tcp_tls_configured(nctrl) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
@@ -1829,6 +1847,8 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
+	/* Stopping the queue will disable TLS */
+	queue->tls_enabled = false;
mutex_unlock(&queue->queue_lock);
}
@@ -1925,16 +1945,17 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
-	if (nvme_tcp_tls(ctrl)) {
+	if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
-		else
+		else {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
-		if (!pskid) {
-			dev_err(ctrl->device, "no valid PSK found\n");
-			return -ENOKEY;
+			if (!pskid) {
+				dev_err(ctrl->device, "no valid PSK found\n");
+				return -ENOKEY;
+			}
}
}
@@ -1957,13 +1978,14 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
-	if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
+	if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_tcp_alloc_queue(ctrl, i,
-				key_serial(ctrl->tls_key));
+				ctrl->tls_pskid);
if (ret)
goto out_free_queues;
}
@@ -2144,6 +2166,11 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
nvme_unquiesce_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
+	if (ctrl->tls_pskid) {
+		dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n",
+			ctrl->tls_pskid);
+		ctrl->tls_pskid = 0;
+	}
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,