Skip to content

Commit 70525e5

Browse files
hreinecke authored and keithbusch committed
nvmet-tcp: peek icreq before starting TLS
Incoming connection might be either 'normal' NVMe-TCP connections starting with icreq or TLS handshakes. To ensure that 'normal' connections can still be handled we need to peek the first packet and only start TLS handshake if it's not an icreq. With that we can lift the restriction to always set TREQ to 'required' when TLS1.3 is enabled. Signed-off-by: Hannes Reinecke <hare@suse.de> Signed-off-by: Keith Busch <kbusch@kernel.org>
1 parent a1c5dd8 commit 70525e5

3 files changed

Lines changed: 82 additions & 9 deletions

File tree

drivers/nvme/target/configfs.c

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,20 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
198198
return -EINVAL;
199199

200200
found:
201+
if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
202+
port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
203+
switch (nvmet_addr_treq[i].type) {
204+
case NVMF_TREQ_NOT_SPECIFIED:
205+
pr_debug("treq '%s' not allowed for TLS1.3\n",
206+
nvmet_addr_treq[i].name);
207+
return -EINVAL;
208+
case NVMF_TREQ_NOT_REQUIRED:
209+
pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
210+
break;
211+
default:
212+
break;
213+
}
214+
}
201215
treq |= nvmet_addr_treq[i].type;
202216
port->disc_addr.treq = treq;
203217
return count;
@@ -410,12 +424,15 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
410424

411425
nvmet_port_init_tsas_tcp(port, sectype);
412426
/*
413-
* The TLS implementation currently does not support
414-
* secure concatenation, so TREQ is always set to 'required'
415-
* if TLS is enabled.
427+
* If TLS is enabled TREQ should be set to 'required' per default
416428
*/
417429
if (sectype == NVMF_TCP_SECTYPE_TLS13) {
418-
treq |= NVMF_TREQ_REQUIRED;
430+
u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);
431+
432+
if (sc == NVMF_TREQ_NOT_SPECIFIED)
433+
treq |= NVMF_TREQ_REQUIRED;
434+
else
435+
treq |= sc;
419436
} else {
420437
treq |= NVMF_TREQ_NOT_SPECIFIED;
421438
}

drivers/nvme/target/nvmet.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -184,6 +184,11 @@ static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *por
184184
return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
185185
}
186186

187+
static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
188+
{
189+
return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
190+
}
191+
187192
struct nvmet_ctrl {
188193
struct nvmet_subsys *subsys;
189194
struct nvmet_sq **sqs;

drivers/nvme/target/tcp.c

Lines changed: 56 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1730,6 +1730,54 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
17301730
}
17311731

17321732
#ifdef CONFIG_NVME_TARGET_TCP_TLS
1733+
static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1734+
{
1735+
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1736+
int len, ret;
1737+
struct kvec iov = {
1738+
.iov_base = (u8 *)&queue->pdu + queue->offset,
1739+
.iov_len = sizeof(struct nvme_tcp_hdr),
1740+
};
1741+
char cbuf[CMSG_LEN(sizeof(char))] = {};
1742+
struct msghdr msg = {
1743+
.msg_control = cbuf,
1744+
.msg_controllen = sizeof(cbuf),
1745+
.msg_flags = MSG_PEEK,
1746+
};
1747+
1748+
if (nvmet_port_secure_channel_required(queue->port->nport))
1749+
return 0;
1750+
1751+
len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1752+
iov.iov_len, msg.msg_flags);
1753+
if (unlikely(len < 0)) {
1754+
pr_debug("queue %d: peek error %d\n",
1755+
queue->idx, len);
1756+
return len;
1757+
}
1758+
1759+
ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1760+
if (ret < 0)
1761+
return ret;
1762+
1763+
if (len < sizeof(struct nvme_tcp_hdr)) {
1764+
pr_debug("queue %d: short read, %d bytes missing\n",
1765+
queue->idx, (int)iov.iov_len - len);
1766+
return -EAGAIN;
1767+
}
1768+
pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1769+
queue->idx, hdr->type, hdr->hlen, hdr->plen,
1770+
(int)sizeof(struct nvme_tcp_icreq_pdu));
1771+
if (hdr->type == nvme_tcp_icreq &&
1772+
hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
1773+
hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
1774+
pr_debug("queue %d: icreq detected\n",
1775+
queue->idx);
1776+
return len;
1777+
}
1778+
return 0;
1779+
}
1780+
17331781
static void nvmet_tcp_tls_handshake_done(void *data, int status,
17341782
key_serial_t peerid)
17351783
{
@@ -1876,11 +1924,14 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
18761924
sk->sk_user_data = NULL;
18771925
sk->sk_data_ready = port->data_ready;
18781926
read_unlock_bh(&sk->sk_callback_lock);
1879-
if (!nvmet_tcp_tls_handshake(queue))
1880-
return;
1881-
1882-
/* TLS handshake failed, terminate the connection */
1883-
goto out_destroy_sq;
1927+
if (!nvmet_tcp_try_peek_pdu(queue)) {
1928+
if (!nvmet_tcp_tls_handshake(queue))
1929+
return;
1930+
/* TLS handshake failed, terminate the connection */
1931+
goto out_destroy_sq;
1932+
}
1933+
/* Not a TLS connection, continue with normal processing */
1934+
queue->state = NVMET_TCP_Q_CONNECTING;
18841935
}
18851936
#endif
18861937

0 commit comments

Comments
 (0)