blob: 569f1505f73c842e3ee0247a127379d02d111696 [file] [log] [blame]
Daniel Willmannf91d2aa2023-01-04 18:20:55 +01001/*! \file osmo_io_uring.c
2 * io_uring backend for osmo_io.
3 *
4 * (C) 2022-2023 by sysmocom s.f.m.c.
5 * Author: Daniel Willmann <daniel@sysmocom.de>
Harald Welte1047ed72023-11-18 18:51:58 +01006 * (C) 2023-2024 by Harald Welte <laforge@osmocom.org>
Daniel Willmannf91d2aa2023-01-04 18:20:55 +01007 *
8 * All Rights Reserved.
9 *
10 * SPDX-License-Identifier: GPL-2.0+
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23/* TODO:
24 * Parameters:
25 * - number of simultaneous read/write in uring for given fd
26 *
27 */
28
29#include "../config.h"
30#if defined(__linux__)
31
32#include <stdio.h>
33#include <talloc.h>
34#include <unistd.h>
35#include <string.h>
36#include <stdbool.h>
37#include <errno.h>
38
Harald Welte1047ed72023-11-18 18:51:58 +010039#include <netinet/in.h>
40#include <netinet/sctp.h>
Daniel Willmannf91d2aa2023-01-04 18:20:55 +010041#include <sys/eventfd.h>
42#include <liburing.h>
43
44#include <osmocom/core/osmo_io.h>
45#include <osmocom/core/linuxlist.h>
46#include <osmocom/core/logging.h>
47#include <osmocom/core/msgb.h>
48#include <osmocom/core/select.h>
49#include <osmocom/core/talloc.h>
50#include <osmocom/core/utils.h>
51#include <osmocom/core/socket.h>
52
53#include "osmo_io_internal.h"
54
55#define IOFD_URING_ENTRIES 4096
56
/*! per-thread state of the io_uring backend for osmo_io: the ring itself,
 *  plus an eventfd (wrapped in an osmo_fd) through which the osmocom
 *  select/poll main loop is notified of pending completion queue entries. */
struct osmo_io_uring {
	struct osmo_fd event_ofd;	/* eventfd registered with the osmocom select loop */
	struct io_uring ring;		/* the io_uring instance itself */
};

/* one ring per thread; set up by osmo_iofd_uring_init() */
static __thread struct osmo_io_uring g_ring;
63
64static void iofd_uring_cqe(struct io_uring *ring);
Harald Welte987a86a2023-11-18 18:46:24 +010065
66/*! read call-back for eventfd notifying us if entries are in the completion queue */
Daniel Willmannf91d2aa2023-01-04 18:20:55 +010067static int iofd_uring_poll_cb(struct osmo_fd *ofd, unsigned int what)
68{
69 struct io_uring *ring = ofd->data;
70 eventfd_t val;
71 int rc;
72
73 if (what & OSMO_FD_READ) {
74 rc = eventfd_read(ofd->fd, &val);
75 if (rc < 0) {
76 LOGP(DLIO, LOGL_ERROR, "eventfd_read() returned error\n");
77 return rc;
78 }
79
80 iofd_uring_cqe(ring);
81 }
82 if (what & OSMO_FD_WRITE)
83 OSMO_ASSERT(0);
84
85 return 0;
86}
87
88/*! initialize the uring and tie it into our event loop */
89void osmo_iofd_uring_init(void)
90{
Harald Welte9c604f42024-03-14 08:21:42 +010091 int rc, evfd;
92
Daniel Willmannf91d2aa2023-01-04 18:20:55 +010093 rc = io_uring_queue_init(IOFD_URING_ENTRIES, &g_ring.ring, 0);
94 if (rc < 0)
Harald Welte5fcfbe02024-03-14 08:18:19 +010095 osmo_panic("failure during io_uring_queue_init(): %s\n", strerror(-rc));
Daniel Willmannf91d2aa2023-01-04 18:20:55 +010096
97 rc = eventfd(0, 0);
98 if (rc < 0) {
99 io_uring_queue_exit(&g_ring.ring);
Harald Welte5fcfbe02024-03-14 08:18:19 +0100100 osmo_panic("failure creating eventfd(0, 0) for io_uring: %s\n", strerror(-rc));
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100101 }
Harald Welte9c604f42024-03-14 08:21:42 +0100102 evfd = rc;
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100103
Harald Welte9c604f42024-03-14 08:21:42 +0100104 osmo_fd_setup(&g_ring.event_ofd, evfd, OSMO_FD_READ, iofd_uring_poll_cb, &g_ring.ring, 0);
105 rc = osmo_fd_register(&g_ring.event_ofd);
106 if (rc < 0) {
107 close(evfd);
108 io_uring_queue_exit(&g_ring.ring);
109 osmo_panic("failure registering io_uring-eventfd as osmo_fd: %d\n", rc);
110 }
Andreas Eversberg9c0004a2024-03-14 13:03:05 +0100111 rc = io_uring_register_eventfd(&g_ring.ring, evfd);
Harald Welte9c604f42024-03-14 08:21:42 +0100112 if (rc < 0) {
113 osmo_fd_unregister(&g_ring.event_ofd);
114 close(evfd);
115 io_uring_queue_exit(&g_ring.ring);
116 osmo_panic("failure registering eventfd with io_uring: %s\n", strerror(-rc));
117 }
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100118}
119
120
/*! submit a read-type operation (READ/RECVFROM/RECVMSG) to the io_uring for
 *  the given iofd. Only one read-type operation may be in flight per iofd at
 *  any given time; it is tracked via iofd->u.uring.read_msghdr.
 *  Allocation or SQE exhaustion is fatal (OSMO_ASSERT). */
static void iofd_uring_submit_recv(struct osmo_io_fd *iofd, enum iofd_msg_action action)
{
	struct msgb *msg;
	struct iofd_msghdr *msghdr;
	struct io_uring_sqe *sqe;

	/* re-use a partially-filled pending msgb if present, else allocate a new one */
	msg = iofd_msgb_pending_or_alloc(iofd);
	if (!msg) {
		LOGPIO(iofd, LOGL_ERROR, "Could not allocate msgb for reading\n");
		OSMO_ASSERT(0);
	}

	msghdr = iofd_msghdr_alloc(iofd, action, msg, iofd->cmsg_size);
	if (!msghdr) {
		LOGPIO(iofd, LOGL_ERROR, "Could not allocate msghdr for reading\n");
		OSMO_ASSERT(0);
	}

	/* receive into the tailroom of the msgb */
	msghdr->iov[0].iov_base = msg->tail;
	msghdr->iov[0].iov_len = msgb_tailroom(msg);

	switch (action) {
	case IOFD_ACT_READ:
		/* plain readv needs no msghdr setup beyond the iovec */
		break;
	case IOFD_ACT_RECVMSG:
		/* only RECVMSG receives ancillary/control data */
		msghdr->hdr.msg_control = msghdr->cmsg;
		msghdr->hdr.msg_controllen = iofd->cmsg_size;
		/* fall-through */
	case IOFD_ACT_RECVFROM:
		/* both RECVMSG and RECVFROM record the sender address */
		msghdr->hdr.msg_iov = &msghdr->iov[0];
		msghdr->hdr.msg_iovlen = 1;
		msghdr->hdr.msg_name = &msghdr->osa.u.sa;
		msghdr->hdr.msg_namelen = osmo_sockaddr_size(&msghdr->osa);
		break;
	default:
		OSMO_ASSERT(0);
	}

	sqe = io_uring_get_sqe(&g_ring.ring);
	if (!sqe) {
		LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
		OSMO_ASSERT(0);
	}

	switch (action) {
	case IOFD_ACT_READ:
		io_uring_prep_readv(sqe, iofd->fd, msghdr->iov, 1, 0);
		break;
	case IOFD_ACT_RECVMSG:
	case IOFD_ACT_RECVFROM:
		io_uring_prep_recvmsg(sqe, iofd->fd, &msghdr->hdr, msghdr->flags);
		break;
	default:
		OSMO_ASSERT(0);
	}
	/* the msghdr comes back as user data with the completion (CQE) */
	io_uring_sqe_set_data(sqe, msghdr);

	io_uring_submit(&g_ring.ring);
	/* NOTE: This only works if we have one read per fd */
	iofd->u.uring.read_msghdr = msghdr;
}
182
/*! completion call-back for READ/RECVFROM: dispatch received data to the
 *  user and re-arm the next read unless reading was disabled or the iofd
 *  closed in the meantime.
 *  \param[in] msghdr the request that completed (freed at the end)
 *  \param[in] rc result of the kernel operation: bytes received or -errno */
static void iofd_uring_handle_recv(struct iofd_msghdr *msghdr, int rc)
{
	struct osmo_io_fd *iofd = msghdr->iofd;
	struct msgb *msg = msghdr->msg;

	/* a positive result is the number of bytes received; extend the msgb */
	if (rc > 0)
		msgb_put(msg, rc);

	/* hand the data (or error) to the user, unless the fd was closed meanwhile */
	if (!IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))
		iofd_handle_recv(iofd, msg, rc, msghdr);

	/* re-arm the next read; submit_recv replaces read_msghdr, otherwise clear it */
	if (iofd->u.uring.read_enabled && !IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))
		iofd_uring_submit_recv(iofd, msghdr->action);
	else
		iofd->u.uring.read_msghdr = NULL;


	iofd_msghdr_free(msghdr);
}
203
204static int iofd_uring_submit_tx(struct osmo_io_fd *iofd);
205
/*! completion call-back for WRITE/SENDTO: report the result to the user
 *  and submit the next queued transmit message, if any.
 *  \param[in] msghdr the request that completed (freed here or downstream)
 *  \param[in] rc result of the kernel operation: bytes sent or -errno */
static void iofd_uring_handle_tx(struct iofd_msghdr *msghdr, int rc)
{
	struct osmo_io_fd *iofd = msghdr->iofd;

	/* Detach msghdr from iofd. It might get freed here or it is freed during iofd_handle_send_completion().
	 * If there is pending data to send, iofd_uring_submit_tx() will attach it again.
	 * iofd_handle_send_completion() will invoke a callback function to signal the possibility of write/send.
	 * This callback function might close iofd, leading to the potential freeing of iofd->u.uring.write_msghdr if
	 * still attached. Since iofd_handle_send_completion() frees msghdr at the end of the function, detaching
	 * msghdr here prevents a double-free bug. */
	if (iofd->u.uring.write_msghdr == msghdr)
		iofd->u.uring.write_msghdr = NULL;

	if (OSMO_UNLIKELY(IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))) {
		/* fd was closed while this write was in flight: discard silently */
		msgb_free(msghdr->msg);
		iofd_msghdr_free(msghdr);
	} else {
		iofd_handle_send_completion(iofd, rc, msghdr);
	}

	/* submit the next to-be-transmitted message for this file descriptor */
	if (iofd->u.uring.write_enabled && !IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))
		iofd_uring_submit_tx(iofd);
}
231
Harald Welte987a86a2023-11-18 18:46:24 +0100232/*! handle completion of a single I/O message */
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100233static void iofd_uring_handle_completion(struct iofd_msghdr *msghdr, int res)
234{
235 struct osmo_io_fd *iofd = msghdr->iofd;
236
237 IOFD_FLAG_SET(iofd, IOFD_FLAG_IN_CALLBACK);
238
239 switch (msghdr->action) {
240 case IOFD_ACT_READ:
241 case IOFD_ACT_RECVFROM:
Harald Welte1047ed72023-11-18 18:51:58 +0100242 case IOFD_ACT_RECVMSG:
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100243 iofd_uring_handle_recv(msghdr, res);
244 break;
245 case IOFD_ACT_WRITE:
246 case IOFD_ACT_SENDTO:
Harald Welte1047ed72023-11-18 18:51:58 +0100247 case IOFD_ACT_SENDMSG:
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100248 iofd_uring_handle_tx(msghdr, res);
249 break;
250 default:
251 OSMO_ASSERT(0)
252 }
253
Andreas Eversberg8db60092024-02-15 10:16:33 +0100254 IOFD_FLAG_UNSET(iofd, IOFD_FLAG_IN_CALLBACK);
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100255
256 if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_TO_FREE) && !iofd->u.uring.read_msghdr && !iofd->u.uring.write_msghdr)
257 talloc_free(iofd);
258}
259
Harald Welte987a86a2023-11-18 18:46:24 +0100260/*! process all pending completion queue entries in given io_uring */
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100261static void iofd_uring_cqe(struct io_uring *ring)
262{
263 int rc;
264 struct io_uring_cqe *cqe;
265 struct iofd_msghdr *msghdr;
266
267 while (io_uring_peek_cqe(ring, &cqe) == 0) {
268
269 msghdr = io_uring_cqe_get_data(cqe);
270 if (!msghdr) {
271 LOGP(DLIO, LOGL_DEBUG, "Cancellation returned\n");
272 io_uring_cqe_seen(ring, cqe);
273 continue;
274 }
Andreas Eversberg8db60092024-02-15 10:16:33 +0100275 if (!msghdr->iofd) {
276 io_uring_cqe_seen(ring, cqe);
277 iofd_msghdr_free(msghdr);
278 continue;
279 }
Daniel Willmannf91d2aa2023-01-04 18:20:55 +0100280
281 rc = cqe->res;
282 /* Hand the entry back to the kernel before */
283 io_uring_cqe_seen(ring, cqe);
284
285 iofd_uring_handle_completion(msghdr, rc);
286
287 }
288}
289
/*! submit the next to-be-transmitted message for given iofd.
 *  \returns 0 on success; -ENODATA if the transmit queue is empty */
static int iofd_uring_submit_tx(struct osmo_io_fd *iofd)
{
	struct io_uring_sqe *sqe;
	struct iofd_msghdr *msghdr;

	/* de-queue the next pending transmit message, if any */
	msghdr = iofd_txqueue_dequeue(iofd);
	if (!msghdr)
		return -ENODATA;

	sqe = io_uring_get_sqe(&g_ring.ring);
	if (!sqe) {
		LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
		OSMO_ASSERT(0);
	}

	/* the msghdr comes back as user data with the completion (CQE) */
	io_uring_sqe_set_data(sqe, msghdr);

	switch (msghdr->action) {
	case IOFD_ACT_WRITE:
	case IOFD_ACT_SENDTO:
	case IOFD_ACT_SENDMSG:
		/* sendmsg covers all three cases; hdr was prepared at enqueue time */
		io_uring_prep_sendmsg(sqe, msghdr->iofd->fd, &msghdr->hdr, msghdr->flags);
		break;
	default:
		OSMO_ASSERT(0);
	}

	io_uring_submit(&g_ring.ring);
	/* only one write may be in flight per iofd at any given time */
	iofd->u.uring.write_msghdr = msghdr;

	return 0;
}
323
324static void iofd_uring_write_enable(struct osmo_io_fd *iofd);
325static void iofd_uring_read_enable(struct osmo_io_fd *iofd);
326
/*! register an iofd with the io_uring backend; a no-op, as operations are
 *  only submitted once read/write gets enabled. Always returns 0. */
static int iofd_uring_register(struct osmo_io_fd *iofd)
{
	return 0;
}
331
/*! unregister an iofd from the io_uring backend: submit cancellations for
 *  any in-flight read/write, and detach those msghdrs from the iofd (moving
 *  their talloc ownership to OTC_GLOBAL) so that their completions can be
 *  handled safely even after the iofd itself has been freed. */
static int iofd_uring_unregister(struct osmo_io_fd *iofd)
{
	struct io_uring_sqe *sqe;
	struct iofd_msghdr *msghdr;

	if (iofd->u.uring.read_msghdr) {
		msghdr = iofd->u.uring.read_msghdr;
		sqe = io_uring_get_sqe(&g_ring.ring);
		OSMO_ASSERT(sqe != NULL);
		/* NULL user data marks the cancellation's own completion in iofd_uring_cqe() */
		io_uring_sqe_set_data(sqe, NULL);
		LOGPIO(iofd, LOGL_DEBUG, "Cancelling read\n");
		iofd->u.uring.read_msghdr = NULL;
		/* iofd may be freed before the cancelled op completes: detach msghdr */
		talloc_steal(OTC_GLOBAL, msghdr);
		msghdr->iofd = NULL;
		io_uring_prep_cancel(sqe, msghdr, 0);
	}

	if (iofd->u.uring.write_msghdr) {
		msghdr = iofd->u.uring.write_msghdr;
		sqe = io_uring_get_sqe(&g_ring.ring);
		OSMO_ASSERT(sqe != NULL);
		io_uring_sqe_set_data(sqe, NULL);
		LOGPIO(iofd, LOGL_DEBUG, "Cancelling write\n");
		iofd->u.uring.write_msghdr = NULL;
		talloc_steal(OTC_GLOBAL, msghdr);
		/* the pending transmit data will never be sent; free it now */
		msgb_free(msghdr->msg);
		msghdr->iofd = NULL;
		io_uring_prep_cancel(sqe, msghdr, 0);
	}
	io_uring_submit(&g_ring.ring);

	/* if still waiting for an outbound connect, stop watching the temporary ofd */
	if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED)) {
		osmo_fd_unregister(&iofd->u.uring.connect_ofd);
		IOFD_FLAG_UNSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED);
	}

	return 0;
}
370
/*! enable write notification on the iofd: submit pending transmit data, or —
 *  in READ_WRITE mode with an empty queue — a zero-length dummy write whose
 *  completion signals writability (e.g. connect() having finished). */
static void iofd_uring_write_enable(struct osmo_io_fd *iofd)
{
	iofd->u.uring.write_enabled = true;

	/* only one write may be in flight per iofd at any given time */
	if (iofd->u.uring.write_msghdr)
		return;

	/* This function is called again, once the socket is connected. */
	if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED))
		return;

	if (osmo_iofd_txqueue_len(iofd) > 0)
		iofd_uring_submit_tx(iofd);
	else if (iofd->mode == OSMO_IO_FD_MODE_READ_WRITE) {
		/* Empty write request to check when the socket is connected */
		struct iofd_msghdr *msghdr;
		struct io_uring_sqe *sqe;
		struct msgb *msg = msgb_alloc_headroom(0, 0, "io_uring write dummy");
		if (!msg) {
			LOGPIO(iofd, LOGL_ERROR, "Could not allocate msgb for writing\n");
			OSMO_ASSERT(0);
		}
		msghdr = iofd_msghdr_alloc(iofd, IOFD_ACT_WRITE, msg, 0);
		if (!msghdr) {
			LOGPIO(iofd, LOGL_ERROR, "Could not allocate msghdr for writing\n");
			OSMO_ASSERT(0);
		}

		/* zero-length iovec: the completion merely signals writability */
		msghdr->iov[0].iov_base = msgb_data(msg);
		msghdr->iov[0].iov_len = msgb_length(msg);

		sqe = io_uring_get_sqe(&g_ring.ring);
		if (!sqe) {
			LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
			OSMO_ASSERT(0);
		}
		io_uring_prep_writev(sqe, iofd->fd, msghdr->iov, 1, 0);
		io_uring_sqe_set_data(sqe, msghdr);

		io_uring_submit(&g_ring.ring);
		iofd->u.uring.write_msghdr = msghdr;
	}
}
414
/*! disable write notifications; an already in-flight write still completes,
 *  but iofd_uring_handle_tx() will not re-arm transmission afterwards */
static void iofd_uring_write_disable(struct osmo_io_fd *iofd)
{
	iofd->u.uring.write_enabled = false;
}
419
/*! enable read notification on the iofd: submit the read-type operation
 *  matching the iofd's mode, unless one is already in flight */
static void iofd_uring_read_enable(struct osmo_io_fd *iofd)
{
	iofd->u.uring.read_enabled = true;

	/* only one read may be in flight per iofd at any given time */
	if (iofd->u.uring.read_msghdr)
		return;

	/* This function is called again, once the socket is connected. */
	if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED))
		return;

	switch (iofd->mode) {
	case OSMO_IO_FD_MODE_READ_WRITE:
		iofd_uring_submit_recv(iofd, IOFD_ACT_READ);
		break;
	case OSMO_IO_FD_MODE_RECVFROM_SENDTO:
		iofd_uring_submit_recv(iofd, IOFD_ACT_RECVFROM);
		break;
	case OSMO_IO_FD_MODE_RECVMSG_SENDMSG:
		iofd_uring_submit_recv(iofd, IOFD_ACT_RECVMSG);
		break;
	default:
		OSMO_ASSERT(0);
	}
}
445
/*! disable read notifications; an already in-flight read still completes,
 *  but iofd_uring_handle_recv() will not re-arm reading afterwards */
static void iofd_uring_read_disable(struct osmo_io_fd *iofd)
{
	iofd->u.uring.read_enabled = false;
}
450
/*! close the iofd: disable read/write, cancel any in-flight operations via
 *  iofd_uring_unregister(), then close(2) the underlying file descriptor.
 *  \returns the result of close(2) */
static int iofd_uring_close(struct osmo_io_fd *iofd)
{
	iofd_uring_read_disable(iofd);
	iofd_uring_write_disable(iofd);
	iofd_uring_unregister(iofd);
	return close(iofd->fd);
}
458
/* called via osmocom poll/select main handling once outbound non-blocking connect() completes */
static int iofd_uring_connected_cb(struct osmo_fd *ofd, unsigned int what)
{
	struct osmo_io_fd *iofd = ofd->data;

	LOGPIO(iofd, LOGL_DEBUG, "Socket connected or failed.\n");

	/* we only registered for OSMO_FD_WRITE; ignore anything else */
	if (!(what & OSMO_FD_WRITE))
		return 0;

	/* Unregister from poll/select handling. */
	osmo_fd_unregister(ofd);
	IOFD_FLAG_UNSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED);

	/* Notify the application about this via a zero-length write completion call-back. */
	IOFD_FLAG_SET(iofd, IOFD_FLAG_IN_CALLBACK);
	switch (iofd->mode) {
	case OSMO_IO_FD_MODE_READ_WRITE:
		iofd->io_ops.write_cb(iofd, 0, NULL);
		break;
	case OSMO_IO_FD_MODE_RECVFROM_SENDTO:
		iofd->io_ops.sendto_cb(iofd, 0, NULL, NULL);
		break;
	case OSMO_IO_FD_MODE_RECVMSG_SENDMSG:
		iofd->io_ops.sendmsg_cb(iofd, 0, NULL);
		break;
	}
	IOFD_FLAG_UNSET(iofd, IOFD_FLAG_IN_CALLBACK);

	/* If write/read notifications are pending, enable it now. */
	if (iofd->u.uring.write_enabled && !IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))
		iofd_uring_write_enable(iofd);
	if (iofd->u.uring.read_enabled && !IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))
		iofd_uring_read_enable(iofd);

	/* the user call-back above may have requested deferred freeing of the iofd */
	if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_TO_FREE) && !iofd->u.uring.read_msghdr && !iofd->u.uring.write_msghdr)
		talloc_free(iofd);
	return 0;
}
498
/*! request a one-shot notification once an outbound non-blocking connect()
 *  on the iofd completes (or fails) */
static void iofd_uring_notify_connected(struct osmo_io_fd *iofd)
{
	if (iofd->mode == OSMO_IO_FD_MODE_RECVMSG_SENDMSG) {
		/* Don't call this function after enabling read or write. */
		OSMO_ASSERT(!iofd->u.uring.write_enabled && !iofd->u.uring.read_enabled);

		/* Use a temporary osmo_fd which we can use to notify us once the connection is established
		 * or failed (indicated by FD becoming writable).
		 * This is needed as (at least for SCTP sockets) one cannot submit a zero-length writev/sendmsg
		 * in order to get notification when the socket is writable. */
		if (!IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED)) {
			osmo_fd_setup(&iofd->u.uring.connect_ofd, iofd->fd, OSMO_FD_WRITE,
				      iofd_uring_connected_cb, iofd, 0);
			if (osmo_fd_register(&iofd->u.uring.connect_ofd) < 0)
				LOGPIO(iofd, LOGL_ERROR, "Failed to register FD for connect event.\n");
			else
				IOFD_FLAG_SET(iofd, IOFD_FLAG_NOTIFY_CONNECTED);
		}
	} else
		/* other modes: a zero-length write completion serves as the notification */
		iofd_uring_write_enable(iofd);
}
520
/*! call-back table tying the io_uring backend into the generic osmo_io core */
const struct iofd_backend_ops iofd_uring_ops = {
	.register_fd = iofd_uring_register,
	.unregister_fd = iofd_uring_unregister,
	.close = iofd_uring_close,
	.write_enable = iofd_uring_write_enable,
	.write_disable = iofd_uring_write_disable,
	.read_enable = iofd_uring_read_enable,
	.read_disable = iofd_uring_read_disable,
	.notify_connected = iofd_uring_notify_connected,
};
531
532#endif /* defined(__linux__) */