/* SPDX-License-Identifier: GPL-2.0 */
#include <unistd.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>

#include <osmocom/core/linuxlist.h>
#include <osmocom/core/talloc.h>
#include <osmocom/core/logging.h>
#include <osmocom/core/exec.h>

#include "internal.h"

#include <netinet/sctp.h>

/***********************************************************************
 * Client (Control/User Plane Separation) Socket
 ***********************************************************************/

#define CUPS_MSGB_SIZE 1024

#define LOGCC(cc, lvl, fmt, args ...) \
	LOGP(DUECUPS, lvl, "%s: " fmt, (cc)->sockname, ## args)
| 29 | |
/* Record of one child process spawned on behalf of a UECUPS client via
 * "start_program"; used to route the program_term_ind back to the
 * client that started it when the child exits. */
struct subprocess {
	/* member in daemon->subprocesses */
	struct llist_head list;
	/* pointer to the client that started us */
	struct cups_client *cups_client;
	/* PID of the process */
	pid_t pid;
};
| 38 | |
| 39 | static json_t *gen_uecups_term_ind(pid_t pid, int status); |
| 40 | |
| 41 | /* kill the specified subprocess and forget about it */ |
| 42 | static void subprocess_destroy(struct subprocess *p, int signal) |
| 43 | { |
| 44 | LOGCC(p->cups_client, LOGL_DEBUG, "Kill subprocess pid %llu with signal %u\n", |
| 45 | (unsigned long long)p->pid, signal); |
| 46 | kill(p->pid, signal); |
| 47 | llist_del(&p->list); |
| 48 | talloc_free(p); |
| 49 | } |
| 50 | |
| 51 | static struct subprocess *subprocess_by_pid(struct gtp_daemon *d, pid_t pid) |
| 52 | { |
| 53 | struct subprocess *sproc; |
| 54 | llist_for_each_entry(sproc, &d->subprocesses, list) { |
| 55 | if (sproc->pid == pid) |
| 56 | return sproc; |
| 57 | } |
| 58 | return NULL; |
| 59 | } |
| 60 | |
/* Handle termination of child process 'pid' with wait status 'status':
 * look up which client started it, send that client a program_term_ind,
 * and drop the subprocess record.  Called from the main thread's
 * SIGCHLD handling, so no locking of the subprocess list is needed. */
void child_terminated(struct gtp_daemon *d, int pid, int status)
{
	struct subprocess *sproc;
	json_t *jterm_ind;

	LOGP(DUECUPS, LOGL_DEBUG, "SIGCHLD receive from pid %u; status=%d\n", pid, status);

	sproc = subprocess_by_pid(d, pid);
	if (!sproc) {
		LOGP(DUECUPS, LOGL_NOTICE, "subprocess %u terminated (status=%d) but we don't know it?\n",
		     pid, status);
		return;
	}

	/* generate prog_term_ind towards control plane */
	jterm_ind = gen_uecups_term_ind(pid, status);
	if (!jterm_ind)
		return;

	/* cups_client_tx_json() consumes (decrefs) jterm_ind */
	cups_client_tx_json(sproc->cups_client, jterm_ind);

	llist_del(&sproc->list);
	talloc_free(sproc);
}
| 85 | |
| 86 | /* Send JSON to a given client/connection */ |
| 87 | int cups_client_tx_json(struct cups_client *cc, json_t *jtx) |
| 88 | { |
| 89 | struct msgb *msg = msgb_alloc(CUPS_MSGB_SIZE, "Tx JSON"); |
| 90 | char *json_str = json_dumps(jtx, JSON_SORT_KEYS); |
| 91 | char *out; |
| 92 | int json_strlen; |
| 93 | |
| 94 | json_decref(jtx); |
| 95 | if (!json_str) { |
| 96 | LOGCC(cc, LOGL_ERROR, "Error encoding JSON\n"); |
| 97 | return 0; |
| 98 | } |
| 99 | json_strlen = strlen(json_str); |
| 100 | |
| 101 | LOGCC(cc, LOGL_DEBUG, "JSON Tx '%s'\n", json_str); |
| 102 | |
| 103 | if (json_strlen > msgb_tailroom(msg)) { |
| 104 | LOGCC(cc, LOGL_ERROR, "Not enough room for JSON in msgb\n"); |
| 105 | free(json_str); |
| 106 | return 0; |
| 107 | } |
| 108 | |
| 109 | out = (char *)msgb_put(msg, json_strlen); |
| 110 | memcpy(out, json_str, json_strlen); |
| 111 | free(json_str); |
| 112 | osmo_stream_srv_send(cc->srv, msg); |
| 113 | |
| 114 | return 0; |
| 115 | } |
| 116 | |
| 117 | json_t *gen_uecups_result(const char *name, const char *res) |
| 118 | { |
| 119 | json_t *jres = json_object(); |
| 120 | json_t *jret = json_object(); |
| 121 | |
| 122 | json_object_set_new(jres, "result", json_string(res)); |
| 123 | json_object_set_new(jret, name, jres); |
| 124 | |
| 125 | return jret; |
| 126 | } |
| 127 | |
| 128 | static int parse_ep(struct sockaddr_storage *out, json_t *in) |
| 129 | { |
| 130 | json_t *jaddr_type, *jport, *jip; |
| 131 | const char *addr_type, *ip; |
| 132 | uint8_t buf[16]; |
| 133 | |
| 134 | /* {"addr_type":"IPV4","ip":"31323334","Port":2152} */ |
| 135 | |
| 136 | if (!json_is_object(in)) |
| 137 | return -EINVAL; |
| 138 | |
| 139 | jaddr_type = json_object_get(in, "addr_type"); |
| 140 | jport = json_object_get(in, "Port"); |
| 141 | jip = json_object_get(in, "ip"); |
| 142 | |
| 143 | if (!jaddr_type || !jport || !jip) |
| 144 | return -EINVAL; |
| 145 | |
| 146 | if (!json_is_string(jaddr_type) || !json_is_integer(jport) || !json_is_string(jip)) |
| 147 | return -EINVAL; |
| 148 | |
| 149 | addr_type = json_string_value(jaddr_type); |
| 150 | ip = json_string_value(jip); |
| 151 | |
| 152 | memset(out, 0, sizeof(*out)); |
| 153 | |
| 154 | if (!strcmp(addr_type, "IPV4")) { |
| 155 | struct sockaddr_in *sin = (struct sockaddr_in *) out; |
| 156 | if (osmo_hexparse(ip, buf, sizeof(buf)) != 4) |
| 157 | return -EINVAL; |
| 158 | memcpy(&sin->sin_addr, buf, 4); |
| 159 | sin->sin_family = AF_INET; |
| 160 | sin->sin_port = htons(json_integer_value(jport)); |
| 161 | } else if (!strcmp(addr_type, "IPV6")) { |
| 162 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) out; |
| 163 | if (osmo_hexparse(ip, buf, sizeof(buf)) != 16) |
| 164 | return -EINVAL; |
| 165 | memcpy(&sin6->sin6_addr, buf, 16); |
| 166 | sin6->sin6_family = AF_INET6; |
| 167 | sin6->sin6_port = htons(json_integer_value(jport)); |
| 168 | } else |
| 169 | return -EINVAL; |
| 170 | |
| 171 | return 0; |
| 172 | } |
| 173 | |
| 174 | static int parse_eua(struct sockaddr_storage *out, json_t *jip, json_t *jaddr_type) |
| 175 | { |
| 176 | const char *addr_type, *ip; |
| 177 | uint8_t buf[16]; |
| 178 | |
| 179 | if (!json_is_string(jip) || !json_is_string(jaddr_type)) |
| 180 | return -EINVAL; |
| 181 | |
| 182 | addr_type = json_string_value(jaddr_type); |
| 183 | ip = json_string_value(jip); |
| 184 | |
| 185 | memset(out, 0, sizeof(*out)); |
| 186 | |
| 187 | if (!strcmp(addr_type, "IPV4")) { |
| 188 | struct sockaddr_in *sin = (struct sockaddr_in *) out; |
| 189 | if (osmo_hexparse(ip, buf, sizeof(buf)) != 4) |
| 190 | return -EINVAL; |
| 191 | memcpy(&sin->sin_addr, buf, 4); |
| 192 | sin->sin_family = AF_INET; |
| 193 | } else if (!strcmp(addr_type, "IPV6")) { |
| 194 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) out; |
| 195 | if (osmo_hexparse(ip, buf, sizeof(buf)) != 16) |
| 196 | return -EINVAL; |
| 197 | memcpy(&sin6->sin6_addr, buf, 16); |
| 198 | sin6->sin6_family = AF_INET6; |
| 199 | } else |
| 200 | return -EINVAL; |
| 201 | |
| 202 | return 0; |
| 203 | } |
| 204 | |
| 205 | |
/* Parse a "create_tun" command object into *out.  String members
 * (tun_name, tun_netns_name) are talloc-allocated as children of
 * 'out'.  Returns 0 on success, negative errno on malformed input.
 * NOTE(review): talloc_strdup() results are not NULL-checked here —
 * presumably OOM is treated as fatal elsewhere; confirm. */
static int parse_create_tun(struct gtp_tunnel_params *out, json_t *ctun)
{
	json_t *jlocal_gtp_ep, *jremote_gtp_ep;
	json_t *jrx_teid, *jtx_teid;
	json_t *jtun_dev_name, *jtun_netns_name;
	json_t *juser_addr, *juser_addr_type;
	int rc;

	/* '{"create_tun":{"tx_teid":1234,"rx_teid":5678,"user_addr_type":"IPV4","user_addr":"21222324","local_gtp_ep":{"addr_type":"IPV4","ip":"31323334","Port":2152},"remote_gtp_ep":{"addr_type":"IPV4","ip":"41424344","Port":2152},"tun_dev_name":"tun23","tun_netns_name":"foo"}}' */

	if (!json_is_object(ctun))
		return -EINVAL;

	/* mandatory IEs */
	jlocal_gtp_ep = json_object_get(ctun, "local_gtp_ep");
	jremote_gtp_ep = json_object_get(ctun, "remote_gtp_ep");
	jrx_teid = json_object_get(ctun, "rx_teid");
	jtx_teid = json_object_get(ctun, "tx_teid");
	jtun_dev_name = json_object_get(ctun, "tun_dev_name");
	juser_addr = json_object_get(ctun, "user_addr");
	juser_addr_type = json_object_get(ctun, "user_addr_type");

	/* presence check first, then type check, so a missing IE and a
	 * mistyped IE both yield -EINVAL */
	if (!jlocal_gtp_ep || !jremote_gtp_ep || !jrx_teid || !jtx_teid || !jtun_dev_name ||
	    !juser_addr || !juser_addr_type)
		return -EINVAL;
	if (!json_is_object(jlocal_gtp_ep) || !json_is_object(jremote_gtp_ep) ||
	    !json_is_integer(jrx_teid) || !json_is_integer(jtx_teid) ||
	    !json_is_string(jtun_dev_name) ||
	    !json_is_string(juser_addr) || !json_is_string(juser_addr_type))
		return -EINVAL;

	memset(out, 0, sizeof(*out));

	rc = parse_ep(&out->local_udp, jlocal_gtp_ep);
	if (rc < 0)
		return rc;
	rc = parse_ep(&out->remote_udp, jremote_gtp_ep);
	if (rc < 0)
		return rc;
	rc = parse_eua(&out->user_addr, juser_addr, juser_addr_type);
	if (rc < 0)
		return rc;
	out->rx_teid = json_integer_value(jrx_teid);
	out->tx_teid = json_integer_value(jtx_teid);
	out->tun_name = talloc_strdup(out, json_string_value(jtun_dev_name));

	/* optional IEs */
	jtun_netns_name = json_object_get(ctun, "tun_netns_name");
	if (jtun_netns_name) {
		if (!json_is_string(jtun_netns_name))
			return -EINVAL;
		out->tun_netns_name = talloc_strdup(out, json_string_value(jtun_netns_name));
	}

	return 0;
}
| 262 | |
| 263 | |
| 264 | static int cups_client_handle_create_tun(struct cups_client *cc, json_t *ctun) |
| 265 | { |
| 266 | int rc; |
| 267 | struct gtp_tunnel_params *tpars = talloc_zero(cc, struct gtp_tunnel_params); |
| 268 | struct gtp_tunnel *t; |
| 269 | |
| 270 | rc = parse_create_tun(tpars, ctun); |
| 271 | if (rc < 0) { |
| 272 | talloc_free(tpars); |
| 273 | return rc; |
| 274 | } |
| 275 | |
| 276 | t = gtp_tunnel_alloc(g_daemon, tpars); |
| 277 | if (!t) { |
| 278 | LOGCC(cc, LOGL_NOTICE, "Failed to allocate tunnel\n"); |
| 279 | cups_client_tx_json(cc, gen_uecups_result("create_tun_res", "ERR_NOT_FOUND")); |
| 280 | } else { |
| 281 | cups_client_tx_json(cc, gen_uecups_result("create_tun_res", "OK")); |
| 282 | } |
| 283 | |
| 284 | talloc_free(tpars); |
| 285 | return 0; |
| 286 | } |
| 287 | |
| 288 | static int cups_client_handle_destroy_tun(struct cups_client *cc, json_t *dtun) |
| 289 | { |
| 290 | struct sockaddr_storage local_ep_addr; |
| 291 | json_t *jlocal_gtp_ep, *jrx_teid; |
| 292 | uint32_t rx_teid; |
| 293 | int rc; |
| 294 | |
| 295 | jlocal_gtp_ep = json_object_get(dtun, "local_gtp_ep"); |
| 296 | jrx_teid = json_object_get(dtun, "rx_teid"); |
| 297 | |
| 298 | if (!jlocal_gtp_ep || !jrx_teid) |
| 299 | return -EINVAL; |
| 300 | |
| 301 | if (!json_is_object(jlocal_gtp_ep) || !json_is_integer(jrx_teid)) |
| 302 | return -EINVAL; |
| 303 | |
| 304 | rc = parse_ep(&local_ep_addr, jlocal_gtp_ep); |
| 305 | if (rc < 0) |
| 306 | return rc; |
| 307 | rx_teid = json_integer_value(jrx_teid); |
| 308 | |
| 309 | rc = gtp_tunnel_destroy(g_daemon, &local_ep_addr, rx_teid); |
| 310 | if (rc < 0) { |
| 311 | LOGCC(cc, LOGL_NOTICE, "Failed to destroy tunnel\n"); |
| 312 | cups_client_tx_json(cc, gen_uecups_result("destroy_tun_res", "ERR_NOT_FOUND")); |
| 313 | } else { |
| 314 | cups_client_tx_json(cc, gen_uecups_result("destroy_tun_res", "OK")); |
| 315 | } |
| 316 | |
| 317 | return 0; |
| 318 | } |
| 319 | |
| 320 | static json_t *gen_uecups_term_ind(pid_t pid, int status) |
| 321 | { |
| 322 | json_t *jterm = json_object(); |
| 323 | json_t *jret = json_object(); |
| 324 | |
| 325 | json_object_set_new(jterm, "pid", json_integer(pid)); |
| 326 | json_object_set_new(jterm, "exit_code", json_integer(status)); |
| 327 | |
| 328 | json_object_set_new(jret, "program_term_ind", jterm); |
| 329 | |
| 330 | return jret; |
| 331 | } |
| 332 | |
| 333 | static json_t *gen_uecups_start_res(pid_t pid, const char *result) |
| 334 | { |
| 335 | json_t *ret = gen_uecups_result("start_program_res", result); |
| 336 | json_object_set_new(json_object_get(ret, "start_program_res"), "pid", json_integer(pid)); |
| 337 | |
| 338 | return ret; |
| 339 | } |
| 340 | |
| 341 | static int cups_client_handle_start_program(struct cups_client *cc, json_t *sprog) |
| 342 | { |
| 343 | json_t *juser, *jcmd, *jenv, *jnetns, *jres; |
| 344 | struct gtp_daemon *d = cc->d; |
| 345 | const char *cmd, *user; |
| 346 | char **addl_env = NULL; |
| 347 | sigset_t oldmask; |
| 348 | int nsfd = -1, rc; |
| 349 | |
| 350 | juser = json_object_get(sprog, "run_as_user"); |
| 351 | jcmd = json_object_get(sprog, "command"); |
| 352 | jenv = json_object_get(sprog, "environment"); |
| 353 | jnetns = json_object_get(sprog, "tun_netns_name"); |
| 354 | |
| 355 | /* mandatory parts */ |
| 356 | if (!juser || !jcmd) |
| 357 | return -EINVAL; |
| 358 | if (!json_is_string(juser) || !json_is_string(jcmd)) |
| 359 | return -EINVAL; |
| 360 | |
| 361 | /* optional parts */ |
| 362 | if (jenv && !json_is_array(jenv)) |
| 363 | return -EINVAL; |
| 364 | if (jnetns && !json_is_string(jnetns)) |
| 365 | return -EINVAL; |
| 366 | |
| 367 | cmd = json_string_value(jcmd); |
| 368 | user = json_string_value(juser); |
| 369 | if (jnetns) { |
| 370 | struct tun_device *tun = tun_device_find_netns(d, json_string_value(jnetns)); |
| 371 | if (!tun) |
| 372 | return -ENODEV; |
| 373 | nsfd = tun->netns_fd; |
| 374 | } |
| 375 | |
| 376 | /* build environment */ |
| 377 | if (jenv) { |
| 378 | json_t *j; |
| 379 | int i; |
| 380 | addl_env = talloc_zero_array(cc, char *, json_array_size(jenv)+1); |
| 381 | if (!addl_env) |
| 382 | return -ENOMEM; |
| 383 | json_array_foreach(jenv, i, j) { |
| 384 | addl_env[i] = talloc_strdup(addl_env, json_string_value(j)); |
| 385 | } |
| 386 | } |
| 387 | |
| 388 | if (jnetns) { |
| 389 | rc = switch_ns(nsfd, &oldmask); |
| 390 | if (rc < 0) { |
| 391 | talloc_free(addl_env); |
| 392 | return -EIO; |
| 393 | } |
| 394 | } |
| 395 | |
| 396 | rc = osmo_system_nowait2(cmd, osmo_environment_whitelist, addl_env, user); |
| 397 | |
| 398 | if (jnetns) { |
| 399 | OSMO_ASSERT(restore_ns(&oldmask) == 0); |
| 400 | } |
| 401 | |
| 402 | talloc_free(addl_env); |
| 403 | |
| 404 | if (rc > 0) { |
| 405 | /* create a record about the subprocess we started, so we can notify the |
| 406 | * client that crated it upon termination */ |
| 407 | struct subprocess *sproc = talloc_zero(cc, struct subprocess); |
| 408 | if (!sproc) |
| 409 | return -ENOMEM; |
| 410 | |
| 411 | sproc->cups_client = cc; |
| 412 | sproc->pid = rc; |
| 413 | llist_add_tail(&sproc->list, &d->subprocesses); |
| 414 | jres = gen_uecups_start_res(sproc->pid, "OK"); |
| 415 | } else { |
| 416 | jres = gen_uecups_start_res(0, "ERR_INVALID_DATA"); |
| 417 | } |
| 418 | |
| 419 | cups_client_tx_json(cc, jres); |
| 420 | |
| 421 | return 0; |
| 422 | } |
| 423 | |
/* Handle a "reset_all_state" command: tear down all GTP tunnels and
 * kill every subprocess, then send (or defer) the result to the
 * client.  Runs in the main thread. */
static int cups_client_handle_reset_all_state(struct cups_client *cc, json_t *sprog)
{
	struct gtp_daemon *d = cc->d;
	struct gtp_tunnel *t, *t2;
	struct subprocess *p, *p2;
	json_t *jres;

	LOGCC(cc, LOGL_DEBUG, "Destroying all tunnels\n");
	/* write-lock: the tunnel list is shared with other threads */
	pthread_rwlock_wrlock(&d->rwlock);
	llist_for_each_entry_safe(t, t2, &d->gtp_tunnels, list) {
		_gtp_tunnel_destroy(t);
	}
	pthread_rwlock_unlock(&d->rwlock);

	/* no locking needed as this list is only used by main thread */
	LOGCC(cc, LOGL_DEBUG, "Destroying all subprocesses\n");
	llist_for_each_entry_safe(p, p2, &d->subprocesses, list) {
		subprocess_destroy(p, SIGKILL);
	}

	/* if tunnel teardown is still completing asynchronously, defer the
	 * "OK" response; it is sent once the remaining count reaches zero */
	if (d->reset_all_state_tun_remaining == 0) {
		jres = gen_uecups_result("reset_all_state_res", "OK");
		cups_client_tx_json(cc, jres);
	} else {
		cc->reset_all_state_res_pending = true;
	}

	return 0;
}
| 453 | |
| 454 | static int cups_client_handle_json(struct cups_client *cc, json_t *jroot) |
| 455 | { |
| 456 | void *iter; |
| 457 | const char *key; |
| 458 | json_t *cmd; |
| 459 | int rc; |
| 460 | |
| 461 | if (!json_is_object(jroot)) |
| 462 | return -EINVAL; |
| 463 | |
| 464 | iter = json_object_iter(jroot); |
| 465 | key = json_object_iter_key(iter); |
| 466 | cmd = json_object_iter_value(iter); |
| 467 | if (!iter || !key || !cmd) |
| 468 | return -EINVAL; |
| 469 | |
| 470 | if (!strcmp(key, "create_tun")) { |
| 471 | rc = cups_client_handle_create_tun(cc, cmd); |
| 472 | } else if (!strcmp(key, "destroy_tun")) { |
| 473 | rc = cups_client_handle_destroy_tun(cc, cmd); |
| 474 | } else if (!strcmp(key, "start_program")) { |
| 475 | rc = cups_client_handle_start_program(cc, cmd); |
| 476 | } else if (!strcmp(key, "reset_all_state")) { |
| 477 | rc = cups_client_handle_reset_all_state(cc, cmd); |
| 478 | } else { |
| 479 | LOGCC(cc, LOGL_NOTICE, "Unknown command '%s' received\n", key); |
| 480 | return -EINVAL; |
| 481 | } |
| 482 | |
| 483 | if (rc < 0) { |
| 484 | LOGCC(cc, LOGL_NOTICE, "Error %d handling '%s' command\n", rc, key); |
| 485 | char buf[64]; |
| 486 | snprintf(buf, sizeof(buf), "%s_res", key); |
| 487 | cups_client_tx_json(cc, gen_uecups_result(buf, "ERR_INVALID_DATA")); |
| 488 | return -EINVAL; |
| 489 | } |
| 490 | |
| 491 | return 0; |
| 492 | } |
| 493 | |
| 494 | /* control/user plane separation per-client read cb */ |
| 495 | static int cups_client_read_cb(struct osmo_stream_srv *conn) |
| 496 | { |
| 497 | struct osmo_fd *ofd = osmo_stream_srv_get_ofd(conn); |
| 498 | struct cups_client *cc = osmo_stream_srv_get_data(conn); |
| 499 | struct msgb *msg = msgb_alloc(CUPS_MSGB_SIZE, "Rx JSON"); |
| 500 | struct sctp_sndrcvinfo sinfo; |
| 501 | json_error_t jerr; |
| 502 | json_t *jroot; |
| 503 | int flags = 0; |
| 504 | int rc = 0; |
| 505 | |
| 506 | /* Read message from socket */ |
| 507 | /* we cannot use osmo_stream_srv_recv() here, as we might get some out-of-band info from |
| 508 | * SCTP. FIXME: add something like osmo_stream_srv_recv_sctp() to libosmo-netif and use |
| 509 | * it here as well as in libosmo-sigtran and osmo-msc */ |
| 510 | rc = sctp_recvmsg(ofd->fd, msg->tail, msgb_tailroom(msg), NULL, NULL, &sinfo, &flags); |
| 511 | if (rc <= 0) { |
| 512 | osmo_stream_srv_destroy(conn); |
| 513 | rc = -1; |
| 514 | goto out; |
| 515 | } else |
| 516 | msgb_put(msg, rc); |
| 517 | |
| 518 | if (flags & MSG_NOTIFICATION) { |
| 519 | union sctp_notification *notif = (union sctp_notification *) msgb_data(msg); |
| 520 | switch (notif->sn_header.sn_type) { |
| 521 | case SCTP_SHUTDOWN_EVENT: |
| 522 | osmo_stream_srv_destroy(conn); |
| 523 | rc = -EBADF; |
| 524 | goto out; |
| 525 | default: |
| 526 | break; |
| 527 | } |
| 528 | goto out; |
| 529 | } |
| 530 | |
| 531 | LOGCC(cc, LOGL_DEBUG, "Rx '%s'\n", msgb_data(msg)); |
| 532 | |
| 533 | /* Parse the JSON */ |
| 534 | jroot = json_loadb((const char *) msgb_data(msg), msgb_length(msg), 0, &jerr); |
| 535 | if (!jroot) { |
| 536 | LOGCC(cc, LOGL_ERROR, "Error decoding JSON (%s)", jerr.text); |
| 537 | rc = -1; |
| 538 | goto out; |
| 539 | } |
| 540 | |
| 541 | /* Dispatch */ |
| 542 | rc = cups_client_handle_json(cc, jroot); |
| 543 | |
| 544 | json_decref(jroot); |
| 545 | msgb_free(msg); |
| 546 | |
| 547 | return 0; |
| 548 | out: |
| 549 | msgb_free(msg); |
| 550 | return rc; |
| 551 | } |
| 552 | |
| 553 | static int cups_client_closed_cb(struct osmo_stream_srv *conn) |
| 554 | { |
| 555 | struct cups_client *cc = osmo_stream_srv_get_data(conn); |
| 556 | struct gtp_daemon *d = cc->d; |
| 557 | struct subprocess *p, *p2; |
| 558 | |
| 559 | /* kill + forget about all subprocesses of this client */ |
| 560 | /* We need no locking here as the subprocess list is only used from the main thread */ |
| 561 | llist_for_each_entry_safe(p, p2, &d->subprocesses, list) { |
| 562 | if (p->cups_client == cc) |
| 563 | subprocess_destroy(p, SIGKILL); |
| 564 | } |
| 565 | |
| 566 | LOGCC(cc, LOGL_INFO, "UECUPS connection lost\n"); |
| 567 | llist_del(&cc->list); |
| 568 | return 0; |
| 569 | } |
| 570 | |
| 571 | |
| 572 | /* the control/user plane separation server bind/accept fd */ |
| 573 | static int cups_accept_cb(struct osmo_stream_srv_link *link, int fd) |
| 574 | { |
| 575 | struct gtp_daemon *d = osmo_stream_srv_link_get_data(link); |
| 576 | struct cups_client *cc; |
| 577 | |
| 578 | cc = talloc_zero(d, struct cups_client); |
| 579 | if (!cc) |
| 580 | return -1; |
| 581 | |
| 582 | cc->d = d; |
| 583 | osmo_sock_get_name_buf(cc->sockname, sizeof(cc->sockname), fd); |
| 584 | cc->srv = osmo_stream_srv_create(cc, link, fd, cups_client_read_cb, cups_client_closed_cb, cc); |
| 585 | if (!cc->srv) { |
| 586 | talloc_free(cc); |
| 587 | return -1; |
| 588 | } |
| 589 | LOGCC(cc, LOGL_INFO, "Accepted new UECUPS connection\n"); |
| 590 | |
| 591 | llist_add_tail(&cc->list, &d->cups_clients); |
| 592 | |
| 593 | return 0; |
| 594 | } |
| 595 | |
| 596 | struct osmo_stream_srv_link *cups_srv_link_create(struct gtp_daemon *d) |
| 597 | { |
| 598 | struct osmo_stream_srv_link *srv_link; |
| 599 | srv_link = osmo_stream_srv_link_create(g_daemon); |
| 600 | if (!srv_link) |
| 601 | return NULL; |
| 602 | |
| 603 | /* UECUPS socket for control from control plane side */ |
| 604 | osmo_stream_srv_link_set_nodelay(srv_link, true); |
| 605 | osmo_stream_srv_link_set_addr(srv_link, g_daemon->cfg.cups_local_ip); |
| 606 | osmo_stream_srv_link_set_port(srv_link, g_daemon->cfg.cups_local_port); |
| 607 | osmo_stream_srv_link_set_proto(srv_link, IPPROTO_SCTP); |
| 608 | osmo_stream_srv_link_set_data(srv_link, g_daemon); |
| 609 | osmo_stream_srv_link_set_accept_cb(srv_link, cups_accept_cb); |
| 610 | osmo_stream_srv_link_open(srv_link); |
| 611 | return srv_link; |
| 612 | } |