/* Gb proxy peer handling */

/* (C) 2010 by Harald Welte <laforge@gnumonks.org>
 * (C) 2010-2013 by On-Waves
 * (C) 2013 by Holger Hans Peter Freyther
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <osmocom/gbproxy/gb_proxy.h>

#include "debug.h"

#include <osmocom/gprs/protocol/gsm_08_18.h>
#include <osmocom/core/crc16.h>
#include <osmocom/core/logging.h>
#include <osmocom/core/linuxlist.h>
#include <osmocom/core/rate_ctr.h>
#include <osmocom/core/stats.h>
#include <osmocom/core/talloc.h>
#include <osmocom/core/utils.h>
#include <osmocom/gsm/tlv.h>

#include <string.h>

extern void *tall_sgsn_ctx;

static const struct rate_ctr_desc bvc_ctr_description[] = {
	{ "blocked",	"BVC Block" },
	{ "unblocked",	"BVC Unblock" },
	{ "dropped",	"BVC blocked, dropped packet" },
	{ "inv-nsei",	"NSEI mismatch" },
	{ "tx-err",	"NS Transmission error" },
};

osmo_static_assert(ARRAY_SIZE(bvc_ctr_description) == GBPROX_PEER_CTR_LAST, everything_described);

static const struct rate_ctr_group_desc bvc_ctrg_desc = {
	.group_name_prefix = "gbproxy:peer",
	.group_description = "GBProxy Peer Statistics",
	.num_ctr = ARRAY_SIZE(bvc_ctr_description),
	.ctr_desc = bvc_ctr_description,
	.class_id = OSMO_STATS_CLASS_PEER,
};
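/* Example (illustrative sketch): counters of this group are bumped from the message
 * handling code using the usual libosmocore rate_ctr API, indexed by the
 * GBPROX_PEER_CTR_* enum that GBPROX_PEER_CTR_LAST above terminates; the enumerator
 * name used here is an assumption about that enum:
 *
 *	rate_ctr_inc(&bvc->ctrg->ctr[GBPROX_PEER_CTR_DROPPED]);
 */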


/* Find the gbproxy_bvc by its BVCI. There can only be one match */
struct gbproxy_bvc *gbproxy_bvc_by_bvci(struct gbproxy_nse *nse, uint16_t bvci)
{
	struct gbproxy_bvc *bvc;
	hash_for_each_possible(nse->bvcs, bvc, list, bvci) {
		if (bvc->bvci == bvci)
			return bvc;
	}
	return NULL;
}

struct gbproxy_bvc *gbproxy_bvc_alloc(struct gbproxy_nse *nse, uint16_t bvci)
{
	char idbuf[64];
	struct gbproxy_bvc *bvc;
	OSMO_ASSERT(nse);
	struct gbproxy_config *cfg = nse->cfg;
	OSMO_ASSERT(cfg);

	bvc = talloc_zero(tall_sgsn_ctx, struct gbproxy_bvc);
	if (!bvc)
		return NULL;

	snprintf(idbuf, sizeof(idbuf), "BVC%05u-NSE%05u", bvci, nse->nsei);
	osmo_identifier_sanitize_buf(idbuf, NULL, '_');
	bvc->bvci = bvci;
	bvc->ctrg = rate_ctr_group_alloc(bvc, &bvc_ctrg_desc, (nse->nsei << 16) | bvci);
	if (!bvc->ctrg) {
		talloc_free(bvc);
		return NULL;
	}
	rate_ctr_group_set_name(bvc->ctrg, idbuf);
	bvc->nse = nse;

	hash_add(nse->bvcs, &bvc->list, bvc->bvci);

	LOGPBVC_CAT(bvc, DOBJ, LOGL_INFO, "BVC Created\n");

	/* We leave allocating the bvc->fi to the caller, as the FSM details depend
	 * on the type of BVC (SIG/PTP) and role (SGSN/BSS) */
	return bvc;
}
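/* Example (illustrative sketch): a caller allocates the BVC first and then attaches
 * whichever FSM matches the BVC type and role; the FSM allocation helper named here is
 * a placeholder, the real helpers live elsewhere:
 *
 *	struct gbproxy_bvc *bvc = gbproxy_bvc_alloc(nse, bvci);
 *	if (!bvc)
 *		return -ENOMEM;
 *	bvc->fi = alloc_bvc_fsm_for_role(bvc);	// placeholder: SIG vs. PTP, BSS vs. SGSN side
 */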

void gbproxy_bvc_free(struct gbproxy_bvc *bvc)
{
	if (!bvc)
		return;

	LOGPBVC_CAT(bvc, DOBJ, LOGL_INFO, "BVC Destroying\n");

	hash_del(&bvc->list);

	rate_ctr_group_free(bvc->ctrg);
	bvc->ctrg = NULL;

	osmo_fsm_inst_free(bvc->fi);

	if (bvc->cell) {
		gbproxy_cell_cleanup_bvc(bvc->cell, bvc);
		bvc->cell = NULL;
	}
	talloc_free(bvc);
}

/*! Remove BVCs on the given NSE.
 *  \param[in] nse NS entity in which we should clean up
 *  \param[in] bvci if 0: remove all PTP BVCs; if != 0: BVCI of the single BVC to clean up
 *  \return number of BVCs that were freed */
int gbproxy_cleanup_bvcs(struct gbproxy_nse *nse, uint16_t bvci)
{
	struct hlist_node *btmp;
	struct gbproxy_bvc *bvc;
	int j, counter = 0;

	if (!nse)
		return 0;

	hash_for_each_safe(nse->bvcs, j, btmp, bvc, list) {
		if (bvci && bvc->bvci != bvci)
			continue;
		if (bvci == 0 && bvc->bvci == 0)
			continue;

		gbproxy_bvc_free(bvc);
		counter += 1;
	}

	return counter;
}
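/* Example (illustrative sketch of the bvci semantics documented above):
 *
 *	int removed = gbproxy_cleanup_bvcs(nse, 0);	// removes all PTP BVCs, keeps the signalling BVC (BVCI 0)
 *	removed = gbproxy_cleanup_bvcs(nse, 1002);	// removes only BVCI 1002, if present
 */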


/***********************************************************************
 * CELL
 ***********************************************************************/

/* Allocate a new 'cell' object */
struct gbproxy_cell *gbproxy_cell_alloc(struct gbproxy_config *cfg, uint16_t bvci,
					const struct gprs_ra_id *raid, uint16_t cid)
{
	struct gbproxy_cell *cell;
	OSMO_ASSERT(cfg);

	cell = talloc_zero(cfg, struct gbproxy_cell);
	if (!cell)
		return NULL;

	cell->cfg = cfg;
	cell->bvci = bvci;
	cell->id.cid = cid;
	memcpy(&cell->id.raid, raid, sizeof(cell->id.raid));

	hash_add(cfg->cells, &cell->list, cell->bvci);

	LOGPCELL_CAT(cell, DOBJ, LOGL_INFO, "CELL Created\n");

	return cell;
}

/* Find cell by BVCI */
struct gbproxy_cell *gbproxy_cell_by_bvci(struct gbproxy_config *cfg, uint16_t bvci)
{
	struct gbproxy_cell *cell;

	hash_for_each_possible(cfg->cells, cell, list, bvci) {
		if (cell->bvci == bvci)
			return cell;
	}
	return NULL;
}

struct gbproxy_cell *gbproxy_cell_by_cellid(struct gbproxy_config *cfg, const struct gprs_ra_id *raid, uint16_t cid)
{
	int i;
	struct gbproxy_cell *cell;

	hash_for_each(cfg->cells, i, cell, list) {
		if (cell->id.cid == cid && gsm48_ra_equal(&cell->id.raid, raid))
			return cell;
	}
	return NULL;
}

void gbproxy_cell_cleanup_bvc(struct gbproxy_cell *cell, struct gbproxy_bvc *bvc)
{
	int i;

	if (cell->bss_bvc == bvc)
		return gbproxy_cell_free(cell);

	/* we could also be a SGSN-side BVC */
	for (i = 0; i < ARRAY_SIZE(cell->sgsn_bvc); i++) {
		if (cell->sgsn_bvc[i] == bvc)
			cell->sgsn_bvc[i] = NULL;
	}
}

void gbproxy_cell_free(struct gbproxy_cell *cell)
{
	unsigned int i;

	if (!cell)
		return;

	LOGPCELL_CAT(cell, DOBJ, LOGL_INFO, "CELL Destroying\n");

	/* remove from cfg.cells */
	hash_del(&cell->list);

	/* remove back-pointers from the BSS side */
	if (cell->bss_bvc && cell->bss_bvc->cell)
		cell->bss_bvc->cell = NULL;

	/* remove back-pointers from the SGSN side */
	for (i = 0; i < ARRAY_SIZE(cell->sgsn_bvc); i++) {
		if (!cell->sgsn_bvc[i])
			continue;
		if (cell->sgsn_bvc[i]->cell)
			cell->sgsn_bvc[i]->cell = NULL;
	}

	talloc_free(cell);
}

bool gbproxy_cell_add_sgsn_bvc(struct gbproxy_cell *cell, struct gbproxy_bvc *bvc)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(cell->sgsn_bvc); i++) {
		if (!cell->sgsn_bvc[i]) {
			cell->sgsn_bvc[i] = bvc;
			LOGPCELL_CAT(cell, DOBJ, LOGL_DEBUG, "CELL linked to SGSN\n");
			LOGPBVC_CAT(bvc, DOBJ, LOGL_DEBUG, "BVC linked to CELL\n");
			return true;
		}
	}
	return false;
}
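/* Example (illustrative sketch): the BVC-RESET handling elsewhere would typically
 * create the cell, cross-link it with the BSS-side BVC and register one SGSN-side BVC
 * per pool member; the variable names are placeholders:
 *
 *	struct gbproxy_cell *cell = gbproxy_cell_alloc(cfg, bvci, &raid, cid);
 *	cell->bss_bvc = bss_bvc;
 *	bss_bvc->cell = cell;
 *	if (!gbproxy_cell_add_sgsn_bvc(cell, sgsn_bvc))
 *		LOGPCELL_CAT(cell, DOBJ, LOGL_ERROR, "no free SGSN-side BVC slot\n");
 */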


/***********************************************************************
 * TLLI cache
 ***********************************************************************/

static inline struct gbproxy_tlli_cache_entry *_get_tlli_entry(struct gbproxy_config *cfg, uint32_t tlli)
{
	struct gbproxy_tlli_cache_entry *cache_entry;

	hash_for_each_possible(cfg->tlli_cache.entries, cache_entry, list, tlli) {
		if (cache_entry->tlli == tlli)
			return cache_entry;
	}
	return NULL;
}

void gbproxy_tlli_cache_update(struct gbproxy_nse *nse, uint32_t tlli)
{
	struct gbproxy_config *cfg = nse->cfg;
	struct timespec now;
	struct gbproxy_tlli_cache_entry *cache_entry = _get_tlli_entry(cfg, tlli);

	osmo_clock_gettime(CLOCK_MONOTONIC, &now);

	if (cache_entry) {
		/* Update the entry if it already exists */
		cache_entry->nse = nse;
		cache_entry->tstamp = now.tv_sec;
		return;
	}

	cache_entry = talloc_zero(cfg, struct gbproxy_tlli_cache_entry);
	cache_entry->tlli = tlli;
	cache_entry->nse = nse;
	cache_entry->tstamp = now.tv_sec;
	hash_add(cfg->tlli_cache.entries, &cache_entry->list, cache_entry->tlli);
}

static void _tlli_cache_remove_nse(struct gbproxy_nse *nse)
{
	uint i;
	struct gbproxy_config *cfg = nse->cfg;
	struct gbproxy_tlli_cache_entry *tlli_cache;
	struct hlist_node *tmp;

	hash_for_each_safe(cfg->tlli_cache.entries, i, tmp, tlli_cache, list) {
		if (tlli_cache->nse == nse) {
			hash_del(&tlli_cache->list);
			talloc_free(tlli_cache);
		}
	}
}

void gbproxy_tlli_cache_remove(struct gbproxy_config *cfg, uint32_t tlli)
{
	struct gbproxy_tlli_cache_entry *tlli_cache;
	struct hlist_node *tmp;

	hash_for_each_possible_safe(cfg->tlli_cache.entries, tlli_cache, tmp, list, tlli) {
		if (tlli_cache->tlli == tlli) {
			hash_del(&tlli_cache->list);
			talloc_free(tlli_cache);
			return;
		}
	}
}

int gbproxy_tlli_cache_cleanup(struct gbproxy_config *cfg)
{
	int i, count = 0;
	struct gbproxy_tlli_cache_entry *tlli_cache;
	struct hlist_node *tmp;
	struct timespec now;
	time_t expiry;

	osmo_clock_gettime(CLOCK_MONOTONIC, &now);
	expiry = now.tv_sec - cfg->tlli_cache.timeout;

	hash_for_each_safe(cfg->tlli_cache.entries, i, tmp, tlli_cache, list) {
		if (tlli_cache->tstamp < expiry) {
			count++;
			LOGP(DGPRS, LOGL_NOTICE, "Cache entry for TLLI %08x expired, removing\n", tlli_cache->tlli);
			hash_del(&tlli_cache->list);
			talloc_free(tlli_cache);
		}
	}
	return count;
}
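/* Example (illustrative sketch): the cache cleanup routines are meant to be driven
 * periodically, e.g. from a self-rescheduling osmo_timer; the timer variable and the
 * CACHE_CLEANUP_SECS interval used here are placeholders, the real trigger lives in the
 * gbproxy main code:
 *
 *	static void cache_cleanup_cb(void *data)
 *	{
 *		struct gbproxy_config *cfg = data;
 *		gbproxy_tlli_cache_cleanup(cfg);
 *		gbproxy_imsi_cache_cleanup(cfg);
 *		osmo_timer_schedule(&cleanup_timer, CACHE_CLEANUP_SECS, 0);
 *	}
 */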

/***********************************************************************
 * IMSI cache
 ***********************************************************************/
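/* The IMSI is a string, but the hash table needs an integer key: a CRC-16 over the
 * IMSI digits serves as that key, and the strncmp() in the lookup functions below
 * resolves any hash collisions. */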
static inline uint16_t _checksum_imsi(const char *imsi)
{
	size_t len = strlen(imsi);
	return osmo_crc16(0, (const uint8_t *)imsi, len);
}

static inline struct gbproxy_imsi_cache_entry *_get_imsi_entry(struct gbproxy_config *cfg, const char *imsi, enum cache_usage_type usage)
{
	struct gbproxy_imsi_cache_entry *cache_entry;
	uint16_t imsi_hash = _checksum_imsi(imsi);

	hash_for_each_possible(cfg->imsi_cache.entries, cache_entry, list, imsi_hash) {
		if (!strncmp(cache_entry->imsi, imsi, sizeof(cache_entry->imsi)) && cache_entry->usage == usage)
			return cache_entry;
	}
	return NULL;
}

void gbproxy_imsi_cache_update(struct gbproxy_nse *nse, const char *imsi, enum cache_usage_type usage)
{
	struct gbproxy_config *cfg = nse->cfg;
	struct timespec now;
	struct gbproxy_imsi_cache_entry *cache_entry = _get_imsi_entry(cfg, imsi, usage);
	uint16_t imsi_hash = _checksum_imsi(imsi);

	osmo_clock_gettime(CLOCK_MONOTONIC, &now);

	if (cache_entry) {
		/* Update the entry if it already exists */
		cache_entry->nse = nse;
		cache_entry->tstamp = now.tv_sec;
		return;
	}

	cache_entry = talloc_zero(cfg, struct gbproxy_imsi_cache_entry);
	OSMO_STRLCPY_ARRAY(cache_entry->imsi, imsi);
	cache_entry->nse = nse;
	cache_entry->usage = usage;
	cache_entry->tstamp = now.tv_sec;
	hash_add(cfg->imsi_cache.entries, &cache_entry->list, imsi_hash);
}

static void _imsi_cache_remove_nse(struct gbproxy_nse *nse)
{
	uint i;
	struct gbproxy_config *cfg = nse->cfg;
	struct gbproxy_imsi_cache_entry *imsi_cache;
	struct hlist_node *tmp;

	hash_for_each_safe(cfg->imsi_cache.entries, i, tmp, imsi_cache, list) {
		if (imsi_cache->nse == nse) {
			hash_del(&imsi_cache->list);
			talloc_free(imsi_cache);
		}
	}
}

void gbproxy_imsi_cache_remove(struct gbproxy_config *cfg, const char *imsi, enum cache_usage_type usage)
{
	struct gbproxy_imsi_cache_entry *imsi_cache;
	struct hlist_node *tmp;
	uint16_t imsi_hash = _checksum_imsi(imsi);

	hash_for_each_possible_safe(cfg->imsi_cache.entries, imsi_cache, tmp, list, imsi_hash) {
		if (!strncmp(imsi_cache->imsi, imsi, sizeof(imsi_cache->imsi)) && imsi_cache->usage == usage) {
			hash_del(&imsi_cache->list);
			talloc_free(imsi_cache);
			return;
		}
	}
}

int gbproxy_imsi_cache_cleanup(struct gbproxy_config *cfg)
{
	int i, count = 0;
	struct gbproxy_imsi_cache_entry *imsi_cache;
	struct hlist_node *tmp;
	struct timespec now;
	time_t expiry;

	osmo_clock_gettime(CLOCK_MONOTONIC, &now);
	expiry = now.tv_sec - cfg->imsi_cache.timeout;

	hash_for_each_safe(cfg->imsi_cache.entries, i, tmp, imsi_cache, list) {
		if (imsi_cache->tstamp < expiry) {
			count++;
			LOGP(DGPRS, LOGL_NOTICE, "Cache entry for IMSI %s expired, removing\n", imsi_cache->imsi);
			hash_del(&imsi_cache->list);
			talloc_free(imsi_cache);
		}
	}
	return count;
}

/***********************************************************************
 * NSE - NS Entity
 ***********************************************************************/

struct gbproxy_nse *gbproxy_nse_alloc(struct gbproxy_config *cfg, uint16_t nsei, bool sgsn_facing)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	nse = talloc_zero(tall_sgsn_ctx, struct gbproxy_nse);
	if (!nse)
		return NULL;

	nse->nsei = nsei;
	nse->max_sdu_len = DEFAULT_NSE_SDU;
	nse->cfg = cfg;
	nse->sgsn_facing = sgsn_facing;

	if (sgsn_facing)
		hash_add(cfg->sgsn_nses, &nse->list, nsei);
	else
		hash_add(cfg->bss_nses, &nse->list, nsei);

	hash_init(nse->bvcs);

	LOGPNSE_CAT(nse, DOBJ, LOGL_INFO, "NSE Created\n");

	return nse;
}

static void _nse_free(struct gbproxy_nse *nse)
{
	struct gbproxy_bvc *bvc;
	struct hlist_node *tmp;
	int i;

	if (!nse)
		return;

	LOGPNSE_CAT(nse, DOBJ, LOGL_INFO, "NSE Destroying\n");

	hash_del(&nse->list);
	/* Clear the cache entries of this NSE */
	_tlli_cache_remove_nse(nse);
	_imsi_cache_remove_nse(nse);

	hash_for_each_safe(nse->bvcs, i, tmp, bvc, list)
		gbproxy_bvc_free(bvc);

	talloc_free(nse);
}

static void _sgsn_free(struct gbproxy_sgsn *sgsn);

void gbproxy_nse_free(struct gbproxy_nse *nse)
{
	if (!nse)
		return;
	OSMO_ASSERT(nse->cfg);

	if (nse->sgsn_facing) {
		struct gbproxy_sgsn *sgsn = gbproxy_sgsn_by_nsei(nse->cfg, nse->nsei);
		OSMO_ASSERT(sgsn);
		_sgsn_free(sgsn);
	}

	_nse_free(nse);
}

struct gbproxy_nse *gbproxy_nse_by_nsei(struct gbproxy_config *cfg, uint16_t nsei, uint32_t flags)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	if (flags & NSE_F_SGSN) {
		hash_for_each_possible(cfg->sgsn_nses, nse, list, nsei) {
			if (nse->nsei == nsei)
				return nse;
		}
	}

	if (flags & NSE_F_BSS) {
		hash_for_each_possible(cfg->bss_nses, nse, list, nsei) {
			if (nse->nsei == nsei)
				return nse;
		}
	}

	return NULL;
}
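/* Example (illustrative sketch): the flags argument selects which side(s) to search, so
 * a caller that does not care whether the NSEI belongs to a BSS or an SGSN passes both:
 *
 *	struct gbproxy_nse *nse = gbproxy_nse_by_nsei(cfg, nsei, NSE_F_BSS | NSE_F_SGSN);
 */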

struct gbproxy_nse *gbproxy_nse_by_nsei_or_new(struct gbproxy_config *cfg, uint16_t nsei, bool sgsn_facing)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	nse = gbproxy_nse_by_nsei(cfg, nsei, sgsn_facing ? NSE_F_SGSN : NSE_F_BSS);
	if (!nse)
		nse = gbproxy_nse_alloc(cfg, nsei, sgsn_facing);

	return nse;
}

struct gbproxy_nse *gbproxy_nse_by_tlli(struct gbproxy_config *cfg, uint32_t tlli)
{
	struct gbproxy_tlli_cache_entry *tlli_cache;

	hash_for_each_possible(cfg->tlli_cache.entries, tlli_cache, list, tlli) {
		if (tlli_cache->tlli == tlli)
			return tlli_cache->nse;
	}
	return NULL;
}

struct gbproxy_nse *gbproxy_nse_by_imsi(struct gbproxy_config *cfg, const char *imsi, enum cache_usage_type usage)
{
	struct gbproxy_imsi_cache_entry *imsi_cache;
	uint16_t imsi_hash = _checksum_imsi(imsi);

	hash_for_each_possible(cfg->imsi_cache.entries, imsi_cache, list, imsi_hash) {
		if (!strncmp(imsi_cache->imsi, imsi, sizeof(imsi_cache->imsi)) && imsi_cache->usage == usage)
			return imsi_cache->nse;
	}
	return NULL;
}

/***********************************************************************
 * SGSN - Serving GPRS Support Node
 ***********************************************************************/

/*! Allocate a new SGSN. This ensures the corresponding gbproxy_nse is allocated as well
 *  \param[in] cfg The gbproxy configuration
 *  \param[in] nsei The nsei where the SGSN can be reached
 *  \param[in] name A name to give the SGSN
 *  \return The SGSN, NULL if it couldn't be allocated
 */
struct gbproxy_sgsn *gbproxy_sgsn_alloc(struct gbproxy_config *cfg, uint16_t nsei, const char *name)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	sgsn = talloc_zero(tall_sgsn_ctx, struct gbproxy_sgsn);
	if (!sgsn)
		return NULL;

	sgsn->nse = gbproxy_nse_alloc(cfg, nsei, true);
	if (!sgsn->nse) {
		LOGP(DOBJ, LOGL_ERROR, "Could not allocate NSE(%05u) for SGSN(%s)\n",
		     nsei, name ? name : "(unnamed)");
		goto free_sgsn;
	}

	if (name)
		sgsn->name = talloc_strdup(sgsn, name);
	else
		sgsn->name = talloc_asprintf(sgsn, "NSE(%05u)", sgsn->nse->nsei);
	if (!sgsn->name)
		goto free_sgsn;

	sgsn->pool.allow_attach = true;
	sgsn->pool.nri_ranges = osmo_nri_ranges_alloc(sgsn);

	llist_add_tail(&sgsn->list, &cfg->sgsns);
	LOGPSGSN_CAT(sgsn, DOBJ, LOGL_INFO, "SGSN Created\n");
	return sgsn;

free_sgsn:
	talloc_free(sgsn);
	return NULL;
}

/* Only free gbproxy_sgsn, sgsn can't be NULL */
static void _sgsn_free(struct gbproxy_sgsn *sgsn)
{
	struct gbproxy_config *cfg;

	OSMO_ASSERT(sgsn->nse);
	cfg = sgsn->nse->cfg;
	OSMO_ASSERT(cfg);

	LOGPSGSN_CAT(sgsn, DOBJ, LOGL_INFO, "SGSN Destroying\n");
	llist_del(&sgsn->list);
	/* talloc will free ->name and ->pool.nri_ranges */
	talloc_free(sgsn);
}

/*! Free the SGSN. This ensures the corresponding gbproxy_nse is freed as well
 *  \param[in] sgsn The SGSN
 */
void gbproxy_sgsn_free(struct gbproxy_sgsn *sgsn)
{
	if (!sgsn)
		return;

	OSMO_ASSERT(sgsn->nse);

	_nse_free(sgsn->nse);
	_sgsn_free(sgsn);
}

/*! Return the SGSN with the given name
 *  \param[in] cfg The gbproxy configuration
 *  \param[in] name The name of the SGSN to look for
 *  \return Returns the matching SGSN or NULL if it couldn't be found
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_name(struct gbproxy_config *cfg, const char *name)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	llist_for_each_entry(sgsn, &cfg->sgsns, list) {
		if (!strcmp(sgsn->name, name))
			return sgsn;
	}

	return NULL;
}

/*! Return the SGSN for a given NSEI
 *  \param[in] cfg The gbproxy configuration
 *  \param[in] nsei The nsei where the SGSN can be reached
 *  \return Returns the matching SGSN or NULL if it couldn't be found
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_nsei(struct gbproxy_config *cfg, uint16_t nsei)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	llist_for_each_entry(sgsn, &cfg->sgsns, list) {
		if (sgsn->nse->nsei == nsei)
			return sgsn;
	}

	return NULL;
}

/*! Return the SGSN for a given NSEI, creating a new one if none exists
 *  \param[in] cfg The gbproxy configuration
 *  \param[in] nsei The nsei where the SGSN can be reached
 *  \return Returns the SGSN
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_nsei_or_new(struct gbproxy_config *cfg, uint16_t nsei)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	sgsn = gbproxy_sgsn_by_nsei(cfg, nsei);
	if (!sgsn)
		sgsn = gbproxy_sgsn_alloc(cfg, nsei, NULL);

	return sgsn;
}

/*! Return the gbproxy_sgsn matching that NRI
 *  \param[in] cfg proxy in which we operate
 *  \param[in] nri NRI to look for
 *  \param[out] null_nri If not NULL this indicates whether the NRI is a null NRI
 *  \return The SGSN this NRI has been added to, NULL if no matching SGSN could be found
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_nri(struct gbproxy_config *cfg, uint16_t nri, bool *null_nri)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	llist_for_each_entry(sgsn, &cfg->sgsns, list) {
		if (!sgsn->nse->alive)
			continue;

		if (osmo_nri_v_matches_ranges(nri, sgsn->pool.nri_ranges)) {
			/* Also check if the NRI we're looking for is a NULL NRI */
			if (null_nri) {
				if (osmo_nri_v_matches_ranges(nri, cfg->pool.null_nri_ranges))
					*null_nri = true;
				else
					*null_nri = false;
			}
			return sgsn;
		}
	}

	return NULL;
}
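/* Example (illustrative sketch): a caller resolving an NRI during attach would combine
 * this with the TLLI-based selection below; if the NRI is a NULL NRI or no SGSN matches,
 * the subscriber is re-distributed instead of being kept on the matched SGSN:
 *
 *	bool null_nri = false;
 *	struct gbproxy_sgsn *sgsn = gbproxy_sgsn_by_nri(cfg, nri, &null_nri);
 *	if (!sgsn || null_nri)
 *		sgsn = gbproxy_sgsn_by_tlli(cfg, sgsn, tlli);
 */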

/*! Select a pseudo-random SGSN for a given TLLI, ignoring any SGSN that is not accepting connections or down
 *  \param[in] cfg The gbproxy configuration
 *  \param[in] sgsn_avoid If not NULL then avoid this SGSN when selecting a new one. Use for load redistribution
 *  \param[in] tlli The tlli to choose an SGSN for. The same tlli will map to the same SGSN as long as no SGSN is
 *             added/removed or allow_attach changes.
 *  \return Returns the sgsn on success, NULL if no SGSN that allows new connections could be found
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_tlli(struct gbproxy_config *cfg, struct gbproxy_sgsn *sgsn_avoid,
					  uint32_t tlli)
{
	uint32_t i = 0;
	uint32_t index, num_sgsns;
	OSMO_ASSERT(cfg);

	struct gbproxy_sgsn *sgsn = cfg->pool.nsf_override;

	if (sgsn) {
		LOGPSGSN(sgsn, LOGL_DEBUG, "Node selection function is overridden by config\n");
		return sgsn;
	}

	/* TODO: We should keep track of count in cfg */
	num_sgsns = llist_count(&cfg->sgsns);

	if (num_sgsns == 0)
		return NULL;

	/* FIXME: 256 SGSNs ought to be enough for everyone */
	index = hash_32(tlli, 8) % num_sgsns;

	/* Get the first enabled SGSN after index */
	llist_for_each_entry(sgsn, &cfg->sgsns, list) {
		if (i >= index && sgsn->pool.allow_attach && sgsn->nse->alive)
			return sgsn;
		i++;
	}
	/* Start again from the beginning */
	i = 0;
	llist_for_each_entry(sgsn, &cfg->sgsns, list) {
		if (i >= index)
			break;
		else if (sgsn->pool.allow_attach && sgsn->nse->alive)
			return sgsn;
		i++;
	}

	return NULL;
}

/*! Return the first available gbproxy_sgsn
 *  \param[in] cfg proxy in which we operate
 *  \return The SGSN, NULL if no matching SGSN could be found
 */
struct gbproxy_sgsn *gbproxy_sgsn_by_available(struct gbproxy_config *cfg)
{
	struct gbproxy_sgsn *sgsn;
	OSMO_ASSERT(cfg);

	llist_for_each_entry(sgsn, &cfg->sgsns, list)
		if (sgsn->nse->alive && sgsn->pool.allow_attach)
			return sgsn;

	return NULL;
}