/* Gb proxy peer handling */

/* (C) 2010 by Harald Welte <laforge@gnumonks.org>
 * (C) 2010-2013 by On-Waves
 * (C) 2013 by Holger Hans Peter Freyther
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <osmocom/sgsn/gb_proxy.h>

#include <osmocom/sgsn/debug.h>

#include <osmocom/gprs/protocol/gsm_08_18.h>
#include <osmocom/core/rate_ctr.h>
#include <osmocom/core/stats.h>
#include <osmocom/core/talloc.h>
#include <osmocom/gsm/tlv.h>

#include <string.h>

extern void *tall_sgsn_ctx;

static const struct rate_ctr_desc peer_ctr_description[] = {
	{ "blocked",        "BVC Block " },
	{ "unblocked",      "BVC Unblock " },
	{ "dropped",        "BVC blocked, dropped packet " },
	{ "inv-nsei",       "NSEI mismatch " },
	{ "tx-err",         "NS Transmission error " },
	{ "raid-mod:bss",   "RAID patched (BSS )" },
	{ "raid-mod:sgsn",  "RAID patched (SGSN)" },
	{ "apn-mod:sgsn",   "APN patched " },
	{ "tlli-mod:bss",   "TLLI patched (BSS )" },
	{ "tlli-mod:sgsn",  "TLLI patched (SGSN)" },
	{ "ptmsi-mod:bss",  "P-TMSI patched (BSS )" },
	{ "ptmsi-mod:sgsn", "P-TMSI patched (SGSN)" },
	{ "mod-crypt-err",  "Patch error: encrypted " },
	{ "mod-err",        "Patch error: other " },
	{ "attach-reqs",    "Attach Request count " },
	{ "attach-rejs",    "Attach Reject count " },
	{ "attach-acks",    "Attach Accept count " },
	{ "attach-cpls",    "Attach Completed count " },
	{ "ra-upd-reqs",    "RoutingArea Update Request count" },
	{ "ra-upd-rejs",    "RoutingArea Update Reject count " },
	{ "ra-upd-acks",    "RoutingArea Update Accept count " },
	{ "ra-upd-cpls",    "RoutingArea Update Compltd count" },
	{ "gmm-status",     "GMM Status count (BSS)" },
	{ "gmm-status",     "GMM Status count (SGSN)" },
	{ "detach-reqs",    "Detach Request count " },
	{ "detach-acks",    "Detach Accept count " },
	{ "pdp-act-reqs",   "PDP Activation Request count " },
	{ "pdp-act-rejs",   "PDP Activation Reject count " },
	{ "pdp-act-acks",   "PDP Activation Accept count " },
	{ "pdp-deact-reqs", "PDP Deactivation Request count " },
	{ "pdp-deact-acks", "PDP Deactivation Accept count " },
	{ "tlli-unknown",   "TLLI from SGSN unknown " },
	{ "tlli-cache",     "TLLI cache size " },
};

osmo_static_assert(ARRAY_SIZE(peer_ctr_description) == GBPROX_PEER_CTR_LAST, everything_described);

static const struct rate_ctr_group_desc peer_ctrg_desc = {
	.group_name_prefix = "gbproxy:peer",
	.group_description = "GBProxy Peer Statistics",
	.num_ctr = ARRAY_SIZE(peer_ctr_description),
	.ctr_desc = peer_ctr_description,
	.class_id = OSMO_STATS_CLASS_PEER,
};

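/* Usage sketch: each entry in peer_ctr_description[] corresponds to one value
 * of the GBPROX_PEER_CTR_* enum checked by the osmo_static_assert() above.
 * Assuming GBPROX_PEER_CTR_BLOCKED is the enum value matching the "blocked"
 * counter, a BVC-BLOCK event would be accounted roughly like this:
 *
 *	rate_ctr_inc(&peer->ctrg->ctr[GBPROX_PEER_CTR_BLOCKED]);
 *
 * The counter group itself is created once per peer (indexed by its BVCI) via
 * rate_ctr_group_alloc() in gbproxy_peer_alloc() below.
 */
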
/* Find the gbproxy_peer by its BVCI. There can only be one match */
struct gbproxy_peer *gbproxy_peer_by_bvci(struct gbproxy_config *cfg, uint16_t bvci)
{
	struct gbproxy_nse *nse;

	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		struct gbproxy_peer *peer;
		llist_for_each_entry(peer, &nse->bts_peers, list) {
			if (peer->bvci == bvci)
				return peer;
		}
	}
	return NULL;
}

/* Find the gbproxy_peer by its NSEI */
/* FIXME: Only returns the first peer, but we could have multiple on this nsei */
struct gbproxy_peer *gbproxy_peer_by_nsei(struct gbproxy_config *cfg,
					  uint16_t nsei)
{
	struct gbproxy_nse *nse;
	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		if (nse->nsei == nsei && !llist_empty(&nse->bts_peers))
			return llist_first_entry(&nse->bts_peers, struct gbproxy_peer, list);
	}
	return NULL;
}

/* look-up a peer by its Routeing Area Identification (RAI) */
/* FIXME: this doesn't make sense, as RA can span multiple peers! */
struct gbproxy_peer *gbproxy_peer_by_rai(struct gbproxy_config *cfg,
					 const uint8_t *ra)
{
	struct gbproxy_nse *nse;

	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		struct gbproxy_peer *peer;
		llist_for_each_entry(peer, &nse->bts_peers, list) {
			if (!memcmp(peer->ra, ra, 6))
				return peer;
		}
	}

	return NULL;
}

/* look-up a peer by its Location Area Identification (LAI) */
/* FIXME: this doesn't make sense, as LA can span multiple peers! */
struct gbproxy_peer *gbproxy_peer_by_lai(struct gbproxy_config *cfg,
					 const uint8_t *la)
{
	struct gbproxy_nse *nse;

	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		struct gbproxy_peer *peer;
		llist_for_each_entry(peer, &nse->bts_peers, list) {
			if (!memcmp(peer->ra, la, 5))
				return peer;
		}
	}
	return NULL;
}

/* look-up a peer by its Location Area Code (LAC) */
/* FIXME: this doesn't make sense, as LAC can span multiple peers! */
struct gbproxy_peer *gbproxy_peer_by_lac(struct gbproxy_config *cfg,
					 const uint8_t *la)
{
	struct gbproxy_nse *nse;

	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		struct gbproxy_peer *peer;
		llist_for_each_entry(peer, &nse->bts_peers, list) {
			if (!memcmp(peer->ra + 3, la + 3, 2))
				return peer;
		}
	}
	return NULL;
}

struct gbproxy_peer *gbproxy_peer_by_bssgp_tlv(struct gbproxy_config *cfg,
					       struct tlv_parsed *tp)
{
	if (TLVP_PRESENT(tp, BSSGP_IE_BVCI)) {
		uint16_t bvci;

		bvci = ntohs(tlvp_val16_unal(tp, BSSGP_IE_BVCI));
		if (bvci >= 2)
			return gbproxy_peer_by_bvci(cfg, bvci);
	}

	/* FIXME: this doesn't make sense, as RA can span multiple peers! */
	if (TLVP_PRESENT(tp, BSSGP_IE_ROUTEING_AREA)) {
		uint8_t *rai = (uint8_t *)TLVP_VAL(tp, BSSGP_IE_ROUTEING_AREA);
		/* Only compare LAC part, since MCC/MNC are possibly patched.
		 * Since the LAC of different BSS must be different when
		 * MCC/MNC are patched, collisions shouldn't happen. */
		return gbproxy_peer_by_lac(cfg, rai);
	}

	/* FIXME: this doesn't make sense, as LA can span multiple peers! */
	if (TLVP_PRESENT(tp, BSSGP_IE_LOCATION_AREA)) {
		uint8_t *lai = (uint8_t *)TLVP_VAL(tp, BSSGP_IE_LOCATION_AREA);
		return gbproxy_peer_by_lac(cfg, lai);
	}

	return NULL;
}

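/* Lookup sketch: a signalling-BVC handler would typically fill a tlv_parsed
 * from the received BSSGP PDU and then resolve the peer via the helpers
 * above; pdu_data, pdu_data_len and nsei below are placeholder names, not
 * identifiers from this file:
 *
 *	struct tlv_parsed tp;
 *	struct gbproxy_peer *peer;
 *
 *	if (bssgp_tlv_parse(&tp, pdu_data, pdu_data_len) < 0)
 *		return -EINVAL;
 *	peer = gbproxy_peer_by_bssgp_tlv(cfg, &tp);
 *	if (!peer)
 *		peer = gbproxy_peer_by_nsei(cfg, nsei);
 */
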
static void clean_stale_timer_cb(void *data)
{
	time_t now;
	struct timespec ts = {0,};
	struct gbproxy_peer *peer = (struct gbproxy_peer *) data;
	OSMO_ASSERT(peer);
	OSMO_ASSERT(peer->nse);
	struct gbproxy_config *cfg = peer->nse->cfg;
	OSMO_ASSERT(cfg);

	osmo_clock_gettime(CLOCK_MONOTONIC, &ts);
	now = ts.tv_sec;
	gbproxy_remove_stale_link_infos(peer, now);
	if (cfg->clean_stale_timer_freq != 0)
		osmo_timer_schedule(&peer->clean_stale_timer,
					cfg->clean_stale_timer_freq, 0);
}

struct gbproxy_peer *gbproxy_peer_alloc(struct gbproxy_nse *nse, uint16_t bvci)
{
	struct gbproxy_peer *peer;
	OSMO_ASSERT(nse);
	struct gbproxy_config *cfg = nse->cfg;
	OSMO_ASSERT(cfg);

	peer = talloc_zero(tall_sgsn_ctx, struct gbproxy_peer);
	if (!peer)
		return NULL;

	peer->bvci = bvci;
	peer->ctrg = rate_ctr_group_alloc(peer, &peer_ctrg_desc, bvci);
	if (!peer->ctrg) {
		talloc_free(peer);
		return NULL;
	}
	peer->nse = nse;

	llist_add(&peer->list, &nse->bts_peers);

	INIT_LLIST_HEAD(&peer->patch_state.logical_links);

	osmo_timer_setup(&peer->clean_stale_timer, clean_stale_timer_cb, peer);
	if (cfg->clean_stale_timer_freq != 0)
		osmo_timer_schedule(&peer->clean_stale_timer,
					cfg->clean_stale_timer_freq, 0);

	return peer;
}

void gbproxy_peer_free(struct gbproxy_peer *peer)
{
	OSMO_ASSERT(peer);

	llist_del(&peer->list);
	osmo_timer_del(&peer->clean_stale_timer);
	gbproxy_delete_link_infos(peer);

	rate_ctr_group_free(peer->ctrg);
	peer->ctrg = NULL;

	talloc_free(peer);
}

void gbproxy_peer_move(struct gbproxy_peer *peer, struct gbproxy_nse *nse)
{
	llist_del(&peer->list);
	llist_add(&peer->list, &nse->bts_peers);
	peer->nse = nse;
}

int gbproxy_cleanup_peers(struct gbproxy_config *cfg, uint16_t nsei, uint16_t bvci)
{
	int counter = 0;
	struct gbproxy_nse *nse, *ntmp;
	OSMO_ASSERT(cfg);

	llist_for_each_entry_safe(nse, ntmp, &cfg->nse_peers, list) {
		struct gbproxy_peer *peer, *tmp;
		if (nse->nsei != nsei)
			continue;
		llist_for_each_entry_safe(peer, tmp, &nse->bts_peers, list) {
			if (bvci && peer->bvci != bvci)
				continue;

			gbproxy_peer_free(peer);
			counter += 1;
		}
	}

	return counter;
}

struct gbproxy_nse *gbproxy_nse_alloc(struct gbproxy_config *cfg, uint16_t nsei)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	nse = talloc_zero(tall_sgsn_ctx, struct gbproxy_nse);
	if (!nse)
		return NULL;

	nse->nsei = nsei;
	nse->cfg = cfg;

	llist_add(&nse->list, &cfg->nse_peers);

	INIT_LLIST_HEAD(&nse->bts_peers);

	return nse;
}

void gbproxy_nse_free(struct gbproxy_nse *nse)
{
	struct gbproxy_peer *peer, *tmp;
	OSMO_ASSERT(nse);

	llist_del(&nse->list);

	llist_for_each_entry_safe(peer, tmp, &nse->bts_peers, list)
		gbproxy_peer_free(peer);

	talloc_free(nse);
}

struct gbproxy_nse *gbproxy_nse_by_nsei(struct gbproxy_config *cfg, uint16_t nsei)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	llist_for_each_entry(nse, &cfg->nse_peers, list) {
		if (nse->nsei == nsei)
			return nse;
	}

	return NULL;
}

struct gbproxy_nse *gbproxy_nse_by_nsei_or_new(struct gbproxy_config *cfg, uint16_t nsei)
{
	struct gbproxy_nse *nse;
	OSMO_ASSERT(cfg);

	nse = gbproxy_nse_by_nsei(cfg, nsei);
	if (!nse)
		nse = gbproxy_nse_alloc(cfg, nsei);

	return nse;
}
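
/* Lifecycle sketch: a peer always hangs off an NSE, so creation chains the
 * helpers above; nsei and bvci are placeholder values here:
 *
 *	struct gbproxy_nse *nse = gbproxy_nse_by_nsei_or_new(cfg, nsei);
 *	struct gbproxy_peer *peer = nse ? gbproxy_peer_alloc(nse, bvci) : NULL;
 *
 * Teardown either removes individual BVCs via gbproxy_cleanup_peers(cfg,
 * nsei, bvci) or drops the whole NSE, including any remaining peers, via
 * gbproxy_nse_free(nse).
 */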