blob: 918cf425e0bdc50b37184aec8fc975d89a656659 [file] [log] [blame]
Harald Welte5df0be62019-04-17 20:54:29 +02001/*
2 Samba Unix SMB/CIFS implementation.
3
4 Samba trivial allocation library - new interface
5
6 NOTE: Please read talloc_guide.txt for full documentation
7
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
10
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
14
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
19
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
24
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
27*/
28
29/*
30 inspired by http://swapped.cc/halloc/
31*/
32
Eric Wild8200fcc2019-11-27 18:01:44 +010033#include <parts.h>
34#include <assert.h>
35#include <osmocom/core/utils.h>
36
Harald Welte5df0be62019-04-17 20:54:29 +020037#include "replace.h"
38#include "talloc.h"
39
40#ifdef HAVE_SYS_AUXV_H
41#include <sys/auxv.h>
42#endif
43
44#if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
45#error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
46#endif
47
48#if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
49#error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
50#endif
51
52/* Special macros that are no-ops except when run under Valgrind on
53 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
54#ifdef HAVE_VALGRIND_MEMCHECK_H
55 /* memcheck.h includes valgrind.h */
56#include <valgrind/memcheck.h>
57#elif defined(HAVE_VALGRIND_H)
58#include <valgrind.h>
59#endif
60
/* use this to force every realloc to change the pointer, to stress test
   code that might not cope */
#define ALWAYS_REALLOC 0


/* sanity cap: refuse any single allocation of 256MiB or more */
#define MAX_TALLOC_SIZE 0x10000000

/* low bits of talloc_chunk.flags; all remaining bits hold the magic */
#define TALLOC_FLAG_FREE 0x01
#define TALLOC_FLAG_LOOP 0x02
#define TALLOC_FLAG_POOL 0x04		/* This is a talloc pool */
#define TALLOC_FLAG_POOLMEM 0x08	/* This is allocated in a pool */

/*
 * Bits above this are random, used to make it harder to fake talloc
 * headers during an attack. Try not to change this without good reason.
 */
#define TALLOC_FLAG_MASK 0x0F

/* sentinel "name" marking a chunk as a talloc_reference_handle */
#define TALLOC_MAGIC_REFERENCE ((const char *)1)

/* deterministic fallback magic, mixed with the build version so that
 * two differently-versioned talloc libraries reject each other's chunks */
#define TALLOC_MAGIC_BASE 0xe814ec70
#define TALLOC_MAGIC_NON_RANDOM ( \
	~TALLOC_FLAG_MASK & ( \
		TALLOC_MAGIC_BASE + \
		(TALLOC_BUILD_VERSION_MAJOR << 24) + \
		(TALLOC_BUILD_VERSION_MINOR << 16) + \
		(TALLOC_BUILD_VERSION_RELEASE << 8)))
/* runtime magic; re-randomised by talloc_lib_init() where supported */
static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
89
/* by default we abort when given a bad pointer (such as when talloc_free() is called
   on a pointer that came from malloc() */
#ifndef TALLOC_ABORT
#define TALLOC_ABORT(reason) abort()
#endif

/* strip const from a pointer without a diagnostic; round-trip through
 * intptr_t when available to keep the cast well-defined */
#ifndef discard_const_p
#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
#else
# define discard_const_p(type, ptr) ((type *)(ptr))
#endif
#endif

/* these macros gain us a few percent of speed on gcc */
#if (__GNUC__ >= 3)
/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
   as its first argument */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#else
#ifndef likely
#define likely(x) (x)
#endif
#ifndef unlikely
#define unlikely(x) (x)
#endif
#endif
122
/* this null_context is only used if talloc_enable_leak_report() or
   talloc_enable_leak_report_full() is called, otherwise it remains
   NULL
*/
static void *null_context;
static bool talloc_report_null;		/* emit summary leak report at exit */
static bool talloc_report_null_full;	/* emit full leak report at exit */
static void *autofree_context;		/* context freed automatically at exit */

static void talloc_setup_atexit(void);

/* used to enable fill of memory on free, which can be useful for
 * catching use after free errors when valgrind is too slow
 */
static struct {
	bool initialised;	/* set once the fill config has been read */
	bool enabled;		/* overwrite freed memory with fill_value */
	uint8_t fill_value;	/* byte pattern written over freed memory */
} talloc_fill;

/* environment variable selecting the free-fill byte */
#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
144
/*
 * do not wipe the header, to allow the
 * double-free logic to still work
 */
#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size; \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk (header included) as not accessible */
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
	size_t _flen = TC_HDR_SIZE + (_tc)->size; \
	char *_fptr = (char *)(_tc); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while(0)
#else
#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
#endif

/* full-chunk invalidation on free: optional fill, then valgrind poisoning */
#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
	TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
	TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
} while (0)

/* overwrite the bytes released by shrinking a chunk to _new_size */
#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused trailing bytes not accessible */
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
} while (0)
#else
#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

/* shrink invalidation: released tail may never be read again */
#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

/* like the SHRINK_FILL variant; paired with UNDEFINED (not NOACCESS)
 * poisoning because the tail may legitimately be re-used afterwards */
#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
	if (unlikely(talloc_fill.enabled)) { \
		size_t _flen = (_tc)->size - (_new_size); \
		char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
		_fptr += (_new_size); \
		memset(_fptr, talloc_fill.fill_value, _flen); \
	} \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the unused bytes as undefined */
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _flen = (_tc)->size - (_new_size); \
	char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
	_fptr += (_new_size); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
	TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
/* Mark the bytes gained by growing to _new_size as undefined */
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
	size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
	size_t _new_used = TC_HDR_SIZE + (_new_size); \
	size_t _flen = _new_used - _old_used; \
	char *_fptr = _old_used + (char *)(_tc); \
	VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
} while (0)
#else
#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
#endif

#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
	TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
} while (0)
241
/* list node recording one talloc_reference() on a chunk */
struct talloc_reference_handle {
	struct talloc_reference_handle *next, *prev;
	void *ptr;		/* the referenced talloc pointer */
	const char *location;	/* source location that took the reference */
};

/* hierarchical memory quota attached to a talloc context */
struct talloc_memlimit {
	struct talloc_chunk *parent;	/* chunk this limit is attached to */
	struct talloc_memlimit *upper;	/* next enclosing limit, if any */
	size_t max_size;		/* aggregate quota for new children */
	size_t cur_size;		/* bytes currently accounted */
};

static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
				size_t size);
static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
				size_t size);
static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);

static inline void _tc_set_name_const(struct talloc_chunk *tc,
				const char *name);
static struct talloc_chunk *_vasprintf_tc(const void *t,
				const char *fmt,
				va_list ap);

/* destructor callback: return 0 to allow the free, -1 to veto it */
typedef int (*talloc_destructor_t)(void *);

struct talloc_pool_hdr;
271
/* per-allocation header placed immediately before the user pointer */
struct talloc_chunk {
	/*
	 * flags includes the talloc magic, which is randomised to
	 * make overwrite attacks harder
	 */
	unsigned flags;

	/*
	 * If you have a logical tree like:
	 *
	 *           <parent>
	 *           /   |   \
	 *          /    |    \
	 *         /     |     \
	 * <child 1> <child 2> <child 3>
	 *
	 * The actual talloc tree is:
	 *
	 *  <parent>
	 *     |
	 *  <child 1> - <child 2> - <child 3>
	 *
	 * The children are linked with next/prev pointers, and
	 * child 1 is linked to the parent with parent/child
	 * pointers.
	 */

	struct talloc_chunk *next, *prev;
	struct talloc_chunk *parent, *child;
	struct talloc_reference_handle *refs;
	talloc_destructor_t destructor;
	const char *name;
	size_t size;

	/*
	 * limit semantics:
	 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
	 * allocations.
	 * cur_size is used to keep track of the current use
	 */
	struct talloc_memlimit *limit;

	/*
	 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
	 * is a pointer to the struct talloc_chunk of the pool that it was
	 * allocated from. This way children can quickly find the pool to chew
	 * from.
	 */
	struct talloc_pool_hdr *pool;
};

/* 16 byte alignment seems to keep everyone happy */
#define TC_ALIGN16(s) (((s)+15)&~15)
#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
328
/* Report the talloc ABI major version this library was built from. */
_PUBLIC_ int talloc_version_major(void)
{
	return TALLOC_VERSION_MAJOR;
}

/* Report the talloc ABI minor version this library was built from. */
_PUBLIC_ int talloc_version_minor(void)
{
	return TALLOC_VERSION_MINOR;
}

/* Expose the (possibly randomised) runtime magic, for the test suite. */
_PUBLIC_ int talloc_test_get_magic(void)
{
	return talloc_magic;
}
343
/*
 * Stamp @tc as freed.  @location (may be NULL) is recorded in tc->name
 * so a later double free can report where the first free happened.
 */
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
					  const char *location)
{
	/*
	 * Mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 *
	 */
	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
		| (tc->flags & TALLOC_FLAG_MASK);

	/* we mark the freed memory with where we called the free
	 * from. This means on a double free error we can report where
	 * the first free came from
	 */
	if (location) {
		tc->name = location;
	}
}
367
/* Re-stamp @tc as live: restore the runtime magic and clear FREE. */
static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
{
	/*
	 * Mark this memory as not free.
	 *
	 * Why? This is memory either in a pool (and so available for
	 * talloc's re-use or after the realloc(). We need to mark
	 * the memory as free() before any realloc() call as we can't
	 * write to the memory after that.
	 *
	 * We put back the normal magic instead of the 'not random'
	 * magic.
	 */

	tc->flags = talloc_magic |
		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
}
385
/* user-installed sink for talloc diagnostics; NULL silences logging */
static void (*talloc_log_fn)(const char *message);

/* Install @log_fn as the sink for talloc diagnostic messages. */
_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
	talloc_log_fn = log_fn;
}
392
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
/*
 * Library constructor: randomise talloc_magic before main() runs, so
 * forged chunk headers are harder to construct.  Falls back to the
 * load address when AT_RANDOM is unavailable.
 */
void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * libs.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	/* keep the low flag bits clear; they carry the chunk state */
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
436
/*
 * Exit handler: free the autofree context, then emit the leak report
 * on the null context if one was requested and anything leaked.
 */
static void talloc_lib_atexit(void)
{
	TALLOC_FREE(autofree_context);

	if (talloc_total_size(null_context) == 0) {
		return;		/* nothing leaked, stay quiet */
	}

	if (talloc_report_null_full) {
		talloc_report_full(null_context, stderr);
	} else if (talloc_report_null) {
		talloc_report(null_context, stderr);
	}
}
451
452static void talloc_setup_atexit(void)
453{
454 static bool done;
455
456 if (done) {
457 return;
458 }
459
460 atexit(talloc_lib_atexit);
461 done = true;
462}
463
464static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
465static void talloc_log(const char *fmt, ...)
466{
467 va_list ap;
468 char *message;
469
470 if (!talloc_log_fn) {
471 return;
472 }
473
474 va_start(ap, fmt);
475 message = talloc_vasprintf(NULL, fmt, ap);
476 va_end(ap);
477
478 talloc_log_fn(message);
479 talloc_free(message);
480}
481
/* Default log sink: write the message verbatim to stderr. */
static void talloc_log_stderr(const char *message)
{
	fprintf(stderr, "%s", message);
}

/* Route talloc diagnostics to stderr. */
_PUBLIC_ void talloc_set_log_stderr(void)
{
	talloc_set_log_fn(talloc_log_stderr);
}
491
/* user-installed abort handler; NULL falls back to TALLOC_ABORT() */
static void (*talloc_abort_fn)(const char *reason);

/* Install @abort_fn to be called instead of abort() on fatal errors. */
_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
{
	talloc_abort_fn = abort_fn;
}
498
499static void talloc_abort(const char *reason)
500{
Harald Welte189f43d2019-04-17 21:19:04 +0200501 talloc_log("%s\r\n", reason);
Harald Welte5df0be62019-04-17 20:54:29 +0200502
503 if (!talloc_abort_fn) {
504 TALLOC_ABORT(reason);
505 }
506
507 talloc_abort_fn(reason);
508}
509
/* Abort with a use-after-free diagnosis (old-style magic seen). */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}

/* Abort when the header magic matches nothing we recognise. */
static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
519
/* panic if we get a bad magic value */
/*
 * Map a user pointer back to its talloc_chunk header (TC_HDR_SIZE bytes
 * before it) and validate the magic.  A header carrying the non-random
 * magic plus FREE is diagnosed as access-after-free (tc->name then holds
 * the location of the first free); any other mismatch is an unknown
 * value.  Both paths abort; NULL is only returned if the abort handler
 * returns.
 */
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
{
	const char *pp = (const char *)ptr;
	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
			talloc_log("talloc: access after free error - first free may be at %s\r\n", tc->name);
			talloc_abort_access_after_free();
			return NULL;
		}

		talloc_abort_unknown_value();
		return NULL;
	}
	return tc;
}
538
/* hook into the front of the list */
#define _TLIST_ADD(list, p) \
do { \
	if (!(list)) { \
		(list) = (p); \
		(p)->next = (p)->prev = NULL; \
	} else { \
		(list)->prev = (p); \
		(p)->next = (list); \
		(p)->prev = NULL; \
		(list) = (p); \
	}\
} while (0)

/* remove an element from a list - element doesn't have to be in list.
 * p's own next/prev are reset to NULL unless p is still the list head. */
#define _TLIST_REMOVE(list, p) \
do { \
	if ((p) == (list)) { \
		(list) = (p)->next; \
		if (list) (list)->prev = NULL; \
	} else { \
		if ((p)->prev) (p)->prev->next = (p)->next; \
		if ((p)->next) (p)->next->prev = (p)->prev; \
	} \
	if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
} while (0)
565
566
567/*
568 return the parent chunk of a pointer
569*/
570static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
571{
572 struct talloc_chunk *tc;
573
574 if (unlikely(ptr == NULL)) {
575 return NULL;
576 }
577
578 tc = talloc_chunk_from_ptr(ptr);
579 while (tc->prev) tc=tc->prev;
580
581 return tc->parent;
582}
583
/* Return the parent context of @ptr, or NULL if it has none. */
_PUBLIC_ void *talloc_parent(const void *ptr)
{
	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
	return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
}

/*
  find parents name
*/
_PUBLIC_ const char *talloc_parent_name(const void *ptr)
{
	struct talloc_chunk *tc = talloc_parent_chunk(ptr);
	return tc? tc->name : NULL;
}
598
/*
  A pool carries an in-pool object count in the first 16 bytes.
  This is done to support talloc_steal() to a parent outside of the
  pool. The count includes the pool itself, so a talloc_free() on a pool will
  only destroy the pool if the count has dropped to zero. A talloc_free() of a
  pool member will reduce the count, and eventually also call free(3) on the
  pool memory.

  The object count is not put into "struct talloc_chunk" because it is only
  relevant for talloc pools and the alignment to 16 bytes would increase the
  memory footprint of each talloc chunk by those 16 bytes.
*/

struct talloc_pool_hdr {
	void *end;			/* first unused byte inside the pool */
	unsigned int object_count;	/* live members, pool itself included */
	size_t poolsize;		/* usable bytes after the pool's chunk header */
};

/* the pool header is placed immediately before the pool's talloc_chunk */
#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
619
/* pool chunk -> its pool header (stored TP_HDR_SIZE bytes before it) */
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
{
	return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
}

/* pool header -> the pool's talloc_chunk */
static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
{
	return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
}

/* one-past-the-end address of the pool's usable memory */
static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
}

/* bytes still available between pool->end and the end of the pool */
static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
{
	return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
}

/* If tc is inside a pool, this gives the next neighbour. */
static inline void *tc_next_chunk(struct talloc_chunk *tc)
{
	return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
}

/* address of the first member chunk inside a pool */
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
	return tc_next_chunk(tc);
}
652
/* Mark the whole remaining pool (from pool->end onwards) as not accessible:
 * optionally overwrite it with the fill byte, then poison it for valgrind. */
static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
{
	size_t flen = tc_pool_space_left(pool_hdr);

	if (unlikely(talloc_fill.enabled)) {
		memset(pool_hdr->end, talloc_fill.fill_value, flen);
	}

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
	VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
#endif
}
666
/*
  Allocate from a pool
*/

/*
 * Try to carve a chunk of @size (plus @prefix_len bytes in front of the
 * header) out of the pool @parent belongs to.  Returns NULL when the
 * parent is not pool-backed or the pool lacks space, in which case the
 * caller falls back to malloc().
 */
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
						 size_t size, size_t prefix_len)
{
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t space_left;
	struct talloc_chunk *result;
	size_t chunk_size;

	if (parent == NULL) {
		return NULL;
	}

	/* the parent is either the pool itself or a member of one */
	if (parent->flags & TALLOC_FLAG_POOL) {
		pool_hdr = talloc_pool_from_chunk(parent);
	}
	else if (parent->flags & TALLOC_FLAG_POOLMEM) {
		pool_hdr = parent->pool;
	}

	if (pool_hdr == NULL) {
		return NULL;
	}

	space_left = tc_pool_space_left(pool_hdr);

	/*
	 * Align size to 16 bytes
	 */
	chunk_size = TC_ALIGN16(size + prefix_len);

	if (space_left < chunk_size) {
		return NULL;
	}

	result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
#endif

	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);

	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
	result->pool = pool_hdr;

	pool_hdr->object_count++;

	return result;
}
720
/*
  Allocate a bit of memory as a child of an existing pointer
*/
/*
 * Core allocator.  Reserves TC_HDR_SIZE + @size bytes (plus an optional
 * @prefix_len in front of the header, used by pools), first trying the
 * parent's pool and falling back to malloc() with memlimit accounting.
 * On success *tc_ret is the new header, linked as first child of
 * @context, and the user pointer is returned; NULL on failure.
 */
static inline void *__talloc_with_prefix(const void *context,
					 size_t size,
					 size_t prefix_len,
					 struct talloc_chunk **tc_ret)
{
	struct talloc_chunk *tc = NULL;
	struct talloc_memlimit *limit = NULL;
	size_t total_len = TC_HDR_SIZE + size + prefix_len;
	struct talloc_chunk *parent = NULL;

	/* do not allocate while handling interrupts! (osmocom firmware:
	 * heap is not ISR-safe)
	 * NOTE(review): no trailing ';' after OSMO_ASSERT(...) — the macro
	 * presumably supplies its own; confirm against osmocom/core/utils.h */
	OSMO_ASSERT( !(SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) )

	if (unlikely(context == NULL)) {
		context = null_context;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* reject size_t overflow in total_len */
	if (unlikely(total_len < TC_HDR_SIZE)) {
		return NULL;
	}

	if (likely(context != NULL)) {
		parent = talloc_chunk_from_ptr(context);

		if (parent->limit != NULL) {
			limit = parent->limit;
		}

		tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
	}

	if (tc == NULL) {
		char *ptr;

		/*
		 * Only do the memlimit check/update on actual allocation.
		 */
		if (!talloc_memlimit_check(limit, total_len)) {
			errno = ENOMEM;
			return NULL;
		}

		ptr = malloc(total_len);
		if (unlikely(ptr == NULL)) {
			return NULL;
		}
		tc = (struct talloc_chunk *)(ptr + prefix_len);
		tc->flags = talloc_magic;
		tc->pool  = NULL;

		talloc_memlimit_grow(limit, total_len);
	}

	tc->limit = limit;
	tc->size = size;
	tc->destructor = NULL;
	tc->child = NULL;
	tc->name = NULL;
	tc->refs = NULL;

	/* insert at the head of the parent's child list */
	if (likely(context != NULL)) {
		if (parent->child) {
			parent->child->parent = NULL;
			tc->next = parent->child;
			tc->next->prev = tc;
		} else {
			tc->next = NULL;
		}
		tc->parent = parent;
		tc->prev = NULL;
		parent->child = tc;
	} else {
		tc->next = tc->prev = tc->parent = NULL;
	}

	*tc_ret = tc;
	return TC_PTR_FROM_CHUNK(tc);
}
806
/* Convenience wrapper: allocate with no prefix bytes. */
static inline void *__talloc(const void *context,
			     size_t size,
			     struct talloc_chunk **tc)
{
	return __talloc_with_prefix(context, size, 0, tc);
}
813
/*
 * Create a talloc pool
 */

/*
 * Allocate a pool of @size usable bytes under @context.  The pool
 * header is placed in the TP_HDR_SIZE prefix in front of the pool's
 * own chunk; the chunk's size is set to 0 because the pool's payload
 * is bookkept via poolsize, not via tc->size.
 */
static inline void *_talloc_pool(const void *context, size_t size)
{
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *result;

	result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);

	if (unlikely(result == NULL)) {
		return NULL;
	}

	pool_hdr = talloc_pool_from_chunk(tc);

	tc->flags |= TALLOC_FLAG_POOL;
	tc->size = 0;

	pool_hdr->object_count = 1;	/* the pool itself counts as one */
	pool_hdr->end = result;
	pool_hdr->poolsize = size;

	tc_invalidate_pool(pool_hdr);

	return result;
}
843
/* Public entry point for pool creation; see _talloc_pool(). */
_PUBLIC_ void *talloc_pool(const void *context, size_t size)
{
	return _talloc_pool(context, size);
}
848
/*
 * Create a talloc pool correctly sized for a basic size plus
 * a number of subobjects whose total size is given. Essentially
 * a custom allocator for talloc to reduce fragmentation.
 */

/*
 * Every size computation below is checked for wrap-around before use;
 * any overflow aborts the allocation (returns NULL via 'overflow').
 */
_PUBLIC_ void *_talloc_pooled_object(const void *ctx,
				     size_t type_size,
				     const char *type_name,
				     unsigned num_subobjects,
				     size_t total_subobjects_size)
{
	size_t poolsize, subobjects_slack, tmp;
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *ret;

	poolsize = type_size + total_subobjects_size;

	if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
		goto overflow;
	}

	if (num_subobjects == UINT_MAX) {
		goto overflow;
	}
	num_subobjects += 1; /* the object body itself */

	/*
	 * Alignment can increase the pool size by at most 15 bytes per object
	 * plus alignment for the object itself
	 */
	subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
	if (subobjects_slack < num_subobjects) {
		goto overflow;
	}

	tmp = poolsize + subobjects_slack;
	if ((tmp < poolsize) || (tmp < subobjects_slack)) {
		goto overflow;
	}
	poolsize = tmp;

	ret = _talloc_pool(ctx, poolsize);
	if (ret == NULL) {
		return NULL;
	}

	/* the object body itself occupies the start of the pool */
	tc = talloc_chunk_from_ptr(ret);
	tc->size = type_size;

	pool_hdr = talloc_pool_from_chunk(tc);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
#endif

	pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));

	_tc_set_name_const(tc, type_name);
	return ret;

overflow:
	return NULL;
}
914
/*
  setup a destructor to be called on free of a pointer
  the destructor should return 0 on success, or -1 on failure.
  if the destructor fails then the free is failed, and the memory can
  be continued to be used
*/
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
	tc->destructor = destructor;
}
926
/*
  increase the reference count on a piece of memory.
  Implemented as a reference from the null context; returns 0 on
  success, -1 on failure.
*/
_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
{
	if (unlikely(!talloc_reference(null_context, ptr))) {
		return -1;
	}
	return 0;
}
937
/*
  helper for talloc_reference()

  this is referenced by a function pointer and should not be inline
*/
/* Unlink the handle from the referenced chunk's refs list on free. */
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
{
	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
	_TLIST_REMOVE(ptr_tc->refs, handle);
	return 0;
}
949
/*
  more efficient way to add a name to a pointer - the name must point to a
  true string constant (the pointer is stored, not copied)
*/
static inline void _tc_set_name_const(struct talloc_chunk *tc,
				      const char *name)
{
	tc->name = name;
}
959
960/*
961 internal talloc_named_const()
962*/
963static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
964{
965 void *ptr;
966 struct talloc_chunk *tc;
967
968 ptr = __talloc(context, size, &tc);
969 if (unlikely(ptr == NULL)) {
970 return NULL;
971 }
972
973 _tc_set_name_const(tc, name);
974
975 return ptr;
976}
977
/*
  make a secondary reference to a pointer, hanging off the given context.
  the pointer remains valid until both the original caller and this given
  context are freed.

  the major use for this is when two different structures need to reference the
  same underlying data, and you want to be able to free the two instances separately,
  and in either order
*/
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;
	struct talloc_reference_handle *handle;
	if (unlikely(ptr == NULL)) return NULL;

	/* the handle is itself a talloc chunk, named with the
	 * TALLOC_MAGIC_REFERENCE sentinel so it can be recognised */
	tc = talloc_chunk_from_ptr(ptr);
	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
						   sizeof(struct talloc_reference_handle),
						   TALLOC_MAGIC_REFERENCE);
	if (unlikely(handle == NULL)) return NULL;

	/* note that we hang the destructor off the handle, not the
	   main context as that allows the caller to still setup their
	   own destructor on the context if they want to */
	talloc_set_destructor(handle, talloc_reference_destructor);
	handle->ptr = discard_const_p(void, ptr);
	handle->location = location;
	_TLIST_ADD(tc->refs, handle);
	return handle->ptr;
}
1008
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);

/*
 * Release a chunk that lives inside a pool.  The pool's backing memory
 * is only returned to the system once its object_count drops to zero;
 * until then freed members are either reclaimed (when they are the last
 * allocation before pool->end) or left as wasted space.
 */
static inline void _tc_free_poolmem(struct talloc_chunk *tc,
				    const char *location)
{
	struct talloc_pool_hdr *pool;
	struct talloc_chunk *pool_tc;
	void *next_tc;

	pool = tc->pool;
	pool_tc = talloc_chunk_from_pool(pool);
	next_tc = tc_next_chunk(tc);

	_talloc_chunk_set_free(tc, location);

	TC_INVALIDATE_FULL_CHUNK(tc);

	if (unlikely(pool->object_count == 0)) {
		talloc_abort("Pool object count zero!");
		return;
	}

	pool->object_count--;

	if (unlikely(pool->object_count == 1
		     && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
		/*
		 * if there is just one object left in the pool
		 * and pool->flags does not have TALLOC_FLAG_FREE,
		 * it means this is the pool itself and
		 * the rest is available for new objects
		 * again.
		 */
		pool->end = tc_pool_first_chunk(pool);
		tc_invalidate_pool(pool);
		return;
	}

	if (unlikely(pool->object_count == 0)) {
		/*
		 * we mark the freed memory with where we called the free
		 * from. This means on a double free error we can report where
		 * the first free came from
		 */
		pool_tc->name = location;

		if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
			/* nested pool: recurse into the enclosing pool */
			_tc_free_poolmem(pool_tc, location);
		} else {
			/*
			 * The tc_memlimit_update_on_free()
			 * call takes into account the
			 * prefix TP_HDR_SIZE allocated before
			 * the pool talloc_chunk.
			 */
			tc_memlimit_update_on_free(pool_tc);
			TC_INVALIDATE_FULL_CHUNK(pool_tc);
			free(pool);
		}
		return;
	}

	if (pool->end == next_tc) {
		/*
		 * if pool->pool still points to end of
		 * 'tc' (which is stored in the 'next_tc' variable),
		 * we can reclaim the memory of 'tc'.
		 */
		pool->end = tc;
		return;
	}

	/*
	 * Do nothing. The memory is just "wasted", waiting for the pool
	 * itself to be freed.
	 */
}

static inline void _tc_free_children_internal(struct talloc_chunk *tc,
					      void *ptr,
					      const char *location);

static inline int _talloc_free_internal(void *ptr, const char *location);
1092
/*
  internal free call that takes a struct talloc_chunk *.
*/
static inline int _tc_free_internal(struct talloc_chunk *tc,
				const char *location)
{
	void *ptr_to_free;
	void *ptr = TC_PTR_FROM_CHUNK(tc);

	if (unlikely(tc->refs)) {
		int is_child;
		/* check if this is a reference from a child or
		 * grandchild back to it's parent or grandparent
		 *
		 * in that case we need to remove the reference and
		 * call another instance of talloc_free() on the current
		 * pointer.
		 */
		is_child = talloc_is_parent(tc->refs, ptr);
		_talloc_free_internal(tc->refs, location);
		if (is_child) {
			return _talloc_free_internal(ptr, location);
		}
		/* other references remain: the chunk stays alive */
		return -1;
	}

	if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
		/* we have a free loop - stop looping */
		return 0;
	}

	if (unlikely(tc->destructor)) {
		talloc_destructor_t d = tc->destructor;

		/*
		 * Protect the destructor against some overwrite
		 * attacks, by explicitly checking it has the right
		 * magic here.
		 */
		if (talloc_chunk_from_ptr(ptr) != tc) {
			/*
			 * This can't actually happen, the
			 * call itself will panic.
			 */
			TALLOC_ABORT("talloc_chunk_from_ptr failed!");
		}

		/* a destructor of -1 marks "destructor currently running":
		 * refuse the (re-entrant) free */
		if (d == (talloc_destructor_t)-1) {
			return -1;
		}
		tc->destructor = (talloc_destructor_t)-1;
		if (d(ptr) == -1) {
			/*
			 * Only replace the destructor pointer if
			 * calling the destructor didn't modify it.
			 */
			if (tc->destructor == (talloc_destructor_t)-1) {
				tc->destructor = d;
			}
			return -1;
		}
		tc->destructor = NULL;
	}

	/* unlink this chunk from its parent's child list, or from the
	 * parentless sibling list */
	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	/* guard against reference cycles while freeing the children */
	tc->flags |= TALLOC_FLAG_LOOP;

	_tc_free_children_internal(tc, ptr, location);

	_talloc_chunk_set_free(tc, location);

	if (tc->flags & TALLOC_FLAG_POOL) {
		struct talloc_pool_hdr *pool;

		pool = talloc_pool_from_chunk(tc);

		if (unlikely(pool->object_count == 0)) {
			talloc_abort("Pool object count zero!");
			return 0;
		}

		pool->object_count--;

		if (likely(pool->object_count != 0)) {
			/* other chunks still live inside this pool:
			 * the pool memory must stay around */
			return 0;
		}

		/*
		 * With object_count==0, a pool becomes a normal piece of
		 * memory to free. If it's allocated inside a pool, it needs
		 * to be freed as poolmem, else it needs to be just freed.
		 */
		ptr_to_free = pool;
	} else {
		ptr_to_free = tc;
	}

	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		_tc_free_poolmem(tc, location);
		return 0;
	}

	tc_memlimit_update_on_free(tc);

	TC_INVALIDATE_FULL_CHUNK(tc);
	free(ptr_to_free);
	return 0;
}
1211
1212/*
1213 internal talloc_free call
1214*/
1215static inline int _talloc_free_internal(void *ptr, const char *location)
1216{
1217 struct talloc_chunk *tc;
1218
1219 if (unlikely(ptr == NULL)) {
1220 return -1;
1221 }
1222
1223 /* possibly initialised the talloc fill value */
1224 if (unlikely(!talloc_fill.initialised)) {
1225 const char *fill = getenv(TALLOC_FILL_ENV);
1226 if (fill != NULL) {
1227 talloc_fill.enabled = true;
1228 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1229 }
1230 talloc_fill.initialised = true;
1231 }
1232
1233 tc = talloc_chunk_from_ptr(ptr);
1234 return _tc_free_internal(tc, location);
1235}
1236
/* forward declaration: used by _talloc_steal_internal() below to
 * recompute accounted sizes when a subtree crosses memlimit contexts */
static inline size_t _talloc_total_limit_size(const void *ptr,
					struct talloc_memlimit *old_limit,
					struct talloc_memlimit *new_limit);
1240
/*
  move a lump of memory from one talloc context to another return the
  ptr on success, or NULL if it could not be transferred.
  passing NULL as ptr will always return NULL with no side effects.
*/
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
{
	struct talloc_chunk *tc, *new_tc;
	size_t ctx_size = 0;

	if (unlikely(!ptr)) {
		return NULL;
	}

	if (unlikely(new_ctx == NULL)) {
		new_ctx = null_context;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (tc->limit != NULL) {

		ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);

		/* Decrement the memory limit from the source .. */
		talloc_memlimit_shrink(tc->limit->upper, ctx_size);

		if (tc->limit->parent == tc) {
			/* 'ptr' owns its own limit: keep it, but detach
			 * it from the old parent's limit chain */
			tc->limit->upper = NULL;
		} else {
			/* the limit belonged to the old parent chain */
			tc->limit = NULL;
		}
	}

	/* new_ctx can still be NULL here when null tracking is off
	 * (null_context == NULL): detach 'ptr' to be parentless */
	if (unlikely(new_ctx == NULL)) {
		if (tc->parent) {
			_TLIST_REMOVE(tc->parent->child, tc);
			if (tc->parent->child) {
				tc->parent->child->parent = tc->parent;
			}
		} else {
			if (tc->prev) tc->prev->next = tc->next;
			if (tc->next) tc->next->prev = tc->prev;
		}

		tc->parent = tc->next = tc->prev = NULL;
		return discard_const_p(void, ptr);
	}

	new_tc = talloc_chunk_from_ptr(new_ctx);

	/* stealing onto itself or onto its current parent is a no-op */
	if (unlikely(tc == new_tc || tc->parent == new_tc)) {
		return discard_const_p(void, ptr);
	}

	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	tc->parent = new_tc;
	if (new_tc->child) new_tc->child->parent = NULL;
	_TLIST_ADD(new_tc->child, tc);

	if (tc->limit || new_tc->limit) {
		ctx_size = _talloc_total_limit_size(ptr, tc->limit,
						    new_tc->limit);
		/* .. and increment it in the destination. */
		if (new_tc->limit) {
			talloc_memlimit_grow(new_tc->limit, ctx_size);
		}
	}

	return discard_const_p(void, ptr);
}
1322
/*
  move a lump of memory from one talloc context to another return the
  ptr on success, or NULL if it could not be transferred.
  passing NULL as ptr will always return NULL with no side effects.
*/
_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* stealing a pointer that still has references is ambiguous:
	 * warn (but proceed) unless it already hangs off new_ctx */
	if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
		struct talloc_reference_handle *h;

		talloc_log("WARNING: talloc_steal with references at %s\r\n",
			   location);

		for (h=tc->refs; h; h=h->next) {
			talloc_log("\treference at %s\r\n",
				   h->location);
		}
	}

#if 0
	/* this test is probably too expensive to have on in the
	   normal build, but it useful for debugging */
	if (talloc_is_parent(new_ctx, ptr)) {
		talloc_log("WARNING: stealing into talloc child at %s\r\n", location);
	}
#endif

	return _talloc_steal_internal(new_ctx, ptr);
}
1360
1361/*
1362 this is like a talloc_steal(), but you must supply the old
1363 parent. This resolves the ambiguity in a talloc_steal() which is
1364 called on a context that has more than one parent (via references)
1365
1366 The old parent can be either a reference or a parent
1367*/
1368_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1369{
1370 struct talloc_chunk *tc;
1371 struct talloc_reference_handle *h;
1372
1373 if (unlikely(ptr == NULL)) {
1374 return NULL;
1375 }
1376
1377 if (old_parent == talloc_parent(ptr)) {
1378 return _talloc_steal_internal(new_parent, ptr);
1379 }
1380
1381 tc = talloc_chunk_from_ptr(ptr);
1382 for (h=tc->refs;h;h=h->next) {
1383 if (talloc_parent(h) == old_parent) {
1384 if (_talloc_steal_internal(new_parent, h) != h) {
1385 return NULL;
1386 }
1387 return discard_const_p(void, ptr);
1388 }
1389 }
1390
1391 /* it wasn't a parent */
1392 return NULL;
1393}
1394
1395/*
1396 remove a secondary reference to a pointer. This undo's what
1397 talloc_reference() has done. The context and pointer arguments
1398 must match those given to a talloc_reference()
1399*/
1400static inline int talloc_unreference(const void *context, const void *ptr)
1401{
1402 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1403 struct talloc_reference_handle *h;
1404
1405 if (unlikely(context == NULL)) {
1406 context = null_context;
1407 }
1408
1409 for (h=tc->refs;h;h=h->next) {
1410 struct talloc_chunk *p = talloc_parent_chunk(h);
1411 if (p == NULL) {
1412 if (context == NULL) break;
1413 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1414 break;
1415 }
1416 }
1417 if (h == NULL) {
1418 return -1;
1419 }
1420
1421 return _talloc_free_internal(h, __location__);
1422}
1423
/*
  remove a specific parent context from a pointer. This is a more
  controlled variant of talloc_free()
*/
_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
{
	struct talloc_chunk *tc_p, *new_p, *tc_c;
	void *new_parent;

	if (ptr == NULL) {
		return -1;
	}

	if (context == NULL) {
		context = null_context;
	}

	/* if 'context' only held a reference, dropping that is enough */
	if (talloc_unreference(context, ptr) == 0) {
		return 0;
	}

	if (context != NULL) {
		tc_c = talloc_chunk_from_ptr(context);
	} else {
		tc_c = NULL;
	}
	/* otherwise 'context' must be the actual parent of 'ptr' */
	if (tc_c != talloc_parent_chunk(ptr)) {
		return -1;
	}

	tc_p = talloc_chunk_from_ptr(ptr);

	/* no remaining references: this is a plain free */
	if (tc_p->refs == NULL) {
		return _talloc_free_internal(ptr, __location__);
	}

	/* references remain: promote the first referrer to be the new
	 * parent, converting its reference into real ownership */
	new_p = talloc_parent_chunk(tc_p->refs);
	if (new_p) {
		new_parent = TC_PTR_FROM_CHUNK(new_p);
	} else {
		new_parent = NULL;
	}

	if (talloc_unreference(new_parent, ptr) != 0) {
		return -1;
	}

	_talloc_steal_internal(new_parent, ptr);

	return 0;
}
1475
1476/*
1477 add a name to an existing pointer - va_list version
1478*/
1479static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1480 const char *fmt,
1481 va_list ap) PRINTF_ATTRIBUTE(2,0);
1482
1483static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1484 const char *fmt,
1485 va_list ap)
1486{
1487 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1488 fmt,
1489 ap);
1490 if (likely(name_tc)) {
1491 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1492 _tc_set_name_const(name_tc, ".name");
1493 } else {
1494 tc->name = NULL;
1495 }
1496 return tc->name;
1497}
1498
1499/*
1500 add a name to an existing pointer
1501*/
1502_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1503{
1504 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1505 const char *name;
1506 va_list ap;
1507 va_start(ap, fmt);
1508 name = tc_set_name_v(tc, fmt, ap);
1509 va_end(ap);
1510 return name;
1511}
1512
1513
1514/*
1515 create a named talloc pointer. Any talloc pointer can be named, and
1516 talloc_named() operates just like talloc() except that it allows you
1517 to name the pointer.
1518*/
1519_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1520{
1521 va_list ap;
1522 void *ptr;
1523 const char *name;
1524 struct talloc_chunk *tc;
1525
1526 ptr = __talloc(context, size, &tc);
1527 if (unlikely(ptr == NULL)) return NULL;
1528
1529 va_start(ap, fmt);
1530 name = tc_set_name_v(tc, fmt, ap);
1531 va_end(ap);
1532
1533 if (unlikely(name == NULL)) {
1534 _talloc_free_internal(ptr, __location__);
1535 return NULL;
1536 }
1537
1538 return ptr;
1539}
1540
1541/*
1542 return the name of a talloc ptr, or "UNNAMED"
1543*/
1544static inline const char *__talloc_get_name(const void *ptr)
1545{
1546 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1547 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1548 return ".reference";
1549 }
1550 if (likely(tc->name)) {
1551 return tc->name;
1552 }
1553 return "UNNAMED";
1554}
1555
/* public accessor for a context's name; never returns NULL (falls back
 * to ".reference" or "UNNAMED") */
_PUBLIC_ const char *talloc_get_name(const void *ptr)
{
	return __talloc_get_name(ptr);
}
1560
1561/*
1562 check if a pointer has the given name. If it does, return the pointer,
1563 otherwise return NULL
1564*/
1565_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1566{
1567 const char *pname;
1568 if (unlikely(ptr == NULL)) return NULL;
1569 pname = __talloc_get_name(ptr);
1570 if (likely(pname == name || strcmp(pname, name) == 0)) {
1571 return discard_const_p(void, ptr);
1572 }
1573 return NULL;
1574}
1575
/* abort the process with a formatted type-mismatch diagnostic */
static void talloc_abort_type_mismatch(const char *location,
				       const char *name,
				       const char *expected)
{
	const char *msg;

	msg = talloc_asprintf(NULL,
			      "%s: Type mismatch: name[%s] expected[%s]",
			      location,
			      name ? name : "NULL",
			      expected);
	if (msg == NULL) {
		/* formatting the message needed memory we don't have;
		 * fall back to a static string */
		msg = "Type mismatch";
	}

	talloc_abort(msg);
}
1593
1594_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1595{
1596 const char *pname;
1597
1598 if (unlikely(ptr == NULL)) {
1599 talloc_abort_type_mismatch(location, NULL, name);
1600 return NULL;
1601 }
1602
1603 pname = __talloc_get_name(ptr);
1604 if (likely(pname == name || strcmp(pname, name) == 0)) {
1605 return discard_const_p(void, ptr);
1606 }
1607
1608 talloc_abort_type_mismatch(location, pname, name);
1609 return NULL;
1610}
1611
1612/*
1613 this is for compatibility with older versions of talloc
1614*/
1615_PUBLIC_ void *talloc_init(const char *fmt, ...)
1616{
1617 va_list ap;
1618 void *ptr;
1619 const char *name;
1620 struct talloc_chunk *tc;
1621
1622 ptr = __talloc(NULL, 0, &tc);
1623 if (unlikely(ptr == NULL)) return NULL;
1624
1625 va_start(ap, fmt);
1626 name = tc_set_name_v(tc, fmt, ap);
1627 va_end(ap);
1628
1629 if (unlikely(name == NULL)) {
1630 _talloc_free_internal(ptr, __location__);
1631 return NULL;
1632 }
1633
1634 return ptr;
1635}
1636
/* free every child of 'tc'. 'ptr' is the talloc pointer belonging to
 * 'tc' itself. A child whose destructor blocks the free is reparented
 * instead of being leaked (priority order described below). */
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
					      void *ptr,
					      const char *location)
{
	while (tc->child) {
		/* we need to work out who will own an abandoned child
		   if it cannot be freed. In priority order, the first
		   choice is owner of any remaining reference to this
		   pointer, the second choice is our parent, and the
		   final choice is the null context. */
		void *child = TC_PTR_FROM_CHUNK(tc->child);
		const void *new_parent = null_context;
		if (unlikely(tc->child->refs)) {
			struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
			if (p) new_parent = TC_PTR_FROM_CHUNK(p);
		}
		if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
			if (talloc_parent_chunk(child) != tc) {
				/*
				 * Destructor already reparented this child.
				 * No further reparenting needed.
				 */
				continue;
			}
			if (new_parent == null_context) {
				struct talloc_chunk *p = talloc_parent_chunk(ptr);
				if (p) new_parent = TC_PTR_FROM_CHUNK(p);
			}
			_talloc_steal_internal(new_parent, child);
		}
	}
}
1669
/*
  this is a replacement for the Samba3 talloc_destroy_pool functionality. It
  should probably not be used in new code. It's in here to keep the talloc
  code consistent across Samba 3 and 4.
*/
_PUBLIC_ void talloc_free_children(void *ptr)
{
	struct talloc_chunk *tc_name = NULL;
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* we do not want to free the context name if it is a child .. */
	if (likely(tc->child)) {
		/* search the child list for the chunk that backs tc->name */
		for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
			if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
		}
		if (tc_name) {
			/* temporarily unlink the name chunk so the free
			 * below does not destroy it */
			_TLIST_REMOVE(tc->child, tc_name);
			if (tc->child) {
				tc->child->parent = tc;
			}
		}
	}

	_tc_free_children_internal(tc, ptr, __location__);

	/* .. so we put it back after all other children have been freed */
	if (tc_name) {
		if (tc->child) {
			tc->child->parent = NULL;
		}
		tc_name->parent = tc;
		_TLIST_ADD(tc->child, tc_name);
	}
}
1710
1711/*
1712 Allocate a bit of memory as a child of an existing pointer
1713*/
1714_PUBLIC_ void *_talloc(const void *context, size_t size)
1715{
1716 struct talloc_chunk *tc;
1717 return __talloc(context, size, &tc);
1718}
1719
/*
  externally callable talloc_set_name_const()
*/
_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
{
	/* NOTE(review): the "_const" variant presumably stores 'name'
	 * without copying (callers pass string literals) -- confirm
	 * against _tc_set_name_const() */
	_tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
}
1727
/*
  create a named talloc pointer. Any talloc pointer can be named, and
  talloc_named() operates just like talloc() except that it allows you
  to name the pointer.

  Thin public wrapper around the internal _talloc_named_const().
*/
_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
{
	return _talloc_named_const(context, size, name);
}
1737
/*
  free a talloc pointer. This also frees all child pointers of this
  pointer recursively

  return 0 if the memory is actually freed, otherwise -1. The memory
  will not be freed if the ref_count is > 1 or the destructor (if
  any) returns non-zero
*/
_PUBLIC_ int _talloc_free(void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	// do not deallocate while handling interrupts!
	/* NOTE(review): there is no trailing ';' after OSMO_ASSERT()
	 * here -- confirm the macro expansion forms a complete
	 * statement on this platform */
	OSMO_ASSERT( !(SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) )

	if (unlikely(ptr == NULL)) {
		return -1;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (unlikely(tc->refs != NULL)) {
		struct talloc_reference_handle *h;

		if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
			/* in this case we do know which parent should
			   get this pointer, as there is really only
			   one parent */
			return talloc_unlink(null_context, ptr);
		}

		talloc_log("ERROR: talloc_free with references at %s\r\n",
			   location);

		for (h=tc->refs; h; h=h->next) {
			talloc_log("\treference at %s\r\n",
				   h->location);
		}
		return -1;
	}

	return _talloc_free_internal(ptr, location);
}
1781
1782
1783
/*
  A talloc version of realloc. The context argument is only used if
  ptr is NULL
*/
_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
{
	struct talloc_chunk *tc;
	void *new_ptr;
	bool malloced = false;
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t old_size = 0;
	size_t new_size = 0;

	/* size zero is equivalent to free() */
	if (unlikely(size == 0)) {
		talloc_unlink(context, ptr);
		return NULL;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* realloc(NULL) is equivalent to malloc() */
	if (ptr == NULL) {
		return _talloc_named_const(context, size, name);
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* don't allow realloc on referenced pointers */
	if (unlikely(tc->refs)) {
		return NULL;
	}

	/* don't let anybody try to realloc a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
		return NULL;
	}

	/* growing: check the memlimit before doing any work */
	if (tc->limit && (size > tc->size)) {
		if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
			errno = ENOMEM;
			return NULL;
		}
	}

	/* handle realloc inside a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
		pool_hdr = tc->pool;
	}

#if (ALWAYS_REALLOC == 0)
	/* don't shrink if we have less than 1k to gain */
	if (size < tc->size && tc->limit == NULL) {
		if (pool_hdr) {
			void *next_tc = tc_next_chunk(tc);
			TC_INVALIDATE_SHRINK_CHUNK(tc, size);
			tc->size = size;
			if (next_tc == pool_hdr->end) {
				/* note: tc->size has changed, so this works */
				pool_hdr->end = tc_next_chunk(tc);
			}
			return ptr;
		} else if ((tc->size - size) < 1024) {
			/*
			 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
			 * we would need to call TC_UNDEFINE_GROW_CHUNK()
			 * after each realloc call, which slows down
			 * testing a lot :-(.
			 *
			 * That is why we only mark memory as undefined here.
			 */
			TC_UNDEFINE_SHRINK_CHUNK(tc, size);

			/* do not shrink if we have less than 1k to gain */
			tc->size = size;
			return ptr;
		}
	} else if (tc->size == size) {
		/*
		 * do not change the pointer if it is exactly
		 * the same size.
		 */
		return ptr;
	}
#endif

	/*
	 * by resetting magic we catch users of the old memory
	 *
	 * We mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 *
	 * What else? We have to re-stamp back a valid normal magic
	 * on this memory once realloc() is done, as it will have done
	 * a memcpy() into the new valid memory. We can't do this in
	 * reverse as that would be a real use-after-free.
	 */
	_talloc_chunk_set_free(tc, NULL);

#if ALWAYS_REALLOC
	if (pool_hdr) {
		/* move the chunk within (or out of) its pool */
		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
		pool_hdr->object_count--;

		if (new_ptr == NULL) {
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
			TC_INVALIDATE_FULL_CHUNK(tc);
		}
	} else {
		/* We're doing malloc then free here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = malloc(size + TC_HDR_SIZE);
		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
			free(tc);
		}
	}
#else
	if (pool_hdr) {
		struct talloc_chunk *pool_tc;
		void *next_tc = tc_next_chunk(tc);
		size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
		size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
		size_t space_needed;
		size_t space_left;
		unsigned int chunk_count = pool_hdr->object_count;

		/* the pool chunk itself does not count as an object
		 * unless it has already been freed */
		pool_tc = talloc_chunk_from_pool(pool_hdr);
		if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
			chunk_count -= 1;
		}

		if (chunk_count == 1) {
			/*
			 * optimize for the case where 'tc' is the only
			 * chunk in the pool.
			 */
			char *start = tc_pool_first_chunk(pool_hdr);
			space_needed = new_chunk_size;
			space_left = (char *)tc_pool_end(pool_hdr) - start;

			if (space_left >= space_needed) {
				size_t old_used = TC_HDR_SIZE + tc->size;
				size_t new_used = TC_HDR_SIZE + size;
				new_ptr = start;

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
				{
					/*
					 * The area from
					 * start -> tc may have
					 * been freed and thus been marked as
					 * VALGRIND_MEM_NOACCESS. Set it to
					 * VALGRIND_MEM_UNDEFINED so we can
					 * copy into it without valgrind errors.
					 * We can't just mark
					 * new_ptr -> new_ptr + old_used
					 * as this may overlap on top of tc,
					 * (which is why we use memmove, not
					 * memcpy below) hence the MIN.
					 */
					size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
					VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
				}
#endif

				/* source and destination may overlap */
				memmove(new_ptr, tc, old_used);

				tc = (struct talloc_chunk *)new_ptr;
				TC_UNDEFINE_GROW_CHUNK(tc, size);

				/*
				 * first we do not align the pool pointer
				 * because we want to invalidate the padding
				 * too.
				 */
				pool_hdr->end = new_used + (char *)new_ptr;
				tc_invalidate_pool(pool_hdr);

				/* now the aligned pointer */
				pool_hdr->end = new_chunk_size + (char *)new_ptr;
				goto got_new_ptr;
			}

			next_tc = NULL;
		}

		if (new_chunk_size == old_chunk_size) {
			/* same rounded size: reuse the chunk in place */
			TC_UNDEFINE_GROW_CHUNK(tc, size);
			_talloc_chunk_set_not_free(tc);
			tc->size = size;
			return ptr;
		}

		if (next_tc == pool_hdr->end) {
			/*
			 * optimize for the case where 'tc' is the last
			 * chunk in the pool.
			 */
			space_needed = new_chunk_size - old_chunk_size;
			space_left = tc_pool_space_left(pool_hdr);

			if (space_left >= space_needed) {
				TC_UNDEFINE_GROW_CHUNK(tc, size);
				_talloc_chunk_set_not_free(tc);
				tc->size = size;
				pool_hdr->end = tc_next_chunk(tc);
				return ptr;
			}
		}

		/* no in-place option: allocate elsewhere and copy */
		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);

		if (new_ptr == NULL) {
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);

			_tc_free_poolmem(tc, __location__ "_talloc_realloc");
		}
	}
	else {
		/* We're doing realloc here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = realloc(tc, size + TC_HDR_SIZE);
	}
got_new_ptr:
#endif
	if (unlikely(!new_ptr)) {
		/*
		 * Ok, this is a strange spot. We have to put back
		 * the old talloc_magic and any flags, except the
		 * TALLOC_FLAG_FREE as this was not free'ed by the
		 * realloc() call after all
		 */
		_talloc_chunk_set_not_free(tc);
		return NULL;
	}

	/*
	 * tc is now the new value from realloc(), the old memory we
	 * can't access any more and was preemptively marked as
	 * TALLOC_FLAG_FREE before the call. Now we mark it as not
	 * free again
	 */
	tc = (struct talloc_chunk *)new_ptr;
	_talloc_chunk_set_not_free(tc);
	if (malloced) {
		tc->flags &= ~TALLOC_FLAG_POOLMEM;
	}
	/* re-link neighbours to the moved chunk header */
	if (tc->parent) {
		tc->parent->child = tc;
	}
	if (tc->child) {
		tc->child->parent = tc;
	}

	if (tc->prev) {
		tc->prev->next = tc;
	}
	if (tc->next) {
		tc->next->prev = tc;
	}

	if (new_size > old_size) {
		talloc_memlimit_grow(tc->limit, new_size - old_size);
	} else if (new_size < old_size) {
		talloc_memlimit_shrink(tc->limit, old_size - new_size);
	}

	tc->size = size;
	_tc_set_name_const(tc, name);

	return TC_PTR_FROM_CHUNK(tc);
}
2077
2078/*
2079 a wrapper around talloc_steal() for situations where you are moving a pointer
2080 between two structures, and want the old pointer to be set to NULL
2081*/
2082_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2083{
2084 const void **pptr = discard_const_p(const void *,_pptr);
2085 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2086 (*pptr) = NULL;
2087 return ret;
2088}
2089
/* selector for _talloc_total_mem_internal(): which aggregate to compute */
enum talloc_mem_count_type {
	TOTAL_MEM_SIZE,
	TOTAL_MEM_BLOCKS,
	TOTAL_MEM_LIMIT,
};
2095
/* walk a talloc subtree accumulating one of three statistics (selected
 * by 'type'). When old_limit/new_limit are non-NULL, memlimit pointers
 * equal to old_limit are rewritten to new_limit along the way (used when
 * a subtree moves between memlimit contexts). */
static inline size_t _talloc_total_mem_internal(const void *ptr,
						enum talloc_mem_count_type type,
						struct talloc_memlimit *old_limit,
						struct talloc_memlimit *new_limit)
{
	size_t total = 0;
	struct talloc_chunk *c, *tc;

	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (old_limit || new_limit) {
		if (tc->limit && tc->limit->upper == old_limit) {
			tc->limit->upper = new_limit;
		}
	}

	/* optimize in the memlimits case */
	if (type == TOTAL_MEM_LIMIT &&
	    tc->limit != NULL &&
	    tc->limit != old_limit &&
	    tc->limit->parent == tc) {
		return tc->limit->cur_size;
	}

	/* cycle guard: already being visited further up the recursion */
	if (tc->flags & TALLOC_FLAG_LOOP) {
		return 0;
	}

	tc->flags |= TALLOC_FLAG_LOOP;

	if (old_limit || new_limit) {
		if (old_limit == tc->limit) {
			tc->limit = new_limit;
		}
	}

	switch (type) {
	case TOTAL_MEM_SIZE:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			total = tc->size;
		}
		break;
	case TOTAL_MEM_BLOCKS:
		total++;
		break;
	case TOTAL_MEM_LIMIT:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			/*
			 * Don't count memory allocated from a pool
			 * when calculating limits. Only count the
			 * pool itself.
			 */
			if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
				if (tc->flags & TALLOC_FLAG_POOL) {
					/*
					 * If this is a pool, the allocated
					 * size is in the pool header, and
					 * remember to add in the prefix
					 * length.
					 */
					struct talloc_pool_hdr *pool_hdr
							= talloc_pool_from_chunk(tc);
					total = pool_hdr->poolsize +
							TC_HDR_SIZE +
							TP_HDR_SIZE;
				} else {
					total = tc->size + TC_HDR_SIZE;
				}
			}
		}
		break;
	}
	/* recurse into all children, accumulating */
	for (c = tc->child; c; c = c->next) {
		total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
						    old_limit, new_limit);
	}

	tc->flags &= ~TALLOC_FLAG_LOOP;

	return total;
}
2184
/*
  return the total size of a talloc pool (subtree)
*/
_PUBLIC_ size_t talloc_total_size(const void *ptr)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
}
2192
/*
  return the total number of blocks in a talloc pool (subtree)
*/
_PUBLIC_ size_t talloc_total_blocks(const void *ptr)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
}
2200
2201/*
2202 return the number of external references to a pointer
2203*/
2204_PUBLIC_ size_t talloc_reference_count(const void *ptr)
2205{
2206 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2207 struct talloc_reference_handle *h;
2208 size_t ret = 0;
2209
2210 for (h=tc->refs;h;h=h->next) {
2211 ret++;
2212 }
2213 return ret;
2214}
2215
/*
  report on memory usage by all children of a pointer, giving a full tree view
*/
_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
			    void (*callback)(const void *ptr,
					     int depth, int max_depth,
					     int is_ref,
					     void *private_data),
			    void *private_data)
{
	struct talloc_chunk *c, *tc;

	/* NULL means "report on the null context", if tracked */
	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) return;

	tc = talloc_chunk_from_ptr(ptr);

	/* already being visited further up the recursion: avoid cycles */
	if (tc->flags & TALLOC_FLAG_LOOP) {
		return;
	}

	callback(ptr, depth, max_depth, 0, private_data);

	/* max_depth < 0 means unlimited depth */
	if (max_depth >= 0 && depth >= max_depth) {
		return;
	}

	tc->flags |= TALLOC_FLAG_LOOP;
	for (c=tc->child;c;c=c->next) {
		if (c->name == TALLOC_MAGIC_REFERENCE) {
			/* references are reported but not descended into */
			struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
			callback(h->ptr, depth + 1, max_depth, 1, private_data);
		} else {
			talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
		}
	}
	tc->flags &= ~TALLOC_FLAG_LOOP;
}
2256
2257static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2258{
2259 const char *name = __talloc_get_name(ptr);
2260 struct talloc_chunk *tc;
2261 FILE *f = (FILE *)_f;
2262
2263 if (is_ref) {
Harald Welte189f43d2019-04-17 21:19:04 +02002264 fprintf(f, "%*sreference to: %s\r\n", depth*4, "", name);
Harald Welte5df0be62019-04-17 20:54:29 +02002265 return;
2266 }
2267
2268 tc = talloc_chunk_from_ptr(ptr);
2269 if (tc->limit && tc->limit->parent == tc) {
2270 fprintf(f, "%*s%-30s is a memlimit context"
Harald Welte189f43d2019-04-17 21:19:04 +02002271 " (max_size = %lu bytes, cur_size = %lu bytes)\r\n",
Harald Welte5df0be62019-04-17 20:54:29 +02002272 depth*4, "",
2273 name,
2274 (unsigned long)tc->limit->max_size,
2275 (unsigned long)tc->limit->cur_size);
2276 }
2277
2278 if (depth == 0) {
Harald Welte189f43d2019-04-17 21:19:04 +02002279 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\r",
Harald Welte5df0be62019-04-17 20:54:29 +02002280 (max_depth < 0 ? "full " :""), name,
2281 (unsigned long)talloc_total_size(ptr),
2282 (unsigned long)talloc_total_blocks(ptr));
2283 return;
2284 }
2285
Harald Welte189f43d2019-04-17 21:19:04 +02002286 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\r\n",
Harald Welte5df0be62019-04-17 20:54:29 +02002287 depth*4, "",
2288 name,
2289 (unsigned long)talloc_total_size(ptr),
2290 (unsigned long)talloc_total_blocks(ptr),
2291 (int)talloc_reference_count(ptr), ptr);
2292
2293#if 0
2294 fprintf(f, "content: ");
2295 if (talloc_total_size(ptr)) {
2296 int tot = talloc_total_size(ptr);
2297 int i;
2298
2299 for (i = 0; i < tot; i++) {
2300 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2301 fprintf(f, "%c", ((char *)ptr)[i]);
2302 } else {
2303 fprintf(f, "~%02x", ((char *)ptr)[i]);
2304 }
2305 }
2306 }
Harald Welte189f43d2019-04-17 21:19:04 +02002307 fprintf(f, "\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002308#endif
2309}
2310
2311/*
2312 report on memory usage by all children of a pointer, giving a full tree view
2313*/
2314_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2315{
2316 if (f) {
2317 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2318 fflush(f);
2319 }
2320}
2321
/*
  report on memory usage by all children of a pointer, giving a full tree view

  Convenience wrapper around talloc_report_depth_file(): max_depth of -1
  means "no depth limit", i.e. the complete subtree is printed.
*/
_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
{
	talloc_report_depth_file(ptr, 0, -1, f);
}
2329
/*
  report on memory usage by all children of a pointer

  Like talloc_report_full() but only one level deep (max_depth = 1):
  the context itself plus its immediate children.
*/
_PUBLIC_ void talloc_report(const void *ptr, FILE *f)
{
	talloc_report_depth_file(ptr, 0, 1, f);
}
2337
2338/*
2339 enable tracking of the NULL context
2340*/
2341_PUBLIC_ void talloc_enable_null_tracking(void)
2342{
2343 if (null_context == NULL) {
2344 null_context = _talloc_named_const(NULL, 0, "null_context");
2345 if (autofree_context != NULL) {
2346 talloc_reparent(NULL, null_context, autofree_context);
2347 }
2348 }
2349}
2350
2351/*
2352 enable tracking of the NULL context, not moving the autofree context
2353 into the NULL context. This is needed for the talloc testsuite
2354*/
2355_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2356{
2357 if (null_context == NULL) {
2358 null_context = _talloc_named_const(NULL, 0, "null_context");
2359 }
2360}
2361
/*
  disable tracking of the NULL context

  Any chunks currently parented under the hidden null_context are
  detached (their parent/prev links back to it are cleared) so they
  become genuine top-level allocations again, then the null_context
  chunk itself is freed.
*/
_PUBLIC_ void talloc_disable_null_tracking(void)
{
	if (null_context != NULL) {
		/* we have to move any children onto the real NULL
		   context */
		struct talloc_chunk *tc, *tc2;
		tc = talloc_chunk_from_ptr(null_context);
		/* detach every child in the child list from null_context */
		for (tc2 = tc->child; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		/* also scrub any sibling links that still point at it */
		for (tc2 = tc->next; tc2; tc2=tc2->next) {
			if (tc2->parent == tc) tc2->parent = NULL;
			if (tc2->prev == tc) tc2->prev = NULL;
		}
		/* null_context must look childless before it is freed,
		 * otherwise talloc_free() would free the detached chunks */
		tc->child = NULL;
		tc->next = NULL;
	}
	talloc_free(null_context);
	null_context = NULL;
}
2386
/*
  enable leak reporting on exit

  Turns on NULL-context tracking (so leaks have somewhere to hang),
  requests a summary report at exit, and registers the atexit handler.
  The call order matters: tracking must exist before the report flag
  is useful.
*/
_PUBLIC_ void talloc_enable_leak_report(void)
{
	talloc_enable_null_tracking();
	talloc_report_null = true;
	talloc_setup_atexit();
}
2396
/*
  enable full leak reporting on exit

  Same as talloc_enable_leak_report(), but the exit-time report is the
  full recursive tree (talloc_report_full) rather than a one-level summary.
*/
_PUBLIC_ void talloc_enable_leak_report_full(void)
{
	talloc_enable_null_tracking();
	talloc_report_null_full = true;
	talloc_setup_atexit();
}
2406
2407/*
2408 talloc and zero memory.
2409*/
2410_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2411{
2412 void *p = _talloc_named_const(ctx, size, name);
2413
2414 if (p) {
2415 memset(p, '\0', size);
2416 }
2417
2418 return p;
2419}
2420
2421/*
2422 memdup with a talloc.
2423*/
2424_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2425{
2426 void *newp = _talloc_named_const(t, size, name);
2427
2428 if (likely(newp)) {
2429 memcpy(newp, p, size);
2430 }
2431
2432 return newp;
2433}
2434
/*
  Shared backend for talloc_strdup()/talloc_strndup(): allocate len+1
  bytes on 't', copy 'len' bytes from 'p', NUL-terminate, and name the
  chunk after its own contents.  Returns NULL on allocation failure.
*/
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	struct talloc_chunk *tc;
	char *dup = (char *)__talloc(t, len + 1, &tc);

	if (unlikely(dup == NULL)) {
		return NULL;
	}

	memcpy(dup, p, len);
	dup[len] = '\0';

	/* the string is its own talloc name */
	_tc_set_name_const(tc, dup);
	return dup;
}
2449
2450/*
2451 strdup with a talloc
2452*/
2453_PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2454{
2455 if (unlikely(!p)) return NULL;
2456 return __talloc_strlendup(t, p, strlen(p));
2457}
2458
2459/*
2460 strndup with a talloc
2461*/
2462_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2463{
2464 if (unlikely(!p)) return NULL;
2465 return __talloc_strlendup(t, p, strnlen(p, n));
2466}
2467
2468static inline char *__talloc_strlendup_append(char *s, size_t slen,
2469 const char *a, size_t alen)
2470{
2471 char *ret;
2472
2473 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2474 if (unlikely(!ret)) return NULL;
2475
2476 /* append the string and the trailing \0 */
2477 memcpy(&ret[slen], a, alen);
2478 ret[slen+alen] = 0;
2479
2480 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2481 return ret;
2482}
2483
2484/*
2485 * Appends at the end of the string.
2486 */
2487_PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2488{
2489 if (unlikely(!s)) {
2490 return talloc_strdup(NULL, a);
2491 }
2492
2493 if (unlikely(!a)) {
2494 return s;
2495 }
2496
2497 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2498}
2499
2500/*
2501 * Appends at the end of the talloc'ed buffer,
2502 * not the end of the string.
2503 */
2504_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2505{
2506 size_t slen;
2507
2508 if (unlikely(!s)) {
2509 return talloc_strdup(NULL, a);
2510 }
2511
2512 if (unlikely(!a)) {
2513 return s;
2514 }
2515
2516 slen = talloc_get_size(s);
2517 if (likely(slen > 0)) {
2518 slen--;
2519 }
2520
2521 return __talloc_strlendup_append(s, slen, a, strlen(a));
2522}
2523
2524/*
2525 * Appends at the end of the string.
2526 */
2527_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2528{
2529 if (unlikely(!s)) {
2530 return talloc_strndup(NULL, a, n);
2531 }
2532
2533 if (unlikely(!a)) {
2534 return s;
2535 }
2536
2537 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2538}
2539
2540/*
2541 * Appends at the end of the talloc'ed buffer,
2542 * not the end of the string.
2543 */
2544_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2545{
2546 size_t slen;
2547
2548 if (unlikely(!s)) {
2549 return talloc_strndup(NULL, a, n);
2550 }
2551
2552 if (unlikely(!a)) {
2553 return s;
2554 }
2555
2556 slen = talloc_get_size(s);
2557 if (likely(slen > 0)) {
2558 slen--;
2559 }
2560
2561 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2562}
2563
2564#ifndef HAVE_VA_COPY
2565#ifdef HAVE___VA_COPY
2566#define va_copy(dest, src) __va_copy(dest, src)
2567#else
2568#define va_copy(dest, src) (dest) = (src)
2569#endif
2570#endif
2571
2572static struct talloc_chunk *_vasprintf_tc(const void *t,
2573 const char *fmt,
2574 va_list ap) PRINTF_ATTRIBUTE(2,0);
2575
2576static struct talloc_chunk *_vasprintf_tc(const void *t,
2577 const char *fmt,
2578 va_list ap)
2579{
2580 int vlen;
2581 size_t len;
2582 char *ret;
2583 va_list ap2;
2584 struct talloc_chunk *tc;
2585 char buf[1024];
2586
2587 /* this call looks strange, but it makes it work on older solaris boxes */
2588 va_copy(ap2, ap);
2589 vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
2590 va_end(ap2);
2591 if (unlikely(vlen < 0)) {
2592 return NULL;
2593 }
2594 len = vlen;
2595 if (unlikely(len + 1 < len)) {
2596 return NULL;
2597 }
2598
2599 ret = (char *)__talloc(t, len+1, &tc);
2600 if (unlikely(!ret)) return NULL;
2601
2602 if (len < sizeof(buf)) {
2603 memcpy(ret, buf, len+1);
2604 } else {
2605 va_copy(ap2, ap);
2606 vsnprintf(ret, len+1, fmt, ap2);
2607 va_end(ap2);
2608 }
2609
2610 _tc_set_name_const(tc, ret);
2611 return tc;
2612}
2613
2614_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2615{
2616 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2617 if (tc == NULL) {
2618 return NULL;
2619 }
2620 return TC_PTR_FROM_CHUNK(tc);
2621}
2622
2623
/*
  Perform string formatting, and return a pointer to newly allocated
  memory holding the result, inside a memory pool.

  Variadic front-end to talloc_vasprintf(); returns NULL on error.
 */
_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = talloc_vasprintf(t, fmt, ap);
	va_end(ap);
	return ret;
}
2638
/*
  Shared backend for the vasprintf append functions: format 'fmt'/'ap'
  and append the result to 's' starting at offset 'slen', growing the
  talloc buffer as needed.  Returns the (possibly moved) string, 's'
  unchanged when nothing was formatted, or NULL on realloc failure.
*/
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	/* probe with a 1-byte buffer purely to learn the needed length */
	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	/* grow to hold old content + formatted text + NUL */
	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	/* second pass: format for real into the grown buffer */
	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	/* the string is its own talloc name */
	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
	return s;
}
2674
2675/**
2676 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2677 * and return @p s, which may have moved. Good for gradually
2678 * accumulating output into a string buffer. Appends at the end
2679 * of the string.
2680 **/
2681_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2682{
2683 if (unlikely(!s)) {
2684 return talloc_vasprintf(NULL, fmt, ap);
2685 }
2686
2687 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2688}
2689
2690/**
2691 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2692 * and return @p s, which may have moved. Always appends at the
2693 * end of the talloc'ed buffer, not the end of the string.
2694 **/
2695_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2696{
2697 size_t slen;
2698
2699 if (unlikely(!s)) {
2700 return talloc_vasprintf(NULL, fmt, ap);
2701 }
2702
2703 slen = talloc_get_size(s);
2704 if (likely(slen > 0)) {
2705 slen--;
2706 }
2707
2708 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2709}
2710
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a string buffer.

  Variadic front-end to talloc_vasprintf_append().
 */
_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append(s, fmt, ap);
	va_end(ap);
	return s;
}
2725
/*
  Realloc @p s to append the formatted result of @p fmt and return @p
  s, which may have moved. Good for gradually accumulating output
  into a buffer.

  Variadic front-end to talloc_vasprintf_append_buffer(); appends at
  the end of the talloc'ed buffer, not at the first NUL byte.
 */
_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	s = talloc_vasprintf_append_buffer(s, fmt, ap);
	va_end(ap);
	return s;
}
2740
2741/*
2742 alloc an array, checking for integer overflow in the array size
2743*/
2744_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2745{
2746 if (count >= MAX_TALLOC_SIZE/el_size) {
2747 return NULL;
2748 }
2749 return _talloc_named_const(ctx, el_size * count, name);
2750}
2751
2752/*
2753 alloc an zero array, checking for integer overflow in the array size
2754*/
2755_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2756{
2757 if (count >= MAX_TALLOC_SIZE/el_size) {
2758 return NULL;
2759 }
2760 return _talloc_zero(ctx, el_size * count, name);
2761}
2762
2763/*
2764 realloc an array, checking for integer overflow in the array size
2765*/
2766_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2767{
2768 if (count >= MAX_TALLOC_SIZE/el_size) {
2769 return NULL;
2770 }
2771 return _talloc_realloc(ctx, ptr, el_size * count, name);
2772}
2773
/*
  a function version of talloc_realloc(), so it can be passed as a function pointer
  to libraries that want a realloc function (a realloc function encapsulates
  all the basic capabilities of an allocation library, which is why this is useful)

  size == 0 frees, ptr == NULL allocates, like realloc(3).
*/
_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
{
	return _talloc_realloc(context, ptr, size, NULL);
}
2783
2784
/* Destructor for the autofree context: clears the cached global pointer
 * so a later talloc_autofree_context() call creates a fresh one.
 * 'ptr' is unused; returning 0 allows the free to proceed. */
static int talloc_autofree_destructor(void *ptr)
{
	autofree_context = NULL;
	return 0;
}
2790
/*
  return a context which will be auto-freed on exit
  this is useful for reducing the noise in leak reports

  The context is created lazily on first use; its destructor resets the
  cached global, and talloc_setup_atexit() arranges for it to be freed
  at program exit.
*/
_PUBLIC_ void *talloc_autofree_context(void)
{
	if (autofree_context == NULL) {
		autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
		talloc_set_destructor(autofree_context, talloc_autofree_destructor);
		talloc_setup_atexit();
	}
	return autofree_context;
}
2804
2805_PUBLIC_ size_t talloc_get_size(const void *context)
2806{
2807 struct talloc_chunk *tc;
2808
2809 if (context == NULL) {
2810 return 0;
2811 }
2812
2813 tc = talloc_chunk_from_ptr(context);
2814
2815 return tc->size;
2816}
2817
/*
  find a parent of this context that has the given name, if any

  Walks up the ancestor chain (including the context itself) comparing
  talloc names with strcmp(); returns the first match or NULL.
*/
_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (tc->name && strcmp(tc->name, name) == 0) {
			return TC_PTR_FROM_CHUNK(tc);
		}
		/* only the first sibling in a child list carries the parent
		 * pointer, so rewind along prev before climbing up */
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	return NULL;
}
2841
/*
  show the parentage of a context

  Prints one line per ancestor (starting with the context itself) to
  'file', then flushes it.  CRLF line endings as elsewhere in this fork.
*/
_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		fprintf(file, "talloc no parents for NULL\r\n");
		return;
	}

	tc = talloc_chunk_from_ptr(context);
	fprintf(file, "talloc parents of '%s'\r\n", __talloc_get_name(context));
	while (tc) {
		fprintf(file, "\t'%s'\r\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
		/* only the first sibling in a child list carries the parent
		 * pointer, so rewind along prev before climbing up */
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
		}
	}
	fflush(file);
}
2865
/*
  return 1 if ptr is a parent of context

  Walks at most 'depth' levels up the ancestor chain of 'context'
  (the context itself counts) looking for 'ptr'; the depth bound
  protects against corrupted/cyclic trees.
*/
static int _talloc_is_parent(const void *context, const void *ptr, int depth)
{
	struct talloc_chunk *tc;

	if (context == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(context);
	while (tc) {
		if (depth <= 0) {
			return 0;
		}
		if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
		/* only the first sibling in a child list carries the parent
		 * pointer, so rewind along prev before climbing up */
		while (tc && tc->prev) tc = tc->prev;
		if (tc) {
			tc = tc->parent;
			depth--;
		}
	}
	return 0;
}
2891
/*
  return 1 if ptr is a parent of context

  Public wrapper around _talloc_is_parent() with the default
  TALLOC_MAX_DEPTH recursion bound.
*/
_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
{
	return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
}
2899
/*
  return the total size of memory used by this context and all children

  Counted with memlimit semantics (TOTAL_MEM_LIMIT).  NOTE(review):
  _talloc_total_mem_internal is defined outside this view; when
  old_limit/new_limit are non-NULL it appears to also rewire chunk
  limit pointers during the walk — confirm against its definition
  before relying on this being a pure query.
*/
static inline size_t _talloc_total_limit_size(const void *ptr,
					struct talloc_memlimit *old_limit,
					struct talloc_memlimit *new_limit)
{
	return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
					  old_limit, new_limit);
}
2910
2911static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2912{
2913 struct talloc_memlimit *l;
2914
2915 for (l = limit; l != NULL; l = l->upper) {
2916 if (l->max_size != 0 &&
2917 ((l->max_size <= l->cur_size) ||
2918 (l->max_size - l->cur_size < size))) {
2919 return false;
2920 }
2921 }
2922
2923 return true;
2924}
2925
/*
  Update memory limits when freeing a talloc_chunk.

  Subtracts the chunk's accounted size (payload + header, plus the pool
  prefix for pool chunks) from every limit in the chain, and frees the
  limit struct itself if this chunk owned it.
*/
static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
{
	size_t limit_shrink_size;

	/* chunk is not under any memlimit: nothing to account */
	if (!tc->limit) {
		return;
	}

	/*
	 * Pool entries don't count. Only the pools
	 * themselves are counted as part of the memory
	 * limits. Note that this also takes care of
	 * nested pools which have both flags
	 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
	 */
	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		return;
	}

	/*
	 * If we are part of a memory limited context hierarchy
	 * we need to subtract the memory used from the counters
	 */

	limit_shrink_size = tc->size+TC_HDR_SIZE;

	/*
	 * If we're deallocating a pool, take into
	 * account the prefix size added for the pool.
	 */

	if (tc->flags & TALLOC_FLAG_POOL) {
		limit_shrink_size += TP_HDR_SIZE;
	}

	talloc_memlimit_shrink(tc->limit, limit_shrink_size);

	/* this chunk owns the limit struct (set via talloc_set_memlimit):
	 * the limit dies with the chunk */
	if (tc->limit->parent == tc) {
		free(tc->limit);
	}

	tc->limit = NULL;
}
2972
2973/*
2974 Increase memory limit accounting after a malloc/realloc.
2975*/
2976static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2977 size_t size)
2978{
2979 struct talloc_memlimit *l;
2980
2981 for (l = limit; l != NULL; l = l->upper) {
2982 size_t new_cur_size = l->cur_size + size;
2983 if (new_cur_size < l->cur_size) {
Harald Welte189f43d2019-04-17 21:19:04 +02002984 talloc_abort("logic error in talloc_memlimit_grow\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002985 return;
2986 }
2987 l->cur_size = new_cur_size;
2988 }
2989}
2990
2991/*
2992 Decrease memory limit accounting after a free/realloc.
2993*/
2994static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2995 size_t size)
2996{
2997 struct talloc_memlimit *l;
2998
2999 for (l = limit; l != NULL; l = l->upper) {
3000 if (l->cur_size < size) {
Harald Welte189f43d2019-04-17 21:19:04 +02003001 talloc_abort("logic error in talloc_memlimit_shrink\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02003002 return;
3003 }
3004 l->cur_size = l->cur_size - size;
3005 }
3006}
3007
/*
  Attach (or update) a memory limit of 'max_size' bytes to the talloc
  context 'ctx'.  Returns 0 on success, 1 on malloc failure.
*/
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	/* ctx already owns a limit: just adjust the cap in place */
	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	/* any limit currently in effect comes from an ancestor; it will
	 * become the new limit's upper level */
	orig_limit = tc->limit;

	/* limit structs are raw malloc, not talloc: they are freed in
	 * tc_memlimit_update_on_free() when the owning chunk dies */
	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	/* NOTE(review): this call appears to do double duty — it totals the
	 * subtree for cur_size and (presumably, via _talloc_total_mem_internal)
	 * rewires the chunks' limit pointers from tc->limit to the new limit;
	 * confirm against _talloc_total_mem_internal before refactoring. */
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	if (orig_limit) {
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;
	}

	return 0;
}