blob: 790f657b79b5ea749a1934e149031e75d738dce8 [file] [log] [blame]
Harald Welte5df0be62019-04-17 20:54:29 +02001/*
2 Samba Unix SMB/CIFS implementation.
3
4 Samba trivial allocation library - new interface
5
6 NOTE: Please read talloc_guide.txt for full documentation
7
8 Copyright (C) Andrew Tridgell 2004
9 Copyright (C) Stefan Metzmacher 2006
10
11 ** NOTE! The following LGPL license applies to the talloc
12 ** library. This does NOT imply that all of Samba is released
13 ** under the LGPL
14
15 This library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Lesser General Public
17 License as published by the Free Software Foundation; either
18 version 3 of the License, or (at your option) any later version.
19
20 This library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Lesser General Public License for more details.
24
25 You should have received a copy of the GNU Lesser General Public
26 License along with this library; if not, see <http://www.gnu.org/licenses/>.
27*/
28
29/*
30 inspired by http://swapped.cc/halloc/
31*/
32
Eric Wildff5c3902019-10-17 20:21:44 +020033#include <parts.h>
34#include <assert.h>
35#include <osmocom/core/utils.h>
36
Harald Welte5df0be62019-04-17 20:54:29 +020037#include "replace.h"
38#include "talloc.h"
39
40#ifdef HAVE_SYS_AUXV_H
41#include <sys/auxv.h>
42#endif
43
44#if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR)
45#error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR"
46#endif
47
48#if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR)
49#error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR"
50#endif
51
52/* Special macros that are no-ops except when run under Valgrind on
53 * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
54#ifdef HAVE_VALGRIND_MEMCHECK_H
55 /* memcheck.h includes valgrind.h */
56#include <valgrind/memcheck.h>
57#elif defined(HAVE_VALGRIND_H)
58#include <valgrind.h>
59#endif
60
61/* use this to force every realloc to change the pointer, to stress test
62 code that might not cope */
63#define ALWAYS_REALLOC 0
64
65
66#define MAX_TALLOC_SIZE 0x10000000
67
68#define TALLOC_FLAG_FREE 0x01
69#define TALLOC_FLAG_LOOP 0x02
70#define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */
71#define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */
72
73/*
74 * Bits above this are random, used to make it harder to fake talloc
75 * headers during an attack. Try not to change this without good reason.
76 */
77#define TALLOC_FLAG_MASK 0x0F
78
79#define TALLOC_MAGIC_REFERENCE ((const char *)1)
80
81#define TALLOC_MAGIC_BASE 0xe814ec70
82#define TALLOC_MAGIC_NON_RANDOM ( \
83 ~TALLOC_FLAG_MASK & ( \
84 TALLOC_MAGIC_BASE + \
85 (TALLOC_BUILD_VERSION_MAJOR << 24) + \
86 (TALLOC_BUILD_VERSION_MINOR << 16) + \
87 (TALLOC_BUILD_VERSION_RELEASE << 8)))
88static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM;
89
90/* by default we abort when given a bad pointer (such as when talloc_free() is called
91 on a pointer that came from malloc() */
92#ifndef TALLOC_ABORT
93#define TALLOC_ABORT(reason) abort()
94#endif
95
96#ifndef discard_const_p
97#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T)
98# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr)))
99#else
100# define discard_const_p(type, ptr) ((type *)(ptr))
101#endif
102#endif
103
104/* these macros gain us a few percent of speed on gcc */
105#if (__GNUC__ >= 3)
106/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
107 as its first argument */
108#ifndef likely
109#define likely(x) __builtin_expect(!!(x), 1)
110#endif
111#ifndef unlikely
112#define unlikely(x) __builtin_expect(!!(x), 0)
113#endif
114#else
115#ifndef likely
116#define likely(x) (x)
117#endif
118#ifndef unlikely
119#define unlikely(x) (x)
120#endif
121#endif
122
123/* this null_context is only used if talloc_enable_leak_report() or
124 talloc_enable_leak_report_full() is called, otherwise it remains
125 NULL
126*/
127static void *null_context;
128static bool talloc_report_null;
129static bool talloc_report_null_full;
130static void *autofree_context;
131
132static void talloc_setup_atexit(void);
133
134/* used to enable fill of memory on free, which can be useful for
135 * catching use after free errors when valgrind is too slow
136 */
137static struct {
138 bool initialised;
139 bool enabled;
140 uint8_t fill_value;
141} talloc_fill;
142
143#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
144
145/*
146 * do not wipe the header, to allow the
147 * double-free logic to still work
148 */
149#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
150 if (unlikely(talloc_fill.enabled)) { \
151 size_t _flen = (_tc)->size; \
152 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
153 memset(_fptr, talloc_fill.fill_value, _flen); \
154 } \
155} while (0)
156
157#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the whole chunk as not accessible */
159#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
160 size_t _flen = TC_HDR_SIZE + (_tc)->size; \
161 char *_fptr = (char *)(_tc); \
162 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
163} while(0)
164#else
165#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
166#endif
167
168#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
169 TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
170 TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
171} while (0)
172
173#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
174 if (unlikely(talloc_fill.enabled)) { \
175 size_t _flen = (_tc)->size - (_new_size); \
176 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
177 _fptr += (_new_size); \
178 memset(_fptr, talloc_fill.fill_value, _flen); \
179 } \
180} while (0)
181
182#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
/* Mark the unused bytes not accessible */
184#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
185 size_t _flen = (_tc)->size - (_new_size); \
186 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
187 _fptr += (_new_size); \
188 VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
189} while (0)
190#else
191#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
192#endif
193
194#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
195 TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
196 TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
197} while (0)
198
199#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
200 if (unlikely(talloc_fill.enabled)) { \
201 size_t _flen = (_tc)->size - (_new_size); \
202 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
203 _fptr += (_new_size); \
204 memset(_fptr, talloc_fill.fill_value, _flen); \
205 } \
206} while (0)
207
208#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
209/* Mark the unused bytes as undefined */
210#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
211 size_t _flen = (_tc)->size - (_new_size); \
212 char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \
213 _fptr += (_new_size); \
214 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
215} while (0)
216#else
217#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
218#endif
219
220#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \
221 TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \
222 TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
223} while (0)
224
225#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
226/* Mark the new bytes as undefined */
227#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
228 size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
229 size_t _new_used = TC_HDR_SIZE + (_new_size); \
230 size_t _flen = _new_used - _old_used; \
231 char *_fptr = _old_used + (char *)(_tc); \
232 VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
233} while (0)
234#else
235#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
236#endif
237
238#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
239 TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
240} while (0)
241
242struct talloc_reference_handle {
243 struct talloc_reference_handle *next, *prev;
244 void *ptr;
245 const char *location;
246};
247
248struct talloc_memlimit {
249 struct talloc_chunk *parent;
250 struct talloc_memlimit *upper;
251 size_t max_size;
252 size_t cur_size;
253};
254
255static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size);
256static inline void talloc_memlimit_grow(struct talloc_memlimit *limit,
257 size_t size);
258static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit,
259 size_t size);
260static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc);
261
262static inline void _tc_set_name_const(struct talloc_chunk *tc,
263 const char *name);
264static struct talloc_chunk *_vasprintf_tc(const void *t,
265 const char *fmt,
266 va_list ap);
267
268typedef int (*talloc_destructor_t)(void *);
269
270struct talloc_pool_hdr;
271
272struct talloc_chunk {
273 /*
274 * flags includes the talloc magic, which is randomised to
275 * make overwrite attacks harder
276 */
277 unsigned flags;
278
279 /*
280 * If you have a logical tree like:
281 *
282 * <parent>
283 * / | \
284 * / | \
285 * / | \
286 * <child 1> <child 2> <child 3>
287 *
288 * The actual talloc tree is:
289 *
290 * <parent>
291 * |
292 * <child 1> - <child 2> - <child 3>
293 *
294 * The children are linked with next/prev pointers, and
295 * child 1 is linked to the parent with parent/child
296 * pointers.
297 */
298
299 struct talloc_chunk *next, *prev;
300 struct talloc_chunk *parent, *child;
301 struct talloc_reference_handle *refs;
302 talloc_destructor_t destructor;
303 const char *name;
304 size_t size;
305
306 /*
307 * limit semantics:
308 * if 'limit' is set it means all *new* children of the context will
	 * be limited to a total aggregate size of max_size for memory
310 * allocations.
311 * cur_size is used to keep track of the current use
312 */
313 struct talloc_memlimit *limit;
314
315 /*
316 * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool"
317 * is a pointer to the struct talloc_chunk of the pool that it was
318 * allocated from. This way children can quickly find the pool to chew
319 * from.
320 */
321 struct talloc_pool_hdr *pool;
322};
323
324/* 16 byte alignment seems to keep everyone happy */
325#define TC_ALIGN16(s) (((s)+15)&~15)
326#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
327#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
328
/* Return the major version of the talloc ABI this library was built with. */
_PUBLIC_ int talloc_version_major(void)
{
	return TALLOC_VERSION_MAJOR;
}
333
/* Return the minor version of the talloc ABI this library was built with. */
_PUBLIC_ int talloc_version_minor(void)
{
	return TALLOC_VERSION_MINOR;
}
338
/* Expose the current (possibly randomised) talloc magic for the test suite. */
_PUBLIC_ int talloc_test_get_magic(void)
{
	return talloc_magic;
}
343
/*
 * Stamp a chunk header as freed.
 *
 * tc:       chunk header to mark
 * location: __location__ string of the caller, or NULL to keep tc->name
 */
static inline void _talloc_chunk_set_free(struct talloc_chunk *tc,
					  const char *location)
{
	/*
	 * Mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 *
	 */
	tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE
		| (tc->flags & TALLOC_FLAG_MASK);

	/* we mark the freed memory with where we called the free
	 * from. This means on a double free error we can report where
	 * the first free came from
	 */
	if (location) {
		tc->name = location;
	}
}
367
/*
 * Re-stamp a chunk as live (used when pool memory is recycled and
 * around realloc()), restoring the runtime magic in its flags word.
 */
static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc)
{
	/*
	 * Mark this memory as not free.
	 *
	 * Why? This is memory either in a pool (and so available for
	 * talloc's re-use or after the realloc(). We need to mark
	 * the memory as free() before any realloc() call as we can't
	 * write to the memory after that.
	 *
	 * We put back the normal magic instead of the 'not random'
	 * magic.
	 */

	tc->flags = talloc_magic |
		((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE);
}
385
/* Callback used by talloc_log(); NULL means logging is disabled. */
static void (*talloc_log_fn)(const char *message);

/* Install the function that receives talloc warning/error messages. */
_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message))
{
	talloc_log_fn = log_fn;
}
392
#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE
/*
 * Library constructor: randomise the talloc magic at load time so
 * that forged chunk headers are harder to construct.
 */
void talloc_lib_init(void) __attribute__((constructor));
void talloc_lib_init(void)
{
	uint32_t random_value;
#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM)
	uint8_t *p;
	/*
	 * Use the kernel-provided random values used for
	 * ASLR. This won't change per-exec, which is ideal for us
	 */
	p = (uint8_t *) getauxval(AT_RANDOM);
	if (p) {
		/*
		 * We get 16 bytes from getauxval. By calling rand(),
		 * a totally insecure PRNG, but one that will
		 * deterministically have a different value when called
		 * twice, we ensure that if two talloc-like libraries
		 * are somehow loaded in the same address space, that
		 * because we choose different bytes, we will keep the
		 * protection against collision of multiple talloc
		 * libs.
		 *
		 * This protection is important because the effects of
		 * passing a talloc pointer from one to the other may
		 * be very hard to determine.
		 */
		int offset = rand() % (16 - sizeof(random_value));
		memcpy(&random_value, p + offset, sizeof(random_value));
	} else
#endif
	{
		/*
		 * Otherwise, hope the location we are loaded in
		 * memory is randomised by someone else
		 */
		random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF);
	}
	/* keep the low flag bits clear: flags and magic share one word */
	talloc_magic = random_value & ~TALLOC_FLAG_MASK;
}
#else
#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available"
#endif
436
437static void talloc_lib_atexit(void)
438{
439 TALLOC_FREE(autofree_context);
440
441 if (talloc_total_size(null_context) == 0) {
442 return;
443 }
444
445 if (talloc_report_null_full) {
446 talloc_report_full(null_context, stderr);
447 } else if (talloc_report_null) {
448 talloc_report(null_context, stderr);
449 }
450}
451
452static void talloc_setup_atexit(void)
453{
454 static bool done;
455
456 if (done) {
457 return;
458 }
459
460 atexit(talloc_lib_atexit);
461 done = true;
462}
463
464static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2);
465static void talloc_log(const char *fmt, ...)
466{
467 va_list ap;
468 char *message;
469
470 if (!talloc_log_fn) {
471 return;
472 }
473
474 va_start(ap, fmt);
475 message = talloc_vasprintf(NULL, fmt, ap);
476 va_end(ap);
477
478 talloc_log_fn(message);
479 talloc_free(message);
480}
481
/* Default log callback: write the message verbatim to stderr. */
static void talloc_log_stderr(const char *message)
{
	fputs(message, stderr);
}
486
/* Convenience helper: route talloc log messages to stderr. */
_PUBLIC_ void talloc_set_log_stderr(void)
{
	talloc_set_log_fn(talloc_log_stderr);
}
491
/* Handler invoked on fatal errors; NULL selects TALLOC_ABORT(). */
static void (*talloc_abort_fn)(const char *reason);

/* Install a handler called instead of abort() on fatal talloc errors. */
_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason))
{
	talloc_abort_fn = abort_fn;
}
498
499static void talloc_abort(const char *reason)
500{
Harald Welte189f43d2019-04-17 21:19:04 +0200501 talloc_log("%s\r\n", reason);
Harald Welte5df0be62019-04-17 20:54:29 +0200502
503 if (!talloc_abort_fn) {
504 TALLOC_ABORT(reason);
505 }
506
507 talloc_abort_fn(reason);
508}
509
/* Fatal-error helper: chunk magic matched an already-freed chunk. */
static void talloc_abort_access_after_free(void)
{
	talloc_abort("Bad talloc magic value - access after free");
}
514
/* Fatal-error helper: pointer does not look like talloc memory at all. */
static void talloc_abort_unknown_value(void)
{
	talloc_abort("Bad talloc magic value - unknown value");
}
519
/* panic if we get a bad magic value */
/*
 * Map a user pointer back to its talloc_chunk header, validating the
 * magic.  Aborts on a freed chunk or a non-talloc pointer; the NULL
 * returns are only reached if the abort handler returns.
 */
static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr)
{
	const char *pp = (const char *)ptr;
	struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE);
	if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) {
		/* a freed chunk carries the non-random magic plus FREE flag */
		if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK))
		    == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) {
			talloc_log("talloc: access after free error - first free may be at %s\r\n", tc->name);
			talloc_abort_access_after_free();
			return NULL;
		}

		talloc_abort_unknown_value();
		return NULL;
	}
	return tc;
}
538
539/* hook into the front of the list */
540#define _TLIST_ADD(list, p) \
541do { \
542 if (!(list)) { \
543 (list) = (p); \
544 (p)->next = (p)->prev = NULL; \
545 } else { \
546 (list)->prev = (p); \
547 (p)->next = (list); \
548 (p)->prev = NULL; \
549 (list) = (p); \
550 }\
551} while (0)
552
553/* remove an element from a list - element doesn't have to be in list. */
554#define _TLIST_REMOVE(list, p) \
555do { \
556 if ((p) == (list)) { \
557 (list) = (p)->next; \
558 if (list) (list)->prev = NULL; \
559 } else { \
560 if ((p)->prev) (p)->prev->next = (p)->next; \
561 if ((p)->next) (p)->next->prev = (p)->prev; \
562 } \
563 if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \
564} while (0)
565
566
567/*
568 return the parent chunk of a pointer
569*/
570static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr)
571{
572 struct talloc_chunk *tc;
573
574 if (unlikely(ptr == NULL)) {
575 return NULL;
576 }
577
578 tc = talloc_chunk_from_ptr(ptr);
579 while (tc->prev) tc=tc->prev;
580
581 return tc->parent;
582}
583
584_PUBLIC_ void *talloc_parent(const void *ptr)
585{
586 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
587 return tc? TC_PTR_FROM_CHUNK(tc) : NULL;
588}
589
590/*
591 find parents name
592*/
593_PUBLIC_ const char *talloc_parent_name(const void *ptr)
594{
595 struct talloc_chunk *tc = talloc_parent_chunk(ptr);
596 return tc? tc->name : NULL;
597}
598
/*
  A pool carries an in-pool object count in the first 16 bytes.
  This is done to support talloc_steal() to a parent outside of the
  pool. The count includes the pool itself, so a talloc_free() on a pool will
  only destroy the pool if the count has dropped to zero. A talloc_free() of a
  pool member will reduce the count, and eventually also call free(3) on the
  pool memory.

  The object count is not put into "struct talloc_chunk" because it is only
  relevant for talloc pools and the alignment to 16 bytes would increase the
  memory footprint of each talloc chunk by those 16 bytes.
*/
611
/* Prefix header stored immediately before a pool's talloc_chunk. */
struct talloc_pool_hdr {
	void *end;			/* first unused byte inside the pool */
	unsigned int object_count;	/* live objects, incl. the pool itself */
	size_t poolsize;		/* usable payload size of the pool */
};
617
618#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
619
/* Map a pool's talloc_chunk to the talloc_pool_hdr stored just before it. */
static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
{
	return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
}
624
/* Inverse of talloc_pool_from_chunk(): the chunk follows the pool prefix. */
static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
{
	return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
}
629
630static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
631{
632 struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
633 return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize;
634}
635
636static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
637{
638 return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
639}
640
/* If tc is inside a pool, this gives the next neighbour. */
/* (pool members are laid out back-to-back, each 16-byte aligned) */
static inline void *tc_next_chunk(struct talloc_chunk *tc)
{
	return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size);
}
646
/* Address where the first member chunk of a pool lives. */
static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
{
	return tc_next_chunk(talloc_chunk_from_pool(pool_hdr));
}
652
653/* Mark the whole remaining pool as not accessable */
654static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
655{
656 size_t flen = tc_pool_space_left(pool_hdr);
657
658 if (unlikely(talloc_fill.enabled)) {
659 memset(pool_hdr->end, talloc_fill.fill_value, flen);
660 }
661
662#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
663 VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
664#endif
665}
666
/*
  Allocate from a pool
*/

/*
 * Try to carve a chunk of 'size' bytes (plus 'prefix_len' bytes in
 * front of the chunk header) out of the pool 'parent' belongs to.
 * Returns NULL when the parent is not pool-backed or the pool has no
 * room; the caller then falls back to malloc().
 */
static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent,
						 size_t size, size_t prefix_len)
{
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t space_left;
	struct talloc_chunk *result;
	size_t chunk_size;

	if (parent == NULL) {
		return NULL;
	}

	/* the parent is either a pool itself, or memory inside a pool */
	if (parent->flags & TALLOC_FLAG_POOL) {
		pool_hdr = talloc_pool_from_chunk(parent);
	}
	else if (parent->flags & TALLOC_FLAG_POOLMEM) {
		pool_hdr = parent->pool;
	}

	if (pool_hdr == NULL) {
		return NULL;
	}

	space_left = tc_pool_space_left(pool_hdr);

	/*
	 * Align size to 16 bytes
	 */
	chunk_size = TC_ALIGN16(size + prefix_len);

	if (space_left < chunk_size) {
		return NULL;
	}

	/* the chunk header starts after the requested prefix */
	result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
#endif

	pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);

	result->flags = talloc_magic | TALLOC_FLAG_POOLMEM;
	result->pool = pool_hdr;

	pool_hdr->object_count++;

	return result;
}
720
/*
  Allocate a bit of memory as a child of an existing pointer
*/
/*
 * Core allocator: allocates TC_HDR_SIZE + size (+ prefix_len bytes in
 * front of the header), links the new chunk under 'context' and
 * returns the user pointer.  '*tc_ret' receives the chunk header.
 * Returns NULL on oversize requests, overflow, memlimit rejection
 * (errno = ENOMEM) or malloc() failure.
 */
static inline void *__talloc_with_prefix(const void *context,
					size_t size,
					size_t prefix_len,
					struct talloc_chunk **tc_ret)
{
	struct talloc_chunk *tc = NULL;
	struct talloc_memlimit *limit = NULL;
	size_t total_len = TC_HDR_SIZE + size + prefix_len;
	struct talloc_chunk *parent = NULL;

	// do not allocate while handling interrupts!
	/* NOTE(review): OSMO_ASSERT is used without a trailing ';' here -
	 * presumably the macro supplies its own terminator; verify against
	 * the osmocom headers. */
	OSMO_ASSERT( !(SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) )

	if (unlikely(context == NULL)) {
		context = null_context;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* catches wrap-around of the total_len sum above */
	if (unlikely(total_len < TC_HDR_SIZE)) {
		return NULL;
	}

	if (likely(context != NULL)) {
		parent = talloc_chunk_from_ptr(context);

		if (parent->limit != NULL) {
			limit = parent->limit;
		}

		/* first try to satisfy the request from the parent's pool */
		tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len);
	}

	if (tc == NULL) {
		char *ptr;

		/*
		 * Only do the memlimit check/update on actual allocation.
		 */
		if (!talloc_memlimit_check(limit, total_len)) {
			errno = ENOMEM;
			return NULL;
		}

		ptr = malloc(total_len);
		if (unlikely(ptr == NULL)) {
			return NULL;
		}
		tc = (struct talloc_chunk *)(ptr + prefix_len);
		tc->flags = talloc_magic;
		tc->pool = NULL;

		talloc_memlimit_grow(limit, total_len);
	}

	tc->limit = limit;
	tc->size = size;
	tc->destructor = NULL;
	tc->child = NULL;
	tc->name = NULL;
	tc->refs = NULL;

	if (likely(context != NULL)) {
		/* hook into the front of the parent's child list */
		if (parent->child) {
			parent->child->parent = NULL;
			tc->next = parent->child;
			tc->next->prev = tc;
		} else {
			tc->next = NULL;
		}
		tc->parent = parent;
		tc->prev = NULL;
		parent->child = tc;
	} else {
		tc->next = tc->prev = tc->parent = NULL;
	}

	*tc_ret = tc;
	return TC_PTR_FROM_CHUNK(tc);
}
806
/* Common allocation path: __talloc_with_prefix() with no prefix bytes. */
static inline void *__talloc(const void *context,
			     size_t size,
			     struct talloc_chunk **tc)
{
	return __talloc_with_prefix(context, size, 0, tc);
}
813
/*
 * Create a talloc pool
 */

/*
 * Allocate a pool of 'size' usable bytes under 'context'.  The pool
 * chunk itself reports size 0; the pool header (object count, end
 * pointer, size) lives in the TP_HDR_SIZE prefix before the chunk.
 */
static inline void *_talloc_pool(const void *context, size_t size)
{
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *result;

	result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc);

	if (unlikely(result == NULL)) {
		return NULL;
	}

	pool_hdr = talloc_pool_from_chunk(tc);

	tc->flags |= TALLOC_FLAG_POOL;
	tc->size = 0;

	/* count includes the pool itself; 'end' starts at the payload */
	pool_hdr->object_count = 1;
	pool_hdr->end = result;
	pool_hdr->poolsize = size;

	tc_invalidate_pool(pool_hdr);

	return result;
}
843
/* Public wrapper: create a talloc pool of 'size' bytes under 'context'. */
_PUBLIC_ void *talloc_pool(const void *context, size_t size)
{
	return _talloc_pool(context, size);
}
848
/*
 * Create a talloc pool correctly sized for a basic size plus
 * a number of subobjects whose total size is given. Essentially
 * a custom allocator for talloc to reduce fragmentation.
 */

/*
 * Every size addition below is checked for wrap-around; any overflow
 * bails out and returns NULL rather than creating an undersized pool.
 */
_PUBLIC_ void *_talloc_pooled_object(const void *ctx,
				     size_t type_size,
				     const char *type_name,
				     unsigned num_subobjects,
				     size_t total_subobjects_size)
{
	size_t poolsize, subobjects_slack, tmp;
	struct talloc_chunk *tc;
	struct talloc_pool_hdr *pool_hdr;
	void *ret;

	poolsize = type_size + total_subobjects_size;

	if ((poolsize < type_size) || (poolsize < total_subobjects_size)) {
		goto overflow;
	}

	if (num_subobjects == UINT_MAX) {
		goto overflow;
	}
	num_subobjects += 1; /* the object body itself */

	/*
	 * Alignment can increase the pool size by at most 15 bytes per object
	 * plus alignment for the object itself
	 */
	subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects;
	if (subobjects_slack < num_subobjects) {
		goto overflow;
	}

	tmp = poolsize + subobjects_slack;
	if ((tmp < poolsize) || (tmp < subobjects_slack)) {
		goto overflow;
	}
	poolsize = tmp;

	ret = _talloc_pool(ctx, poolsize);
	if (ret == NULL) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(ret);
	tc->size = type_size;

	pool_hdr = talloc_pool_from_chunk(tc);

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
	VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size);
#endif

	/* reserve the object body itself inside the pool */
	pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size));

	_tc_set_name_const(tc, type_name);
	return ret;

overflow:
	return NULL;
}
914
/*
  setup a destructor to be called on free of a pointer
  the destructor should return 0 on success, or -1 on failure.
  if the destructor fails then the free is failed, and the memory can
  be continued to be used
*/
_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *))
{
	/* aborts via talloc_chunk_from_ptr() if 'ptr' is not talloc memory */
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
	tc->destructor = destructor;
}
926
927/*
928 increase the reference count on a piece of memory.
929*/
930_PUBLIC_ int talloc_increase_ref_count(const void *ptr)
931{
932 if (unlikely(!talloc_reference(null_context, ptr))) {
933 return -1;
934 }
935 return 0;
936}
937
/*
  helper for talloc_reference()

  this is referenced by a function pointer and should not be inline
*/
/* Unlinks the handle from the referenced chunk's refs list. */
static int talloc_reference_destructor(struct talloc_reference_handle *handle)
{
	struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr);
	_TLIST_REMOVE(ptr_tc->refs, handle);
	return 0;
}
949
/*
  more efficient way to add a name to a pointer - the name must point to a
  true string constant
*/
static inline void _tc_set_name_const(struct talloc_chunk *tc,
				      const char *name)
{
	/* no copy is made: 'name' must outlive the chunk */
	tc->name = name;
}
959
/*
 * Internal helper for talloc_named_const(): allocate 'size' bytes
 * under 'context' and label the chunk with the constant string
 * 'name'.  Returns NULL on allocation failure.
 */
static inline void *_talloc_named_const(const void *context, size_t size, const char *name)
{
	struct talloc_chunk *tc;
	void *ptr = __talloc(context, size, &tc);

	if (likely(ptr != NULL)) {
		_tc_set_name_const(tc, name);
	}

	return ptr;
}
977
/*
  make a secondary reference to a pointer, hanging off the given context.
  the pointer remains valid until both the original caller and this given
  context are freed.

  the major use for this is when two different structures need to reference the
  same underlying data, and you want to be able to free the two instances separately,
  and in either order
*/
_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;
	struct talloc_reference_handle *handle;
	if (unlikely(ptr == NULL)) return NULL;

	tc = talloc_chunk_from_ptr(ptr);
	/* the handle chunk is named TALLOC_MAGIC_REFERENCE so it can be
	 * recognised as a reference later */
	handle = (struct talloc_reference_handle *)_talloc_named_const(context,
						   sizeof(struct talloc_reference_handle),
						   TALLOC_MAGIC_REFERENCE);
	if (unlikely(handle == NULL)) return NULL;

	/* note that we hang the destructor off the handle, not the
	   main context as that allows the caller to still setup their
	   own destructor on the context if they want to */
	talloc_set_destructor(handle, talloc_reference_destructor);
	handle->ptr = discard_const_p(void, ptr);
	handle->location = location;
	_TLIST_ADD(tc->refs, handle);
	return handle->ptr;
}
1008
1009static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
1010
/*
 * Release a chunk that lives inside a talloc pool.  Pool memory is
 * only handed back to the system once the pool's object count drops
 * to zero; until then a freed member is either reclaimed (when it sits
 * at the current end of the pool) or left as waste.
 */
static inline void _tc_free_poolmem(struct talloc_chunk *tc,
				    const char *location)
{
	struct talloc_pool_hdr *pool;
	struct talloc_chunk *pool_tc;
	void *next_tc;

	pool = tc->pool;
	pool_tc = talloc_chunk_from_pool(pool);
	next_tc = tc_next_chunk(tc);

	_talloc_chunk_set_free(tc, location);

	TC_INVALIDATE_FULL_CHUNK(tc);

	/* freeing more objects than were allocated means corruption */
	if (unlikely(pool->object_count == 0)) {
		talloc_abort("Pool object count zero!");
		return;
	}

	pool->object_count--;

	if (unlikely(pool->object_count == 1
		     && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
		/*
		 * if there is just one object left in the pool
		 * and pool->flags does not have TALLOC_FLAG_FREE,
		 * it means this is the pool itself and
		 * the rest is available for new objects
		 * again.
		 */
		pool->end = tc_pool_first_chunk(pool);
		tc_invalidate_pool(pool);
		return;
	}

	if (unlikely(pool->object_count == 0)) {
		/*
		 * we mark the freed memory with where we called the free
		 * from. This means on a double free error we can report where
		 * the first free came from
		 */
		pool_tc->name = location;

		if (pool_tc->flags & TALLOC_FLAG_POOLMEM) {
			/* nested pool: recurse into the enclosing pool */
			_tc_free_poolmem(pool_tc, location);
		} else {
			/*
			 * The tc_memlimit_update_on_free()
			 * call takes into account the
			 * prefix TP_HDR_SIZE allocated before
			 * the pool talloc_chunk.
			 */
			tc_memlimit_update_on_free(pool_tc);
			TC_INVALIDATE_FULL_CHUNK(pool_tc);
			free(pool);
		}
		return;
	}

	if (pool->end == next_tc) {
		/*
		 * if pool->pool still points to end of
		 * 'tc' (which is stored in the 'next_tc' variable),
		 * we can reclaim the memory of 'tc'.
		 */
		pool->end = tc;
		return;
	}

	/*
	 * Do nothing. The memory is just "wasted", waiting for the pool
	 * itself to be freed.
	 */
}
1086
1087static inline void _tc_free_children_internal(struct talloc_chunk *tc,
1088 void *ptr,
1089 const char *location);
1090
1091static inline int _talloc_free_internal(void *ptr, const char *location);
1092
/*
  internal free call that takes a struct talloc_chunk *.

  Returns 0 when the chunk was freed (or the free was deliberately
  skipped, e.g. when a free loop is detected), -1 when it could not be
  freed because of remaining references or a destructor refusing.
*/
static inline int _tc_free_internal(struct talloc_chunk *tc,
				const char *location)
{
	void *ptr_to_free;
	void *ptr = TC_PTR_FROM_CHUNK(tc);

	if (unlikely(tc->refs)) {
		int is_child;
		/* check if this is a reference from a child or
		 * grandchild back to it's parent or grandparent
		 *
		 * in that case we need to remove the reference and
		 * call another instance of talloc_free() on the current
		 * pointer.
		 */
		is_child = talloc_is_parent(tc->refs, ptr);
		_talloc_free_internal(tc->refs, location);
		if (is_child) {
			return _talloc_free_internal(ptr, location);
		}
		return -1;
	}

	if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) {
		/* we have a free loop - stop looping */
		return 0;
	}

	if (unlikely(tc->destructor)) {
		talloc_destructor_t d = tc->destructor;

		/*
		 * Protect the destructor against some overwrite
		 * attacks, by explicitly checking it has the right
		 * magic here.
		 */
		if (talloc_chunk_from_ptr(ptr) != tc) {
			/*
			 * This can't actually happen, the
			 * call itself will panic.
			 */
			TALLOC_ABORT("talloc_chunk_from_ptr failed!");
		}

		/* a destructor of -1 marks "currently running" and makes
		 * the chunk unfreeable from within its own destructor */
		if (d == (talloc_destructor_t)-1) {
			return -1;
		}
		tc->destructor = (talloc_destructor_t)-1;
		if (d(ptr) == -1) {
			/*
			 * Only replace the destructor pointer if
			 * calling the destructor didn't modify it.
			 */
			if (tc->destructor == (talloc_destructor_t)-1) {
				tc->destructor = d;
			}
			return -1;
		}
		tc->destructor = NULL;
	}

	/* unlink this chunk from its parent's child list, or from the
	 * sibling list used for parent-less chunks */
	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	/* guard against recursing back into this chunk while freeing
	 * its children */
	tc->flags |= TALLOC_FLAG_LOOP;

	_tc_free_children_internal(tc, ptr, location);

	_talloc_chunk_set_free(tc, location);

	if (tc->flags & TALLOC_FLAG_POOL) {
		struct talloc_pool_hdr *pool;

		pool = talloc_pool_from_chunk(tc);

		if (unlikely(pool->object_count == 0)) {
			talloc_abort("Pool object count zero!");
			return 0;
		}

		pool->object_count--;

		/* the pool stays alive until all objects in it are gone */
		if (likely(pool->object_count != 0)) {
			return 0;
		}

		/*
		 * With object_count==0, a pool becomes a normal piece of
		 * memory to free. If it's allocated inside a pool, it needs
		 * to be freed as poolmem, else it needs to be just freed.
		 */
		ptr_to_free = pool;
	} else {
		ptr_to_free = tc;
	}

	if (tc->flags & TALLOC_FLAG_POOLMEM) {
		_tc_free_poolmem(tc, location);
		return 0;
	}

	tc_memlimit_update_on_free(tc);

	TC_INVALIDATE_FULL_CHUNK(tc);
	free(ptr_to_free);
	return 0;
}
1211
1212/*
1213 internal talloc_free call
1214*/
1215static inline int _talloc_free_internal(void *ptr, const char *location)
1216{
1217 struct talloc_chunk *tc;
1218
1219 if (unlikely(ptr == NULL)) {
1220 return -1;
1221 }
1222
1223 /* possibly initialised the talloc fill value */
1224 if (unlikely(!talloc_fill.initialised)) {
1225 const char *fill = getenv(TALLOC_FILL_ENV);
1226 if (fill != NULL) {
1227 talloc_fill.enabled = true;
1228 talloc_fill.fill_value = strtoul(fill, NULL, 0);
1229 }
1230 talloc_fill.initialised = true;
1231 }
1232
1233 tc = talloc_chunk_from_ptr(ptr);
1234 return _tc_free_internal(tc, location);
1235}
1236
1237static inline size_t _talloc_total_limit_size(const void *ptr,
1238 struct talloc_memlimit *old_limit,
1239 struct talloc_memlimit *new_limit);
1240
/*
  move a lump of memory from one talloc context to another return the
  ptr on success, or NULL if it could not be transferred.
  passing NULL as ptr will always return NULL with no side effects.

  Any memory-limit accounting is moved from the source tree to the
  destination tree as part of the transfer.
*/
static void *_talloc_steal_internal(const void *new_ctx, const void *ptr)
{
	struct talloc_chunk *tc, *new_tc;
	size_t ctx_size = 0;

	if (unlikely(!ptr)) {
		return NULL;
	}

	if (unlikely(new_ctx == NULL)) {
		new_ctx = null_context;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (tc->limit != NULL) {

		ctx_size = _talloc_total_limit_size(ptr, NULL, NULL);

		/* Decrement the memory limit from the source .. */
		talloc_memlimit_shrink(tc->limit->upper, ctx_size);

		if (tc->limit->parent == tc) {
			tc->limit->upper = NULL;
		} else {
			tc->limit = NULL;
		}
	}

	/* new_ctx can still be NULL here when null tracking is disabled
	 * (null_context == NULL): fully detach the chunk */
	if (unlikely(new_ctx == NULL)) {
		if (tc->parent) {
			_TLIST_REMOVE(tc->parent->child, tc);
			if (tc->parent->child) {
				tc->parent->child->parent = tc->parent;
			}
		} else {
			if (tc->prev) tc->prev->next = tc->next;
			if (tc->next) tc->next->prev = tc->prev;
		}

		tc->parent = tc->next = tc->prev = NULL;
		return discard_const_p(void, ptr);
	}

	new_tc = talloc_chunk_from_ptr(new_ctx);

	/* stealing onto itself or onto the current parent is a no-op */
	if (unlikely(tc == new_tc || tc->parent == new_tc)) {
		return discard_const_p(void, ptr);
	}

	if (tc->parent) {
		_TLIST_REMOVE(tc->parent->child, tc);
		if (tc->parent->child) {
			tc->parent->child->parent = tc->parent;
		}
	} else {
		if (tc->prev) tc->prev->next = tc->next;
		if (tc->next) tc->next->prev = tc->prev;
		tc->prev = tc->next = NULL;
	}

	tc->parent = new_tc;
	if (new_tc->child) new_tc->child->parent = NULL;
	_TLIST_ADD(new_tc->child, tc);

	if (tc->limit || new_tc->limit) {
		ctx_size = _talloc_total_limit_size(ptr, tc->limit,
						    new_tc->limit);
		/* .. and increment it in the destination. */
		if (new_tc->limit) {
			talloc_memlimit_grow(new_tc->limit, ctx_size);
		}
	}

	return discard_const_p(void, ptr);
}
1322
/*
  move a lump of memory from one talloc context to another return the
  ptr on success, or NULL if it could not be transferred.
  passing NULL as ptr will always return NULL with no side effects.

  This location-tracking wrapper additionally warns when the pointer
  still has outstanding references; the steal is performed anyway.
*/
_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return NULL;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* warn about stealing referenced memory, unless the target is
	 * already the current parent */
	if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) {
		struct talloc_reference_handle *h;

		talloc_log("WARNING: talloc_steal with references at %s\r\n",
			   location);

		for (h=tc->refs; h; h=h->next) {
			talloc_log("\treference at %s\r\n",
				   h->location);
		}
	}

#if 0
	/* this test is probably too expensive to have on in the
	   normal build, but it useful for debugging */
	if (talloc_is_parent(new_ctx, ptr)) {
		talloc_log("WARNING: stealing into talloc child at %s\r\n", location);
	}
#endif

	return _talloc_steal_internal(new_ctx, ptr);
}
1360
1361/*
1362 this is like a talloc_steal(), but you must supply the old
1363 parent. This resolves the ambiguity in a talloc_steal() which is
1364 called on a context that has more than one parent (via references)
1365
1366 The old parent can be either a reference or a parent
1367*/
1368_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr)
1369{
1370 struct talloc_chunk *tc;
1371 struct talloc_reference_handle *h;
1372
1373 if (unlikely(ptr == NULL)) {
1374 return NULL;
1375 }
1376
1377 if (old_parent == talloc_parent(ptr)) {
1378 return _talloc_steal_internal(new_parent, ptr);
1379 }
1380
1381 tc = talloc_chunk_from_ptr(ptr);
1382 for (h=tc->refs;h;h=h->next) {
1383 if (talloc_parent(h) == old_parent) {
1384 if (_talloc_steal_internal(new_parent, h) != h) {
1385 return NULL;
1386 }
1387 return discard_const_p(void, ptr);
1388 }
1389 }
1390
1391 /* it wasn't a parent */
1392 return NULL;
1393}
1394
1395/*
1396 remove a secondary reference to a pointer. This undo's what
1397 talloc_reference() has done. The context and pointer arguments
1398 must match those given to a talloc_reference()
1399*/
1400static inline int talloc_unreference(const void *context, const void *ptr)
1401{
1402 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1403 struct talloc_reference_handle *h;
1404
1405 if (unlikely(context == NULL)) {
1406 context = null_context;
1407 }
1408
1409 for (h=tc->refs;h;h=h->next) {
1410 struct talloc_chunk *p = talloc_parent_chunk(h);
1411 if (p == NULL) {
1412 if (context == NULL) break;
1413 } else if (TC_PTR_FROM_CHUNK(p) == context) {
1414 break;
1415 }
1416 }
1417 if (h == NULL) {
1418 return -1;
1419 }
1420
1421 return _talloc_free_internal(h, __location__);
1422}
1423
/*
  remove a specific parent context from a pointer. This is a more
  controlled variant of talloc_free()

  Returns 0 on success, -1 when 'context' is neither a reference
  holder nor the actual parent of 'ptr'.
*/
_PUBLIC_ int talloc_unlink(const void *context, void *ptr)
{
	struct talloc_chunk *tc_p, *new_p, *tc_c;
	void *new_parent;

	if (ptr == NULL) {
		return -1;
	}

	if (context == NULL) {
		context = null_context;
	}

	/* first try to drop a talloc_reference() held by 'context' */
	if (talloc_unreference(context, ptr) == 0) {
		return 0;
	}

	/* otherwise 'context' must be the actual parent of 'ptr' */
	if (context != NULL) {
		tc_c = talloc_chunk_from_ptr(context);
	} else {
		tc_c = NULL;
	}
	if (tc_c != talloc_parent_chunk(ptr)) {
		return -1;
	}

	tc_p = talloc_chunk_from_ptr(ptr);

	/* with no remaining references this is an ordinary free */
	if (tc_p->refs == NULL) {
		return _talloc_free_internal(ptr, __location__);
	}

	/* references remain: hand the pointer over to the holder of
	 * its first reference */
	new_p = talloc_parent_chunk(tc_p->refs);
	if (new_p) {
		new_parent = TC_PTR_FROM_CHUNK(new_p);
	} else {
		new_parent = NULL;
	}

	if (talloc_unreference(new_parent, ptr) != 0) {
		return -1;
	}

	_talloc_steal_internal(new_parent, ptr);

	return 0;
}
1475
1476/*
1477 add a name to an existing pointer - va_list version
1478*/
1479static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1480 const char *fmt,
1481 va_list ap) PRINTF_ATTRIBUTE(2,0);
1482
1483static inline const char *tc_set_name_v(struct talloc_chunk *tc,
1484 const char *fmt,
1485 va_list ap)
1486{
1487 struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc),
1488 fmt,
1489 ap);
1490 if (likely(name_tc)) {
1491 tc->name = TC_PTR_FROM_CHUNK(name_tc);
1492 _tc_set_name_const(name_tc, ".name");
1493 } else {
1494 tc->name = NULL;
1495 }
1496 return tc->name;
1497}
1498
1499/*
1500 add a name to an existing pointer
1501*/
1502_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...)
1503{
1504 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1505 const char *name;
1506 va_list ap;
1507 va_start(ap, fmt);
1508 name = tc_set_name_v(tc, fmt, ap);
1509 va_end(ap);
1510 return name;
1511}
1512
1513
1514/*
1515 create a named talloc pointer. Any talloc pointer can be named, and
1516 talloc_named() operates just like talloc() except that it allows you
1517 to name the pointer.
1518*/
1519_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...)
1520{
1521 va_list ap;
1522 void *ptr;
1523 const char *name;
1524 struct talloc_chunk *tc;
1525
1526 ptr = __talloc(context, size, &tc);
1527 if (unlikely(ptr == NULL)) return NULL;
1528
1529 va_start(ap, fmt);
1530 name = tc_set_name_v(tc, fmt, ap);
1531 va_end(ap);
1532
1533 if (unlikely(name == NULL)) {
1534 _talloc_free_internal(ptr, __location__);
1535 return NULL;
1536 }
1537
1538 return ptr;
1539}
1540
1541/*
1542 return the name of a talloc ptr, or "UNNAMED"
1543*/
1544static inline const char *__talloc_get_name(const void *ptr)
1545{
1546 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
1547 if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) {
1548 return ".reference";
1549 }
1550 if (likely(tc->name)) {
1551 return tc->name;
1552 }
1553 return "UNNAMED";
1554}
1555
1556_PUBLIC_ const char *talloc_get_name(const void *ptr)
1557{
1558 return __talloc_get_name(ptr);
1559}
1560
1561/*
1562 check if a pointer has the given name. If it does, return the pointer,
1563 otherwise return NULL
1564*/
1565_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name)
1566{
1567 const char *pname;
1568 if (unlikely(ptr == NULL)) return NULL;
1569 pname = __talloc_get_name(ptr);
1570 if (likely(pname == name || strcmp(pname, name) == 0)) {
1571 return discard_const_p(void, ptr);
1572 }
1573 return NULL;
1574}
1575
1576static void talloc_abort_type_mismatch(const char *location,
1577 const char *name,
1578 const char *expected)
1579{
1580 const char *reason;
1581
1582 reason = talloc_asprintf(NULL,
1583 "%s: Type mismatch: name[%s] expected[%s]",
1584 location,
1585 name?name:"NULL",
1586 expected);
1587 if (!reason) {
1588 reason = "Type mismatch";
1589 }
1590
1591 talloc_abort(reason);
1592}
1593
1594_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location)
1595{
1596 const char *pname;
1597
1598 if (unlikely(ptr == NULL)) {
1599 talloc_abort_type_mismatch(location, NULL, name);
1600 return NULL;
1601 }
1602
1603 pname = __talloc_get_name(ptr);
1604 if (likely(pname == name || strcmp(pname, name) == 0)) {
1605 return discard_const_p(void, ptr);
1606 }
1607
1608 talloc_abort_type_mismatch(location, pname, name);
1609 return NULL;
1610}
1611
1612/*
1613 this is for compatibility with older versions of talloc
1614*/
1615_PUBLIC_ void *talloc_init(const char *fmt, ...)
1616{
1617 va_list ap;
1618 void *ptr;
1619 const char *name;
1620 struct talloc_chunk *tc;
1621
1622 ptr = __talloc(NULL, 0, &tc);
1623 if (unlikely(ptr == NULL)) return NULL;
1624
1625 va_start(ap, fmt);
1626 name = tc_set_name_v(tc, fmt, ap);
1627 va_end(ap);
1628
1629 if (unlikely(name == NULL)) {
1630 _talloc_free_internal(ptr, __location__);
1631 return NULL;
1632 }
1633
1634 return ptr;
1635}
1636
/*
  Free all children of 'tc' (the chunk behind 'ptr').  Children that
  cannot be freed are reparented according to the priority documented
  in the loop below.
*/
static inline void _tc_free_children_internal(struct talloc_chunk *tc,
					  void *ptr,
					  const char *location)
{
	while (tc->child) {
		/* we need to work out who will own an abandoned child
		   if it cannot be freed. In priority order, the first
		   choice is owner of any remaining reference to this
		   pointer, the second choice is our parent, and the
		   final choice is the null context. */
		void *child = TC_PTR_FROM_CHUNK(tc->child);
		const void *new_parent = null_context;
		if (unlikely(tc->child->refs)) {
			struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs);
			if (p) new_parent = TC_PTR_FROM_CHUNK(p);
		}
		if (unlikely(_tc_free_internal(tc->child, location) == -1)) {
			if (talloc_parent_chunk(child) != tc) {
				/*
				 * Destructor already reparented this child.
				 * No further reparenting needed.
				 */
				continue;
			}
			/* fall back to our own parent if no reference
			 * holder was found above */
			if (new_parent == null_context) {
				struct talloc_chunk *p = talloc_parent_chunk(ptr);
				if (p) new_parent = TC_PTR_FROM_CHUNK(p);
			}
			_talloc_steal_internal(new_parent, child);
		}
	}
}
1669
/*
  this is a replacement for the Samba3 talloc_destroy_pool functionality. It
  should probably not be used in new code. It's in here to keep the talloc
  code consistent across Samba 3 and 4.

  Frees all children of 'ptr' but not 'ptr' itself.  The child chunk
  that stores the context's own name is temporarily unlinked so it
  survives the purge, then relinked afterwards.
*/
_PUBLIC_ void talloc_free_children(void *ptr)
{
	struct talloc_chunk *tc_name = NULL;
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return;
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* we do not want to free the context name if it is a child .. */
	if (likely(tc->child)) {
		for (tc_name = tc->child; tc_name; tc_name = tc_name->next) {
			if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break;
		}
		if (tc_name) {
			_TLIST_REMOVE(tc->child, tc_name);
			if (tc->child) {
				tc->child->parent = tc;
			}
		}
	}

	_tc_free_children_internal(tc, ptr, __location__);

	/* .. so we put it back after all other children have been freed */
	if (tc_name) {
		if (tc->child) {
			tc->child->parent = NULL;
		}
		tc_name->parent = tc;
		_TLIST_ADD(tc->child, tc_name);
	}
}
1710
1711/*
1712 Allocate a bit of memory as a child of an existing pointer
1713*/
1714_PUBLIC_ void *_talloc(const void *context, size_t size)
1715{
1716 struct talloc_chunk *tc;
1717 return __talloc(context, size, &tc);
1718}
1719
1720/*
1721 externally callable talloc_set_name_const()
1722*/
1723_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name)
1724{
1725 _tc_set_name_const(talloc_chunk_from_ptr(ptr), name);
1726}
1727
1728/*
1729 create a named talloc pointer. Any talloc pointer can be named, and
1730 talloc_named() operates just like talloc() except that it allows you
1731 to name the pointer.
1732*/
1733_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name)
1734{
1735 return _talloc_named_const(context, size, name);
1736}
1737
/*
  free a talloc pointer. This also frees all child pointers of this
  pointer recursively

  return 0 if the memory is actually freed, otherwise -1. The memory
  will not be freed if the ref_count is > 1 or the destructor (if
  any) returns non-zero
*/
_PUBLIC_ int _talloc_free(void *ptr, const char *location)
{
	struct talloc_chunk *tc;

	if (unlikely(ptr == NULL)) {
		return -1;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (unlikely(tc->refs != NULL)) {
		struct talloc_reference_handle *h;

		if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) {
			/* in this case we do know which parent should
			   get this pointer, as there is really only
			   one parent */
			return talloc_unlink(null_context, ptr);
		}

		/* ambiguous ownership: refuse the free and tell the
		 * caller to talloc_unlink() from a specific parent */
		talloc_log("ERROR: talloc_free with references at %s\r\n",
			   location);

		for (h=tc->refs; h; h=h->next) {
			talloc_log("\treference at %s\r\n",
				   h->location);
		}
		return -1;
	}

	return _talloc_free_internal(ptr, location);
}
1778
1779
1780
/*
  A talloc version of realloc. The context argument is only used if
  ptr is NULL

  Semantics:
   - size == 0          : behaves like a free (talloc_unlink), returns NULL
   - ptr == NULL        : behaves like a named talloc() on 'context'
   - referenced chunks and talloc pools themselves cannot be realloc'ed
  Returns the (possibly moved) pointer, or NULL on failure - in which
  case the original pointer remains valid.
*/
_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name)
{
	struct talloc_chunk *tc;
	void *new_ptr;
	bool malloced = false;
	struct talloc_pool_hdr *pool_hdr = NULL;
	size_t old_size = 0;
	size_t new_size = 0;

	/* size zero is equivalent to free() */
	if (unlikely(size == 0)) {
		talloc_unlink(context, ptr);
		return NULL;
	}

	if (unlikely(size >= MAX_TALLOC_SIZE)) {
		return NULL;
	}

	/* realloc(NULL) is equivalent to malloc() */
	if (ptr == NULL) {
		return _talloc_named_const(context, size, name);
	}

	tc = talloc_chunk_from_ptr(ptr);

	/* don't allow realloc on referenced pointers */
	if (unlikely(tc->refs)) {
		return NULL;
	}

	/* don't let anybody try to realloc a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
		return NULL;
	}

	/* growing: make sure the memlimit allows the extra bytes */
	if (tc->limit && (size > tc->size)) {
		if (!talloc_memlimit_check(tc->limit, (size - tc->size))) {
			errno = ENOMEM;
			return NULL;
		}
	}

	/* handle realloc inside a talloc_pool */
	if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
		pool_hdr = tc->pool;
	}

#if (ALWAYS_REALLOC == 0)
	/* don't shrink if we have less than 1k to gain */
	if (size < tc->size && tc->limit == NULL) {
		if (pool_hdr) {
			void *next_tc = tc_next_chunk(tc);
			TC_INVALIDATE_SHRINK_CHUNK(tc, size);
			tc->size = size;
			if (next_tc == pool_hdr->end) {
				/* note: tc->size has changed, so this works */
				pool_hdr->end = tc_next_chunk(tc);
			}
			return ptr;
		} else if ((tc->size - size) < 1024) {
			/*
			 * if we call TC_INVALIDATE_SHRINK_CHUNK() here
			 * we would need to call TC_UNDEFINE_GROW_CHUNK()
			 * after each realloc call, which slows down
			 * testing a lot :-(.
			 *
			 * That is why we only mark memory as undefined here.
			 */
			TC_UNDEFINE_SHRINK_CHUNK(tc, size);

			/* do not shrink if we have less than 1k to gain */
			tc->size = size;
			return ptr;
		}
	} else if (tc->size == size) {
		/*
		 * do not change the pointer if it is exactly
		 * the same size.
		 */
		return ptr;
	}
#endif

	/*
	 * by resetting magic we catch users of the old memory
	 *
	 * We mark this memory as free, and also over-stamp the talloc
	 * magic with the old-style magic.
	 *
	 * Why? This tries to avoid a memory read use-after-free from
	 * disclosing our talloc magic, which would then allow an
	 * attacker to prepare a valid header and so run a destructor.
	 *
	 * What else? We have to re-stamp back a valid normal magic
	 * on this memory once realloc() is done, as it will have done
	 * a memcpy() into the new valid memory. We can't do this in
	 * reverse as that would be a real use-after-free.
	 */
	_talloc_chunk_set_free(tc, NULL);

#if ALWAYS_REALLOC
	if (pool_hdr) {
		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
		pool_hdr->object_count--;

		if (new_ptr == NULL) {
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
			TC_INVALIDATE_FULL_CHUNK(tc);
		}
	} else {
		/* We're doing malloc then free here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = malloc(size + TC_HDR_SIZE);
		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
			free(tc);
		}
	}
#else
	if (pool_hdr) {
		struct talloc_chunk *pool_tc;
		void *next_tc = tc_next_chunk(tc);
		size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
		size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
		size_t space_needed;
		size_t space_left;
		unsigned int chunk_count = pool_hdr->object_count;

		pool_tc = talloc_chunk_from_pool(pool_hdr);
		if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
			chunk_count -= 1;
		}

		if (chunk_count == 1) {
			/*
			 * optimize for the case where 'tc' is the only
			 * chunk in the pool.
			 */
			char *start = tc_pool_first_chunk(pool_hdr);
			space_needed = new_chunk_size;
			space_left = (char *)tc_pool_end(pool_hdr) - start;

			if (space_left >= space_needed) {
				size_t old_used = TC_HDR_SIZE + tc->size;
				size_t new_used = TC_HDR_SIZE + size;
				new_ptr = start;

#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
				{
					/*
					 * The area from
					 * start -> tc may have
					 * been freed and thus been marked as
					 * VALGRIND_MEM_NOACCESS. Set it to
					 * VALGRIND_MEM_UNDEFINED so we can
					 * copy into it without valgrind errors.
					 * We can't just mark
					 * new_ptr -> new_ptr + old_used
					 * as this may overlap on top of tc,
					 * (which is why we use memmove, not
					 * memcpy below) hence the MIN.
					 */
					size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used);
					VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len);
				}
#endif

				memmove(new_ptr, tc, old_used);

				tc = (struct talloc_chunk *)new_ptr;
				TC_UNDEFINE_GROW_CHUNK(tc, size);

				/*
				 * first we do not align the pool pointer
				 * because we want to invalidate the padding
				 * too.
				 */
				pool_hdr->end = new_used + (char *)new_ptr;
				tc_invalidate_pool(pool_hdr);

				/* now the aligned pointer */
				pool_hdr->end = new_chunk_size + (char *)new_ptr;
				goto got_new_ptr;
			}

			next_tc = NULL;
		}

		if (new_chunk_size == old_chunk_size) {
			/* same aligned size: nothing to move */
			TC_UNDEFINE_GROW_CHUNK(tc, size);
			_talloc_chunk_set_not_free(tc);
			tc->size = size;
			return ptr;
		}

		if (next_tc == pool_hdr->end) {
			/*
			 * optimize for the case where 'tc' is the last
			 * chunk in the pool.
			 */
			space_needed = new_chunk_size - old_chunk_size;
			space_left = tc_pool_space_left(pool_hdr);

			if (space_left >= space_needed) {
				TC_UNDEFINE_GROW_CHUNK(tc, size);
				_talloc_chunk_set_not_free(tc);
				tc->size = size;
				pool_hdr->end = tc_next_chunk(tc);
				return ptr;
			}
		}

		new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0);

		if (new_ptr == NULL) {
			new_ptr = malloc(TC_HDR_SIZE+size);
			malloced = true;
			new_size = size;
		}

		if (new_ptr) {
			memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);

			_tc_free_poolmem(tc, __location__ "_talloc_realloc");
		}
	}
	else {
		/* We're doing realloc here, so record the difference. */
		old_size = tc->size;
		new_size = size;
		new_ptr = realloc(tc, size + TC_HDR_SIZE);
	}
got_new_ptr:
#endif
	if (unlikely(!new_ptr)) {
		/*
		 * Ok, this is a strange spot. We have to put back
		 * the old talloc_magic and any flags, except the
		 * TALLOC_FLAG_FREE as this was not free'ed by the
		 * realloc() call after all
		 */
		_talloc_chunk_set_not_free(tc);
		return NULL;
	}

	/*
	 * tc is now the new value from realloc(), the old memory we
	 * can't access any more and was preemptively marked as
	 * TALLOC_FLAG_FREE before the call. Now we mark it as not
	 * free again
	 */
	tc = (struct talloc_chunk *)new_ptr;
	_talloc_chunk_set_not_free(tc);
	if (malloced) {
		tc->flags &= ~TALLOC_FLAG_POOLMEM;
	}
	/* re-point neighbours and relatives at the moved header */
	if (tc->parent) {
		tc->parent->child = tc;
	}
	if (tc->child) {
		tc->child->parent = tc;
	}

	if (tc->prev) {
		tc->prev->next = tc;
	}
	if (tc->next) {
		tc->next->prev = tc;
	}

	if (new_size > old_size) {
		talloc_memlimit_grow(tc->limit, new_size - old_size);
	} else if (new_size < old_size) {
		talloc_memlimit_shrink(tc->limit, old_size - new_size);
	}

	tc->size = size;
	_tc_set_name_const(tc, name);

	return TC_PTR_FROM_CHUNK(tc);
}
2074
2075/*
2076 a wrapper around talloc_steal() for situations where you are moving a pointer
2077 between two structures, and want the old pointer to be set to NULL
2078*/
2079_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr)
2080{
2081 const void **pptr = discard_const_p(const void *,_pptr);
2082 void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr));
2083 (*pptr) = NULL;
2084 return ret;
2085}
2086
/* selector for _talloc_total_mem_internal(): which quantity to accumulate */
enum talloc_mem_count_type {
	TOTAL_MEM_SIZE,
	TOTAL_MEM_BLOCKS,
	TOTAL_MEM_LIMIT,
};
2092
/*
  Walk the subtree under 'ptr' accumulating either total bytes, block
  count or memlimit-relevant bytes, depending on 'type'.  While
  walking, optionally retarget memlimit pointers from 'old_limit' to
  'new_limit' (used when a subtree moves between limited parents).
  TALLOC_FLAG_LOOP protects against reference cycles.
*/
static inline size_t _talloc_total_mem_internal(const void *ptr,
					    enum talloc_mem_count_type type,
					    struct talloc_memlimit *old_limit,
					    struct talloc_memlimit *new_limit)
{
	size_t total = 0;
	struct talloc_chunk *c, *tc;

	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) {
		return 0;
	}

	tc = talloc_chunk_from_ptr(ptr);

	if (old_limit || new_limit) {
		if (tc->limit && tc->limit->upper == old_limit) {
			tc->limit->upper = new_limit;
		}
	}

	/* optimize in the memlimits case */
	if (type == TOTAL_MEM_LIMIT &&
	    tc->limit != NULL &&
	    tc->limit != old_limit &&
	    tc->limit->parent == tc) {
		return tc->limit->cur_size;
	}

	if (tc->flags & TALLOC_FLAG_LOOP) {
		return 0;
	}

	tc->flags |= TALLOC_FLAG_LOOP;

	if (old_limit || new_limit) {
		if (old_limit == tc->limit) {
			tc->limit = new_limit;
		}
	}

	switch (type) {
	case TOTAL_MEM_SIZE:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			total = tc->size;
		}
		break;
	case TOTAL_MEM_BLOCKS:
		total++;
		break;
	case TOTAL_MEM_LIMIT:
		if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) {
			/*
			 * Don't count memory allocated from a pool
			 * when calculating limits. Only count the
			 * pool itself.
			 */
			if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
				if (tc->flags & TALLOC_FLAG_POOL) {
					/*
					 * If this is a pool, the allocated
					 * size is in the pool header, and
					 * remember to add in the prefix
					 * length.
					 */
					struct talloc_pool_hdr *pool_hdr
							= talloc_pool_from_chunk(tc);
					total = pool_hdr->poolsize +
							TC_HDR_SIZE +
							TP_HDR_SIZE;
				} else {
					total = tc->size + TC_HDR_SIZE;
				}
			}
		}
		break;
	}
	/* recurse into all children */
	for (c = tc->child; c; c = c->next) {
		total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type,
						    old_limit, new_limit);
	}

	tc->flags &= ~TALLOC_FLAG_LOOP;

	return total;
}
2181
2182/*
2183 return the total size of a talloc pool (subtree)
2184*/
2185_PUBLIC_ size_t talloc_total_size(const void *ptr)
2186{
2187 return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL);
2188}
2189
2190/*
2191 return the total number of blocks in a talloc pool (subtree)
2192*/
2193_PUBLIC_ size_t talloc_total_blocks(const void *ptr)
2194{
2195 return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL);
2196}
2197
2198/*
2199 return the number of external references to a pointer
2200*/
2201_PUBLIC_ size_t talloc_reference_count(const void *ptr)
2202{
2203 struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr);
2204 struct talloc_reference_handle *h;
2205 size_t ret = 0;
2206
2207 for (h=tc->refs;h;h=h->next) {
2208 ret++;
2209 }
2210 return ret;
2211}
2212
/*
  report on memory usage by all children of a pointer, giving a full tree view

  Invokes 'callback' for 'ptr' itself and then, down to 'max_depth'
  levels (negative means unlimited), for every child; references are
  reported with is_ref=1.  TALLOC_FLAG_LOOP guards against cycles.
*/
_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
			    void (*callback)(const void *ptr,
					     int depth, int max_depth,
					     int is_ref,
					     void *private_data),
			    void *private_data)
{
	struct talloc_chunk *c, *tc;

	if (ptr == NULL) {
		ptr = null_context;
	}
	if (ptr == NULL) return;

	tc = talloc_chunk_from_ptr(ptr);

	if (tc->flags & TALLOC_FLAG_LOOP) {
		return;
	}

	callback(ptr, depth, max_depth, 0, private_data);

	if (max_depth >= 0 && depth >= max_depth) {
		return;
	}

	tc->flags |= TALLOC_FLAG_LOOP;
	for (c=tc->child;c;c=c->next) {
		if (c->name == TALLOC_MAGIC_REFERENCE) {
			/* report the reference, do not recurse through it */
			struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c);
			callback(h->ptr, depth + 1, max_depth, 1, private_data);
		} else {
			talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data);
		}
	}
	tc->flags &= ~TALLOC_FLAG_LOOP;
}
2253
2254static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f)
2255{
2256 const char *name = __talloc_get_name(ptr);
2257 struct talloc_chunk *tc;
2258 FILE *f = (FILE *)_f;
2259
2260 if (is_ref) {
Harald Welte189f43d2019-04-17 21:19:04 +02002261 fprintf(f, "%*sreference to: %s\r\n", depth*4, "", name);
Harald Welte5df0be62019-04-17 20:54:29 +02002262 return;
2263 }
2264
2265 tc = talloc_chunk_from_ptr(ptr);
2266 if (tc->limit && tc->limit->parent == tc) {
2267 fprintf(f, "%*s%-30s is a memlimit context"
Harald Welte189f43d2019-04-17 21:19:04 +02002268 " (max_size = %lu bytes, cur_size = %lu bytes)\r\n",
Harald Welte5df0be62019-04-17 20:54:29 +02002269 depth*4, "",
2270 name,
2271 (unsigned long)tc->limit->max_size,
2272 (unsigned long)tc->limit->cur_size);
2273 }
2274
2275 if (depth == 0) {
Harald Welte189f43d2019-04-17 21:19:04 +02002276 fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\r",
Harald Welte5df0be62019-04-17 20:54:29 +02002277 (max_depth < 0 ? "full " :""), name,
2278 (unsigned long)talloc_total_size(ptr),
2279 (unsigned long)talloc_total_blocks(ptr));
2280 return;
2281 }
2282
Harald Welte189f43d2019-04-17 21:19:04 +02002283 fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\r\n",
Harald Welte5df0be62019-04-17 20:54:29 +02002284 depth*4, "",
2285 name,
2286 (unsigned long)talloc_total_size(ptr),
2287 (unsigned long)talloc_total_blocks(ptr),
2288 (int)talloc_reference_count(ptr), ptr);
2289
2290#if 0
2291 fprintf(f, "content: ");
2292 if (talloc_total_size(ptr)) {
2293 int tot = talloc_total_size(ptr);
2294 int i;
2295
2296 for (i = 0; i < tot; i++) {
2297 if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) {
2298 fprintf(f, "%c", ((char *)ptr)[i]);
2299 } else {
2300 fprintf(f, "~%02x", ((char *)ptr)[i]);
2301 }
2302 }
2303 }
Harald Welte189f43d2019-04-17 21:19:04 +02002304 fprintf(f, "\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002305#endif
2306}
2307
2308/*
2309 report on memory usage by all children of a pointer, giving a full tree view
2310*/
2311_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f)
2312{
2313 if (f) {
2314 talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f);
2315 fflush(f);
2316 }
2317}
2318
2319/*
2320 report on memory usage by all children of a pointer, giving a full tree view
2321*/
2322_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f)
2323{
2324 talloc_report_depth_file(ptr, 0, -1, f);
2325}
2326
2327/*
2328 report on memory usage by all children of a pointer
2329*/
2330_PUBLIC_ void talloc_report(const void *ptr, FILE *f)
2331{
2332 talloc_report_depth_file(ptr, 0, 1, f);
2333}
2334
2335/*
2336 enable tracking of the NULL context
2337*/
2338_PUBLIC_ void talloc_enable_null_tracking(void)
2339{
2340 if (null_context == NULL) {
2341 null_context = _talloc_named_const(NULL, 0, "null_context");
2342 if (autofree_context != NULL) {
2343 talloc_reparent(NULL, null_context, autofree_context);
2344 }
2345 }
2346}
2347
2348/*
2349 enable tracking of the NULL context, not moving the autofree context
2350 into the NULL context. This is needed for the talloc testsuite
2351*/
2352_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void)
2353{
2354 if (null_context == NULL) {
2355 null_context = _talloc_named_const(NULL, 0, "null_context");
2356 }
2357}
2358
2359/*
2360 disable tracking of the NULL context
2361*/
2362_PUBLIC_ void talloc_disable_null_tracking(void)
2363{
2364 if (null_context != NULL) {
2365 /* we have to move any children onto the real NULL
2366 context */
2367 struct talloc_chunk *tc, *tc2;
2368 tc = talloc_chunk_from_ptr(null_context);
2369 for (tc2 = tc->child; tc2; tc2=tc2->next) {
2370 if (tc2->parent == tc) tc2->parent = NULL;
2371 if (tc2->prev == tc) tc2->prev = NULL;
2372 }
2373 for (tc2 = tc->next; tc2; tc2=tc2->next) {
2374 if (tc2->parent == tc) tc2->parent = NULL;
2375 if (tc2->prev == tc) tc2->prev = NULL;
2376 }
2377 tc->child = NULL;
2378 tc->next = NULL;
2379 }
2380 talloc_free(null_context);
2381 null_context = NULL;
2382}
2383
2384/*
2385 enable leak reporting on exit
2386*/
2387_PUBLIC_ void talloc_enable_leak_report(void)
2388{
2389 talloc_enable_null_tracking();
2390 talloc_report_null = true;
2391 talloc_setup_atexit();
2392}
2393
2394/*
2395 enable full leak reporting on exit
2396*/
2397_PUBLIC_ void talloc_enable_leak_report_full(void)
2398{
2399 talloc_enable_null_tracking();
2400 talloc_report_null_full = true;
2401 talloc_setup_atexit();
2402}
2403
2404/*
2405 talloc and zero memory.
2406*/
2407_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name)
2408{
2409 void *p = _talloc_named_const(ctx, size, name);
2410
2411 if (p) {
2412 memset(p, '\0', size);
2413 }
2414
2415 return p;
2416}
2417
2418/*
2419 memdup with a talloc.
2420*/
2421_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name)
2422{
2423 void *newp = _talloc_named_const(t, size, name);
2424
2425 if (likely(newp)) {
2426 memcpy(newp, p, size);
2427 }
2428
2429 return newp;
2430}
2431
/* Duplicate the first @len bytes of @p into a NUL-terminated talloc
 * string under @t; the chunk is named after its own content. */
static inline char *__talloc_strlendup(const void *t, const char *p, size_t len)
{
	struct talloc_chunk *tc;
	char *dup;

	dup = (char *)__talloc(t, len + 1, &tc);
	if (unlikely(dup == NULL)) {
		return NULL;
	}

	memcpy(dup, p, len);
	dup[len] = '\0';

	_tc_set_name_const(tc, dup);
	return dup;
}
2446
2447/*
2448 strdup with a talloc
2449*/
2450_PUBLIC_ char *talloc_strdup(const void *t, const char *p)
2451{
2452 if (unlikely(!p)) return NULL;
2453 return __talloc_strlendup(t, p, strlen(p));
2454}
2455
2456/*
2457 strndup with a talloc
2458*/
2459_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n)
2460{
2461 if (unlikely(!p)) return NULL;
2462 return __talloc_strlendup(t, p, strnlen(p, n));
2463}
2464
2465static inline char *__talloc_strlendup_append(char *s, size_t slen,
2466 const char *a, size_t alen)
2467{
2468 char *ret;
2469
2470 ret = talloc_realloc(NULL, s, char, slen + alen + 1);
2471 if (unlikely(!ret)) return NULL;
2472
2473 /* append the string and the trailing \0 */
2474 memcpy(&ret[slen], a, alen);
2475 ret[slen+alen] = 0;
2476
2477 _tc_set_name_const(talloc_chunk_from_ptr(ret), ret);
2478 return ret;
2479}
2480
2481/*
2482 * Appends at the end of the string.
2483 */
2484_PUBLIC_ char *talloc_strdup_append(char *s, const char *a)
2485{
2486 if (unlikely(!s)) {
2487 return talloc_strdup(NULL, a);
2488 }
2489
2490 if (unlikely(!a)) {
2491 return s;
2492 }
2493
2494 return __talloc_strlendup_append(s, strlen(s), a, strlen(a));
2495}
2496
2497/*
2498 * Appends at the end of the talloc'ed buffer,
2499 * not the end of the string.
2500 */
2501_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a)
2502{
2503 size_t slen;
2504
2505 if (unlikely(!s)) {
2506 return talloc_strdup(NULL, a);
2507 }
2508
2509 if (unlikely(!a)) {
2510 return s;
2511 }
2512
2513 slen = talloc_get_size(s);
2514 if (likely(slen > 0)) {
2515 slen--;
2516 }
2517
2518 return __talloc_strlendup_append(s, slen, a, strlen(a));
2519}
2520
2521/*
2522 * Appends at the end of the string.
2523 */
2524_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n)
2525{
2526 if (unlikely(!s)) {
2527 return talloc_strndup(NULL, a, n);
2528 }
2529
2530 if (unlikely(!a)) {
2531 return s;
2532 }
2533
2534 return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n));
2535}
2536
2537/*
2538 * Appends at the end of the talloc'ed buffer,
2539 * not the end of the string.
2540 */
2541_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n)
2542{
2543 size_t slen;
2544
2545 if (unlikely(!s)) {
2546 return talloc_strndup(NULL, a, n);
2547 }
2548
2549 if (unlikely(!a)) {
2550 return s;
2551 }
2552
2553 slen = talloc_get_size(s);
2554 if (likely(slen > 0)) {
2555 slen--;
2556 }
2557
2558 return __talloc_strlendup_append(s, slen, a, strnlen(a, n));
2559}
2560
/*
 * Portability shim: provide va_copy() on platforms whose libc predates
 * C99.  __va_copy is the older GCC spelling; the plain assignment
 * fallback works only on ABIs where va_list is a simple scalar/pointer.
 */
#ifndef HAVE_VA_COPY
#ifdef HAVE___VA_COPY
#define va_copy(dest, src) __va_copy(dest, src)
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
2568
/*
 * Core of talloc_vasprintf(): format @fmt/@ap into a fresh talloc chunk
 * hung off @t and return the chunk, or NULL on a formatting error,
 * length overflow, or allocation failure.  The chunk is named after the
 * formatted string itself.
 */
static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap) PRINTF_ATTRIBUTE(2,0);

static struct talloc_chunk *_vasprintf_tc(const void *t,
					  const char *fmt,
					  va_list ap)
{
	int vlen;
	size_t len;
	char *ret;
	va_list ap2;
	struct talloc_chunk *tc;
	char buf[1024];

	/* this call looks strange, but it makes it work on older solaris boxes */
	va_copy(ap2, ap);
	vlen = vsnprintf(buf, sizeof(buf), fmt, ap2);
	va_end(ap2);
	if (unlikely(vlen < 0)) {
		return NULL;
	}
	len = vlen;
	/* guard the len+1 below against size_t wrap-around */
	if (unlikely(len + 1 < len)) {
		return NULL;
	}

	ret = (char *)__talloc(t, len+1, &tc);
	if (unlikely(!ret)) return NULL;

	if (len < sizeof(buf)) {
		/* short result: already fully formatted in the stack buffer */
		memcpy(ret, buf, len+1);
	} else {
		/* long result: run the formatting again, straight into the chunk */
		va_copy(ap2, ap);
		vsnprintf(ret, len+1, fmt, ap2);
		va_end(ap2);
	}

	_tc_set_name_const(tc, ret);
	return tc;
}
2610
2611_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap)
2612{
2613 struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap);
2614 if (tc == NULL) {
2615 return NULL;
2616 }
2617 return TC_PTR_FROM_CHUNK(tc);
2618}
2619
2620
2621/*
2622 Perform string formatting, and return a pointer to newly allocated
2623 memory holding the result, inside a memory pool.
2624 */
2625_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...)
2626{
2627 va_list ap;
2628 char *ret;
2629
2630 va_start(ap, fmt);
2631 ret = talloc_vasprintf(t, fmt, ap);
2632 va_end(ap);
2633 return ret;
2634}
2635
/*
 * Append the formatted result of @fmt/@ap after the first @slen bytes
 * of talloc string @s, reallocating it as needed.  Returns the
 * possibly-moved string; returns @s unchanged when formatting yields no
 * characters, and NULL only when the realloc fails.
 */
static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
						 PRINTF_ATTRIBUTE(3,0);

static inline char *__talloc_vaslenprintf_append(char *s, size_t slen,
						 const char *fmt, va_list ap)
{
	ssize_t alen;
	va_list ap2;
	char c;

	/* dry run: measure the formatted length without writing output */
	va_copy(ap2, ap);
	alen = vsnprintf(&c, 1, fmt, ap2);
	va_end(ap2);

	if (alen <= 0) {
		/* Either the vsnprintf failed or the format resulted in
		 * no characters being formatted. In the former case, we
		 * ought to return NULL, in the latter we ought to return
		 * the original string. Most current callers of this
		 * function expect it to never return NULL.
		 */
		return s;
	}

	s = talloc_realloc(NULL, s, char, slen + alen + 1);
	if (!s) return NULL;

	/* second pass: format for real into the grown buffer */
	va_copy(ap2, ap);
	vsnprintf(s + slen, alen + 1, fmt, ap2);
	va_end(ap2);

	_tc_set_name_const(talloc_chunk_from_ptr(s), s);
	return s;
}
2671
2672/**
2673 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2674 * and return @p s, which may have moved. Good for gradually
2675 * accumulating output into a string buffer. Appends at the end
2676 * of the string.
2677 **/
2678_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap)
2679{
2680 if (unlikely(!s)) {
2681 return talloc_vasprintf(NULL, fmt, ap);
2682 }
2683
2684 return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap);
2685}
2686
2687/**
2688 * Realloc @p s to append the formatted result of @p fmt and @p ap,
2689 * and return @p s, which may have moved. Always appends at the
2690 * end of the talloc'ed buffer, not the end of the string.
2691 **/
2692_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap)
2693{
2694 size_t slen;
2695
2696 if (unlikely(!s)) {
2697 return talloc_vasprintf(NULL, fmt, ap);
2698 }
2699
2700 slen = talloc_get_size(s);
2701 if (likely(slen > 0)) {
2702 slen--;
2703 }
2704
2705 return __talloc_vaslenprintf_append(s, slen, fmt, ap);
2706}
2707
2708/*
2709 Realloc @p s to append the formatted result of @p fmt and return @p
2710 s, which may have moved. Good for gradually accumulating output
2711 into a string buffer.
2712 */
2713_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...)
2714{
2715 va_list ap;
2716
2717 va_start(ap, fmt);
2718 s = talloc_vasprintf_append(s, fmt, ap);
2719 va_end(ap);
2720 return s;
2721}
2722
2723/*
2724 Realloc @p s to append the formatted result of @p fmt and return @p
2725 s, which may have moved. Good for gradually accumulating output
2726 into a buffer.
2727 */
2728_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...)
2729{
2730 va_list ap;
2731
2732 va_start(ap, fmt);
2733 s = talloc_vasprintf_append_buffer(s, fmt, ap);
2734 va_end(ap);
2735 return s;
2736}
2737
2738/*
2739 alloc an array, checking for integer overflow in the array size
2740*/
2741_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2742{
2743 if (count >= MAX_TALLOC_SIZE/el_size) {
2744 return NULL;
2745 }
2746 return _talloc_named_const(ctx, el_size * count, name);
2747}
2748
2749/*
2750 alloc an zero array, checking for integer overflow in the array size
2751*/
2752_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name)
2753{
2754 if (count >= MAX_TALLOC_SIZE/el_size) {
2755 return NULL;
2756 }
2757 return _talloc_zero(ctx, el_size * count, name);
2758}
2759
2760/*
2761 realloc an array, checking for integer overflow in the array size
2762*/
2763_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name)
2764{
2765 if (count >= MAX_TALLOC_SIZE/el_size) {
2766 return NULL;
2767 }
2768 return _talloc_realloc(ctx, ptr, el_size * count, name);
2769}
2770
2771/*
2772 a function version of talloc_realloc(), so it can be passed as a function pointer
2773 to libraries that want a realloc function (a realloc function encapsulates
2774 all the basic capabilities of an allocation library, which is why this is useful)
2775*/
2776_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size)
2777{
2778 return _talloc_realloc(context, ptr, size, NULL);
2779}
2780
2781
2782static int talloc_autofree_destructor(void *ptr)
2783{
2784 autofree_context = NULL;
2785 return 0;
2786}
2787
2788/*
2789 return a context which will be auto-freed on exit
2790 this is useful for reducing the noise in leak reports
2791*/
2792_PUBLIC_ void *talloc_autofree_context(void)
2793{
2794 if (autofree_context == NULL) {
2795 autofree_context = _talloc_named_const(NULL, 0, "autofree_context");
2796 talloc_set_destructor(autofree_context, talloc_autofree_destructor);
2797 talloc_setup_atexit();
2798 }
2799 return autofree_context;
2800}
2801
2802_PUBLIC_ size_t talloc_get_size(const void *context)
2803{
2804 struct talloc_chunk *tc;
2805
2806 if (context == NULL) {
2807 return 0;
2808 }
2809
2810 tc = talloc_chunk_from_ptr(context);
2811
2812 return tc->size;
2813}
2814
2815/*
2816 find a parent of this context that has the given name, if any
2817*/
2818_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char *name)
2819{
2820 struct talloc_chunk *tc;
2821
2822 if (context == NULL) {
2823 return NULL;
2824 }
2825
2826 tc = talloc_chunk_from_ptr(context);
2827 while (tc) {
2828 if (tc->name && strcmp(tc->name, name) == 0) {
2829 return TC_PTR_FROM_CHUNK(tc);
2830 }
2831 while (tc && tc->prev) tc = tc->prev;
2832 if (tc) {
2833 tc = tc->parent;
2834 }
2835 }
2836 return NULL;
2837}
2838
2839/*
2840 show the parentage of a context
2841*/
2842_PUBLIC_ void talloc_show_parents(const void *context, FILE *file)
2843{
2844 struct talloc_chunk *tc;
2845
2846 if (context == NULL) {
Harald Welte189f43d2019-04-17 21:19:04 +02002847 fprintf(file, "talloc no parents for NULL\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002848 return;
2849 }
2850
2851 tc = talloc_chunk_from_ptr(context);
Harald Welte189f43d2019-04-17 21:19:04 +02002852 fprintf(file, "talloc parents of '%s'\r\n", __talloc_get_name(context));
Harald Welte5df0be62019-04-17 20:54:29 +02002853 while (tc) {
Harald Welte189f43d2019-04-17 21:19:04 +02002854 fprintf(file, "\t'%s'\r\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc)));
Harald Welte5df0be62019-04-17 20:54:29 +02002855 while (tc && tc->prev) tc = tc->prev;
2856 if (tc) {
2857 tc = tc->parent;
2858 }
2859 }
2860 fflush(file);
2861}
2862
2863/*
2864 return 1 if ptr is a parent of context
2865*/
2866static int _talloc_is_parent(const void *context, const void *ptr, int depth)
2867{
2868 struct talloc_chunk *tc;
2869
2870 if (context == NULL) {
2871 return 0;
2872 }
2873
2874 tc = talloc_chunk_from_ptr(context);
2875 while (tc) {
2876 if (depth <= 0) {
2877 return 0;
2878 }
2879 if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1;
2880 while (tc && tc->prev) tc = tc->prev;
2881 if (tc) {
2882 tc = tc->parent;
2883 depth--;
2884 }
2885 }
2886 return 0;
2887}
2888
2889/*
2890 return 1 if ptr is a parent of context
2891*/
2892_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr)
2893{
2894 return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH);
2895}
2896
2897/*
2898 return the total size of memory used by this context and all children
2899*/
2900static inline size_t _talloc_total_limit_size(const void *ptr,
2901 struct talloc_memlimit *old_limit,
2902 struct talloc_memlimit *new_limit)
2903{
2904 return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT,
2905 old_limit, new_limit);
2906}
2907
2908static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
2909{
2910 struct talloc_memlimit *l;
2911
2912 for (l = limit; l != NULL; l = l->upper) {
2913 if (l->max_size != 0 &&
2914 ((l->max_size <= l->cur_size) ||
2915 (l->max_size - l->cur_size < size))) {
2916 return false;
2917 }
2918 }
2919
2920 return true;
2921}
2922
2923/*
2924 Update memory limits when freeing a talloc_chunk.
2925*/
2926static void tc_memlimit_update_on_free(struct talloc_chunk *tc)
2927{
2928 size_t limit_shrink_size;
2929
2930 if (!tc->limit) {
2931 return;
2932 }
2933
2934 /*
2935 * Pool entries don't count. Only the pools
2936 * themselves are counted as part of the memory
2937 * limits. Note that this also takes care of
2938 * nested pools which have both flags
2939 * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set.
2940 */
2941 if (tc->flags & TALLOC_FLAG_POOLMEM) {
2942 return;
2943 }
2944
2945 /*
2946 * If we are part of a memory limited context hierarchy
2947 * we need to subtract the memory used from the counters
2948 */
2949
2950 limit_shrink_size = tc->size+TC_HDR_SIZE;
2951
2952 /*
2953 * If we're deallocating a pool, take into
2954 * account the prefix size added for the pool.
2955 */
2956
2957 if (tc->flags & TALLOC_FLAG_POOL) {
2958 limit_shrink_size += TP_HDR_SIZE;
2959 }
2960
2961 talloc_memlimit_shrink(tc->limit, limit_shrink_size);
2962
2963 if (tc->limit->parent == tc) {
2964 free(tc->limit);
2965 }
2966
2967 tc->limit = NULL;
2968}
2969
2970/*
2971 Increase memory limit accounting after a malloc/realloc.
2972*/
2973static void talloc_memlimit_grow(struct talloc_memlimit *limit,
2974 size_t size)
2975{
2976 struct talloc_memlimit *l;
2977
2978 for (l = limit; l != NULL; l = l->upper) {
2979 size_t new_cur_size = l->cur_size + size;
2980 if (new_cur_size < l->cur_size) {
Harald Welte189f43d2019-04-17 21:19:04 +02002981 talloc_abort("logic error in talloc_memlimit_grow\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002982 return;
2983 }
2984 l->cur_size = new_cur_size;
2985 }
2986}
2987
2988/*
2989 Decrease memory limit accounting after a free/realloc.
2990*/
2991static void talloc_memlimit_shrink(struct talloc_memlimit *limit,
2992 size_t size)
2993{
2994 struct talloc_memlimit *l;
2995
2996 for (l = limit; l != NULL; l = l->upper) {
2997 if (l->cur_size < size) {
Harald Welte189f43d2019-04-17 21:19:04 +02002998 talloc_abort("logic error in talloc_memlimit_shrink\r\n");
Harald Welte5df0be62019-04-17 20:54:29 +02002999 return;
3000 }
3001 l->cur_size = l->cur_size - size;
3002 }
3003}
3004
/*
 * Attach (or update) a memory limit on @ctx.  Returns 0 on success,
 * 1 if the limit bookkeeping structure could not be allocated.
 */
_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size)
{
	struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx);
	struct talloc_memlimit *orig_limit;
	struct talloc_memlimit *limit = NULL;

	/* ctx already owns its own limit: just adjust the cap */
	if (tc->limit && tc->limit->parent == tc) {
		tc->limit->max_size = max_size;
		return 0;
	}
	orig_limit = tc->limit;

	limit = malloc(sizeof(struct talloc_memlimit));
	if (limit == NULL) {
		return 1;
	}
	limit->parent = tc;
	limit->max_size = max_size;
	/* sums the subtree's memory use; presumably also re-points child
	 * chunks from tc->limit to the new limit — see
	 * _talloc_total_mem_internal() for the exact semantics */
	limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit);

	if (orig_limit) {
		/* nest the new limit under the inherited one */
		limit->upper = orig_limit;
	} else {
		limit->upper = NULL;
	}

	return 0;
}