#include <stddef.h>
#include <string.h>

#ifndef USE_PRINTF
#define USE_PRINTF (1)
#endif

#ifndef TLSF_USE_LOCKS
#define TLSF_USE_LOCKS (0)
#endif

#ifndef TLSF_STATISTIC
#define TLSF_STATISTIC (0)
#endif

#ifndef CHECK_DOUBLE_FREE
#define CHECK_DOUBLE_FREE (0)
#endif

#if !TLSF_USE_LOCKS
#define TLSF_CREATE_LOCK(_unused_)   do{}while(0)
#define TLSF_DESTROY_LOCK(_unused_)  do{}while(0)
#define TLSF_ACQUIRE_LOCK(_unused_)  do{}while(0)
#define TLSF_RELEASE_LOCK(_unused_)  do{}while(0)
#endif

#if TLSF_STATISTIC
#define TLSF_ADD_SIZE(tlsf, b) do {                                    \
        tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;     \
        if (tlsf->used_size > tlsf->max_size) {                        \
            tlsf->max_size = tlsf->used_size;                          \
        }                                                              \
    } while (0)

#define TLSF_REMOVE_SIZE(tlsf, b) do {                                 \
        tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;     \
    } while (0)
#else
#define TLSF_ADD_SIZE(tlsf, b)     do{}while(0)
#define TLSF_REMOVE_SIZE(tlsf, b)  do{}while(0)
#endif

#if USE_MMAP || USE_SBRK
#include <unistd.h>
#endif

#if USE_MMAP
#include <sys/mman.h>
#endif

#if !defined(__GNUC__)
#ifndef __inline__
#define __inline__
#endif
#endif

#ifndef _DEBUG_TLSF_
#define _DEBUG_TLSF_ (0)
#endif

#define BLOCK_ALIGN     (sizeof(void *) * 2)

#define MAX_LOG2_SLI    (5)
#define MAX_SLI         (1 << MAX_LOG2_SLI)     /* MAX_SLI = 2^MAX_LOG2_SLI */

#define FLI_OFFSET      (6)     /* blocks smaller than SMALL_BLOCK are handled by the first row */
#define SMALL_BLOCK     (128)
#define REAL_FLI        (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE  (sizeof (free_ptr_t))
#define BHDR_OVERHEAD   (sizeof (bhdr_t) - MIN_BLOCK_SIZE)
#define TLSF_SIGNATURE  (0x2A59FA59)

#define PTR_MASK        (sizeof(void *) - 1)
#define BLOCK_SIZE      (0xFFFFFFFF - PTR_MASK)

#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
#define MEM_ALIGN                 ((BLOCK_ALIGN) - 1)
#define ROUNDUP_SIZE(_r)          (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
#define ROUNDDOWN_SIZE(_r)        ((_r) & ~MEM_ALIGN)
#define ROUNDUP(_x, _v)           ((((~(_x)) + 1) & ((_v) - 1)) + (_x))

#define BLOCK_STATE     (0x1)
#define PREV_STATE      (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK      (0x1)
#define USED_BLOCK      (0x0)

/* bit 1 of the block size */
#define PREV_FREE       (0x2)
#define PREV_USED       (0x0)

#define DEFAULT_AREA_SIZE (1024 * 10)

#define PAGE_SIZE (getpagesize())

#if USE_PRINTF
#include <stdio.h>
# define PRINT_MSG(...) printf(__VA_ARGS__)
# define ERROR_MSG(...) fprintf(stderr, __VA_ARGS__)
#else
# if !defined(PRINT_MSG)
#  define PRINT_MSG(...)
# endif
# if !defined(ERROR_MSG)
#  define ERROR_MSG(...)
# endif
#endif

#ifndef ATTRIBUTE_UNUSED
#if defined(__GNUC__)
#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
#else
#define ATTRIBUTE_UNUSED
#endif
#endif

typedef unsigned int u32_t;     /* NOTE: this type must be 4 bytes (32 bits) wide */
typedef unsigned char u8_t;     /* NOTE: this type must be 1 byte wide */
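/* The block-header and area types below are a reconstruction: their layouts
 * are inferred from how the fields are used in this file (prev_hdr, size,
 * ptr.free_ptr, ptr.buffer, the area_head list).  Field order is an
 * assumption. */
typedef struct free_ptr_struct {
    struct bhdr_struct *prev;
    struct bhdr_struct *next;
} free_ptr_t;

typedef struct bhdr_struct {
    /* Valid only when the PREV_FREE bit of 'size' is set */
    struct bhdr_struct *prev_hdr;
    /* Size in bytes; bit 0 holds the block state (FREE_BLOCK/USED_BLOCK),
     * bit 1 the state of the previous block (PREV_FREE/PREV_USED) */
    size_t size;
    union {
        struct free_ptr_struct free_ptr;    /* links while the block is free */
        u8_t buffer[1];                     /* payload while the block is used */
    } ptr;
} bhdr_t;

/* Bookkeeping record embedded at the beginning of every managed area */
typedef struct area_info_struct {
    bhdr_t *end;                            /* sentinel block closing the area */
    struct area_info_struct *next;
} area_info_t;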
typedef struct TLSF_struct {
    /* the TLSF's structure signature */
    u32_t tlsf_signature;

#if TLSF_USE_LOCKS
    TLSF_MLOCK_T lock;          /* lock type supplied by the platform lock header */
#endif

    /* usage statistics maintained by TLSF_ADD_SIZE()/TLSF_REMOVE_SIZE();
     * members other than the bitmaps and matrix are completed from their
     * uses below */
    size_t used_size;
    size_t max_size;

    /* linked list of all the managed areas */
    area_info_t *area_head;

    /* first-level bitmap: one bit per non-empty row of 'matrix' */
    u32_t fl_bitmap;

    /* second-level bitmaps: one bit per non-empty free list */
    u32_t sl_bitmap[REAL_FLI];

    bhdr_t *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;
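/* TLSF keeps free blocks in a two-level array of segregated lists: the
 * first-level index (fl) selects a power-of-two size range and the
 * second-level index (sl) subdivides that range into MAX_SLI classes.
 * fl_bitmap and sl_bitmap[] record which lists are non-empty, so a suitable
 * list can be located with two bit searches. */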
static __inline__ void set_bit(int nr, u32_t * addr);
static __inline__ void clear_bit(int nr, u32_t * addr);
static __inline__ int ls_bit(int x);
static __inline__ int ms_bit(int x);
static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
static __inline__ bhdr_t *process_area(void *area, size_t size);
#if USE_SBRK || USE_MMAP
static __inline__ void *get_new_area(size_t * size);
#endif

/* The *_ex functions are used before their definitions below. */
void *rtl_malloc_ex(size_t size, void *mem_pool);
void rtl_free_ex(void *ptr, void *mem_pool);
void *rtl_realloc_ex(void *ptr, size_t new_size, void *mem_pool);
void *rtl_calloc_ex(size_t nelem, size_t elem_size, void *mem_pool);
static const int table[] = {
    -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};
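/* table[i] holds the index of the most significant set bit of the byte i
 * (-1 for 0); ls_bit()/ms_bit() use it to locate the least/most significant
 * set bit of a word in constant time. */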
static __inline__ int ls_bit(int i)
{
    unsigned int a;
    unsigned int x = i & -i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}
static __inline__ int ms_bit(int i)
{
    unsigned int a;
    unsigned int x = (unsigned int) i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}
static __inline__ void set_bit(int nr, u32_t * addr)
{
    addr[nr >> 5] |= 1 << (nr & 0x1f);
}

static __inline__ void clear_bit(int nr, u32_t * addr)
{
    addr[nr >> 5] &= ~(1 << (nr & 0x1f));
}
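/* MAPPING_SEARCH() rounds a request up to the end of its size class and
 * returns the (fl, sl) pair to start the search from; MAPPING_INSERT()
 * computes the (fl, sl) pair a block of a given size belongs to, without
 * rounding. */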
static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
{
    int _t;

    if (*_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
    } else {
        _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
        *_r = *_r + _t;
        *_fl = ms_bit(*_r);
        *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
        *_r &= ~_t;
    }
}
static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
{
    if (_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = _r / (SMALL_BLOCK / MAX_SLI);
    } else {
        *_fl = ms_bit(_r);
        *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
    }
}
static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
{
    u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
    bhdr_t *_b = NULL;

    if (_tmp) {
        *_sl = ls_bit(_tmp);
        _b = _tlsf->matrix[*_fl][*_sl];
    } else {
        *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
        if (*_fl > 0) {
            *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
            _b = _tlsf->matrix[*_fl][*_sl];
        }
    }
    return _b;
}
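/* Free-list manipulation macros: EXTRACT_BLOCK_HDR removes the head of
 * matrix[fl][sl], EXTRACT_BLOCK removes an arbitrary block from its list,
 * and INSERT_BLOCK pushes a block onto matrix[fl][sl]; all three keep the
 * fl/sl bitmaps in sync with the lists. */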
#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do {                          \
        _tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next;                     \
        if (_tlsf->matrix[_fl][_sl])                                         \
            _tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = NULL;               \
        else {                                                               \
            clear_bit(_sl, &_tlsf->sl_bitmap[_fl]);                          \
            if (!_tlsf->sl_bitmap[_fl])                                      \
                clear_bit(_fl, &_tlsf->fl_bitmap);                           \
        }                                                                    \
        _b->ptr.free_ptr.prev = NULL;                                        \
        _b->ptr.free_ptr.next = NULL;                                        \
    } while (0)

#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do {                              \
        if (_b->ptr.free_ptr.next)                                           \
            _b->ptr.free_ptr.next->ptr.free_ptr.prev = _b->ptr.free_ptr.prev; \
        if (_b->ptr.free_ptr.prev)                                           \
            _b->ptr.free_ptr.prev->ptr.free_ptr.next = _b->ptr.free_ptr.next; \
        if (_tlsf->matrix[_fl][_sl] == _b) {                                 \
            _tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next;                 \
            if (!_tlsf->matrix[_fl][_sl]) {                                  \
                clear_bit(_sl, &_tlsf->sl_bitmap[_fl]);                      \
                if (!_tlsf->sl_bitmap[_fl])                                  \
                    clear_bit(_fl, &_tlsf->fl_bitmap);                       \
            }                                                                \
        }                                                                    \
        _b->ptr.free_ptr.prev = NULL;                                        \
        _b->ptr.free_ptr.next = NULL;                                        \
    } while (0)

#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do {                               \
        _b->ptr.free_ptr.prev = NULL;                                        \
        _b->ptr.free_ptr.next = _tlsf->matrix[_fl][_sl];                     \
        if (_tlsf->matrix[_fl][_sl])                                         \
            _tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = _b;                 \
        _tlsf->matrix[_fl][_sl] = _b;                                        \
        set_bit(_sl, &_tlsf->sl_bitmap[_fl]);                                \
        set_bit(_fl, &_tlsf->fl_bitmap);                                     \
    } while (0)

#if USE_SBRK || USE_MMAP
static __inline__ void *get_new_area(size_t * size)
{
    void *area;

#if USE_SBRK
    area = (void *) sbrk(0);
    if (((void *) sbrk(*size)) != ((void *) -1))
        return area;
#endif

#ifndef MAP_ANONYMOUS
/* some platforms only provide MAP_ANON */
# define MAP_ANONYMOUS MAP_ANON
#endif

#if USE_MMAP
    *size = ROUNDUP(*size, PAGE_SIZE);
    if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
        return area;
#endif

    return ((void *) ~0);
}
#endif
static __inline__ bhdr_t *process_area(void *area, size_t size)
{
    bhdr_t *b, *lb, *ib;
    area_info_t *ai;

    ib = (bhdr_t *) area;
    ib->size =
        (sizeof(area_info_t) < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
    b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
    b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    ai = (area_info_t *) ib->ptr.buffer;
    ai->next = 0;
    ai->end = lb;
    return ib;
}
static char *mp = NULL;
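/* 'mp' is the default memory pool used by the rtl_tlsf_* wrappers below.
 *
 * rtl_init_memory_pool(): turn 'mem_pool' into a TLSF pool.  The tlsf_t
 * control structure is placed at the start of the buffer and the remaining
 * space becomes one large free block whose usable size is returned
 * (-1 on an invalid pool). */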
size_t rtl_init_memory_pool(size_t mem_pool_size, void *mem_pool)
{
    tlsf_t *tlsf;
    bhdr_t *b, *ib;

    if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
        ERROR_MSG("rtl_init_memory_pool (): memory_pool invalid\n");
        return -1;
    }

    if (((unsigned long) mem_pool & PTR_MASK)) {
        ERROR_MSG("rtl_init_memory_pool (): mem_pool must be aligned to a word\n");
        return -1;
    }
    tlsf = (tlsf_t *) mem_pool;
    /* Check if already initialised */
    if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
        mp = (char *) mem_pool;
        b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
        return b->size & BLOCK_SIZE;
    }

    mp = (char *) mem_pool;

    /* Zeroing the memory pool */
    memset(mem_pool, 0, sizeof(tlsf_t));

    tlsf->tlsf_signature = TLSF_SIGNATURE;

    TLSF_CREATE_LOCK(&tlsf->lock);

    ib = process_area(GET_NEXT_BLOCK(mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))),
                      ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
    b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    rtl_free_ex(b->ptr.buffer, tlsf);
    tlsf->area_head = (area_info_t *) ib->ptr.buffer;

    tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
    tlsf->max_size = tlsf->used_size;

    return (b->size & BLOCK_SIZE);
}
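/* rtl_add_new_area(): add another memory region to an existing pool.  The
 * region is prepared with process_area() and, when it turns out to be
 * physically adjacent to an area already in the pool, the two are merged;
 * otherwise it is linked into the area list.  Returns the usable size of
 * the block made available. */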
size_t rtl_add_new_area(void *area, size_t area_size, void *mem_pool)
{
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    area_info_t *ptr, *ptr_prev, *ai;
    bhdr_t *ib0, *b0, *lb0, *ib1, *b1, *lb1, *next_b;
    size_t savesz;

    savesz = tlsf->used_size;

    memset(area, 0, area_size);
    ptr = tlsf->area_head;
    ptr_prev = 0;

    ib0 = process_area(area, area_size);
    b0 = GET_NEXT_BLOCK(ib0->ptr.buffer, ib0->size & BLOCK_SIZE);
    lb0 = GET_NEXT_BLOCK(b0->ptr.buffer, b0->size & BLOCK_SIZE);

    /* Before inserting the new area, try to merge it with the existing ones */
    while (ptr) {
        ib1 = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
        b1 = GET_NEXT_BLOCK(ib1->ptr.buffer, ib1->size & BLOCK_SIZE);
        lb1 = ptr->end;

        /* Merging the new area with the next physically contiguous one */
        if ((unsigned long) ib1 == (unsigned long) lb0 + BHDR_OVERHEAD) {
            if (tlsf->area_head == ptr) {
                tlsf->area_head = ptr->next;
                ptr = ptr->next;
            } else {
                ptr_prev->next = ptr->next;
                ptr = ptr->next;
            }

            b0->size =
                ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
                               (ib1->size & BLOCK_SIZE) +
                               2 * BHDR_OVERHEAD) | USED_BLOCK | PREV_USED;

            b1->prev_hdr = b0;
            lb0 = lb1;

            continue;
        }

        /* Merging the new area with the previous physically contiguous one */
        if ((unsigned long) lb1->ptr.buffer == (unsigned long) ib0) {
            if (tlsf->area_head == ptr) {
                tlsf->area_head = ptr->next;
                ptr = ptr->next;
            } else {
                ptr_prev->next = ptr->next;
                ptr = ptr->next;
            }

            lb1->size =
                ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
                               (ib0->size & BLOCK_SIZE) +
                               2 * BHDR_OVERHEAD) | USED_BLOCK | (lb1->size & PREV_STATE);
            next_b = GET_NEXT_BLOCK(lb1->ptr.buffer, lb1->size & BLOCK_SIZE);
            next_b->prev_hdr = lb1;
            b0 = lb1;
            ib0 = ib1;

            continue;
        }
        ptr_prev = ptr;
        ptr = ptr->next;
    }

    /* Inserting the area in the list of linked areas */
    ai = (area_info_t *) ib0->ptr.buffer;
    ai->next = tlsf->area_head;
    ai->end = lb0;
    tlsf->area_head = ai;
    rtl_free_ex(b0->ptr.buffer, mem_pool);
    tlsf->used_size = savesz;
    return (b0->size & BLOCK_SIZE);
}
size_t rtl_get_used_size(void *mem_pool ATTRIBUTE_UNUSED)
{
    return ((tlsf_t *) mem_pool)->used_size;
}

size_t rtl_get_max_size(void *mem_pool ATTRIBUTE_UNUSED)
{
    return ((tlsf_t *) mem_pool)->max_size;
}

void rtl_destroy_memory_pool(void *mem_pool)
{
    tlsf_t *tlsf = (tlsf_t *) mem_pool;

    tlsf->tlsf_signature = 0;

    TLSF_DESTROY_LOCK(&tlsf->lock);
}
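/* The rtl_tlsf_* wrappers operate on the default pool 'mp', taking the pool
 * lock around each call; rtl_tlsf_malloc() creates the default pool from
 * sbrk()/mmap() when none exists yet (USE_SBRK/USE_MMAP builds only).
 *
 * Typical use with an explicit pool (illustrative sketch; buffer name and
 * size are chosen arbitrarily here):
 *
 *     static char pool[1 << 16];
 *     rtl_init_memory_pool(sizeof(pool), pool);
 *     void *p = rtl_malloc_ex(128, pool);
 *     rtl_free_ex(p, pool);
 *     rtl_destroy_memory_pool(pool);
 */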
void *rtl_tlsf_malloc(size_t size)
{
    void *ret;

#if USE_MMAP || USE_SBRK
    if (!mp) {
        size_t area_size;
        void *area;

        area_size = sizeof(tlsf_t) + BHDR_OVERHEAD * 8;
        area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
        area = get_new_area(&area_size);
        if (area == ((void *) ~0))
            return NULL;        /* Not enough system memory */
        rtl_init_memory_pool(area_size, area);
    }
#endif

    TLSF_ACQUIRE_LOCK(&((tlsf_t *) mp)->lock);

    ret = rtl_malloc_ex(size, mp);

    TLSF_RELEASE_LOCK(&((tlsf_t *) mp)->lock);

    return ret;
}

void rtl_tlsf_free(void *ptr)
{
    TLSF_ACQUIRE_LOCK(&((tlsf_t *) mp)->lock);

    rtl_free_ex(ptr, mp);

    TLSF_RELEASE_LOCK(&((tlsf_t *) mp)->lock);
}

void *rtl_tlsf_realloc(void *ptr, size_t size)
{
    void *ret;

#if USE_MMAP || USE_SBRK
    if (!ptr)
        return rtl_tlsf_malloc(size);
#endif

    TLSF_ACQUIRE_LOCK(&((tlsf_t *) mp)->lock);

    ret = rtl_realloc_ex(ptr, size, mp);

    TLSF_RELEASE_LOCK(&((tlsf_t *) mp)->lock);

    return ret;
}

void *rtl_tlsf_calloc(size_t nelem, size_t elem_size)
{
    void *ret;

    TLSF_ACQUIRE_LOCK(&((tlsf_t *) mp)->lock);

    ret = rtl_calloc_ex(nelem, elem_size, mp);

    TLSF_RELEASE_LOCK(&((tlsf_t *) mp)->lock);

    return ret;
}
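/* rtl_malloc_ex(): the core allocator.  The request is rounded up to its
 * size class, a suitable free block is taken from the segregated lists
 * (growing the pool via get_new_area() when USE_SBRK/USE_MMAP allow it),
 * and the block is split when the remainder is large enough to hold a
 * header of its own. */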
void *rtl_malloc_ex(size_t size, void *mem_pool)
{
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    bhdr_t *b, *b2, *next_b;
    int fl, sl;
    size_t tmp_size;

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);

    /* Rounding up the requested size and calculating fl and sl */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Searching a free block; note that this function changes fl and sl,
       so they are no longer valid when it fails */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
#if USE_MMAP || USE_SBRK
    if (!b) {
        size_t area_size;
        void *area;

        /* Growing the pool when needed */
        area_size = size + BHDR_OVERHEAD * 8;
        area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
        area = get_new_area(&area_size);
        if (area == ((void *) ~0))
            return NULL;        /* Not enough system memory */
        rtl_add_new_area(area, area_size, mem_pool);
        MAPPING_SEARCH(&size, &fl, &sl);
        b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
    }
#endif
    if (!b)
        return NULL;            /* Not found */

    EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);

    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE) - size;
    if (tmp_size >= sizeof(bhdr_t)) {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        next_b->prev_hdr = b2;
        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, tlsf, fl, sl);

        b->size = size | (b->size & PREV_STATE);
    } else {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK);       /* Now it's used */
    }

    TLSF_ADD_SIZE(tlsf, b);

    return (void *) b->ptr.buffer;
}
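/* rtl_free_ex(): return a block to the pool.  The freed block is coalesced
 * with the next block and/or the previous block when those are free, and
 * the resulting block is reinserted into the segregated lists. */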
void rtl_free_ex(void *ptr, void *mem_pool)
{
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    bhdr_t *b, *tmp_b;
    int fl = 0, sl = 0;

    if (!ptr)
        return;
    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);

#if CHECK_DOUBLE_FREE
    if (b->size & FREE_BLOCK) {
        ERROR_MSG("rtl_free_ex(): double free %p\n", ptr);
        return;
    }
#endif

    b->size |= FREE_BLOCK;

    TLSF_REMOVE_SIZE(tlsf, b);

    b->ptr.free_ptr.prev = NULL;
    b->ptr.free_ptr.next = NULL;
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    if (tmp_b->size & FREE_BLOCK) {
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
    }
    if (b->size & PREV_FREE) {
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
    INSERT_BLOCK(b, tlsf, fl, sl);

    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
}
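/* rtl_realloc_ex(): resize a block in place when possible (shrinking it or
 * absorbing a free neighbouring block); otherwise allocate a new block,
 * copy the data and free the old one. */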
void *rtl_realloc_ex(void *ptr, size_t new_size, void *mem_pool)
{
    tlsf_t *tlsf = (tlsf_t *) mem_pool;
    void *ptr_aux;
    unsigned int cpsize;
    bhdr_t *b, *tmp_b, *next_b;
    int fl, sl;
    size_t tmp_size;

    if (!ptr) {
        if (new_size)
            return (void *) rtl_malloc_ex(new_size, mem_pool);
        return NULL;
    } else if (!new_size) {
        rtl_free_ex(ptr, mem_pool);
        return NULL;
    }

    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);

#if CHECK_DOUBLE_FREE
    if (b->size & FREE_BLOCK) {
        ERROR_MSG("rtl_realloc_ex(): invalid pointer %p\n", ptr);
        return (void *) rtl_malloc_ex(new_size, mem_pool);
    }
#endif

    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    new_size = (new_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(new_size);
    tmp_size = (b->size & BLOCK_SIZE);
    if (new_size <= tmp_size) {
        TLSF_REMOVE_SIZE(tlsf, b);
        if (next_b->size & FREE_BLOCK) {
            MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
            EXTRACT_BLOCK(next_b, tlsf, fl, sl);
            tmp_size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
            next_b = GET_NEXT_BLOCK(next_b->ptr.buffer, next_b->size & BLOCK_SIZE);
        }
        tmp_size -= new_size;
        if (tmp_size >= sizeof(bhdr_t)) {
            tmp_size -= BHDR_OVERHEAD;
            tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
            tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
            next_b->prev_hdr = tmp_b;
            next_b->size |= PREV_FREE;
            MAPPING_INSERT(tmp_size, &fl, &sl);
            INSERT_BLOCK(tmp_b, tlsf, fl, sl);
            b->size = new_size | (b->size & PREV_STATE);
        }
        TLSF_ADD_SIZE(tlsf, b);
        return (void *) b->ptr.buffer;
    }
    if ((next_b->size & FREE_BLOCK)) {
        if (new_size <= (tmp_size + (next_b->size & BLOCK_SIZE))) {
            TLSF_REMOVE_SIZE(tlsf, b);
            MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
            EXTRACT_BLOCK(next_b, tlsf, fl, sl);
            b->size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
            next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
            next_b->prev_hdr = b;
            next_b->size &= ~PREV_FREE;
            tmp_size = (b->size & BLOCK_SIZE) - new_size;
            if (tmp_size >= sizeof(bhdr_t)) {
                tmp_size -= BHDR_OVERHEAD;
                tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
                tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
                next_b->prev_hdr = tmp_b;
                next_b->size |= PREV_FREE;
                MAPPING_INSERT(tmp_size, &fl, &sl);
                INSERT_BLOCK(tmp_b, tlsf, fl, sl);
                b->size = new_size | (b->size & PREV_STATE);
            }
            TLSF_ADD_SIZE(tlsf, b);
            return (void *) b->ptr.buffer;
        }
    }

    if (!(ptr_aux = rtl_malloc_ex(new_size, mem_pool))) {
        return NULL;
    }

    cpsize = ((b->size & BLOCK_SIZE) > new_size) ? new_size : (b->size & BLOCK_SIZE);

    memcpy(ptr_aux, ptr, cpsize);

    rtl_free_ex(ptr, mem_pool);
    return ptr_aux;
}
void *rtl_calloc_ex(size_t nelem, size_t elem_size, void *mem_pool)
{
    void *ptr;

    if (nelem <= 0 || elem_size <= 0)
        return NULL;

    if (!(ptr = rtl_malloc_ex(nelem * elem_size, mem_pool)))
        return NULL;
    memset(ptr, 0, nelem * elem_size);

    return ptr;
}
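/* Debug helpers: dump a raw memory region and print the allocator's
 * internal state (bitmaps, free lists, and every block of every area)
 * through PRINT_MSG(). */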
extern void dump_memory_region(unsigned char *mem_ptr, unsigned int size);
extern void print_block(bhdr_t * b);
extern void print_tlsf(tlsf_t * tlsf);
void print_all_blocks(tlsf_t * tlsf);

void dump_memory_region(unsigned char *mem_ptr, unsigned int size)
{
    unsigned long begin = (unsigned long) mem_ptr;
    unsigned long end = (unsigned long) mem_ptr + size;
    int column = 0;

    begin >>= 2;
    begin <<= 2;

    end >>= 2;
    end++;
    end <<= 2;

    PRINT_MSG("\nMemory region dumped: 0x%lx - 0x%lx\n\n", begin, end);

    column = 0;
    PRINT_MSG("0x%lx ", begin);

    while (begin < end) {
        if (((unsigned char *) begin)[0] == 0)
            PRINT_MSG("..");
        else
            PRINT_MSG("%02x", ((unsigned char *) begin)[0]);
        if (((unsigned char *) begin)[1] == 0)
            PRINT_MSG(".. ");
        else
            PRINT_MSG("%02x ", ((unsigned char *) begin)[1]);
        begin += 2;
        column++;
        if (column == 8) {
            PRINT_MSG("\n0x%lx ", begin);
            column = 0;
        }
    }
    PRINT_MSG("\n\n");
}
void print_block(bhdr_t * b)
{
    if (!b)
        return;
    PRINT_MSG(">> [%p] (", b);
    if ((b->size & BLOCK_SIZE))
        PRINT_MSG("%lu bytes, ", (unsigned long) (b->size & BLOCK_SIZE));
    else
        PRINT_MSG("sentinel, ");
    if ((b->size & BLOCK_STATE) == FREE_BLOCK)
        PRINT_MSG("free [%p, %p], ", b->ptr.free_ptr.prev, b->ptr.free_ptr.next);
    else
        PRINT_MSG("used, ");
    if ((b->size & PREV_STATE) == PREV_FREE)
        PRINT_MSG("prev. free [%p])\n", b->prev_hdr);
    else
        PRINT_MSG("prev used)\n");
}
void print_tlsf(tlsf_t * tlsf)
{
    bhdr_t *next;
    int i, j;

    PRINT_MSG("\nTLSF at %p\n", tlsf);

    PRINT_MSG("FL bitmap: 0x%x\n\n", (unsigned) tlsf->fl_bitmap);

    for (i = 0; i < REAL_FLI; i++) {
        if (tlsf->sl_bitmap[i])
            PRINT_MSG("SL bitmap 0x%x\n", (unsigned) tlsf->sl_bitmap[i]);
        for (j = 0; j < MAX_SLI; j++) {
            next = tlsf->matrix[i][j];
            if (next)
                PRINT_MSG("-> [%d][%d]\n", i, j);
            while (next) {
                print_block(next);
                next = next->ptr.free_ptr.next;
            }
        }
    }
}
void print_all_blocks(tlsf_t * tlsf)
{
    area_info_t *ai;
    bhdr_t *next;

    PRINT_MSG("\nTLSF at %p\nALL BLOCKS\n\n", tlsf);
    ai = tlsf->area_head;
    while (ai) {
        next = (bhdr_t *) ((char *) ai - BHDR_OVERHEAD);
        while (next) {
            print_block(next);
            if ((next->size & BLOCK_SIZE))
                next = GET_NEXT_BLOCK(next->ptr.buffer, next->size & BLOCK_SIZE);