mirror of https://github.com/GrapheneOS/hardened_malloc.git (synced 2025-10-31 00:06:33 +01:00)

	improve code reuse for malloc API entry points
commit 26b74b87bf
parent 89faba4232

1 changed file with 3 additions and 7 deletions:

 h_malloc.c | 10 +++-------
@@ -1321,13 +1321,13 @@ static size_t adjust_size_for_canary(size_t size) {
 static inline void *alloc(size_t size) {
     unsigned arena = init();
     thread_unseal_metadata();
-    size = adjust_size_for_canary(size);
     void *p = allocate(arena, size);
     thread_seal_metadata();
     return p;
 }
 
 EXPORT void *h_malloc(size_t size) {
+    size = adjust_size_for_canary(size);
     return alloc(size);
 }
 
@@ -1337,11 +1337,8 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
         errno = ENOMEM;
         return NULL;
     }
-    unsigned arena = init();
-    thread_unseal_metadata();
     total_size = adjust_size_for_canary(total_size);
-    void *p = allocate(arena, total_size);
-    thread_seal_metadata();
+    void *p = alloc(total_size);
     if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= MAX_SLAB_SIZE_CLASS) {
         memset(p, 0, total_size - canary_size);
     }
@@ -1349,12 +1346,11 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
 }
 
 EXPORT void *h_realloc(void *old, size_t size) {
+    size = adjust_size_for_canary(size);
     if (old == NULL) {
         return alloc(size);
     }
 
-    size = adjust_size_for_canary(size);
-
     if (size > MAX_SLAB_SIZE_CLASS) {
         size = get_large_size_class(size);
         if (unlikely(!size)) {
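
For reference, a minimal self-contained sketch of the shape the entry points take after this change: the common prologue/epilogue (arena selection via init(), metadata unsealing/sealing) lives in one static inline alloc() helper, and each exported entry point only performs its own canary size adjustment before delegating. The bodies of init(), allocate(), thread_unseal_metadata(), thread_seal_metadata(), adjust_size_for_canary() and the EXPORT macro below are hypothetical stubs added only so the sketch compiles; the real definitions live elsewhere in h_malloc.c.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for hardened_malloc internals so this sketch
 * compiles on its own; the real definitions are elsewhere in h_malloc.c. */
#define EXPORT
static unsigned init(void) { return 0; }                    /* choose an arena */
static void thread_unseal_metadata(void) {}                 /* make allocator metadata writable */
static void thread_seal_metadata(void) {}                   /* protect allocator metadata again */
static void *allocate(unsigned arena, size_t size) { (void)arena; return malloc(size); }
static size_t adjust_size_for_canary(size_t size) { return size; }

/* Shared body for the malloc API entry points: arena setup, metadata
 * unsealing, the allocation itself, and metadata sealing. */
static inline void *alloc(size_t size) {
    unsigned arena = init();
    thread_unseal_metadata();
    void *p = allocate(arena, size);
    thread_seal_metadata();
    return p;
}

/* Each entry point now applies its own canary adjustment and delegates. */
EXPORT void *h_malloc(size_t size) {
    size = adjust_size_for_canary(size);
    return alloc(size);
}

int main(void) {
    void *p = h_malloc(16);
    printf("h_malloc(16) -> %p\n", p);
    free(p);
    return 0;
}

With the shared helper in place, h_calloc() and h_realloc() reuse the same sealed-metadata allocation path instead of repeating it, which is the duplication this commit removes.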