mirror of https://github.com/GrapheneOS/hardened_malloc.git
	Add workaround for suspend service on 5th generation Pixel devices
parent 0d6d63cbe7
commit 7e3d763a07

1 changed file with 29 additions and 5 deletions
h_malloc.c
@@ -76,6 +76,9 @@ static union {
 #ifdef USE_PKEY
         int metadata_pkey;
 #endif
+        bool zero_on_free;
+        bool purge_slabs;
+        bool region_quarantine_protect;
     };
     char padding[PAGE_SIZE];
 } ro __attribute__((aligned(PAGE_SIZE)));
@@ -443,7 +446,7 @@ static void *slot_pointer(size_t size, void *slab, size_t slot) {
 }
 
 static void write_after_free_check(const char *p, size_t size) {
-    if (!WRITE_AFTER_FREE_CHECK) {
+    if (!WRITE_AFTER_FREE_CHECK || !ro.zero_on_free) {
         return;
     }
 
@@ -693,7 +696,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
     if (likely(!is_zero_size)) {
         check_canary(metadata, p, size);
 
-        if (ZERO_ON_FREE) {
+        if (ro.zero_on_free) {
             memset(p, 0, size - canary_size);
         }
     }
@@ -770,7 +773,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
         metadata->prev = NULL;
 
         if (c->empty_slabs_total + slab_size > max_empty_slabs_total) {
-            if (!memory_map_fixed(slab, slab_size)) {
+            if (ro.purge_slabs && !memory_map_fixed(slab, slab_size)) {
                 label_slab(slab, slab_size, class);
                 stats_slab_deallocate(c, slab_size);
                 enqueue_free_slab(c, metadata);
@@ -855,7 +858,7 @@ static void regions_quarantine_deallocate_pages(void *p, size_t size, size_t gua
         return;
     }
 
-    if (unlikely(memory_map_fixed(p, size))) {
+    if (!ro.region_quarantine_protect || unlikely(memory_map_fixed(p, size))) {
         memory_purge(p, size);
     } else {
         memory_set_name(p, size, "malloc large quarantine");
@@ -1071,6 +1074,22 @@ static inline void enforce_init(void) {
     }
 }
 
+COLD static void handle_bugs(void) {
+    char path[256];
+    if (readlink("/proc/self/exe", path, sizeof(path)) == -1) {
+        return;
+    }
+
+    // Pixel 4a 5G, 5 and 5a suspend service
+    const char suspend_service[] = "/system/bin/hw/android.system.suspend@1.0-service";
+    if (strcmp(suspend_service, path) == 0) {
+        ro.zero_on_free = false;
+        // ro.purge_slabs = false;
+        // ro.region_quarantine_protect = false;
+    }
+
+}
+
 COLD static void init_slow_path(void) {
     static struct mutex lock = MUTEX_INITIALIZER;
 
@@ -1085,6 +1104,11 @@ COLD static void init_slow_path(void) {
     ro.metadata_pkey = pkey_alloc(0, 0);
 #endif
 
+    ro.purge_slabs = true;
+    ro.zero_on_free = ZERO_ON_FREE;
+    ro.region_quarantine_protect = true;
+    handle_bugs();
+
     if (unlikely(sysconf(_SC_PAGESIZE) != PAGE_SIZE)) {
         fatal_error("runtime page size does not match compile-time page size which is not supported");
     }
@@ -1360,7 +1384,7 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     }
     total_size = adjust_size_for_canary(total_size);
     void *p = alloc(total_size);
-    if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
+    if (!ro.zero_on_free && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
         memset(p, 0, total_size - canary_size);
     }
     return p;
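For reference, the workaround keys off the path of the current executable: during initialization, handle_bugs() resolves /proc/self/exe and, when it matches the suspend service, disables zero-on-free for that process only. Below is a minimal standalone sketch of the same detection, not part of the commit; running_as() and the main() harness are illustrative names introduced here. Since readlink() does not append a NUL terminator, the sketch terminates the buffer explicitly before comparing.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

// Illustrative copy of the path matched by the commit.
static const char suspend_service[] = "/system/bin/hw/android.system.suspend@1.0-service";

// Returns true when the running process is the given executable.
static bool running_as(const char *target) {
    char path[256];
    // readlink() does not NUL-terminate, so leave room and terminate manually.
    ssize_t len = readlink("/proc/self/exe", path, sizeof(path) - 1);
    if (len == -1) {
        return false;
    }
    path[len] = '\0';
    return strcmp(target, path) == 0;
}

int main(void) {
    // On a Pixel 4a 5G, 5 or 5a this would only match inside the suspend service.
    printf("suspend service match: %s\n", running_as(suspend_service) ? "yes" : "no");
    return 0;
}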