malloc.c diff from glibc 2.31 to 2.35
Let's analyze what changed by looking at the git diff of malloc.c between glibc 2.31 and glibc 2.35.
__malloc_assert
@@ -279,25 +287,22 @@
#define MALLOC_DEBUG 0
#endif
+#if IS_IN (libc)
#ifndef NDEBUG
# define __assert_fail(assertion, file, line, function) \
__malloc_assert(assertion, file, line, function)
-extern const char *__progname;
-
-static void
+_Noreturn static void
__malloc_assert (const char *assertion, const char *file, unsigned int line,
const char *function)
{
- (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
- __progname, __progname[0] ? ": " : "",
- file, line,
- function ? function : "", function ? ": " : "",
- assertion);
- fflush (stderr);
- abort ();
+ __libc_message (do_abort, "\
+Fatal glibc error: malloc assertion failure in %s: %s\n",
+ function, assertion);
+ __builtin_unreachable ();
}
#endif
+#endif
- Not a major change, but where __malloc_assert used to call __fxprintf and then fflush, it is now consolidated into a single __libc_message call. The fact that fflush (stderr) is no longer executed could matter in some situations.
SAFE LINKING
@@ -327,12 +332,28 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
# define MAX_TCACHE_COUNT UINT16_MAX
#endif
+/* Safe-Linking:
+ Use randomness from ASLR (mmap_base) to protect single-linked lists
+ of Fast-Bins and TCache. That is, mask the "next" pointers of the
+ lists' chunks, and also perform allocation alignment checks on them.
+ This mechanism reduces the risk of pointer hijacking, as was done with
+ Safe-Unlinking in the double-linked lists of Small-Bins.
+ It assumes a minimum page size of 4096 bytes (12 bits). Systems with
+ larger pages provide less entropy, although the pointer mangling
+ still works. */
+
+#define PROTECT_PTR(pos, ptr) \
+ ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
+#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
- This is a mechanism that protects the next pointers of tcache and fastbin chunks; it has been applied since glibc 2.32.
- The details show up in tcache_put and tcache_get below; a standalone demo of the two macros follows first.
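To see how the masking behaves, here is a small standalone demo (not glibc code: struct fake_entry and the variable names are made up for illustration) that copies the two macros and round-trips a pointer through them:

#include <stdio.h>
#include <stdlib.h>

/* The two macros as they appear in glibc 2.35 malloc.c: the stored pointer is
   XORed with the address of its storage slot shifted right by 12 (the bits
   above the page offset, randomized by ASLR).  */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)

/* Hypothetical stand-in for a tcache_entry, just for this demo.  */
struct fake_entry { struct fake_entry *next; };

int main (void)
{
  struct fake_entry *a = malloc (sizeof *a);
  struct fake_entry *b = malloc (sizeof *b);

  /* Store b's address the way tcache_put stores e->next.  */
  a->next = PROTECT_PTR (&a->next, b);
  printf ("real next    : %p\n", (void *) b);
  printf ("stored next  : %p\n", (void *) a->next);

  /* REVEAL_PTR recovers the original pointer because it reuses the address
     of the slot (&a->next) as the key.  */
  printf ("revealed next: %p\n", (void *) REVEAL_PTR (a->next));

  free (b);
  free (a);
  return 0;
}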
tcache_key
The type of the tcache entry's key field has changed, and a process-wide tcache_key was introduced.
@@ -2895,7 +3122,7 @@ typedef struct tcache_entry
{
struct tcache_entry *next;
/* This field exists to detect double frees. */
- struct tcache_perthread_struct *key;
+ uintptr_t key;
} tcache_entry;
@@ -2912,6 +3139,31 @@ typedef struct tcache_perthread_struct
static __thread bool tcache_shutting_down = false;
static __thread tcache_perthread_struct *tcache = NULL;
+/* Process-wide key to try and catch a double-free in the same thread.  */
+static uintptr_t tcache_key;
+
+/* The value of tcache_key does not really have to be a cryptographically
+ secure random number. It only needs to be arbitrary enough so that it does
+ not collide with values present in applications. If a collision does happen
+ consistently enough, it could cause a degradation in performance since the
+ entire list is checked to check if the block indeed has been freed the
+ second time. The odds of this happening are exceedingly low though, about 1
+ in 2^wordsize. There is probably a higher chance of the performance
+ degradation being due to a double free where the first free happened in a
+ different thread; that's a case this check does not cover. */
+static void
+tcache_key_initialize (void)
+{
+ if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
+ != sizeof (tcache_key))
+ {
+ tcache_key = random_bits ();
+#if __WORDSIZE == 64
+ tcache_key = (tcache_key << 32) | random_bits ();
+#endif
+ }
+}
- Previously, key held the address of the tcache_perthread_struct; now a process-wide key is generated from a random value. A self-contained sketch of that initialization follows.
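tcache_key_initialize relies on the internal __getrandom and random_bits helpers. A rough userland sketch of the same idea (assuming Linux getrandom(2) from <sys/random.h> is available; the clock-based fallback here is only a stand-in for glibc's internal random_bits):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <sys/random.h>   /* getrandom, GRND_NONBLOCK */

static uintptr_t tcache_key;

static void
tcache_key_initialize (void)
{
  /* Ask the kernel RNG without blocking, like the glibc code above.  */
  if (getrandom (&tcache_key, sizeof (tcache_key), GRND_NONBLOCK)
      != sizeof (tcache_key))
    {
      /* glibc falls back to random_bits (); that helper is not exported,
         so this sketch just stirs in the monotonic clock instead.  */
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      tcache_key = (uintptr_t) ts.tv_nsec ^ (uintptr_t) ts.tv_sec;
    }
}

int main (void)
{
  tcache_key_initialize ();
  printf ("tcache_key = %#lx\n", (unsigned long) tcache_key);
  return 0;
}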
tcache_put
@@ -2921,9 +3173,9 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
/* Mark this chunk as "in the tcache" so the test in _int_free will
detect a double free. */
- e->key = tcache;
+ e->key = tcache_key;
- e->next = tcache->entries[tc_idx];
+ e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
tcache->entries[tc_idx] = e;
++(tcache->counts[tc_idx]);
}
- When a chunk is put into the tcache, e->next is stored in mangled form.
- The address where e->next is stored is shifted right by 12 and XORed with the value of tcache->entries[tc_idx]: #define PROTECT_PTR(pos, ptr) ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
- For example, if the chunk sits at 0x4000000 and entries holds 0 (the bin is empty), the stored value is 0x4000 ^ 0 = 0x4000, as the small check below confirms.
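A tiny check of that arithmetic (the addresses are hypothetical). Note that because the XOR key is 0 for the first chunk freed into an empty bin, the stored value is simply the slot address >> 12, which is why leaking a single next field still reveals the upper bits of a heap address:

#include <stdio.h>
#include <stddef.h>

#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))

int main (void)
{
  size_t next_slot = 0x4000000;   /* hypothetical address of e->next */
  void  *entries   = NULL;        /* the bin was empty */

  /* 0x4000000 >> 12 == 0x4000; XORed with 0 it stays 0x4000.  */
  printf ("stored next = %p\n", (void *) PROTECT_PTR (next_slot, entries));
  return 0;
}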
tcache_get
@@ -2934,9 +3186,11 @@ static __always_inline void *
tcache_get (size_t tc_idx)
{
tcache_entry *e = tcache->entries[tc_idx];
- tcache->entries[tc_idx] = e->next;
+ if (__glibc_unlikely (!aligned_OK (e)))
+ malloc_printerr ("malloc(): unaligned tcache chunk detected");
+ tcache->entries[tc_idx] = REVEAL_PTR (e->next);
--(tcache->counts[tc_idx]);
- e->key = NULL;
+ e->key = 0;
return (void *) e;
}
- When tcache_get takes a chunk out, it first checks that the chunk sitting in entries is properly aligned.
- Then, when e->next is written back into entries, it is demangled first: #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
- e->key is now cleared with 0 rather than NULL, matching the new uintptr_t type. What "properly aligned" means concretely is sketched below.
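For reference, the alignment test is aligned_OK, which simply checks the low bits below MALLOC_ALIGNMENT. A minimal sketch with values assumed for x86-64 (SIZE_SZ = 8, so MALLOC_ALIGNMENT = 16; the example addresses are hypothetical):

#include <stdio.h>
#include <stddef.h>

/* Assumed x86-64 values.  */
#define MALLOC_ALIGNMENT   16
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define aligned_OK(m)      (((size_t) (m) & MALLOC_ALIGN_MASK) == 0)

int main (void)
{
  void *ok  = (void *) 0x55550000a010;   /* 16-byte aligned user pointer */
  void *bad = (void *) 0x55550000a018;   /* e.g. a fake entry forged mid-structure */

  printf ("%p -> aligned_OK = %d\n", ok,  aligned_OK (ok));   /* prints 1 */
  printf ("%p -> aligned_OK = %d\n", bad, aligned_OK (bad));  /* prints 0: malloc_printerr path */
  return 0;
}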
__libc_malloc
malloc_hook
@@ -3027,10 +3286,8 @@ __libc_malloc (size_t bytes)
_Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
"PTRDIFF_MAX is not more than half of SIZE_MAX");
- void *(*hook) (size_t, const void *)
- = atomic_forced_read (__malloc_hook);
- if (__builtin_expect (hook != NULL, 0))
- return (*hook)(bytes, RETURN_ADDRESS (0));
+ if (!__malloc_initialized)
+ ptmalloc_init ();
- The __malloc_hook call that used to sit here is gone: since glibc 2.34 the malloc hooks are no longer invoked, so the classic hook-overwrite technique no longer applies. Instead, __libc_malloc now calls ptmalloc_init itself if the allocator is not yet initialized.
tag_new_usable
@@ -3048,14 +3305,15 @@ __libc_malloc (size_t bytes)
&& tcache
&& tcache->counts[tc_idx] > 0)
{
- return tcache_get (tc_idx);
+ victim = tcache_get (tc_idx);
+ return tag_new_usable (victim);
}
DIAG_POP_NEEDS_COMMENT;
#endif
if (SINGLE_THREAD_P)
{
- victim = _int_malloc (&main_arena, bytes);
+ victim = tag_new_usable (_int_malloc (&main_arena, bytes));
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
&main_arena == arena_for_chunk (mem2chunk (victim)));
return victim;
@@ -3076,6 +3334,8 @@ __libc_malloc (size_t bytes)
if (ar_ptr != NULL)
__libc_lock_unlock (ar_ptr->mutex);
+ victim = tag_new_usable (victim);
+
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return victim;
- Whether the allocation comes from tcache_get or from _int_malloc, a call to tag_new_usable has been added to tag the returned chunk.
- This only does anything when mtag_enabled is set (i.e., on systems with memory tagging such as ARM MTE); otherwise it returns the pointer unchanged, as the function below shows.
static __always_inline void *
tag_new_usable (void *ptr)
{
if (__glibc_unlikely (mtag_enabled) && ptr)
{
mchunkptr cp = mem2chunk(ptr);
ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
}
return ptr;
}
__libc_free
@@ -3088,17 +3348,16 @@ __libc_free (void *mem)
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
- void (*hook) (void *, const void *)
- = atomic_forced_read (__free_hook);
- if (__builtin_expect (hook != NULL, 0))
- {
- (*hook)(mem, RETURN_ADDRESS (0));
- return;
- }
-
if (mem == 0) /* free(0) has no effect */
return;
+ /* Quickly check that the freed pointer matches the tag for the memory.
+ This gives a useful double-free detection. */
+ if (__glibc_unlikely (mtag_enabled))
+ *(volatile char *)mem;
+
+ int err = errno;
+
- Likewise, the __free_hook call has been removed.
- In addition, when memory tagging is enabled, the freed pointer is read once through *(volatile char *) mem, so a tag mismatch gives a cheap early double-free detection.
_int_malloc
fastbin check
@@ -3570,8 +3823,11 @@ _int_malloc (mstate av, size_t bytes)
victim = pp; \
if (victim == NULL) \
break; \
+ pp = REVEAL_PTR (victim->fd); \
+ if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
} \
- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
+ while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
!= victim); \
if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
@@ -3583,8 +3839,11 @@ _int_malloc (mstate av, size_t bytes)
if (victim != NULL)
{
+ if (__glibc_unlikely (misaligned_chunk (victim)))
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
+
if (SINGLE_THREAD_P)
- *fb = victim->fd;
+ *fb = REVEAL_PTR (victim->fd);
else
REMOVE_FB (fb, pp, victim);
if (__glibc_likely (victim != NULL))
@@ -3605,8 +3864,10 @@ _int_malloc (mstate av, size_t bytes)
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = *fb) != NULL)
{
+ if (__glibc_unlikely (misaligned_chunk (tc_victim)))
+ malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
if (SINGLE_THREAD_P)
- *fb = tc_victim->fd;
+ *fb = REVEAL_PTR (tc_victim->fd);
else
{
REMOVE_FB (fb, pp, tc_victim);
- Just like the tcache, the fd pointer is demangled with REVEAL_PTR when it is read out of a fastbin.
- In addition, every chunk taken from a fastbin is checked for proper alignment. A short sketch of what this means for a corrupted fd follows.
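One practical consequence (a hedged sketch with hypothetical addresses, not glibc code): overwriting a fastbin or tcache fd with a raw target address no longer works, because glibc demangles whatever it finds there. A forged fd has to be pre-mangled against the address of the fd slot itself, which in practice requires a heap address leak:

#include <stdio.h>
#include <stddef.h>

#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))

int main (void)
{
  /* Hypothetical addresses, purely for illustration.  */
  void *fd_slot = (void *) 0x55555555a2a0;   /* where the corrupted fd lives   */
  void *target  = (void *) 0x55555559e010;   /* where we want malloc to return */

  /* The value that has to be written into fd so that REVEAL_PTR yields target.  */
  void *forged = PROTECT_PTR (fd_slot, target);
  printf ("value to place in fd: %p\n", forged);

  /* What glibc computes when it takes the chunk off the list.  */
  printf ("glibc would follow  : %p\n",
          (void *) ((((size_t) fd_slot) >> 12) ^ (size_t) forged));
  return 0;
}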
_int_free
tcache free
@@ -4190,17 +4451,24 @@ _int_free (mstate av, mchunkptr p, int have_lock)
trust it (it also matches random payload data at a 1 in
2^<size_t> chance), so verify it's not an unlikely
coincidence before aborting. */
- if (__glibc_unlikely (e->key == tcache))
+ if (__glibc_unlikely (e->key == tcache_key))
{
tcache_entry *tmp;
+ size_t cnt = 0;
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
for (tmp = tcache->entries[tc_idx];
tmp;
- tmp = tmp->next)
- if (tmp == e)
- malloc_printerr ("free(): double free detected in tcache 2");
- /* If we get here, it was a coincidence. We've wasted a
- few cycles, but don't abort. */
+ tmp = REVEAL_PTR (tmp->next), ++cnt)
+ {
+ if (cnt >= mp_.tcache_count)
+ malloc_printerr ("free(): too many chunks detected in tcache");
+ if (__glibc_unlikely (!aligned_OK (tmp)))
+ malloc_printerr ("free(): unaligned chunk detected in tcache 2");
+ if (tmp == e)
+ malloc_printerr ("free(): double free detected in tcache 2");
+ /* If we get here, it was a coincidence. We've wasted a
+ few cycles, but don't abort. */
+ }
}
if (tcache->counts[tc_idx] < mp_.tcache_count)
- The routine that detects a tcache double free (dfb) has changed slightly.
- The scan still starts from tcache->entries[tc_idx], but if it walks more than mp_.tcache_count chunks (7 by default) it reports an error.
- It also checks that every chunk on the list is properly aligned. A minimal double-free trigger is sketched below.
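For reference, the simplest way to hit this path is an immediate double free of the same tcache-sized chunk; on both glibc 2.31 and 2.35 this aborts with the message shown in the diff above:

#include <stdlib.h>

int main (void)
{
  void *p = malloc (0x20);

  free (p);   /* first free: e->key is set to tcache_key (or to tcache in 2.31) */
  free (p);   /* second free: the key matches, the list walk finds e, and glibc
                 aborts with "free(): double free detected in tcache 2" */
  return 0;
}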
fastbin free
@@ -4264,7 +4532,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old;
+ p->fd = PROTECT_PTR (&p->fd, old);
*fb = p;
}
else
@@ -4274,7 +4542,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
- p->fd = old2 = old;
+ old2 = old;
+ p->fd = PROTECT_PTR (&p->fd, old);
}
while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
!= old2);
- When a chunk is inserted into a fastbin as well, its fd is stored in mangled form (PROTECT_PTR).
malloc_consolidate
@@ -4472,13 +4741,17 @@ static void malloc_consolidate(mstate av)
if (p != 0) {
do {
{
+ if (__glibc_unlikely (misaligned_chunk (p)))
+ malloc_printerr ("malloc_consolidate(): "
+ "unaligned fastbin chunk detected");
+
unsigned int idx = fastbin_index (chunksize (p));
if ((&fastbin (av, idx)) != fb)
malloc_printerr ("malloc_consolidate(): invalid chunk size");
}
check_inuse_chunk(av, p);
- nextp = p->fd;
+ nextp = REVEAL_PTR (p->fd);
/* Slightly streamlined version of consolidation code in free() */
size = chunksize (p);
In malloc_consolidate, each fastbin chunk is likewise checked for proper alignment before it is used, and its fd is read back through REVEAL_PTR.
Summary
- Safe-Linking was added to the tcache and fastbins, so the fd/next pointers are now stored mangled.
- A misaligned-chunk check was added when chunks are taken out of the tcache and fastbins for allocation.
- malloc_consolidate also checks chunk alignment.