malloc.c diff from glibc 2.35 to 2.39
Let's analyze what changed in malloc.c between glibc 2.35 and 2.39 by looking at the git diff of the file.
global_max_fast
@@ -1778,7 +1765,7 @@ typedef struct malloc_chunk *mfastbinptr;
#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
/* Maximum size of memory handled in fastbins. */
-static INTERNAL_SIZE_T global_max_fast;
+static uint8_t global_max_fast;
/*
Set value of max_fast.
- The type of the variable was changed from 8 bytes (INTERNAL_SIZE_T) to 1 byte (uint8_t).
- This change appears to have been made because attacks that overwrite the global_max_fast value to abuse the fastbins have been used in exploits.
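To see why shrinking this variable matters, here is a minimal standalone sketch (model code, not glibc source) of the routing decision in _int_free, which compares the chunk size against get_max_fast (). With the old 8-byte variable, an attacker who overwrote it with a huge value could force arbitrary sizes down the fastbin path; a single byte caps the reachable maximum at 255.

#include <stdint.h>
#include <stdio.h>

/* Model of the fastbin routing decision in _int_free.  In glibc 2.39
   the backing variable is one byte, so even a fully
   attacker-controlled value caps out at 255.  */
static uint8_t global_max_fast = 0x80;   /* DEFAULT_MXFAST on 64-bit */

static int
goes_to_fastbin (size_t chunk_size)
{
  return chunk_size <= (size_t) global_max_fast;
}

int
main (void)
{
  printf ("0x40  -> fastbin? %d\n", goes_to_fastbin (0x40));   /* 1 */
  printf ("0x400 -> fastbin? %d\n", goes_to_fastbin (0x400));  /* 0 */

  /* Strongest possible corruption of the new one-byte variable; the
     old INTERNAL_SIZE_T could be set to 0xffffffffffffffff, making
     every chunk "fast".  */
  global_max_fast = 0xff;
  printf ("0x400 corrupted -> fastbin? %d\n",
          goes_to_fastbin (0x400));                            /* still 0 */
  return 0;
}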
tcache_get
/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  Removes chunk from the middle of the
   list.  */
static __always_inline void *
tcache_get_n (size_t tc_idx, tcache_entry **ep)
{
  tcache_entry *e;
  if (ep == &(tcache->entries[tc_idx]))
    e = *ep;
  else
    e = REVEAL_PTR (*ep);

  if (__glibc_unlikely (!aligned_OK (e)))
    malloc_printerr ("malloc(): unaligned tcache chunk detected");

  if (ep == &(tcache->entries[tc_idx]))
    *ep = REVEAL_PTR (e->next);
  else
    *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));

  --(tcache->counts[tc_idx]);
  e->key = 0;
  return (void *) e;
}

/* Like the above, but removes from the head of the list.  */
static __always_inline void *
tcache_get (size_t tc_idx)
{
  return tcache_get_n (tc_idx, & tcache->entries[tc_idx]);
}
- A new function, tcache_get_n, was added.
- tcache_get behaves the same as before; it is now a thin wrapper that removes from the head of the list, while tcache_get_n can also remove a chunk from the middle. The other caller of tcache_get_n is _mid_memalign.
- The two branches in tcache_get_n exist because tcache->entries[] holds raw pointers, while the next fields stored inside free chunks are mangled by safe-linking, as demonstrated below.
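Below is a small standalone demo of that pointer mangling. The PROTECT_PTR/REVEAL_PTR macros are reproduced from malloc.c (safe-linking, present since glibc 2.32); the surrounding main is illustration only.

#include <stdio.h>
#include <stdlib.h>

/* Safe-linking macros as defined in malloc.c: the stored pointer is
   XORed with the address of the field holding it, shifted right by
   the page-size bits.  */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)

int
main (void)
{
  void *next = malloc (0x20);            /* stand-in: chunk e->next points at */
  void **field = malloc (sizeof *field); /* stand-in: next field inside a chunk */

  *field = PROTECT_PTR (field, next);    /* what tcache_put stores */
  printf ("raw      %p\n", next);
  printf ("mangled  %p\n", *field);
  printf ("revealed %p\n", REVEAL_PTR (*field));  /* recovers the raw pointer */

  free (next);
  free (field);
  return 0;
}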
unsorted chunks
@@ -4043,8 +4124,6 @@ _int_malloc (mstate av, size_t bytes)
}
/* remove from unsorted list */
- if (__glibc_unlikely (bck->fd != victim))
- malloc_printerr ("malloc(): corrupted unsorted chunks 3");
unsorted_chunks (av)->bk = bck;
bck->fd = unsorted_chunks (av);
- The bck->fd integrity check performed when removing a chunk from the unsorted list was removed.
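For reference, here is a minimal standalone sketch (model code, not glibc source) of the two list writes shown in the diff; in 2.35 the now-removed check aborted before them whenever bck->fd no longer pointed back at the chunk being taken off the list.

#include <stdio.h>

/* Doubly linked list nodes standing in for malloc_chunk's fd/bk.  */
struct chunk { struct chunk *fd, *bk; };

static void
take_off (struct chunk *bin, struct chunk *victim)
{
  struct chunk *bck = victim->bk;
  /* glibc 2.35 aborted here with "malloc(): corrupted unsorted chunks 3"
     when bck->fd != victim.  */
  bin->bk = bck;   /* unsorted_chunks (av)->bk = bck; */
  bck->fd = bin;   /* bck->fd = unsorted_chunks (av); */
}

int
main (void)
{
  struct chunk bin, victim;
  bin.fd = bin.bk = &victim;
  victim.fd = victim.bk = &bin;

  take_off (&bin, &victim);
  printf ("bin empty again: %d\n", bin.fd == &bin && bin.bk == &bin);
  return 0;
}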
_int_free
regular bin free
@@ -4570,31 +4643,52 @@ _int_free (mstate av, mchunkptr p, int have_lock)
if (!have_lock)
__libc_lock_lock (av->mutex);
- nextchunk = chunk_at_offset(p, size);
-
- /* Lightweight tests: check whether the block is already the
- top block. */
- if (__glibc_unlikely (p == av->top))
- malloc_printerr ("double free or corruption (top)");
- /* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
- && (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
- malloc_printerr ("double free or corruption (out)");
- /* Or whether the block is actually not marked used. */
- if (__glibc_unlikely (!prev_inuse(nextchunk)))
- malloc_printerr ("double free or corruption (!prev)");
-
- nextsize = chunksize(nextchunk);
- if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
- malloc_printerr ("free(): invalid next size (normal)");
+ _int_free_merge_chunk (av, p, size);
- free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
+ if (!have_lock)
+ __libc_lock_unlock (av->mutex);
+ }
+ /*
+ If the chunk was allocated via mmap, release via munmap().
+ */
+
+ else {
+ munmap_chunk (p);
+ }
+}
+
+/* Try to merge chunk P of SIZE bytes with its neighbors. Put the
+ resulting chunk on the appropriate bin list. P must not be on a
+ bin list yet, and it can be in use. */
+static void
+_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
+{
+ mchunkptr nextchunk = chunk_at_offset(p, size);
+
+ /* Lightweight tests: check whether the block is already the
+ top block. */
+ if (__glibc_unlikely (p == av->top))
+ malloc_printerr ("double free or corruption (top)");
+ /* Or whether the next chunk is beyond the boundaries of the arena. */
+ if (__builtin_expect (contiguous (av)
+ && (char *) nextchunk
+ >= ((char *) av->top + chunksize(av->top)), 0))
+ malloc_printerr ("double free or corruption (out)");
+ /* Or whether the block is actually not marked used. */
+ if (__glibc_unlikely (!prev_inuse(nextchunk)))
+ malloc_printerr ("double free or corruption (!prev)");
+
+ INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
+ if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
+ || __builtin_expect (nextsize >= av->system_mem, 0))
+ malloc_printerr ("free(): invalid next size (normal)");
+
+ free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = prev_size (p);
+ /* Consolidate backward. */
+ if (!prev_inuse(p))
+ {
+ INTERNAL_SIZE_T prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
if (__glibc_unlikely (chunksize(p) != prevsize))
- When a regular (non-mmapped) chunk is freed, the code now calls a new function, _int_free_merge_chunk.
- The free and consolidation behavior itself is unchanged; the path was split into _int_free_merge_chunk → _int_free_create_chunk → _int_free_maybe_consolidate, which the following sections walk through.
_int_free_merge_chunk
static void
_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
{
  mchunkptr nextchunk = chunk_at_offset(p, size);

  /* Lightweight tests: check whether the block is already the
     top block.  */
  if (__glibc_unlikely (p == av->top))
    malloc_printerr ("double free or corruption (top)");
  /* Or whether the next chunk is beyond the boundaries of the arena.  */
  if (__builtin_expect (contiguous (av)
                        && (char *) nextchunk
                        >= ((char *) av->top + chunksize(av->top)), 0))
    malloc_printerr ("double free or corruption (out)");
  /* Or whether the block is actually not marked used.  */
  if (__glibc_unlikely (!prev_inuse(nextchunk)))
    malloc_printerr ("double free or corruption (!prev)");

  INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
  if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
      || __builtin_expect (nextsize >= av->system_mem, 0))
    malloc_printerr ("free(): invalid next size (normal)");

  free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

  /* Consolidate backward.  */
  if (!prev_inuse(p))
    {
      INTERNAL_SIZE_T prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
      unlink_chunk (av, p);
    }

  /* Write the chunk header, maybe after merging with the following chunk.  */
  size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
  _int_free_maybe_consolidate (av, size);
}
- The _int_free_merge_chunk function validates the chunk being freed and its next chunk.
- If the previous chunk is unused, it consolidates backward (see the sketch below).
- Forward consolidation with the next chunk is handled in _int_free_create_chunk.
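The backward merge and the integrity check it keeps can be modeled in isolation. A standalone sketch with simplified two-word headers and no flag bits (not glibc source):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Simplified chunk header: prev_size is only valid when the previous
   chunk is free, which is exactly the consolidation case.  */
typedef struct { size_t prev_size; size_t size; } hdr;

int
main (void)
{
  unsigned char heap[0x100];
  memset (heap, 0, sizeof heap);

  hdr *prev = (hdr *) heap;           /* already-free neighbour, size 0x40 */
  hdr *p    = (hdr *) (heap + 0x40);  /* chunk being freed, size 0x50 */
  prev->size   = 0x40;
  p->prev_size = 0x40;
  p->size      = 0x50;

  /* Consolidate backward, as _int_free_merge_chunk does.  */
  size_t prevsize = p->prev_size;
  size_t size     = p->size + prevsize;
  hdr *merged     = (hdr *) ((char *) p - prevsize);

  /* The retained check: the header found at p - prevsize must claim
     exactly prevsize bytes, else "corrupted size vs. prev_size".  */
  assert (merged->size == prevsize);
  printf ("merged chunk at offset 0x%zx, size %#zx\n",
          (size_t) ((unsigned char *) merged - heap), size);
  return 0;
}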
_int_free_create_chunk
/* Create a chunk at P of SIZE bytes, with SIZE potentially increased
   to cover the immediately following chunk NEXTCHUNK of NEXTSIZE
   bytes (if NEXTCHUNK is unused).  The chunk at P is not actually
   read and does not have to be initialized.  After creation, it is
   placed on the appropriate bin list.  The function returns the size
   of the new chunk.  */
static INTERNAL_SIZE_T
_int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
                        mchunkptr nextchunk, INTERNAL_SIZE_T nextsize)
{
  if (nextchunk != av->top)
    {
      /* get and clear inuse bit */
      bool nextinuse = inuse_bit_at_offset (nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink_chunk (av, nextchunk);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */

      mchunkptr bck = unsorted_chunks (av);
      mchunkptr fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        malloc_printerr ("free(): corrupted unsorted chunks");
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }
  else
    {
      /* If the chunk borders the current high end of memory,
         consolidate into top.  */
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }

  return size;
}
- If the next chunk is unused, it is consolidated forward.
- The resulting chunk P is inserted into the unsorted list, unless it borders the top chunk, in which case it is merged into top instead.
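A small runnable demo of both branches, assuming default tunables (the tcache covers requests up to 0x408 usable bytes, so larger frees reach this code):

#include <stdlib.h>

int
main (void)
{
  void *a = malloc (0x500);     /* beyond the tcache range */
  void *guard = malloc (0x20);  /* keeps `a` from bordering top */
  free (a);   /* nextchunk != av->top: `a` goes to the unsorted bin */

  void *b = malloc (0x5000);    /* too big for a's hole: carved from top */
  free (b);   /* nextchunk == av->top: merged straight back into top */

  (void) guard;
  return 0;
}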
_int_free_maybe_consolidate
/* If freeing a large space, consolidate possibly-surrounding
   chunks.  Then, if the total unused topmost memory exceeds trim
   threshold, ask malloc_trim to reduce top.  */
static void
_int_free_maybe_consolidate (mstate av, INTERNAL_SIZE_T size)
{
  /* Unless max_fast is 0, we don't know if there are fastbins
     bordering top, so we cannot tell for sure whether threshold has
     been reached unless fastbins are consolidated.  But we don't want
     to consolidate on each free.  As a compromise, consolidation is
     performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached.  */
  if (size >= FASTBIN_CONSOLIDATION_THRESHOLD)
    {
      if (atomic_load_relaxed (&av->have_fastchunks))
        malloc_consolidate(av);

      if (av == &main_arena)
        {
#ifndef MORECORE_CANNOT_TRIM
          if (chunksize (av->top) >= mp_.trim_threshold)
            systrim (mp_.top_pad, av);
#endif
        }
      else
        {
          /* Always try heap_trim, even if the top chunk is not large,
             because the corresponding heap might go away.  */
          heap_info *heap = heap_for_ptr (top (av));

          assert (heap->ar_ptr == av);
          heap_trim (heap, mp_.top_pad);
        }
    }
}
- If size is at least FASTBIN_CONSOLIDATION_THRESHOLD and there are fast chunks, malloc_consolidate runs; afterwards the top chunk may be trimmed back to the system.
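FASTBIN_CONSOLIDATION_THRESHOLD is 65536 (64 KiB). A runnable demo that should reach the malloc_consolidate path under default tunables (tcache bins hold 7 entries, so the 8th small free lands in a fastbin):

#include <stdlib.h>

int
main (void)
{
  /* Fill the 0x20 tcache bin (7 entries); the 8th free sets
     have_fastchunks by landing in a fastbin.  */
  void *p[8];
  for (int i = 0; i < 8; i++)
    p[i] = malloc (0x18);
  for (int i = 0; i < 8; i++)
    free (p[i]);

  /* Freeing a chunk of at least 64 KiB triggers malloc_consolidate,
     which flushes the fastbin chunks.  */
  void *big = malloc (0x11000);
  void *guard = malloc (0x20);  /* keep `big` off the top chunk */
  free (big);

  (void) guard;
  return 0;
}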
Summary
- global_max_fast was changed to a uint8_t.
- The _int_free path was split into separate helper functions.
- These changes are unlikely to have a major impact on exploitation techniques.