diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/alpha/config.in linux-2.4.26-leo/arch/alpha/config.in
--- linux-2.4.26/arch/alpha/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/alpha/config.in	2004-06-22 20:53:45.000000000 +0100
@@ -468,3 +468,12 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
+
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/alpha/kernel/osf_sys.c linux-2.4.26-leo/arch/alpha/kernel/osf_sys.c
--- linux-2.4.26/arch/alpha/kernel/osf_sys.c	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/alpha/kernel/osf_sys.c	2004-06-22 20:53:45.000000000 +0100
@@ -33,6 +33,7 @@
 #include <linux/file.h>
 #include <linux/types.h>
 #include <linux/ipc.h>
+#include <linux/grsecurity.h>
 
 #include <asm/fpu.h>
 #include <asm/io.h>
@@ -230,6 +231,11 @@
 	struct file *file = NULL;
 	unsigned long ret = -EBADF;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 #if 0
 	if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
 		printk("%s: unimplemented OSF mmap flags %04lx\n", 
@@ -240,6 +246,13 @@
 		if (!file)
 			goto out;
 	}
+
+	if (gr_handle_mmap(file, prot)) {
+		fput(file);
+		ret = -EACCES;
+		goto out;
+	}
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 	down_write(&current->mm->mmap_sem);
 	ret = do_mmap(file, addr, len, prot, flags, off);
@@ -1357,6 +1370,10 @@
 	   merely specific addresses, but regions of memory -- perhaps
 	   this feature should be incorporated into all ports?  */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
+#endif
+
 	if (addr) {
 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
 		if (addr != -ENOMEM)
@@ -1364,8 +1381,15 @@
 	}
 
 	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
-	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-					 len, limit);
+
+	addr = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if (current->flags & PF_PAX_RANDMMAP)
+		addr += current->mm->delta_mmap;
+#endif
+
+	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
 	if (addr != -ENOMEM)
 		return addr;
 
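
The two RANDMMAP hunks above implement one idea: ignore the caller's address
hint and bias the TASK_UNMAPPED_BASE search base by a per-process random
delta. A minimal standalone C sketch of the mm->delta_mmap concept follows;
PAX_DELTA_MMAP_BITS and the entropy source are illustrative assumptions, not
taken from this patch.

#include <stdio.h>

#define PAGE_SHIFT          13              /* 8KB pages on Alpha */
#define TASK_UNMAPPED_BASE  0x20000000UL    /* illustrative value */
#define PAX_DELTA_MMAP_BITS 16              /* assumed entropy width */

/* delta_mmap is computed once per exec and reused for every mmap(),
 * so mappings keep their relative layout while the whole mmap area
 * starts at an unpredictable, page-aligned base. */
static unsigned long pax_mmap_base(unsigned long rnd)
{
	unsigned long delta_mmap =
		(rnd & ((1UL << PAX_DELTA_MMAP_BITS) - 1)) << PAGE_SHIFT;
	return TASK_UNMAPPED_BASE + delta_mmap;
}

int main(void)
{
	printf("base = %#lx\n", pax_mmap_base(0x1234));
	return 0;
}
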
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/alpha/kernel/process.c linux-2.4.26-leo/arch/alpha/kernel/process.c
--- linux-2.4.26/arch/alpha/kernel/process.c	2003-08-25 12:44:39.000000000 +0100
+++ linux-2.4.26-leo/arch/alpha/kernel/process.c	2004-05-26 23:34:34.000000000 +0100
@@ -186,6 +186,7 @@
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
 #ifdef CONFIG_SMP
+	preempt_disable();
 	smp_call_function(common_shutdown_1, &args, 1, 0);
 #endif
 	common_shutdown_1(&args);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/alpha/kernel/ptrace.c linux-2.4.26-leo/arch/alpha/kernel/ptrace.c
--- linux-2.4.26/arch/alpha/kernel/ptrace.c	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/alpha/kernel/ptrace.c	2004-06-22 20:53:45.000000000 +0100
@@ -13,6 +13,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/slab.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -275,6 +276,10 @@
 	read_unlock(&tasklist_lock);
 	if (!child)
 		goto out_notsk;
+
+	if (gr_handle_ptrace(child, request))
+		goto out;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		goto out;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/alpha/mm/fault.c linux-2.4.26-leo/arch/alpha/mm/fault.c
--- linux-2.4.26/arch/alpha/mm/fault.c	2002-11-28 23:53:08.000000000 +0000
+++ linux-2.4.26-leo/arch/alpha/mm/fault.c	2004-06-22 20:53:45.000000000 +0100
@@ -53,6 +53,139 @@
 	__reload_thread(&current->thread);
 }
 
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ *         4 when legitimate ET_EXEC was detected
+ */
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	int err;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		if (regs->pc >= current->mm->start_code &&
+		    regs->pc < current->mm->end_code)
+		{
+			if (regs->r26 == regs->pc)
+				return 1;
+			regs->pc += current->mm->delta_exec;
+			return 4;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int ldah, ldq, jmp;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+		    jmp == 0x6BFB0000U)
+		{
+			unsigned long r27, addr;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			err = get_user(r27, (unsigned long*)addr);
+			if (err)
+				break;
+
+			regs->r27 = r27;
+			regs->pc = r27;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #2 */
+		unsigned int ldah, lda, br;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(lda, (unsigned int *)(regs->pc+4));
+		err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
+		    (br & 0xFFE00000U) == 0xC3E00000U)
+		{
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation */
+		unsigned int br;
+
+		err = get_user(br, (unsigned int *)regs->pc);
+
+		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+			unsigned int br2, ldq, nop, jmp;
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			err = get_user(br2, (unsigned int *)addr);  
+			err |= get_user(ldq, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8));
+			err |= get_user(jmp, (unsigned int *)(addr+12));
+			err |= get_user(resolver, (unsigned long *)(addr+16));
+
+			if (err)
+				break;
+
+			if (br2 == 0xC3600000U &&
+			    ldq == 0xA77B000CU &&
+			    nop == 0x47FF041FU &&
+			    jmp == 0x6B7B0000U)
+			{
+				regs->r28 = regs->pc+4;
+				regs->r27 = addr+16;
+				regs->pc = resolver;
+				return 3;
+			}
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -133,8 +266,32 @@
 good_area:
 	info.si_code = SEGV_ACCERR;
 	if (cause < 0) {
-		if (!(vma->vm_flags & VM_EXEC))
+		if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+			if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->pc)
+				goto bad_area;
+
+			up_read(&mm->mmap_sem);
+			switch(pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+			case 2:
+			case 3:
+				return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			case 4:
+				return;
+#endif
+			}
+			pax_report_fault(regs, (void*)regs->pc, (void*)rdusp());
+			do_exit(SIGKILL);
+#else
 			goto bad_area;
+#endif
+		}
 	} else if (!cause) {
 		/* Allow reads even for write-only mappings */
 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
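
The patched-PLT emulation above rebuilds the branch target from the 16-bit
displacement fields of the ldah/ldq (or ldah/lda) pair; the opaque-looking
(v ^ 0x8000) + 0x8000 expressions are a branch-free 16-bit sign extension
(the ldah displacement is pre-shifted left by 16, which is why that term uses
the 0x80000000 constants instead). A standalone check of the identity:

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low 16 bits of x to 64 bits, branch-free: the same
 * trick applied to the pre-widened displacements in the emulation code. */
static int64_t sext16(uint64_t x)
{
	return (int64_t)(((x | 0xFFFFFFFFFFFF0000ULL) ^ 0x8000ULL) + 0x8000ULL);
}

int main(void)
{
	assert(sext16(0x0001) ==      1);
	assert(sext16(0x8000) == -32768);
	assert(sext16(0xFFFF) ==     -1);
	/* GOT entry address as rebuilt by patched PLT emulation #1:
	 * addr = r27 + (sext16(ldah) << 16) + sext16(ldq) */
	return 0;
}
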
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/arm/config.in linux-2.4.26-leo/arch/arm/config.in
--- linux-2.4.26/arch/arm/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/arm/config.in	2004-06-22 20:53:45.000000000 +0100
@@ -736,3 +736,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/cris/config.in linux-2.4.26-leo/arch/cris/config.in
--- linux-2.4.26/arch/cris/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/cris/config.in	2004-06-22 20:53:45.000000000 +0100
@@ -276,3 +276,12 @@
 source crypto/Config.in
 source lib/Config.in
 endmenu
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+    source grsecurity/Config.in
+fi
+endmenu
+
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/boot/bootsect.S linux-2.4.26-leo/arch/i386/boot/bootsect.S
--- linux-2.4.26/arch/i386/boot/bootsect.S	2003-08-25 12:44:39.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/boot/bootsect.S	2004-06-22 20:53:45.000000000 +0100
@@ -237,7 +237,7 @@
 #ifdef __BIG_KERNEL__
 					# look in setup.S for bootsect_kludge
 	bootsect_kludge = 0x220		# 0x200 + 0x20 which is the size of the
-	lcall	bootsect_kludge		# bootsector + bootsect_kludge offset
+	lcall	*bootsect_kludge	# bootsector + bootsect_kludge offset
 #else
 	movw	%es, %ax
 	subw	$SYSSEG, %ax
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/boot/setup.S linux-2.4.26-leo/arch/i386/boot/setup.S
--- linux-2.4.26/arch/i386/boot/setup.S	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/boot/setup.S	2004-06-22 20:53:45.000000000 +0100
@@ -637,7 +637,7 @@
 	cmpw	$0, %cs:realmode_swtch
 	jz	rmodeswtch_normal
 
-	lcall	%cs:realmode_swtch
+	lcall	*%cs:realmode_swtch
 
 	jmp	rmodeswtch_end
 
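
The bootsect.S and setup.S changes are assembler fixes, not security changes:
newer binutils require the '*' prefix on indirect far calls and otherwise
treat the operand as a direct-call immediate. The same construct appears in C
inline assembly elsewhere in the patch (apm.c, pci-pc.c); a minimal sketch,
with farptr/far_call as hypothetical names:

/* Indirect far call through a 16:32 pointer in memory (AT&T syntax).
 * The '*' marks the operand as a memory indirection. */
struct farptr {
	unsigned long  offset;
	unsigned short segment;
} __attribute__((packed));

static void far_call(struct farptr *p)
{
	__asm__ __volatile__("lcall *%0" : : "m" (*p) : "memory");
}
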
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/config.in linux-2.4.26-leo/arch/i386/config.in
--- linux-2.4.26/arch/i386/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/config.in	2004-06-22 20:53:45.000000000 +0100
@@ -99,6 +99,7 @@
 fi
 if [ "$CONFIG_M686" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 5
+   define_bool CONFIG_X86_ALIGNMENT_16 y
    define_bool CONFIG_X86_HAS_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    bool 'PGE extensions (not for Cyrix/Transmeta)' CONFIG_X86_PGE
@@ -108,6 +109,7 @@
 fi
 if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 5
+   define_bool CONFIG_X86_ALIGNMENT_16 y
    define_bool CONFIG_X86_HAS_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_PGE y
@@ -116,6 +118,7 @@
 fi
 if [ "$CONFIG_MPENTIUM4" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 7
+   define_bool CONFIG_X86_ALIGNMENT_16 y
    define_bool CONFIG_X86_HAS_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_PGE y
@@ -135,6 +138,7 @@
 fi
 if [ "$CONFIG_MK7" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 6
+   define_bool CONFIG_X86_ALIGNMENT_16 y
    define_bool CONFIG_X86_HAS_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_USE_3DNOW y
@@ -225,6 +229,7 @@
 bool 'Math emulation' CONFIG_MATH_EMULATION
 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
 bool 'Symmetric multi-processing support' CONFIG_SMP
+bool 'Preemptible Kernel' CONFIG_PREEMPT
 if [ "$CONFIG_SMP" != "y" ]; then
    bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
    dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
@@ -258,9 +263,12 @@
    fi
 fi
 
-if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
-   define_bool CONFIG_HAVE_DEC_LOCK y
+if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
+   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
+      define_bool CONFIG_HAVE_DEC_LOCK y
+   fi
 fi
+
 endmenu
 
 mainmenu_option next_comment
@@ -487,3 +495,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/apm.c linux-2.4.26-leo/arch/i386/kernel/apm.c
--- linux-2.4.26/arch/i386/kernel/apm.c	2003-08-25 12:44:39.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/apm.c	2004-06-22 20:53:45.000000000 +0100
@@ -614,7 +614,7 @@
 	__asm__ __volatile__(APM_DO_ZERO_SEGS
 		"pushl %%edi\n\t"
 		"pushl %%ebp\n\t"
-		"lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+		"lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
 		"setc %%al\n\t"
 		"popl %%ebp\n\t"
 		"popl %%edi\n\t"
@@ -666,7 +666,7 @@
 		__asm__ __volatile__(APM_DO_ZERO_SEGS
 			"pushl %%edi\n\t"
 			"pushl %%ebp\n\t"
-			"lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+			"lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
 			"setc %%bl\n\t"
 			"popl %%ebp\n\t"
 			"popl %%edi\n\t"
@@ -1985,6 +1985,12 @@
 		 __va((unsigned long)0x40 << 4));
 	_set_limit((char *)&gdt[APM_40 >> 3], 4095 - (0x40 << 4));
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	set_base(gdt2[APM_40 >> 3],
+		__va((unsigned long)0x40 << 4));
+	_set_limit((char *)&gdt2[APM_40 >> 3], 4095 - (0x40 << 4));
+#endif
+
 	apm_bios_entry.offset = apm_info.bios.offset;
 	apm_bios_entry.segment = APM_CS;
 	set_base(gdt[APM_CS >> 3],
@@ -1993,6 +1999,16 @@
 		 __va((unsigned long)apm_info.bios.cseg_16 << 4));
 	set_base(gdt[APM_DS >> 3],
 		 __va((unsigned long)apm_info.bios.dseg << 4));
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	set_base(gdt2[APM_CS >> 3],
+		__va((unsigned long)apm_info.bios.cseg << 4));
+	set_base(gdt2[APM_CS_16 >> 3],
+		__va((unsigned long)apm_info.bios.cseg_16 << 4));
+	set_base(gdt2[APM_DS >> 3],
+		__va((unsigned long)apm_info.bios.dseg << 4));
+#endif
+
 #ifndef APM_RELAX_SEGMENTS
 	if (apm_info.bios.version == 0x100) {
 #endif
@@ -2002,6 +2018,13 @@
 		_set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
 		/* For the DEC Hinote Ultra CT475 (and others?) */
 		_set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		_set_limit((char *)&gdt2[APM_CS >> 3], 64 * 1024 - 1);
+		_set_limit((char *)&gdt2[APM_CS_16 >> 3], 64 * 1024 - 1);
+		_set_limit((char *)&gdt2[APM_DS >> 3], 64 * 1024 - 1);
+#endif
+
 #ifndef APM_RELAX_SEGMENTS
 	} else {
 		_set_limit((char *)&gdt[APM_CS >> 3],
@@ -2010,6 +2033,16 @@
 			(apm_info.bios.cseg_16_len - 1) & 0xffff);
 		_set_limit((char *)&gdt[APM_DS >> 3],
 			(apm_info.bios.dseg_len - 1) & 0xffff);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		_set_limit((char *)&gdt2[APM_CS >> 3],
+			(apm_info.bios.cseg_len - 1) & 0xffff);
+		_set_limit((char *)&gdt2[APM_CS_16 >> 3],
+			(apm_info.bios.cseg_16_len - 1) & 0xffff);
+		_set_limit((char *)&gdt2[APM_DS >> 3],
+			(apm_info.bios.dseg_len - 1) & 0xffff);
+#endif
+
 	}
 #endif
 
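
Under SEGMEXEC the kernel keeps a second GDT (gdt2, see the head.S section
below) whose user segments split the address space, so every run-time
descriptor update APM makes must be mirrored into gdt2 or a task running on
the alternate GDT would use stale APM segments. A hypothetical helper that
factors out the duplication the hunks above open-code, using the kernel's own
set_base/_set_limit macros:

/* Hypothetical refactoring sketch, not part of the patch: apply one
 * base/limit update to both GDTs. 'entry' is the descriptor index,
 * e.g. APM_CS >> 3. */
static void apm_set_seg(int entry, void *base, unsigned long limit)
{
	set_base(gdt[entry], base);
	_set_limit((char *)&gdt[entry], limit);

#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
	set_base(gdt2[entry], base);
	_set_limit((char *)&gdt2[entry], limit);
#endif
}
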
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/cpuid.c linux-2.4.26-leo/arch/i386/kernel/cpuid.c
--- linux-2.4.26/arch/i386/kernel/cpuid.c	2001-10-11 17:04:57.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/cpuid.c	2004-05-26 23:34:34.000000000 +0100
@@ -60,7 +60,8 @@
 static inline void do_cpuid(int cpu, u32 reg, u32 *data)
 {
   struct cpuid_command cmd;
-  
+
+  preempt_disable();
   if ( cpu == smp_processor_id() ) {
     cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
   } else {
@@ -70,6 +71,7 @@
     
     smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
   }
+  preempt_enable();
 }
 #else /* ! CONFIG_SMP */
 
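
This is the recurring preemption pattern of the patch: smp_processor_id() is
only stable while the task cannot migrate, so the id must be read and used
entirely inside a preempt_disable()/preempt_enable() pair. The shape every
converted function now follows (per_cpu_operation is a hypothetical name):

/* Pattern sketch, kernel context assumed: */
void per_cpu_operation(void)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();  /* stable until preempt_enable() */
	/* ... use per-CPU state for 'cpu', or smp_call_function() ... */
	preempt_enable();
}
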
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/entry.S linux-2.4.26-leo/arch/i386/kernel/entry.S
--- linux-2.4.26/arch/i386/kernel/entry.S	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/entry.S	2004-06-22 20:53:45.000000000 +0100
@@ -73,7 +73,7 @@
  * these are offsets into the task-struct.
  */
 state		=  0
-flags		=  4
+preempt_count	=  4
 sigpending	=  8
 addr_limit	= 12
 exec_domain	= 16
@@ -81,8 +81,28 @@
 tsk_ptrace	= 24
 processor	= 52
 
+/* These are offsets into the irq_stat structure
+ * There is one per cpu and it is aligned to a 32
+ * byte boundary (we put that here as a shift count)
+ */
+irq_array_shift                 = CONFIG_X86_L1_CACHE_SHIFT
+
+irq_stat_local_irq_count        = 4
+irq_stat_local_bh_count         = 8
+
 ENOSYS = 38
 
+#ifdef CONFIG_SMP
+#define GET_CPU_INDX	movl processor(%ebx),%eax;  \
+                        shll $irq_array_shift,%eax
+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
+                             GET_CPU_INDX
+#define CPU_INDX (,%eax)
+#else
+#define GET_CPU_INDX
+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
+#define CPU_INDX
+#endif
 
 #define SAVE_ALL \
 	cld; \
@@ -209,6 +229,17 @@
 	jae badsys
 	call *SYMBOL_NAME(sys_call_table)(,%eax,4)
 	movl %eax,EAX(%esp)		# save the return value
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDKSTACK
+	cli                             # need_resched and signals atomic test
+	cmpl $0,need_resched(%ebx)
+	jne reschedule
+	cmpl $0,sigpending(%ebx)
+	jne signal_return
+	call SYMBOL_NAME(pax_randomize_kstack)
+	jmp restore_all
+#endif
+
 ENTRY(ret_from_sys_call)
 	cli				# need_resched and signals atomic test
 	cmpl $0,need_resched(%ebx)
@@ -255,12 +286,30 @@
 	ALIGN
 ENTRY(ret_from_intr)
 	GET_CURRENT(%ebx)
+#ifdef CONFIG_PREEMPT
+	cli
+	decl preempt_count(%ebx)
+#endif
 ret_from_exception:
 	movl EFLAGS(%esp),%eax		# mix EFLAGS and CS
 	movb CS(%esp),%al
 	testl $(VM_MASK | 3),%eax	# return to VM86 mode or non-supervisor?
 	jne ret_from_sys_call
+#ifdef CONFIG_PREEMPT
+	cmpl $0,preempt_count(%ebx)
+	jnz restore_all
+	cmpl $0,need_resched(%ebx)
+	jz restore_all
+	movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
+	addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
+	jnz restore_all
+	incl preempt_count(%ebx)
+	sti
+	call SYMBOL_NAME(preempt_schedule)
+	jmp ret_from_intr
+#else
 	jmp restore_all
+#endif
 
 	ALIGN
 reschedule:
@@ -297,6 +346,9 @@
 	GET_CURRENT(%ebx)
 	call *%edi
 	addl $8,%esp
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	jmp ret_from_exception
 
 ENTRY(coprocessor_error)
@@ -316,12 +368,18 @@
 	movl %cr0,%eax
 	testl $0x4,%eax			# EM (math emulation bit)
 	jne device_not_available_emulate
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	call SYMBOL_NAME(math_state_restore)
 	jmp ret_from_exception
 device_not_available_emulate:
 	pushl $0		# temporary storage for ORIG_EIP
 	call  SYMBOL_NAME(math_emulate)
 	addl $4,%esp
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	jmp ret_from_exception
 
 ENTRY(debug)
@@ -389,8 +447,56 @@
 	jmp error_code
 
 ENTRY(page_fault)
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	ALIGN
+	pushl $ SYMBOL_NAME(pax_do_page_fault)
+#else
 	pushl $ SYMBOL_NAME(do_page_fault)
+#endif
+
+#ifndef CONFIG_GRKERNSEC_PAX_EMUTRAMP
 	jmp error_code
+#else
+	pushl %ds
+	pushl %eax
+	xorl %eax,%eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	decl %eax			# eax = -1
+	pushl %ecx
+	pushl %ebx
+	cld
+	movl %es,%ecx
+	movl ORIG_EAX(%esp), %esi	# get the error code
+	movl ES(%esp), %edi		# get the function address
+	movl %eax, ORIG_EAX(%esp)
+	movl %ecx, ES(%esp)
+	movl %esp,%edx
+	pushl %esi			# push the error code
+	pushl %edx			# push the pt_regs pointer
+	movl $(__KERNEL_DS),%edx
+	movl %edx,%ds
+	movl %edx,%es
+	GET_CURRENT(%ebx)
+	call *%edi
+	addl $8,%esp
+	decl %eax
+	jnz ret_from_exception
+
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
+	popl %ds
+	popl %es
+	addl $4,%esp
+	jmp system_call
+#endif
 
 ENTRY(machine_check)
 	pushl $0
@@ -402,7 +508,7 @@
 	pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
 	jmp error_code
 
-.data
+.section .rodata, "a",@progbits
 ENTRY(sys_call_table)
 	.long SYMBOL_NAME(sys_ni_syscall)	/* 0  -  old "setup()" system call*/
 	.long SYMBOL_NAME(sys_exit)
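
The CONFIG_PREEMPT block added to ret_from_intr implements the classic 2.4
preemption test in registers. Rendered as a C sketch of what the assembly
above does (not kernel source; 'cpu' stands in for GET_CURRENT_CPU_INDX):

/* Preempt only when the interrupted context is fully preemptible:
 * no preemption already held, a reschedule pending, and no hardirq
 * or softirq activity on this CPU. */
if (current->preempt_count == 0 &&
    current->need_resched != 0 &&
    local_irq_count(cpu) == 0 &&
    local_bh_count(cpu) == 0) {
	current->preempt_count++;	/* incl preempt_count(%ebx) */
	sti();				/* reenable interrupts */
	preempt_schedule();
	/* then loop: jmp ret_from_intr */
}
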
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/head.S linux-2.4.26-leo/arch/i386/kernel/head.S
--- linux-2.4.26/arch/i386/kernel/head.S	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/head.S	2004-06-22 20:53:45.000000000 +0100
@@ -37,10 +37,17 @@
 #define X86_VENDOR_ID	CPU_PARAMS+36	/* tied to NCAPINTS in cpufeature.h */
 
 /*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
+
+/*
  * swapper_pg_dir is the main page directory, address 0x00101000
  *
  * On entry, %esi points to the real-mode code as a 32-bit pointer.
  */
+.global startup_32
 startup_32:
 /*
  * Set segments to known values
@@ -86,7 +93,7 @@
 				   PRESENT+RW+USER */
 2:	stosl
 	add $0x1000,%eax
-	cmp $empty_zero_page-__PAGE_OFFSET,%edi
+	cmp $0x00c00007,%eax
 	jne 2b
 
 /*
@@ -100,9 +107,19 @@
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	jmp 1f			/* flush the prefetch-queue */
 1:
+
+#if !defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) || defined(CONFIG_SMP)
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	orw  %bx,%bx
+	jz  1f
+#endif
+
 	movl $1f,%eax
 	jmp *%eax		/* make sure eip is relocated */
 1:
+#endif
+
 	/* Set up the stack pointer */
 	lss stack_start,%esp
 
@@ -121,7 +138,7 @@
  */
 	xorl %eax,%eax
 	movl $ SYMBOL_NAME(__bss_start),%edi
-	movl $ SYMBOL_NAME(_end),%ecx
+	movl $ SYMBOL_NAME(__bss_end),%ecx
 	subl %edi,%ecx
 	rep
 	stosb
@@ -272,8 +289,6 @@
 	jmp L6			# main should never return here, but
 				# just in case, we know what happens.
 
-ready:	.byte 0
-
 /*
  * We depend on ET to be correct. This checks for 287/387.
  */
@@ -319,13 +334,6 @@
 	jne rp_sidt
 	ret
 
-ENTRY(stack_start)
-	.long SYMBOL_NAME(init_task_union)+8192
-	.long __KERNEL_DS
-
-/* This is the default interrupt "handler" :-) */
-int_msg:
-	.asciz "Unknown interrupt\n"
 	ALIGN
 ignore_int:
 	cld
@@ -347,6 +355,18 @@
 	popl %eax
 	iret
 
+.data
+ready:	.byte 0
+
+ENTRY(stack_start)
+	.long SYMBOL_NAME(init_task_union)+8192
+	.long __KERNEL_DS
+
+.section .rodata,"a"
+/* This is the default interrupt "handler" :-) */
+int_msg:
+	.asciz "Unknown interrupt\n"
+
 /*
  * The interrupt descriptor table has room for 256 idt's,
  * the global descriptor table is dependent on the number
@@ -372,54 +392,77 @@
 SYMBOL_NAME(gdt):
 	.long SYMBOL_NAME(gdt_table)
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+.globl SYMBOL_NAME(gdt2)
+	.word 0
+gdt_descr2:
+	.word GDT_ENTRIES*8-1
+SYMBOL_NAME(gdt2):
+	.long SYMBOL_NAME(gdt_table2)
+#endif
+
 /*
  * This is initialized to create an identity-mapping at 0-8M (for bootup
  * purposes) and another mapping of the 0-8M area at virtual address
  * PAGE_OFFSET.
  */
-.org 0x1000
+.section .data.swapper_pg_dir,"a",@progbits
 ENTRY(swapper_pg_dir)
-	.long 0x00102007
-	.long 0x00103007
-	.fill BOOT_USER_PGD_PTRS-2,4,0
+	.long pg0-__PAGE_OFFSET+7
+	.long pg1-__PAGE_OFFSET+7
+	.long pg2-__PAGE_OFFSET+7
+	.fill BOOT_USER_PGD_PTRS-3,4,0
 	/* default: 766 entries */
-	.long 0x00102007
-	.long 0x00103007
+	.long pg0-__PAGE_OFFSET+7
+	.long pg1-__PAGE_OFFSET+7
+	.long pg2-__PAGE_OFFSET+7
 	/* default: 254 entries */
-	.fill BOOT_KERNEL_PGD_PTRS-2,4,0
+	.fill BOOT_KERNEL_PGD_PTRS-3,4,0
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+ENTRY(kernexec_pg_dir)
+	.fill 1024,4,0
+#endif
 
 /*
  * The page tables are initialized to only 8MB here - the final page
  * tables are set up later depending on memory size.
  */
-.org 0x2000
+.section .data.pg0,"a",@progbits
 ENTRY(pg0)
+	.fill 1024,4,0
 
-.org 0x3000
+.section .data.pg1,"a",@progbits
 ENTRY(pg1)
+	.fill 1024,4,0
+
+.section .data.pg2,"a",@progbits
+ENTRY(pg2)
+	.fill 1024,4,0
 
 /*
  * empty_zero_page must immediately follow the page tables ! (The
  * initialization loop counts until empty_zero_page)
  */
-
-.org 0x4000
+.section .rodata.empty_zero_page,"a",@progbits
 ENTRY(empty_zero_page)
-
-.org 0x5000
+	.fill 1024,4,0
 
 /*
- * Real beginning of normal "text" segment
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.  We have a special link segment
+ * for this.
  */
-ENTRY(stext)
-ENTRY(_stext)
+.section .rodata.idt,"a",@progbits
+ENTRY(idt_table)
+	.fill 256,8,0
 
 /*
  * This starts the data section. Note that the above is all
  * in the text section because it has alignment requirements
  * that we cannot fulfill any other way.
  */
-.data
+.section .rodata,"a",@progbits
 
 ALIGN
 /*
@@ -430,19 +473,61 @@
  */
 ENTRY(gdt_table)
 	.quad 0x0000000000000000	/* NULL descriptor */
-	.quad 0x0000000000000000	/* not used */
-	.quad 0x00cf9a000000ffff	/* 0x10 kernel 4GB code at 0x00000000 */
-	.quad 0x00cf92000000ffff	/* 0x18 kernel 4GB data at 0x00000000 */
-	.quad 0x00cffa000000ffff	/* 0x23 user   4GB code at 0x00000000 */
-	.quad 0x00cff2000000ffff	/* 0x2b user   4GB data at 0x00000000 */
+
+#if defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
+	.quad 0x00cf9b000000ffff        /* 0x08 kernel 4GB code at 0x00000000 */
+#else
+	.quad 0x0000000000000000        /* not used */
+#endif
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	.quad 0xc0cf9b400000ffff        /* 0x10 kernel 4GB code at 0xc0400000 */
+#else
+	.quad 0x00cf9b000000ffff        /* 0x10 kernel 4GB code at 0x00000000 */
+#endif
+
+	.quad 0x00cf93000000ffff	/* 0x18 kernel 4GB data at 0x00000000 */
+	.quad 0x00cffb000000ffff	/* 0x23 user   4GB code at 0x00000000 */
+	.quad 0x00cff3000000ffff	/* 0x2b user   4GB data at 0x00000000 */
+	.quad 0x0000000000000000        /* not used */
+	.quad 0x0000000000000000        /* not used */
+	/*
+	 * The APM segments have byte granularity and their bases
+	 * and limits are set at run time.
+	 */
+	.quad 0x0040930000000000        /* 0x40 APM set up for bad BIOS's */
+	.quad 0x00409b0000000000        /* 0x48 APM CS    code */
+	.quad 0x00009b0000000000        /* 0x50 APM CS 16 code (16 bit) */
+	.quad 0x0040930000000000        /* 0x58 APM DS    data */
+	.fill NR_CPUS*4,8,0             /* space for TSS's and LDT's */
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+ENTRY(gdt_table2)
+	.quad 0x0000000000000000        /* NULL descriptor */
+
+#if defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
+	.quad 0x00cf9b000000ffff        /* 0x08 kernel 4GB code at 0x00000000 */
+#else
+	.quad 0x0000000000000000        /* not used */
+#endif
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	.quad 0xc0cf9b400000ffff        /* 0x10 kernel 4GB code at 0xc0400000 */
+#else
+	.quad 0x00cf9b000000ffff        /* 0x10 kernel 4GB code at 0x00000000 */
+#endif
+
+	.quad 0x00cf93000000ffff        /* 0x18 kernel 4GB data at 0x00000000 */
+	.quad 0x60c5fb000000ffff        /* 0x23 user 1.5GB code at 0x60000000 */
+	.quad 0x00c5f3000000ffff        /* 0x2b user 1.5GB data at 0x00000000 */
+
 	.quad 0x0000000000000000	/* not used */
 	.quad 0x0000000000000000	/* not used */
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * and limits are set at run time.
 	 */
-	.quad 0x0040920000000000	/* 0x40 APM set up for bad BIOS's */
-	.quad 0x00409a0000000000	/* 0x48 APM CS    code */
-	.quad 0x00009a0000000000	/* 0x50 APM CS 16 code (16 bit) */
-	.quad 0x0040920000000000	/* 0x58 APM DS    data */
+	.quad 0x0040930000000000	/* 0x40 APM set up for bad BIOS's */
+	.quad 0x00409b0000000000	/* 0x48 APM CS    code */
+	.quad 0x00009b0000000000	/* 0x50 APM CS 16 code (16 bit) */
+	.quad 0x0040930000000000	/* 0x58 APM DS    data */
 	.fill NR_CPUS*4,8,0		/* space for TSS's and LDT's */
+#endif
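
The descriptor quads above pack base, limit and access rights into 64 bits.
A standalone decoder (layout per the i386 architecture, not patch code)
confirms the comments: the KERNEXEC kernel code segment really is based at
0xc0400000, and the SEGMEXEC user code segment covers 1.5GB at 0x60000000.

#include <stdint.h>
#include <stdio.h>

static void decode(uint64_t d)
{
	uint32_t lo = (uint32_t)d, hi = (uint32_t)(d >> 32);
	uint32_t base  = (hi & 0xFF000000U) | ((hi & 0xFFU) << 16) | (lo >> 16);
	uint32_t limit = (hi & 0x000F0000U) | (lo & 0xFFFFU);

	if (hi & 0x00800000U)			/* G: 4KB granularity */
		limit = (limit << 12) | 0xFFFU;
	printf("base=%08x limit=%08x access=%02x\n",
	       base, limit, (hi >> 8) & 0xFFU);
}

int main(void)
{
	decode(0x00cf9b000000ffffULL);	/* kernel 4GB code at 0x00000000 */
	decode(0xc0cf9b400000ffffULL);	/* KERNEXEC kernel code at 0xc0400000 */
	decode(0x60c5fb000000ffffULL);	/* SEGMEXEC user 1.5GB code at 0x60000000 */
	return 0;
}
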
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/i386_ksyms.c linux-2.4.26-leo/arch/i386/kernel/i386_ksyms.c
--- linux-2.4.26/arch/i386/kernel/i386_ksyms.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/i386_ksyms.c	2004-06-22 20:53:45.000000000 +0100
@@ -74,6 +74,9 @@
 EXPORT_SYMBOL(get_cmos_time);
 EXPORT_SYMBOL(apm_info);
 EXPORT_SYMBOL(gdt);
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+EXPORT_SYMBOL(gdt2);
+#endif
 EXPORT_SYMBOL(empty_zero_page);
 
 #ifdef CONFIG_DEBUG_IOVIRT
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/i387.c linux-2.4.26-leo/arch/i386/kernel/i387.c
--- linux-2.4.26/arch/i386/kernel/i387.c	2003-08-25 12:44:39.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/i387.c	2004-05-26 23:34:34.000000000 +0100
@@ -10,6 +10,7 @@
 
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -89,6 +90,8 @@
 {
 	struct task_struct *tsk = current;
 
+	preempt_disable();
+
 	if (tsk->flags & PF_USEDFPU) {
 		__save_init_fpu(tsk);
 		return;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/ioport.c linux-2.4.26-leo/arch/i386/kernel/ioport.c
--- linux-2.4.26/arch/i386/kernel/ioport.c	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/ioport.c	2004-06-22 20:53:45.000000000 +0100
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/stddef.h>
+#include <linux/grsecurity.h>
 
 /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
 static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
@@ -55,17 +56,27 @@
 asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 {
 	struct thread_struct * t = &current->thread;
-	struct tss_struct * tss = init_tss + smp_processor_id();
+	struct tss_struct * tss;
 
 	if ((from + num <= from) || (from + num > IO_BITMAP_SIZE*32))
 		return -EINVAL;
+#ifdef CONFIG_GRKERNSEC_IO
+	if (turn_on) {
+		gr_handle_ioperm();
+#else
 	if (turn_on && !capable(CAP_SYS_RAWIO))
+#endif
 		return -EPERM;
+#ifdef CONFIG_GRKERNSEC_IO
+	}
+#endif
 	/*
 	 * If it's the first ioperm() call in this thread's lifetime, set the
 	 * IO bitmap up. ioperm() is much less timing critical than clone(),
 	 * this is why we delay this operation until now:
 	 */
+	preempt_disable();
+	tss = init_tss + smp_processor_id();
 	if (!t->ioperm) {
 		/*
 		 * just in case ...
@@ -84,6 +95,7 @@
 		memcpy(tss->io_bitmap, t->io_bitmap, IO_BITMAP_BYTES);
 		tss->bitmap = IO_BITMAP_OFFSET; /* Activate it in the TSS */
 	}
+	preempt_enable();
 
 	return 0;
 }
@@ -109,8 +121,13 @@
 		return -EINVAL;
 	/* Trying to gain more privileges? */
 	if (level > old) {
+#ifdef CONFIG_GRKERNSEC_IO
+		gr_handle_iopl();
+		return -EPERM;
+#else
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
+#endif
 	}
 	regs->eflags = (regs->eflags & 0xffffcfff) | (level << 12);
 	return 0;
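
The interleaved #ifdefs in sys_ioperm() are easier to audit after
preprocessing. With CONFIG_GRKERNSEC_IO set, the privilege check reduces to:

	if (turn_on) {
		gr_handle_ioperm();	/* log the attempt */
		return -EPERM;		/* raw port access denied outright */
	}

and without it, the stock capability test remains:

	if (turn_on && !capable(CAP_SYS_RAWIO))
		return -EPERM;
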
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/irq.c linux-2.4.26-leo/arch/i386/kernel/irq.c
--- linux-2.4.26/arch/i386/kernel/irq.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/irq.c	2004-05-26 23:34:34.000000000 +0100
@@ -284,9 +284,11 @@
 				show("wait_on_irq");
 				count = ~0;
 			}
+			preempt_disable();
 			__sti();
 			SYNC_OTHER_CORES(cpu);
 			__cli();
+			preempt_enable_no_resched();
 			if (irqs_running())
 				continue;
 			if (global_irq_lock)
@@ -360,8 +362,9 @@
 
 	__save_flags(flags);
 	if (flags & (1 << EFLAGS_IF_SHIFT)) {
-		int cpu = smp_processor_id();
+		int cpu;
 		__cli();
+		cpu = smp_processor_id();
 		if (!local_irq_count(cpu))
 			get_irqlock(cpu);
 	}
@@ -369,11 +372,14 @@
 
 void __global_sti(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
+	preempt_disable();
+	cpu = smp_processor_id();
 	if (!local_irq_count(cpu))
 		release_irqlock(cpu);
 	__sti();
+	preempt_enable();
 }
 
 /*
@@ -388,13 +394,15 @@
 	int retval;
 	int local_enabled;
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;
 
 	__save_flags(flags);
 	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
 	/* default to local */
 	retval = 2 + local_enabled;
 
+	preempt_disable();
+	cpu = smp_processor_id();
 	/* check for global flags if we're not in an interrupt */
 	if (!local_irq_count(cpu)) {
 		if (local_enabled)
@@ -402,6 +410,7 @@
 		if (global_irq_holder == cpu)
 			retval = 0;
 	}
+	preempt_enable();
 	return retval;
 }
 
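
The __global_cli()/__global_sti() changes all fix the same preemption race:
sampling the CPU id while the task can still migrate. Before and after, in
miniature:

	/* racy under CONFIG_PREEMPT: can migrate between the two lines */
	int cpu = smp_processor_id();
	__cli();

	/* safe: the id is sampled once migration is impossible */
	__cli();
	cpu = smp_processor_id();
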
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/ldt.c linux-2.4.26-leo/arch/i386/kernel/ldt.c
--- linux-2.4.26/arch/i386/kernel/ldt.c	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/ldt.c	2004-06-22 20:53:45.000000000 +0100
@@ -151,7 +151,7 @@
 {
 	int err;
 	unsigned long size;
-	void *address;
+	const void *address;
 
 	err = 0;
 	address = &default_ldt[0];
@@ -214,6 +214,13 @@
 		}
 	}
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) && (ldt_info.contents & 2)) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+#endif
+
 	entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
 		  (ldt_info.limit & 0x0ffff);
 	entry_2 = (ldt_info.base_addr & 0xff000000) |
@@ -224,7 +231,7 @@
 		  ((ldt_info.seg_not_present ^ 1) << 15) |
 		  (ldt_info.seg_32bit << 22) |
 		  (ldt_info.limit_in_pages << 23) |
-		  0x7000;
+		  0x7100;
 	if (!oldmode)
 		entry_2 |= (ldt_info.useable << 20);
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/microcode.c linux-2.4.26-leo/arch/i386/kernel/microcode.c
--- linux-2.4.26/arch/i386/kernel/microcode.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/microcode.c	2004-05-26 23:34:34.000000000 +0100
@@ -397,15 +397,18 @@
 {
 	int i, error;
 
+	preempt_disable();
 	if (smp_call_function(collect_cpu_info, NULL, 1, 1) != 0) {
 		printk(KERN_ERR "microcode: Error! Could not run on all processors\n");
 		error = -EIO;
+		preempt_enable();
 		goto out;
 	}
 	collect_cpu_info(NULL);
 
 	if ((error = find_matching_ucodes())) {
 		printk(KERN_ERR "microcode: Error in the microcode data\n");
+		preempt_enable();
 		goto out_free;
 	}
 
@@ -414,6 +417,7 @@
 		error = -EIO;
 	}
 	do_update_one(NULL);
+	preempt_enable();
 
 out_free:
 	for (i = 0; i < smp_num_cpus; i++) {
@@ -428,6 +432,7 @@
 		}
 	}
 out:
+
 	return error;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/msr.c linux-2.4.26-leo/arch/i386/kernel/msr.c
--- linux-2.4.26/arch/i386/kernel/msr.c	2001-10-11 17:04:57.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/msr.c	2004-05-26 23:34:34.000000000 +0100
@@ -114,8 +114,9 @@
 {
   struct msr_command cmd;
 
+  preempt_disable();
   if ( cpu == smp_processor_id() ) {
-    return wrmsr_eio(reg, eax, edx);
+    cmd.err = wrmsr_eio(reg, eax, edx);
   } else {
     cmd.cpu = cpu;
     cmd.reg = reg;
@@ -123,16 +124,19 @@
     cmd.data[1] = edx;
     
     smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
-    return cmd.err;
   }
+
+  preempt_enable();
+  return cmd.err;
 }
 
 static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
 {
   struct msr_command cmd;
 
+  preempt_disable();
   if ( cpu == smp_processor_id() ) {
-    return rdmsr_eio(reg, eax, edx);
+    cmd.err = rdmsr_eio(reg, eax, edx);
   } else {
     cmd.cpu = cpu;
     cmd.reg = reg;
@@ -141,9 +145,10 @@
     
     *eax = cmd.data[0];
     *edx = cmd.data[1];
-
-    return cmd.err;
   }
+
+  preempt_enable();
+  return cmd.err;
 }
 
 #else /* ! CONFIG_SMP */
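
do_wrmsr()/do_rdmsr() now route both the local and the remote case through
cmd.err so each function has a single preempt_enable() exit. The generic
shape (do_it_locally/do_it_remotely are hypothetical names; as with
msr_smp_wrmsr above, the IPI handler itself checks cmd->cpu):

static int run_on_cpu(int cpu, struct msr_command *cmd)
{
	int ret;

	preempt_disable();
	if (cpu == smp_processor_id())
		cmd->err = do_it_locally(cmd);
	else
		smp_call_function(do_it_remotely, cmd, 1, 1);	/* wait=1 */
	ret = cmd->err;
	preempt_enable();
	return ret;
}
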
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/mtrr.c linux-2.4.26-leo/arch/i386/kernel/mtrr.c
--- linux-2.4.26/arch/i386/kernel/mtrr.c	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/mtrr.c	2004-05-26 23:34:34.000000000 +0100
@@ -1065,6 +1065,9 @@
     wait_barrier_execute = TRUE;
     wait_barrier_cache_enable = TRUE;
     atomic_set (&undone_count, smp_num_cpus - 1);
+
+    preempt_disable();
+
     /*  Start the ball rolling on other CPUs  */
     if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
 	panic ("mtrr: timed out waiting for other CPUs\n");
@@ -1090,6 +1093,9 @@
 	then enable the local cache and return  */
     wait_barrier_cache_enable = FALSE;
     set_mtrr_done (&ctxt);
+
+    preempt_enable();
+
 }   /*  End Function set_mtrr_smp  */
 
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/pci-pc.c linux-2.4.26-leo/arch/i386/kernel/pci-pc.c
--- linux-2.4.26/arch/i386/kernel/pci-pc.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/pci-pc.c	2004-06-22 20:53:45.000000000 +0100
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/smpboot.h>
+#include <asm/desc.h>
 
 #include "pci-i386.h"
 
@@ -575,10 +576,16 @@
  * the array there.
  */
 
+#if defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
+#define __FLAT_KERNEL_CS 0x08
+#else
+#define __FLAT_KERNEL_CS __KERNEL_CS
+#endif
+
 static struct {
 	unsigned long address;
 	unsigned short segment;
-} bios32_indirect = { 0, __KERNEL_CS };
+} bios32_indirect = { 0, __FLAT_KERNEL_CS };
 
 /*
  * Returns the entry point for the given service, NULL on error
@@ -619,7 +626,9 @@
 static struct {
 	unsigned long address;
 	unsigned short segment;
-} pci_indirect = { 0, __KERNEL_CS };
+} pci_indirect = { 0, __FLAT_KERNEL_CS };
+
+#undef __FLAT_KERNEL_CS
 
 static int pci_bios_present;
 
@@ -1457,6 +1466,7 @@
 	if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
 		pcibios_sort();
 #endif
+
 }
 
 char * __devinit  pcibios_setup(char *str)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/process.c linux-2.4.26-leo/arch/i386/kernel/process.c
--- linux-2.4.26/arch/i386/kernel/process.c	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/process.c	2004-06-22 20:53:45.000000000 +0100
@@ -209,18 +209,18 @@
    doesn't work with at least one type of 486 motherboard.  It is easy
    to stop this code working; hence the copious comments. */
 
-static unsigned long long
+static const unsigned long long
 real_mode_gdt_entries [3] =
 {
 	0x0000000000000000ULL,	/* Null descriptor */
-	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
-	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
+	0x00009b000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
+	0x000093000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
 };
 
 static struct
 {
 	unsigned short       size __attribute__ ((packed));
-	unsigned long long * base __attribute__ ((packed));
+	const unsigned long long * base __attribute__ ((packed));
 }
 real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
 real_mode_idt = { 0x3ff, 0 },
@@ -552,7 +552,7 @@
 {
 	struct pt_regs * childregs;
 
-	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
+	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p - sizeof(unsigned long))) - 1;
 	struct_cpy(childregs, regs);
 	childregs->eax = 0;
 	childregs->esp = esp;
@@ -613,6 +613,16 @@
 	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+void pax_switch_segments(struct task_struct * tsk)
+{
+	if (tsk->flags & PF_PAX_SEGMEXEC)
+		__asm__ __volatile__("lgdt %0": "=m" (gdt_descr2));
+	else
+		__asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
+}
+#endif
+
 /*
  * This special macro can be used to load a debugging register
  */
@@ -652,6 +662,10 @@
 
 	unlazy_fpu(prev_p);
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	pax_switch_segments(next_p);
+#endif
+
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
@@ -792,3 +806,30 @@
 }
 #undef last_sched
 #undef first_sched
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDKSTACK
+asmlinkage void pax_randomize_kstack(void)
+{
+	struct tss_struct *tss = init_tss + smp_processor_id();
+	unsigned long time;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (!pax_aslr)
+		return;
+#endif
+
+	rdtscl(time);
+
+	/* P4 seems to return a 0 LSB, ignore it */
+#ifdef CONFIG_MPENTIUM4
+	time &= 0x3EUL;
+	time <<= 1;
+#else
+	time &= 0x1FUL;
+	time <<= 2;
+#endif
+
+	tss->esp0 ^= time;
+	current->thread.esp0 = tss->esp0;
+}
+#endif
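
For reference, both variants of the pax_randomize_kstack() mask produce a
4-byte-aligned offset in [0, 124]: on the P4, (tsc & 0x3E) << 1 yields
0, 4, ..., 124 while skipping the TSC bit that reads as zero, and on other
CPUs (tsc & 0x1F) << 2 yields the same range. XORing that into tss->esp0
therefore jitters the kernel stack top across 32 positions (5 bits of
entropy) on every system call return.
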
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/ptrace.c linux-2.4.26-leo/arch/i386/kernel/ptrace.c
--- linux-2.4.26/arch/i386/kernel/ptrace.c	2002-08-03 01:39:42.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/ptrace.c	2004-06-22 20:53:45.000000000 +0100
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -177,6 +178,9 @@
 	if (pid == 1)		/* you may not mess with init */
 		goto out_tsk;
 
+	if (gr_handle_ptrace(child, request))
+		goto out_tsk;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		goto out_tsk;
@@ -256,6 +260,17 @@
 			  if(addr < (long) &dummy->u_debugreg[4] &&
 			     ((unsigned long) data) >= TASK_SIZE-3) break;
 			  
+#ifdef CONFIG_GRKERNSEC
+			  if(addr >= (long) &dummy->u_debugreg[0] &&
+			     addr <= (long) &dummy->u_debugreg[3]){
+				  long reg   = (addr - (long) &dummy->u_debugreg[0]) >> 2;
+				  long type  = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3;
+				  long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3;
+				  if((type & 1) && (data & align))
+					break;
+			  }
+#endif
+
 			  if(addr == (long) &dummy->u_debugreg[7]) {
 				  data &= ~DR_CONTROL_RESERVED;
 				  for(i=0; i<4; i++)
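
The CONFIG_GRKERNSEC block in the debug-register path rejects misaligned data
watchpoints. Per breakpoint i, DR7 holds the R/W type in bits 16+4i..17+4i
(00 = instruction, 01 = data write, 11 = data read/write) and the length in
bits 18+4i..19+4i (00 = 1 byte, 01 = 2 bytes, 11 = 4 bytes); for data
breakpoints (type & 1) the length field doubles as an alignment mask, so a
nonzero (data & align) means the new DRi address is misaligned for its watch
length and the write is refused.
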
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/setup.c linux-2.4.26-leo/arch/i386/kernel/setup.c
--- linux-2.4.26/arch/i386/kernel/setup.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/setup.c	2004-06-22 20:53:45.000000000 +0100
@@ -3218,7 +3218,7 @@
 	set_tss_desc(nr,t);
 	gdt_table[__TSS(nr)].b &= 0xfffffdff;
 	load_TR(nr);
-	load_LDT(&init_mm.context);
+	_load_LDT(&init_mm.context);
 
 	/*
 	 * Clear all 6 debug registers:
@@ -3284,7 +3284,16 @@
 	printk(KERN_INFO "Your Pentium Pro seems ok.\n");
 	return 0;
 }
-	
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+static int __init setup_pax_softmode(char *str)
+{
+	get_option (&str, &pax_softmode);
+	return 1;
+}
+__setup("pax_softmode=", setup_pax_softmode);
+#endif
+
 /*
  * Local Variables:
  * mode:c
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/smp.c linux-2.4.26-leo/arch/i386/kernel/smp.c
--- linux-2.4.26/arch/i386/kernel/smp.c	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/smp.c	2004-05-26 23:34:34.000000000 +0100
@@ -360,10 +360,14 @@
 
 asmlinkage void smp_invalidate_interrupt (void)
 {
-	unsigned long cpu = smp_processor_id();
+	unsigned long cpu;
+
+	preempt_disable();
+
+	cpu = smp_processor_id();
 
 	if (!test_bit(cpu, &flush_cpumask))
-		return;
+		goto out;
 		/* 
 		 * This was a BUG() but until someone can quote me the
 		 * line from the intel manual that guarantees an IPI to
@@ -384,6 +388,8 @@
 	}
 	ack_APIC_irq();
 	clear_bit(cpu, &flush_cpumask);
+out:
+	preempt_enable();
 }
 
 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
@@ -433,17 +439,22 @@
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+	preempt_enable();
 }
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	if (current->active_mm == mm) {
 		if (current->mm)
 			local_flush_tlb();
@@ -452,13 +463,16 @@
 	}
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+	preempt_enable();
 }
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
@@ -468,6 +482,7 @@
 
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, va);
+	preempt_enable();
 }
 
 static inline void do_flush_tlb_all_local(void)
@@ -486,9 +501,11 @@
 
 void flush_tlb_all(void)
 {
+	preempt_disable();
 	smp_call_function (flush_tlb_all_ipi,0,1,1);
 
 	do_flush_tlb_all_local();
+	preempt_enable();
 }
 
 /*
@@ -572,7 +589,7 @@
 static void stop_this_cpu (void * dummy)
 {
 	/*
-	 * Remove this CPU:
+	 * Remove this CPU: assumes preemption is disabled
 	 */
 	clear_bit(smp_processor_id(), &cpu_online_map);
 	__cli();
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/sys_i386.c linux-2.4.26-leo/arch/i386/kernel/sys_i386.c
--- linux-2.4.26/arch/i386/kernel/sys_i386.c	2003-08-25 12:44:39.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/kernel/sys_i386.c	2004-06-22 20:53:45.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/mman.h>
 #include <linux/file.h>
 #include <linux/utsname.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
@@ -48,6 +49,11 @@
 	int error = -EBADF;
 	struct file * file = NULL;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 	if (!(flags & MAP_ANONYMOUS)) {
 		file = fget(fd);
@@ -55,8 +61,14 @@
 			goto out;
 	}
 
+	if (gr_handle_mmap(file, prot)) {
+		fput(file);
+		error = -EACCES;
+		goto out;
+	}
+
 	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	error = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
 	up_write(&current->mm->mmap_sem);
 
 	if (file)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/trampoline.S linux-2.4.26-leo/arch/i386/kernel/trampoline.S
--- linux-2.4.26/arch/i386/kernel/trampoline.S	2002-11-28 23:53:09.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/trampoline.S	2004-06-22 20:53:45.000000000 +0100
@@ -54,7 +54,7 @@
 	lmsw	%ax		# into protected mode
 	jmp	flush_instr
 flush_instr:
-	ljmpl	$__KERNEL_CS, $0x00100000
+	ljmpl	$__KERNEL_CS, $SYMBOL_NAME(startup_32)-__PAGE_OFFSET
 			# jump to startup_32 in arch/i386/kernel/head.S
 
 idt_48:
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/kernel/traps.c linux-2.4.26-leo/arch/i386/kernel/traps.c
--- linux-2.4.26/arch/i386/kernel/traps.c	2002-11-28 23:53:09.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/kernel/traps.c	2004-06-22 20:53:45.000000000 +0100
@@ -54,15 +54,10 @@
 asmlinkage void lcall7(void);
 asmlinkage void lcall27(void);
 
-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+const struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
 		{ 0, 0 }, { 0, 0 } };
 
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
- */
-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
+extern struct desc_struct idt_table[256];
 
 asmlinkage void divide_error(void);
 asmlinkage void debug(void);
@@ -228,14 +223,23 @@
 		show_stack((unsigned long*)esp);
 
 		printk("\nCode: ");
+
+#ifndef CONFIG_GRKERNSEC_PAX_KERNEXEC
 		if(regs->eip < PAGE_OFFSET)
 			goto bad;
+#endif
 
 		for(i=0;i<20;i++)
 		{
 			unsigned char c;
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+			if(__get_user(c, &((unsigned char*)regs->eip)[i+__KERNEL_TEXT_OFFSET])) {
+#else
 			if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
 bad:
+#endif
+
 				printk(" Bad EIP value.");
 				break;
 			}
@@ -258,8 +262,13 @@
 
 	eip = regs->eip;
 
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	eip += __KERNEL_TEXT_OFFSET;
+#else
 	if (eip < PAGE_OFFSET)
 		goto no_bug;
+#endif
+
 	if (__get_user(ud2, (unsigned short *)eip))
 		goto no_bug;
 	if (ud2 != 0x0b0f)
@@ -267,7 +276,13 @@
 	if (__get_user(line, (unsigned short *)(eip + 2)))
 		goto bug;
 	if (__get_user(file, (char **)(eip + 4)) ||
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+		__get_user(c, file + __KERNEL_TEXT_OFFSET))
+#else
 		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+#endif
+
 		file = "<bad filename>";
 
 	printk("kernel BUG at %s:%d!\n", file, line);
@@ -422,6 +437,13 @@
 			regs->eip = fixup;
 			return;
 		}
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+		if ((regs->xcs & 0xFFFF) == __KERNEL_CS)
+				die("PAX: suspicious general protection fault", regs, error_code);
+		else
+#endif
+
 		die("general protection fault", regs, error_code);
 	}
 }
@@ -527,13 +549,12 @@
 {
 	unsigned int condition;
 	struct task_struct *tsk = current;
-	unsigned long eip = regs->eip;
 	siginfo_t info;
 
 	__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
 
 	/* If the user set TF, it's simplest to clear it right away. */
-	if ((eip >=PAGE_OFFSET) && (regs->eflags & TF_MASK))
+	if (!(regs->xcs & 3) && (regs->eflags & TF_MASK) && !(regs->eflags & VM_MASK))
 		goto clear_TF;
 
 	/* Mask out spurious debug traps due to lazy DR7 setting */
@@ -751,6 +772,8 @@
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled.
  */
 asmlinkage void math_state_restore(struct pt_regs regs)
 {
@@ -826,7 +849,7 @@
 	_set_gate(idt_table+n,15,3,addr);
 }
 
-static void __init set_call_gate(void *a, void *addr)
+static void __init set_call_gate(const void *a, void *addr)
 {
 	_set_gate(a,12,3,addr);
 }
@@ -852,14 +875,45 @@
 	"rorl $16,%%eax" \
 	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
 
-void set_tss_desc(unsigned int n, void *addr)
+void set_tss_desc(unsigned int n, const void *addr)
 {
 	_set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	_set_tssldt_desc(gdt_table2+__TSS(n), (int)addr, 235, 0x89);
+#endif
+
 }
 
-void set_ldt_desc(unsigned int n, void *addr, unsigned int size)
+void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size)
 {
 	_set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	_set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
+#endif
+
+}
+
+void set_ldt_desc(unsigned int n, const void *addr, unsigned int size)
+{
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	unsigned long flags, cr3;
+
+	pax_open_kernel(flags, cr3);
+#endif
+
+	_set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	_set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	pax_close_kernel(flags, cr3);
+#endif
+
 }
 
 #ifdef CONFIG_X86_VISWS_APIC
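
A note on the __KERNEL_TEXT_OFFSET arithmetic above: under KERNEXEC the
kernel executes through a non-flat code segment (see gdt_table in head.S), so
the EIP saved in pt_regs is relative to that segment's base rather than a
flat address. Code that dereferences regs->eip through the flat data segment
must translate it first, which is what the die() and BUG() paths now do; a
sketch of the translated instruction fetch:

	unsigned char c;
	const unsigned char *insn = (const unsigned char *)regs->eip;
#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
	insn += __KERNEL_TEXT_OFFSET;	/* CS-relative EIP -> flat address */
#endif
	if (__get_user(c, insn))
		printk(" Bad EIP value.");
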
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/lib/dec_and_lock.c linux-2.4.26-leo/arch/i386/lib/dec_and_lock.c
--- linux-2.4.26/arch/i386/lib/dec_and_lock.c	2000-07-08 02:20:16.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/lib/dec_and_lock.c	2004-05-26 23:34:34.000000000 +0100
@@ -8,6 +8,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 
 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/Makefile linux-2.4.26-leo/arch/i386/Makefile
--- linux-2.4.26/arch/i386/Makefile	2003-06-13 15:51:29.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/Makefile	2004-06-22 20:53:45.000000000 +0100
@@ -114,6 +114,9 @@
 
 MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
 
+arch/i386/vmlinux.lds: arch/i386/vmlinux.lds.S FORCE
+	$(CPP) -C -P -I$(HPATH) -D__KERNEL__ -imacros $(HPATH)/linux/config.h -imacros $(HPATH)/asm-i386/segment.h -imacros $(HPATH)/asm-i386/page.h -Ui386 arch/i386/vmlinux.lds.S >arch/i386/vmlinux.lds
+
 vmlinux: arch/i386/vmlinux.lds
 
 FORCE: ;
@@ -150,6 +153,7 @@
 	@$(MAKEBOOT) clean
 
 archmrproper:
+	rm -f arch/i386/vmlinux.lds
 
 archdep:
 	@$(MAKEBOOT) dep
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/mm/fault.c linux-2.4.26-leo/arch/i386/mm/fault.c
--- linux-2.4.26/arch/i386/mm/fault.c	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/mm/fault.c	2004-06-22 20:53:45.000000000 +0100
@@ -19,6 +19,8 @@
 #include <linux/init.h>
 #include <linux/tty.h>
 #include <linux/vt_kern.h>		/* For unblank_screen() */
+#include <linux/unistd.h>
+#include <linux/compiler.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -127,6 +129,10 @@
 asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
 extern unsigned long idt;
 
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+static int pax_handle_fetch_fault(struct pt_regs *regs);
+#endif
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -137,23 +143,31 @@
  *	bit 1 == 0 means read, 1 means write
  *	bit 2 == 0 means kernel, 1 means user-mode
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+static int do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+#else
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long error_code)
+#endif
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
+#ifndef CONFIG_GRKERNSEC_PAX_PAGEEXEC
 	unsigned long address;
+#endif
 	unsigned long page;
 	unsigned long fixup;
 	int write;
 	siginfo_t info;
 
+#ifndef CONFIG_GRKERNSEC_PAX_PAGEEXEC
 	/* get the address */
 	__asm__("movl %%cr2,%0":"=r" (address));
 
 	/* It's safe to allow irq's after cr2 has been saved */
 	if (regs->eflags & X86_EFLAGS_IF)
 		local_irq_enable();
+#endif
 
 	tsk = current;
 
@@ -258,7 +272,7 @@
 			tsk->thread.screen_bitmap |= 1 << bit;
 	}
 	up_read(&mm->mmap_sem);
-	return;
+	return 0;
 
 /*
  * Something tried to access memory that isn't in our memory map..
@@ -269,6 +283,40 @@
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (error_code & 4) {
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if (current->flags & PF_PAX_SEGMEXEC) {
+
+#if defined(CONFIG_GRKERNSEC_PAX_EMUTRAMP) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		if ((error_code == 4) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) {
+			switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			case 5:
+				return 0;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+			case 4:
+				return 0;
+			case 3:
+			case 2:
+				return 1;
+#endif
+
+			case 1:
+			default:
+				;
+			}
+		}
+#endif
+
+			if (address >= SEGMEXEC_TASK_SIZE) {
+				pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
+				do_exit(SIGKILL);
+			}
+		}
+#endif
+
 		tsk->thread.cr2 = address;
 		/* Kernel addresses are always protection faults */
 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
@@ -278,7 +326,7 @@
 		/* info.si_code has been set above */
 		info.si_addr = (void *)address;
 		force_sig_info(SIGSEGV, &info, tsk);
-		return;
+		return 0;
 	}
 
 	/*
@@ -291,7 +339,7 @@
 
 		if (nr == 6) {
 			do_invalid_op(regs, 0);
-			return;
+			return 0;
 		}
 	}
 
@@ -299,7 +347,7 @@
 	/* Are we prepared to handle this kernel fault?  */
 	if ((fixup = search_exception_table(regs->eip)) != 0) {
 		regs->eip = fixup;
-		return;
+		return 0;
 	}
 
 /*
@@ -311,6 +359,18 @@
 
 	if (address < PAGE_SIZE)
 		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	else if (init_mm.start_code + __KERNEL_TEXT_OFFSET <= address && address < init_mm.end_code + __KERNEL_TEXT_OFFSET) {
+		if (tsk->curr_ip)
+			printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
+					 NIPQUAD(tsk->curr_ip), tsk->comm, tsk->pid, tsk->uid, tsk->euid);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
+					 tsk->comm, tsk->pid, tsk->uid, tsk->euid);
+	}
+#endif
+
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n",address);
@@ -363,7 +423,7 @@
 	/* Kernel mode? Handle exceptions or die */
 	if (!(error_code & 4))
 		goto no_context;
-	return;
+	return 0;
 
 vmalloc_fault:
 	{
@@ -396,6 +456,448 @@
 		pte_k = pte_offset(pmd_k, address);
 		if (!pte_present(*pte_k))
 			goto no_context;
-		return;
+		return 0;
 	}
 }
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+/* PaX: called with the page_table_lock spinlock held */
+static inline pte_t * pax_get_pte(struct mm_struct *mm, unsigned long address)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd || !pgd_present(*pgd))
+		return 0;
+	pmd = pmd_offset(pgd, address);
+	if (!pmd || !pmd_present(*pmd))
+		return 0;
+	return pte_offset(pmd, address);
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->eip = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when sigreturn trampoline was detected
+ *         3 when rt_sigreturn trampoline was detected
+ *         4 when gcc trampoline was detected
+ *         5 when legitimate ET_EXEC was detected
+ */
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
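+	/* trans[] maps the register numbers encoded in i386 instructions
+	   (eax=0, ecx, edx, ebx, esp, ebp, esi, edi=7) to word offsets into
+	   struct pt_regs */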
+	static const unsigned char trans[8] = {6, 1, 2, 0, 13, 5, 3, 4};
+#endif
+	int err;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		unsigned long esp_4;
+		if (regs->eip >= current->mm->start_code &&
+		    regs->eip < current->mm->end_code)
+		{
+			err = get_user(esp_4, (unsigned long*)(regs->esp-4UL));
+			if (err || esp_4 == regs->eip)
+				return 1;
+			regs->eip += current->mm->delta_exec;
+			return 5;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+
+#ifndef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+	if (!(current->flags & PF_PAX_EMUTRAMP))
+		return 1;
+#endif
+
+	do { /* PaX: sigreturn emulation */
+		unsigned char pop, mov;
+		unsigned short sys;
+		unsigned long nr;
+
+		err = get_user(pop, (unsigned char *)(regs->eip));
+		err |= get_user(mov, (unsigned char *)(regs->eip + 1));
+		err |= get_user(nr, (unsigned long *)(regs->eip + 2));
+		err |= get_user(sys, (unsigned short *)(regs->eip + 6));
+
+		if (err)
+			break;
+
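+		/* popl %eax ; movl $__NR_sigreturn,%eax ; int $0x80 */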
+		if (pop == 0x58 &&
+		    mov == 0xb8 &&
+		    nr == __NR_sigreturn &&
+		    sys == 0x80cd)
+		{
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+			int sig;
+			struct k_sigaction *ka;
+			__sighandler_t handler;
+
+			if (get_user(sig, (int *)regs->esp))
+				return 1;
+			if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP)
+				return 1;
+			ka = &current->sig->action[sig-1];
+			handler = ka->sa.sa_handler;
+			if (handler == SIG_DFL || handler == SIG_IGN) {
+				if (!(current->flags & PF_PAX_EMUTRAMP))
+					return 1;
+			} else if (ka->sa.sa_flags & SA_SIGINFO)
+				return 1;
+#endif
+
+			regs->esp += 4;
+			regs->eax = nr;
+			regs->eip += 8;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: rt_sigreturn emulation */
+		unsigned char mov;
+		unsigned short sys;
+		unsigned long nr;
+
+		err = get_user(mov, (unsigned char *)(regs->eip));
+		err |= get_user(nr, (unsigned long *)(regs->eip + 1));
+		err |= get_user(sys, (unsigned short *)(regs->eip + 5));
+
+		if (err)
+			break;
+
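+		/* movl $__NR_rt_sigreturn,%eax ; int $0x80 */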
+		if (mov == 0xb8 &&
+		    nr == __NR_rt_sigreturn &&
+		    sys == 0x80cd)
+		{
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+			int sig;
+			struct k_sigaction *ka;
+			__sighandler_t handler;
+
+			if (get_user(sig, (int *)regs->esp))
+				return 1;
+			if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP)
+				return 1;
+			ka = &current->sig->action[sig-1];
+			handler = ka->sa.sa_handler;
+			if (handler == SIG_DFL || handler == SIG_IGN) {
+				if (!(current->flags & PF_PAX_EMUTRAMP))
+					return 1;
+			} else if (!(ka->sa.sa_flags & SA_SIGINFO))
+				return 1;
+#endif
+
+			regs->eax = nr;
+			regs->eip += 7;
+			return 3;
+		}
+	} while (0);
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+	if (!(current->flags & PF_PAX_EMUTRAMP))
+		return 1;
+#endif
+
+	do { /* PaX: gcc trampoline emulation #1 */
+		unsigned char mov1, mov2;
+		unsigned short jmp;
+		unsigned long addr1, addr2, ret;
+		unsigned short call;
+
+		err = get_user(mov1, (unsigned char *)regs->eip);
+		err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
+		err |= get_user(mov2, (unsigned char *)(regs->eip + 5));
+		err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
+		err |= get_user(jmp, (unsigned short *)(regs->eip + 10));
+		err |= get_user(ret, (unsigned long *)regs->esp);
+
+		if (err)
+			break;
+
+		err = get_user(call, (unsigned short *)(ret-2));
+		if (err)
+			break;
+
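+		/* movl $addr1,%reg1 ; movl $addr2,%reg2 ; jmp *%reg2,
+		   entered via call *%reg */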
+		if ((mov1 & 0xF8) == 0xB8 &&
+		    (mov2 & 0xF8) == 0xB8 &&
+		    (mov1 & 0x07) != (mov2 & 0x07) &&
+		    (jmp & 0xF8FF) == 0xE0FF &&
+		    (mov2 & 0x07) == ((jmp>>8) & 0x07) &&
+		    (call & 0xF8FF) == 0xD0FF &&
+		    regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
+		{
+			((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1;
+			((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2;
+			regs->eip = addr2;
+			return 4;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #2 */
+		unsigned char mov, jmp;
+		unsigned long addr1, addr2, ret;
+		unsigned short call;
+
+		err = get_user(mov, (unsigned char *)regs->eip);
+		err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
+		err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
+		err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
+		err |= get_user(ret, (unsigned long *)regs->esp);
+
+		if (err)
+			break;
+
+		err = get_user(call, (unsigned short *)(ret-2));
+		if (err)
+			break;
+
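+		/* movl $addr1,%reg ; jmp rel32, entered via call *%reg */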
+		if ((mov & 0xF8) == 0xB8 &&
+		    jmp == 0xE9 &&
+		    (call & 0xF8FF) == 0xD0FF &&
+		    regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
+		{
+			((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
+			regs->eip += addr2 + 10;
+			return 4;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #3 */
+		unsigned char mov, jmp;
+		char offset;
+		unsigned long addr1, addr2, ret;
+		unsigned short call;
+
+		err = get_user(mov, (unsigned char *)regs->eip);
+		err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
+		err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
+		err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
+		err |= get_user(ret, (unsigned long *)regs->esp);
+
+		if (err)
+			break;
+
+		err = get_user(call, (unsigned short *)(ret-3));
+		err |= get_user(offset, (char *)(ret-1));
+		if (err)
+			break;
+
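+		/* movl $addr1,%reg ; jmp rel32, entered via call *disp8(%ebp) */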
+		if ((mov & 0xF8) == 0xB8 &&
+		    jmp == 0xE9 &&
+		    call == 0x55FF)
+		{
+			unsigned long addr;
+
+			err = get_user(addr, (unsigned long*)(regs->ebp + (unsigned long)(long)offset));
+			if (err || regs->eip != addr)
+				break;
+
+			((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
+			regs->eip += addr2 + 10;
+			return 4;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #4 */
+		unsigned char mov, jmp, sib;
+		char offset;
+		unsigned long addr1, addr2, ret;
+		unsigned short call;
+
+		err = get_user(mov, (unsigned char *)regs->eip);
+		err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
+		err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
+		err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
+		err |= get_user(ret, (unsigned long *)regs->esp);
+
+		if (err)
+			break;
+
+		err = get_user(call, (unsigned short *)(ret-4));
+		err |= get_user(sib, (unsigned char *)(ret-2));
+		err |= get_user(offset, (char *)(ret-1));
+		if (err)
+			break;
+
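+		/* movl $addr1,%reg ; jmp rel32, entered via call *disp8(%esp) */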
+		if ((mov & 0xF8) == 0xB8 &&
+		    jmp == 0xE9 &&
+		    call == 0x54FF &&
+		    sib == 0x24)
+		{
+			unsigned long addr;
+
+			err = get_user(addr, (unsigned long*)(regs->esp + 4 + (unsigned long)(long)offset));
+			if (err || regs->eip != addr)
+				break;
+
+			((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
+			regs->eip += addr2 + 10;
+			return 4;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #5 */
+		unsigned char mov, jmp, sib;
+		unsigned long addr1, addr2, ret, offset;
+		unsigned short call;
+
+		err = get_user(mov, (unsigned char *)regs->eip);
+		err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
+		err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
+		err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
+		err |= get_user(ret, (unsigned long *)regs->esp);
+
+		if (err)
+			break;
+
+		err = get_user(call, (unsigned short *)(ret-7));
+		err |= get_user(sib, (unsigned char *)(ret-5));
+		err |= get_user(offset, (unsigned long *)(ret-4));
+		if (err)
+			break;
+
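+		/* movl $addr1,%reg ; jmp rel32, entered via call *disp32(%esp) */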
+		if ((mov & 0xF8) == 0xB8 &&
+		    jmp == 0xE9 &&
+		    call == 0x94FF &&
+		    sib == 0x24)
+		{
+			unsigned long addr;
+
+			err = get_user(addr, (unsigned long*)(regs->esp + 4 + offset));
+			if (err || regs->eip != addr)
+				break;
+
+			((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
+			regs->eip += addr2 + 10;
+			return 4;
+		}
+	} while (0);
+#endif
+
+	return 1; /* PaX in action */
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (unsigned char*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%02x ", c);
+	}
+	printk("\n");
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+/*
+ * PaX: handle the extra page faults or pass them down to the original handler
+ *
+ * returns 0 when nothing special was detected
+ *         1 when sigreturn trampoline (syscall) has to be emulated
+ */
+asmlinkage int pax_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long address;
+	pte_t *pte;
+	unsigned char pte_mask;
+	int ret;
+
+	__asm__("movl %%cr2,%0":"=r" (address));
+
+	/* It's safe to allow irq's after cr2 has been saved */
+	if (likely(regs->eflags & X86_EFLAGS_IF))
+		local_irq_enable();
+
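+	/* i386 page fault error code: bit 0 set = protection fault, bit 1 set =
+	   write access, bit 2 set = user mode; anything other than a user-mode
+	   protection fault is passed to the original handler */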
+	if (unlikely((error_code & 5) != 5 ||
+		     address >= TASK_SIZE ||
+		     !(current->flags & PF_PAX_PAGEEXEC)))
+		return do_page_fault(regs, error_code, address);
+
+	/* PaX: it's our fault, let's handle it if we can */
+
+	/* PaX: take a look at read faults before acquiring any locks */
+	if (unlikely((error_code == 5) && (regs->eip == address))) {
+		/* instruction fetch attempt from a protected page in user mode */
+		ret = pax_handle_fetch_fault(regs);
+		switch (ret) {
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+		case 5:
+			return 0;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+		case 4:
+			return 0;
+		case 3:
+		case 2:
+			return 1;
+#endif
+		case 1:
+		default:
+			pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
+			do_exit(SIGKILL);
+		}
+	}
+
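+	/* grant user rights and mark the PTE accessed; for write faults
+	   (error code bit 1) also set the dirty bit: the shift moves bit 1
+	   up to _PAGE_DIRTY */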
+	pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1));
+
+	spin_lock(&mm->page_table_lock);
+	pte = pax_get_pte(mm, address);
+	if (unlikely(!pte || !(pte_val(*pte) & _PAGE_PRESENT) || pte_exec(*pte))) {
+		spin_unlock(&mm->page_table_lock);
+		do_page_fault(regs, error_code, address);
+		return 0;
+	}
+
+	if (unlikely((error_code == 7) && !pte_write(*pte))) {
+		/* write attempt to a protected page in user mode */
+		spin_unlock(&mm->page_table_lock);
+		do_page_fault(regs, error_code, address);
+		return 0;
+	}
+
+	/*
+	 * PaX: fill DTLB with user rights and retry
+	 */
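+	/* orb sets the user (and accessed/dirty) bits in the PTE, testb touches
+	   the target page so the CPU loads the now-accessible entry into the
+	   DTLB, xorb then removes _PAGE_USER again -- the DTLB keeps the
+	   user-readable copy */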
+	__asm__ __volatile__ (
+		"orb %2,%1\n"
+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
+/*
+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
+ * page fault when examined during a TLB load attempt. this is true not only
+ * for PTEs holding a non-present entry but also for present entries that will
+ * raise a page fault (such as those set up by PaX, or the copy-on-write
+ * mechanism). in effect it means that we do *not* need to flush the TLBs
+ * for our target pages since their PTEs are simply not in the TLBs at all.
+ * the best thing about omitting it is that we gain around 15-20% speed in the
+ * fast path of the page fault handler and can get rid of tracing since we
+ * can no longer flush unintended entries.
+ */
+
+		"invlpg %0\n"
+#endif
+
+		"testb $0,%0\n"
+		"xorb %3,%1\n"
+		:
+		: "m" (*(char*)address), "m" (*(char*)pte) , "q" (pte_mask) , "i" (_PAGE_USER)
+		: "memory", "cc");
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+#endif
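
For reference, the eight bytes matched by the sigreturn emulation above form
the trampoline that the 2.4 i386 kernel writes into the signal frame for
handlers registered without SA_RESTORER. Below is a minimal user-space sketch,
illustrative only and not part of the patch, that lays out and prints this
pattern; it assumes a little-endian host and that __NR_sigreturn is 119, its
value on Linux/i386:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char tramp[8];
		unsigned int nr = 119;		/* __NR_sigreturn on i386 (assumed) */
		int i;

		tramp[0] = 0x58;		/* popl %eax */
		tramp[1] = 0xb8;		/* movl $__NR_sigreturn,%eax ... */
		memcpy(&tramp[2], &nr, 4);	/* ... 32-bit little-endian immediate */
		tramp[6] = 0xcd;		/* int $0x80, read back by the */
		tramp[7] = 0x80;		/* emulation as the short 0x80cd */

		for (i = 0; i < 8; i++)
			printf("%02x ", tramp[i]);
		printf("\n");
		return 0;
	}

pax_handle_fetch_fault() reads exactly these fields with get_user() at eip,
eip+1, eip+2 and eip+6 and, on a match, performs the pop and the system call
setup itself by adjusting esp, eax and eip.
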
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/mm/init.c linux-2.4.26-leo/arch/i386/mm/init.c
--- linux-2.4.26/arch/i386/mm/init.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/mm/init.c	2004-06-22 20:53:45.000000000 +0100
@@ -37,6 +37,7 @@
 #include <asm/e820.h>
 #include <asm/apic.h>
 #include <asm/tlb.h>
+#include <asm/desc.h>
 
 mmu_gather_t mmu_gathers[NR_CPUS];
 unsigned long highstart_pfn, highend_pfn;
@@ -46,6 +47,7 @@
 int do_check_pgt_cache(int low, int high)
 {
 	int freed = 0;
+	preempt_disable();
 	if(pgtable_cache_size > high) {
 		do {
 			if (pgd_quicklist) {
@@ -62,6 +64,7 @@
 			}
 		} while(pgtable_cache_size > low);
 	}
+	preempt_enable();
 	return freed;
 }
 
@@ -122,7 +125,7 @@
 
 /* References to section boundaries */
 
-extern char _text, _etext, _edata, __bss_start, _end;
+extern char _text, _etext, _data, _edata, __bss_start, _end;
 extern char __init_begin, __init_end;
 
 static inline void set_pte_phys (unsigned long vaddr,
@@ -365,6 +368,10 @@
 
 	__flush_tlb_all();
 
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
+#endif
+
 #ifdef CONFIG_HIGHMEM
 	kmap_init();
 #endif
@@ -529,7 +536,7 @@
 	reservedpages = free_pages_init();
 
 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+	datasize =  (unsigned long) &_edata - (unsigned long) &_data;
 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
@@ -597,6 +604,43 @@
 		totalram_pages++;
 	}
 	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+	/* PaX: limit KERNEL_CS to actual size */
+	{
+		unsigned long limit;
+		pgd_t *pgd;
+		pmd_t *pmd;
+
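+		/* a segment descriptor keeps limit bits 0-15 in the low word of
+		   'a' and limit bits 16-19 in bits 16-19 of 'b' */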
+		limit = (unsigned long)&_etext >> PAGE_SHIFT;
+		gdt_table[2].a = (gdt_table[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
+		gdt_table[2].b = (gdt_table[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
+
+#ifdef CONFIG_PCI_BIOS
+		printk(KERN_INFO "PAX: warning, PCI BIOS might still be in use, keeping flat KERNEL_CS.\n");
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		gdt_table2[2].a = (gdt_table2[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
+		gdt_table2[2].b = (gdt_table2[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
+#endif
+
+		/* PaX: make KERNEL_CS read-only */
+		for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
+			pgd = pgd_offset_k(addr);
+			pmd = pmd_offset(pgd, addr);
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_GLOBAL));
+		}
+		memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
+		for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
+			pgd = pgd_offset_k(addr);
+			pmd = pmd_offset(pgd, addr);
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+		}
+		flush_tlb_all();
+	}
+#endif
+
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/mm/ioremap.c linux-2.4.26-leo/arch/i386/mm/ioremap.c
--- linux-2.4.26/arch/i386/mm/ioremap.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/mm/ioremap.c	2004-06-22 20:53:46.000000000 +0100
@@ -49,7 +49,7 @@
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/vmlinux.lds linux-2.4.26-leo/arch/i386/vmlinux.lds
--- linux-2.4.26/arch/i386/vmlinux.lds	2002-02-25 19:37:53.000000000 +0000
+++ linux-2.4.26-leo/arch/i386/vmlinux.lds	2004-06-22 21:08:08.000000000 +0100
@@ -1,3 +1,3 @@
 /* ld script to make i386 Linux kernel
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
  */
@@ -6,41 +6,45 @@
 ENTRY(_start)
 SECTIONS
 {
-  . = 0xC0000000 + 0x100000;
-  _text = .;			/* Text and read-only data */
-  .text : {
-	*(.text)
-	*(.fixup)
-	*(.gnu.warning)
-	} = 0x9090
-
-  _etext = .;			/* End of text section */
+  . = (0xC0000000) + 0x100000;
+  .text.startup : {
+        BYTE(0xEA) /* jmp far */
+        LONG(startup_32 + (0) - (0xC0000000))
+        SHORT(0x10)
+        }
 
-  .rodata : { *(.rodata) *(.rodata.*) }
-  .kstrtab : { *(.kstrtab) }
+  . = ALIGN(32);
+  _data = .;
+  .data : { /* Data */
+        *(.data)
+        CONSTRUCTORS
+        }
 
-  . = ALIGN(16);		/* Exception table */
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
-  __start___ksymtab = .;	/* Kernel symbol table */
-  __ksymtab : { *(__ksymtab) }
-  __stop___ksymtab = .;
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
 
-  .data : {			/* Data */
-	*(.data)
-	CONSTRUCTORS
-	}
+  . = ALIGN(4096);
+  .data.page_aligned : {
+        *(.data.swapper_pg_dir)
+        *(.data.pg0)
+        *(.data.pg1)
+        *(.data.pg2)
+        }
 
-  _edata = .;			/* End of data section */
+  _edata = .; /* End of data section */
 
-  . = ALIGN(8192);		/* init_task */
-  .data.init_task : { *(.data.init_task) }
+  __bss_start = .; /* BSS */
+  .bss : {
+        *(.bss)
+        LONG(0)
+        }
+  __bss_end = . ;
 
-  . = ALIGN(4096);		/* Init code and data */
+  . = ALIGN(4096); /* Init code and data */
   __init_begin = .;
-  .text.init : { *(.text.init) }
   .data.init : { *(.data.init) }
   . = ALIGN(16);
   __setup_start = .;
@@ -49,27 +53,49 @@
   __initcall_start = .;
   .initcall.init : { *(.initcall.init) }
   __initcall_end = .;
+  .text.init : { *(.text.init) }
   . = ALIGN(4096);
   __init_end = .;
+  _text = .; /* Text and read-only data */
+  .text : {
+
 
+        *(.text)
+        *(.fixup)
+        *(.gnu.warning)
+        } = 0x9090
+
+  _etext = .; /* End of text section */
   . = ALIGN(4096);
-  .data.page_aligned : { *(.data.idt) }
+  . += (0);
+  .rodata.page_aligned : {
+        *(.rodata.empty_zero_page)
+        *(.rodata.idt)
+        }
+  .rodata : { *(.rodata) *(.rodata.*) }
+  .kstrtab : { *(.kstrtab) }
 
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  . = ALIGN(16); /* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  __start___ksymtab = .; /* Kernel symbol table */
+  __ksymtab : { *(__ksymtab) }
+  __stop___ksymtab = .;
+
+
+
+
+  _end = .;
 
-  __bss_start = .;		/* BSS */
-  .bss : {
-	*(.bss)
-	}
-  _end = . ;
 
   /* Sections to be discarded */
   /DISCARD/ : {
-	*(.text.exit)
-	*(.data.exit)
-	*(.exitcall.exit)
-	}
+        *(.text.exit)
+        *(.data.exit)
+        *(.exitcall.exit)
+        }
 
   /* Stabs debugging sections.  */
   .stab 0 : { *(.stab) }
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/i386/vmlinux.lds.S linux-2.4.26-leo/arch/i386/vmlinux.lds.S
--- linux-2.4.26/arch/i386/vmlinux.lds.S	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/arch/i386/vmlinux.lds.S	2004-06-22 20:53:46.000000000 +0100
@@ -0,0 +1,125 @@
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+  . = __PAGE_OFFSET + 0x100000;
+  .text.startup : {
+	BYTE(0xEA) /* jmp far */
+	LONG(startup_32 + __KERNEL_TEXT_OFFSET - __PAGE_OFFSET)
+	SHORT(__KERNEL_CS)
+	}
+
+  . = ALIGN(32);
+  _data = .;
+  .data : {			/* Data */
+	*(.data)
+	CONSTRUCTORS
+	}
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  .data.page_aligned : {
+	*(.data.swapper_pg_dir)
+	*(.data.pg0)
+	*(.data.pg1)
+	*(.data.pg2)
+	}
+
+  _edata = .;			/* End of data section */
+
+  __bss_start = .;		/* BSS */
+  .bss : {
+	*(.bss)
+	LONG(0)
+	} 
+  __bss_end = . ;
+
+  . = ALIGN(4096);		/* Init code and data */
+  __init_begin = .;
+  .data.init : { *(.data.init) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .setup.init : { *(.setup.init) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : { *(.initcall.init) }
+  __initcall_end = .;
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
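+/*
+ * PaX: init text and kernel text are linked __KERNEL_TEXT_OFFSET below their
+ * load addresses (AT), so that the non-flat KERNEL_CS based at
+ * __KERNEL_TEXT_OFFSET yields the same linear addresses as a flat mapping
+ */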
+  __text_init_start = .;
+  .text.init (. - __KERNEL_TEXT_OFFSET) : AT (__text_init_start) {
+	*(.text.init)
+	. = ALIGN(4*1024*1024) - 1;
+	BYTE(0)
+	}
+  __init_end = . + __KERNEL_TEXT_OFFSET;
+
+/*
+ * PaX: this must be kept in synch with the KERNEL_CS base
+ * in the GDTs in arch/i386/kernel/head.S
+ */
+  _text = .;			/* Text and read-only data */
+  .text : AT (. + __KERNEL_TEXT_OFFSET) {
+#else
+  .text.init : { *(.text.init) }
+  . = ALIGN(4096);
+  __init_end = .;
+  _text = .;			/* Text and read-only data */
+  .text : {
+#endif
+
+	*(.text)
+	*(.fixup)
+	*(.gnu.warning)
+	} = 0x9090
+
+  _etext = .;			/* End of text section */
+  . = ALIGN(4096);
+  . += __KERNEL_TEXT_OFFSET;
+  .rodata.page_aligned : {
+	*(.rodata.empty_zero_page)
+	*(.rodata.idt)
+	}
+  .rodata : { *(.rodata) *(.rodata.*) }
+  .kstrtab : { *(.kstrtab) }
+
+  . = ALIGN(16);		/* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  __start___ksymtab = .;	/* Kernel symbol table */
+  __ksymtab : { *(__ksymtab) }
+  __stop___ksymtab = .;
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+  _end = ALIGN(4*1024*1024);
+#else
+  _end = .;
+#endif
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.text.exit)
+	*(.data.exit)
+	*(.exitcall.exit)
+	}
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ia64/config.in linux-2.4.26-leo/arch/ia64/config.in
--- linux-2.4.26/arch/ia64/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/ia64/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -319,3 +319,12 @@
 int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
 
 endmenu
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+    source grsecurity/Config.in
+fi
+endmenu
+
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ia64/kernel/ptrace.c linux-2.4.26-leo/arch/ia64/kernel/ptrace.c
--- linux-2.4.26/arch/ia64/kernel/ptrace.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/ia64/kernel/ptrace.c	2004-06-22 20:53:46.000000000 +0100
@@ -16,6 +16,7 @@
 #include <linux/ptrace.h>
 #include <linux/smp_lock.h>
 #include <linux/user.h>
+#include <linux/grsecurity.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -1299,6 +1300,9 @@
 	if (pid == 1)		/* no messing around with init! */
 		goto out_tsk;
 
+	if (gr_handle_ptrace(child, request))
+		goto out_tsk;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		goto out_tsk;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ia64/kernel/sys_ia64.c linux-2.4.26-leo/arch/ia64/kernel/sys_ia64.c
--- linux-2.4.26/arch/ia64/kernel/sys_ia64.c	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/ia64/kernel/sys_ia64.c	2004-06-22 20:53:46.000000000 +0100
@@ -16,6 +16,7 @@
 #include <linux/smp_lock.h>
 #include <linux/highuid.h>
 #include <linux/hugetlb.h>
+#include <linux/grsecurity.h>
 
 #include <asm/shmparam.h>
 #include <asm/uaccess.h>
@@ -211,6 +212,11 @@
 		goto out;
 	}
 
+	if (gr_handle_mmap(file, prot)) {
+		addr = -EACCES;
+		goto out;
+	}
+
 	down_write(&current->mm->mmap_sem);
 	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	up_write(&current->mm->mmap_sem);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/m68k/config.in linux-2.4.26-leo/arch/m68k/config.in
--- linux-2.4.26/arch/m68k/config.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/m68k/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -557,3 +557,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/config.in linux-2.4.26-leo/arch/mips/config.in
--- linux-2.4.26/arch/mips/config.in	2002-11-28 23:53:09.000000000 +0000
+++ linux-2.4.26-leo/arch/mips/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -7,3 +7,11 @@
 define_bool CONFIG_MIPS64 n
 
 source arch/mips/config-shared.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+        source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/config-shared.in linux-2.4.26-leo/arch/mips/config-shared.in
--- linux-2.4.26/arch/mips/config-shared.in	2004-02-20 14:11:37.000000000 +0000
+++ linux-2.4.26-leo/arch/mips/config-shared.in	2004-05-26 23:34:34.000000000 +0100
@@ -937,6 +937,7 @@
    define_bool CONFIG_HOTPLUG_PCI n
 fi
 
+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/kernel/i8259.c linux-2.4.26-leo/arch/mips/kernel/i8259.c
--- linux-2.4.26/arch/mips/kernel/i8259.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/mips/kernel/i8259.c	2004-05-26 23:34:34.000000000 +0100
@@ -8,6 +8,7 @@
  * Copyright (C) 1992 Linus Torvalds
  * Copyright (C) 1994 - 2000 Ralf Baechle
  */
+#include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/kernel/irq.c linux-2.4.26-leo/arch/mips/kernel/irq.c
--- linux-2.4.26/arch/mips/kernel/irq.c	2004-02-20 14:11:38.000000000 +0000
+++ linux-2.4.26-leo/arch/mips/kernel/irq.c	2004-05-26 23:34:34.000000000 +0100
@@ -8,6 +8,8 @@
  * Copyright (C) 1992 Linus Torvalds
  * Copyright (C) 1994 - 2000 Ralf Baechle
  */
+
+#include <linux/sched.h>
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -19,11 +21,13 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/random.h>
-#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/ptrace.h>
 
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/debug.h>
 
 /*
  * Controller mappings for all interrupt sources:
@@ -429,6 +433,8 @@
 	struct irqaction * action;
 	unsigned int status;
 
+	preempt_disable();
+
 	kstat.irqs[cpu][irq]++;
 	spin_lock(&desc->lock);
 	desc->handler->ack(irq);
@@ -490,6 +496,27 @@
 
 	if (softirq_pending(cpu))
 		do_softirq();
+
+#if defined(CONFIG_PREEMPT)
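+	/* undo the preempt_disable() done at irq entry; if the count drops to
+	   zero and a reschedule is pending, preempt now */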
+	while (--current->preempt_count == 0) {
+		db_assert(intr_off());
+		db_assert(!in_interrupt());
+
+		if (current->need_resched == 0) {
+			break;
+		}
+
+		current->preempt_count ++;
+		sti();
+		if (user_mode(regs)) {
+			schedule();
+		} else {
+			preempt_schedule();
+		}
+		cli();
+	}
+#endif
+
 	return 1;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/mm/extable.c linux-2.4.26-leo/arch/mips/mm/extable.c
--- linux-2.4.26/arch/mips/mm/extable.c	2002-11-28 23:53:10.000000000 +0000
+++ linux-2.4.26-leo/arch/mips/mm/extable.c	2004-05-26 23:34:34.000000000 +0100
@@ -3,6 +3,7 @@
  */
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <asm/uaccess.h>
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips/mm/fault.c linux-2.4.26-leo/arch/mips/mm/fault.c
--- linux-2.4.26/arch/mips/mm/fault.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/mips/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -69,6 +69,24 @@
 	}
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips64/config.in linux-2.4.26-leo/arch/mips64/config.in
--- linux-2.4.26/arch/mips64/config.in	2002-11-28 23:53:10.000000000 +0000
+++ linux-2.4.26-leo/arch/mips64/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -7,3 +7,11 @@
 define_bool CONFIG_MIPS64 y
 
 source arch/mips/config-shared.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+        source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips64/kernel/ioctl32.c linux-2.4.26-leo/arch/mips64/kernel/ioctl32.c
--- linux-2.4.26/arch/mips64/kernel/ioctl32.c	2004-02-20 14:11:38.000000000 +0000
+++ linux-2.4.26-leo/arch/mips64/kernel/ioctl32.c	2004-05-26 23:34:18.000000000 +0100
@@ -62,6 +62,7 @@
 
 #include <linux/mtd/mtd.h>
 #include <linux/serial.h>
+#include <linux/dm-ioctl.h>
 
 #ifdef CONFIG_SIBYTE_TBPROF
 #include <asm/sibyte/trace_prof.h>
@@ -2327,6 +2328,22 @@
 	IOCTL32_DEFAULT(RESTART_ARRAY_RW),
 #endif /* CONFIG_MD */
 
+#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
+	IOCTL32_DEFAULT(DM_VERSION),
+	IOCTL32_DEFAULT(DM_REMOVE_ALL),
+	IOCTL32_DEFAULT(DM_DEV_CREATE),
+	IOCTL32_DEFAULT(DM_DEV_REMOVE),
+	IOCTL32_DEFAULT(DM_TABLE_LOAD),
+	IOCTL32_DEFAULT(DM_DEV_SUSPEND),
+	IOCTL32_DEFAULT(DM_DEV_RENAME),
+	IOCTL32_DEFAULT(DM_TABLE_DEPS),
+	IOCTL32_DEFAULT(DM_DEV_STATUS),
+	IOCTL32_DEFAULT(DM_TABLE_STATUS),
+	IOCTL32_DEFAULT(DM_DEV_WAIT),
+	IOCTL32_DEFAULT(DM_LIST_DEVICES),
+	IOCTL32_DEFAULT(DM_TABLE_CLEAR),
+#endif /* CONFIG_BLK_DEV_DM */
+
 #ifdef CONFIG_SIBYTE_TBPROF
 	IOCTL32_DEFAULT(SBPROF_ZBSTART),
 	IOCTL32_DEFAULT(SBPROF_ZBSTOP),
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/mips64/mm/fault.c linux-2.4.26-leo/arch/mips64/mm/fault.c
--- linux-2.4.26/arch/mips64/mm/fault.c	2004-02-20 14:11:38.000000000 +0000
+++ linux-2.4.26-leo/arch/mips64/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -90,6 +90,24 @@
 	}
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/config.in linux-2.4.26-leo/arch/parisc/config.in
--- linux-2.4.26/arch/parisc/config.in	2004-02-20 14:11:38.000000000 +0000
+++ linux-2.4.26-leo/arch/parisc/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -204,3 +204,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/kernel/ioctl32.c linux-2.4.26-leo/arch/parisc/kernel/ioctl32.c
--- linux-2.4.26/arch/parisc/kernel/ioctl32.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/parisc/kernel/ioctl32.c	2004-06-22 20:53:46.000000000 +0100
@@ -55,6 +55,7 @@
 #define max max */
 #include <linux/lvm.h>
 #endif /* LVM */
+#include <linux/dm-ioctl.h>
 
 #include <scsi/scsi.h>
 /* Ugly hack. */
@@ -1434,7 +1435,11 @@
 	 * To have permissions to do most of the vt ioctls, we either have
 	 * to be the owner of the tty, or super-user.
 	 */
+#ifdef CONFIG_GRKERNSEC
+	if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (current->tty == tty || suser())
+#endif
 		return 1;
 	return 0;                                                    
 }
@@ -3423,6 +3428,22 @@
 COMPATIBLE_IOCTL(LV_BMAP)
 COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
 #endif /* LVM */
+/* Device-Mapper */
+#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
+COMPATIBLE_IOCTL(DM_VERSION)
+COMPATIBLE_IOCTL(DM_REMOVE_ALL)
+COMPATIBLE_IOCTL(DM_DEV_CREATE)
+COMPATIBLE_IOCTL(DM_DEV_REMOVE)
+COMPATIBLE_IOCTL(DM_TABLE_LOAD)
+COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
+COMPATIBLE_IOCTL(DM_DEV_RENAME)
+COMPATIBLE_IOCTL(DM_TABLE_DEPS)
+COMPATIBLE_IOCTL(DM_DEV_STATUS)
+COMPATIBLE_IOCTL(DM_TABLE_STATUS)
+COMPATIBLE_IOCTL(DM_DEV_WAIT)
+COMPATIBLE_IOCTL(DM_LIST_DEVICES)
+COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
+#endif /* CONFIG_BLK_DEV_DM */
 #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
 COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
 COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/kernel/ptrace.c linux-2.4.26-leo/arch/parisc/kernel/ptrace.c
--- linux-2.4.26/arch/parisc/kernel/ptrace.c	2002-11-28 23:53:10.000000000 +0000
+++ linux-2.4.26-leo/arch/parisc/kernel/ptrace.c	2004-06-22 20:53:46.000000000 +0100
@@ -15,7 +15,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/personality.h>
-
+#include <linux/grsecurity.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -119,6 +119,9 @@
 	if (pid == 1)		/* no messing around with init! */
 		goto out_tsk;
 
+	if (gr_handle_ptrace(child, request))
+		goto out_tsk;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		goto out_tsk;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/kernel/sys_parisc32.c linux-2.4.26-leo/arch/parisc/kernel/sys_parisc32.c
--- linux-2.4.26/arch/parisc/kernel/sys_parisc32.c	2003-06-13 15:51:31.000000000 +0100
+++ linux-2.4.26-leo/arch/parisc/kernel/sys_parisc32.c	2004-06-22 20:53:46.000000000 +0100
@@ -50,6 +50,7 @@
 #include <linux/highmem.h>
 #include <linux/highuid.h>
 #include <linux/mman.h>
+#include <linux/grsecurity.h>
 
 #include <asm/types.h>
 #include <asm/uaccess.h>
@@ -177,6 +178,11 @@
 	struct file *file;
 	int retval;
 	int i;
+#ifdef CONFIG_GRKERNSEC
+	struct file *old_exec_file;
+	struct acl_subject_label *old_acl;
+	struct rlimit old_rlim[RLIM_NLIMITS];
+#endif
 
 	file = open_exec(filename);
 
@@ -184,7 +190,26 @@
 	if (IS_ERR(file))
 		return retval;
 
+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
+
+	if (gr_handle_nproc()) {
+		allow_write_access(file);
+		fput(file);
+		return -EAGAIN;
+	}
+
+	if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
+		allow_write_access(file);
+		fput(file);
+		return -EACCES;
+	}
+
 	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK
+	bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
+#endif
+
 	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
 
 	DBG(("do_execve32(%s, %p, %p, %p)\n", filename, argv, envp, regs));
@@ -209,11 +234,24 @@
 	if (retval < 0)
 		goto out;
 	
+	if (!gr_tpe_allow(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	if (gr_check_crash_exec(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
 	if (retval < 0)
 		goto out;
 
 	bprm.exec = bprm.p;
+
+	gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
+
 	retval = copy_strings32(bprm.envc, envp, &bprm);
 	if (retval < 0)
 		goto out;
@@ -222,11 +260,32 @@
 	if (retval < 0)
 		goto out;
 
+#ifdef CONFIG_GRKERNSEC
+	old_acl = current->acl;
+	memcpy(old_rlim, current->rlim, sizeof(old_rlim));
+	old_exec_file = current->exec_file;
+	get_file(file);
+	current->exec_file = file;
+#endif
+
+	gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
+
 	retval = search_binary_handler(&bprm,regs);
-	if (retval >= 0)
+	if (retval >= 0) {
+#ifdef CONFIG_GRKERNSEC
+		if (old_exec_file)
+			fput(old_exec_file);
+#endif
 		/* execve success */
 		return retval;
+	}
 
+#ifdef CONFIG_GRKERNSEC
+	current->acl = old_acl;
+	memcpy(current->rlim, old_rlim, sizeof(old_rlim));
+	fput(current->exec_file);
+	current->exec_file = old_exec_file;
+#endif
 out:
 	/* Something went wrong, return the inode and free the argument pages*/
 	allow_write_access(bprm.file);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/kernel/sys_parisc.c linux-2.4.26-leo/arch/parisc/kernel/sys_parisc.c
--- linux-2.4.26/arch/parisc/kernel/sys_parisc.c	2002-11-28 23:53:10.000000000 +0000
+++ linux-2.4.26-leo/arch/parisc/kernel/sys_parisc.c	2004-06-22 20:53:46.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/mman.h>
 #include <linux/shm.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 int sys_pipe(int *fildes)
 {
@@ -90,6 +91,11 @@
 		inode = filp->f_dentry->d_inode;
 	}
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
+		addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
+#endif
+
 	if (inode && (flags & MAP_SHARED) && (inode->i_mapping->i_mmap_shared)) {
 		addr = get_shared_area(inode, addr, len, pgoff);
 	} else {
@@ -104,12 +110,23 @@
 {
 	struct file * file = NULL;
 	unsigned long error = -EBADF;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	if (!(flags & MAP_ANONYMOUS)) {
 		file = fget(fd);
 		if (!file)
 			goto out;
 	}
 
+	if (gr_handle_mmap(file, prot)) {
+		fput(file);
+		return -EACCES;
+	}
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
 	down_write(&current->mm->mmap_sem);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/kernel/traps.c linux-2.4.26-leo/arch/parisc/kernel/traps.c
--- linux-2.4.26/arch/parisc/kernel/traps.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/parisc/kernel/traps.c	2004-06-22 20:53:46.000000000 +0100
@@ -637,9 +637,7 @@
 
 			down_read(&current->mm->mmap_sem);
 			vma = find_vma(current->mm,regs->iaoq[0]);
-			if (vma && (regs->iaoq[0] >= vma->vm_start)
-				&& (vma->vm_flags & VM_EXEC)) {
-
+			if (vma && (regs->iaoq[0] >= vma->vm_start)) {
 				fault_address = regs->iaoq[0];
 				fault_space = regs->iasq[0];
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/parisc/mm/fault.c linux-2.4.26-leo/arch/parisc/mm/fault.c
--- linux-2.4.26/arch/parisc/mm/fault.c	2003-06-13 15:51:31.000000000 +0100
+++ linux-2.4.26-leo/arch/parisc/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -15,6 +15,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
+#include <linux/unistd.h>
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
@@ -53,7 +54,7 @@
 static unsigned long
 parisc_acctyp(unsigned long code, unsigned int inst)
 {
-	if (code == 6 || code == 16)
+	if (code == 6 || code == 7 || code == 16)
 	    return VM_EXEC;
 
 	switch (inst & 0xf0000000) {
@@ -139,6 +140,136 @@
 			}
 #endif
 
+/*
+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
+ *
+ * returns 1 when task should be killed
+ *	   2 when rt_sigreturn trampoline was detected
+ *	   3 when unpatched PLT trampoline was detected
+ *	   4 when legitimate ET_EXEC was detected
+ */
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	int err;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		if (instruction_pointer(regs) >= current->mm->start_code &&
+		    instruction_pointer(regs) < current->mm->end_code)
+		{
+#if 0
+			/* PaX: this needs fixing */
+			if ((regs->gr[2] & ~3UL) == instruction_pointer(regs))
+				return 1;
+#endif
+			regs->iaoq[0] += current->mm->delta_exec;
+			if ((regs->iaoq[1] & ~3UL) >= current->mm->start_code &&
+			    (regs->iaoq[1] & ~3UL) < current->mm->end_code)
+				regs->iaoq[1] += current->mm->delta_exec;
+			return 4;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	do { /* PaX: unpatched PLT emulation */
+		unsigned int bl, depwi;
+
+		err = get_user(bl, (unsigned int*)instruction_pointer(regs));
+		err |= get_user(depwi, (unsigned int*)(instruction_pointer(regs)+4));
+
+		if (err)
+			break;
+
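+		/* the bl/depwi pair of an unpatched PLT slot; back up 12 bytes
+		   and check for the resolver-calling ldw/bv/ldw stub */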
+		if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
+			unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
+
+			err = get_user(ldw, (unsigned int*)addr);
+			err |= get_user(bv, (unsigned int*)(addr+4));
+			err |= get_user(ldw2, (unsigned int*)(addr+8));
+
+			if (err)
+				break;
+
+			if (ldw == 0x0E801096U &&
+			    bv == 0xEAC0C000U &&
+			    ldw2 == 0x0E881095U)
+			{
+				unsigned int resolver, map;
+
+				err = get_user(resolver, (unsigned int*)(instruction_pointer(regs)+8));
+				err |= get_user(map, (unsigned int*)(instruction_pointer(regs)+12));
+				if (err)
+					break;
+
+				regs->gr[20] = instruction_pointer(regs)+8;
+				regs->gr[21] = map;
+				regs->gr[22] = resolver;
+				regs->iaoq[0] = resolver | 3UL;
+				regs->iaoq[1] = regs->iaoq[0] + 4;
+				return 3;
+			}
+		}
+	} while (0);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+
+#ifndef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+	if (!(current->flags & PF_PAX_EMUTRAMP))
+		return 1;
+#endif
+
+	do { /* PaX: rt_sigreturn emulation */
+		unsigned int ldi1, ldi2, bel, nop;
+
+		err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
+		err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
+		err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
+		err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
+
+		if (err)
+			break;
+
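+		/* ldi {0|1},%r25 ; ldi __NR_rt_sigreturn,%r20 ;
+		   be,l 0x100(%sr2,%r0) ; nop */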
+		if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
+		    ldi2 == 0x3414015AU &&
+		    bel == 0xE4008200U &&
+		    nop == 0x08000240U)
+		{
+			regs->gr[25] = (ldi1 & 2) >> 1;
+			regs->gr[20] = __NR_rt_sigreturn;
+			regs->gr[31] = regs->iaoq[1] + 16;
+			regs->sr[0] = regs->iasq[1];
+			regs->iaoq[0] = 0x100UL;
+			regs->iaoq[1] = regs->iaoq[0] + 4;
+			regs->iasq[0] = regs->sr[2];
+			regs->iasq[1] = regs->sr[2];
+			return 2;
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 			      unsigned long address)
 {
@@ -164,8 +295,38 @@
 
 	acc_type = parisc_acctyp(code,regs->iir);
 
-	if ((vma->vm_flags & acc_type) != acc_type)
+	if ((vma->vm_flags & acc_type) != acc_type) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+		if ((current->flags & PF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
+		    (address & ~3UL) == instruction_pointer(regs))
+		   {
+			up_read(&mm->mmap_sem);
+			switch(pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			case 4:
+				return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+			case 3:
+				return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+			case 2:
+				return;
+#endif
+
+			}
+			pax_report_fault(regs, (void*)instruction_pointer(regs), (void*)regs->gr[30]);
+			do_exit(SIGKILL);
+		}
+#endif
+
 		goto bad_area;
+	}
 
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/config.in linux-2.4.26-leo/arch/ppc/config.in
--- linux-2.4.26/arch/ppc/config.in	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -167,6 +167,8 @@
   int  'Maximum number of CPUs (2-32)' CONFIG_NR_CPUS 32
 fi
 
+bool 'Preemptible kernel support' CONFIG_PREEMPT
+
 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
   bool 'AltiVec Support' CONFIG_ALTIVEC
   bool 'Thermal Management Support' CONFIG_TAU
@@ -667,3 +669,12 @@
 int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
 
 endmenu
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+    source grsecurity/Config.in
+fi
+endmenu
+
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/entry.S linux-2.4.26-leo/arch/ppc/kernel/entry.S
--- linux-2.4.26/arch/ppc/kernel/entry.S	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/kernel/entry.S	2004-05-26 23:34:34.000000000 +0100
@@ -287,6 +287,46 @@
 	 */
 	cmpi	0,r3,0
 	beq	restore
+#ifdef CONFIG_PREEMPT
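+	/* returning to kernel mode with preempt_count zero: if a reschedule is
+	   pending and no irqs or softirqs are in progress, bump preempt_count,
+	   enable interrupts and call preempt_schedule */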
+	lwz	r3,PREEMPT_COUNT(r2)
+	cmpi	0,r3,1
+	bge	ret_from_except
+	lwz	r5,_MSR(r1)
+	andi.	r5,r5,MSR_PR
+	bne	do_signal_ret
+	lwz	r5,NEED_RESCHED(r2)
+	cmpi	0,r5,0
+	beq	ret_from_except
+	lis	r3,irq_stat@h
+	ori	r3,r3,irq_stat@l
+#ifdef CONFIG_SMP
+	lwz     r5,CPU(r2)
+	rlwinm  r5,r5,5,0,26
+	add     r3,r3,r5
+#endif
+	lwz	r5,4(r3)
+	lwz	r3,8(r3)
+	add	r3,r3,r5
+	cmpi	0,r3,0
+	bne	ret_from_except
+	lwz	r3,PREEMPT_COUNT(r2)
+	addi	r3,r3,1
+	stw	r3,PREEMPT_COUNT(r2)
+	mfmsr	r0
+	ori	r0,r0,MSR_EE
+	mtmsr	r0
+	sync
+	bl	preempt_schedule
+	mfmsr	r0
+	rlwinm	r0,r0,0,17,15
+	mtmsr	r0
+	sync
+	lwz	r3,PREEMPT_COUNT(r2)
+	subi	r3,r3,1
+	stw	r3,PREEMPT_COUNT(r2)
+	li	r3,1
+	b	ret_from_intercept
+#endif /* CONFIG_PREEMPT */
 	.globl	ret_from_except
 ret_from_except:
 	lwz	r3,_MSR(r1)	/* Returning to user mode? */
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/head_4xx.S linux-2.4.26-leo/arch/ppc/kernel/head_4xx.S
--- linux-2.4.26/arch/ppc/kernel/head_4xx.S	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/kernel/head_4xx.S	2004-06-22 20:53:46.000000000 +0100
@@ -296,15 +296,12 @@
 
 	/* Most of the Linux PTE is ready to load into the TLB LO.
 	 * We set ZSEL, where only the LS-bit determines user access.
-	 * We set execute, because we don't have the granularity to
-	 * properly set this at the page level (Linux problem).
 	 * If shared is set, we cause a zero PID->TID load.
 	 * Many of these bits are software only.  Bits we don't set
 	 * here we (properly should) assume have the appropriate value.
 	 */
 	li	r22, 0x0ce2
 	andc	r21, r21, r22		/* Make sure 20, 21 are zero */
-	ori	r21, r21, _PAGE_HWEXEC	/* make it executable */
 
 	/* find the TLB index that caused the fault.  It has to be here.
 	*/
@@ -783,7 +780,6 @@
 	stw	r23, tlb_4xx_index@l(0)
 
 6:
-	ori	r21, r21, _PAGE_HWEXEC		/* make it executable */
 	tlbwe	r21, r23, TLB_DATA		/* Load TLB LO */
 
 	/* Create EPN.  This is the faulting address plus a static
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/irq.c linux-2.4.26-leo/arch/ppc/kernel/irq.c
--- linux-2.4.26/arch/ppc/kernel/irq.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/kernel/irq.c	2004-05-26 23:34:34.000000000 +0100
@@ -551,6 +551,34 @@
 	return 1; /* lets ret_from_int know we can do checks */
 }
 
+#ifdef CONFIG_PREEMPT
+int
+preempt_intercept(struct pt_regs *regs)
+{
+	int ret;
+
+	preempt_disable();
+
+	switch(regs->trap) {
+	case 0x500:
+		ret = do_IRQ(regs);
+		break;
+#ifndef CONFIG_4xx
+	case 0x900:
+#else
+	case 0x1000:
+#endif
+		ret = timer_interrupt(regs);
+		break;
+	default:
+		BUG();
+	}
+
+	preempt_enable();
+	return ret;
+}
+#endif /* CONFIG_PREEMPT */
+
 unsigned long probe_irq_on (void)
 {
 	return 0;
@@ -647,11 +675,13 @@
 				show("wait_on_irq");
 				count = ~0;
 			}
+			preempt_disable();
 			__sti();
 			/* don't worry about the lock race Linus found
 			 * on intel here. -- Cort
 			 */
 			__cli();
+			preempt_enable_no_resched();
 			if (atomic_read(&global_irq_count))
 				continue;
 			if (global_irq_lock)
@@ -727,6 +757,8 @@
 	global_irq_holder = cpu;
 }
 
+#define	EFLAGS_IF_SHIFT	15
+
 /*
  * A global "cli()" while in an interrupt context
  * turns into just a local cli(). Interrupts
@@ -744,9 +776,10 @@
 	unsigned long flags;
 
 	__save_flags(flags);
-	if (flags & (1 << 15)) {
-		int cpu = smp_processor_id();
+	if (flags & (1 << EFLAGS_IF_SHIFT)) {
+		int cpu;
 		__cli();
+		cpu = smp_processor_id();
 		if (!local_irq_count(cpu))
 			get_irqlock(cpu);
 	}
@@ -754,11 +787,14 @@
 
 void __global_sti(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
+	preempt_disable();
+	cpu = smp_processor_id();
 	if (!local_irq_count(cpu))
 		release_irqlock(cpu);
 	__sti();
+	preempt_enable();
 }
 
 /*
@@ -773,19 +809,23 @@
 	int retval;
 	int local_enabled;
 	unsigned long flags;
+	int cpu;
 
 	__save_flags(flags);
-	local_enabled = (flags >> 15) & 1;
+	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
 	/* default to local */
 	retval = 2 + local_enabled;
 
 	/* check for global flags if we're not in an interrupt */
-	if (!local_irq_count(smp_processor_id())) {
+	preempt_disable();
+	cpu = smp_processor_id();
+	if (!local_irq_count(cpu)) {
 		if (local_enabled)
 			retval = 1;
-		if (global_irq_holder == (unsigned char) smp_processor_id())
+		if (global_irq_holder == cpu)
 			retval = 0;
 	}
+	preempt_enable();
 	return retval;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/mk_defs.c linux-2.4.26-leo/arch/ppc/kernel/mk_defs.c
--- linux-2.4.26/arch/ppc/kernel/mk_defs.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/kernel/mk_defs.c	2004-05-26 23:34:34.000000000 +0100
@@ -39,6 +39,9 @@
 	DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
 	DEFINE(MM, offsetof(struct task_struct, mm));
+#ifdef CONFIG_PREEMPT
+	DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
+#endif
 	DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/open_pic.c linux-2.4.26-leo/arch/ppc/kernel/open_pic.c
--- linux-2.4.26/arch/ppc/kernel/open_pic.c	2004-02-20 14:11:39.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/kernel/open_pic.c	2004-05-26 23:34:34.000000000 +0100
@@ -601,19 +601,24 @@
 void __init do_openpic_setup_cpu(void)
 {
  	int i;
-	u32 msk = 1 << smp_hw_index[smp_processor_id()];
+#ifdef CONFIG_IRQ_ALL_CPUS
+	u32 msk;
+#endif /* CONFIG_IRQ_ALL_CPUS */
 
 	spin_lock(&openpic_setup_lock);
 
 #ifdef CONFIG_IRQ_ALL_CPUS
+	msk = 1 << smp_hw_index[smp_processor_id()];
+
  	/* let the openpic know we want intrs. default affinity
  	 * is 0xffffffff until changed via /proc
  	 * That's how it's done on x86. If we want it differently, then
  	 * we should make sure we also change the default values of irq_affinity
  	 * in irq.c.
  	 */
- 	for (i = 0; i < NumSources; i++)
+ 	for (i = 0; i < NumSources; i++) {
 		openpic_mapirq(i, msk, ~0U);
+	}
 #endif /* CONFIG_IRQ_ALL_CPUS */
  	openpic_set_priority(0);
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/ptrace.c linux-2.4.26-leo/arch/ppc/kernel/ptrace.c
--- linux-2.4.26/arch/ppc/kernel/ptrace.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/kernel/ptrace.c	2004-06-22 20:53:46.000000000 +0100
@@ -24,6 +24,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -195,6 +196,9 @@
 	if (pid == 1)		/* you may not mess with init */
 		goto out_tsk;
 
+	if (gr_handle_ptrace(child, request))
+		goto out_tsk;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		goto out_tsk;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/setup.c linux-2.4.26-leo/arch/ppc/kernel/setup.c
--- linux-2.4.26/arch/ppc/kernel/setup.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/kernel/setup.c	2004-05-26 23:34:34.000000000 +0100
@@ -502,6 +502,20 @@
 	strcpy(cmd_line, CONFIG_CMDLINE);
 #endif /* CONFIG_CMDLINE */
 
+#ifdef CONFIG_PREEMPT
+	/* Override the irq routines for external & timer interrupts here,
+	 * as the MMU has only been minimally setup at this point and
+	 * there are no protections on page zero.
+	 */
+	{
+		extern int preempt_intercept(struct pt_regs *);
+	
+
+		timer_interrupt_intercept = (unsigned long) &preempt_intercept;
+
+	}
+#endif /* CONFIG_PREEMPT */
+
 	platform_init(r3, r4, r5, r6, r7);
 
 	if (ppc_md.progress)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/syscalls.c linux-2.4.26-leo/arch/ppc/kernel/syscalls.c
--- linux-2.4.26/arch/ppc/kernel/syscalls.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/kernel/syscalls.c	2004-06-22 20:53:46.000000000 +0100
@@ -35,6 +35,7 @@
 #include <linux/ipc.h>
 #include <linux/utsname.h>
 #include <linux/file.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
@@ -191,18 +192,29 @@
 	struct file * file = NULL;
 	int ret = -EBADF;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 	if (!(flags & MAP_ANONYMOUS)) {
 		if (!(file = fget(fd)))
 			goto out;
 	}
 
+	if (gr_handle_mmap(file, prot)) {
+		fput(file);
+		ret = -EACCES;
+		goto out;
+	}
+
 	ret = -EINVAL;
 	if ((! allow_mmap_address(addr)) && (flags & MAP_FIXED))
 		goto out;
 	
 	down_write(&current->mm->mmap_sem);
-	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	ret = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
 	up_write(&current->mm->mmap_sem);
 	if (file)
 		fput(file);
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/kernel/temp.c linux-2.4.26-leo/arch/ppc/kernel/temp.c
--- linux-2.4.26/arch/ppc/kernel/temp.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/kernel/temp.c	2004-05-26 23:34:34.000000000 +0100
@@ -138,7 +138,7 @@
 
 static void tau_timeout(void * info)
 {
-	unsigned long cpu = smp_processor_id();
+	unsigned long cpu;
 	unsigned long flags;
 	int size;
 	int shrink;
@@ -146,6 +146,8 @@
 	/* disabling interrupts *should* be okay */
 	save_flags(flags); cli();
 
+	cpu = smp_processor_id();
+
 #ifndef CONFIG_TAU_INT
 	TAUupdate(cpu);
 #endif
@@ -191,13 +193,15 @@
 
 static void tau_timeout_smp(unsigned long unused)
 {
-
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
+
+	preempt_disable();
 #ifdef CONFIG_SMP
 	smp_call_function(tau_timeout, NULL, 1, 0);
 #endif
 	tau_timeout(NULL);
+	preempt_enable();
 }
 
 /*
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/lib/dec_and_lock.c linux-2.4.26-leo/arch/ppc/lib/dec_and_lock.c
--- linux-2.4.26/arch/ppc/lib/dec_and_lock.c	2001-11-16 18:10:08.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/lib/dec_and_lock.c	2004-05-26 23:34:34.000000000 +0100
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/mm/fault.c linux-2.4.26-leo/arch/ppc/mm/fault.c
--- linux-2.4.26/arch/ppc/mm/fault.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -26,6 +26,9 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -52,6 +55,360 @@
 void bad_page_fault(struct pt_regs *, unsigned long, int sig);
 void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
 
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+void pax_syscall_close(struct vm_area_struct * vma)
+{
+	vma->vm_mm->call_syscall = 0UL;
+}
+
+static struct page* pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
+{
+	struct page* page;
+	unsigned int *kaddr;
+
+	page = alloc_page(GFP_HIGHUSER);
+	if (!page)
+		return page;
+
+	kaddr = kmap(page);
+	memset(kaddr, 0, PAGE_SIZE);
+	kaddr[0] = 0x44000002U; /* sc */
+	__flush_dcache_icache(kaddr);
+	kunmap(page);
+	return page;
+}
+
+static struct vm_operations_struct pax_vm_ops = {
+	close:		pax_syscall_close,
+	nopage:		pax_syscall_nopage,
+};
+
+static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+	vma->vm_mm = current->mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
+	vma->vm_ops = &pax_vm_ops;
+	vma->vm_pgoff = 0UL;
+	vma->vm_file = NULL;
+	vma->vm_private_data = NULL;
+	insert_vm_struct(current->mm, vma);
+	++current->mm->total_vm;
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->nip = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched GOT trampoline was detected
+ *         3 when patched PLT trampoline was detected
+ *         4 when unpatched PLT trampoline was detected
+ *         5 when legitimate ET_EXEC was detected
+ *         6 when sigreturn trampoline was detected
+ *         7 when rt_sigreturn trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	int err;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		if (regs->nip >= current->mm->start_code &&
+		    regs->nip < current->mm->end_code)
+		{
+			if (regs->link == regs->nip)
+				return 1;
+
+			regs->nip += current->mm->delta_exec;
+			return 5;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	do { /* PaX: patched GOT emulation */
+		unsigned int blrl;
+
+		err = get_user(blrl, (unsigned int*)regs->nip);
+
+		if (!err && blrl == 0x4E800021U) {
+			unsigned long temp = regs->nip;
+
+			regs->nip = regs->link & 0xFFFFFFFCUL;
+			regs->link = temp + 4UL;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int b;
+
+		err = get_user(b, (unsigned int *)regs->nip);
+
+		if (!err && (b & 0xFC000003U) == 0x48000000U) {
+			regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
+			return 3;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation #1 */
+		unsigned int li, b;
+
+		err = get_user(li, (unsigned int *)regs->nip);
+		err |= get_user(b, (unsigned int *)(regs->nip+4));
+
+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
+			unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
+			unsigned long addr = b | 0xFC000000UL;
+
+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
+			err = get_user(rlwinm, (unsigned int*)addr);
+			err |= get_user(add, (unsigned int*)(addr+4));
+			err |= get_user(li2, (unsigned int*)(addr+8));
+			err |= get_user(addis2, (unsigned int*)(addr+12));
+			err |= get_user(mtctr, (unsigned int*)(addr+16));
+			err |= get_user(li3, (unsigned int*)(addr+20));
+			err |= get_user(addis3, (unsigned int*)(addr+24));
+			err |= get_user(bctr, (unsigned int*)(addr+28));
+
+			if (err)
+				break;
+
+			if (rlwinm == 0x556C083CU &&
+			    add == 0x7D6C5A14U &&
+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
+			    mtctr == 0x7D8903A6U &&
+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
+			    bctr == 0x4E800420U)
+			{
+				regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->ctr += (addis2 & 0xFFFFU) << 16;
+				regs->nip = regs->ctr;
+				return 4;
+			}
+		}
+	} while (0);
+
+#if 0
+	do { /* PaX: unpatched PLT emulation #2 */
+		unsigned int lis, lwzu, b, bctr;
+
+		err = get_user(lis, (unsigned int *)regs->nip);
+		err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
+		err |= get_user(b, (unsigned int *)(regs->nip+8));
+		err |= get_user(bctr, (unsigned int *)(regs->nip+12));
+
+		if (err)
+			break;
+
+		if ((lis & 0xFFFF0000U) == 0x39600000U &&
+		    (lwzu & 0xU) == 0xU &&
+		    (b & 0xFC000003U) == 0x48000000U &&
+		    bctr == 0x4E800420U)
+		{
+			unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
+			unsigned long addr = b | 0xFC000000UL;
+
+			addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
+			err = get_user(addis, (unsigned int*)addr);
+			err |= get_user(addi, (unsigned int*)(addr+4));
+			err |= get_user(rlwinm, (unsigned int*)(addr+8));
+			err |= get_user(add, (unsigned int*)(addr+12));
+			err |= get_user(li2, (unsigned int*)(addr+16));
+			err |= get_user(addis2, (unsigned int*)(addr+20));
+			err |= get_user(mtctr, (unsigned int*)(addr+24));
+			err |= get_user(li3, (unsigned int*)(addr+28));
+			err |= get_user(addis3, (unsigned int*)(addr+32));
+			err |= get_user(bctr, (unsigned int*)(addr+36));
+
+			if (err)
+				break;
+
+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
+			    (addi & 0xFFFF0000U) == 0x396B0000U &&
+			    rlwinm == 0x556C083CU &&
+			    add == 0x7D6C5A14U &&
+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
+			    mtctr == 0x7D8903A6U &&
+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
+			    bctr == 0x4E800420U)
+			{
+				regs->gpr[PT_R11] = 3 * (((lis | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				regs->ctr += (addis2 & 0xFFFFU) << 16;
+				regs->nip = regs->ctr;
+				return 4;
+			}
+		}
+	} while (0);
+#endif
+
+	do { /* PaX: unpatched PLT emulation #3 */
+		unsigned int li, b;
+
+		err = get_user(li, (unsigned int *)regs->nip);
+		err |= get_user(b, (unsigned int *)(regs->nip+4));
+
+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
+			unsigned int addis, lwz, mtctr, bctr;
+			unsigned long addr = b | 0xFC000000UL;
+
+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
+			err = get_user(addis, (unsigned int*)addr);
+			err |= get_user(lwz, (unsigned int*)(addr+4));
+			err |= get_user(mtctr, (unsigned int*)(addr+8));
+			err |= get_user(bctr, (unsigned int*)(addr+12));
+
+			if (err)
+				break;
+
+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
+			    (lwz & 0xFFFF0000U) == 0x816B0000U &&
+			    mtctr == 0x7D6903A6U &&
+			    bctr == 0x4E800420U)
+			{
+				unsigned int r11;
+
+				addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+				addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
+
+				err = get_user(r11, (unsigned int*)addr);
+				if (err)
+					break;
+
+				regs->gpr[PT_R11] = r11;
+				regs->ctr = r11;
+				regs->nip = r11;
+				return 4;
+			}
+		}
+	} while (0);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+	do { /* PaX: sigreturn emulation */
+		unsigned int li, sc;
+
+		err = get_user(li, (unsigned int *)regs->nip);
+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
+
+		if (!err && li == 0x38007777U && sc == 0x44000002U) {
+			struct vm_area_struct *vma;
+			unsigned long call_syscall;
+
+			down_read(&current->mm->mmap_sem);
+			call_syscall = current->mm->call_syscall;
+			up_read(&current->mm->mmap_sem);
+			if (likely(call_syscall))
+				goto emulate;
+
+			vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+
+			down_write(&current->mm->mmap_sem);
+			if (current->mm->call_syscall) {
+				call_syscall = current->mm->call_syscall;
+				up_write(&current->mm->mmap_sem);
+				if (vma) kmem_cache_free(vm_area_cachep, vma);
+				goto emulate;
+			}
+
+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+			if (!vma || (call_syscall & ~PAGE_MASK)) {
+				up_write(&current->mm->mmap_sem);
+				if (vma) kmem_cache_free(vm_area_cachep, vma);
+				return 1;
+			}
+
+			pax_insert_vma(vma, call_syscall);
+			current->mm->call_syscall = call_syscall;
+			up_write(&current->mm->mmap_sem);
+
+emulate:
+			regs->gpr[PT_R0] = 0x7777UL;
+			regs->nip = call_syscall;
+			return 6;
+		}
+	} while (0);
+
+	do { /* PaX: rt_sigreturn emulation */
+		unsigned int li, sc;
+
+		err = get_user(li, (unsigned int *)regs->nip);
+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
+
+		if (!err && li == 0x38006666U && sc == 0x44000002U) {
+			struct vm_area_struct *vma;
+			unsigned long call_syscall;
+
+			down_read(&current->mm->mmap_sem);
+			call_syscall = current->mm->call_syscall;
+			up_read(&current->mm->mmap_sem);
+			if (likely(call_syscall))
+				goto rt_emulate;
+
+			vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+
+			down_write(&current->mm->mmap_sem);
+			if (current->mm->call_syscall) {
+				call_syscall = current->mm->call_syscall;
+				up_write(&current->mm->mmap_sem);
+				if (vma) kmem_cache_free(vm_area_cachep, vma);
+				goto rt_emulate;
+			}
+
+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+			if (!vma || (call_syscall & ~PAGE_MASK)) {
+				up_write(&current->mm->mmap_sem);
+				if (vma) kmem_cache_free(vm_area_cachep, vma);
+				return 1;
+			}
+
+			pax_insert_vma(vma, call_syscall);
+			current->mm->call_syscall = call_syscall;
+			up_write(&current->mm->mmap_sem);
+
+rt_emulate:
+			regs->gpr[PT_R0] = 0x6666UL;
+			regs->nip = call_syscall;
+			return 7;
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * Check whether the instruction at regs->nip is a store using
  * an update addressing form which will update r1.
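
The expression (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL) used throughout the emulation above sign-extends the 26-bit displacement of a PowerPC I-form branch (opcode 18, checked by (b & 0xFC000003U) == 0x48000000U). A standalone check that it matches a plain shift-based sign extension:

	#include <assert.h>
	#include <stdint.h>

	/* sign-extend the 26-bit branch displacement the way the patch does;
	 * valid when the AA/LK bits are zero, as the mask check guarantees */
	static uint32_t sext26_patch(uint32_t insn)
	{
		return ((insn | 0xFC000000U) ^ 0x02000000U) + 0x02000000U;
	}

	/* the same extension written with an arithmetic shift */
	static uint32_t sext26_shift(uint32_t insn)
	{
		uint32_t d = insn & 0x03FFFFFCU;	/* LI field, low bits 0 */
		return (uint32_t)((int32_t)(d << 6) >> 6);
	}

	int main(void)
	{
		uint32_t fwd  = 0x48000100U;	/* b +0x100 */
		uint32_t back = 0x4BFFFFF0U;	/* b -0x10  */

		assert(sext26_patch(fwd)  == sext26_shift(fwd));
		assert(sext26_patch(back) == sext26_shift(back));
		assert(sext26_shift(back) == (uint32_t)-0x10);
		return 0;
	}
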
@@ -112,7 +469,7 @@
 	 * indicate errors in DSISR but can validly be set in SRR1.
 	 */
 	if (regs->trap == 0x400)
-		error_code &= 0x48200000;
+		error_code &= 0x58200000;
 	else
 		is_write = error_code & 0x02000000;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
@@ -245,6 +602,38 @@
 
 	/* User mode accesses cause a SIGSEGV */
 	if (user_mode(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+		if (current->flags & PF_PAX_PAGEEXEC) {
+			if ((regs->trap == 0x400) && (regs->nip == address)) {
+				switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+				case 2:
+				case 3:
+				case 4:
+					return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+				case 5:
+					return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT
+				case 6:
+				case 7:
+					return;
+#endif
+
+				}
+
+				pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]);
+				do_exit(SIGKILL);
+			}
+		}
+#endif
+
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		info.si_code = code;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/mm/init.c linux-2.4.26-leo/arch/ppc/mm/init.c
--- linux-2.4.26/arch/ppc/mm/init.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc/mm/init.c	2004-05-26 23:34:34.000000000 +0100
@@ -126,6 +126,9 @@
 int do_check_pgt_cache(int low, int high)
 {
 	int freed = 0;
+
+	preempt_disable();
+
 	if (pgtable_cache_size > high) {
 		do {
                         if (pgd_quicklist) {
@@ -138,6 +141,9 @@
 			}
 		} while (pgtable_cache_size > low);
 	}
+
+	preempt_enable();
+
 	return freed;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc/mm/tlb.c linux-2.4.26-leo/arch/ppc/mm/tlb.c
--- linux-2.4.26/arch/ppc/mm/tlb.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/ppc/mm/tlb.c	2004-05-26 23:34:34.000000000 +0100
@@ -58,11 +58,14 @@
 	 * we can and should dispense with flush_tlb_all().
 	 *  -- paulus.
 	 */
+
+	preempt_disable();
 	local_flush_tlb_range(&init_mm, TASK_SIZE, ~0UL);
 
 #ifdef CONFIG_SMP
 	smp_send_tlb_invalidate(0);
 #endif /* CONFIG_SMP */
+	preempt_enable();
 }
 
 /*
@@ -73,8 +76,10 @@
 void
 local_flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
 	if (Hash == 0) {
 		_tlbia();
+		preempt_enable();
 		return;
 	}
 
@@ -88,6 +93,7 @@
 #ifdef CONFIG_SMP
 	smp_send_tlb_invalidate(0);
 #endif
+	preempt_enable();
 }
 
 void
@@ -97,8 +103,10 @@
 	pmd_t *pmd;
 	pte_t *pte;
 
+	preempt_disable();
 	if (Hash == 0) {
 		_tlbie(vmaddr);
+		preempt_enable();
 		return;
 	}
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
@@ -111,6 +119,7 @@
 #ifdef CONFIG_SMP
 	smp_send_tlb_invalidate(0);
 #endif
+	preempt_enable();
 }
 
 
@@ -127,13 +136,17 @@
 	unsigned long pmd_end;
 	unsigned int ctx = mm->context;
 
+	preempt_disable();
 	if (Hash == 0) {
 		_tlbia();
+		preempt_enable();
 		return;
 	}
 	start &= PAGE_MASK;
-	if (start >= end)
+	if (start >= end) {
+		preempt_enable();
 		return;
+	}
 	pmd = pmd_offset(pgd_offset(mm, start), start);
 	do {
 		pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
@@ -156,4 +169,5 @@
 #ifdef CONFIG_SMP
 	smp_send_tlb_invalidate(0);
 #endif
+	preempt_enable();
 }
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/ppc64/kernel/ioctl32.c linux-2.4.26-leo/arch/ppc64/kernel/ioctl32.c
--- linux-2.4.26/arch/ppc64/kernel/ioctl32.c	2004-02-20 14:11:39.000000000 +0000
+++ linux-2.4.26-leo/arch/ppc64/kernel/ioctl32.c	2004-06-22 20:53:46.000000000 +0100
@@ -66,6 +66,7 @@
 #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
 #include <linux/lvm.h>
 #endif /* LVM */
+#include <linux/dm-ioctl.h>
 
 #include <scsi/scsi.h>
 /* Ugly hack. */
@@ -1824,7 +1825,11 @@
 	 * To have permissions to do most of the vt ioctls, we either have
 	 * to be the owner of the tty, or super-user.
 	 */
+#ifdef CONFIG_GRKERNSEC
+	if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (current->tty == tty || suser())
+#endif
 		return 1;
 	return 0;                                                    
 }
@@ -4408,6 +4413,22 @@
 COMPATIBLE_IOCTL(NBD_PRINT_DEBUG),
 COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS),
 COMPATIBLE_IOCTL(NBD_DISCONNECT),
+/* device-mapper */
+#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
+COMPATIBLE_IOCTL(DM_VERSION),
+COMPATIBLE_IOCTL(DM_REMOVE_ALL),
+COMPATIBLE_IOCTL(DM_DEV_CREATE),
+COMPATIBLE_IOCTL(DM_DEV_REMOVE),
+COMPATIBLE_IOCTL(DM_TABLE_LOAD),
+COMPATIBLE_IOCTL(DM_DEV_SUSPEND),
+COMPATIBLE_IOCTL(DM_DEV_RENAME),
+COMPATIBLE_IOCTL(DM_TABLE_DEPS),
+COMPATIBLE_IOCTL(DM_DEV_STATUS),
+COMPATIBLE_IOCTL(DM_TABLE_STATUS),
+COMPATIBLE_IOCTL(DM_DEV_WAIT),
+COMPATIBLE_IOCTL(DM_LIST_DEVICES),
+COMPATIBLE_IOCTL(DM_TABLE_CLEAR),
+#endif /* CONFIG_BLK_DEV_DM */
 /* Remove *PRIVATE in 2.5 */
 COMPATIBLE_IOCTL(SIOCDEVPRIVATE),
 COMPATIBLE_IOCTL(SIOCDEVPRIVATE+1),
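
COMPATIBLE_IOCTL() marks a command whose argument layout is identical for 32-bit and 64-bit callers, so the compat layer can hand it straight to the native sys_ioctl() without a translation handler; the DM_* commands qualify because the device-mapper ioctl interface passes a single fixed-layout structure. A simplified, hypothetical model of what such a table entry amounts to (the real table layout is kernel-internal):

	/* hypothetical sketch: a pass-through entry is a command number
	 * paired with a NULL translation handler */
	struct ioctl32_entry {
		unsigned int cmd;
		int (*handler)(unsigned int fd, unsigned int cmd,
			       unsigned long arg);	/* NULL => forward as-is */
	};

	#define COMPATIBLE_IOCTL(cmd)	{ (cmd), 0 },
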
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/s390/config.in linux-2.4.26-leo/arch/s390/config.in
--- linux-2.4.26/arch/s390/config.in	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/s390/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -87,3 +87,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/s390x/config.in linux-2.4.26-leo/arch/s390x/config.in
--- linux-2.4.26/arch/s390x/config.in	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/s390x/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -91,3 +91,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/s390x/kernel/ioctl32.c linux-2.4.26-leo/arch/s390x/kernel/ioctl32.c
--- linux-2.4.26/arch/s390x/kernel/ioctl32.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/s390x/kernel/ioctl32.c	2004-05-26 23:34:18.000000000 +0100
@@ -30,6 +30,7 @@
 #include <linux/blk.h>
 #include <linux/elevator.h>
 #include <linux/raw.h>
+#include <linux/dm-ioctl.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/dasd.h>
@@ -627,6 +628,20 @@
 
 	IOCTL32_DEFAULT(SIOCGSTAMP),
 
+	IOCTL32_DEFAULT(DM_VERSION),
+	IOCTL32_DEFAULT(DM_REMOVE_ALL),
+	IOCTL32_DEFAULT(DM_DEV_CREATE),
+	IOCTL32_DEFAULT(DM_DEV_REMOVE),
+	IOCTL32_DEFAULT(DM_TABLE_LOAD),
+	IOCTL32_DEFAULT(DM_DEV_SUSPEND),
+	IOCTL32_DEFAULT(DM_DEV_RENAME),
+	IOCTL32_DEFAULT(DM_TABLE_DEPS),
+	IOCTL32_DEFAULT(DM_DEV_STATUS),
+	IOCTL32_DEFAULT(DM_TABLE_STATUS),
+	IOCTL32_DEFAULT(DM_DEV_WAIT),
+	IOCTL32_DEFAULT(DM_LIST_DEVICES),
+	IOCTL32_DEFAULT(DM_TABLE_CLEAR),
+
 	IOCTL32_DEFAULT(LOOP_SET_FD),
 	IOCTL32_DEFAULT(LOOP_CLR_FD),
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sh/config.in linux-2.4.26-leo/arch/sh/config.in
--- linux-2.4.26/arch/sh/config.in	2004-02-20 14:11:39.000000000 +0000
+++ linux-2.4.26-leo/arch/sh/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -493,3 +493,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/boot/Makefile linux-2.4.26-leo/arch/sparc/boot/Makefile
--- linux-2.4.26/arch/sparc/boot/Makefile	2002-08-03 01:39:43.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc/boot/Makefile	2004-06-22 20:53:46.000000000 +0100
@@ -24,7 +24,7 @@
 
 BTOBJS := $(HEAD) init/main.o init/version.o init/do_mounts.o
 BTLIBS := $(CORE_FILES_NO_BTFIX) $(FILESYSTEMS) \
-	$(DRIVERS) $(NETWORKS)
+	$(DRIVERS) $(NETWORKS) $(GRSECURITY)
 
 # I wanted to make this depend upon BTOBJS so that a parallel
 # build would work, but this fails because $(HEAD) cannot work
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/config.in linux-2.4.26-leo/arch/sparc/config.in
--- linux-2.4.26/arch/sparc/config.in	2004-02-20 14:11:40.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -282,3 +282,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/kernel/ptrace.c linux-2.4.26-leo/arch/sparc/kernel/ptrace.c
--- linux-2.4.26/arch/sparc/kernel/ptrace.c	2002-08-03 01:39:43.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc/kernel/ptrace.c	2004-06-22 20:53:46.000000000 +0100
@@ -17,6 +17,7 @@
 #include <linux/user.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -310,6 +311,9 @@
 		goto out;
 	}
 
+	if(gr_handle_ptrace(child, request))
+		goto out_tsk;
+
 	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
 	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
 		if (ptrace_attach(child)) {
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/kernel/sys_sparc.c linux-2.4.26-leo/arch/sparc/kernel/sys_sparc.c
--- linux-2.4.26/arch/sparc/kernel/sys_sparc.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc/kernel/sys_sparc.c	2004-06-22 20:53:46.000000000 +0100
@@ -20,6 +20,7 @@
 #include <linux/utsname.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
@@ -54,6 +55,13 @@
 		return -ENOMEM;
 	if (ARCH_SUN4C_SUN4 && len > 0x20000000)
 		return -ENOMEM;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
+		addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
+	else
+#endif
+
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -225,6 +233,11 @@
 	struct file * file = NULL;
 	unsigned long retval = -EBADF;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	if (!(flags & MAP_ANONYMOUS)) {
 		file = fget(fd);
 		if (!file)
@@ -243,6 +256,12 @@
 	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
 		goto out_putf;
 
+	if (gr_handle_mmap(file, prot)) {
+		fput(file);
+		retval = -EACCES;
+		goto out;
+	}
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
 	down_write(&current->mm->mmap_sem);
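
The RANDMMAP hunk in the first part of this file replaces the fixed TASK_UNMAPPED_BASE search start with a per-mm randomized base. A sketch of the base selection, in kernel context, assuming delta_mmap is a page-aligned random offset chosen once at exec time (its generation is not part of this section):

	/* sketch: where the free-area search starts for this request */
	static unsigned long mmap_search_base(unsigned long addr, struct file *filp)
	{
	#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
		if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
			return TASK_UNMAPPED_BASE + current->mm->delta_mmap;
	#endif
		return addr ? addr : TASK_UNMAPPED_BASE;
	}
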
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/kernel/sys_sunos.c linux-2.4.26-leo/arch/sparc/kernel/sys_sunos.c
--- linux-2.4.26/arch/sparc/kernel/sys_sunos.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc/kernel/sys_sunos.c	2004-06-22 20:53:46.000000000 +0100
@@ -68,6 +68,11 @@
 	struct file * file = NULL;
 	unsigned long retval, ret_type;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	if(flags & MAP_NORESERVE) {
 		static int cnt;
 		if (cnt++ < 10)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/mm/fault.c linux-2.4.26-leo/arch/sparc/mm/fault.c
--- linux-2.4.26/arch/sparc/mm/fault.c	2003-06-13 15:51:32.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -19,6 +19,9 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/system.h>
 #include <asm/segment.h>
@@ -200,6 +203,264 @@
 	return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+void pax_emuplt_close(struct vm_area_struct * vma)
+{
+	vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
+{
+	struct page* page;
+	unsigned int *kaddr;
+
+	page = alloc_page(GFP_HIGHUSER);
+	if (!page)
+		return page;
+
+	kaddr = kmap(page);
+	memset(kaddr, 0, PAGE_SIZE);
+	kaddr[0] = 0x9DE3BFA8U; /* save */
+	flush_dcache_page(page);
+	kunmap(page);
+	return page;
+}
+
+static struct vm_operations_struct pax_vm_ops = {
+	close:		pax_emuplt_close,
+	nopage:		pax_emuplt_nopage,
+};
+
+static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+	vma->vm_mm = current->mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
+	vma->vm_ops = &pax_vm_ops;
+	vma->vm_pgoff = 0UL;
+	vma->vm_file = NULL;
+	vma->vm_private_data = NULL;
+	insert_vm_struct(current->mm, vma);
+	++current->mm->total_vm;
+}
+
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ *         4 when legitimate ET_EXEC was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	int err;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		if (regs->pc >= current->mm->start_code &&
+		    regs->pc < current->mm->end_code)
+		{
+			if (regs->u_regs[UREG_RETPC] + 8UL == regs->pc)
+				return 1;
+
+			regs->pc += current->mm->delta_exec;
+			if (regs->npc >= current->mm->start_code &&
+			    regs->npc < current->mm->end_code)
+				regs->npc += current->mm->delta_exec;
+			return 4;
+		}
+		if (regs->pc >= current->mm->start_code + current->mm->delta_exec &&
+		    regs->pc < current->mm->end_code + current->mm->delta_exec)
+		{
+			regs->pc -= current->mm->delta_exec;
+			if (regs->npc >= current->mm->start_code + current->mm->delta_exec &&
+			    regs->npc < current->mm->end_code + current->mm->delta_exec)
+				regs->npc -= current->mm->delta_exec;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int sethi1, sethi2, jmpl;
+
+		err = get_user(sethi1, (unsigned int *)regs->pc);
+		err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
+		err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
+		{
+			unsigned int addr;
+
+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+			addr = regs->u_regs[UREG_G1];
+			addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	{ /* PaX: patched PLT emulation #2 */
+		unsigned int ba;
+
+		err = get_user(ba, (unsigned int *)regs->pc);
+
+		if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
+			unsigned int addr;
+
+			addr = regs->pc + 4 + (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	}
+
+	do { /* PaX: patched PLT emulation #3 */
+		unsigned int sethi, jmpl, nop;
+
+		err = get_user(sethi, (unsigned int*)regs->pc);
+		err |= get_user(jmpl, (unsigned int*)(regs->pc+4));
+		err |= get_user(nop, (unsigned int*)(regs->pc+8));
+
+		if (err)
+			break;
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned int addr;
+
+			addr = (sethi & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] = addr;
+			addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 1 */
+		unsigned int sethi, ba, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->pc);
+		err |= get_user(ba, (unsigned int *)(regs->pc+4));
+		err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned int addr, save, call;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
+			else
+				addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+
+			err = get_user(save, (unsigned int *)addr);
+			err |= get_user(call, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8)); 
+			if (err)
+				break;
+
+			if (save == 0x9DE3BFA8U &&
+			    (call & 0xC0000000U) == 0x40000000U &&
+			    nop == 0x01000000U)
+			{
+				struct vm_area_struct *vma;
+				unsigned long call_dl_resolve;
+
+				down_read(&current->mm->mmap_sem);
+				call_dl_resolve = current->mm->call_dl_resolve;
+				up_read(&current->mm->mmap_sem);
+				if (likely(call_dl_resolve))
+					goto emulate;
+
+				vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+
+				down_write(&current->mm->mmap_sem);
+				if (current->mm->call_dl_resolve) {
+					call_dl_resolve = current->mm->call_dl_resolve;
+					up_write(&current->mm->mmap_sem);
+					if (vma) kmem_cache_free(vm_area_cachep, vma);
+					goto emulate;
+				}
+
+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+					up_write(&current->mm->mmap_sem);
+					if (vma) kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				pax_insert_vma(vma, call_dl_resolve);
+				current->mm->call_dl_resolve = call_dl_resolve;
+				up_write(&current->mm->mmap_sem);
+
+emulate:
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->pc = call_dl_resolve;
+				regs->npc = addr+4;
+				return 3;
+			}
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 2 */
+		unsigned int save, call, nop;
+
+		err = get_user(save, (unsigned int*)(regs->pc-4));
+		err |= get_user(call, (unsigned int*)regs->pc);
+		err |= get_user(nop, (unsigned int*)(regs->pc+4));
+		if (err)
+			break;
+
+		if (save == 0x9DE3BFA8U &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
+
+			regs->u_regs[UREG_RETPC] = regs->pc;
+			regs->pc = dl_resolve;
+			regs->npc = dl_resolve+4;
+			return 3;
+		}
+	} while (0);
+
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 			       unsigned long address)
 {
@@ -263,6 +524,29 @@
 		if(!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+		if ((current->flags & PF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
+			up_read(&mm->mmap_sem);
+			switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+			case 2:
+			case 3:
+				return;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			case 4:
+				return;
+#endif
+
+			}
+			pax_report_fault(regs, (void*)regs->pc, (void*)regs->u_regs[UREG_FP]);
+			do_exit(SIGKILL);
+		}
+#endif
+
 		/* Allow reads even for write-only mappings */
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
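
The patched PLT emulation above rebuilds a 32-bit target address from a sethi/jmpl pair: sethi's imm22 field supplies the high 22 bits and jmpl's simm13 the signed low part, sign-extended by the (((x | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U) idiom. A standalone check of the reassembly:

	#include <assert.h>
	#include <stdint.h>

	/* high 22 bits carried by sethi's imm22 field */
	static uint32_t hi22(uint32_t sethi)
	{
		return (sethi & 0x003FFFFFU) << 10;
	}

	/* jmpl's simm13, sign-extended the way the emulation does it */
	static uint32_t lo13(uint32_t jmpl)
	{
		return ((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
	}

	int main(void)
	{
		uint32_t target = 0x12345678U;
		uint32_t sethi  = 0x03000000U | (target >> 10);    /* sethi %hi,%g1 */
		uint32_t jmpl   = 0x81C06000U | (target & 0x3FFU); /* jmpl %g1+%lo  */

		assert(hi22(sethi) + lo13(jmpl) == target);
		return 0;
	}
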
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/mm/init.c linux-2.4.26-leo/arch/sparc/mm/init.c
--- linux-2.4.26/arch/sparc/mm/init.c	2002-11-28 23:53:12.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc/mm/init.c	2004-06-22 20:53:46.000000000 +0100
@@ -350,17 +350,17 @@
 
 	/* Initialize the protection map with non-constant, MMU dependent values. */
 	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
+	protection_map[1] = PAGE_READONLY_NOEXEC;
+	protection_map[2] = PAGE_COPY_NOEXEC;
+	protection_map[3] = PAGE_COPY_NOEXEC;
 	protection_map[4] = PAGE_READONLY;
 	protection_map[5] = PAGE_READONLY;
 	protection_map[6] = PAGE_COPY;
 	protection_map[7] = PAGE_COPY;
 	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
+	protection_map[9] = PAGE_READONLY_NOEXEC;
+	protection_map[10] = PAGE_SHARED_NOEXEC;
+	protection_map[11] = PAGE_SHARED_NOEXEC;
 	protection_map[12] = PAGE_READONLY;
 	protection_map[13] = PAGE_READONLY;
 	protection_map[14] = PAGE_SHARED;
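
protection_map[] is indexed by the low four vm_flags bits (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8; the same vm_flags & 0x0f indexing appears in pax_insert_vma() above), so the hunk downgrades exactly the read/write/shared combinations without VM_EXEC to the _NOEXEC protections and leaves the executable entries alone. The lookup itself is just (stand-in typedef, kernel array assumed):

	typedef unsigned long pgprot_t;		/* stand-in for the kernel type */
	extern pgprot_t protection_map[16];	/* filled in at boot, as above  */

	/* sketch: a vma's access bits select its page protection */
	static pgprot_t prot_for(unsigned long vm_flags)
	{
		return protection_map[vm_flags & 0x0f];
	}
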
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc/mm/srmmu.c linux-2.4.26-leo/arch/sparc/mm/srmmu.c
--- linux-2.4.26/arch/sparc/mm/srmmu.c	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc/mm/srmmu.c	2004-06-22 20:53:46.000000000 +0100
@@ -2047,6 +2047,13 @@
 	BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
 	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
 	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC));
+	BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
+	BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
+#endif
+
 	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
 	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
 	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/config.in linux-2.4.26-leo/arch/sparc64/config.in
--- linux-2.4.26/arch/sparc64/config.in	2004-02-20 14:11:40.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc64/config.in	2004-06-22 20:53:46.000000000 +0100
@@ -318,3 +318,11 @@
 
 source crypto/Config.in
 source lib/Config.in
+
+mainmenu_option next_comment
+comment 'Grsecurity'
+bool 'Grsecurity' CONFIG_GRKERNSEC
+if [ "$CONFIG_GRKERNSEC" = "y" ]; then
+	source grsecurity/Config.in
+fi
+endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/ioctl32.c linux-2.4.26-leo/arch/sparc64/kernel/ioctl32.c
--- linux-2.4.26/arch/sparc64/kernel/ioctl32.c	2004-02-20 14:11:40.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc64/kernel/ioctl32.c	2004-06-22 20:53:46.000000000 +0100
@@ -56,6 +56,7 @@
 #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
 #include <linux/lvm.h>
 #endif /* LVM */
+#include <linux/dm-ioctl.h>
 
 #include <scsi/scsi.h>
 /* Ugly hack. */
@@ -2046,7 +2047,11 @@
 	 * To have permissions to do most of the vt ioctls, we either have
 	 * to be the owner of the tty, or super-user.
 	 */
+#ifdef CONFIG_GRKERNSEC
+	if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (current->tty == tty || suser())
+#endif
 		return 1;
 	return 0;                                                    
 }
@@ -5085,6 +5090,22 @@
 COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
 COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS)
 COMPATIBLE_IOCTL(NBD_DISCONNECT)
+/* device-mapper */
+#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
+COMPATIBLE_IOCTL(DM_VERSION)
+COMPATIBLE_IOCTL(DM_REMOVE_ALL)
+COMPATIBLE_IOCTL(DM_DEV_CREATE)
+COMPATIBLE_IOCTL(DM_DEV_REMOVE)
+COMPATIBLE_IOCTL(DM_TABLE_LOAD)
+COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
+COMPATIBLE_IOCTL(DM_DEV_RENAME)
+COMPATIBLE_IOCTL(DM_TABLE_DEPS)
+COMPATIBLE_IOCTL(DM_DEV_STATUS)
+COMPATIBLE_IOCTL(DM_TABLE_STATUS)
+COMPATIBLE_IOCTL(DM_DEV_WAIT)
+COMPATIBLE_IOCTL(DM_LIST_DEVICES)
+COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
+#endif /* CONFIG_BLK_DEV_DM */
 /* Linux-1394 */
 #if defined(CONFIG_IEEE1394) || defined(CONFIG_IEEE1394_MODULE)
 COMPATIBLE_IOCTL(AMDTP_IOC_CHANNEL)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/itlb_base.S linux-2.4.26-leo/arch/sparc64/kernel/itlb_base.S
--- linux-2.4.26/arch/sparc64/kernel/itlb_base.S	2003-06-13 15:51:32.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc64/kernel/itlb_base.S	2004-06-22 20:53:46.000000000 +0100
@@ -41,7 +41,9 @@
 	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
 	ldxa		[%g3 + %g6] ASI_P, %g5		! Load VPTE
 1:	brgez,pn	%g5, 3f				! Not valid, branch out
-	 nop						! Delay-slot
+	and		%g5, _PAGE_EXEC, %g4
+	brz,pn		%g4, 3f				! Not executable, branch out
+	nop						! Delay-slot
 2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
 	retry						! Trap return
 3:	rdpr		%pstate, %g4			! Move into alternate globals
@@ -74,8 +76,6 @@
 	nop
 	nop
 	nop
-	nop
-	nop
 	CREATE_VPTE_NOP
 
 #undef CREATE_VPTE_OFFSET1
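
The itlb_base.S change makes the ITLB refill handler refuse to load a translation whose PTE lacks _PAGE_EXEC, so an instruction fetch from a non-executable page falls through to the slow path and shows up in do_sparc64_fault() as an ITLB miss; that is the hook the PAGEEXEC logic later in this patch relies on. Roughly, in C (the bit values here are illustrative assumptions, not the real sparc64 PTE layout):

	#include <stdint.h>

	#define PTE_VALID_BIT	(1ULL << 63)	/* assumed: brgez tests the sign bit */
	#define PTE_EXEC_BIT	(1ULL << 12)	/* assumed value, illustrative only  */

	/* 1 = load the PTE into the ITLB, 0 = take the slow fault path */
	static int itlb_may_load(uint64_t pte)
	{
		if (!(pte & PTE_VALID_BIT))
			return 0;
		if (!(pte & PTE_EXEC_BIT))	/* the check the patch adds */
			return 0;
		return 1;
	}
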
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/ptrace.c linux-2.4.26-leo/arch/sparc64/kernel/ptrace.c
--- linux-2.4.26/arch/sparc64/kernel/ptrace.c	2002-11-28 23:53:12.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc64/kernel/ptrace.c	2004-06-22 20:53:46.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/user.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 #include <asm/asi.h>
 #include <asm/pgtable.h>
@@ -161,6 +162,11 @@
 		goto out;
 	}
 
+	if (gr_handle_ptrace(child, (long)request)) {
+		pt_error_return(regs, EPERM);
+		goto out_tsk;
+	}
+
 	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
 	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
 		if (ptrace_attach(child)) {
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/sys_sparc32.c linux-2.4.26-leo/arch/sparc64/kernel/sys_sparc32.c
--- linux-2.4.26/arch/sparc64/kernel/sys_sparc32.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc64/kernel/sys_sparc32.c	2004-06-22 20:53:46.000000000 +0100
@@ -52,6 +52,8 @@
 #include <linux/sysctl.h>
 #include <linux/dnotify.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/random.h>
+#include <linux/grsecurity.h>
 
 #include <asm/types.h>
 #include <asm/ipc.h>
@@ -3232,8 +3234,18 @@
 	struct file * file;
 	int retval;
 	int i;
+#ifdef CONFIG_GRKERNSEC
+	struct file *old_exec_file;
+	struct acl_subject_label *old_acl;
+	struct rlimit old_rlim[RLIM_NLIMITS];
+#endif
 
 	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK
+	bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
+#endif
+
 	memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
 
 	file = open_exec(filename);
@@ -3242,6 +3254,20 @@
 	if (IS_ERR(file))
 		return retval;
 
+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
+
+	if (gr_handle_nproc()) {
+		allow_write_access(file);
+		fput(file);
+		return -EAGAIN;
+	}
+
+	if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
+		allow_write_access(file);
+		fput(file);
+		return -EACCES;
+	}
+
 	bprm.file = file;
 	bprm.filename = filename;
 	bprm.sh_bang = 0;
@@ -3262,11 +3288,24 @@
 	if (retval < 0)
 		goto out;
 	
+	if(!gr_tpe_allow(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	if (gr_check_crash_exec(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
 	if (retval < 0)
 		goto out;
 
 	bprm.exec = bprm.p;
+
+	gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
+
 	retval = copy_strings32(bprm.envc, envp, &bprm);
 	if (retval < 0)
 		goto out;
@@ -3275,11 +3314,32 @@
 	if (retval < 0)
 		goto out;
 
+#ifdef CONFIG_GRKERNSEC
+	old_acl = current->acl;
+	memcpy(old_rlim, current->rlim, sizeof(old_rlim));
+	old_exec_file = current->exec_file;
+	get_file(file);
+	current->exec_file = file;
+#endif
+
+	gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
+
 	retval = search_binary_handler(&bprm, regs);
-	if (retval >= 0)
+	if (retval >= 0) {
+#ifdef CONFIG_GRKERNSEC
+		if (old_exec_file)
+			fput(old_exec_file);
+#endif
 		/* execve success */
 		return retval;
+	}
 
+#ifdef CONFIG_GRKERNSEC
+	current->acl = old_acl;
+	memcpy(current->rlim, old_rlim, sizeof(old_rlim));
+	fput(current->exec_file);
+	current->exec_file = old_exec_file;
+#endif
 out:
 	/* Something went wrong, return the inode and free the argument pages*/
 	allow_write_access(bprm.file);
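
The RANDUSTACK adjustment above (bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK) shifts the initial argument-stack pointer down by a random amount that stays pointer-aligned and strictly below one page. A standalone sketch with a stand-in random source:

	#include <stdlib.h>

	#define PAGE_MASK (~4095UL)	/* assumption: 4 KiB pages */

	/* sketch: randomize the exec-time argument stack top */
	static unsigned long randomize_arg_top(unsigned long p)
	{
		unsigned long r = (unsigned long)rand();	/* stand-in for get_random_long() */

		return p - ((r & ~(sizeof(void *) - 1)) & ~PAGE_MASK);
	}
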
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/sys_sparc.c linux-2.4.26-leo/arch/sparc64/kernel/sys_sparc.c
--- linux-2.4.26/arch/sparc64/kernel/sys_sparc.c	2003-08-25 12:44:40.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc64/kernel/sys_sparc.c	2004-06-22 20:53:46.000000000 +0100
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/ipc.h>
 #include <linux/personality.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
@@ -63,6 +64,13 @@
 		task_size = 0xf0000000UL;
 	if (len > task_size || len > -PAGE_OFFSET)
 		return -ENOMEM;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
+		addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
+	else
+#endif
+
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -289,11 +297,22 @@
 	struct file * file = NULL;
 	unsigned long retval = -EBADF;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	if (!(flags & MAP_ANONYMOUS)) {
 		file = fget(fd);
 		if (!file)
 			goto out;
 	}
+
+	if (gr_handle_mmap(file, prot)) {
+		retval = -EACCES;
+		goto out_putf;
+	}
+
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 	len = PAGE_ALIGN(len);
 	retval = -EINVAL;
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/kernel/sys_sunos32.c linux-2.4.26-leo/arch/sparc64/kernel/sys_sunos32.c
--- linux-2.4.26/arch/sparc64/kernel/sys_sunos32.c	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/arch/sparc64/kernel/sys_sunos32.c	2004-06-22 20:53:46.000000000 +0100
@@ -68,6 +68,11 @@
 	struct file *file = NULL;
 	unsigned long retval, ret_type;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	if(flags & MAP_NORESERVE) {
 		static int cnt;
 		if (cnt++ < 10)
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/mm/fault.c linux-2.4.26-leo/arch/sparc64/mm/fault.c
--- linux-2.4.26/arch/sparc64/mm/fault.c	2002-11-28 23:53:12.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc64/mm/fault.c	2004-06-22 20:53:46.000000000 +0100
@@ -16,6 +16,9 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -299,6 +302,363 @@
 	unhandled_fault (address, current, regs);
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+static void pax_emuplt_close(struct vm_area_struct * vma)
+{
+	vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
+{
+	struct page* page;
+	unsigned int *kaddr;
+
+	page = alloc_page(GFP_HIGHUSER);
+	if (!page)
+		return page;
+
+	kaddr = kmap(page);
+	memset(kaddr, 0, PAGE_SIZE);
+	kaddr[0] = 0x9DE3BFA8U; /* save */
+	flush_dcache_page(page);
+	kunmap(page);
+	return page;
+}
+
+static struct vm_operations_struct pax_vm_ops = {
+	close:		pax_emuplt_close,
+	nopage:		pax_emuplt_nopage,
+};
+
+static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+	vma->vm_mm = current->mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
+	vma->vm_ops = &pax_vm_ops;
+	vma->vm_pgoff = 0UL; 
+	vma->vm_file = NULL;
+	vma->vm_private_data = NULL;
+	insert_vm_struct(current->mm, vma);
+	++current->mm->total_vm;
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->tpc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ *         4 when legitimate ET_EXEC was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	int err;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC) {
+		if (regs->tpc >= current->mm->start_code &&
+		    regs->tpc < current->mm->end_code)
+		{
+			if (regs->u_regs[UREG_RETPC] + 8UL == regs->tpc)
+				return 1;
+
+			regs->tpc += current->mm->delta_exec;
+			if (regs->tnpc >= current->mm->start_code &&
+			    regs->tnpc < current->mm->end_code)
+				regs->tnpc += current->mm->delta_exec;
+			return 4;
+		}
+		if (regs->tpc >= current->mm->start_code + current->mm->delta_exec &&
+		    regs->tpc < current->mm->end_code + current->mm->delta_exec)
+		{
+			regs->tpc -= current->mm->delta_exec;
+			if (regs->tnpc >= current->mm->start_code + current->mm->delta_exec &&
+			    regs->tnpc < current->mm->end_code + current->mm->delta_exec)
+				regs->tnpc -= current->mm->delta_exec;
+		}
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int sethi1, sethi2, jmpl;
+
+		err = get_user(sethi1, (unsigned int*)regs->tpc);
+		err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
+		err |= get_user(jmpl, (unsigned int*)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+			addr = regs->u_regs[UREG_G1];
+			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	{ /* PaX: patched PLT emulation #2 */
+		unsigned int ba;
+
+		err = get_user(ba, (unsigned int*)regs->tpc);
+
+		if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
+			unsigned long addr;
+
+			addr = regs->tpc + 4 + (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL);
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	}
+
+	do { /* PaX: patched PLT emulation #3 */
+		unsigned int sethi, jmpl, nop;
+
+		err = get_user(sethi, (unsigned int*)regs->tpc);
+		err |= get_user(jmpl, (unsigned int*)(regs->tpc+4));
+		err |= get_user(nop, (unsigned int*)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			addr = (sethi & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] = addr;
+			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #4 */
+		unsigned int mov1, call, mov2;
+
+		err = get_user(mov1, (unsigned int*)regs->tpc);
+		err |= get_user(call, (unsigned int*)(regs->tpc+4));
+		err |= get_user(mov2, (unsigned int*)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if (mov1 == 0x8210000FU &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    mov2 == 0x9E100001U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
+			addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #5 */
+		unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
+
+		err = get_user(sethi1, (unsigned int*)regs->tpc);
+		err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
+		err |= get_user(or1, (unsigned int*)(regs->tpc+8));
+		err |= get_user(or2, (unsigned int*)(regs->tpc+12));
+		err |= get_user(sllx, (unsigned int*)(regs->tpc+16));
+		err |= get_user(jmpl, (unsigned int*)(regs->tpc+20));
+		err |= get_user(nop, (unsigned int*)(regs->tpc+24));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+		    (or1 & 0xFFFFE000U) == 0x82106000U &&
+		    (or2 & 0xFFFFE000U) == 0x8A116000U &&
+		    sllx == 0x83287020U &&
+		    jmpl == 0x81C04005U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
+			regs->u_regs[UREG_G1] <<= 32;
+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #6 */
+		unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
+
+		err = get_user(sethi1, (unsigned int*)regs->tpc);
+		err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
+		err |= get_user(sllx, (unsigned int*)(regs->tpc+8));
+		err |= get_user(or, (unsigned int*)(regs->tpc+12));
+		err |= get_user(jmpl, (unsigned int*)(regs->tpc+16));
+		err |= get_user(nop, (unsigned int*)(regs->tpc+20));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+		    sllx == 0x83287020U &&
+		    (or & 0xFFFFE000U) == 0x8A116000U &&
+		    jmpl == 0x81C04005U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] <<= 32;
+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 1 */
+		unsigned int sethi, ba, nop;
+
+		err = get_user(sethi, (unsigned int*)regs->tpc);
+		err |= get_user(ba, (unsigned int*)(regs->tpc+4));
+		err |= get_user(nop, (unsigned int*)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+			unsigned int save, call;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
+			else
+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+			err = get_user(save, (unsigned int*)addr);
+			err |= get_user(call, (unsigned int*)(addr+4));
+			err |= get_user(nop, (unsigned int*)(addr+8));
+
+			if (err)
+				break;
+
+			if (save == 0x9DE3BFA8U &&
+			    (call & 0xC0000000U) == 0x40000000U &&
+			    nop == 0x01000000U)
+			{
+				struct vm_area_struct *vma;
+				unsigned long call_dl_resolve;
+
+				down_read(&current->mm->mmap_sem);
+				call_dl_resolve = current->mm->call_dl_resolve;
+				up_read(&current->mm->mmap_sem);
+				if (likely(call_dl_resolve))
+					goto emulate;
+
+				vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+
+				down_write(&current->mm->mmap_sem);
+				if (current->mm->call_dl_resolve) {
+					call_dl_resolve = current->mm->call_dl_resolve;
+					up_write(&current->mm->mmap_sem);
+					if (vma) kmem_cache_free(vm_area_cachep, vma);
+					goto emulate;
+				}
+
+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+					up_write(&current->mm->mmap_sem);
+					if (vma) kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				pax_insert_vma(vma, call_dl_resolve);
+				current->mm->call_dl_resolve = call_dl_resolve;
+				up_write(&current->mm->mmap_sem);
+
+emulate:
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->tpc = call_dl_resolve;
+				regs->tnpc = addr+4;
+				return 3;
+			}
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 2 */
+		unsigned int save, call, nop;
+
+		err = get_user(save, (unsigned int*)(regs->tpc-4));
+		err |= get_user(call, (unsigned int*)regs->tpc);
+		err |= get_user(nop, (unsigned int*)(regs->tpc+4));
+		if (err)
+			break;
+
+		if (save == 0x9DE3BFA8U &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+
+			regs->u_regs[UREG_RETPC] = regs->tpc;
+			regs->tpc = dl_resolve;
+			regs->tnpc = dl_resolve+4;
+			return 3;
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int*)pc+i)) {
+			printk("<invalid address>.");
+			break;
+		}
+		printk("%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 asmlinkage void do_sparc64_fault(struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
@@ -338,6 +698,7 @@
 
 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
 		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
 		address &= 0xffffffff;
 	}
 
@@ -346,6 +707,34 @@
 	if (!vma)
 		goto bad_area;
 
+	/* PaX: detect ITLB misses on non-exec pages */
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if ((current->flags & PF_PAX_PAGEEXEC) && vma->vm_start <= address && 
+	    !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
+	{
+		if (address != regs->tpc)
+			goto good_area;
+
+		up_read(&mm->mmap_sem);
+		switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT
+		case 2:
+		case 3:
+			goto fault_done;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+		case 4:
+			goto fault_done;
+#endif
+
+		}
+		pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS));
+		do_exit(SIGKILL);
+	}
+#endif
+
 	/* Pure DTLB misses do not tell us whether the fault causing
 	 * load/store/atomic was a write or not, it only says that there
 	 * was no match.  So in such a case we (carefully) read the
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/sparc64/solaris/misc.c linux-2.4.26-leo/arch/sparc64/solaris/misc.c
--- linux-2.4.26/arch/sparc64/solaris/misc.c	2002-11-28 23:53:12.000000000 +0000
+++ linux-2.4.26-leo/arch/sparc64/solaris/misc.c	2004-06-22 20:53:46.000000000 +0100
@@ -53,6 +53,11 @@
 	struct file *file = NULL;
 	unsigned long retval, ret_type;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (flags & MAP_MIRROR)
+		return -EINVAL;
+#endif
+
 	/* Do we need it here? */
 	set_personality(PER_SVR4);
 	if (flags & MAP_NORESERVE) {
diff -urN --exclude-from=diff-exclude linux-2.4.26/arch/x86_64/ia32/ia32_ioctl.c linux-2.4.26-leo/arch/x86_64/ia32/ia32_ioctl.c
--- linux-2.4.26/arch/x86_64/ia32/ia32_ioctl.c	2004-05-19 21:34:37.000000000 +0100
+++ linux-2.4.26-leo/arch/x86_64/ia32/ia32_ioctl.c	2004-06-22 20:53:46.000000000 +0100
@@ -68,6 +68,7 @@
 #define max max
 #include <linux/lvm.h>
 #endif /* LVM */
+#include <linux/dm-ioctl.h>
 
 #include <scsi/scsi.h>
 /* Ugly hack. */
@@ -1950,7 +1951,11 @@
 	 * To have permissions to do most of the vt ioctls, we either have
 	 * to be the owner of the tty, or super-user.
 	 */
+#ifdef CONFIG_GRKERNSEC
+	if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (current->tty == tty || suser())
+#endif
 		return 1;
 	return 0;                                                    
 }
@@ -4140,6 +4145,22 @@
 COMPATIBLE_IOCTL(LV_BMAP)
 COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
 #endif /* LVM */
+/* Device-Mapper */
+#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
+COMPATIBLE_IOCTL(DM_VERSION)
+COMPATIBLE_IOCTL(DM_REMOVE_ALL)
+COMPATIBLE_IOCTL(DM_DEV_CREATE)
+COMPATIBLE_IOCTL(DM_DEV_REMOVE)
+COMPATIBLE_IOCTL(DM_TABLE_LOAD)
+COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
+COMPATIBLE_IOCTL(DM_DEV_RENAME)
+COMPATIBLE_IOCTL(DM_TABLE_DEPS)
+COMPATIBLE_IOCTL(DM_DEV_STATUS)
+COMPATIBLE_IOCTL(DM_TABLE_STATUS)
+COMPATIBLE_IOCTL(DM_DEV_WAIT)
+COMPATIBLE_IOCTL(DM_LIST_DEVICES)
+COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
+#endif /* CONFIG_BLK_DEV_DM */
 #ifdef CONFIG_AUTOFS_FS
 COMPATIBLE_IOCTL(AUTOFS_IOC_READY)
 COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL)
diff -urN --exclude-from=diff-exclude linux-2.4.26/CREDITS linux-2.4.26-leo/CREDITS
--- linux-2.4.26/CREDITS	2004-05-19 21:34:35.000000000 +0100
+++ linux-2.4.26-leo/CREDITS	2004-05-26 23:34:34.000000000 +0100
@@ -999,8 +999,8 @@
 
 N: Nigel Gamble
 E: nigel@nrg.org
-E: nigel@sgi.com
 D: Interrupt-driven printer driver
+D: Preemptible kernel
 S: 120 Alley Way
 S: Mountain View, California 94040
 S: USA
diff -urN --exclude-from=diff-exclude linux-2.4.26/crypto/Config.in linux-2.4.26-leo/crypto/Config.in
--- linux-2.4.26/crypto/Config.in	2004-05-19 21:34:37.000000000 +0100
+++ linux-2.4.26-leo/crypto/Config.in	2004-05-26 23:34:14.000000000 +0100
@@ -4,7 +4,9 @@
 mainmenu_option next_comment
 comment 'Cryptographic options'
 
-if [ "$CONFIG_INET_AH" = "y" -o \
+if [ "$CONFIG_BLK_DEV_CRYPTOLOOP" = "y" -o \
+     "$CONFIG_BLK_DEV_CRYPTOLOOP" = "m" -o \
+     "$CONFIG_INET_AH" = "y" -o \
      "$CONFIG_INET_AH" = "m" -o \
      "$CONFIG_INET_ESP" = "y" -o \
      "$CONFIG_INET_ESP" = "m" -o \
diff -urN --exclude-from=diff-exclude linux-2.4.26/Documentation/Configure.help linux-2.4.26-leo/Documentation/Configure.help
--- linux-2.4.26/Documentation/Configure.help	2004-05-19 21:34:35.000000000 +0100
+++ linux-2.4.26-leo/Documentation/Configure.help	2004-06-22 20:59:00.000000000 +0100
@@ -296,6 +296,17 @@
   If you have a system with several CPUs, you do not need to say Y
   here: the local APIC will be used automatically.
 
+Preemptible Kernel
+CONFIG_PREEMPT
+  This option reduces the latency of the kernel when reacting to
+  real-time or interactive events by allowing a low priority process to
+  be preempted even if it is in kernel mode executing a system call.
+  This allows applications to run more reliably even when the system is
+  under load.
+
+  Say Y here if you are building a kernel for a desktop, embedded or
+  real-time system.  Say N if you are unsure.
+
 Kernel math emulation
 CONFIG_MATH_EMULATION
   Linux can emulate a math coprocessor (used for floating point
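
One concrete consequence of the CONFIG_PREEMPT option described above, and the reason for hunks like the temp.c one earlier in this patch: the value of smp_processor_id() is only stable while preemption is disabled, since a preempted task can be migrated to another CPU. A sketch of the required pattern (stand-in declarations, not kernel code):

	/* illustrative stand-ins */
	extern void preempt_disable(void);
	extern void preempt_enable(void);
	extern int smp_processor_id(void);
	extern void touch_per_cpu_state(int cpu);

	static void update_local_cpu_state(void)
	{
		preempt_disable();
		touch_per_cpu_state(smp_processor_id());	/* cannot migrate here */
		preempt_enable();
	}
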
@@ -1931,6 +1942,20 @@
   want), say M here and read <file:Documentation/modules.txt>.  The
   module will be called lvm-mod.o.
 
+Device-mapper support
+CONFIG_BLK_DEV_DM
+  Device-mapper is a low level volume manager.  It works by allowing
+  people to specify mappings for ranges of logical sectors.  Various
+  mapping types are available; in addition, people may write their
+  own modules containing custom mappings if they wish.
+
+  Higher level volume managers such as LVM2 use this driver.
+
+  If you want to compile this as a module, say M here and read 
+  <file:Documentation/modules.txt>.  The module will be called dm-mod.o.
+
+  If unsure, say N.
+
 Multiple devices driver support (RAID and LVM)
 CONFIG_MD
   Support multiple physical spindles through a single logical device.
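
To make the device-mapper description above concrete: the simplest mapping type, "linear", just remaps a range of logical sectors onto an offset within an underlying device. A minimal sketch of that arithmetic (illustrative types, not the kernel's dm target API):

	#include <stdint.h>

	/* sketch of a dm "linear" target: logical sector -> underlying sector */
	struct linear_target {
		uint64_t dev_offset;	/* start sector on the backing device */
	};

	static uint64_t linear_map(const struct linear_target *t, uint64_t sector)
	{
		return t->dev_offset + sector;
	}
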
@@ -2039,6 +2064,18 @@
 
   If unsure, say Y.
 
+Verbose startup / shutdown messages
+CONFIG_MD_VERBMSG
+  If set, display verbose messages on kernel startup about RAID
+  device initialisation. This can slow down startup by a few seconds,
+  especially on framebuffer terminal types, or if a lot of RAID
+  devices are present, and also fills up space in the dmesg buffer
+  that could be useful elsewhere. However, it aids tracking down 
+  certain RAID problems. It adds about 1k to the kernel size.
+
+  If unsure, say Y.  Even if unset, error messages are still
+  reported.
+
 Multipath I/O support
 CONFIG_MD_MULTIPATH
   Multipath-IO is the ability of certain devices to address the same
@@ -2852,6 +2889,20 @@
   If you want to compile it as a module, say M here and read
   Documentation/modules.txt.  If unsure, say `N'.
 
+Stealth networking support
+CONFIG_IP_NF_MATCH_STEALTH
+  Enabling this option will drop all SYN packets coming to unserved TCP
+  ports as well as all packets coming to unserved UDP ports.  If you
+  are using your system to route any type of packets (e.g. via NAT),
+  you should put this module at the end of your ruleset, since it
+  drops packets that aren't going to ports listening on your machine
+  itself; it doesn't take into account that a packet might be
+  destined for someone on your internal network if you're using NAT,
+  for instance.
+
+  If you want to compile it as a module, say M here and read
+  Documentation/modules.txt.  If unsure, say `N'.
+
 MAC address match support
 CONFIG_IP_NF_MATCH_MAC
   MAC matching allows you to match packets based on the source
@@ -23244,6 +23295,935 @@
 
   "Area6" will work for most boards. For ADX, select "Area5".
 
+Grsecurity
+CONFIG_GRKERNSEC
+  If you say Y here, you will be able to configure many features that
+  will enhance the security of your system.  It is highly recommended
+  that you say Y here and read through the help for each option so
+  you fully understand the features and can evaluate their usefulness
+  for your machine.
+
+Additional security levels
+CONFIG_GRKERNSEC_LOW
+
+  Low additional security
+  -----------------------------------------------------------------------
+  If you choose this option, several of the grsecurity options will
+  be enabled to give you greater protection against a number of
+  attacks, while ensuring that none of your software will conflict
+  with the additional security measures.  If you run a lot of
+  unusual software, or you are having problems with the higher security
+  levels, you should say Y here.  With this option, the following features
+  are enabled:
+  
+  linking restrictions
+  fifo restrictions
+  random pids
+  enforcing nproc on execve()
+  restricted dmesg
+  random ip ids
+  enforced chdir("/") on chroot
+
+  Medium additional security
+  -----------------------------------------------------------------------
+  If you say Y here, several features in addition to those included in the 
+  low additional security level will be enabled.  These features provide
+  even more security to your system, though in rare cases they may
+  be incompatible with very old or poorly written software.  If you 
+  enable this option, make sure that your auth service (identd) is 
+  running as gid 10 (usually group wheel). With this option the following 
+  features (in addition to those provided in the low additional security 
+  level) will be enabled:
+
+  random tcp source ports
+  failed fork logging
+  time change logging
+  signal logging
+  deny mounts in chroot
+  deny double chrooting
+  deny sysctl writes in chroot
+  deny mknod in chroot
+  deny access to abstract AF_UNIX sockets out of chroot
+  deny pivot_root in chroot
+  denied writes of /dev/kmem, /dev/mem, and /dev/port
+  /proc restrictions with special gid set to 10 (usually wheel)
+  address space layout randomization
+  removal of addresses from /proc/<pid>/[maps|stat]
+
+  High additional security
+  ----------------------------------------------------------------------
+  If you say Y here, many of the features of grsecurity will be
+  enabled to protect you against many kinds of attacks on
+  your system.  The heightened security comes at the cost of an
+  increased chance of incompatibilities with rare software on your 
+  machine.  Since this security level enables PaX, you should view 
+  <http://pax.grsecurity.net> and read about the PaX project.  While 
+  you are there, download chpax and run it on binaries that cause 
+  problems with PaX.  Also remember that since the /proc restrictions are 
+  enabled, you must run your identd as group wheel (gid 10).  
+  This security level enables the following features in addition to those
+  listed in the low and medium security levels:
+
+  additional /proc restrictions
+  chmod restrictions in chroot
+  no signals, ptrace, or viewing processes outside of chroot
+  capability restrictions in chroot
+  deny fchdir out of chroot
+  priority restrictions in chroot
+  segmentation-based implementation of PaX
+  mprotect restrictions
+  kernel stack randomization
+  mount/unmount/remount logging
+  kernel symbol hiding
+
+Customized additional security
+CONFIG_GRKERNSEC_CUSTOM
+  If you say Y here, you will be able to configure every grsecurity 
+  option, which allows you to enable many more features that aren't 
+  covered in the basic security levels.  These additional features include 
+  TPE, socket restrictions, and the sysctl system for grsecurity.  It is 
+  advised that you read through the help for each option to determine its 
+  usefulness in your situation.
+
+Support soft mode
+CONFIG_GRKERNSEC_PAX_SOFTMODE
+  Enabling this option will allow you to run PaX in soft mode, that
+  is, PaX features will not be enforced by default, only on executables
+  marked explicitly.  You must also enable PT_PAX_FLAGS support as it
+  is the only way to mark executables for soft mode use.
+
+  Soft mode can be activated by using the "pax_softmode=1" kernel command
+  line option on boot.  Furthermore you can control various PaX features
+  at runtime via the entries in /proc/sys/kernel/pax.
+
+Use legacy ELF header marking
+CONFIG_GRKERNSEC_PAX_EI_PAX
+  Enabling this option will allow you to control PaX features on
+  a per executable basis via the 'chpax' utility available at
+  http://pax.grsecurity.net/.  The control flags will be read from
+  an otherwise reserved part of the ELF header.  This marking has
+  numerous drawbacks (no support for soft-mode, toolchain does not
+  know about the non-standard use of the ELF header) therefore it
+  has been deprecated in favour of PT_PAX_FLAGS support.
+
+  You should enable this option only if your toolchain does not yet
+  support the new control flag location (PT_PAX_FLAGS) or you still
+  have applications not marked by PT_PAX_FLAGS.
+
+  Note that if you enable PT_PAX_FLAGS marking support as well,
+  it will override the legacy EI_PAX marks.
+
+Use ELF program header marking
+CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS
+  Enabling this option will allow you to control PaX features on
+  a per executable basis via the 'paxctl' utility available at
+  http://pax.grsecurity.net/.  The control flags will be read from
+  a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
+  has the benefits of both supporting soft mode and being fully
+  integrated into the toolchain (the binutils patch is available
+  from http://pax.grsecurity.net).
+
+  Note that if you enable the legacy EI_PAX marking support as well,
+  it will be overridden by the PT_PAX_FLAGS marking.
+
+MAC system integration
+CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS
+  Mandatory Access Control systems have the option of controlling
+  PaX flags on a per executable basis, choose the method supported
+  by your particular system.
+
+  - "none": if your MAC system does not interact with PaX,
+  - "direct": if your MAC system defines pax_set_flags() itself,
+  - "hook": if your MAC system uses the pax_set_flags_func callback.
+
+  NOTE: this option is for developers/integrators only.
+
+Enforce non-executable pages
+CONFIG_GRKERNSEC_PAX_NOEXEC
+  By design some architectures do not allow for protecting memory
+  pages against execution or even if they do, Linux does not make
+  use of this feature.  In practice this means that if a page is
+  readable (such as the stack or heap) it is also executable.
+
+  There is a well known exploit technique that makes use of this
+  fact and a common programming mistake where an attacker can
+  introduce code of his choice somewhere in the attacked program's
+  memory (typically the stack or the heap) and then execute it.
+
+  If the attacked program was running with different (typically
+  higher) privileges than those of the attacker, then he can elevate
+  his own privilege level (e.g. get a root shell, write to files to
+  which he does not otherwise have write access, etc).
+
+  Enabling this option will let you choose from various features
+  that prevent the injection and execution of 'foreign' code in
+  a program.
+
+  This will also break programs that rely on the old behaviour and
+  expect that memory dynamically allocated via the malloc() family
+  of functions is executable (which it is not).  Notable examples
+  are the XFree86 4.x server, the java runtime and wine.
+
+Paging based non-executable pages
+CONFIG_GRKERNSEC_PAX_PAGEEXEC
+  This implementation is based on the paging feature of the CPU.
+  On i386 it has a variable performance impact on applications
+  depending on their memory usage pattern.  You should carefully
+  test your applications before using this feature in production.
+  On alpha, parisc, sparc and sparc64 there is no performance
+  impact.  On ppc there is a slight performance impact.
+
+Segmentation based non-executable pages
+CONFIG_GRKERNSEC_PAX_SEGMEXEC
+  This implementation is based on the segmentation feature of the
+  CPU and has little performance impact, however applications will
+  be limited to a 1.5 GB address space instead of the normal 3 GB.
+
+Emulate trampolines
+CONFIG_GRKERNSEC_PAX_EMUTRAMP
+  There are some programs and libraries that for one reason or
+  another attempt to execute special small code snippets from
+  non-executable memory pages.  Most notable examples are the
+  signal handler return code generated by the kernel itself and
+  the GCC trampolines.
+
+  If you enabled CONFIG_GRKERNSEC_PAX_PAGEEXEC or 
+  CONFIG_GRKERNSEC_PAX_SEGMEXEC then such programs will no longer
+  work under your kernel.
+
+  As a remedy you can say Y here and use the 'chpax' or 'paxctl'
+  utilities to enable trampoline emulation for the affected programs
+  yet still have the protection provided by the non-executable pages.
+
+  On parisc and ppc you MUST enable this option and EMUSIGRT as
+  well, otherwise your system will not even boot.
+
+  Alternatively you can say N here and use the 'chpax' or 'paxctl'
+  utilities to disable CONFIG_GRKERNSEC_PAX_PAGEEXEC and 
+  CONFIG_GRKERNSEC_PAX_SEGMEXEC for the affected files.
+
+  NOTE: enabling this feature *may* open up a loophole in the
+  protection provided by non-executable pages that an attacker
+  could abuse.  Therefore the best solution is to not have any
+  files on your system that would require this option.  This can
+  be achieved by not using libc5 (which relies on the kernel
+  signal handler return code) and not using or rewriting programs
+  that make use of the nested function implementation of GCC.
+  Skilled users can just fix GCC itself so that it implements
+  nested function calls in a way that does not interfere with PaX.
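+
+  As an illustrative (hypothetical) example of such code, taking the
+  address of a GCC nested function is exactly what generates a stack
+  trampoline:
+
+    int apply(int (*fn)(int), int x) { return fn(x); }
+
+    int outer(int base)
+    {
+            /* nested function: GCC builds a trampoline on the stack
+               so that add_base can reach 'base' through &add_base */
+            int add_base(int x) { return x + base; }
+            return apply(add_base, 1);
+    }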
+
+Automatically emulate sigreturn trampolines
+CONFIG_GRKERNSEC_PAX_EMUSIGRT
+  Enabling this option will have the kernel automatically detect
+  and emulate signal return trampolines executing on the stack
+  that would otherwise lead to task termination.
+
+  This solution is intended as a temporary one for users with
+  legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
+  Modula-3 runtime, etc) or executables linked to such, basically
+  everything that does not specify its own SA_RESTORER function in
+  normal executable memory like glibc 2.1+ does.
+
+  On parisc and ppc you MUST enable this option, otherwise your
+  system will not even boot.
+
+  NOTE: this feature cannot be disabled on a per executable basis
+  and since it *does* open up a loophole in the protection provided
+  by non-executable pages, the best solution is to not have any
+  files on your system that would require this option.
+
+Restrict mprotect()
+CONFIG_GRKERNSEC_PAX_MPROTECT
+  Enabling this option will prevent programs from
+   - changing the executable status of memory pages that were
+     not originally created as executable,
+   - making read-only executable pages writable again,
+   - creating executable pages from anonymous memory.
+
+  You should say Y here to complete the protection provided by
+  the enforcement of non-executable pages.
+
+  NOTE: you can use the 'chpax' utility to control this
+  feature on a per file basis. chpax is available at
+  <http://pax.grsecurity.net>
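+
+  As an illustrative sketch (hypothetical user-space code), with this
+  option enabled the following mprotect() call fails, since it would
+  turn anonymous writable memory into executable memory:
+
+    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    mprotect(p, 4096, PROT_READ | PROT_EXEC);  /* denied */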
+
+Disallow ELF text relocations
+CONFIG_GRKERNSEC_PAX_NOELFRELOCS
+  Non-executable pages and mprotect() restrictions are effective
+  in preventing the introduction of new executable code into an
+  attacked task's address space.  There remain only two avenues
+  for this kind of attack: if the attacker can execute already
+  existing code in the attacked task then he can either have it
+  create and mmap() a file containing his code or have it mmap()
+  an already existing ELF library that does not have position
+  independent code in it and use mprotect() on it to make it
+  writable and copy his code there.  While protecting against
+  the former approach is beyond PaX, the latter can be prevented
+  by having only PIC ELF libraries on one's system (which do not
+  need to relocate their code).  If you are sure this is your case,
+  then enable this option; otherwise be careful, as you may not even
+  be able to boot or log on to your system (for example, some PAM
+  modules are erroneously compiled as non-PIC by default).
+
+  NOTE: if you are using dynamic ELF executables (as suggested
+  when using ASLR) then you must have made sure that you linked
+  your files using the PIC version of crt1 (the et_dyn.zip package
+  referenced there has already been updated to support this).
+
+Enforce non-executable kernel pages
+CONFIG_GRKERNSEC_PAX_KERNEXEC
+  This is the kernel land equivalent of PAGEEXEC and MPROTECT,
+  that is, enabling this option will make it harder to inject
+  and execute 'foreign' code in kernel memory itself.
+
+Address Space Layout Randomization
+CONFIG_GRKERNSEC_PAX_ASLR
+  Many if not most exploit techniques rely on the knowledge of
+  certain addresses in the attacked program.  The following options
+  will allow the kernel to apply a certain amount of randomization
+  to specific parts of the program thereby forcing an attacker to
+  guess them in most cases.  Any failed guess will most likely crash
+  the attacked program, which allows the kernel to detect such attempts
+  and react to them.  PaX itself provides no reaction mechanisms;
+  instead it is strongly encouraged that you make use of grsecurity's
+  built-in crash detection features or develop one yourself.
+
+  By saying Y here you can choose to randomize the following areas:
+   - top of the task's kernel stack
+   - top of the task's userland stack
+   - base address for mmap() requests that do not specify one
+     (this includes all libraries)
+   - base address of the main executable
+
+  It is strongly recommended that you say Y here, as address space
+  layout randomization has negligible impact on performance yet
+  provides very effective protection.
+
+  NOTE: you can use the 'chpax' or 'paxctl' utilities to control most
+  of these features on a per file basis.
+
+Randomize kernel stack base
+CONFIG_GRKERNSEC_PAX_RANDKSTACK
+  By saying Y here the kernel will randomize every task's kernel
+  stack on every system call.  This will not only force an attacker
+  to guess it but also prevent him from making use of possible
+  leaked information about it.
+
+  Since the kernel stack is a rather scarce resource, randomization
+  may cause unexpected stack overflows, therefore you should very
+  carefully test your system.  Note that once enabled in the kernel
+  configuration, this feature cannot be disabled on a per file basis.
+
+Randomize user stack base
+CONFIG_GRKERNSEC_PAX_RANDUSTACK
+  By saying Y here the kernel will randomize every task's userland
+  stack.  The randomization is done in two steps, where the second
+  one may apply a large shift to the top of the stack and
+  cause problems for programs that want to use lots of memory (more
+  than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
+  For this reason the second step can be controlled by 'chpax' or
+  'paxctl' on a per file basis.
+
+Randomize ET_EXEC base
+CONFIG_GRKERNSEC_PAX_RANDEXEC     
+  By saying Y here the kernel will randomize the base address of normal
+  ET_EXEC ELF executables as well.  This is accomplished by mapping the
+  executable in memory in a special way which also allows for detecting
+  attackers who attempt to execute its code for their purposes.  Since
+  this special mapping causes performance degradation and the attack
+  detection may create false alarms as well, you should carefully test
+  your executables when this feature is enabled.
+
+  This solution is intended only as a temporary one until you relink
+  your programs as a dynamic ELF file.
+
+  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+  this feature on a per file basis.
+
+Allow ELF ET_EXEC text relocations
+CONFIG_GRKERNSEC_PAX_ETEXECRELOCS
+  On some architectures like the alpha there are incorrectly
+  created applications that require text relocations and would
+  not work without enabling this option.  If you are an alpha
+  user, you should enable this option and disable it once you
+  have made sure that none of your applications need it.
+
+Automatically emulate ELF PLT
+CONFIG_GRKERNSEC_PAX_EMUPLT
+  Enabling this option will have the kernel automatically detect
+  and emulate the Procedure Linkage Table entries in ELF files.
+  On some architectures such entries are in writable memory, which
+  becomes non-executable under PaX, leading to task termination.  Therefore
+  it is mandatory that you enable this option on alpha, parisc, ppc,
+  sparc and sparc64, otherwise your system would not even boot.
+
+  NOTE: this feature *does* open up a loophole in the protection
+  provided by the non-executable pages, therefore the proper
+  solution is to modify the toolchain to produce a PLT that does
+  not need to be writable.
+
+Randomize mmap() base
+CONFIG_GRKERNSEC_PAX_RANDMMAP
+  By saying Y here the kernel will use a randomized base address for
+  mmap() requests that do not specify one themselves.  As a result
+  all dynamically loaded libraries will appear at random addresses
+  and therefore be harder to exploit by a technique where an attacker
+  attempts to execute library code for his purposes (e.g. spawn a
+  shell from an exploited program that is running at an elevated
+  privilege level).
+
+  Furthermore, if a program is relinked as a dynamic ELF file, its
+  base address will be randomized as well, completing the full
+  randomization of the address space layout.  Attacking such programs
+  becomes a guessing game.  You can find an example of doing this at
+  <http://pax.grsecurity.net/et_dyn.zip> and practical samples at
+  <http://www.grsecurity.net/grsec-gcc-specs.tar.gz> .
+
+  NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
+  feature on a per file basis.
+
+Deny writing to /dev/kmem and /dev/mem
+CONFIG_GRKERNSEC_KMEM
+  If you say Y here, writing to /dev/kmem and /dev/mem, whether via
+  mmap or otherwise, to modify the running kernel will not be allowed.
+  If you have module support disabled, enabling this along with the
+  /dev/port option (below) will close up four ways that are currently
+  used to insert malicious code into the running kernel.  Even with all
+  these features enabled, we still highly recommend that you use the
+  ACL system, as it is still possible for an attacker to modify the
+  running kernel through privileged I/O granted by ioperm/iopl.
+  If you are not using XFree86, you may be able to stop this additional
+  case by enabling the 'Disable privileged I/O' option.  Though nothing
+  legitimately writes to /dev/kmem, XFree86 does need to write to
+  /dev/mem, but only to video memory, which is the only writing we
+  allow in this case.  If /dev/kmem or /dev/mem is mmapped without
+  PROT_WRITE, the mapping cannot later be mprotect()ed with PROT_WRITE.
+  Enabling this feature could stop certain apps like VMware from
+  working, as they need to write to other locations in /dev/mem.
+  It is highly recommended that you say Y here if you meet all the
+  conditions above.
+
+Deny access to /dev/port
+CONFIG_GRKERNSEC_PORT
+  If you say Y here, /dev/port will not be able to be opened.  This option
+  is normally used in conjunction with the /dev/kmem option (above).  It is
+  separated out here because it breaks certain utilities (for example,
+  kbdrate).  It is highly recommended that you say Y here.
+
+Disable privileged I/O
+CONFIG_GRKERNSEC_IO
+  If you say Y here, all ioperm and iopl calls will return an error.
+  Ioperm and iopl can be used to modify the running kernel.
+  Unfortunately, some programs need this access to operate properly,
+  the most notable of which are XFree86 and hwclock.  hwclock can be
+  remedied by having RTC support in the kernel, so CONFIG_RTC is
+  enabled if this option is enabled, to ensure that hwclock operates
+  correctly.  XFree86 still will not operate correctly with this option
+  enabled, so DO NOT CHOOSE Y IF YOU USE XFree86.  If you use XFree86
+  and you still want to protect your kernel against modification,
+  use the ACL system.
+
+Hide kernel symbols
+CONFIG_GRKERNSEC_HIDESYM
+  If you say Y here, getting information on loaded modules, and 
+  displaying all kernel symbols through a syscall will be restricted
+  to users with CAP_SYS_MODULE.  This option is only effective 
+  provided the following conditions are met:
+  1) The kernel using grsecurity is not precompiled by some distribution
+  2) You are using the ACL system and hiding other files such as your
+     kernel image and System.map
+  3) You have the additional /proc restrictions enabled, which removes
+     /proc/kcore
+  If the above conditions are met, this option will help provide
+  useful protection against local and remote kernel exploitation of
+  overflows and arbitrary read/write vulnerabilities.
+
+/proc/<pid>/ipaddr support
+CONFIG_GRKERNSEC_PROC_IPADDR
+  If you say Y here, a new entry will be added to each /proc/<pid>
+  directory that contains the IP address of the person using the task.
+  The IP is carried across local TCP and AF_UNIX stream sockets.
+  This information can be useful for IDS/IPSes to perform remote response
+  to a local attack.  The entry is readable only by the owner of the
+  process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
+  the RBAC system), and thus does not create privacy concerns.
+
+Proc Restrictions
+CONFIG_GRKERNSEC_PROC
+  If you say Y here, the permissions of the /proc filesystem
+  will be altered to enhance system security and privacy.  Depending
+  upon the options you choose, you can either restrict users to seeing
+  only the processes they themselves run, or choose a group that can
+  view all processes and files normally restricted to root.
+  NOTE: If you're running identd as a non-root user, you will have to
+  run it as the group you specify here.
+
+Restrict /proc to user only
+CONFIG_GRKERNSEC_PROC_USER
+  If you say Y here, non-root users will only be able to view their own
+  processes, and will be restricted from viewing network-related
+  information and kernel symbol and module information.
+
+Restrict /proc to user and group
+CONFIG_GRKERNSEC_PROC_USERGROUP
+  If you say Y here, you will be able to select a group that will be
+  able to view all processes, network-related information, and
+  kernel and symbol information.  This option is useful if you want
+  to run identd as a non-root user.
+
+Remove addresses from /proc/pid/[maps|stat]
+CONFIG_GRKERNSEC_PROC_MEMMAP
+  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
+  give no information about the addresses of a task's mappings if
+  PaX features that rely on random addresses are enabled for the task.
+  If you use PaX it is strongly recommended that you say Y here, as it
+  closes up a hole that makes the full ASLR useless for suid
+  binaries.
+
+Additional proc restrictions
+CONFIG_GRKERNSEC_PROC_ADD
+  If you say Y here, additional restrictions will be placed on
+  /proc that keep normal users from viewing CPU and device information.
+
+Dmesg(8) Restriction
+CONFIG_GRKERNSEC_DMESG
+  If you say Y here, non-root users will not be able to use dmesg(8)
+  to view up to the last 4kb of messages in the kernel's log buffer.
+  If the sysctl option is enabled, a sysctl option with name "dmesg" is 
+  created.
+
+Linking restrictions
+CONFIG_GRKERNSEC_LINK
+  If you say Y here, /tmp race exploits will be prevented, since users
+  will no longer be able to follow symlinks owned by other users in
+  world-writable +t directories (i.e. /tmp), unless the owner of the
+  symlink is the owner of the directory.  Users will also not be
+  able to hardlink to files they do not own.  If the sysctl option is
+  enabled, a sysctl option with name "linking_restrictions" is created.
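+
+  As an illustrative sketch of the race this stops: a privileged program
+  creates a predictably named temporary file (the name is hypothetical),
+
+    fd = open("/tmp/app.tmp", O_WRONLY | O_CREAT, 0600);
+
+  and an attacker who wins the race has already created /tmp/app.tmp as
+  a symlink to a file such as /etc/passwd, which the open() then follows
+  with the privileged program's credentials.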
+
+FIFO restrictions
+CONFIG_GRKERNSEC_FIFO
+  If you say Y here, users will not be able to write to FIFOs they don't
+  own in world-writable +t directories (i.e. /tmp), unless the owner of
+  the FIFO is the same as the owner of the directory it's held in.  If
+  the sysctl option is enabled, a sysctl option with name
+  "fifo_restrictions" is created.
+
+Enforce RLIMIT_NPROC on execs
+CONFIG_GRKERNSEC_EXECVE
+  If you say Y here, users with a resource limit on processes will
+  have the limit checked during execve() calls.  Stock Linux only
+  checks the limit during fork() calls.  If the sysctl option
+  is enabled, a sysctl option with name "execve_limiting" is created.
+
+Single group for auditing
+CONFIG_GRKERNSEC_AUDIT_GROUP
+  If you say Y here, the exec, chdir, (un)mount, and ipc logging features
+  will only operate on a group you specify.  This option is recommended
+  if you only want to watch certain users instead of having a large
+  number of logs from the entire system.  If the sysctl option is enabled,
+  a sysctl option with name "audit_group" is created.
+
+GID for auditing
+CONFIG_GRKERNSEC_AUDIT_GID
+  Here you can choose the GID that will be the target of kernel auditing.
+  Remember to add the users you want to log to the GID specified here.
+  If the sysctl option is enabled, whatever you choose here won't matter. 
+  You'll have to specify the GID in your bootup script by echoing the GID 
+  to the proper /proc entry.  View the help on the sysctl option for more 
+  information.  If the sysctl option is enabled, a sysctl option with name 
+  "audit_gid" is created.
+
+Chdir logging
+CONFIG_GRKERNSEC_AUDIT_CHDIR
+  If you say Y here, all chdir() calls will be logged.  If the sysctl 
+  option is enabled, a sysctl option with name "audit_chdir" is created.
+
+(Un)Mount logging
+CONFIG_GRKERNSEC_AUDIT_MOUNT
+  If you say Y here, all mounts and unmounts will be logged.  If the 
+  sysctl option is enabled, a sysctl option with name "audit_mount" is 
+  created.
+
+IPC logging
+CONFIG_GRKERNSEC_AUDIT_IPC
+  If you say Y here, creation and removal of message queues, semaphores,
+  and shared memory will be logged.  If the sysctl option is enabled, a
+  sysctl option with name "audit_ipc" is created.
+
+Exec logging
+CONFIG_GRKERNSEC_EXECLOG
+  If you say Y here, all execve() calls will be logged (since the
+  other exec*() calls are frontends to execve(), all execution
+  will be logged).  Useful for shell-servers that like to keep track
+  of their users.  If the sysctl option is enabled, a sysctl option with
+  name "exec_logging" is created.
+  WARNING: This option when enabled will produce a LOT of logs, especially
+  on an active system.
+
+Resource logging
+CONFIG_GRKERNSEC_RESLOG
+  If you say Y here, all attempts to overstep resource limits will
+  be logged with the resource name, the requested size, and the current
+  limit.  It is highly recommended that you say Y here.
+
+Signal logging
+CONFIG_GRKERNSEC_SIGNAL
+  If you say Y here, certain important signals will be logged, such as
+  SIGSEGV, which will as a result inform you when an error in a program
+  occurred, which in some cases could mean a possible exploit attempt.
+  If the sysctl option is enabled, a sysctl option with name 
+  "signal_logging" is created.
+
+Fork failure logging
+CONFIG_GRKERNSEC_FORKFAIL
+  If you say Y here, all failed fork() attempts will be logged.
+  This could suggest a fork bomb, or someone attempting to overstep
+  their process limit.  If the sysctl option is enabled, a sysctl option
+  with name "forkfail_logging" is created.
+
+Time change logging
+CONFIG_GRKERNSEC_TIME
+  If you say Y here, any changes of the system clock will be logged.
+  If the sysctl option is enabled, a sysctl option with name 
+  "timechange_logging" is created.
+
+ELF text relocations logging
+CONFIG_GRKERNSEC_AUDIT_TEXTREL
+  If you say Y here, text relocations will be logged with the filename 
+  of the offending library or binary.  The purpose of the feature is 
+  to help Linux distribution developers get rid of libraries and 
+  binaries that need text relocations which hinder the future progress 
+  of PaX.  Only Linux distribution developers should say Y here, and 
+  never on a production machine, as this option creates an information 
+  leak that could aid an attacker in defeating the randomization of
+  a single memory region.  If the sysctl option is enabled, a sysctl
+  option with name "audit_textrel" is created.
+
+Chroot jail restrictions
+CONFIG_GRKERNSEC_CHROOT
+  If you say Y here, you will be able to choose several options that will
+  make breaking out of a chrooted jail much more difficult.  If you
+  encounter no software incompatibilities with the following options, it
+  is recommended that you enable each one.
+
+Deny access to abstract AF_UNIX sockets out of chroot
+CONFIG_GRKERNSEC_CHROOT_UNIX
+  If you say Y here, processes inside a chroot will not be able to
+  connect to abstract (meaning not belonging to a filesystem) Unix
+  domain sockets that were bound outside of a chroot.  It is recommended
+  that you say Y here.  If the sysctl option is enabled, a sysctl option
+  with name "chroot_deny_unix" is created.
+
+Deny shmat() out of chroot
+CONFIG_GRKERNSEC_CHROOT_SHMAT
+  If you say Y here, processes inside a chroot will not be able to attach
+  to shared memory segments that were created outside of the chroot jail.
+  It is recommended that you say Y here.  If the sysctl option is enabled,
+  a sysctl option with name "chroot_deny_shmat" is created.
+
+Protect outside processes
+CONFIG_GRKERNSEC_CHROOT_FINDTASK
+  If you say Y here, processes inside a chroot will not be able to
+  kill, send signals with fcntl, ptrace, capget, setpgid, getpgid, 
+  getsid, or view any process outside of the chroot.  If the sysctl 
+  option is enabled, a sysctl option with name "chroot_findtask" is 
+  created.
+
+Deny mounts in chroot
+CONFIG_GRKERNSEC_CHROOT_MOUNT
+  If you say Y here, processes inside a chroot will not be able to
+  mount or remount filesystems.  If the sysctl option is enabled, a 
+  sysctl option with name "chroot_deny_mount" is created.
+
+Deny pivot_root in chroot
+CONFIG_GRKERNSEC_CHROOT_PIVOT
+  If you say Y here, processes inside a chroot will not be able to use
+  a function called pivot_root() that was introduced in Linux 2.3.41.  It
+  works similarly to chroot in that it changes the root filesystem.  This
+  function could be misused in a chrooted process to attempt to break out 
+  of the chroot, and therefore should not be allowed.  If the sysctl 
+  option is enabled, a sysctl option with name "chroot_deny_pivot" is 
+  created.
+
+Deny double-chroots
+CONFIG_GRKERNSEC_CHROOT_DOUBLE
+  If you say Y here, processes inside a chroot will not be able to chroot
+  again outside of the chroot.  This is a widely used method of breaking 
+  out of a chroot jail and should not be allowed.  If the sysctl option 
+  is enabled, a sysctl option with name "chroot_deny_chroot" is created.
+
+Deny fchdir outside of chroot
+CONFIG_GRKERNSEC_CHROOT_FCHDIR
+  If you say Y here, a well-known method of breaking chroots will be
+  stopped: fchdir'ing to a file descriptor, retained by the chrooting
+  process, that points to a directory outside the chroot.  If the sysctl option
+  is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
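+
+  The breakout being stopped looks roughly like this (illustrative
+  sketch, run as root inside the jail):
+
+    int i, fd = open(".", O_RDONLY);  /* directory fd inside the jail */
+    mkdir("x", 0700);
+    chroot("x");                      /* push the root below our fd */
+    fchdir(fd);                       /* cwd is now outside the new root */
+    for (i = 0; i < 64; i++)
+            chdir("..");              /* climb up to the real root */
+    chroot(".");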
+
+Enforce chdir("/") on all chroots
+CONFIG_GRKERNSEC_CHROOT_CHDIR
+  If you say Y here, the current working directory of all newly-chrooted
+  applications will be set to the root directory of the chroot.
+  The man page on chroot(2) states:
+  Note that this call does not change the current working
+  directory, so that `.' can be outside the tree rooted at
+  `/'.  In particular, the super-user can escape from a
+  `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
+
+  It is recommended that you say Y here, since it's not known to break
+  any software.  If the sysctl option is enabled, a sysctl option with
+  name "chroot_enforce_chdir" is created.
+
+Deny (f)chmod +s in chroot
+CONFIG_GRKERNSEC_CHROOT_CHMOD
+  If you say Y here, processes inside a chroot will not be able to chmod
+  or fchmod files to make them have suid or sgid bits.  This protects 
+  against another published method of breaking a chroot.  If the sysctl 
+  option is enabled, a sysctl option with name "chroot_deny_chmod" is
+  created.
+
+Deny mknod in chroot
+CONFIG_GRKERNSEC_CHROOT_MKNOD
+  If you say Y here, processes inside a chroot will not be allowed to
+  mknod.  The problem with using mknod inside a chroot is that it
+  would allow an attacker to create a device entry that is the same
+  as one on the physical root of your system, which could range from
+  anything from the console device to a device for your harddrive (which
+  they could then use to wipe the drive or steal data).  It is recommended
+  that you say Y here, unless you run into software incompatibilities.
+  If the sysctl option is enabled, a sysctl option with name
+  "chroot_deny_mknod" is created.
+
+Restrict priority changes in chroot
+CONFIG_GRKERNSEC_CHROOT_NICE
+  If you say Y here, processes inside a chroot will not be able to raise
+  the priority of processes in the chroot, or alter the priority of 
+  processes outside the chroot.  This provides more security than simply
+  removing CAP_SYS_NICE from the process' capability set.  If the
+  sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
+  is created.
+
+Log all execs within chroot
+CONFIG_GRKERNSEC_CHROOT_EXECLOG
+  If you say Y here, all executions inside a chroot jail will be logged 
+  to syslog.  This can cause a large amount of logs if certain
+  applications (eg. djb's daemontools) are installed on the system, and
+  is therefore left as an option.  If the sysctl option is enabled, a 
+  sysctl option with name "chroot_execlog" is created.
+
+Deny sysctl writes in chroot
+CONFIG_GRKERNSEC_CHROOT_SYSCTL
+  If you say Y here, an attacker in a chroot will not be able to
+  write to sysctl entries, either by sysctl(2) or through a /proc
+  interface.  It is strongly recommended that you say Y here. If the
+  sysctl option is enabled, a sysctl option with name 
+  "chroot_deny_sysctl" is created.
+
+Chroot jail capability restrictions
+CONFIG_GRKERNSEC_CHROOT_CAPS
+  If you say Y here, the capabilities on all root processes within a
+  chroot jail will be lowered to stop module insertion, raw I/O,
+  system and net admin tasks, rebooting the system, modifying immutable
+  files, modifying IPC owned by another, and changing the system time.
+  This is left as an option because it can break some apps.  Disable this
+  if your chrooted apps are having problems performing those kinds of
+  tasks.  If the sysctl option is enabled, a sysctl option with
+  name "chroot_caps" is created.
+
+Trusted path execution
+CONFIG_GRKERNSEC_TPE
+  If you say Y here, you will be able to choose a gid to add to the
+  supplementary groups of users you want to mark as "untrusted."
+  These users will not be able to execute any files that are not in
+  root-owned directories writable only by root.  If the sysctl option
+  is enabled, a sysctl option with name "tpe" is created.
+
+Group for trusted path execution
+CONFIG_GRKERNSEC_TPE_GID
+  Here you can choose the GID to enable trusted path protection for.
+  Remember to add the users you want protection enabled for to the GID 
+  specified here.  If the sysctl option is enabled, whatever you choose
+  here won't matter. You'll have to specify the GID in your bootup 
+  script by echoing the GID to the proper /proc entry.  View the help
+  on the sysctl option for more information.  If the sysctl option is
+  enabled, a sysctl option with name "tpe_gid" is created.
+
+Partially restrict non-root users
+CONFIG_GRKERNSEC_TPE_ALL
+  If you say Y here, all non-root users other than the ones in the
+  group specified in the main TPE option will only be allowed to
+  execute files in directories they own that are not group or
+  world-writable, or in directories owned by root and writable only by
+  root.  If the sysctl option is enabled, a sysctl option with name 
+  "tpe_restrict_all" is created.
+
+Randomized PIDs
+CONFIG_GRKERNSEC_RANDPID
+  If you say Y here, all PIDs created on the system will be
+  pseudo-randomly generated.  This is extremely effective, along
+  with the /proc restrictions, at keeping an attacker from guessing
+  the pids of daemons, etc.  PIDs are also used in some cases as part
+  of a naming system for temporary files, so this option would keep
+  those filenames from being predicted as well.  We also use code
+  to make sure that PID numbers aren't reused too soon.  If the sysctl
+  option is enabled, a sysctl option with name "rand_pids" is created.
+
+Larger entropy pools
+CONFIG_GRKERNSEC_RANDNET
+  If you say Y here, the entropy pools used for many features of Linux
+  and grsecurity will be doubled in size.  Since several grsecurity 
+  features use additional randomness, it is recommended that you say Y 
+  here.  Saying Y here has a similar effect as modifying
+  /proc/sys/kernel/random/poolsize.
+
+Truly random TCP ISN selection
+CONFIG_GRKERNSEC_RANDISN
+  If you say Y here, Linux's default selection of TCP Initial Sequence
+  Numbers (ISNs) will be replaced with that of OpenBSD.  Linux uses
+  an MD4 hash based on the connection plus a time value to create the
+  ISN, while OpenBSD's selection is random.  If the sysctl option is 
+  enabled, a sysctl option with name "rand_isns" is created.
+
+Randomized IP IDs
+CONFIG_GRKERNSEC_RANDID
+  If you say Y here, the IP ID field on all outgoing packets
+  will be randomized.  This hinders OS fingerprinting and
+  keeps your machine from being used as a bounce for an untraceable
+  portscan.  IDs are used for fragmented packets; fragments belonging
+  to the same packet have the same ID.  By default Linux only
+  increments the ID value on each packet sent to an individual host.
+  We use a port of the OpenBSD random IP ID code to achieve the
+  randomness, while keeping the possibility of ID duplicates to
+  near none.  If the sysctl option is enabled, a sysctl option with name
+  "rand_ip_ids" is created.
+
+Randomized TCP source ports
+CONFIG_GRKERNSEC_RANDSRC
+  If you say Y here, situations where a source port is generated on the
+  fly for the TCP protocol (i.e. with connect()) will be altered so that
+  the source port is generated at random, instead of by a simple incrementing
+  algorithm.  If the sysctl option is enabled, a sysctl option with name
+  "rand_tcp_src_ports" is created.
+
+Randomized RPC XIDs
+CONFIG_GRKERNSEC_RANDRPC
+  If you say Y here, the method of determining XIDs for RPC requests will
+  be randomized, instead of using Linux's default behavior of simply
+  incrementing the XID.  If you want your RPC connections to be more
+  secure, say Y here.  If the sysctl option is enabled, a sysctl option 
+  with name "rand_rpc" is created.
+
+Socket restrictions
+CONFIG_GRKERNSEC_SOCKET
+  If you say Y here, you will be able to choose from several options.
+  If you assign a GID on your system and add it to the supplementary
+  groups of users you want to restrict socket access to, this patch
+  will perform up to three things, based on the option(s) you choose.
+
+Deny all socket access
+CONFIG_GRKERNSEC_SOCKET_ALL
+  If you say Y here, you will be able to choose a GID whose users will
+  be unable to connect to other hosts from your machine or run server
+  applications from your machine.  If the sysctl option is enabled, a
+  sysctl option with name "socket_all" is created.
+
+Group for disabled socket access
+CONFIG_GRKERNSEC_SOCKET_ALL_GID
+  Here you can choose the GID to disable socket access for. Remember to 
+  add the users you want socket access disabled for to the GID 
+  specified here.  If the sysctl option is enabled, whatever you choose
+  here won't matter. You'll have to specify the GID in your bootup 
+  script by echoing the GID to the proper /proc entry.  View the help
+  on the sysctl option for more information.  If the sysctl option is
+  enabled, a sysctl option with name "socket_all_gid" is created.
+
+Deny all client socket access
+CONFIG_GRKERNSEC_SOCKET_CLIENT
+  If you say Y here, you will be able to choose a GID whose users will
+  be unable to connect to other hosts from your machine, but will be
+  able to run servers.  If this option is enabled, all users in the group
+  you specify will have to use passive mode when initiating ftp transfers
+  from the shell on your machine.  If the sysctl option is enabled, a
+  sysctl option with name "socket_client" is created.
+
+Group for disabled client socket access
+CONFIG_GRKERNSEC_SOCKET_CLIENT_GID
+  Here you can choose the GID to disable client socket access for. 
+  Remember to add the users you want client socket access disabled for to 
+  the GID specified here.  If the sysctl option is enabled, whatever you 
+  choose here won't matter. You'll have to specify the GID in your bootup 
+  script by echoing the GID to the proper /proc entry.  View the help
+  on the sysctl option for more information.  If the sysctl option is
+  enabled, a sysctl option with name "socket_client_gid" is created.
+
+Deny all server socket access
+CONFIG_GRKERNSEC_SOCKET_SERVER
+  If you say Y here, you will be able to choose a GID whose users will
+  be unable to run server applications from your machine.  If the sysctl 
+  option is enabled, a sysctl option with name "socket_server" is created.
+
+Group for disabled server socket access
+CONFIG_GRKERNSEC_SOCKET_SERVER_GID
+  Here you can choose the GID to disable server socket access for. 
+  Remember to add the users you want server socket access disabled for to 
+  the GID specified here.  If the sysctl option is enabled, whatever you 
+  choose here won't matter. You'll have to specify the GID in your bootup 
+  script by echoing the GID to the proper /proc entry.  View the help
+  on the sysctl option for more information.  If the sysctl option is
+  enabled, a sysctl option with name "socket_server_gid" is created.
+
+Sysctl support
+CONFIG_GRKERNSEC_SYSCTL
+  If you say Y here, you will be able to change the options that
+  grsecurity runs with at bootup, without having to recompile your
+  kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
+  to enable (1) or disable (0) various features.  All the sysctl entries
+  are mutable until the "grsec_lock" entry is set to a non-zero value.
+  All features are disabled by default. Please note that this option could 
+  reduce the effectiveness of the added security of this patch if an ACL 
+  system is not put in place.  Your init scripts should be read-only, and 
+  root should not have access to adding modules or performing raw I/O
+  operations.  All options should be set at startup, and the grsec_lock 
+  entry should be set to a non-zero value after all the options are set.  
+  *THIS IS EXTREMELY IMPORTANT*
+
+Number of burst messages
+CONFIG_GRKERNSEC_FLOODBURST
+  This option allows you to choose the maximum number of messages allowed
+  within the flood time interval you chose in a separate option.  The 
+  default should be suitable for most people, however if you find that 
+  many of your logs are being interpreted as flooding, you may want to 
+  raise this value.
+
+Seconds in between log messages
+CONFIG_GRKERNSEC_FLOODTIME
+  This option allows you to enforce the number of seconds between
+  grsecurity log messages.  The default should be suitable for most 
+  people, however, if you choose to change it, choose a value small enough
+  to allow informative logs to be produced, but large enough to
+  prevent flooding.
+
+Hide kernel processes
+CONFIG_GRKERNSEC_ACL_HIDEKERN
+  If you say Y here, when the ACL system is enabled via gradm -E,
+  an additional ACL will be passed to the kernel that hides all kernel
+  processes.  These processes will only be viewable by the authenticated
+  admin, or processes that have viewing access set.
+
+Maximum tries before password lockout
+CONFIG_GRKERNSEC_ACL_MAXTRIES
+  This option enforces the maximum number of times a user can attempt
+  to authorize themselves with the grsecurity ACL system before being
+  denied the ability to attempt authorization again for a specified time.  
+  The lower the number, the harder it will be to brute-force a password.
+
+Time to wait after max password tries, in seconds
+CONFIG_GRKERNSEC_ACL_TIMEOUT
+  This option specifies the time the user must wait after attempting to 
+  authorize to the ACL system with the maximum number of invalid 
+  passwords.  The higher the number, the harder it will be to brute-force 
+  a password.
+
 Disable data cache
 CONFIG_DCACHE_DISABLE
   This option allows you to run the kernel with data cache disabled.
diff -urN --exclude-from=diff-exclude linux-2.4.26/Documentation/preempt-locking.txt linux-2.4.26-leo/Documentation/preempt-locking.txt
--- linux-2.4.26/Documentation/preempt-locking.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/Documentation/preempt-locking.txt	2004-05-26 23:34:34.000000000 +0100
@@ -0,0 +1,104 @@
+		  Proper Locking Under a Preemptible Kernel:
+		       Keeping Kernel Code Preempt-Safe
+			  Robert Love <rml@tech9.net>
+			   Last Updated: 22 Jan 2002
+
+
+INTRODUCTION
+
+
+A preemptible kernel creates new locking issues.  The issues are the same as
+those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
+kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
+requires explicit additional locking for very few additional situations.
+
+This document is for all kernel hackers.  Developing code in the kernel
+requires protecting these situations.
+ 
+
+RULE #1: Per-CPU data structures need explicit protection
+
+
+Two similar problems arise. An example code snippet:
+
+	struct this_needs_locking tux[NR_CPUS];
+	tux[smp_processor_id()] = some_value;
+	/* task is preempted here... */
+	something = tux[smp_processor_id()];
+
+First, since the data is per-CPU, it may not have explicit SMP locking, but
+would require it otherwise.  Second, when a preempted task is finally
+rescheduled, the previous value of smp_processor_id may not equal the current
+one.  You must protect these situations by disabling preemption around them.
+
+
+RULE #2: CPU state must be protected.
+
+
+Under preemption, the state of the CPU must be protected.  This is arch-
+dependent, but includes CPU structures and state not preserved over a context
+switch.  For example, on x86, entering and exiting FPU mode is now a critical
+section that must occur while preemption is disabled.  Think what would happen
+if the kernel is executing a floating-point instruction and is then preempted.
+Remember, the kernel does not save FPU state except for user tasks.  Therefore,
+upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
+preemption must be disabled around such regions.
+
+Note, some FPU functions are already explicitly preempt safe.  For example,
+kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
+However, math_state_restore must be called with preemption disabled.
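+
+For example, kernel_fpu_begin and kernel_fpu_end bracket such a critical
+section (a minimal sketch):
+
+	kernel_fpu_begin();	/* disables preemption itself */
+	/* ... use FPU/SSE registers; do not sleep here ... */
+	kernel_fpu_end();	/* reenables preemption */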
+
+
+RULE #3: Lock acquire and release must be performed by same task
+
+
+A lock acquired in one task must be released by the same task.  This
+means you can't do oddball things like acquire a lock and go off to
+play while another task releases it.  If you want to do something
+like this, acquire and release the lock in the same code path and
+have the caller wait on an event signaled by the other task.
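+
+For example (a sketch; the lock and completion names are made up):
+
+	/* task A: do the work under the lock, then signal task B */
+	spin_lock(&the_lock);
+	/* ... critical section ... */
+	spin_unlock(&the_lock);
+	complete(&work_done);
+
+	/* task B: waits for the event instead of unlocking A's lock */
+	wait_for_completion(&work_done);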
+
+
+SOLUTION
+
+
+Data protection under preemption is achieved by disabling preemption for the
+duration of the critical region.
+
+preempt_enable()		decrement the preempt counter
+preempt_disable()		increment the preempt counter
+preempt_enable_no_resched()	decrement, but do not immediately preempt
+preempt_get_count()		return the preempt counter
+
+The functions are nestable.  In other words, you can call preempt_disable
+n-times in a code path, and preemption will not be reenabled until the n-th
+call to preempt_enable.  The preempt statements compile to nothing if
+preemption is not enabled.
+
+Note that you do not need to explicitly prevent preemption if you are holding
+any locks or interrupts are disabled, since preemption is implicitly disabled
+in those cases.
+
+Example:
+
+	cpucache_t *cc; /* this is per-CPU */
+	preempt_disable();
+	cc = cc_data(searchp);
+	if (cc && cc->avail) {
+		__free_block(searchp, cc_entry(cc), cc->avail);
+		cc->avail = 0;
+	}
+	preempt_enable();
+	return 0;
+
+Notice how the preemption statements must encompass every reference to the
+critical variables.  Another example:
+
+	int buf[NR_CPUS];
+	set_cpu_val(buf);
+	if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
+	spin_lock(&buf_lock);
+	/* ... */
+
+This code is not preempt-safe, but see how easily we can fix it by simply
+moving the spin_lock up two lines.
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/block/Config.in linux-2.4.26-leo/drivers/block/Config.in
--- linux-2.4.26/drivers/block/Config.in	2003-11-28 18:26:19.000000000 +0000
+++ linux-2.4.26-leo/drivers/block/Config.in	2004-05-26 23:34:14.000000000 +0100
@@ -41,6 +41,7 @@
 dep_tristate 'Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)' CONFIG_BLK_DEV_UMEM $CONFIG_PCI $CONFIG_EXPERIMENTAL
 
 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
+dep_tristate '  Cryptoloop support' CONFIG_BLK_DEV_CRYPTOLOOP $CONFIG_BLK_DEV_LOOP
 dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
 
 tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/block/cryptoloop.c linux-2.4.26-leo/drivers/block/cryptoloop.c
--- linux-2.4.26/drivers/block/cryptoloop.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/block/cryptoloop.c	2004-05-26 23:34:14.000000000 +0100
@@ -0,0 +1,179 @@
+/*
+   Linux loop encryption enabling module
+
+   Copyright (C)  2002 Herbert Valerio Riedel <hvr@gnu.org>
+   Copyright (C)  2003 Fruhwirth Clemens <clemens@endorphin.org>
+
+   This module is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This module is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this module; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/blkdev.h>
+#include <linux/loop.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#include <asm/scatterlist.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
+MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
+
+static int
+cryptoloop_init(struct loop_device *lo, /* const */ struct loop_info *info)
+{
+	int err = -EINVAL;
+	char cms[LO_NAME_SIZE];			/* cipher-mode string */
+	char *cipher;
+	char *mode;
+	char *cmsp = cms;			/* c-m string pointer */
+	struct crypto_tfm *tfm = NULL;
+
+	/* encryption breaks for non sector aligned offsets */
+
+	if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
+		goto out;
+
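+	/* lo_name carries a "cipher-mode" string, e.g. "aes-cbc";
+	 * the mode defaults to cbc when none is given */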
+	strncpy(cms, info->lo_name, LO_NAME_SIZE);
+	cms[LO_NAME_SIZE - 1] = 0;
+	cipher = strsep(&cmsp, "-");
+	mode = strsep(&cmsp, "-");
+
+	if (mode == NULL || strcmp(mode, "cbc") == 0)
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
+	else if (strcmp(mode, "ecb") == 0)
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
+	if (tfm == NULL)
+		return -EINVAL;
+
+	err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
+					   info->lo_encrypt_key_size);
+	
+	if (err != 0)
+		goto out_free_tfm;
+
+	lo->key_data = tfm;
+	return 0;
+
+ out_free_tfm:
+	crypto_free_tfm(tfm);
+
+ out:
+	return err;
+}
+
+typedef int (*encdec_t)(struct crypto_tfm *tfm,
+			struct scatterlist *sg_out,
+			struct scatterlist *sg_in,
+			unsigned int nbytes, u8 *iv);
+
+static int
+cryptoloop_transfer(struct loop_device *lo, int cmd, char *raw_buf,
+		     char *loop_buf, int size, sector_t IV)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct scatterlist sg_out = { 0, };
+	struct scatterlist sg_in = { 0, };
+
+	encdec_t encdecfunc;
+	char const *in;
+	char *out;
+
+	if (cmd == READ) {
+		in = raw_buf;
+		out = loop_buf;
+		encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+	} else {
+		in = loop_buf;
+		out = raw_buf;
+		encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+	}
+
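+	/* walk the buffer one 512-byte sector at a time; each sector is
+	 * en/decrypted with the low 32 bits of its sector number
+	 * (little-endian, remaining IV bytes zero) as the IV */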
+	while (size > 0) {
+		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
+		u32 iv[4] = { 0, };
+		iv[0] = cpu_to_le32(IV & 0xffffffff);
+
+		sg_in.page = virt_to_page(in);
+		sg_in.offset = (unsigned long)in & ~PAGE_MASK;
+		sg_in.length = sz;
+
+		sg_out.page = virt_to_page(out);
+		sg_out.offset = (unsigned long)out & ~PAGE_MASK;
+		sg_out.length = sz;
+
+		encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+
+		IV++;
+		size -= sz;
+		in += sz;
+		out += sz;
+	}
+
+	return 0;
+}
+
+
+static int
+cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+static int
+cryptoloop_release(struct loop_device *lo)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	if (tfm != NULL) {
+		crypto_free_tfm(tfm);
+		lo->key_data = NULL;
+		return 0;
+	}
+	printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
+	return -EINVAL;
+}
+
+static struct loop_func_table cryptoloop_funcs = {
+	.number = LO_CRYPT_CRYPTOAPI,
+	.init = cryptoloop_init,
+	.ioctl = cryptoloop_ioctl,
+	.transfer = cryptoloop_transfer,
+	.release = cryptoloop_release,
+	/* .owner = THIS_MODULE */
+};
+
+static int __init
+init_cryptoloop(void)
+{
+	int rc = loop_register_transfer(&cryptoloop_funcs);
+
+	if (rc)
+		printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+	return rc;
+}
+
+static void __exit
+cleanup_cryptoloop(void)
+{
+	if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
+		printk(KERN_ERR
+			"cryptoloop: loop_unregister_transfer failed\n");
+}
+
+module_init(init_cryptoloop);
+module_exit(cleanup_cryptoloop);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/block/loop.c linux-2.4.26-leo/drivers/block/loop.c
--- linux-2.4.26/drivers/block/loop.c	2003-08-25 12:44:41.000000000 +0100
+++ linux-2.4.26-leo/drivers/block/loop.c	2004-05-26 23:34:14.000000000 +0100
@@ -88,7 +88,7 @@
  * Transfer functions
  */
 static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
-			 char *loop_buf, int size, int real_block)
+			 char *loop_buf, int size, sector_t IV)
 {
 	if (raw_buf != loop_buf) {
 		if (cmd == READ)
@@ -101,7 +101,7 @@
 }
 
 static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
-			char *loop_buf, int size, int real_block)
+			char *loop_buf, int size, sector_t IV)
 {
 	char	*in, *out, *key;
 	int	i, keysize;
@@ -189,7 +189,7 @@
 	len = bh->b_size;
 	data = bh->b_data;
 	while (len > 0) {
-		int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;
+		const sector_t IV = (index << (PAGE_CACHE_SHIFT - LOOP_IV_SECTOR_BITS)) + (offset >> LOOP_IV_SECTOR_BITS);
 		int transfer_result;
 
 		size = PAGE_CACHE_SIZE - offset;
@@ -249,7 +249,7 @@
 	unsigned long count = desc->count;
 	struct lo_read_data *p = (struct lo_read_data*)desc->buf;
 	struct loop_device *lo = p->lo;
-	int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;
+	const sector_t IV = (page->index << (PAGE_CACHE_SHIFT - LOOP_IV_SECTOR_BITS)) + (offset >> LOOP_IV_SECTOR_BITS);
 
 	if (size > count)
 		size = count;
@@ -301,20 +301,6 @@
 	return bs;
 }
 
-static inline unsigned long loop_get_iv(struct loop_device *lo,
-					unsigned long sector)
-{
-	int bs = loop_get_bs(lo);
-	unsigned long offset, IV;
-
-	IV = sector / (bs >> 9) + lo->lo_offset / bs;
-	offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
-	if (offset >= bs)
-		IV++;
-
-	return IV;
-}
-
 static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
 {
 	loff_t pos;
@@ -462,7 +448,7 @@
 {
 	struct buffer_head *bh = NULL;
 	struct loop_device *lo;
-	unsigned long IV;
+	sector_t IV;
 
 	if (!buffer_locked(rbh))
 		BUG();
@@ -507,7 +493,7 @@
 	 * piggy old buffer on original, and submit for I/O
 	 */
 	bh = loop_get_buffer(lo, rbh);
-	IV = loop_get_iv(lo, rbh->b_rsector);
+	IV = rbh->b_rsector + (lo->lo_offset >> LOOP_IV_SECTOR_BITS);
 	if (rw == WRITE) {
 		set_bit(BH_Dirty, &bh->b_state);
 		if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data,
@@ -544,7 +530,7 @@
 		bh->b_end_io(bh, !ret);
 	} else {
 		struct buffer_head *rbh = bh->b_private;
-		unsigned long IV = loop_get_iv(lo, rbh->b_rsector);
+		const sector_t IV = rbh->b_rsector + (lo->lo_offset >> LOOP_IV_SECTOR_BITS);
 
 		ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,
 				     bh->b_size, IV);
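
The loop.c changes above replace the old blocksize-relative IV with one
counted in fixed 512-byte units, so the IV no longer depends on the
filesystem block size.  A worked example of the new arithmetic, assuming
LOOP_IV_SECTOR_BITS == 9 and PAGE_CACHE_SHIFT == 12 (4 KiB pages):

	#include <assert.h>

	#define LOOP_IV_SECTOR_BITS 9	/* assumed: 512-byte IV units */
	#define PAGE_CACHE_SHIFT 12	/* assumed: 4 KiB page cache pages */

	int main(void)
	{
		unsigned long index = 3, offset = 1024;	/* page 3, byte 1024 */
		unsigned long long iv =
			((unsigned long long) index <<
			 (PAGE_CACHE_SHIFT - LOOP_IV_SECTOR_BITS)) +
			(offset >> LOOP_IV_SECTOR_BITS);
		assert(iv == 26);	/* 8 sectors per page * 3 + 2 */
		return 0;
	}
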
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/block/Makefile linux-2.4.26-leo/drivers/block/Makefile
--- linux-2.4.26/drivers/block/Makefile	2003-06-13 15:51:32.000000000 +0100
+++ linux-2.4.26-leo/drivers/block/Makefile	2004-05-26 23:34:14.000000000 +0100
@@ -31,6 +31,7 @@
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
 obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
+obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
 
 subdir-$(CONFIG_PARIDE) += paride
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/char/keyboard.c linux-2.4.26-leo/drivers/char/keyboard.c
--- linux-2.4.26/drivers/char/keyboard.c	2003-11-28 18:26:20.000000000 +0000
+++ linux-2.4.26-leo/drivers/char/keyboard.c	2004-06-22 20:53:46.000000000 +0100
@@ -545,6 +545,16 @@
 	if ((kbd->kbdmode == VC_RAW || kbd->kbdmode == VC_MEDIUMRAW) &&
 	    !(SPECIALS_ALLOWED_IN_RAW_MODE & (1 << value)))
 		return;
+
+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+	{
+		void *func = spec_fn_table[value];
+		if (func == show_state || func == show_ptregs ||
+		    func == show_mem)
+			return;
+	}
+#endif
+
 	spec_fn_table[value]();
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/char/mem.c linux-2.4.26-leo/drivers/char/mem.c
--- linux-2.4.26/drivers/char/mem.c	2003-11-28 18:26:20.000000000 +0000
+++ linux-2.4.26-leo/drivers/char/mem.c	2004-06-22 20:59:00.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/tty.h>
 #include <linux/capability.h>
 #include <linux/ptrace.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -42,6 +43,10 @@
 #if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
 extern void tapechar_init(void);
 #endif
+
+#ifdef CONFIG_GRKERNSEC
+extern struct file_operations grsec_fops;
+#endif
      
 static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
 			    const char * buf, size_t count, loff_t *ppos)
@@ -115,6 +120,11 @@
 	unsigned long p = *ppos;
 	unsigned long end_mem;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	gr_handle_mem_write();
+	return -EPERM;
+#endif
+
 	end_mem = __pa(high_memory);
 	if (p >= end_mem)
 		return 0;
@@ -187,6 +197,12 @@
 {
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	if (gr_handle_mem_mmap(offset, vma))
+		return -EPERM;
+#endif
+
+
 	/*
 	 * Accessing memory above the top the kernel knows about or
 	 * through a file pointer that was marked O_SYNC will be
@@ -286,6 +302,11 @@
 	ssize_t virtr = 0;
 	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	gr_handle_kmem_write();
+	return -EPERM;
+#endif
+
 	if (p < (unsigned long) high_memory) {
 		wrote = count;
 		if (count > (unsigned long) high_memory - p)
@@ -402,7 +423,23 @@
 			count = size;
 
 		zap_page_range(mm, addr, count);
-        	zeromap_page_range(addr, count, PAGE_COPY);
+		zeromap_page_range(addr, count, vma->vm_page_prot);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		if (vma->vm_flags & VM_MIRROR) {
+			unsigned long addr_m;
+			struct vm_area_struct * vma_m;
+
+			addr_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+			vma_m = find_vma(mm, addr_m);
+			if (vma_m && vma_m->vm_start == addr_m && (vma_m->vm_flags & VM_MIRROR)) {
+				addr_m = addr + (unsigned long)vma->vm_private_data;
+				zap_page_range(mm, addr_m, count);
+			} else
+				printk(KERN_ERR "PAX: VMMIRROR: read_zero bug, %08lx, %08lx\n",
+				       addr, vma->vm_start);
+		}
+#endif
 
 		size -= count;
 		buf += count;
@@ -525,6 +562,15 @@
 
 static int open_port(struct inode * inode, struct file * filp)
 {
+#ifdef CONFIG_GRKERNSEC_PORT
+	gr_handle_open_port();
+	return -EPERM;
+#endif
+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+static int open_mem(struct inode * inode, struct file * filp)
+{
 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
 
@@ -582,6 +628,11 @@
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long size = vma->vm_end - vma->vm_start;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	if (gr_handle_mem_mmap(offset, vma))
+		return -EPERM;
+#endif
+
 	/*
 	 * If the user is not attempting to mmap a high memory address then
 	 * the standard mmap_mem mechanism will work.  High memory addresses
@@ -617,7 +668,6 @@
 #define full_lseek      null_lseek
 #define write_zero	write_null
 #define read_full       read_zero
-#define open_mem	open_port
 #define open_kmem	open_mem
 
 static struct file_operations mem_fops = {
@@ -693,6 +743,11 @@
 		case 9:
 			filp->f_op = &urandom_fops;
 			break;
+#ifdef CONFIG_GRKERNSEC
+		case 10:
+			filp->f_op = &grsec_fops;
+			break;
+#endif
 		default:
 			return -ENXIO;
 	}
@@ -719,7 +774,10 @@
 	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
 	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
 	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
-	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
+	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
+#ifdef CONFIG_GRKERNSEC
+	{10,"grsec",   S_IRUSR | S_IWUGO,	    &grsec_fops}
+#endif
     };
     int i;
 
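
With CONFIG_GRKERNSEC enabled, the hunks above expose the grsecurity
control interface as a character device on the mem major (1), minor 10.
On a system without devfs the node has to be created by hand; a small
sketch using mknod(2), with path and mode mirroring the device table
entry above:

	#include <sys/types.h>
	#include <sys/stat.h>
	#include <sys/sysmacros.h>

	int main(void)
	{
		/* char device, major 1 (mem), minor 10 (grsec);
		   mode 0622 == S_IRUSR | S_IWUGO as registered above */
		return mknod("/dev/grsec", S_IFCHR | 0622, makedev(1, 10));
	}
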
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/char/random.c linux-2.4.26-leo/drivers/char/random.c
--- linux-2.4.26/drivers/char/random.c	2004-02-20 14:11:41.000000000 +0000
+++ linux-2.4.26-leo/drivers/char/random.c	2004-06-22 20:53:46.000000000 +0100
@@ -262,9 +262,15 @@
 /*
  * Configuration information
  */
+#ifdef CONFIG_GRKERNSEC_RANDNET
+#define DEFAULT_POOL_SIZE 1024
+#define SECONDARY_POOL_SIZE 256
+#define BATCH_ENTROPY_SIZE 512
+#else
 #define DEFAULT_POOL_SIZE 512
 #define SECONDARY_POOL_SIZE 128
 #define BATCH_ENTROPY_SIZE 256
+#endif
 #define USE_SHA
 
 /*
@@ -389,6 +395,7 @@
 /*
  * Static global variables
  */
+
 static struct entropy_store *random_state; /* The default global store */
 static struct entropy_store *sec_random_state; /* secondary store */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
@@ -2210,6 +2217,29 @@
 	return halfMD4Transform(hash, keyptr->secret);
 }
 
+#ifdef CONFIG_GRKERNSEC
+/* the following function is provided by PaX under the GPL */
+unsigned long get_random_long(void)
+{
+	static time_t rekey_time;
+	static __u32 secret[12];
+	time_t t;
+
+	/*
+	 * Pick a random secret every REKEY_INTERVAL seconds
+	 */
+	t = CURRENT_TIME;
+	if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
+		rekey_time = t;
+		get_random_bytes(secret, sizeof(secret));
+	}
+
+	secret[1] = halfMD4Transform(secret+8, secret);
+	secret[0] = halfMD4Transform(secret+8, secret);
+	return *(unsigned long *)secret;
+}
+#endif
+
 #ifdef CONFIG_SYN_COOKIES
 /*
  * Secure SYN cookie computation. This is the algorithm worked out by
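
get_random_long() above returns a keyed-hash output cheap enough to call
on every exec.  A hypothetical caller, sketching how a page-aligned
randomization delta might be derived from it (the 8-bit mask width here
is illustrative only, not taken from the patch):

	#ifdef CONFIG_GRKERNSEC
	static unsigned long example_mmap_delta(void)
	{
		/* keep the low bits, shift up to a page boundary */
		return (get_random_long() & 0xffUL) << PAGE_SHIFT;
	}
	#endif
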
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/char/tty_io.c linux-2.4.26-leo/drivers/char/tty_io.c
--- linux-2.4.26/drivers/char/tty_io.c	2004-05-19 21:34:38.000000000 +0100
+++ linux-2.4.26-leo/drivers/char/tty_io.c	2004-06-22 20:53:46.000000000 +0100
@@ -1404,7 +1404,11 @@
 		retval = -ENODEV;
 	filp->f_flags = saved_flags;
 
+#ifdef CONFIG_GRKERNSEC
+	if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !suser())
+#endif
 		retval = -EBUSY;
 
 	if (retval) {
@@ -1506,7 +1510,11 @@
 {
 	char ch, mbz = 0;
 
+#ifdef CONFIG_GRKERNSEC
+	if ((current->tty != tty) && !capable(CAP_SYS_TTY_CONFIG))
+#else
 	if ((current->tty != tty) && !suser())
+#endif
 		return -EPERM;
 	if (get_user(ch, arg))
 		return -EFAULT;
@@ -1544,7 +1552,11 @@
 	if (inode->i_rdev == SYSCONS_DEV ||
 	    inode->i_rdev == CONSOLE_DEV) {
 		struct file *f;
+#ifdef CONFIG_GRKERNSEC
+		if (!capable(CAP_SYS_TTY_CONFIG))
+#else
 		if (!suser())
+#endif
 			return -EPERM;
 		spin_lock(&redirect_lock);
 		f = redirect;
@@ -1596,7 +1608,11 @@
 		 * This tty is already the controlling
 		 * tty for another session group!
 		 */
+#ifdef CONFIG_GRKERNSEC
+		if ((arg == 1) && capable(CAP_SYS_ADMIN)) {
+#else
 		if ((arg == 1) && suser()) {
+#endif
 			/*
 			 * Steal it away
 			 */
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/char/vt.c linux-2.4.26-leo/drivers/char/vt.c
--- linux-2.4.26/drivers/char/vt.c	2002-11-28 23:53:12.000000000 +0000
+++ linux-2.4.26-leo/drivers/char/vt.c	2004-06-22 20:53:46.000000000 +0100
@@ -179,6 +179,11 @@
 	case KDSKBENT:
 		if (!perm)
 			return -EPERM;
+#ifdef CONFIG_GRKERNSEC
+		if (!capable(CAP_SYS_TTY_CONFIG))
+			return -EPERM;
+#endif
+
 		if (!i && v == K_NOSUCHMAP) {
 			/* disallocate map */
 			key_map = key_maps[s];
@@ -301,6 +306,11 @@
 		if (!perm)
 			return -EPERM;
 
+#ifdef CONFIG_GRKERNSEC
+		if (!capable(CAP_SYS_TTY_CONFIG))
+			return -EPERM;
+#endif
+
 		q = func_table[i];
 		first_free = funcbufptr + (funcbufsize - funcbufleft);
 		for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) 
@@ -443,7 +453,11 @@
 	 * to be the owner of the tty, or super-user.
 	 */
 	perm = 0;
+#ifdef CONFIG_GRKERNSEC
+	if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+#else
 	if (current->tty == tty || suser())
+#endif
 		perm = 1;
  
 	kbd = kbd_table + console;
@@ -1038,12 +1052,20 @@
 		return do_unimap_ioctl(cmd, (struct unimapdesc *)arg, perm);
 
 	case VT_LOCKSWITCH:
+#ifdef CONFIG_GRKERNSEC
+		if (!capable(CAP_SYS_TTY_CONFIG))
+#else
 		if (!suser())
+#endif
 		   return -EPERM;
 		vt_dont_switch = 1;
 		return 0;
 	case VT_UNLOCKSWITCH:
+#ifdef CONFIG_GRKERNSEC
+		if (!capable(CAP_SYS_TTY_CONFIG))
+#else
 		if (!suser())
+#endif
 		   return -EPERM;
 		vt_dont_switch = 0;
 		return 0;
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/ieee1394/csr.c linux-2.4.26-leo/drivers/ieee1394/csr.c
--- linux-2.4.26/drivers/ieee1394/csr.c	2004-02-20 14:11:42.000000000 +0000
+++ linux-2.4.26-leo/drivers/ieee1394/csr.c	2004-05-26 23:34:34.000000000 +0100
@@ -18,6 +18,7 @@
  */
 
 #include <linux/string.h>
+#include <linux/sched.h>
 #include <linux/module.h> /* needed for MODULE_PARM */
 #include <linux/param.h>
 #include <linux/spinlock.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/Config.in linux-2.4.26-leo/drivers/md/Config.in
--- linux-2.4.26/drivers/md/Config.in	2001-09-14 22:22:18.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/Config.in	2004-05-26 23:34:27.000000000 +0100
@@ -12,7 +12,11 @@
 dep_tristate '  RAID-1 (mirroring) mode' CONFIG_MD_RAID1 $CONFIG_BLK_DEV_MD
 dep_tristate '  RAID-4/RAID-5 mode' CONFIG_MD_RAID5 $CONFIG_BLK_DEV_MD
 dep_tristate '  Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD
+dep_bool     '  Verbose startup messages' CONFIG_MD_VERBMSG $CONFIG_BLK_DEV_MD
+
 
 dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
+dep_tristate ' Device-mapper support' CONFIG_BLK_DEV_DM $CONFIG_MD
+dep_tristate '  Mirror (RAID-1) support' CONFIG_BLK_DEV_DM_MIRROR $CONFIG_BLK_DEV_DM
 
 endmenu
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm.c linux-2.4.26-leo/drivers/md/dm.c
--- linux-2.4.26/drivers/md/dm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,1115 @@
+/*
+ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "kcopyd.h"
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/blk.h>
+#include <linux/blkpg.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/lvm.h>
+
+#include <asm/uaccess.h>
+
+static const char *_name = DM_NAME;
+#define DEFAULT_READ_AHEAD 64
+
+struct dm_io {
+	struct mapped_device *md;
+
+	struct dm_target *ti;
+	int rw;
+	union map_info map_context;
+	void (*end_io) (struct buffer_head * bh, int uptodate);
+	void *context;
+};
+
+struct deferred_io {
+	int rw;
+	struct buffer_head *bh;
+	struct deferred_io *next;
+};
+
+/*
+ * Bits for the md->flags field.
+ */
+#define DMF_BLOCK_IO 0
+#define DMF_SUSPENDED 1
+
+struct mapped_device {
+	struct rw_semaphore lock;
+	atomic_t holders;
+
+	kdev_t dev;
+	unsigned long flags;
+
+	/*
+	 * A list of ios that arrived while we were suspended.
+	 */
+	atomic_t pending;
+	wait_queue_head_t wait;
+	struct deferred_io *deferred;
+
+	/*
+	 * The current mapping.
+	 */
+	struct dm_table *map;
+
+	/*
+	 * io objects are allocated from here.
+	 */
+	mempool_t *io_pool;
+
+	/*
+	 * Event handling.
+	 */
+	uint32_t event_nr;
+	wait_queue_head_t eventq;
+};
+
+#define MIN_IOS 256
+static kmem_cache_t *_io_cache;
+
+static struct mapped_device *get_kdev(kdev_t dev);
+static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh);
+static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb);
+
+/*-----------------------------------------------------------------
+ * In order to avoid the 256 minor number limit we are going to
+ * register more major numbers as necessary.
+ *---------------------------------------------------------------*/
+#define MAX_MINORS (1 << MINORBITS)
+
+struct major_details {
+	unsigned int major;
+
+	int transient;
+	struct list_head transient_list;
+
+	unsigned int first_free_minor;
+	int nr_free_minors;
+
+	struct mapped_device *mds[MAX_MINORS];
+	int blk_size[MAX_MINORS];
+	int blksize_size[MAX_MINORS];
+	int hardsect_size[MAX_MINORS];
+};
+
+static struct rw_semaphore _dev_lock;
+static struct major_details *_majors[MAX_BLKDEV];
+
+/*
+ * This holds a list of majors that non-specified device numbers
+ * may be allocated from.  Only majors with free minors appear on
+ * this list.
+ */
+static LIST_HEAD(_transients_free);
+
+static int __alloc_major(unsigned int major, struct major_details **result)
+{
+	int r;
+	unsigned int transient = !major;
+	struct major_details *maj;
+
+	/* Major already allocated? */
+	if (major && _majors[major])
+		return 0;
+
+	maj = kmalloc(sizeof(*maj), GFP_KERNEL);
+	if (!maj)
+		return -ENOMEM;
+
+	memset(maj, 0, sizeof(*maj));
+	INIT_LIST_HEAD(&maj->transient_list);
+
+	maj->nr_free_minors = MAX_MINORS;
+
+	r = register_blkdev(major, _name, &dm_blk_dops);
+	if (r < 0) {
+		DMERR("register_blkdev failed for %d", major);
+		kfree(maj);
+		return r;
+	}
+	if (r > 0)
+		major = r;
+
+	maj->major = major;
+
+	if (transient) {
+		maj->transient = transient;
+		list_add_tail(&maj->transient_list, &_transients_free);
+	}
+
+	_majors[major] = maj;
+
+	blk_size[major] = maj->blk_size;
+	blksize_size[major] = maj->blksize_size;
+	hardsect_size[major] = maj->hardsect_size;
+	read_ahead[major] = DEFAULT_READ_AHEAD;
+
+	blk_queue_make_request(BLK_DEFAULT_QUEUE(major), dm_request);
+
+	*result = maj;
+	return 0;
+}
+
+static void __free_major(struct major_details *maj)
+{
+	unsigned int major = maj->major;
+
+	list_del(&maj->transient_list);
+
+	read_ahead[major] = 0;
+	blk_size[major] = NULL;
+	blksize_size[major] = NULL;
+	hardsect_size[major] = NULL;
+
+	_majors[major] = NULL;
+	kfree(maj);
+
+	if (unregister_blkdev(major, _name) < 0)
+		DMERR("devfs_unregister_blkdev failed");
+}
+
+static void free_all_majors(void)
+{
+	unsigned int major = ARRAY_SIZE(_majors);
+
+	down_write(&_dev_lock);
+
+	while (major--)
+		if (_majors[major])
+			__free_major(_majors[major]);
+
+	up_write(&_dev_lock);
+}
+
+static void free_dev(kdev_t dev)
+{
+	unsigned int major = major(dev);
+	unsigned int minor = minor(dev);
+	struct major_details *maj;
+
+	down_write(&_dev_lock);
+
+	maj = _majors[major];
+	if (!maj)
+		goto out;
+
+	maj->mds[minor] = NULL;
+	maj->nr_free_minors++;
+
+	if (maj->nr_free_minors == MAX_MINORS) {
+		__free_major(maj);
+		goto out;
+	}
+
+	if (!maj->transient)
+		goto out;
+
+	if (maj->nr_free_minors == 1)
+		list_add_tail(&maj->transient_list, &_transients_free);
+
+	if (minor < maj->first_free_minor)
+		maj->first_free_minor = minor;
+
+      out:
+	up_write(&_dev_lock);
+}
+
+static void __alloc_minor(struct major_details *maj, unsigned int minor,
+			  struct mapped_device *md)
+{
+	maj->mds[minor] = md;
+	md->dev = mk_kdev(maj->major, minor);
+	maj->nr_free_minors--;
+
+	if (maj->transient && !maj->nr_free_minors)
+		list_del_init(&maj->transient_list);
+}
+
+/*
+ * See if requested kdev_t is available.
+ */
+static int specific_dev(kdev_t dev, struct mapped_device *md)
+{
+	int r = 0;
+	unsigned int major = major(dev);
+	unsigned int minor = minor(dev);
+	struct major_details *maj;
+
+	if (!major || (major >= MAX_BLKDEV) || (minor >= MAX_MINORS)) {
+		DMWARN("device number requested out of range (%d, %d)",
+		       major, minor);
+		return -EINVAL;
+	}
+
+	down_write(&_dev_lock);
+	maj = _majors[major];
+
+	/* Register requested major? */
+	if (!maj) {
+		r = __alloc_major(major, &maj);
+		if (r)
+			goto out;
+
+		major = maj->major;
+	}
+
+	if (maj->mds[minor]) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	__alloc_minor(maj, minor, md);
+
+      out:
+	up_write(&_dev_lock);
+
+	return r;
+}
+
+/*
+ * Find first unused device number, requesting a new major number if required.
+ */
+static int first_free_dev(struct mapped_device *md)
+{
+	int r = 0;
+	struct major_details *maj;
+
+	down_write(&_dev_lock);
+
+	if (list_empty(&_transients_free)) {
+		r = __alloc_major(0, &maj);
+		if (r)
+			goto out;
+	} else
+		maj = list_entry(_transients_free.next, struct major_details,
+				 transient_list);
+
+	while (maj->mds[maj->first_free_minor++])
+		;
+
+	__alloc_minor(maj, maj->first_free_minor - 1, md);
+
+      out:
+	up_write(&_dev_lock);
+
+	return r;
+}
+
+static struct mapped_device *get_kdev(kdev_t dev)
+{
+	struct mapped_device *md;
+	struct major_details *maj;
+
+	down_read(&_dev_lock);
+	maj = _majors[major(dev)];
+	if (!maj) {
+		md = NULL;
+		goto out;
+	}
+	md = maj->mds[minor(dev)];
+	if (md)
+		dm_get(md);
+      out:
+	up_read(&_dev_lock);
+
+	return md;
+}
+
+/*-----------------------------------------------------------------
+ * init/exit code
+ *---------------------------------------------------------------*/
+
+static __init int local_init(void)
+{
+	init_rwsem(&_dev_lock);
+
+	/* allocate a slab for the dm_ios */
+	_io_cache = kmem_cache_create("dm io",
+				      sizeof(struct dm_io), 0, 0, NULL, NULL);
+
+	if (!_io_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void local_exit(void)
+{
+	kmem_cache_destroy(_io_cache);
+	free_all_majors();
+
+	DMINFO("cleaned up");
+}
+
+/*
+ * We have a lot of init/exit functions, so it seems easier to
+ * store them in an array.  The disposable macro 'xx'
+ * expands a prefix into a pair of function names.
+ */
+static struct {
+	int (*init) (void);
+	void (*exit) (void);
+
+} _inits[] = {
+#define xx(n) {n ## _init, n ## _exit},
+	xx(local)
+	xx(kcopyd)
+	xx(dm_target)
+	xx(dm_linear)
+	xx(dm_stripe)
+	xx(dm_snapshot)
+	xx(dm_interface)
+#undef xx
+};
+
+static int __init dm_init(void)
+{
+	const int count = ARRAY_SIZE(_inits);
+
+	int r, i;
+
+	for (i = 0; i < count; i++) {
+		r = _inits[i].init();
+		if (r)
+			goto bad;
+	}
+
+	return 0;
+
+      bad:
+	while (i--)
+		_inits[i].exit();
+
+	return r;
+}
+
+static void __exit dm_exit(void)
+{
+	int i = ARRAY_SIZE(_inits);
+
+	while (i--)
+		_inits[i].exit();
+}
+
+/*
+ * Block device functions
+ */
+static int dm_blk_open(struct inode *inode, struct file *file)
+{
+	struct mapped_device *md;
+
+	md = get_kdev(inode->i_rdev);
+	if (!md)
+		return -ENXIO;
+
+	return 0;
+}
+
+static int dm_blk_close(struct inode *inode, struct file *file)
+{
+	struct mapped_device *md;
+
+	md = get_kdev(inode->i_rdev);
+	dm_put(md);		/* put the reference gained by dm_blk_open */
+	dm_put(md);
+	return 0;
+}
+
+static inline struct dm_io *alloc_io(struct mapped_device *md)
+{
+	return mempool_alloc(md->io_pool, GFP_NOIO);
+}
+
+static inline void free_io(struct mapped_device *md, struct dm_io *io)
+{
+	mempool_free(io, md->io_pool);
+}
+
+static inline struct deferred_io *alloc_deferred(void)
+{
+	return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
+}
+
+static inline void free_deferred(struct deferred_io *di)
+{
+	kfree(di);
+}
+
+static inline sector_t volume_size(kdev_t dev)
+{
+	return blk_size[major(dev)][minor(dev)] << 1;
+}
+
+/* FIXME: check this */
+static int dm_blk_ioctl(struct inode *inode, struct file *file,
+			unsigned int command, unsigned long a)
+{
+	kdev_t dev = inode->i_rdev;
+	long size;
+
+	switch (command) {
+	case BLKROSET:
+	case BLKROGET:
+	case BLKRASET:
+	case BLKRAGET:
+	case BLKFLSBUF:
+	case BLKSSZGET:
+		//case BLKRRPART: /* Re-read partition tables */
+		//case BLKPG:
+	case BLKELVGET:
+	case BLKELVSET:
+	case BLKBSZGET:
+	case BLKBSZSET:
+		return blk_ioctl(dev, command, a);
+		break;
+
+	case BLKGETSIZE:
+		size = volume_size(dev);
+		if (copy_to_user((void *) a, &size, sizeof(long)))
+			return -EFAULT;
+		break;
+
+	case BLKGETSIZE64:
+		size = volume_size(dev);
+		if (put_user((u64) ((u64) size) << 9, (u64 *) a))
+			return -EFAULT;
+		break;
+
+	case BLKRRPART:
+		return -ENOTTY;
+
+	case LV_BMAP:
+		return dm_user_bmap(inode, (struct lv_bmap *) a);
+
+	default:
+		DMWARN("unknown block ioctl 0x%x", command);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+/*
+ * Add the buffer to the list of deferred io.
+ */
+static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
+{
+	struct deferred_io *di;
+
+	di = alloc_deferred();
+	if (!di)
+		return -ENOMEM;
+
+	down_write(&md->lock);
+
+	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
+		up_write(&md->lock);
+		free_deferred(di);
+		return 1;
+	}
+
+	di->bh = bh;
+	di->rw = rw;
+	di->next = md->deferred;
+	md->deferred = di;
+
+	up_write(&md->lock);
+	return 0;		/* deferred successfully */
+}
+
+/*
+ * bh->b_end_io routine that decrements the pending count
+ * and then calls the original bh->b_end_io fn.
+ */
+static void dec_pending(struct buffer_head *bh, int uptodate)
+{
+	int r;
+	struct dm_io *io = bh->b_private;
+	dm_endio_fn endio = io->ti->type->end_io;
+
+	if (endio) {
+		r = endio(io->ti, bh, io->rw, uptodate ? 0 : -EIO,
+			  &io->map_context);
+		if (r < 0)
+			uptodate = 0;
+
+		else if (r > 0)
+			/* the target wants another shot at the io */
+			return;
+	}
+
+	if (atomic_dec_and_test(&io->md->pending))
+		/* nudge anyone waiting on suspend queue */
+		wake_up(&io->md->wait);
+
+	bh->b_end_io = io->end_io;
+	bh->b_private = io->context;
+	free_io(io->md, io);
+
+	bh->b_end_io(bh, uptodate);
+}
+
+/*
+ * Do the bh mapping for a given leaf
+ */
+static inline int __map_buffer(struct mapped_device *md, int rw,
+			       struct buffer_head *bh, struct dm_io *io)
+{
+	struct dm_target *ti;
+
+	if (!md->map)
+		return -EINVAL;
+
+	ti = dm_table_find_target(md->map, bh->b_rsector);
+	if (!ti->type)
+		return -EINVAL;
+
+	/* hook the end io request fn */
+	atomic_inc(&md->pending);
+	io->md = md;
+	io->ti = ti;
+	io->rw = rw;
+	io->end_io = bh->b_end_io;
+	io->context = bh->b_private;
+	bh->b_end_io = dec_pending;
+	bh->b_private = io;
+
+	return ti->type->map(ti, bh, rw, &io->map_context);
+}
+
+/*
+ * Checks to see if we should be deferring io, if so it queues it
+ * and returns 1.
+ */
+static inline int __deferring(struct mapped_device *md, int rw,
+			      struct buffer_head *bh)
+{
+	int r;
+
+	/*
+	 * If we're suspended we have to queue this io for later.
+	 */
+	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
+		up_read(&md->lock);
+
+		/*
+		 * There's no point deferring a read ahead
+		 * request, just drop it.
+		 */
+		if (rw == READA) {
+			down_read(&md->lock);
+			return -EIO;
+		}
+
+		r = queue_io(md, bh, rw);
+		down_read(&md->lock);
+
+		if (r < 0)
+			return r;
+
+		if (r == 0)
+			return 1;	/* deferred successfully */
+
+	}
+
+	return 0;
+}
+
+static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh)
+{
+	int r;
+	struct dm_io *io;
+	struct mapped_device *md;
+
+	md = get_kdev(bh->b_rdev);
+	if (!md) {
+		buffer_IO_error(bh);
+		return 0;
+	}
+
+	io = alloc_io(md);
+	down_read(&md->lock);
+
+	r = __deferring(md, rw, bh);
+	if (r < 0)
+		goto bad;
+
+	else if (!r) {
+		/* not deferring */
+		r = __map_buffer(md, rw, bh, io);
+		if (r < 0)
+			goto bad;
+	} else
+		r = 0;
+
+	up_read(&md->lock);
+	dm_put(md);
+	return r;
+
+      bad:
+	buffer_IO_error(bh);
+	up_read(&md->lock);
+	dm_put(md);
+	return 0;
+}
+
+static int check_dev_size(kdev_t dev, unsigned long block)
+{
+	unsigned int major = major(dev);
+	unsigned int minor = minor(dev);
+
+	/* FIXME: check this */
+	unsigned long max_sector = (blk_size[major][minor] << 1) + 1;
+	unsigned long sector = (block + 1) * (blksize_size[major][minor] >> 9);
+
+	return (sector > max_sector) ? 0 : 1;
+}
+
+/*
+ * Creates a dummy buffer head and maps it (for lilo).
+ */
+static int __bmap(struct mapped_device *md, kdev_t dev, unsigned long block,
+		  kdev_t *r_dev, unsigned long *r_block)
+{
+	struct buffer_head bh;
+	struct dm_target *ti;
+	union map_info map_context;
+	int r;
+
+	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
+		return -EPERM;
+	}
+
+	if (!check_dev_size(dev, block)) {
+		return -EINVAL;
+	}
+
+	if (!md->map)
+		return -EINVAL;
+
+	/* setup dummy bh */
+	memset(&bh, 0, sizeof(bh));
+	bh.b_blocknr = block;
+	bh.b_dev = bh.b_rdev = dev;
+	bh.b_size = blksize_size[major(dev)][minor(dev)];
+	bh.b_rsector = block * (bh.b_size >> 9);
+
+	/* find target */
+	ti = dm_table_find_target(md->map, bh.b_rsector);
+
+	/* do the mapping */
+	r = ti->type->map(ti, &bh, READ, &map_context);
+	ti->type->end_io(ti, &bh, READ, 0, &map_context);
+
+	if (!r) {
+		*r_dev = bh.b_rdev;
+		*r_block = bh.b_rsector / (bh.b_size >> 9);
+	}
+
+	return r;
+}
+
+/*
+ * Marshals arguments and results between user and kernel space.
+ */
+static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
+{
+	struct mapped_device *md;
+	unsigned long block, r_block;
+	kdev_t r_dev;
+	int r;
+
+	if (get_user(block, &lvb->lv_block))
+		return -EFAULT;
+
+	md = get_kdev(inode->i_rdev);
+	if (!md)
+		return -ENXIO;
+
+	down_read(&md->lock);
+	r = __bmap(md, inode->i_rdev, block, &r_dev, &r_block);
+	up_read(&md->lock);
+	dm_put(md);
+
+	if (!r && (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) ||
+		   put_user(r_block, &lvb->lv_block)))
+		r = -EFAULT;
+
+	return r;
+}
+
+static void free_md(struct mapped_device *md)
+{
+	free_dev(md->dev);
+	mempool_destroy(md->io_pool);
+	kfree(md);
+}
+
+/*
+ * Allocate and initialise a blank device with a given minor.
+ */
+static struct mapped_device *alloc_md(kdev_t dev)
+{
+	int r;
+	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+
+	if (!md) {
+		DMWARN("unable to allocate device, out of memory.");
+		return NULL;
+	}
+
+	memset(md, 0, sizeof(*md));
+
+	/* Allocate suitable device number */
+	if (!dev)
+		r = first_free_dev(md);
+	else
+		r = specific_dev(dev, md);
+
+	if (r) {
+		kfree(md);
+		return NULL;
+	}
+
+	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
+				     mempool_free_slab, _io_cache);
+	if (!md->io_pool) {
+		free_dev(md->dev);
+		kfree(md);
+		return NULL;
+	}
+
+	init_rwsem(&md->lock);
+	atomic_set(&md->holders, 1);
+	atomic_set(&md->pending, 0);
+	init_waitqueue_head(&md->wait);
+	init_waitqueue_head(&md->eventq);
+
+	return md;
+}
+
+/*
+ * The hardsect size for a mapped device is the largest hardsect size
+ * from the devices it maps onto.
+ */
+static int __find_hardsect_size(struct list_head *devices)
+{
+	int result = 512, size;
+	struct list_head *tmp;
+
+	list_for_each (tmp, devices) {
+		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		size = get_hardsect_size(dd->dev);
+		if (size > result)
+			result = size;
+	}
+
+	return result;
+}
+
+/*
+ * Bind a table to the device.
+ */
+static void event_callback(void *context)
+{
+	struct mapped_device *md = (struct mapped_device *) context;
+
+	down_write(&md->lock);
+	md->event_nr++;
+	wake_up_interruptible(&md->eventq);
+	up_write(&md->lock);
+}
+
+static int __bind(struct mapped_device *md, struct dm_table *t)
+{
+	unsigned int minor = minor(md->dev);
+	unsigned int major = major(md->dev);
+	md->map = t;
+
+	/* in k */
+	blk_size[major][minor] = dm_table_get_size(t) >> 1;
+	blksize_size[major][minor] = BLOCK_SIZE;
+	hardsect_size[major][minor] =
+	    __find_hardsect_size(dm_table_get_devices(t));
+	register_disk(NULL, md->dev, 1, &dm_blk_dops, blk_size[major][minor]);
+
+	dm_table_event_callback(md->map, event_callback, md);
+	dm_table_get(t);
+	return 0;
+}
+
+static void __unbind(struct mapped_device *md)
+{
+	unsigned int minor = minor(md->dev);
+	unsigned int major = major(md->dev);
+
+	if (md->map) {
+		dm_table_event_callback(md->map, NULL, NULL);
+		dm_table_put(md->map);
+		md->map = NULL;
+
+	}
+
+	blk_size[major][minor] = 0;
+	blksize_size[major][minor] = 0;
+	hardsect_size[major][minor] = 0;
+}
+
+/*
+ * Constructor for a new device.
+ */
+int dm_create(kdev_t dev, struct mapped_device **result)
+{
+	struct mapped_device *md;
+
+	md = alloc_md(dev);
+	if (!md)
+		return -ENXIO;
+
+	__unbind(md);	/* Ensure zero device size */
+
+	*result = md;
+	return 0;
+}
+
+void dm_get(struct mapped_device *md)
+{
+	atomic_inc(&md->holders);
+}
+
+void dm_put(struct mapped_device *md)
+{
+	if (atomic_dec_and_test(&md->holders)) {
+		if (md->map)
+			dm_table_suspend_targets(md->map);
+		__unbind(md);
+		free_md(md);
+	}
+}
+
+/*
+ * Requeue the deferred io by calling generic_make_request.
+ */
+static void flush_deferred_io(struct deferred_io *c)
+{
+	struct deferred_io *n;
+
+	while (c) {
+		n = c->next;
+		generic_make_request(c->rw, c->bh);
+		free_deferred(c);
+		c = n;
+	}
+}
+
+/*
+ * Swap in a new table (destroying old one).
+ */
+int dm_swap_table(struct mapped_device *md, struct dm_table *table)
+{
+	int r;
+
+	down_write(&md->lock);
+
+	/*
+	 * The device must be suspended, or have no table bound yet.
+	 */
+	if (md->map && !test_bit(DMF_SUSPENDED, &md->flags)) {
+		up_write(&md->lock);
+		return -EPERM;
+	}
+
+	__unbind(md);
+	r = __bind(md, table);
+
+	/* drop the lock before returning, even if __bind() failed */
+	up_write(&md->lock);
+
+	return r;
+}
+
+/*
+ * We need to be able to change a mapping table under a mounted
+ * filesystem.  For example we might want to move some data in
+ * the background.  Before the table can be swapped with
+ * dm_swap_table(), dm_suspend() must be called to flush any in-
+ * flight io and ensure that any further io gets deferred.
+ */
+int dm_suspend(struct mapped_device *md)
+{
+	int r = 0;
+	DECLARE_WAITQUEUE(wait, current);
+
+	down_write(&md->lock);
+
+	/*
+	 * First we set the BLOCK_IO flag so no more ios will be
+	 * mapped.
+	 */
+	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
+		up_write(&md->lock);
+		return -EINVAL;
+	}
+
+	set_bit(DMF_BLOCK_IO, &md->flags);
+	add_wait_queue(&md->wait, &wait);
+	up_write(&md->lock);
+
+	/*
+	 * Then we wait for the already mapped ios to
+	 * complete.
+	 */
+	run_task_queue(&tq_disk);
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (!atomic_read(&md->pending) || signal_pending(current))
+			break;
+
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	down_write(&md->lock);
+	remove_wait_queue(&md->wait, &wait);
+
+	/* did we flush everything ? */
+	if (atomic_read(&md->pending)) {
+		clear_bit(DMF_BLOCK_IO, &md->flags);
+		r = -EINTR;
+	} else {
+		set_bit(DMF_SUSPENDED, &md->flags);
+		if (md->map)
+			dm_table_suspend_targets(md->map);
+	}
+	up_write(&md->lock);
+
+	return r;
+}
+
+int dm_resume(struct mapped_device *md)
+{
+	struct deferred_io *def;
+
+	down_write(&md->lock);
+	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
+		up_write(&md->lock);
+		return -EINVAL;
+	}
+
+	if (md->map)
+		dm_table_resume_targets(md->map);
+
+	clear_bit(DMF_SUSPENDED, &md->flags);
+	clear_bit(DMF_BLOCK_IO, &md->flags);
+	def = md->deferred;
+	md->deferred = NULL;
+	up_write(&md->lock);
+
+	flush_deferred_io(def);
+	run_task_queue(&tq_disk);
+
+	return 0;
+}
+
+struct dm_table *dm_get_table(struct mapped_device *md)
+{
+	struct dm_table *t;
+
+	down_read(&md->lock);
+	t = md->map;
+	if (t)
+		dm_table_get(t);
+	up_read(&md->lock);
+
+	return t;
+}
+
+/*-----------------------------------------------------------------
+ * Event notification.
+ *---------------------------------------------------------------*/
+uint32_t dm_get_event_nr(struct mapped_device *md)
+{
+	uint32_t r;
+
+	down_read(&md->lock);
+	r = md->event_nr;
+	up_read(&md->lock);
+
+	return r;
+}
+
+int dm_add_wait_queue(struct mapped_device *md, wait_queue_t *wq,
+		      uint32_t event_nr)
+{
+	down_write(&md->lock);
+	if (event_nr != md->event_nr) {
+		up_write(&md->lock);
+		return 1;
+	}
+
+	add_wait_queue(&md->eventq, wq);
+	up_write(&md->lock);
+
+	return 0;
+}
+
+const char *dm_kdevname(kdev_t dev)
+{
+	static char buffer[32];
+	sprintf(buffer, "%03d:%03d", MAJOR(dev), MINOR(dev));
+	return buffer;
+}
+
+void dm_remove_wait_queue(struct mapped_device *md, wait_queue_t *wq)
+{
+	down_write(&md->lock);
+	remove_wait_queue(&md->eventq, wq);
+	up_write(&md->lock);
+}
+
+kdev_t dm_kdev(struct mapped_device *md)
+{
+	kdev_t dev;
+
+	down_read(&md->lock);
+	dev = md->dev;
+	up_read(&md->lock);
+
+	return dev;
+}
+
+int dm_suspended(struct mapped_device *md)
+{
+	return test_bit(DMF_SUSPENDED, &md->flags);
+}
+
+struct block_device_operations dm_blk_dops = {
+	.open = dm_blk_open,
+	.release = dm_blk_close,
+	.ioctl = dm_blk_ioctl,
+	.owner = THIS_MODULE
+};
+
+/*
+ * module hooks
+ */
+module_init(dm_init);
+module_exit(dm_exit);
+
+MODULE_DESCRIPTION(DM_NAME " driver");
+MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(dm_kdevname);
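
As the comment above dm_suspend() explains, replacing a live mapping is
a three-step sequence: suspend, swap the table, resume.  A sketch of the
sequence an ioctl handler would follow ('new_table' is assumed to have
been built with dm_table_create()/dm_table_complete(); error handling is
abbreviated):

	static int example_reload(struct mapped_device *md,
				  struct dm_table *new_table)
	{
		int r;

		r = dm_suspend(md);	/* block new io, flush in-flight io */
		if (r)
			return r;

		r = dm_swap_table(md, new_table);	/* bind the new mapping */
		if (r)
			return r;

		return dm_resume(md);	/* replay the deferred io */
	}
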
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-daemon.c linux-2.4.26-leo/drivers/md/dm-daemon.c
--- linux-2.4.26/drivers/md/dm-daemon.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-daemon.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+#include <linux/module.h>
+#include <linux/sched.h>
+
+static int daemon(void *arg)
+{
+	struct dm_daemon *dd = (struct dm_daemon *) arg;
+	DECLARE_WAITQUEUE(wq, current);
+
+	daemonize();
+	reparent_to_init();
+
+	/* block all signals */
+	spin_lock_irq(&current->sigmask_lock);
+	sigfillset(&current->blocked);
+	flush_signals(current);
+	spin_unlock_irq(&current->sigmask_lock);
+
+	strcpy(current->comm, dd->name);
+	atomic_set(&dd->please_die, 0);
+
+	add_wait_queue(&dd->job_queue, &wq);
+
+	down(&dd->run_lock);
+	up(&dd->start_lock);
+
+	/*
+	 * dd->fn() could do anything, very likely it will
+	 * suspend.  So we can't set the state to
+	 * TASK_INTERRUPTIBLE before calling it.  In order to
+	 * prevent a race with a waking thread we do this little
+	 * dance with the dd->woken variable.
+	 */
+	while (1) {
+		do {
+			set_current_state(TASK_RUNNING);
+
+			if (atomic_read(&dd->please_die))
+				goto out;
+
+			atomic_set(&dd->woken, 0);
+			dd->fn();
+			yield();
+
+			set_current_state(TASK_INTERRUPTIBLE);
+		} while (atomic_read(&dd->woken));
+
+		schedule();
+	}
+
+ out:
+	remove_wait_queue(&dd->job_queue, &wq);
+	up(&dd->run_lock);
+	return 0;
+}
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void))
+{
+	pid_t pid = 0;
+
+	/*
+	 * Initialise the dm_daemon.
+	 */
+	dd->fn = fn;
+	strncpy(dd->name, name, sizeof(dd->name) - 1);
+	dd->name[sizeof(dd->name) - 1] = '\0';	/* strncpy need not terminate */
+	sema_init(&dd->start_lock, 1);
+	sema_init(&dd->run_lock, 1);
+	init_waitqueue_head(&dd->job_queue);
+
+	/*
+	 * Start the new thread.
+	 */
+	down(&dd->start_lock);
+	pid = kernel_thread(daemon, dd, 0);
+	if (pid <= 0) {
+		DMERR("Failed to start %s thread", name);
+		return -EAGAIN;
+	}
+
+	/*
+	 * Wait for the daemon to up the start_lock semaphore.
+	 */
+	down(&dd->start_lock);
+	up(&dd->start_lock);
+
+	return 0;
+}
+
+void dm_daemon_stop(struct dm_daemon *dd)
+{
+	atomic_set(&dd->please_die, 1);
+	dm_daemon_wake(dd);
+	down(&dd->run_lock);
+	up(&dd->run_lock);
+}
+
+void dm_daemon_wake(struct dm_daemon *dd)
+{
+	atomic_set(&dd->woken, 1);
+	wake_up_interruptible(&dd->job_queue);
+}
+
+EXPORT_SYMBOL(dm_daemon_start);
+EXPORT_SYMBOL(dm_daemon_stop);
+EXPORT_SYMBOL(dm_daemon_wake);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-daemon.h linux-2.4.26-leo/drivers/md/dm-daemon.h
--- linux-2.4.26/drivers/md/dm-daemon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-daemon.h	2004-06-22 21:04:39.000000000 +0100
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_DAEMON_H
+#define DM_DAEMON_H
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+struct dm_daemon {
+	void (*fn)(void);
+	char name[16];
+	atomic_t please_die;
+	struct semaphore start_lock;
+	struct semaphore run_lock;
+
+	atomic_t woken;
+	wait_queue_head_t job_queue;
+};
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void));
+void dm_daemon_stop(struct dm_daemon *dd);
+void dm_daemon_wake(struct dm_daemon *dd);
+int dm_daemon_running(struct dm_daemon *dd);
+
+#endif
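
A sketch of a dm-daemon client, assuming a hypothetical do_work()
routine that drains the client's own job queue; producers call
dm_daemon_wake() each time they queue new work:

	static struct dm_daemon _worker;

	static void do_work(void)
	{
		/* process everything currently on the client's queue */
	}

	static int client_init(void)
	{
		return dm_daemon_start(&_worker, "example", do_work);
	}

	static void client_exit(void)
	{
		dm_daemon_stop(&_worker);	/* wakes the thread and waits */
	}
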
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-exception-store.c linux-2.4.26-leo/drivers/md/dm-exception-store.c
--- linux-2.4.26/drivers/md/dm-exception-store.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-exception-store.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,673 @@
+/*
+ * dm-snapshot.c
+ *
+ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-snapshot.h"
+#include "dm-io.h"
+#include "kcopyd.h"
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+/*-----------------------------------------------------------------
+ * Persistent snapshots, by persistent we mean that the snapshot
+ * will survive a reboot.
+ *---------------------------------------------------------------*/
+
+/*
+ * We need to store a record of which parts of the origin have
+ * been copied to the snapshot device.  The snapshot code
+ * requires that we copy exception chunks to chunk aligned areas
+ * of the COW store.  It makes sense therefore, to store the
+ * metadata in chunk size blocks.
+ *
+ * There is no backward or forward compatibility implemented,
+ * snapshots with different disk versions than the kernel will
+ * not be usable.  It is expected that "lvcreate" will blank out
+ * the start of a fresh COW device before calling the snapshot
+ * constructor.
+ *
+ * The first chunk of the COW device just contains the header.
+ * After this there is a chunk filled with exception metadata,
+ * followed by as many exception chunks as can fit in the
+ * metadata areas.
+ *
+ * All on disk structures are in little-endian format.  The end
+ * of the exceptions info is indicated by an exception with a
+ * new_chunk of 0, which is invalid since it would point to the
+ * header chunk.
+ */
+
+/*
+ * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
+ */
+#define SNAP_MAGIC 0x70416e53
+
+/*
+ * The on-disk version of the metadata.
+ */
+#define SNAPSHOT_DISK_VERSION 1
+
+struct disk_header {
+	uint32_t magic;
+
+	/*
+	 * Is this snapshot valid?  There is no way of recovering
+	 * an invalid snapshot.
+	 */
+	uint32_t valid;
+
+	/*
+	 * Simple, incrementing version.  No backward
+	 * compatibility.
+	 */
+	uint32_t version;
+
+	/* In sectors */
+	uint32_t chunk_size;
+};
+
+struct disk_exception {
+	uint64_t old_chunk;
+	uint64_t new_chunk;
+};
+
+struct commit_callback {
+	void (*callback)(void *, int success);
+	void *context;
+};
+
+/*
+ * The top level structure for a persistent exception store.
+ */
+struct pstore {
+	struct dm_snapshot *snap;	/* up pointer to my snapshot */
+	int version;
+	int valid;
+	uint32_t chunk_size;
+	uint32_t exceptions_per_area;
+
+	/*
+	 * Now that we have an asynchronous kcopyd there is no
+	 * need for large chunk sizes, so it won't hurt to have a
+	 * whole chunk's worth of metadata in memory at once.
+	 */
+	void *area;
+
+	/*
+	 * Used to keep track of which metadata area the data in
+	 * 'chunk' refers to.
+	 */
+	uint32_t current_area;
+
+	/*
+	 * The next free chunk for an exception.
+	 */
+	uint32_t next_free;
+
+	/*
+	 * The index of next free exception in the current
+	 * metadata area.
+	 */
+	uint32_t current_committed;
+
+	atomic_t pending_count;
+	uint32_t callback_count;
+	struct commit_callback *callbacks;
+};
+
+static inline unsigned int sectors_to_pages(unsigned int sectors)
+{
+	return sectors / (PAGE_SIZE / SECTOR_SIZE);
+}
+
+static int alloc_area(struct pstore *ps)
+{
+	int r = -ENOMEM;
+	size_t i, len, nr_pages;
+	struct page *page, *last = NULL;
+
+	len = ps->chunk_size << SECTOR_SHIFT;
+
+	/*
+	 * Allocate the chunk_size block of memory that will hold
+	 * a single metadata area.
+	 */
+	ps->area = vmalloc(len);
+	if (!ps->area)
+		return r;
+
+	nr_pages = sectors_to_pages(ps->chunk_size);
+
+	/*
+	 * We lock the pages for ps->area into memory since
+	 * they'll be doing a lot of io.  We also chain them
+	 * together ready for dm-io.
+	 */
+	for (i = 0; i < nr_pages; i++) {
+		page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
+		LockPage(page);
+		if (last)
+			last->list.next = &page->list;
+		last = page;
+	}
+
+	return 0;
+}
+
+static void free_area(struct pstore *ps)
+{
+	size_t i, nr_pages;
+	struct page *page;
+
+	nr_pages = sectors_to_pages(ps->chunk_size);
+	for (i = 0; i < nr_pages; i++) {
+		page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
+		page->list.next = NULL;
+		UnlockPage(page);
+	}
+
+	vfree(ps->area);
+}
+
+/*
+ * Read or write a chunk aligned and sized block of data from a device.
+ */
+static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
+{
+	struct io_region where;
+	unsigned int bits;
+
+	where.dev = ps->snap->cow->dev;
+	where.sector = ps->chunk_size * chunk;
+	where.count = ps->chunk_size;
+
+	return dm_io_sync(1, &where, rw, vmalloc_to_page(ps->area), 0, &bits);
+}
+
+/*
+ * Read or write a metadata area.  Remembering to skip the first
+ * chunk which holds the header.
+ */
+static int area_io(struct pstore *ps, uint32_t area, int rw)
+{
+	int r;
+	uint32_t chunk;
+
+	/* convert a metadata area index to a chunk index */
+	chunk = 1 + ((ps->exceptions_per_area + 1) * area);
+
+	r = chunk_io(ps, chunk, rw);
+	if (r)
+		return r;
+
+	ps->current_area = area;
+	return 0;
+}
+
+static int zero_area(struct pstore *ps, uint32_t area)
+{
+	memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
+	return area_io(ps, area, WRITE);
+}
+
+static int read_header(struct pstore *ps, int *new_snapshot)
+{
+	int r;
+	struct disk_header *dh;
+
+	r = chunk_io(ps, 0, READ);
+	if (r)
+		return r;
+
+	dh = (struct disk_header *) ps->area;
+
+	if (le32_to_cpu(dh->magic) == 0) {
+		*new_snapshot = 1;
+
+	} else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) {
+		*new_snapshot = 0;
+		ps->valid = le32_to_cpu(dh->valid);
+		ps->version = le32_to_cpu(dh->version);
+		ps->chunk_size = le32_to_cpu(dh->chunk_size);
+
+	} else {
+		DMWARN("Invalid/corrupt snapshot");
+		r = -ENXIO;
+	}
+
+	return r;
+}
+
+static int write_header(struct pstore *ps)
+{
+	struct disk_header *dh;
+
+	memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT);
+
+	dh = (struct disk_header *) ps->area;
+	dh->magic = cpu_to_le32(SNAP_MAGIC);
+	dh->valid = cpu_to_le32(ps->valid);
+	dh->version = cpu_to_le32(ps->version);
+	dh->chunk_size = cpu_to_le32(ps->chunk_size);
+
+	return chunk_io(ps, 0, WRITE);
+}
+
+/*
+ * Access functions for the disk exceptions, these do the endian conversions.
+ */
+static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
+{
+	if (index >= ps->exceptions_per_area)
+		return NULL;
+
+	return ((struct disk_exception *) ps->area) + index;
+}
+
+static int read_exception(struct pstore *ps,
+			  uint32_t index, struct disk_exception *result)
+{
+	struct disk_exception *e;
+
+	e = get_exception(ps, index);
+	if (!e)
+		return -EINVAL;
+
+	/* copy it */
+	result->old_chunk = le64_to_cpu(e->old_chunk);
+	result->new_chunk = le64_to_cpu(e->new_chunk);
+
+	return 0;
+}
+
+static int write_exception(struct pstore *ps,
+			   uint32_t index, struct disk_exception *de)
+{
+	struct disk_exception *e;
+
+	e = get_exception(ps, index);
+	if (!e)
+		return -EINVAL;
+
+	/* copy it */
+	e->old_chunk = cpu_to_le64(de->old_chunk);
+	e->new_chunk = cpu_to_le64(de->new_chunk);
+
+	return 0;
+}
+
+/*
+ * Registers the exceptions that are present in the current area.
+ * 'full' is filled in to indicate if the area has been
+ * filled.
+ */
+static int insert_exceptions(struct pstore *ps, int *full)
+{
+	int r;
+	unsigned int i;
+	struct disk_exception de;
+
+	/* presume the area is full */
+	*full = 1;
+
+	for (i = 0; i < ps->exceptions_per_area; i++) {
+		r = read_exception(ps, i, &de);
+
+		if (r)
+			return r;
+
+		/*
+		 * If the new_chunk is pointing at the start of
+		 * the COW device, where the first metadata area
+		 * is we know that we've hit the end of the
+		 * exceptions.  Therefore the area is not full.
+		 */
+		if (de.new_chunk == 0LL) {
+			ps->current_committed = i;
+			*full = 0;
+			break;
+		}
+
+		/*
+		 * Keep track of the start of the free chunks.
+		 */
+		if (ps->next_free <= de.new_chunk)
+			ps->next_free = de.new_chunk + 1;
+
+		/*
+		 * Otherwise we add the exception to the snapshot.
+		 */
+		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int read_exceptions(struct pstore *ps)
+{
+	uint32_t area;
+	int r, full = 1;
+
+	/*
+	 * Keep reading chunks and inserting exceptions until
+	 * we find a partially full area.
+	 */
+	for (area = 0; full; area++) {
+		r = area_io(ps, area, READ);
+		if (r)
+			return r;
+
+		r = insert_exceptions(ps, &full);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static inline struct pstore *get_info(struct exception_store *store)
+{
+	return (struct pstore *) store->context;
+}
+
+static void persistent_fraction_full(struct exception_store *store,
+				     sector_t *numerator, sector_t *denominator)
+{
+	*numerator = get_info(store)->next_free * store->snap->chunk_size;
+	*denominator = get_dev_size(store->snap->cow->dev);
+}
+
+static void persistent_destroy(struct exception_store *store)
+{
+	struct pstore *ps = get_info(store);
+
+	dm_io_put(sectors_to_pages(ps->chunk_size));
+	vfree(ps->callbacks);
+	free_area(ps);
+	kfree(ps);
+}
+
+static int persistent_read_metadata(struct exception_store *store)
+{
+	int r, new_snapshot;
+	struct pstore *ps = get_info(store);
+
+	/*
+	 * Read the snapshot header.
+	 */
+	r = read_header(ps, &new_snapshot);
+	if (r)
+		return r;
+
+	/*
+	 * Do we need to set up a new snapshot?
+	 */
+	if (new_snapshot) {
+		r = write_header(ps);
+		if (r) {
+			DMWARN("write_header failed");
+			return r;
+		}
+
+		r = zero_area(ps, 0);
+		if (r) {
+			DMWARN("zero_area(0) failed");
+			return r;
+		}
+
+	} else {
+		/*
+		 * Sanity checks.
+		 */
+		if (!ps->valid) {
+			DMWARN("snapshot is marked invalid");
+			return -EINVAL;
+		}
+
+		if (ps->version != SNAPSHOT_DISK_VERSION) {
+			DMWARN("unable to handle snapshot disk version %d",
+			       ps->version);
+			return -EINVAL;
+		}
+
+		/*
+		 * Read the metadata.
+		 */
+		r = read_exceptions(ps);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int persistent_prepare(struct exception_store *store,
+			      struct exception *e)
+{
+	struct pstore *ps = get_info(store);
+	uint32_t stride;
+	sector_t size = get_dev_size(store->snap->cow->dev);
+
+	/* Is there enough room? */
+	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
+		return -ENOSPC;
+
+	e->new_chunk = ps->next_free;
+
+	/*
+	 * Move onto the next free pending, making sure to take
+	 * into account the location of the metadata chunks.
+	 */
+	stride = (ps->exceptions_per_area + 1);
+	if ((++ps->next_free % stride) == 1)
+		ps->next_free++;
+
+	atomic_inc(&ps->pending_count);
+	return 0;
+}
+
+static void persistent_commit(struct exception_store *store,
+			      struct exception *e,
+			      void (*callback) (void *, int success),
+			      void *callback_context)
+{
+	int r;
+	unsigned int i;
+	struct pstore *ps = get_info(store);
+	struct disk_exception de;
+	struct commit_callback *cb;
+
+	de.old_chunk = e->old_chunk;
+	de.new_chunk = e->new_chunk;
+	write_exception(ps, ps->current_committed++, &de);
+
+	/*
+	 * Add the callback to the back of the array.  This code
+	 * is the only place where the callback array is
+	 * manipulated, and we know that it will never be called
+	 * multiple times concurrently.
+	 */
+	cb = ps->callbacks + ps->callback_count++;
+	cb->callback = callback;
+	cb->context = callback_context;
+
+	/*
+	 * If there are no more exceptions in flight, or we have
+	 * filled this metadata area we commit the exceptions to
+	 * disk.
+	 */
+	if (atomic_dec_and_test(&ps->pending_count) ||
+	    (ps->current_committed == ps->exceptions_per_area)) {
+		r = area_io(ps, ps->current_area, WRITE);
+		if (r)
+			ps->valid = 0;
+
+		for (i = 0; i < ps->callback_count; i++) {
+			cb = ps->callbacks + i;
+			cb->callback(cb->context, r == 0 ? 1 : 0);
+		}
+
+		ps->callback_count = 0;
+	}
+
+	/*
+	 * Have we completely filled the current area?
+	 */
+	if (ps->current_committed == ps->exceptions_per_area) {
+		ps->current_committed = 0;
+		r = zero_area(ps, ps->current_area + 1);
+		if (r)
+			ps->valid = 0;
+	}
+}
+
+static void persistent_drop(struct exception_store *store)
+{
+	struct pstore *ps = get_info(store);
+
+	ps->valid = 0;
+	if (write_header(ps))
+		DMWARN("write header failed");
+}
+
+int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
+{
+	int r;
+	struct pstore *ps;
+
+	r = dm_io_get(sectors_to_pages(chunk_size));
+	if (r)
+		return r;
+
+	/* allocate the pstore */
+	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
+	if (!ps) {
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	ps->snap = store->snap;
+	ps->valid = 1;
+	ps->version = SNAPSHOT_DISK_VERSION;
+	ps->chunk_size = chunk_size;
+	ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) /
+	    sizeof(struct disk_exception);
+	ps->next_free = 2;	/* skipping the header and first area */
+	ps->current_committed = 0;
+
+	r = alloc_area(ps);
+	if (r)
+		goto bad;
+
+	/*
+	 * Allocate space for all the callbacks.
+	 */
+	ps->callback_count = 0;
+	atomic_set(&ps->pending_count, 0);
+	ps->callbacks = vcalloc(ps->exceptions_per_area,
+				sizeof(*ps->callbacks));
+
+	if (!ps->callbacks) {
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	store->destroy = persistent_destroy;
+	store->read_metadata = persistent_read_metadata;
+	store->prepare_exception = persistent_prepare;
+	store->commit_exception = persistent_commit;
+	store->drop_snapshot = persistent_drop;
+	store->fraction_full = persistent_fraction_full;
+	store->context = ps;
+
+	return 0;
+
+      bad:
+	dm_io_put(sectors_to_pages(chunk_size));
+	if (ps) {
+		if (ps->callbacks)
+			vfree(ps->callbacks);
+
+		kfree(ps);
+	}
+	return r;
+}
+
+/*-----------------------------------------------------------------
+ * Implementation of the store for non-persistent snapshots.
+ *---------------------------------------------------------------*/
+struct transient_c {
+	sector_t next_free;
+};
+
+void transient_destroy(struct exception_store *store)
+{
+	kfree(store->context);
+}
+
+int transient_read_metadata(struct exception_store *store)
+{
+	return 0;
+}
+
+int transient_prepare(struct exception_store *store, struct exception *e)
+{
+	struct transient_c *tc = (struct transient_c *) store->context;
+	sector_t size = get_dev_size(store->snap->cow->dev);
+
+	if (size < (tc->next_free + store->snap->chunk_size))
+		return -1;
+
+	e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
+	tc->next_free += store->snap->chunk_size;
+
+	return 0;
+}
+
+void transient_commit(struct exception_store *store,
+		      struct exception *e,
+		      void (*callback) (void *, int success),
+		      void *callback_context)
+{
+	/* Just succeed */
+	callback(callback_context, 1);
+}
+
+static void transient_fraction_full(struct exception_store *store,
+				    sector_t *numerator, sector_t *denominator)
+{
+	*numerator = ((struct transient_c *) store->context)->next_free;
+	*denominator = get_dev_size(store->snap->cow->dev);
+}
+
+int dm_create_transient(struct exception_store *store,
+			struct dm_snapshot *s, int blocksize)
+{
+	struct transient_c *tc;
+
+	memset(store, 0, sizeof(*store));
+	store->destroy = transient_destroy;
+	store->read_metadata = transient_read_metadata;
+	store->prepare_exception = transient_prepare;
+	store->commit_exception = transient_commit;
+	store->fraction_full = transient_fraction_full;
+	store->snap = s;
+
+	tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
+	if (!tc)
+		return -ENOMEM;
+
+	tc->next_free = 0;
+	store->context = tc;
+
+	return 0;
+}
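
A worked example of the persistent store layout described in the header
comment, for an 8 KiB chunk (chunk_size = 16 sectors).  Standalone
arithmetic only; struct disk_exception is restated locally so the
example compiles on its own:

	#include <assert.h>
	#include <stdint.h>

	struct disk_exception { uint64_t old_chunk, new_chunk; };	/* 16 bytes */

	int main(void)
	{
		const uint32_t chunk_size = 16;	/* sectors, i.e. 8 KiB */
		const uint32_t epa =
			(chunk_size << 9) / sizeof(struct disk_exception);

		assert(epa == 512);	/* exceptions_per_area */
		/* area_io(): metadata area N starts at chunk 1 + (epa + 1) * N,
		   chunk 0 being the header */
		assert(1 + (epa + 1) * 0 == 1);
		assert(1 + (epa + 1) * 1 == 514);
		return 0;
	}
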
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm.h linux-2.4.26-leo/drivers/md/dm.h
--- linux-2.4.26/drivers/md/dm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm.h	2004-06-22 21:04:34.000000000 +0100
@@ -0,0 +1,175 @@
+/*
+ * Internal header file for device mapper
+ *
+ * Copyright (C) 2001, 2002 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_INTERNAL_H
+#define DM_INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/device-mapper.h>
+#include <linux/list.h>
+#include <linux/blkdev.h>
+
+#define DM_NAME "device-mapper"
+#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
+#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
+#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
+
+/*
+ * FIXME: I think this should be with the definition of sector_t
+ * in types.h.
+ */
+#ifdef CONFIG_LBD
+#define SECTOR_FORMAT "%Lu"
+#else
+#define SECTOR_FORMAT "%lu"
+#endif
+
+#define SECTOR_SHIFT 9
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+extern struct block_device_operations dm_blk_dops;
+
+/*
+ * List of devices that a metadevice uses and should open/close.
+ */
+struct dm_dev {
+	struct list_head list;
+
+	atomic_t count;
+	int mode;
+	kdev_t dev;
+	struct block_device *bdev;
+};
+
+struct dm_table;
+struct mapped_device;
+
+/*-----------------------------------------------------------------
+ * Functions for manipulating a struct mapped_device.
+ * Drop the reference with dm_put when you finish with the object.
+ *---------------------------------------------------------------*/
+int dm_create(kdev_t dev, struct mapped_device **md);
+
+/*
+ * Reference counting for md.
+ */
+void dm_get(struct mapped_device *md);
+void dm_put(struct mapped_device *md);
+
+/*
+ * A device can still be used while suspended, but I/O is deferred.
+ */
+int dm_suspend(struct mapped_device *md);
+int dm_resume(struct mapped_device *md);
+
+/*
+ * The device must be suspended before calling this method.
+ */
+int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+
+/*
+ * Drop a reference on the table when you've finished with the
+ * result.
+ */
+struct dm_table *dm_get_table(struct mapped_device *md);
+
+/*
+ * Event functions.
+ */
+uint32_t dm_get_event_nr(struct mapped_device *md);
+int dm_add_wait_queue(struct mapped_device *md, wait_queue_t *wq,
+		      uint32_t event_nr);
+void dm_remove_wait_queue(struct mapped_device *md, wait_queue_t *wq);
+
+/*
+ * Info functions.
+ */
+kdev_t dm_kdev(struct mapped_device *md);
+int dm_suspended(struct mapped_device *md);
+
+/*-----------------------------------------------------------------
+ * Functions for manipulating a table.  Tables are also reference
+ * counted.
+ *---------------------------------------------------------------*/
+int dm_table_create(struct dm_table **result, int mode, unsigned num_targets);
+
+void dm_table_get(struct dm_table *t);
+void dm_table_put(struct dm_table *t);
+
+int dm_table_add_target(struct dm_table *t, const char *type,
+			sector_t start,	sector_t len, char *params);
+int dm_table_complete(struct dm_table *t);
+void dm_table_event_callback(struct dm_table *t,
+			     void (*fn)(void *), void *context);
+void dm_table_event(struct dm_table *t);
+sector_t dm_table_get_size(struct dm_table *t);
+struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
+struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+unsigned int dm_table_get_num_targets(struct dm_table *t);
+struct list_head *dm_table_get_devices(struct dm_table *t);
+int dm_table_get_mode(struct dm_table *t);
+void dm_table_suspend_targets(struct dm_table *t);
+void dm_table_resume_targets(struct dm_table *t);
+
+/*-----------------------------------------------------------------
+ * A registry of target types.
+ *---------------------------------------------------------------*/
+int dm_target_init(void);
+void dm_target_exit(void);
+struct target_type *dm_get_target_type(const char *name);
+void dm_put_target_type(struct target_type *t);
+
+
+/*-----------------------------------------------------------------
+ * Useful inlines.
+ *---------------------------------------------------------------*/
+static inline int array_too_big(unsigned long fixed, unsigned long obj,
+				unsigned long num)
+{
+	return (num > (ULONG_MAX - fixed) / obj);
+}
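+/*
+ * For example, allocating "fixed + obj * num" bytes is only safe
+ * when this returns false, since the multiplication cannot then
+ * overflow ULONG_MAX.
+ */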
+
+/*
+ * ceiling(n / size) * size
+ */
+static inline unsigned long dm_round_up(unsigned long n, unsigned long size)
+{
+	unsigned long r = n % size;
+	return n + (r ? (size - r) : 0);
+}
+
+/*
+ * ceiling(n / size)
+ */
+static inline unsigned long dm_div_up(unsigned long n, unsigned long size)
+{
+	return dm_round_up(n, size) / size;
+}
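+/*
+ * Worked example: dm_round_up(10, 4) == 12 and dm_div_up(10, 4) == 3,
+ * while dm_round_up(12, 4) == 12 and dm_div_up(12, 4) == 3.
+ */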
+
+const char *dm_kdevname(kdev_t dev);
+
+/*
+ * The device-mapper can be driven through one of two interfaces:
+ * ioctl or filesystem, depending on which patch you have applied.
+ */
+int dm_interface_init(void);
+void dm_interface_exit(void);
+
+/*
+ * Targets for linear and striped mappings
+ */
+int dm_linear_init(void);
+void dm_linear_exit(void);
+
+int dm_stripe_init(void);
+void dm_stripe_exit(void);
+
+int dm_snapshot_init(void);
+void dm_snapshot_exit(void);
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-io.c linux-2.4.26-leo/drivers/md/dm-io.c
--- linux-2.4.26/drivers/md/dm-io.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-io.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-io.h"
+
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+
+/* FIXME: can we shrink this ? */
+struct io_context {
+	int rw;
+	unsigned int error;
+	atomic_t count;
+	struct task_struct *sleeper;
+	io_notify_fn callback;
+	void *context;
+};
+
+/*
+ * We maintain a pool of buffer heads for dispatching the io.
+ */
+static unsigned int _num_bhs;
+static mempool_t *_buffer_pool;
+
+/*
+ * io contexts are only dynamically allocated for asynchronous
+ * io.  Since async io is likely to be the majority of io, we'll
+ * have the same number of io contexts as buffer heads!  (FIXME:
+ * must reduce this).
+ */
+mempool_t *_io_pool;
+
+static void *alloc_bh(int gfp_mask, void *pool_data)
+{
+	struct buffer_head *bh;
+
+	bh = kmem_cache_alloc(bh_cachep, gfp_mask);
+	if (bh) {
+		bh->b_reqnext = NULL;
+		init_waitqueue_head(&bh->b_wait);
+		INIT_LIST_HEAD(&bh->b_inode_buffers);
+	}
+
+	return bh;
+}
+
+static void *alloc_io(int gfp_mask, void *pool_data)
+{
+	return kmalloc(sizeof(struct io_context), gfp_mask);
+}
+
+static void free_io(void *element, void *pool_data)
+{
+	kfree(element);
+}
+
+static unsigned int pages_to_buffers(unsigned int pages)
+{
+	return 4 * pages;	/* too many ? */
+}
+
+static int resize_pool(unsigned int new_bhs)
+{
+	int r = 0;
+
+	if (_buffer_pool) {
+		if (new_bhs == 0) {
+			/* free off the pools */
+			mempool_destroy(_buffer_pool);
+			mempool_destroy(_io_pool);
+			_buffer_pool = _io_pool = NULL;
+		} else {
+			/* resize the pools */
+			r = mempool_resize(_buffer_pool, new_bhs, GFP_KERNEL);
+			if (!r)
+				r = mempool_resize(_io_pool,
+						   new_bhs, GFP_KERNEL);
+		}
+	} else {
+		/* create new pools */
+		_buffer_pool = mempool_create(new_bhs, alloc_bh,
+					      mempool_free_slab, bh_cachep);
+		if (!_buffer_pool)
+			r = -ENOMEM;
+
+		_io_pool = mempool_create(new_bhs, alloc_io, free_io, NULL);
+		if (!_io_pool) {
+			mempool_destroy(_buffer_pool);
+			_buffer_pool = NULL;
+			r = -ENOMEM;
+		}
+	}
+
+	if (!r)
+		_num_bhs = new_bhs;
+
+	return r;
+}
+
+int dm_io_get(unsigned int num_pages)
+{
+	return resize_pool(_num_bhs + pages_to_buffers(num_pages));
+}
+
+void dm_io_put(unsigned int num_pages)
+{
+	resize_pool(_num_bhs - pages_to_buffers(num_pages));
+}
+
+/*-----------------------------------------------------------------
+ * We need to keep track of which region a buffer is doing io
+ * for.  In order to save a memory allocation we store this in an
+ * unused field of the buffer head, and provide these access
+ * functions.
+ *
+ * FIXME: add compile time check that an unsigned int can fit
+ * into a pointer.
+ *
+ *---------------------------------------------------------------*/
+static inline void bh_set_region(struct buffer_head *bh, unsigned int region)
+{
+	bh->b_journal_head = (void *) region;
+}
+
+static inline int bh_get_region(struct buffer_head *bh)
+{
+	return (unsigned int) bh->b_journal_head;
+}
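+/*
+ * A sketch (not in the original) of the compile time check the
+ * FIXME above asks for: fail the build with a negative array size
+ * if an unsigned int cannot fit in a pointer, e.g.
+ *
+ *	extern char __region_fits_in_ptr
+ *		[sizeof(unsigned int) <= sizeof(void *) ? 1 : -1];
+ */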
+
+/*-----------------------------------------------------------------
+ * We need an io object to keep track of the number of bhs that
+ * have been dispatched for a particular io.
+ *---------------------------------------------------------------*/
+static void dec_count(struct io_context *io, unsigned int region, int error)
+{
+	if (error)
+		set_bit(region, &io->error);
+
+	if (atomic_dec_and_test(&io->count)) {
+		if (io->sleeper)
+			wake_up_process(io->sleeper);
+
+		else {
+			int r = io->error;
+			io_notify_fn fn = io->callback;
+			void *context = io->context;
+
+			mempool_free(io, _io_pool);
+			fn(r, context);
+		}
+	}
+}
+
+static void endio(struct buffer_head *bh, int uptodate)
+{
+	struct io_context *io = (struct io_context *) bh->b_private;
+
+	if (!uptodate && io->rw != WRITE) {
+		/*
+		 * We need to zero this region, otherwise people
+		 * like kcopyd may write the arbitrary contents
+		 * of the page.
+		 */
+		memset(bh->b_data, 0, bh->b_size);
+	}
+
+	dec_count(io, bh_get_region(bh), !uptodate);
+	mempool_free(bh, _buffer_pool);
+}
+
+/*
+ * Primitives for alignment calculations.
+ */
+int fls(unsigned n)
+{
+	return generic_fls32(n);
+}
+
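+/*
+ * ffs(n) - 1 is the index of the lowest set bit, so
+ * 1 << log2_floor(n) is the largest power of 2 dividing n
+ * (n must be non-zero).
+ */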
+static inline int log2_floor(unsigned n)
+{
+	return ffs(n) - 1;
+}
+
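+/*
+ * fls(n) - 1 is the index of the highest set bit, so
+ * 1 << log2_align(n) is the largest power of 2 <= n, i.e. this
+ * computes floor(log2(n)) despite the name.
+ */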
+static inline int log2_align(unsigned n)
+{
+	return fls(n) - 1;
+}
+
+/*
+ * Returns the next block for io.
+ */
+static int do_page(kdev_t dev, sector_t *block, sector_t end_block,
+		   unsigned int block_size,
+		   struct page *p, unsigned int offset,
+		   unsigned int region, struct io_context *io)
+{
+	struct buffer_head *bh;
+	sector_t b = *block;
+	sector_t blocks_per_page = PAGE_SIZE / block_size;
+	unsigned int this_size; /* holds the size of the current io */
+	sector_t len;
+
+	if (!blocks_per_page) {
+		DMERR("dm-io: PAGE_SIZE (%lu) < block_size (%u) unsupported",
+		      PAGE_SIZE, block_size);
+		return 0;
+	}
+
+	while ((offset < PAGE_SIZE) && (b != end_block)) {
+		bh = mempool_alloc(_buffer_pool, GFP_NOIO);
+		init_buffer(bh, endio, io);
+		bh_set_region(bh, region);
+
+		/*
+		 * Block size must be a power of 2 and aligned
+		 * correctly.
+		 */
+
+		len = min(end_block - b, blocks_per_page);
+		len = min(len, blocks_per_page - offset / block_size);
+
+		if (!len) {
+			DMERR("dm-io: Invalid offset/block_size (%u/%u).",
+			      offset, block_size);
+			return 0;
+		}
+
+		this_size = 1 << log2_align(len);
+		if (b)
+			this_size = min(this_size,
+					(unsigned) 1 << log2_floor(b));
+
+		/*
+		 * Add in the job offset.
+		 */
+		bh->b_blocknr = (b / this_size);
+		bh->b_size = block_size * this_size;
+		set_bh_page(bh, p, offset);
+		bh->b_this_page = bh;
+
+		bh->b_dev = dev;
+		atomic_set(&bh->b_count, 1);
+
+		bh->b_state = ((1 << BH_Uptodate) | (1 << BH_Mapped) |
+			       (1 << BH_Lock));
+
+		if (io->rw == WRITE)
+			clear_bit(BH_Dirty, &bh->b_state);
+
+		atomic_inc(&io->count);
+		submit_bh(io->rw, bh);
+
+		b += this_size;
+		offset += block_size * this_size;
+	}
+
+	*block = b;
+	return (b == end_block);
+}
+
+static void do_region(unsigned int region, struct io_region *where,
+		      struct page *page, unsigned int offset,
+		      struct io_context *io)
+{
+	unsigned int block_size = get_hardsect_size(where->dev);
+	unsigned int sblock_size = block_size >> 9;
+	sector_t block = where->sector / sblock_size;
+	sector_t end_block = (where->sector + where->count) / sblock_size;
+
+	while (1) {
+		if (do_page(where->dev, &block, end_block, block_size,
+			    page, offset, region, io))
+			break;
+
+		offset = 0;	/* only offset the first page */
+
+		page = list_entry(page->list.next, struct page, list);
+	}
+}
+
+static void dispatch_io(unsigned int num_regions, struct io_region *where,
+			struct page *pages, unsigned int offset,
+			struct io_context *io)
+{
+	int i;
+
+	for (i = 0; i < num_regions; i++)
+		if (where[i].count)
+			do_region(i, where + i, pages, offset, io);
+
+	/*
+	 * Drop the extra reference that we were holding to avoid
+	 * the io being completed too early.
+	 */
+	dec_count(io, 0, 0);
+}
+
+/*
+ * Synchronous io
+ */
+int dm_io_sync(unsigned int num_regions, struct io_region *where,
+	       int rw, struct page *pages, unsigned int offset,
+	       unsigned int *error_bits)
+{
+	struct io_context io;
+
+	BUG_ON(num_regions > 1 && rw != WRITE);
+
+	io.rw = rw;
+	io.error = 0;
+	atomic_set(&io.count, 1); /* see dispatch_io() */
+	io.sleeper = current;
+
+	dispatch_io(num_regions, where, pages, offset, &io);
+	run_task_queue(&tq_disk);
+
+	while (1) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (!atomic_read(&io.count))
+			break;
+
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+
+	*error_bits = io.error;
+	return io.error ? -EIO : 0;
+}
+
+/*
+ * Asynchronous io
+ */
+int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
+		struct page *pages, unsigned int offset,
+		io_notify_fn fn, void *context)
+{
+	struct io_context *io = mempool_alloc(_io_pool, GFP_NOIO);
+
+	io->rw = rw;
+	io->error = 0;
+	atomic_set(&io->count, 1); /* see dispatch_io() */
+	io->sleeper = NULL;
+	io->callback = fn;
+	io->context = context;
+
+	dispatch_io(num_regions, where, pages, offset, io);
+	return 0;
+}
+
+EXPORT_SYMBOL(dm_io_get);
+EXPORT_SYMBOL(dm_io_put);
+EXPORT_SYMBOL(dm_io_sync);
+EXPORT_SYMBOL(dm_io_async);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-ioctl.c linux-2.4.26-leo/drivers/md/dm-ioctl.c
--- linux-2.4.26/drivers/md/dm-ioctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-ioctl.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,1284 @@
+/*
+ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/miscdevice.h>
+#include <linux/dm-ioctl.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/blk.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+
+#define DM_DRIVER_EMAIL "dm@uk.sistina.com"
+
+/*-----------------------------------------------------------------
+ * The ioctl interface needs to be able to look up devices by
+ * name or uuid.
+ *---------------------------------------------------------------*/
+struct hash_cell {
+	struct list_head name_list;
+	struct list_head uuid_list;
+
+	char *name;
+	char *uuid;
+	struct mapped_device *md;
+	struct dm_table *new_map;
+
+	/* I hate devfs */
+	devfs_handle_t devfs_entry;
+};
+
+#define NUM_BUCKETS 64
+#define MASK_BUCKETS (NUM_BUCKETS - 1)
+static struct list_head _name_buckets[NUM_BUCKETS];
+static struct list_head _uuid_buckets[NUM_BUCKETS];
+
+static devfs_handle_t _dev_dir;
+void dm_hash_remove_all(void);
+
+/*
+ * Guards access to both hash tables.
+ */
+static DECLARE_RWSEM(_hash_lock);
+
+static void init_buckets(struct list_head *buckets)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_BUCKETS; i++)
+		INIT_LIST_HEAD(buckets + i);
+}
+
+int dm_hash_init(void)
+{
+	init_buckets(_name_buckets);
+	init_buckets(_uuid_buckets);
+	_dev_dir = devfs_mk_dir(0, DM_DIR, NULL);
+	return 0;
+}
+
+void dm_hash_exit(void)
+{
+	dm_hash_remove_all();
+	devfs_unregister(_dev_dir);
+}
+
+/*-----------------------------------------------------------------
+ * Hash function:
+ * We're not really concerned with the str hash function being
+ * fast since it's only used by the ioctl interface.
+ *---------------------------------------------------------------*/
+static unsigned int hash_str(const char *str)
+{
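+	/*
+	 * hash_mult is just a large odd multiplier in the style of
+	 * multiplicative (Knuth-like) string hashing; its exact
+	 * value is not critical here.
+	 */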
+	const unsigned int hash_mult = 2654435387U;
+	unsigned int h = 0;
+
+	while (*str)
+		h = (h + (unsigned int) *str++) * hash_mult;
+
+	return h & MASK_BUCKETS;
+}
+
+/*-----------------------------------------------------------------
+ * Code for looking up a device by name
+ *---------------------------------------------------------------*/
+static struct hash_cell *__get_name_cell(const char *str)
+{
+	struct list_head *tmp;
+	struct hash_cell *hc;
+	unsigned int h = hash_str(str);
+
+	list_for_each (tmp, _name_buckets + h) {
+		hc = list_entry(tmp, struct hash_cell, name_list);
+		if (!strcmp(hc->name, str))
+			return hc;
+	}
+
+	return NULL;
+}
+
+static struct hash_cell *__get_uuid_cell(const char *str)
+{
+	struct list_head *tmp;
+	struct hash_cell *hc;
+	unsigned int h = hash_str(str);
+
+	list_for_each (tmp, _uuid_buckets + h) {
+		hc = list_entry(tmp, struct hash_cell, uuid_list);
+		if (!strcmp(hc->uuid, str))
+			return hc;
+	}
+
+	return NULL;
+}
+
+/*-----------------------------------------------------------------
+ * Inserting, removing and renaming a device.
+ *---------------------------------------------------------------*/
+static inline char *kstrdup(const char *str)
+{
+	char *r = kmalloc(strlen(str) + 1, GFP_KERNEL);
+	if (r)
+		strcpy(r, str);
+	return r;
+}
+
+static struct hash_cell *alloc_cell(const char *name, const char *uuid,
+				    struct mapped_device *md)
+{
+	struct hash_cell *hc;
+
+	hc = kmalloc(sizeof(*hc), GFP_KERNEL);
+	if (!hc)
+		return NULL;
+
+	hc->name = kstrdup(name);
+	if (!hc->name) {
+		kfree(hc);
+		return NULL;
+	}
+
+	if (!uuid)
+		hc->uuid = NULL;
+
+	else {
+		hc->uuid = kstrdup(uuid);
+		if (!hc->uuid) {
+			kfree(hc->name);
+			kfree(hc);
+			return NULL;
+		}
+	}
+
+	INIT_LIST_HEAD(&hc->name_list);
+	INIT_LIST_HEAD(&hc->uuid_list);
+	hc->md = md;
+	hc->new_map = NULL;
+	return hc;
+}
+
+static void free_cell(struct hash_cell *hc)
+{
+	if (hc) {
+		kfree(hc->name);
+		kfree(hc->uuid);
+		kfree(hc);
+	}
+}
+
+/*
+ * devfs stuff.
+ */
+static int register_with_devfs(struct hash_cell *hc)
+{
+	kdev_t dev = dm_kdev(hc->md);
+
+	hc->devfs_entry =
+	    devfs_register(_dev_dir, hc->name, DEVFS_FL_CURRENT_OWNER,
+			   major(dev), minor(dev),
+			   S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
+			   &dm_blk_dops, NULL);
+
+	return 0;
+}
+
+static int unregister_with_devfs(struct hash_cell *hc)
+{
+	devfs_unregister(hc->devfs_entry);
+	return 0;
+}
+
+/*
+ * The kdev_t and uuid of a device can never change once it is
+ * initially inserted.
+ */
+int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
+{
+	struct hash_cell *cell;
+
+	/*
+	 * Allocate the new cells.
+	 */
+	cell = alloc_cell(name, uuid, md);
+	if (!cell)
+		return -ENOMEM;
+
+	/*
+	 * Insert the cell into both hash tables.
+	 */
+	down_write(&_hash_lock);
+	if (__get_name_cell(name))
+		goto bad;
+
+	list_add(&cell->name_list, _name_buckets + hash_str(name));
+
+	if (uuid) {
+		if (__get_uuid_cell(uuid)) {
+			list_del(&cell->name_list);
+			goto bad;
+		}
+		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
+	}
+	register_with_devfs(cell);
+	dm_get(md);
+	up_write(&_hash_lock);
+
+	return 0;
+
+      bad:
+	up_write(&_hash_lock);
+	free_cell(cell);
+	return -EBUSY;
+}
+
+void __hash_remove(struct hash_cell *hc)
+{
+	/* remove from the dev hash */
+	list_del(&hc->uuid_list);
+	list_del(&hc->name_list);
+	unregister_with_devfs(hc);
+	dm_put(hc->md);
+	if (hc->new_map)
+		dm_table_put(hc->new_map);
+	free_cell(hc);
+}
+
+void dm_hash_remove_all(void)
+{
+	int i;
+	struct hash_cell *hc;
+	struct list_head *tmp, *n;
+
+	down_write(&_hash_lock);
+	for (i = 0; i < NUM_BUCKETS; i++) {
+		list_for_each_safe (tmp, n, _name_buckets + i) {
+			hc = list_entry(tmp, struct hash_cell, name_list);
+			__hash_remove(hc);
+		}
+	}
+	up_write(&_hash_lock);
+}
+
+int dm_hash_rename(const char *old, const char *new)
+{
+	char *new_name, *old_name;
+	struct hash_cell *hc;
+
+	/*
+	 * duplicate new.
+	 */
+	new_name = kstrdup(new);
+	if (!new_name)
+		return -ENOMEM;
+
+	down_write(&_hash_lock);
+
+	/*
+	 * Is new free ?
+	 */
+	hc = __get_name_cell(new);
+	if (hc) {
+		DMWARN("asked to rename to an already existing name %s -> %s",
+		       old, new);
+		up_write(&_hash_lock);
+		kfree(new_name);
+		return -EBUSY;
+	}
+
+	/*
+	 * Is there such a device as 'old' ?
+	 */
+	hc = __get_name_cell(old);
+	if (!hc) {
+		DMWARN("asked to rename a non existent device %s -> %s",
+		       old, new);
+		up_write(&_hash_lock);
+		kfree(new_name);
+		return -ENXIO;
+	}
+
+	/*
+	 * rename and move the name cell.
+	 */
+	list_del(&hc->name_list);
+	old_name = hc->name;
+	hc->name = new_name;
+	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+
+	/* rename the device node in devfs */
+	unregister_with_devfs(hc);
+	register_with_devfs(hc);
+
+	up_write(&_hash_lock);
+	kfree(old_name);
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Implementation of the ioctl commands
+ *---------------------------------------------------------------*/
+/*
+ * All the ioctl commands get dispatched to functions with this
+ * prototype.
+ */
+typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
+
+static int remove_all(struct dm_ioctl *param, size_t param_size)
+{
+	dm_hash_remove_all();
+	param->data_size = 0;
+	return 0;
+}
+
+/*
+ * Round up the ptr to an 8-byte boundary.
+ */
+#define ALIGN_MASK 7
+static inline void *align_ptr(void *ptr)
+{
+	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+}
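+/*
+ * For example, with ALIGN_MASK 7 a pointer value of 0x1001 is
+ * rounded up to 0x1008, while 0x1008 is returned unchanged.
+ */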
+
+/*
+ * Retrieves the data payload buffer from an already allocated
+ * struct dm_ioctl.
+ */
+static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
+			       size_t *len)
+{
+	param->data_start = align_ptr(param + 1) - (void *) param;
+
+	if (param->data_start < param_size)
+		*len = param_size - param->data_start;
+	else
+		*len = 0;
+
+	return ((void *) param) + param->data_start;
+}
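+/*
+ * Resulting layout (illustrative):
+ *
+ *	|<- struct dm_ioctl ->|<- pad to 8 ->|<------ payload ------>|
+ *	param                 param->data_start             param_size
+ */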
+
+static int list_devices(struct dm_ioctl *param, size_t param_size)
+{
+	unsigned int i;
+	struct hash_cell *hc;
+	size_t len, needed = 0;
+	struct dm_name_list *nl, *old_nl = NULL;
+
+	down_write(&_hash_lock);
+
+	/*
+	 * Loop through all the devices working out how much
+	 * space we need.
+	 */
+	for (i = 0; i < NUM_BUCKETS; i++) {
+		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			needed += sizeof(struct dm_name_list);
+			needed += strlen(hc->name);
+			needed += ALIGN_MASK;
+		}
+	}
+
+	/*
+	 * Grab our output buffer.
+	 */
+	nl = get_result_buffer(param, param_size, &len);
+	if (len < needed) {
+		param->flags |= DM_BUFFER_FULL_FLAG;
+		goto out;
+	}
+	param->data_size = param->data_start + needed;
+
+	nl->dev = 0;	/* Flags no data */
+
+	/*
+	 * Now loop through filling out the names.
+	 */
+	for (i = 0; i < NUM_BUCKETS; i++) {
+		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			if (old_nl)
+				old_nl->next = (uint32_t) ((void *) nl -
+							   (void *) old_nl);
+
+			nl->dev = dm_kdev(hc->md);
+			nl->next = 0;
+			strcpy(nl->name, hc->name);
+
+			old_nl = nl;
+			nl = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+		}
+	}
+
+ out:
+	up_write(&_hash_lock);
+	return 0;
+}
+
+static int check_name(const char *name)
+{
+	if (strchr(name, '/')) {
+		DMWARN("invalid device name");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Fills in a dm_ioctl structure, ready for sending back to
+ * userland.
+ */
+static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
+{
+	kdev_t dev = dm_kdev(md);
+	struct dm_table *table;
+	struct block_device *bdev;
+
+	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
+			  DM_ACTIVE_PRESENT_FLAG);
+
+	if (dm_suspended(md))
+		param->flags |= DM_SUSPEND_FLAG;
+
+	param->dev = kdev_t_to_nr(dev);
+
+	if (is_read_only(dev))
+		param->flags |= DM_READONLY_FLAG;
+
+	param->event_nr = dm_get_event_nr(md);
+
+	table = dm_get_table(md);
+	if (table) {
+		param->flags |= DM_ACTIVE_PRESENT_FLAG;
+		param->target_count = dm_table_get_num_targets(table);
+		dm_table_put(table);
+	} else
+		param->target_count = 0;
+
+	bdev = bdget(param->dev);
+	if (!bdev)
+		return -ENXIO;
+	param->open_count = bdev->bd_openers;
+	bdput(bdev);
+
+	return 0;
+}
+
+static int dev_create(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	kdev_t dev = 0;
+	struct mapped_device *md;
+
+	r = check_name(param->name);
+	if (r)
+		return r;
+
+	if (param->flags & DM_PERSISTENT_DEV_FLAG)
+		dev = to_kdev_t(param->dev);
+
+	r = dm_create(dev, &md);
+	if (r)
+		return r;
+
+	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
+	if (r) {
+		dm_put(md);
+		return r;
+	}
+
+	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+	r = __dev_status(md, param);
+	dm_put(md);
+
+	return r;
+}
+
+/*
+ * Always use UUID for lookups if it's present, otherwise use name.
+ */
+static inline struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
+{
+	return *param->uuid ?
+	    __get_uuid_cell(param->uuid) : __get_name_cell(param->name);
+}
+
+static inline struct mapped_device *find_device(struct dm_ioctl *param)
+{
+	struct hash_cell *hc;
+	struct mapped_device *md = NULL;
+
+	down_read(&_hash_lock);
+	hc = __find_device_hash_cell(param);
+	if (hc) {
+		md = hc->md;
+
+		/*
+		 * Sneakily write in both the name and the uuid
+		 * while we have the cell.
+		 */
+		strncpy(param->name, hc->name, sizeof(param->name));
+		if (hc->uuid)
+			strncpy(param->uuid, hc->uuid, sizeof(param->uuid) - 1);
+		else
+			param->uuid[0] = '\0';
+
+		if (hc->new_map)
+			param->flags |= DM_INACTIVE_PRESENT_FLAG;
+		else
+			param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+		dm_get(md);
+	}
+	up_read(&_hash_lock);
+
+	return md;
+}
+
+static int dev_remove(struct dm_ioctl *param, size_t param_size)
+{
+	struct hash_cell *hc;
+
+	down_write(&_hash_lock);
+	hc = __find_device_hash_cell(param);
+
+	if (!hc) {
+		DMWARN("device doesn't appear to be in the dev hash table.");
+		up_write(&_hash_lock);
+		return -ENXIO;
+	}
+
+	__hash_remove(hc);
+	up_write(&_hash_lock);
+	param->data_size = 0;
+	return 0;
+}
+
+/*
+ * Check a string doesn't overrun the chunk of
+ * memory we copied from userland.
+ */
+static int invalid_str(char *str, void *end)
+{
+	while ((void *) str < end)
+		if (!*str++)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int dev_rename(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	char *new_name = (char *) param + param->data_start;
+
+	if (new_name < (char *) (param + 1) ||
+	    invalid_str(new_name, (void *) param + param_size)) {
+		DMWARN("Invalid new logical volume name supplied.");
+		return -EINVAL;
+	}
+
+	r = check_name(new_name);
+	if (r)
+		return r;
+
+	param->data_size = 0;
+	return dm_hash_rename(param->name, new_name);
+}
+
+static int do_suspend(struct dm_ioctl *param)
+{
+	int r = 0;
+	struct mapped_device *md;
+
+	md = find_device(param);
+	if (!md)
+		return -ENXIO;
+
+	if (!dm_suspended(md))
+		r = dm_suspend(md);
+
+	if (!r)
+		r = __dev_status(md, param);
+
+	dm_put(md);
+	return r;
+}
+
+static int do_resume(struct dm_ioctl *param)
+{
+	int r = 0;
+	struct hash_cell *hc;
+	struct mapped_device *md;
+	struct dm_table *new_map;
+
+	down_write(&_hash_lock);
+
+	hc = __find_device_hash_cell(param);
+	if (!hc) {
+		DMWARN("device doesn't appear to be in the dev hash table.");
+		up_write(&_hash_lock);
+		return -ENXIO;
+	}
+
+	md = hc->md;
+	dm_get(md);
+
+	new_map = hc->new_map;
+	hc->new_map = NULL;
+	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+	up_write(&_hash_lock);
+
+	/* Do we need to load a new map ? */
+	if (new_map) {
+		/* Suspend if it isn't already suspended */
+		if (!dm_suspended(md))
+			dm_suspend(md);
+
+		r = dm_swap_table(md, new_map);
+		if (r) {
+			dm_put(md);
+			dm_table_put(new_map);
+			return r;
+		}
+
+		if (dm_table_get_mode(new_map) & FMODE_WRITE)
+			set_device_ro(dm_kdev(md), 0);
+		else
+			set_device_ro(dm_kdev(md), 1);
+
+		dm_table_put(new_map);
+	}
+
+	if (dm_suspended(md))
+		r = dm_resume(md);
+
+	if (!r)
+		r = __dev_status(md, param);
+
+	dm_put(md);
+	return r;
+}
+
+/*
+ * Set or unset the suspension state of a device.
+ * If the device already is in the requested state we just return its status.
+ */
+static int dev_suspend(struct dm_ioctl *param, size_t param_size)
+{
+	if (param->flags & DM_SUSPEND_FLAG)
+		return do_suspend(param);
+
+	return do_resume(param);
+}
+
+/*
+ * Copies device info back to user space, used by
+ * the create and info ioctls.
+ */
+static int dev_status(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct mapped_device *md;
+
+	md = find_device(param);
+	if (!md)
+		return -ENXIO;
+
+	r = __dev_status(md, param);
+	dm_put(md);
+	return r;
+}
+
+/*
+ * Build up the status struct for each target
+ */
+static void retrieve_status(struct dm_table *table, struct dm_ioctl *param,
+			    size_t param_size)
+{
+	unsigned int i, num_targets;
+	struct dm_target_spec *spec;
+	char *outbuf, *outptr;
+	status_type_t type;
+	size_t remaining, len, used = 0;
+
+	outptr = outbuf = get_result_buffer(param, param_size, &len);
+
+	if (param->flags & DM_STATUS_TABLE_FLAG)
+		type = STATUSTYPE_TABLE;
+	else
+		type = STATUSTYPE_INFO;
+
+	/* Get all the target info */
+	num_targets = dm_table_get_num_targets(table);
+	for (i = 0; i < num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(table, i);
+
+		remaining = len - (outptr - outbuf);
+		if (remaining < sizeof(struct dm_target_spec)) {
+			param->flags |= DM_BUFFER_FULL_FLAG;
+			break;
+		}
+
+		spec = (struct dm_target_spec *) outptr;
+
+		spec->status = 0;
+		spec->sector_start = ti->begin;
+		spec->length = ti->len;
+		strncpy(spec->target_type, ti->type->name,
+			sizeof(spec->target_type));
+
+		outptr += sizeof(struct dm_target_spec);
+		remaining = len - (outptr - outbuf);
+
+		/* Get the status/table string from the target driver */
+		if (ti->type->status) {
+			if (ti->type->status(ti, type, outptr, remaining)) {
+				param->flags |= DM_BUFFER_FULL_FLAG;
+				break;
+			}
+		} else
+			outptr[0] = '\0';
+
+		outptr += strlen(outptr) + 1;
+		used = param->data_start + (outptr - outbuf);
+
+		outptr = align_ptr(outptr);
+		spec->next = outptr - outbuf;
+	}
+
+	if (used)
+		param->data_size = used;
+
+	param->target_count = num_targets;
+}
+
+/*
+ * Wait for a device to report an event
+ */
+static int dev_wait(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct mapped_device *md;
+	struct dm_table *table;
+	DECLARE_WAITQUEUE(wq, current);
+
+	md = find_device(param);
+	if (!md)
+		return -ENXIO;
+
+	/*
+	 * Wait for a notification event
+	 */
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (!dm_add_wait_queue(md, &wq, param->event_nr)) {
+		schedule();
+		dm_remove_wait_queue(md, &wq);
+	}
+	set_current_state(TASK_RUNNING);
+
+	/*
+	 * The userland program is going to want to know what
+	 * changed to trigger the event, so we may as well tell
+	 * him and save an ioctl.
+	 */
+	r = __dev_status(md, param);
+	if (r)
+		goto out;
+
+	table = dm_get_table(md);
+	if (table) {
+		retrieve_status(table, param, param_size);
+		dm_table_put(table);
+	}
+
+ out:
+	dm_put(md);
+	return r;
+}
+
+static inline int get_mode(struct dm_ioctl *param)
+{
+	int mode = FMODE_READ | FMODE_WRITE;
+
+	if (param->flags & DM_READONLY_FLAG)
+		mode = FMODE_READ;
+
+	return mode;
+}
+
+static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
+		       struct dm_target_spec **spec, char **target_params)
+{
+	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
+	*target_params = (char *) (*spec + 1);
+
+	if (*spec < (last + 1) || ((void *) (*spec + 1)) > end)
+		return -EINVAL;
+
+	return invalid_str(*target_params, end);
+}
+
+static int populate_table(struct dm_table *table, struct dm_ioctl *param,
+			  size_t param_size)
+{
+	int r;
+	unsigned int i = 0;
+	struct dm_target_spec *spec = (struct dm_target_spec *) param;
+	uint32_t next = param->data_start;
+	void *end = (void *) param + param_size;
+	char *target_params;
+
+	if (!param->target_count) {
+		DMWARN("populate_table: no targets specified");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < param->target_count; i++) {
+
+		r = next_target(spec, next, end, &spec, &target_params);
+		if (r) {
+			DMWARN("unable to find target");
+			return r;
+		}
+
+		r = dm_table_add_target(table, spec->target_type,
+					(sector_t) spec->sector_start,
+					(sector_t) spec->length,
+					target_params);
+		if (r) {
+			DMWARN("error adding target to table");
+			return r;
+		}
+
+		next = spec->next;
+	}
+
+	return dm_table_complete(table);
+}
+
+static int table_load(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct hash_cell *hc;
+	struct dm_table *t;
+
+	r = dm_table_create(&t, get_mode(param), param->target_count);
+	if (r)
+		return r;
+
+	r = populate_table(t, param, param_size);
+	if (r) {
+		dm_table_put(t);
+		return r;
+	}
+
+	down_write(&_hash_lock);
+	hc = __find_device_hash_cell(param);
+	if (!hc) {
+		DMWARN("device doesn't appear to be in the dev hash table.");
+		up_write(&_hash_lock);
+		return -ENXIO;
+	}
+
+	if (hc->new_map)
+		dm_table_put(hc->new_map);
+	hc->new_map = t;
+	param->flags |= DM_INACTIVE_PRESENT_FLAG;
+
+	r = __dev_status(hc->md, param);
+	up_write(&_hash_lock);
+	return r;
+}
+
+static int table_clear(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct hash_cell *hc;
+
+	down_write(&_hash_lock);
+
+	hc = __find_device_hash_cell(param);
+	if (!hc) {
+		DMWARN("device doesn't appear to be in the dev hash table.");
+		up_write(&_hash_lock);
+		return -ENXIO;
+	}
+
+	if (hc->new_map) {
+		dm_table_put(hc->new_map);
+		hc->new_map = NULL;
+	}
+
+	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+	r = __dev_status(hc->md, param);
+	up_write(&_hash_lock);
+	return r;
+}
+
+/*
+ * Retrieves a list of devices used by a particular dm device.
+ */
+static void retrieve_deps(struct dm_table *table, struct dm_ioctl *param,
+			  size_t param_size)
+{
+	unsigned int count = 0;
+	struct list_head *tmp;
+	size_t len, needed;
+	struct dm_target_deps *deps;
+
+	deps = get_result_buffer(param, param_size, &len);
+
+	/*
+	 * Count the devices.
+	 */
+	list_for_each(tmp, dm_table_get_devices(table))
+		count++;
+
+	/*
+	 * Check we have enough space.
+	 */
+	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+	if (len < needed) {
+		param->flags |= DM_BUFFER_FULL_FLAG;
+		return;
+	}
+
+	/*
+	 * Fill in the devices.
+	 */
+	deps->count = count;
+	count = 0;
+	list_for_each(tmp, dm_table_get_devices(table)) {
+		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		deps->dev[count++] = dd->bdev->bd_dev;
+	}
+
+	param->data_size = param->data_start + needed;
+}
+
+static int table_deps(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct mapped_device *md;
+	struct dm_table *table;
+
+	md = find_device(param);
+	if (!md)
+		return -ENXIO;
+
+	r = __dev_status(md, param);
+	if (r)
+		goto out;
+
+	table = dm_get_table(md);
+	if (table) {
+		retrieve_deps(table, param, param_size);
+		dm_table_put(table);
+	}
+
+ out:
+	dm_put(md);
+	return r;
+}
+
+/*
+ * Return the status of a device as a text string for each
+ * target.
+ */
+static int table_status(struct dm_ioctl *param, size_t param_size)
+{
+	int r;
+	struct mapped_device *md;
+	struct dm_table *table;
+
+	md = find_device(param);
+	if (!md)
+		return -ENXIO;
+
+	r = __dev_status(md, param);
+	if (r)
+		goto out;
+
+	table = dm_get_table(md);
+	if (table) {
+		retrieve_status(table, param, param_size);
+		dm_table_put(table);
+	}
+
+ out:
+	dm_put(md);
+	return r;
+}
+
+/*-----------------------------------------------------------------
+ * Implementation of open/close/ioctl on the special char
+ * device.
+ *---------------------------------------------------------------*/
+static ioctl_fn lookup_ioctl(unsigned int cmd)
+{
+	static struct {
+		int cmd;
+		ioctl_fn fn;
+	} _ioctls[] = {
+		{DM_VERSION_CMD, NULL},	/* version is dealt with elsewhere */
+		{DM_REMOVE_ALL_CMD, remove_all},
+		{DM_LIST_DEVICES_CMD, list_devices},
+
+		{DM_DEV_CREATE_CMD, dev_create},
+		{DM_DEV_REMOVE_CMD, dev_remove},
+		{DM_DEV_RENAME_CMD, dev_rename},
+		{DM_DEV_SUSPEND_CMD, dev_suspend},
+		{DM_DEV_STATUS_CMD, dev_status},
+		{DM_DEV_WAIT_CMD, dev_wait},
+
+		{DM_TABLE_LOAD_CMD, table_load},
+		{DM_TABLE_CLEAR_CMD, table_clear},
+		{DM_TABLE_DEPS_CMD, table_deps},
+		{DM_TABLE_STATUS_CMD, table_status}
+	};
+
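+	/*
+	 * cmd indexes the array directly, so the entries above must
+	 * stay in command-number order.
+	 */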
+	return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
+}
+
+/*
+ * As well as checking the version compatibility this always
+ * copies the kernel interface version out.
+ */
+static int check_version(unsigned int cmd, struct dm_ioctl *user)
+{
+	uint32_t version[3];
+	int r = 0;
+
+	if (copy_from_user(version, user->version, sizeof(version)))
+		return -EFAULT;
+
+	if ((DM_VERSION_MAJOR != version[0]) ||
+	    (DM_VERSION_MINOR < version[1])) {
+		DMWARN("ioctl interface mismatch: "
+		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
+		       DM_VERSION_PATCHLEVEL,
+		       version[0], version[1], version[2], cmd);
+		r = -EINVAL;
+	}
+
+	/*
+	 * Fill in the kernel version.
+	 */
+	version[0] = DM_VERSION_MAJOR;
+	version[1] = DM_VERSION_MINOR;
+	version[2] = DM_VERSION_PATCHLEVEL;
+	if (copy_to_user(user->version, version, sizeof(version)))
+		return -EFAULT;
+
+	return r;
+}
+
+static void free_params(struct dm_ioctl *param)
+{
+	vfree(param);
+}
+
+static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param)
+{
+	struct dm_ioctl tmp, *dmi;
+
+	if (copy_from_user(&tmp, user, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.data_size < sizeof(tmp))
+		return -EINVAL;
+
+	dmi = (struct dm_ioctl *) vmalloc(tmp.data_size);
+	if (!dmi)
+		return -ENOMEM;
+
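+	/* now copy the whole payload, header included, into kernel space */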
+	if (copy_from_user(dmi, user, tmp.data_size)) {
+		vfree(dmi);
+		return -EFAULT;
+	}
+
+	*param = dmi;
+	return 0;
+}
+
+static int validate_params(uint cmd, struct dm_ioctl *param)
+{
+	/* Always clear this flag */
+	param->flags &= ~DM_BUFFER_FULL_FLAG;
+
+	/* Ignores parameters */
+	if (cmd == DM_REMOVE_ALL_CMD || cmd == DM_LIST_DEVICES_CMD)
+		return 0;
+
+	/* Unless creating, either name or uuid but not both */
+	if (cmd != DM_DEV_CREATE_CMD) {
+		if ((!*param->uuid && !*param->name) ||
+		    (*param->uuid && *param->name)) {
+			DMWARN("one of name or uuid must be supplied, cmd(%u)",
+			       cmd);
+			return -EINVAL;
+		}
+	}
+
+	/* Ensure strings are terminated */
+	param->name[DM_NAME_LEN - 1] = '\0';
+	param->uuid[DM_UUID_LEN - 1] = '\0';
+
+	return 0;
+}
+
+static int ctl_ioctl(struct inode *inode, struct file *file,
+		     uint command, ulong u)
+{
+	int r = 0;
+	unsigned int cmd;
+	struct dm_ioctl *param;
+	struct dm_ioctl *user = (struct dm_ioctl *) u;
+	ioctl_fn fn = NULL;
+	size_t param_size;
+
+	/* only root can play with this */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (_IOC_TYPE(command) != DM_IOCTL)
+		return -ENOTTY;
+
+	cmd = _IOC_NR(command);
+
+	/*
+	 * Check the interface version passed in.  This also
+	 * writes out the kernel's interface version.
+	 */
+	r = check_version(cmd, user);
+	if (r)
+		return r;
+
+	/*
+	 * Nothing more to do for the version command.
+	 */
+	if (cmd == DM_VERSION_CMD)
+		return 0;
+
+	fn = lookup_ioctl(cmd);
+	if (!fn) {
+		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+		return -ENOTTY;
+	}
+
+	/*
+	 * FIXME: I don't like this, we're trying to avoid low
+	 * memory issues when a device is suspended.
+	 */
+	current->flags |= PF_MEMALLOC;
+
+	/*
+	 * Copy the parameters into kernel space.
+	 */
+	r = copy_params(user, &param);
+	if (r) {
+		current->flags &= ~PF_MEMALLOC;
+		return r;
+	}
+
+	r = validate_params(cmd, param);
+	if (r)
+		goto out;
+
+	param_size = param->data_size;
+	param->data_size = sizeof(*param);
+	r = fn(param, param_size);
+
+	/*
+	 * Copy the results back to userland.
+	 */
+	if (!r && copy_to_user(user, param, param->data_size))
+		r = -EFAULT;
+
+ out:
+	free_params(param);
+	current->flags &= ~PF_MEMALLOC;
+	return r;
+}
+
+static struct file_operations _ctl_fops = {
+	.ioctl	 = ctl_ioctl,
+	.owner	 = THIS_MODULE,
+};
+
+static devfs_handle_t _ctl_handle;
+
+static struct miscdevice _dm_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name  = DM_NAME,
+	.fops  = &_ctl_fops
+};
+
+/*
+ * Create misc character device and link to DM_DIR/control.
+ */
+int __init dm_interface_init(void)
+{
+	int r;
+	char rname[64];
+
+	r = dm_hash_init();
+	if (r)
+		return r;
+
+	r = misc_register(&_dm_misc);
+	if (r) {
+		DMERR("misc_register failed for control device");
+		dm_hash_exit();
+		return r;
+	}
+
+	r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3,
+				sizeof rname - 3);
+	if (r == -ENOSYS)
+		goto done;	/* devfs not present */
+
+	if (r < 0) {
+		DMERR("devfs_generate_path failed for control device");
+		goto failed;
+	}
+
+	strncpy(rname + r, "../", 3);
+	r = devfs_mk_symlink(NULL, DM_DIR "/control",
+			     DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL);
+	if (r) {
+		DMERR("devfs_mk_symlink failed for control device");
+		goto failed;
+	}
+	devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle);
+
+      done:
+	DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
+	       DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
+	       DM_DRIVER_EMAIL);
+	return 0;
+
+      failed:
+	misc_deregister(&_dm_misc);
+	dm_hash_exit();
+	return r;
+}
+
+void dm_interface_exit(void)
+{
+	if (misc_deregister(&_dm_misc) < 0)
+		DMERR("misc_deregister failed for control device");
+
+	dm_hash_exit();
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-io.h linux-2.4.26-leo/drivers/md/dm-io.h
--- linux-2.4.26/drivers/md/dm-io.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-io.h	2004-06-22 21:04:34.000000000 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _DM_IO_H
+#define _DM_IO_H
+
+#include "dm.h"
+
+#include <linux/list.h>
+
+/* Move these to bitops.h eventually */
+/* Improved generic_fls algorithm (in 2.4 there is no generic_fls so far) */
+/* (c) 2002, D.Phillips and Sistina Software */
+/* Licensed under Version 2 of the GPL */
+
+static inline unsigned generic_fls8(unsigned n)
+{
+	return n & 0xf0 ?
+	    n & 0xc0 ? (n >> 7) + 7 : (n >> 5) + 5 :
+	    n & 0x0c ? (n >> 3) + 3 : n - ((n + 1) >> 2);
+}
+
+static inline unsigned generic_fls16(unsigned n)
+{
+	return n & 0xff00 ? generic_fls8(n >> 8) + 8 : generic_fls8(n);
+}
+
+static inline unsigned generic_fls32(unsigned n)
+{
+	return n & 0xffff0000 ? generic_fls16(n >> 16) + 16 : generic_fls16(n);
+}
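+/*
+ * Usual fls semantics, e.g. generic_fls32(0) == 0,
+ * generic_fls32(1) == 1 and generic_fls32(0x80000000) == 32.
+ */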
+
+/* FIXME make this configurable */
+#define DM_MAX_IO_REGIONS 8
+
+struct io_region {
+	kdev_t dev;
+	sector_t sector;
+	sector_t count;
+};
+
+
+/*
+ * 'error' is a bitset, with each bit indicating whether an error
+ * occurred doing io to the corresponding region.
+ */
+typedef void (*io_notify_fn)(unsigned int error, void *context);
+
+
+/*
+ * Before anyone uses the IO interface they should call
+ * dm_io_get(), specifying roughly how many pages they are
+ * expecting to perform io on concurrently.
+ *
+ * This function may block.
+ */
+int dm_io_get(unsigned int num_pages);
+void dm_io_put(unsigned int num_pages);
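+/*
+ * Illustrative only: a client expecting at most 16 pages of io in
+ * flight would bracket its use of the interface with:
+ *
+ *	r = dm_io_get(16);	(at initialisation, may block)
+ *	...
+ *	dm_io_put(16);		(at shutdown)
+ */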
+
+
+/*
+ * Synchronous IO.
+ *
+ * Please ensure that the rw flag in the next two functions is
+ * either READ or WRITE, i.e. we don't take READA.  Any
+ * regions with a zero count field will be ignored.
+ */
+int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
+	       struct page *pages, unsigned int offset,
+	       unsigned int *error_bits);
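+/*
+ * Illustrative only: a synchronous read of one region into 'pages',
+ * with per-region failures reported in 'errors':
+ *
+ *	unsigned int errors;
+ *	r = dm_io_sync(1, &where, READ, pages, 0, &errors);
+ */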
+
+
+/*
+ * Asynchronous IO.
+ *
+ * The 'where' array may be safely allocated on the stack since
+ * the function takes a copy.
+ */
+int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
+		struct page *pages, unsigned int offset,
+		io_notify_fn fn, void *context);
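+/*
+ * Illustrative sketch (names invented): an asynchronous write to a
+ * single region, with the result delivered to a callback:
+ *
+ *	static void write_done(unsigned int error_bits, void *context)
+ *	{
+ *		if (error_bits)
+ *			DMERR("mirror write failed");
+ *	}
+ *
+ *	struct io_region where = { dev, 0, 128 };	(128 sectors)
+ *	dm_io_async(1, &where, WRITE, pages, 0, write_done, context);
+ */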
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-linear.c linux-2.4.26-leo/drivers/md/dm-linear.c
--- linux-2.4.26/drivers/md/dm-linear.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-linear.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+/*
+ * Linear: maps a linear range of a device.
+ */
+struct linear_c {
+	struct dm_dev *dev;
+	sector_t start;
+};
+
+/*
+ * Construct a linear mapping: <dev_path> <offset>
+ */
+static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct linear_c *lc;
+
+	if (argc != 2) {
+		ti->error = "dm-linear: Invalid argument count";
+		return -EINVAL;
+	}
+
+	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
+	if (lc == NULL) {
+		ti->error = "dm-linear: Cannot allocate linear context";
+		return -ENOMEM;
+	}
+
+	if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) {
+		ti->error = "dm-linear: Invalid device sector";
+		goto bad;
+	}
+
+	if (dm_get_device(ti, argv[0], lc->start, ti->len,
+			  dm_table_get_mode(ti->table), &lc->dev)) {
+		ti->error = "dm-linear: Device lookup failed";
+		goto bad;
+	}
+
+	ti->private = lc;
+	return 0;
+
+      bad:
+	kfree(lc);
+	return -EINVAL;
+}
+
+static void linear_dtr(struct dm_target *ti)
+{
+	struct linear_c *lc = (struct linear_c *) ti->private;
+
+	dm_put_device(ti, lc->dev);
+	kfree(lc);
+}
+
+static int linear_map(struct dm_target *ti, struct buffer_head *bh, int rw,
+		      union map_info *map_context)
+{
+	struct linear_c *lc = (struct linear_c *) ti->private;
+
+	bh->b_rdev = lc->dev->dev;
+	bh->b_rsector = lc->start + (bh->b_rsector - ti->begin);
+
+	return 1;
+}
+
+static int linear_status(struct dm_target *ti, status_type_t type,
+			 char *result, unsigned int maxlen)
+{
+	struct linear_c *lc = (struct linear_c *) ti->private;
+	kdev_t kdev;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		kdev = to_kdev_t(lc->dev->bdev->bd_dev);
+		snprintf(result, maxlen, "%s " SECTOR_FORMAT,
+			 dm_kdevname(kdev), lc->start);
+		break;
+	}
+	return 0;
+}
+
+static struct target_type linear_target = {
+	.name   = "linear",
+	.module = THIS_MODULE,
+	.ctr    = linear_ctr,
+	.dtr    = linear_dtr,
+	.map    = linear_map,
+	.status = linear_status,
+};
+
+int __init dm_linear_init(void)
+{
+	int r = dm_register_target(&linear_target);
+
+	if (r < 0)
+		DMERR("linear: register failed %d", r);
+
+	return r;
+}
+
+void dm_linear_exit(void)
+{
+	int r = dm_unregister_target(&linear_target);
+
+	if (r < 0)
+		DMERR("linear: unregister failed %d", r);
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-log.c linux-2.4.26-leo/drivers/md/dm-log.c
--- linux-2.4.26/drivers/md/dm-log.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-log.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "dm-log.h"
+#include "dm-io.h"
+
+static LIST_HEAD(_log_types);
+static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
+
+int dm_register_dirty_log_type(struct dirty_log_type *type)
+{
+	spin_lock(&_lock);
+	type->use_count = 0;
+	if (type->module)
+		__MOD_INC_USE_COUNT(type->module);
+
+	list_add(&type->list, &_log_types);
+	spin_unlock(&_lock);
+
+	return 0;
+}
+
+int dm_unregister_dirty_log_type(struct dirty_log_type *type)
+{
+	spin_lock(&_lock);
+
+	if (type->use_count)
+		DMWARN("Attempt to unregister a log type that is still in use");
+	else {
+		list_del(&type->list);
+		if (type->module)
+			__MOD_DEC_USE_COUNT(type->module);
+	}
+
+	spin_unlock(&_lock);
+
+	return 0;
+}
+
+static struct dirty_log_type *get_type(const char *type_name)
+{
+	struct dirty_log_type *type;
+	struct list_head *tmp;
+
+	spin_lock(&_lock);
+	list_for_each (tmp, &_log_types) {
+		type = list_entry(tmp, struct dirty_log_type, list);
+		if (!strcmp(type_name, type->name)) {
+			type->use_count++;
+			spin_unlock(&_lock);
+			return type;
+		}
+	}
+
+	spin_unlock(&_lock);
+	return NULL;
+}
+
+static void put_type(struct dirty_log_type *type)
+{
+	spin_lock(&_lock);
+	type->use_count--;
+	spin_unlock(&_lock);
+}
+
+struct dirty_log *dm_create_dirty_log(const char *type_name, sector_t dev_size,
+				      unsigned int argc, char **argv)
+{
+	struct dirty_log_type *type;
+	struct dirty_log *log;
+
+	log = kmalloc(sizeof(*log), GFP_KERNEL);
+	if (!log)
+		return NULL;
+
+	type = get_type(type_name);
+	if (!type) {
+		kfree(log);
+		return NULL;
+	}
+
+	log->type = type;
+	if (type->ctr(log, dev_size, argc, argv)) {
+		kfree(log);
+		put_type(type);
+		return NULL;
+	}
+
+	return log;
+}
+
+void dm_destroy_dirty_log(struct dirty_log *log)
+{
+	log->type->dtr(log);
+	put_type(log->type);
+	kfree(log);
+}
+
+
+/*-----------------------------------------------------------------
+ * In-core log, i.e. trivial, non-persistent
+ *
+ * For now we'll keep this simple and just have 2 bitsets, one
+ * for clean/dirty, the other for sync/nosync.  The sync bitset
+ * will be freed when everything is in sync.
+ *
+ * FIXME: problems with a 64bit sector_t
+ *---------------------------------------------------------------*/
+struct core_log {
+	sector_t region_size;
+	unsigned int region_count;
+	unsigned long *clean_bits;
+	unsigned long *sync_bits;
+	unsigned long *recovering_bits;	/* FIXME: this seems excessive */
+
+	int sync_search;
+};
+
+#define BYTE_SHIFT 3
+
+static int core_ctr(struct dirty_log *log, sector_t dev_size,
+		    unsigned int argc, char **argv)
+{
+	struct core_log *clog;
+	sector_t region_size;
+	unsigned int region_count;
+	size_t bitset_size;
+
+	if (argc != 1) {
+		DMWARN("wrong number of arguments to core_log");
+		return -EINVAL;
+	}
+
+	if (sscanf(argv[0], SECTOR_FORMAT, &region_size) != 1) {
+		DMWARN("invalid region size string");
+		return -EINVAL;
+	}
+
+	region_count = dm_div_up(dev_size, region_size);
+
+	clog = kmalloc(sizeof(*clog), GFP_KERNEL);
+	if (!clog) {
+		DMWARN("couldn't allocate core log");
+		return -ENOMEM;
+	}
+
+	clog->region_size = region_size;
+	clog->region_count = region_count;
+
+	/*
+	 * Work out how many bytes we need to hold the bitset,
+	 * rounded up to a whole number of words.
+	 */
+	bitset_size = dm_round_up(region_count,
+				  sizeof(*clog->clean_bits) << BYTE_SHIFT);
+	bitset_size >>= BYTE_SHIFT;
+
+	clog->clean_bits = vmalloc(bitset_size);
+	if (!clog->clean_bits) {
+		DMWARN("couldn't allocate clean bitset");
+		kfree(clog);
+		return -ENOMEM;
+	}
+	memset(clog->clean_bits, -1, bitset_size);
+
+	clog->sync_bits = vmalloc(bitset_size);
+	if (!clog->sync_bits) {
+		DMWARN("couldn't allocate sync bitset");
+		vfree(clog->clean_bits);
+		kfree(clog);
+		return -ENOMEM;
+	}
+	memset(clog->sync_bits, 0, bitset_size);
+
+	clog->recovering_bits = vmalloc(bitset_size);
+	if (!clog->recovering_bits) {
+		DMWARN("couldn't allocate sync bitset");
+		vfree(clog->sync_bits);
+		vfree(clog->clean_bits);
+		kfree(clog);
+		return -ENOMEM;
+	}
+	memset(clog->recovering_bits, 0, bitset_size);
+	clog->sync_search = 0;
+	log->context = clog;
+	return 0;
+}
+
+static void core_dtr(struct dirty_log *log)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+	vfree(clog->clean_bits);
+	vfree(clog->sync_bits);
+	vfree(clog->recovering_bits);
+	kfree(clog);
+}
+
+static sector_t core_get_region_size(struct dirty_log *log)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+	return clog->region_size;
+}
+
+static int core_is_clean(struct dirty_log *log, region_t region)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+	return test_bit(region, clog->clean_bits);
+}
+
+static int core_in_sync(struct dirty_log *log, region_t region, int block)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+
+	return test_bit(region, clog->sync_bits) ? 1 : 0;
+}
+
+static int core_flush(struct dirty_log *log)
+{
+	/* no op */
+	return 0;
+}
+
+static void core_mark_region(struct dirty_log *log, region_t region)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+	clear_bit(region, clog->clean_bits);
+}
+
+static void core_clear_region(struct dirty_log *log, region_t region)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+	set_bit(region, clog->clean_bits);
+}
+
+static int core_get_resync_work(struct dirty_log *log, region_t *region)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+
+	if (clog->sync_search >= clog->region_count)
+		return 0;
+
+	do {
+		*region = find_next_zero_bit(clog->sync_bits,
+					     clog->region_count,
+					     clog->sync_search);
+		clog->sync_search = *region + 1;
+
+		if (*region == clog->region_count)
+			return 0;
+
+	} while (test_bit(*region, clog->recovering_bits));
+
+	set_bit(*region, clog->recovering_bits);
+	return 1;
+}
+
+static void core_complete_resync_work(struct dirty_log *log, region_t region,
+				      int success)
+{
+	struct core_log *clog = (struct core_log *) log->context;
+
+	clear_bit(region, clog->recovering_bits);
+	if (success)
+		set_bit(region, clog->sync_bits);
+}
+
+static struct dirty_log_type _core_type = {
+	.name = "core",
+
+	.ctr = core_ctr,
+	.dtr = core_dtr,
+	.get_region_size = core_get_region_size,
+	.is_clean = core_is_clean,
+	.in_sync = core_in_sync,
+	.flush = core_flush,
+	.mark_region = core_mark_region,
+	.clear_region = core_clear_region,
+	.get_resync_work = core_get_resync_work,
+	.complete_resync_work = core_complete_resync_work
+};
+
+__init int dm_dirty_log_init(void)
+{
+	int r;
+
+	r = dm_register_dirty_log_type(&_core_type);
+	if (r)
+		DMWARN("couldn't register core log");
+
+	return r;
+}
+
+void dm_dirty_log_exit(void)
+{
+	dm_unregister_dirty_log_type(&_core_type);
+}
+
+EXPORT_SYMBOL(dm_register_dirty_log_type);
+EXPORT_SYMBOL(dm_unregister_dirty_log_type);
+EXPORT_SYMBOL(dm_dirty_log_init);
+EXPORT_SYMBOL(dm_dirty_log_exit);
+EXPORT_SYMBOL(dm_create_dirty_log);
+EXPORT_SYMBOL(dm_destroy_dirty_log);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-log.h linux-2.4.26-leo/drivers/md/dm-log.h
--- linux-2.4.26/drivers/md/dm-log.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-log.h	2004-06-22 21:04:40.000000000 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_DIRTY_LOG
+#define DM_DIRTY_LOG
+
+#include "dm.h"
+
+typedef sector_t region_t;
+
+struct dirty_log_type;
+
+struct dirty_log {
+	struct dirty_log_type *type;
+	void *context;
+};
+
+struct dirty_log_type {
+	struct list_head list;
+	const char *name;
+	struct module *module;
+	unsigned int use_count;
+
+	int (*ctr)(struct dirty_log *log, sector_t dev_size,
+		   unsigned int argc, char **argv);
+	void (*dtr)(struct dirty_log *log);
+
+	/*
+	 * Retrieves the smallest size of region that the log can
+	 * deal with.
+	 */
+	sector_t (*get_region_size)(struct dirty_log *log);
+
+	/*
+	 * A predicate to say whether a region is clean or not.
+	 * May block.
+	 */
+	int (*is_clean)(struct dirty_log *log, region_t region);
+
+	/*
+	 *  Returns: 0, 1, -EWOULDBLOCK, < 0
+	 *
+	 * A predicate function to check whether the given
+	 * region is in sync.
+	 *
+	 * If -EWOULDBLOCK is returned the state of the region is
+	 * unknown, typically this will result in a read being
+	 * passed to a daemon to deal with, since a daemon is
+	 * allowed to block.
+	 */
+	int (*in_sync)(struct dirty_log *log, region_t region, int can_block);
+
+	/*
+	 * Flush the current log state (eg, to disk).  This
+	 * function may block.
+	 */
+	int (*flush)(struct dirty_log *log);
+
+	/*
+	 * Mark an area as clean or dirty.  These functions may
+	 * block, though for performance reasons blocking should
+	 * be extremely rare (eg, allocating another chunk of
+	 * memory for some reason).
+	 */
+	void (*mark_region)(struct dirty_log *log, region_t region);
+	void (*clear_region)(struct dirty_log *log, region_t region);
+
+	/*
+	 * Returns: <0 (error), 0 (no region), 1 (region)
+	 *
+	 * The mirrord will need to perform recovery on regions of
+	 * the mirror that are in the NOSYNC state.  This
+	 * function asks the log to tell the caller about the
+	 * next region that this machine should recover.
+	 *
+	 * Do not confuse this function with 'in_sync()': one
+	 * tells you if an area is synchronised, the other
+	 * assigns recovery work.
+	 */
+	int (*get_resync_work)(struct dirty_log *log, region_t *region);
+
+	/*
+	 * This notifies the log that the resync of an area has
+	 * been completed.  The log should then mark this region
+	 * as CLEAN.
+	 */
+	void (*complete_resync_work)(struct dirty_log *log,
+				     region_t region, int success);
+};
+
+int dm_register_dirty_log_type(struct dirty_log_type *type);
+int dm_unregister_dirty_log_type(struct dirty_log_type *type);
+
+
+/*
+ * Make sure you use these two functions, rather than calling
+ * type->constructor/destructor() directly.
+ */
+struct dirty_log *dm_create_dirty_log(const char *type_name, sector_t dev_size,
+				      unsigned int argc, char **argv);
+void dm_destroy_dirty_log(struct dirty_log *log);
+
+/*
+ * init/exit functions.
+ */
+int dm_dirty_log_init(void);
+void dm_dirty_log_exit(void);
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-raid1.c linux-2.4.26-leo/drivers/md/dm-raid1.c
--- linux-2.4.26/drivers/md/dm-raid1.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-raid1.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+#include "dm-io.h"
+#include "dm-log.h"
+#include "kcopyd.h"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+
+static struct dm_daemon _kmirrord;
+
+/*-----------------------------------------------------------------
+ * buffer lists:
+ *
+ * We play with singly linked lists of buffers, but we want to be
+ * careful to add new buffers to the back of the list, to avoid
+ * buffers being starved of attention.
+ *---------------------------------------------------------------*/
+struct buffer_list {
+	struct buffer_head *head;
+	struct buffer_head *tail;
+};
+
+static inline void buffer_list_init(struct buffer_list *bl)
+{
+	bl->head = bl->tail = NULL;
+}
+
+static inline void buffer_list_add(struct buffer_list *bl,
+				   struct buffer_head *bh)
+{
+	bh->b_reqnext = NULL;
+
+	if (bl->tail) {
+		bl->tail->b_reqnext = bh;
+		bl->tail = bh;
+	} else
+		bl->head = bl->tail = bh;
+}
+
+static struct buffer_head *buffer_list_pop(struct buffer_list *bl)
+{
+	struct buffer_head *bh = bl->head;
+
+	if (bh) {
+		bl->head = bl->head->b_reqnext;
+		if (!bl->head)
+			bl->tail = NULL;
+
+		bh->b_reqnext = NULL;
+	}
+
+	return bh;
+}
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *
+ * The mirror splits itself up into discrete regions.  Each
+ * region can be in one of three persistent states: clean, dirty,
+ * nosync (plus a transient recovering state while a resync is in
+ * flight).  There is no need to put clean regions in the hash.
+ *
+ * In addition to being present in the hash table a region _may_
+ * be present on one of three lists.
+ *
+ *   clean_regions: Regions on this list have no io pending to
+ *   them; they are in sync and we are no longer interested in
+ *   them: they are dull.  rh_update_states() will remove them
+ *   from the hash table.
+ *
+ *   quiesced_regions: These regions have been spun down, ready
+ *   for recovery.  rh_recovery_start() will remove regions from
+ *   this list and hand them to kmirrord, which will schedule the
+ *   recovery io with kcopyd.
+ *
+ *   recovered_regions: Regions that kcopyd has successfully
+ *   recovered.  rh_update_states() will now schedule any delayed
+ *   io, up the recovery_count, and remove the region from the
+ *   hash.
+ *
+ * There are two locks:
+ *   A rw spin lock 'hash_lock' protects just the hash table;
+ *   it is never held in write mode from interrupt context, which
+ *   I believe means that we only need to disable irqs when
+ *   taking a write lock.
+ *
+ *   An ordinary spin lock 'region_lock' that protects the three
+ *   lists in the region_hash, along with the 'state', 'list' and
+ *   'delayed_bhs' fields of the regions.  This is used from irq
+ *   context, so all other uses will have to suspend local irqs.
+ *---------------------------------------------------------------*/
+struct mirror_set;
+struct region_hash {
+	struct mirror_set *ms;
+	sector_t region_size;
+
+	/* holds persistent region state */
+	struct dirty_log *log;
+
+	/* hash table */
+	rwlock_t hash_lock;
+	mempool_t *region_pool;
+	unsigned int mask;
+	unsigned int nr_buckets;
+	struct list_head *buckets;
+
+	spinlock_t region_lock;
+	struct semaphore recovery_count;
+	struct list_head clean_regions;
+	struct list_head quiesced_regions;
+	struct list_head recovered_regions;
+};
+
+enum {
+	RH_CLEAN,
+	RH_DIRTY,
+	RH_NOSYNC,
+	RH_RECOVERING
+};
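+
+/*
+ * A sketch of the typical state transitions (see the named
+ * functions below):
+ *
+ *   RH_CLEAN -> RH_DIRTY        first write hits the region (rh_inc)
+ *   RH_DIRTY -> RH_CLEAN        last pending write drains (rh_dec)
+ *   RH_NOSYNC -> RH_RECOVERING  region handed out for resync
+ *                               (rh_recovery_prepare)
+ *   RH_RECOVERING -> removed    resync done; complete_resync_work()
+ *                               informs the log (rh_update_states)
+ */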
+
+struct region {
+	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
+	region_t key;
+	int state;
+
+	struct list_head hash_list;
+	struct list_head list;
+
+	atomic_t pending;
+	struct buffer_head *delayed_bhs;
+};
+
+/*
+ * Conversion fns
+ */
+static inline region_t bh_to_region(struct region_hash *rh,
+				    struct buffer_head *bh)
+{
+	return bh->b_rsector / rh->region_size;
+}
+
+static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
+{
+	return region * rh->region_size;
+}
+
+/* FIXME move this */
+static void queue_bh(struct mirror_set *ms, struct buffer_head *bh, int rw);
+
+static void *region_alloc(int gfp_mask, void *pool_data)
+{
+	return kmalloc(sizeof(struct region), gfp_mask);
+}
+
+static void region_free(void *element, void *pool_data)
+{
+	kfree(element);
+}
+
+#define MIN_REGIONS 64
+#define MAX_RECOVERY 1
+static int rh_init(struct region_hash *rh, struct mirror_set *ms,
+		   struct dirty_log *log, sector_t region_size,
+		   region_t nr_regions)
+{
+	unsigned int nr_buckets, max_buckets;
+	size_t i;
+
+	/*
+	 * Calculate a suitable number of buckets for our hash
+	 * table.
+	 */
+	max_buckets = nr_regions >> 6;
+	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
+		;
+	nr_buckets >>= 1;
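+	/*
+	 * i.e. the largest power of two strictly below
+	 * nr_regions / 64, but never fewer than 64 buckets.
+	 */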
+
+	rh->ms = ms;
+	rh->log = log;
+	rh->region_size = region_size;
+	rwlock_init(&rh->hash_lock);
+	rh->mask = nr_buckets - 1;
+	rh->nr_buckets = nr_buckets;
+
+	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+	if (!rh->buckets) {
+		DMERR("unable to allocate region hash memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_buckets; i++)
+		INIT_LIST_HEAD(rh->buckets + i);
+
+	spin_lock_init(&rh->region_lock);
+	sema_init(&rh->recovery_count, 0);
+	INIT_LIST_HEAD(&rh->clean_regions);
+	INIT_LIST_HEAD(&rh->quiesced_regions);
+	INIT_LIST_HEAD(&rh->recovered_regions);
+
+	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
+					 region_free, NULL);
+	if (!rh->region_pool) {
+		vfree(rh->buckets);
+		rh->buckets = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void rh_exit(struct region_hash *rh)
+{
+	unsigned int h;
+	struct region *reg;
+	struct list_head *tmp, *tmp2;
+
+	BUG_ON(!list_empty(&rh->quiesced_regions));
+	for (h = 0; h < rh->nr_buckets; h++) {
+		list_for_each_safe (tmp, tmp2, rh->buckets + h) {
+			reg = list_entry(tmp, struct region, hash_list);
+			BUG_ON(atomic_read(&reg->pending));
+			mempool_free(reg, rh->region_pool);
+		}
+	}
+
+	if (rh->log)
+		dm_destroy_dirty_log(rh->log);
+	if (rh->region_pool)
+		mempool_destroy(rh->region_pool);
+	vfree(rh->buckets);
+}
+
+#define RH_HASH_MULT 2654435387U
+
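+/*
+ * Multiplicative hashing: a large odd constant (in the spirit of
+ * Knuth's golden-ratio method) mixes the region number before the
+ * product is shifted and masked down to a bucket index.
+ */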
+static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
+{
+	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
+}
+
+static struct region *__rh_lookup(struct region_hash *rh, region_t region)
+{
+	struct region *reg;
+
+	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
+		if (reg->key == region)
+			return reg;
+
+	return NULL;
+}
+
+static void __rh_insert(struct region_hash *rh, struct region *reg)
+{
+	unsigned int h = rh_hash(rh, reg->key);
+	list_add(&reg->hash_list, rh->buckets + h);
+}
+
+static struct region *__rh_alloc(struct region_hash *rh, region_t region)
+{
+	struct region *reg, *nreg;
+
+	read_unlock(&rh->hash_lock);
+	nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
+	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+		RH_CLEAN : RH_NOSYNC;
+	nreg->rh = rh;
+	nreg->key = region;
+
+	INIT_LIST_HEAD(&nreg->list);
+
+	atomic_set(&nreg->pending, 0);
+	nreg->delayed_bhs = NULL;
+	write_lock_irq(&rh->hash_lock);
+
+	reg = __rh_lookup(rh, region);
+	if (reg)
+		/* we lost the race */
+		mempool_free(nreg, rh->region_pool);
+
+	else {
+		__rh_insert(rh, nreg);
+		if (nreg->state == RH_CLEAN) {
+			spin_lock_irq(&rh->region_lock);
+			list_add(&nreg->list, &rh->clean_regions);
+			spin_unlock_irq(&rh->region_lock);
+		}
+		reg = nreg;
+	}
+	write_unlock_irq(&rh->hash_lock);
+	read_lock(&rh->hash_lock);
+
+	return reg;
+}
+
+static inline struct region *__rh_find(struct region_hash *rh, region_t region)
+{
+	struct region *reg;
+
+	reg = __rh_lookup(rh, region);
+	if (!reg)
+		reg = __rh_alloc(rh, region);
+
+	return reg;
+}
+
+static int rh_state(struct region_hash *rh, region_t region, int may_block)
+{
+	int r;
+	struct region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	if (reg)
+		return reg->state;
+
+	/*
+	 * The region wasn't in the hash, so we fall back to the
+	 * dirty log.
+	 */
+	r = rh->log->type->in_sync(rh->log, region, may_block);
+
+	/*
+	 * Any error from the dirty log (eg. -EWOULDBLOCK) is
+	 * treated as RH_NOSYNC.
+	 */
+	return r == 1 ? RH_CLEAN : RH_NOSYNC;
+}
+
+static inline int rh_in_sync(struct region_hash *rh,
+			     region_t region, int may_block)
+{
+	int state = rh_state(rh, region, may_block);
+	return state == RH_CLEAN || state == RH_DIRTY;
+}
+
+static void dispatch_buffers(struct mirror_set *ms, struct buffer_head *bh)
+{
+	struct buffer_head *nbh;
+
+	while (bh) {
+		nbh = bh->b_reqnext;
+		queue_bh(ms, bh, WRITE);
+		bh = nbh;
+	}
+}
+
+static void rh_update_states(struct region_hash *rh)
+{
+	struct list_head *tmp, *tmp2;
+	struct region *reg;
+
+	LIST_HEAD(clean);
+	LIST_HEAD(recovered);
+
+	/*
+	 * Quickly grab the lists.
+	 */
+	write_lock_irq(&rh->hash_lock);
+	spin_lock(&rh->region_lock);
+	if (!list_empty(&rh->clean_regions)) {
+		list_splice(&rh->clean_regions, &clean);
+		INIT_LIST_HEAD(&rh->clean_regions);
+
+		list_for_each_entry (reg, &clean, list) {
+			rh->log->type->clear_region(rh->log, reg->key);
+			list_del(&reg->hash_list);
+		}
+	}
+
+	if (!list_empty(&rh->recovered_regions)) {
+		list_splice(&rh->recovered_regions, &recovered);
+		INIT_LIST_HEAD(&rh->recovered_regions);
+
+		list_for_each_entry (reg, &recovered, list)
+			list_del(&reg->hash_list);
+	}
+	spin_unlock(&rh->region_lock);
+	write_unlock_irq(&rh->hash_lock);
+
+	/*
+	 * All the regions on the recovered and clean lists have
+	 * now been pulled out of the system, so no need to do
+	 * any more locking.
+	 */
+	list_for_each_safe (tmp, tmp2, &recovered) {
+		reg = list_entry(tmp, struct region, list);
+
+		rh->log->type->complete_resync_work(rh->log, reg->key, 1);
+		dispatch_buffers(rh->ms, reg->delayed_bhs);
+		up(&rh->recovery_count);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	list_for_each_safe (tmp, tmp2, &clean) {
+		reg = list_entry(tmp, struct region, list);
+		mempool_free(reg, rh->region_pool);
+	}
+}
+
+static void rh_inc(struct region_hash *rh, region_t region)
+{
+	struct region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	if (reg->state == RH_CLEAN) {
+		rh->log->type->mark_region(rh->log, reg->key);
+
+		spin_lock_irq(&rh->region_lock);
+		reg->state = RH_DIRTY;
+		list_del_init(&reg->list);	/* take off the clean list */
+		spin_unlock_irq(&rh->region_lock);
+	}
+
+	atomic_inc(&reg->pending);
+	read_unlock(&rh->hash_lock);
+}
+
+static void rh_inc_pending(struct region_hash *rh, struct buffer_list *buffers)
+{
+	struct buffer_head *bh;
+
+	for (bh = buffers->head; bh; bh = bh->b_reqnext)
+		rh_inc(rh, bh_to_region(rh, bh));
+}
+
+static void rh_dec(struct region_hash *rh, region_t region)
+{
+	unsigned long flags;
+	struct region *reg;
+	int wake = 0;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	if (atomic_dec_and_test(&reg->pending)) {
+		spin_lock_irqsave(&rh->region_lock, flags);
+		if (reg->state == RH_RECOVERING) {
+			list_add_tail(&reg->list, &rh->quiesced_regions);
+		} else {
+			reg->state = RH_CLEAN;
+			list_add(&reg->list, &rh->clean_regions);
+		}
+		spin_unlock_irqrestore(&rh->region_lock, flags);
+		wake = 1;
+	}
+
+	if (wake)
+		dm_daemon_wake(&_kmirrord);
+}
+
+/*
+ * Starts quiescing a region in preparation for recovery.
+ */
+static int __rh_recovery_prepare(struct region_hash *rh)
+{
+	int r;
+	struct region *reg;
+	region_t region;
+
+	/*
+	 * Ask the dirty log what's next.
+	 */
+	r = rh->log->type->get_resync_work(rh->log, &region);
+	if (r <= 0)
+		return r;
+
+	/*
+	 * Get this region, and start it quiescing by setting the
+	 * recovering flag.
+	 */
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	spin_lock_irq(&rh->region_lock);
+	reg->state = RH_RECOVERING;
+
+	/*
+	 * If io is still pending the region isn't quiesced yet;
+	 * rh_dec() will move it to the quiesced list once the
+	 * last pending io drains.
+	 */
+	if (atomic_read(&reg->pending))
+		list_del_init(&reg->list);
+
+	else {
+		list_del_init(&reg->list);
+		list_add(&reg->list, &rh->quiesced_regions);
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	return 1;
+}
+
+static void rh_recovery_prepare(struct region_hash *rh)
+{
+	while (!down_trylock(&rh->recovery_count))
+		if (__rh_recovery_prepare(rh) <= 0) {
+			up(&rh->recovery_count);
+			break;
+		}
+}
+
+/*
+ * Returns any quiesced regions.
+ */
+static struct region *rh_recovery_start(struct region_hash *rh)
+{
+	struct region *reg = NULL;
+
+	spin_lock_irq(&rh->region_lock);
+	if (!list_empty(&rh->quiesced_regions)) {
+		reg = list_entry(rh->quiesced_regions.next,
+				 struct region, list);
+		list_del_init(&reg->list);	/* remove from the quiesced list */
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	return reg;
+}
+
+/* FIXME: success ignored for now */
+static void rh_recovery_end(struct region *reg, int success)
+{
+	struct region_hash *rh = reg->rh;
+
+	spin_lock_irq(&rh->region_lock);
+	list_add(&reg->list, &reg->rh->recovered_regions);
+	spin_unlock_irq(&rh->region_lock);
+
+	dm_daemon_wake(&_kmirrord);
+}
+
+static void rh_flush(struct region_hash *rh)
+{
+	rh->log->type->flush(rh->log);
+}
+
+static void rh_delay(struct region_hash *rh, struct buffer_head *bh)
+{
+	struct region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, bh_to_region(rh, bh));
+	bh->b_reqnext = reg->delayed_bhs;
+	reg->delayed_bhs = bh;
+	read_unlock(&rh->hash_lock);
+}
+
+static void rh_stop_recovery(struct region_hash *rh)
+{
+	int i;
+
+	/* wait for any recovering regions */
+	for (i = 0; i < MAX_RECOVERY; i++)
+		down(&rh->recovery_count);
+}
+
+static void rh_start_recovery(struct region_hash *rh)
+{
+	int i;
+
+	for (i = 0; i < MAX_RECOVERY; i++)
+		up(&rh->recovery_count);
+
+	dm_daemon_wake(&_kmirrord);
+}
+
+/*-----------------------------------------------------------------
+ * Mirror set structures.
+ *---------------------------------------------------------------*/
+struct mirror {
+	atomic_t error_count;
+	struct dm_dev *dev;
+	sector_t offset;
+};
+
+struct mirror_set {
+	struct dm_target *ti;
+	struct list_head list;
+	struct region_hash rh;
+	struct kcopyd_client *kcopyd_client;
+
+	spinlock_t lock;	/* protects the next two lists */
+	struct buffer_list reads;
+	struct buffer_list writes;
+
+	/* recovery */
+	region_t nr_regions;
+	region_t sync_count;
+
+	unsigned int nr_mirrors;
+	struct mirror mirror[0];
+};
+
+/*
+ * Every mirror should look like this one.
+ */
+#define DEFAULT_MIRROR 0
+
+/*
+ * This is yucky.  We squirrel the mirror_set struct away inside
+ * b_reqnext for write buffers.  This is safe since the bh
+ * doesn't get submitted to the lower levels of the block layer.
+ */
+static struct mirror_set *bh_get_ms(struct buffer_head *bh)
+{
+	return (struct mirror_set *) bh->b_reqnext;
+}
+
+static void bh_set_ms(struct buffer_head *bh, struct mirror_set *ms)
+{
+	bh->b_reqnext = (struct buffer_head *) ms;
+}
+
+/*-----------------------------------------------------------------
+ * Recovery.
+ *
+ * When a mirror is first activated we may find that some regions
+ * are in the no-sync state.  We have to recover these by
+ * recopying from the default mirror to all the others.
+ *---------------------------------------------------------------*/
+static void recovery_complete(int read_err, unsigned int write_err,
+			      void *context)
+{
+	struct region *reg = (struct region *) context;
+	struct mirror_set *ms = reg->rh->ms;
+
+	/* FIXME: better error handling */
+	rh_recovery_end(reg, !(read_err || write_err));
+	if (++ms->sync_count == ms->nr_regions)
+		/* the sync is complete */
+		dm_table_event(ms->ti->table);
+}
+
+static int recover(struct mirror_set *ms, struct region *reg)
+{
+	int r;
+	unsigned int i;
+	struct io_region from, to[ms->nr_mirrors - 1], *dest;
+	struct mirror *m;
+	unsigned int flags = 0;
+
+	/* fill in the source */
+	m = ms->mirror + DEFAULT_MIRROR;
+	from.dev = m->dev->dev;
+	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
+	if (reg->key == (ms->nr_regions - 1)) {
+		/*
+		 * The final region may be smaller than
+		 * region_size.
+		 */
+		from.count = ms->ti->len & (reg->rh->region_size - 1);
+		if (!from.count)
+			from.count = reg->rh->region_size;
+	} else
+		from.count = reg->rh->region_size;
+
+	/* fill in the destinations */
+	for (i = 1; i < ms->nr_mirrors; i++) {
+		m = ms->mirror + i;
+		dest = to + (i - 1);
+
+		dest->dev = m->dev->dev;
+		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
+		dest->count = from.count;
+	}
+
+	/* hand to kcopyd */
+	set_bit(KCOPYD_IGNORE_ERROR, &flags);
+	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
+			recovery_complete, reg);
+
+	return r;
+}
+
+static void do_recovery(struct mirror_set *ms)
+{
+	int r;
+	struct region *reg;
+
+	/*
+	 * Start quiescing some regions.
+	 */
+	rh_recovery_prepare(&ms->rh);
+
+	/*
+	 * Copy any already quiesced regions.
+	 */
+	while ((reg = rh_recovery_start(&ms->rh))) {
+		r = recover(ms, reg);
+		if (r)
+			rh_recovery_end(reg, 0);
+	}
+}
+
+/*-----------------------------------------------------------------
+ * Reads
+ *---------------------------------------------------------------*/
+static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
+{
+	/* FIXME: add read balancing */
+	return ms->mirror + DEFAULT_MIRROR;
+}
+
+/*
+ * remap a buffer to a particular mirror.
+ */
+static void map_buffer(struct mirror_set *ms,
+		       struct mirror *m, struct buffer_head *bh)
+{
+	bh->b_rdev = m->dev->dev;
+	bh->b_rsector = m->offset + (bh->b_rsector - ms->ti->begin);
+}
+
+static void do_reads(struct mirror_set *ms, struct buffer_list *reads)
+{
+	region_t region;
+	struct buffer_head *bh;
+	struct mirror *m;
+
+	while ((bh = buffer_list_pop(reads))) {
+		region = bh_to_region(&ms->rh, bh);
+
+		/*
+		 * We can only read balance if the region is in sync.
+		 */
+		if (rh_in_sync(&ms->rh, region, 0))
+			m = choose_mirror(ms, bh->b_rsector);
+		else
+			m = ms->mirror + DEFAULT_MIRROR;
+
+		map_buffer(ms, m, bh);
+		generic_make_request(READ, bh);
+	}
+}
+
+/*-----------------------------------------------------------------
+ * Writes.
+ *
+ * We do different things with the write io depending on the
+ * state of the region that it's in:
+ *
+ * SYNC: 	increment pending, use kcopyd to write to *all* mirrors
+ * RECOVERING:	delay the io until recovery completes
+ * NOSYNC:	increment pending, just write to the default mirror
+ *---------------------------------------------------------------*/
+static void write_callback(unsigned int error, void *context)
+{
+	unsigned int i;
+	int uptodate = 1;
+	struct buffer_head *bh = (struct buffer_head *) context;
+	struct mirror_set *ms;
+
+	ms = bh_get_ms(bh);
+	bh_set_ms(bh, NULL);
+
+	/*
+	 * NOTE: We don't decrement the pending count here,
+	 * instead it is done by the targets endio function.
+	 * This way we handle both writes to SYNC and NOSYNC
+	 * regions with the same code.
+	 */
+
+	if (error) {
+		/*
+		 * only error the io if all mirrors failed.
+		 * FIXME: bogus
+		 */
+		uptodate = 0;
+		for (i = 0; i < ms->nr_mirrors; i++)
+			if (!test_bit(i, &error)) {
+				uptodate = 1;
+				break;
+			}
+	}
+	bh->b_end_io(bh, uptodate);
+}
+
+static void do_write(struct mirror_set *ms, struct buffer_head *bh)
+{
+	unsigned int i;
+	struct io_region io[ms->nr_mirrors];
+	struct mirror *m;
+
+	for (i = 0; i < ms->nr_mirrors; i++) {
+		m = ms->mirror + i;
+
+		io[i].dev = m->dev->dev;
+		io[i].sector = m->offset + (bh->b_rsector - ms->ti->begin);
+		io[i].count = bh->b_size >> 9;
+	}
+
+	bh_set_ms(bh, ms);
+	dm_io_async(ms->nr_mirrors, io, WRITE, bh->b_page,
+		    (unsigned int) bh->b_data & ~PAGE_MASK, write_callback, bh);
+}
+
+static void do_writes(struct mirror_set *ms, struct buffer_list *writes)
+{
+	int state;
+	struct buffer_head *bh;
+	struct buffer_list sync, nosync, recover, *this_list = NULL;
+
+	if (!writes->head)
+		return;
+
+	/*
+	 * Classify each write.
+	 */
+	buffer_list_init(&sync);
+	buffer_list_init(&nosync);
+	buffer_list_init(&recover);
+
+	while ((bh = buffer_list_pop(writes))) {
+		state = rh_state(&ms->rh, bh_to_region(&ms->rh, bh), 1);
+		switch (state) {
+		case RH_CLEAN:
+		case RH_DIRTY:
+			this_list = &sync;
+			break;
+
+		case RH_NOSYNC:
+			this_list = &nosync;
+			break;
+
+		case RH_RECOVERING:
+			this_list = &recover;
+			break;
+		}
+
+		buffer_list_add(this_list, bh);
+	}
+
+	/*
+	 * Increment the pending counts for any regions that will
+	 * be written to (writes to recover regions are going to
+	 * be delayed).
+	 */
+	rh_inc_pending(&ms->rh, &sync);
+	rh_inc_pending(&ms->rh, &nosync);
+	rh_flush(&ms->rh);
+
+	/*
+	 * Dispatch io.
+	 */
+	while ((bh = buffer_list_pop(&sync)))
+		do_write(ms, bh);
+
+	while ((bh = buffer_list_pop(&recover)))
+		rh_delay(&ms->rh, bh);
+
+	while ((bh = buffer_list_pop(&nosync))) {
+		map_buffer(ms, ms->mirror + DEFAULT_MIRROR, bh);
+		generic_make_request(WRITE, bh);
+	}
+}
+
+/*-----------------------------------------------------------------
+ * kmirrord
+ *---------------------------------------------------------------*/
+static LIST_HEAD(_mirror_sets);
+static DECLARE_RWSEM(_mirror_sets_lock);
+
+static void do_mirror(struct mirror_set *ms)
+{
+	struct buffer_list reads, writes;
+
+	spin_lock(&ms->lock);
+	memcpy(&reads, &ms->reads, sizeof(reads));
+	buffer_list_init(&ms->reads);
+	memcpy(&writes, &ms->writes, sizeof(writes));
+	buffer_list_init(&ms->writes);
+	spin_unlock(&ms->lock);
+
+	rh_update_states(&ms->rh);
+	do_recovery(ms);
+	do_reads(ms, &reads);
+	do_writes(ms, &writes);
+	run_task_queue(&tq_disk);
+}
+
+static void do_work(void)
+{
+	struct mirror_set *ms;
+
+	down_read(&_mirror_sets_lock);
+	list_for_each_entry (ms, &_mirror_sets, list)
+		do_mirror(ms);
+	up_read(&_mirror_sets_lock);
+}
+
+/*-----------------------------------------------------------------
+ * Target functions
+ *---------------------------------------------------------------*/
+static struct mirror_set *alloc_context(unsigned int nr_mirrors,
+					sector_t region_size,
+					struct dm_target *ti,
+					struct dirty_log *dl)
+{
+	size_t len;
+	struct mirror_set *ms = NULL;
+
+	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
+		return NULL;
+
+	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
+
+	ms = kmalloc(len, GFP_KERNEL);
+	if (!ms) {
+		ti->error = "dm-mirror: Cannot allocate mirror context";
+		return NULL;
+	}
+
+	memset(ms, 0, len);
+	spin_lock_init(&ms->lock);
+
+	ms->ti = ti;
+	ms->nr_mirrors = nr_mirrors;
+	ms->nr_regions = dm_div_up(ti->len, region_size);
+
+	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
+		ti->error = "dm-mirror: Error creating dirty region hash";
+		kfree(ms);
+		return NULL;
+	}
+
+	return ms;
+}
+
+static void free_context(struct mirror_set *ms, struct dm_target *ti,
+			 unsigned int m)
+{
+	while (m--)
+		dm_put_device(ti, ms->mirror[m].dev);
+
+	rh_exit(&ms->rh);
+	kfree(ms);
+}
+
+static inline int _check_region_size(struct dm_target *ti, sector_t size)
+{
+	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
+		 size > ti->len);
+}
+
+static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+		      unsigned int mirror, char **argv)
+{
+	sector_t offset;
+
+	if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) {
+		ti->error = "dm-mirror: Invalid offset";
+		return -EINVAL;
+	}
+
+	if (dm_get_device(ti, argv[0], offset, ti->len,
+			  dm_table_get_mode(ti->table),
+			  &ms->mirror[mirror].dev)) {
+		ti->error = "dm-mirror: Device lookup failure";
+		return -ENXIO;
+	}
+
+	ms->mirror[mirror].offset = offset;
+
+	return 0;
+}
+
+static int add_mirror_set(struct mirror_set *ms)
+{
+	down_write(&_mirror_sets_lock);
+	list_add_tail(&ms->list, &_mirror_sets);
+	up_write(&_mirror_sets_lock);
+	dm_daemon_wake(&_kmirrord);
+
+	return 0;
+}
+
+static void del_mirror_set(struct mirror_set *ms)
+{
+	down_write(&_mirror_sets_lock);
+	list_del(&ms->list);
+	up_write(&_mirror_sets_lock);
+}
+
+/*
+ * Create dirty log: log_type #log_params <log_params>
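+ *
+ * e.g. (illustrative values) "core 1 1024" requests the in-memory
+ * "core" log with a single parameter, a region size of 1024
+ * sectors.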
+ */
+static struct dirty_log *create_dirty_log(struct dm_target *ti,
+					  unsigned int argc, char **argv,
+					  unsigned int *args_used)
+{
+	unsigned int param_count;
+	struct dirty_log *dl;
+
+	if (argc < 2) {
+		ti->error = "dm-mirror: Insufficient mirror log arguments";
+		return NULL;
+	}
+
+	if (sscanf(argv[1], "%u", &param_count) != 1 || param_count != 1) {
+		ti->error = "dm-mirror: Invalid mirror log argument count";
+		return NULL;
+	}
+
+	*args_used = 2 + param_count;
+
+	if (argc < *args_used) {
+		ti->error = "dm-mirror: Insufficient mirror log arguments";
+		return NULL;
+	}
+
+	dl = dm_create_dirty_log(argv[0], ti->len, param_count, argv + 2);
+	if (!dl) {
+		ti->error = "dm-mirror: Error creating mirror dirty log";
+		return NULL;
+	}
+
+	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
+		ti->error = "dm-mirror: Invalid region size";
+		dm_destroy_dirty_log(dl);
+		return NULL;
+	}
+
+	return dl;
+}
+
+/*
+ * Construct a mirror mapping:
+ *
+ * log_type #log_params <log_params>
+ * #mirrors [mirror_path offset]{2,}
+ *
+ * For now, #log_params = 1, log_type = "core"
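+ *
+ * An illustrative dmsetup table line (hypothetical devices):
+ *
+ *   0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0
+ *
+ * i.e. mirror the first 1GB of /dev/sda1 and /dev/sdb1, using
+ * the core log with 1024-sector regions.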
+ *
+ */
+#define DM_IO_PAGES 64
+static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	int r;
+	unsigned int nr_mirrors, m, args_used;
+	struct mirror_set *ms;
+	struct dirty_log *dl;
+
+	dl = create_dirty_log(ti, argc, argv, &args_used);
+	if (!dl)
+		return -EINVAL;
+
+	argv += args_used;
+	argc -= args_used;
+
+	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
+	    nr_mirrors < 2) {
+		ti->error = "dm-mirror: Invalid number of mirrors";
+		dm_destroy_dirty_log(dl);
+		return -EINVAL;
+	}
+
+	argv++, argc--;
+
+	if (argc != nr_mirrors * 2) {
+		ti->error = "dm-mirror: Wrong number of mirror arguments";
+		dm_destroy_dirty_log(dl);
+		return -EINVAL;
+	}
+
+	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
+	if (!ms) {
+		dm_destroy_dirty_log(dl);
+		return -ENOMEM;
+	}
+
+	/* Get the mirror parameter sets */
+	for (m = 0; m < nr_mirrors; m++) {
+		r = get_mirror(ms, ti, m, argv);
+		if (r) {
+			free_context(ms, ti, m);
+			return r;
+		}
+		argv += 2;
+		argc -= 2;
+	}
+
+	ti->private = ms;
+
+	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+	if (r) {
+		free_context(ms, ti, ms->nr_mirrors);
+		return r;
+	}
+
+	add_mirror_set(ms);
+	return 0;
+}
+
+static void mirror_dtr(struct dm_target *ti)
+{
+	struct mirror_set *ms = (struct mirror_set *) ti->private;
+
+	del_mirror_set(ms);
+	kcopyd_client_destroy(ms->kcopyd_client);
+	free_context(ms, ti, ms->nr_mirrors);
+}
+
+static void queue_bh(struct mirror_set *ms, struct buffer_head *bh, int rw)
+{
+	int wake = 0;
+	struct buffer_list *bl;
+
+	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
+	spin_lock(&ms->lock);
+	wake = !(bl->head);
+	buffer_list_add(bl, bh);
+	spin_unlock(&ms->lock);
+
+	if (wake)
+		dm_daemon_wake(&_kmirrord);
+}
+
+/*
+ * Mirror mapping function
+ */
+static int mirror_map(struct dm_target *ti, struct buffer_head *bh,
+		      int rw, union map_info *map_context)
+{
+	int r;
+	struct mirror *m;
+	struct mirror_set *ms = ti->private;
+
+	/* FIXME: nasty hack, 32 bit sector_t only */
+	map_context->ll = bh->b_rsector / ms->rh.region_size;
+
+	if (rw == WRITE) {
+		queue_bh(ms, bh, rw);
+		return 0;
+	}
+
+	r = ms->rh.log->type->in_sync(ms->rh.log, bh_to_region(&ms->rh, bh), 0);
+	if (r < 0 && r != -EWOULDBLOCK)
+		return r;
+
+	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
+		r = 0;
+
+	/*
+	 * We don't want to fast track a recovery just for a read
+	 * ahead.  So we just let it silently fail.
+	 * FIXME: get rid of this.
+	 */
+	if (!r && rw == READA)
+		return -EIO;
+
+	if (!r) {
+		/* Pass this io over to the daemon */
+		queue_bh(ms, bh, rw);
+		return 0;
+	}
+
+	m = choose_mirror(ms, bh->b_rsector);
+	if (!m)
+		return -EIO;
+
+	map_buffer(ms, m, bh);
+	return 1;
+}
+
+static int mirror_end_io(struct dm_target *ti, struct buffer_head *bh,
+			 int rw, int error, union map_info *map_context)
+{
+	struct mirror_set *ms = (struct mirror_set *) ti->private;
+	region_t region = map_context->ll;
+
+	/*
+	 * We need to dec pending if this was a write.
+	 */
+	if (rw == WRITE)
+		rh_dec(&ms->rh, region);
+
+	return 0;
+}
+
+static void mirror_suspend(struct dm_target *ti)
+{
+	struct mirror_set *ms = (struct mirror_set *) ti->private;
+	rh_stop_recovery(&ms->rh);
+}
+
+static void mirror_resume(struct dm_target *ti)
+{
+	struct mirror_set *ms = (struct mirror_set *) ti->private;
+	rh_start_recovery(&ms->rh);
+}
+
+static int mirror_status(struct dm_target *ti, status_type_t type,
+			 char *result, unsigned int maxlen)
+{
+	unsigned int m, sz = 0;
+	struct mirror_set *ms = (struct mirror_set *) ti->private;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		sz += snprintf(result + sz, maxlen - sz, "%d ", ms->nr_mirrors);
+
+		for (m = 0; m < ms->nr_mirrors; m++)
+			sz += snprintf(result + sz, maxlen - sz, "%s ",
+				       dm_kdevname(ms->mirror[m].dev->dev));
+
+		sz += snprintf(result + sz, maxlen - sz, "%lu/%lu",
+			       ms->sync_count, ms->nr_regions);
+		break;
+
+	case STATUSTYPE_TABLE:
+		sz += snprintf(result + sz, maxlen - sz,
+			       "%s 1 " SECTOR_FORMAT " %d ",
+			       ms->rh.log->type->name, ms->rh.region_size,
+			       ms->nr_mirrors);
+
+		for (m = 0; m < ms->nr_mirrors; m++)
+			sz += snprintf(result + sz, maxlen - sz, "%s %ld ",
+				       dm_kdevname(ms->mirror[m].dev->dev),
+				       ms->mirror[m].offset);
+	}
+
+	return 0;
+}
+
+static struct target_type mirror_target = {
+	.name	 = "mirror",
+	.module	 = THIS_MODULE,
+	.ctr	 = mirror_ctr,
+	.dtr	 = mirror_dtr,
+	.map	 = mirror_map,
+	.end_io	 = mirror_end_io,
+	.suspend = mirror_suspend,
+	.resume	 = mirror_resume,
+	.status	 = mirror_status,
+};
+
+static int __init dm_mirror_init(void)
+{
+	int r;
+
+	r = dm_dirty_log_init();
+	if (r)
+		return r;
+
+	r = dm_daemon_start(&_kmirrord, "kmirrord", do_work);
+	if (r) {
+		DMERR("couldn't start kmirrord");
+		dm_dirty_log_exit();
+		return r;
+	}
+
+	r = dm_register_target(&mirror_target);
+	if (r < 0) {
+		DMERR("%s: Failed to register mirror target",
+		      mirror_target.name);
+		dm_dirty_log_exit();
+		dm_daemon_stop(&_kmirrord);
+	}
+
+	return r;
+}
+
+static void __exit dm_mirror_exit(void)
+{
+	int r;
+
+	r = dm_unregister_target(&mirror_target);
+	if (r < 0)
+		DMERR("%s: unregister failed %d", mirror_target.name, r);
+
+	dm_daemon_stop(&_kmirrord);
+	dm_dirty_log_exit();
+}
+
+/* Module hooks */
+module_init(dm_mirror_init);
+module_exit(dm_mirror_exit);
+
+MODULE_DESCRIPTION(DM_NAME " mirror target");
+MODULE_AUTHOR("Heinz Mauelshagen <mge@sistina.com>");
+MODULE_LICENSE("GPL");
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-snapshot.c linux-2.4.26-leo/drivers/md/dm-snapshot.c
--- linux-2.4.26/drivers/md/dm-snapshot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-snapshot.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,1237 @@
+/*
+ * dm-snapshot.c
+ *
+ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/config.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/device-mapper.h>
+#include <linux/vmalloc.h>
+
+#include "dm-snapshot.h"
+#include "kcopyd.h"
+
+/*
+ * FIXME: Remove this before release.
+ */
+#if 0
+#define DMDEBUG(x...) DMWARN(x)
+#else
+#define DMDEBUG(x...)
+#endif
+
+/*
+ * The percentage increment we will wake up users at
+ */
+#define WAKE_UP_PERCENT 5
+
+/*
+ * kcopyd priority of snapshot operations
+ */
+#define SNAPSHOT_COPY_PRIORITY 2
+
+/*
+ * Each snapshot reserves this many pages for io
+ * FIXME: calculate this
+ */
+#define SNAPSHOT_PAGES 256
+
+struct pending_exception {
+	struct exception e;
+
+	/*
+	 * Origin buffers waiting for this to complete are held
+	 * in a list (using b_reqnext).
+	 */
+	struct buffer_head *origin_bhs;
+	struct buffer_head *snapshot_bhs;
+
+	/*
+	 * Other pending_exceptions that are processing this
+	 * chunk.  When this list is empty, we know we can
+	 * complete the origins.
+	 */
+	struct list_head siblings;
+
+	/* Pointer back to snapshot context */
+	struct dm_snapshot *snap;
+
+	/*
+	 * 1 indicates the exception has already been sent to
+	 * kcopyd.
+	 */
+	int started;
+};
+
+/*
+ * Slab caches and a mempool used to allocate exceptions and
+ * pending exceptions
+ */
+static kmem_cache_t *exception_cache;
+static kmem_cache_t *pending_cache;
+static mempool_t *pending_pool;
+
+/*
+ * One of these per registered origin, held in the snapshot_origins hash
+ */
+struct origin {
+	/* The origin device */
+	kdev_t dev;
+
+	struct list_head hash_list;
+
+	/* List of snapshots for this origin */
+	struct list_head snapshots;
+};
+
+/*
+ * Size of the hash table for origin volumes. If we make this
+ * the size of the minors list then it should be nearly perfect
+ */
+#define ORIGIN_HASH_SIZE 256
+#define ORIGIN_MASK      0xFF
+static struct list_head *_origins;
+static struct rw_semaphore _origins_lock;
+
+static int init_origin_hash(void)
+{
+	int i;
+
+	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+			   GFP_KERNEL);
+	if (!_origins) {
+		DMERR("Device mapper: Snapshot: unable to allocate memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+		INIT_LIST_HEAD(_origins + i);
+	init_rwsem(&_origins_lock);
+
+	return 0;
+}
+
+static void exit_origin_hash(void)
+{
+	kfree(_origins);
+}
+
+static inline unsigned int origin_hash(kdev_t dev)
+{
+	return MINOR(dev) & ORIGIN_MASK;
+}
+
+static struct origin *__lookup_origin(kdev_t origin)
+{
+	struct list_head *slist;
+	struct list_head *ol;
+	struct origin *o;
+
+	ol = &_origins[origin_hash(origin)];
+	list_for_each(slist, ol) {
+		o = list_entry(slist, struct origin, hash_list);
+
+		if (o->dev == origin)
+			return o;
+	}
+
+	return NULL;
+}
+
+static void __insert_origin(struct origin *o)
+{
+	struct list_head *sl = &_origins[origin_hash(o->dev)];
+	list_add_tail(&o->hash_list, sl);
+}
+
+/*
+ * Make a note of the snapshot and its origin so we can look it
+ * up when the origin has a write on it.
+ */
+static int register_snapshot(struct dm_snapshot *snap)
+{
+	struct origin *o;
+	kdev_t dev = snap->origin->dev;
+
+	down_write(&_origins_lock);
+	o = __lookup_origin(dev);
+
+	if (!o) {
+		/* New origin */
+		o = kmalloc(sizeof(*o), GFP_KERNEL);
+		if (!o) {
+			up_write(&_origins_lock);
+			return -ENOMEM;
+		}
+
+		/* Initialise the struct */
+		INIT_LIST_HEAD(&o->snapshots);
+		o->dev = dev;
+
+		__insert_origin(o);
+	}
+
+	list_add_tail(&snap->list, &o->snapshots);
+
+	up_write(&_origins_lock);
+	return 0;
+}
+
+static void unregister_snapshot(struct dm_snapshot *s)
+{
+	struct origin *o;
+
+	down_write(&_origins_lock);
+	o = __lookup_origin(s->origin->dev);
+
+	list_del(&s->list);
+	if (list_empty(&o->snapshots)) {
+		list_del(&o->hash_list);
+		kfree(o);
+	}
+
+	up_write(&_origins_lock);
+}
+
+/*
+ * Implementation of the exception hash tables.
+ */
+static int init_exception_table(struct exception_table *et, uint32_t size)
+{
+	unsigned int i;
+
+	et->hash_mask = size - 1;
+	et->table = vcalloc(size, sizeof(struct list_head));
+	if (!et->table)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++)
+		INIT_LIST_HEAD(et->table + i);
+
+	return 0;
+}
+
+static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+{
+	struct list_head *slot, *entry, *temp;
+	struct exception *ex;
+	int i, size;
+
+	size = et->hash_mask + 1;
+	for (i = 0; i < size; i++) {
+		slot = et->table + i;
+
+		list_for_each_safe(entry, temp, slot) {
+			ex = list_entry(entry, struct exception, hash_list);
+			kmem_cache_free(mem, ex);
+		}
+	}
+
+	vfree(et->table);
+}
+
+/*
+ * FIXME: check how this hash fn is performing.
+ */
+static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+{
+	return chunk & et->hash_mask;
+}
+
+static void insert_exception(struct exception_table *eh, struct exception *e)
+{
+	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
+	list_add(&e->hash_list, l);
+}
+
+static inline void remove_exception(struct exception *e)
+{
+	list_del(&e->hash_list);
+}
+
+/*
+ * Return the exception data for a sector, or NULL if not
+ * remapped.
+ */
+static struct exception *lookup_exception(struct exception_table *et,
+					  chunk_t chunk)
+{
+	struct list_head *slot, *el;
+	struct exception *e;
+
+	slot = &et->table[exception_hash(et, chunk)];
+	list_for_each(el, slot) {
+		e = list_entry(el, struct exception, hash_list);
+		if (e->old_chunk == chunk)
+			return e;
+	}
+
+	return NULL;
+}
+
+static inline struct exception *alloc_exception(void)
+{
+	struct exception *e;
+
+	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
+	if (!e)
+		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
+
+	return e;
+}
+
+static inline void free_exception(struct exception *e)
+{
+	kmem_cache_free(exception_cache, e);
+}
+
+static inline struct pending_exception *alloc_pending_exception(void)
+{
+	return mempool_alloc(pending_pool, GFP_NOIO);
+}
+
+static inline void free_pending_exception(struct pending_exception *pe)
+{
+	mempool_free(pe, pending_pool);
+}
+
+int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
+{
+	struct exception *e;
+
+	e = alloc_exception();
+	if (!e)
+		return -ENOMEM;
+
+	e->old_chunk = old;
+	e->new_chunk = new;
+	insert_exception(&s->complete, e);
+	return 0;
+}
+
+/*
+ * Hard coded magic.
+ */
+static int calc_max_buckets(void)
+{
+	unsigned long mem;
+
+	mem = num_physpages << PAGE_SHIFT;
+	mem /= 50;
+	mem /= sizeof(struct list_head);
+
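+	/*
+	 * i.e. roughly 2% of core memory is allowed for bucket
+	 * heads; with 512MB and 8-byte list heads (illustrative
+	 * 32-bit figures) that comes to about 1.3 million buckets.
+	 */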
+	return mem;
+}
+
+/*
+ * Rounds a number down to a power of 2.
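+ * It does this by clearing the lowest set bit until a single bit
+ * remains, e.g. round_down(1000) == 512.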
+ */
+static inline uint32_t round_down(uint32_t n)
+{
+	while (n & (n - 1))
+		n &= (n - 1);
+	return n;
+}
+
+/*
+ * Allocate room for a suitable hash table.
+ */
+static int init_hash_tables(struct dm_snapshot *s)
+{
+	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+
+	/*
+	 * Calculate based on the size of the original volume or
+	 * the COW volume...
+	 */
+	cow_dev_size = get_dev_size(s->cow->dev);
+	origin_dev_size = get_dev_size(s->origin->dev);
+	max_buckets = calc_max_buckets();
+
+	hash_size = min(origin_dev_size, cow_dev_size) / s->chunk_size;
+	hash_size = min(hash_size, max_buckets);
+
+	/* Round it down to a power of 2 */
+	hash_size = round_down(hash_size);
+	if (init_exception_table(&s->complete, hash_size))
+		return -ENOMEM;
+
+	/*
+	 * Allocate hash table for in-flight exceptions
+	 * Make this smaller than the real hash table
+	 */
+	hash_size >>= 3;
+	if (!hash_size)
+		hash_size = 64;
+
+	if (init_exception_table(&s->pending, hash_size)) {
+		exit_exception_table(&s->complete, exception_cache);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Round a number up to the nearest 'size' boundary.  size must
+ * be a power of 2.
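+ * e.g. round_up(5, 8) == 8 and round_up(16, 8) == 16.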
+ */
+static inline ulong round_up(ulong n, ulong size)
+{
+	size--;
+	return (n + size) & ~size;
+}
+
+/*
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
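+ *
+ * An illustrative table line (hypothetical devices):
+ *
+ *   0 8388608 snapshot /dev/vg0/data /dev/vg0/data-cow P 16
+ *
+ * i.e. a persistent snapshot of /dev/vg0/data with 16-sector
+ * (8KB) chunks held on /dev/vg0/data-cow.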
+ */
+static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct dm_snapshot *s;
+	unsigned long chunk_size;
+	int r = -EINVAL;
+	char persistent;
+	char *origin_path;
+	char *cow_path;
+	char *value;
+	int blocksize;
+
+	if (argc < 4) {
+		ti->error = "dm-snapshot: requires exactly 4 arguments";
+		r = -EINVAL;
+		goto bad1;
+	}
+
+	origin_path = argv[0];
+	cow_path = argv[1];
+	persistent = toupper(*argv[2]);
+
+	if (persistent != 'P' && persistent != 'N') {
+		ti->error = "Persistent flag is not P or N";
+		r = -EINVAL;
+		goto bad1;
+	}
+
+	chunk_size = simple_strtoul(argv[3], &value, 10);
+	if (chunk_size == 0 || value == NULL) {
+		ti->error = "Invalid chunk size";
+		r = -EINVAL;
+		goto bad1;
+	}
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL) {
+		ti->error = "Cannot allocate snapshot context private "
+		    "structure";
+		r = -ENOMEM;
+		goto bad1;
+	}
+
+	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
+	if (r) {
+		ti->error = "Cannot get origin device";
+		goto bad2;
+	}
+
+	/* FIXME: get cow length */
+	r = dm_get_device(ti, cow_path, 0, 0,
+			  FMODE_READ | FMODE_WRITE, &s->cow);
+	if (r) {
+		dm_put_device(ti, s->origin);
+		ti->error = "Cannot get COW device";
+		goto bad2;
+	}
+
+	/*
+	 * Chunk size must be a multiple of the page size.  Silently
+	 * round up if it's not.
+	 */
+	chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE);
+
+	/* Validate the chunk size against the device block size */
+	blocksize = get_hardsect_size(s->cow->dev);
+	if (chunk_size % (blocksize / SECTOR_SIZE)) {
+		ti->error = "Chunk size is not a multiple of device blocksize";
+		r = -EINVAL;
+		goto bad3;
+	}
+
+	/* Check the sizes are small enough to fit in one kiovec */
+	if (chunk_size > KIO_MAX_SECTORS) {
+		ti->error = "Chunk size is too big";
+		r = -EINVAL;
+		goto bad3;
+	}
+
+	/* Check chunk_size is a power of 2 */
+	if (chunk_size & (chunk_size - 1)) {
+		ti->error = "Chunk size is not a power of 2";
+		r = -EINVAL;
+		goto bad3;
+	}
+
+	s->chunk_size = chunk_size;
+	s->chunk_mask = chunk_size - 1;
+	s->type = persistent;
+	for (s->chunk_shift = 0; chunk_size;
+	     s->chunk_shift++, chunk_size >>= 1)
+		;
+	s->chunk_shift--;
+
+	s->valid = 1;
+	s->have_metadata = 0;
+	s->last_percent = 0;
+	init_rwsem(&s->lock);
+	s->table = ti->table;
+
+	/* Allocate hash table for COW data */
+	if (init_hash_tables(s)) {
+		ti->error = "Unable to allocate hash table space";
+		r = -ENOMEM;
+		goto bad3;
+	}
+
+	/*
+	 * Check the persistent flag - done here because we need the iobuf
+	 * to check the LV header
+	 */
+	s->store.snap = s;
+
+	if (persistent == 'P')
+		r = dm_create_persistent(&s->store, s->chunk_size);
+	else
+		r = dm_create_transient(&s->store, s, blocksize);
+
+	if (r) {
+		ti->error = "Couldn't create exception store";
+		r = -EINVAL;
+		goto bad4;
+	}
+
+	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+	if (r) {
+		ti->error = "Could not create kcopyd client";
+		goto bad5;
+	}
+
+	/* Flush IO to the origin device */
+	fsync_dev_lockfs(s->origin->dev);
+
+	/* Add snapshot to the list of snapshots for this origin */
+	if (register_snapshot(s)) {
+		r = -EINVAL;
+		ti->error = "Cannot register snapshot origin";
+		goto bad6;
+	}
+	unlockfs(s->origin->dev);
+
+	ti->private = s;
+	return 0;
+
+ bad6:
+	unlockfs(s->origin->dev);
+	kcopyd_client_destroy(s->kcopyd_client);
+
+ bad5:
+	s->store.destroy(&s->store);
+
+ bad4:
+	exit_exception_table(&s->pending, pending_cache);
+	exit_exception_table(&s->complete, exception_cache);
+
+ bad3:
+	dm_put_device(ti, s->cow);
+	dm_put_device(ti, s->origin);
+
+ bad2:
+	kfree(s);
+
+ bad1:
+	return r;
+}
+
+static void snapshot_dtr(struct dm_target *ti)
+{
+	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+
+	dm_table_event(ti->table);
+
+	unregister_snapshot(s);
+
+	exit_exception_table(&s->pending, pending_cache);
+	exit_exception_table(&s->complete, exception_cache);
+
+	/* Deallocate memory used */
+	s->store.destroy(&s->store);
+
+	dm_put_device(ti, s->origin);
+	dm_put_device(ti, s->cow);
+	kcopyd_client_destroy(s->kcopyd_client);
+	kfree(s);
+}
+
+/*
+ * We hold lists of buffer_heads, using the b_reqnext field.
+ */
+static void queue_buffer(struct buffer_head **queue, struct buffer_head *bh)
+{
+	bh->b_reqnext = *queue;
+	*queue = bh;
+}
+
+/*
+ * FIXME: inefficient.
+ */
+static void queue_buffers(struct buffer_head **queue, struct buffer_head *bhs)
+{
+	while (*queue)
+		queue = &((*queue)->b_reqnext);
+
+	*queue = bhs;
+}
+
+/*
+ * Flush a list of buffers.
+ */
+static void flush_buffers(struct buffer_head *bh)
+{
+	struct buffer_head *n;
+
+	DMDEBUG("begin flush");
+	while (bh) {
+		n = bh->b_reqnext;
+		bh->b_reqnext = NULL;
+		DMDEBUG("flushing %p", bh);
+		generic_make_request(WRITE, bh);
+		bh = n;
+	}
+
+	run_task_queue(&tq_disk);
+}
+
+/*
+ * Error a list of buffers.
+ */
+static void error_buffers(struct buffer_head *bh)
+{
+	struct buffer_head *n;
+
+	while (bh) {
+		n = bh->b_reqnext;
+		bh->b_reqnext = NULL;
+		buffer_IO_error(bh);
+		bh = n;
+	}
+}
+
+static struct buffer_head *__flush_bhs(struct pending_exception *pe)
+{
+	struct pending_exception *sibling;
+
+	if (list_empty(&pe->siblings))
+		return pe->origin_bhs;
+
+	sibling = list_entry(pe->siblings.next,
+			     struct pending_exception, siblings);
+
+	list_del(&pe->siblings);
+
+	/* FIXME: I think there's a race on SMP machines here, add spin lock */
+	queue_buffers(&sibling->origin_bhs, pe->origin_bhs);
+
+	return NULL;
+}
+
+static void pending_complete(struct pending_exception *pe, int success)
+{
+	struct exception *e;
+	struct dm_snapshot *s = pe->snap;
+	struct buffer_head *flush = NULL;
+
+	if (success) {
+		e = alloc_exception();
+		if (!e) {
+			DMWARN("Unable to allocate exception.");
+			down_write(&s->lock);
+			s->store.drop_snapshot(&s->store);
+			s->valid = 0;
+			flush = __flush_bhs(pe);
+			up_write(&s->lock);
+
+			error_buffers(pe->snapshot_bhs);
+			goto out;
+		}
+
+		/*
+		 * Add a proper exception, and remove the
+		 * in-flight exception from the list.
+		 */
+		down_write(&s->lock);
+
+		memcpy(e, &pe->e, sizeof(*e));
+		insert_exception(&s->complete, e);
+		remove_exception(&pe->e);
+		flush = __flush_bhs(pe);
+
+		/* Submit any pending write BHs */
+		up_write(&s->lock);
+
+		flush_buffers(pe->snapshot_bhs);
+		DMDEBUG("Exception completed successfully.");
+
+		/* Notify any interested parties */
+		if (s->store.fraction_full) {
+			sector_t numerator, denominator;
+			int pc;
+
+			s->store.fraction_full(&s->store, &numerator,
+					       &denominator);
+			pc = numerator * 100 / denominator;
+
+			if (pc >= s->last_percent + WAKE_UP_PERCENT) {
+				dm_table_event(s->table);
+				s->last_percent = pc - pc % WAKE_UP_PERCENT;
+			}
+		}
+
+	} else {
+		/* Read/write error - snapshot is unusable */
+		down_write(&s->lock);
+		if (s->valid)
+			DMERR("Error reading/writing snapshot");
+		s->store.drop_snapshot(&s->store);
+		s->valid = 0;
+		remove_exception(&pe->e);
+		flush = __flush_bhs(pe);
+		up_write(&s->lock);
+
+		error_buffers(pe->snapshot_bhs);
+
+		dm_table_event(s->table);
+		DMDEBUG("Exception failed.");
+	}
+
+ out:
+	if (flush)
+		flush_buffers(flush);
+
+	free_pending_exception(pe);
+}
+
+static void commit_callback(void *context, int success)
+{
+	struct pending_exception *pe = (struct pending_exception *) context;
+	pending_complete(pe, success);
+}
+
+/*
+ * Called when the copy I/O has finished.  kcopyd actually runs
+ * this code so don't block.
+ */
+static void copy_callback(int read_err, unsigned int write_err, void *context)
+{
+	struct pending_exception *pe = (struct pending_exception *) context;
+	struct dm_snapshot *s = pe->snap;
+
+	if (read_err || write_err)
+		pending_complete(pe, 0);
+
+	else
+		/* Update the metadata if we are persistent */
+		s->store.commit_exception(&s->store, &pe->e, commit_callback,
+					  pe);
+}
+
+/*
+ * Dispatches the copy operation to kcopyd.
+ */
+static inline void start_copy(struct pending_exception *pe)
+{
+	struct dm_snapshot *s = pe->snap;
+	struct io_region src, dest;
+	kdev_t dev = s->origin->dev;
+	int *sizes = blk_size[major(dev)];
+	sector_t dev_size = (sector_t) -1;
+
+	if (pe->started)
+		return;
+
+	/* this is protected by snap->lock */
+	pe->started = 1;
+
+	if (sizes && sizes[minor(dev)])
+		dev_size = sizes[minor(dev)] << 1;
+
+	src.dev = dev;
+	src.sector = chunk_to_sector(s, pe->e.old_chunk);
+	src.count = min(s->chunk_size, dev_size - src.sector);
+
+	dest.dev = s->cow->dev;
+	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
+	dest.count = src.count;
+
+	/* Hand over to kcopyd */
+	kcopyd_copy(s->kcopyd_client,
+		    &src, 1, &dest, 0, copy_callback, pe);
+}
+
+/*
+ * Looks to see if this snapshot already has a pending exception
+ * for this chunk; otherwise it allocates a new one and inserts
+ * it into the pending table.
+ */
+static struct pending_exception *find_pending_exception(struct dm_snapshot *s,
+							struct buffer_head *bh)
+{
+	struct exception *e;
+	struct pending_exception *pe;
+	chunk_t chunk = sector_to_chunk(s, bh->b_rsector);
+
+	/*
+	 * Is there a pending exception for this already ?
+	 */
+	e = lookup_exception(&s->pending, chunk);
+	if (e) {
+		/* cast the exception to a pending exception */
+		pe = list_entry(e, struct pending_exception, e);
+
+	} else {
+		/* Create a new pending exception */
+		pe = alloc_pending_exception();
+		pe->e.old_chunk = chunk;
+		pe->origin_bhs = pe->snapshot_bhs = NULL;
+		INIT_LIST_HEAD(&pe->siblings);
+		pe->snap = s;
+		pe->started = 0;
+
+		if (s->store.prepare_exception(&s->store, &pe->e)) {
+			free_pending_exception(pe);
+			s->valid = 0;
+			return NULL;
+		}
+
+		insert_exception(&s->pending, &pe->e);
+	}
+
+	return pe;
+}
+
+static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
+				   struct buffer_head *bh)
+{
+	bh->b_rdev = s->cow->dev;
+	bh->b_rsector = chunk_to_sector(s, e->new_chunk) +
+	    (bh->b_rsector & s->chunk_mask);
+}
+
+static int snapshot_map(struct dm_target *ti, struct buffer_head *bh, int rw,
+			union map_info *map_context)
+{
+	struct exception *e;
+	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+	int r = 1;
+	chunk_t chunk;
+	struct pending_exception *pe;
+
+	chunk = sector_to_chunk(s, bh->b_rsector);
+
+	/* Full snapshots are not usable */
+	if (!s->valid)
+		return -1;
+
+	/*
+	 * Write to snapshot - higher level takes care of RW/RO
+	 * flags so we should only get this if we are
+	 * writeable.
+	 */
+	if (rw == WRITE) {
+
+		down_write(&s->lock);
+
+		/* If the block is already remapped - use that, else remap it */
+		e = lookup_exception(&s->complete, chunk);
+		if (e)
+			remap_exception(s, e, bh);
+
+		else {
+			pe = find_pending_exception(s, bh);
+
+			if (!pe) {
+				s->store.drop_snapshot(&s->store);
+				s->valid = 0;
+				r = -EIO;
+			} else {
+				remap_exception(s, &pe->e, bh);
+				queue_buffer(&pe->snapshot_bhs, bh);
+				start_copy(pe);
+				r = 0;
+			}
+		}
+
+		up_write(&s->lock);
+
+	} else {
+		/*
+		 * FIXME: this read path scares me because we
+		 * always use the origin when we have a pending
+		 * exception.  However I can't think of a
+		 * situation where this is wrong - ejt.
+		 */
+
+		/* Do reads */
+		down_read(&s->lock);
+
+		/* See if it has been remapped */
+		e = lookup_exception(&s->complete, chunk);
+		if (e)
+			remap_exception(s, e, bh);
+		else
+			bh->b_rdev = s->origin->dev;
+
+		up_read(&s->lock);
+	}
+
+	return r;
+}
+
+void snapshot_resume(struct dm_target *ti)
+{
+	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+
+	if (s->have_metadata)
+		return;
+
+	if (s->store.read_metadata(&s->store)) {
+		down_write(&s->lock);
+		s->valid = 0;
+		up_write(&s->lock);
+	}
+
+	s->have_metadata = 1;
+}
+
+static int snapshot_status(struct dm_target *ti, status_type_t type,
+			   char *result, unsigned int maxlen)
+{
+	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
+	char cow[16];
+	char org[16];
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		if (!snap->valid)
+			snprintf(result, maxlen, "Invalid");
+		else {
+			if (snap->store.fraction_full) {
+				sector_t numerator, denominator;
+				snap->store.fraction_full(&snap->store,
+							  &numerator,
+							  &denominator);
+				snprintf(result, maxlen,
+					 SECTOR_FORMAT "/" SECTOR_FORMAT,
+					 numerator, denominator);
+			}
+			else
+				snprintf(result, maxlen, "Unknown");
+		}
+		break;
+
+	case STATUSTYPE_TABLE:
+		/*
+		 * kdevname returns a static pointer so we need
+		 * to make private copies if the output is to
+		 * make sense.
+		 */
+		strncpy(cow, dm_kdevname(snap->cow->dev), sizeof(cow));
+		strncpy(org, dm_kdevname(snap->origin->dev), sizeof(org));
+		snprintf(result, maxlen, "%s %s %c %ld", org, cow,
+			 snap->type, snap->chunk_size);
+		break;
+	}
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Origin methods
+ *---------------------------------------------------------------*/
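+
+/*
+ * list_merge() splices two circular doubly-linked lists into a
+ * single ring.  __origin_write() below uses it to chain together
+ * the sibling pending_exceptions of all snapshots that share an
+ * origin.
+ */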
+static void list_merge(struct list_head *l1, struct list_head *l2)
+{
+	struct list_head *l1_n, *l2_p;
+
+	l1_n = l1->next;
+	l2_p = l2->prev;
+
+	l1->next = l2;
+	l2->prev = l1;
+
+	l2_p->next = l1_n;
+	l1_n->prev = l2_p;
+}
+
+static int __origin_write(struct list_head *snapshots, struct buffer_head *bh)
+{
+	int r = 1, first = 1;
+	struct list_head *sl;
+	struct dm_snapshot *snap;
+	struct exception *e;
+	struct pending_exception *pe, *last = NULL;
+	chunk_t chunk;
+
+	/* Do all the snapshots on this origin */
+	list_for_each(sl, snapshots) {
+		snap = list_entry(sl, struct dm_snapshot, list);
+
+		/* Only deal with valid snapshots */
+		if (!snap->valid)
+			continue;
+
+		down_write(&snap->lock);
+
+		/*
+		 * Remember, different snapshots can have
+		 * different chunk sizes.
+		 */
+		chunk = sector_to_chunk(snap, bh->b_rsector);
+
+		/*
+		 * Check exception table to see if block
+		 * is already remapped in this snapshot
+		 * and trigger an exception if not.
+		 */
+		e = lookup_exception(&snap->complete, chunk);
+		if (!e) {
+			pe = find_pending_exception(snap, bh);
+			if (!pe) {
+				snap->store.drop_snapshot(&snap->store);
+				snap->valid = 0;
+
+			} else {
+				if (last)
+					list_merge(&pe->siblings,
+						   &last->siblings);
+
+				last = pe;
+				r = 0;
+			}
+		}
+
+		up_write(&snap->lock);
+	}
+
+	/*
+	 * Now that we have a complete pe list we can start the copying.
+	 */
+	if (last) {
+		pe = last;
+		do {
+			down_write(&pe->snap->lock);
+			if (first)
+				queue_buffer(&pe->origin_bhs, bh);
+			start_copy(pe);
+			up_write(&pe->snap->lock);
+			first = 0;
+			pe = list_entry(pe->siblings.next,
+					struct pending_exception, siblings);
+
+		} while (pe != last);
+	}
+
+	return r;
+}
+
+/*
+ * Called on a write from the origin driver.
+ */
+int do_origin(struct dm_dev *origin, struct buffer_head *bh)
+{
+	struct origin *o;
+	int r;
+
+	down_read(&_origins_lock);
+	o = __lookup_origin(origin->dev);
+	if (!o)
+		BUG();
+
+	r = __origin_write(&o->snapshots, bh);
+	up_read(&_origins_lock);
+
+	return r;
+}
+
+/*
+ * Origin: maps a linear range of a device, with hooks for snapshotting.
+ */
+
+/*
+ * Construct an origin mapping: <dev_path>
+ * The context for an origin is merely a 'struct dm_dev *'
+ * pointing to the real device.
+ */
+static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	int r;
+	struct dm_dev *dev;
+
+	if (argc != 1) {
+		ti->error = "dm-origin: incorrect number of arguments";
+		return -EINVAL;
+	}
+
+	r = dm_get_device(ti, argv[0], 0, ti->len,
+			  dm_table_get_mode(ti->table), &dev);
+	if (r) {
+		ti->error = "Cannot get target device";
+		return r;
+	}
+
+	ti->private = dev;
+	return 0;
+}
+
+static void origin_dtr(struct dm_target *ti)
+{
+	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	dm_put_device(ti, dev);
+}
+
+static int origin_map(struct dm_target *ti, struct buffer_head *bh, int rw,
+		      union map_info *map_context)
+{
+	struct dm_dev *dev = (struct dm_dev *) ti->private;
+	bh->b_rdev = dev->dev;
+
+	/* Only tell snapshots if this is a write */
+	return (rw == WRITE) ? do_origin(dev, bh) : 1;
+}
+
+static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+			 unsigned int maxlen)
+{
+	struct dm_dev *dev = (struct dm_dev *) ti->private;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		snprintf(result, maxlen, "%s", dm_kdevname(dev->dev));
+		break;
+	}
+
+	return 0;
+}
+
+static struct target_type origin_target = {
+	name:	"snapshot-origin",
+	module:	THIS_MODULE,
+	ctr:	origin_ctr,
+	dtr:	origin_dtr,
+	map:	origin_map,
+	status:	origin_status,
+};
+
+static struct target_type snapshot_target = {
+	name:	"snapshot",
+	module:	THIS_MODULE,
+	ctr:	snapshot_ctr,
+	dtr:	snapshot_dtr,
+	map:	snapshot_map,
+	resume: snapshot_resume,
+	status:	snapshot_status,
+};
+
+int __init dm_snapshot_init(void)
+{
+	int r;
+
+	r = dm_register_target(&snapshot_target);
+	if (r) {
+		DMERR("snapshot target register failed %d", r);
+		return r;
+	}
+
+	r = dm_register_target(&origin_target);
+	if (r < 0) {
+		DMERR("Device mapper: Origin: register failed %d", r);
+		goto bad1;
+	}
+
+	r = init_origin_hash();
+	if (r) {
+		DMERR("init_origin_hash failed.");
+		goto bad2;
+	}
+
+	exception_cache = kmem_cache_create("dm-snapshot-ex",
+					    sizeof(struct exception),
+					    __alignof__(struct exception),
+					    0, NULL, NULL);
+	if (!exception_cache) {
+		DMERR("Couldn't create exception cache.");
+		r = -ENOMEM;
+		goto bad3;
+	}
+
+	pending_cache =
+	    kmem_cache_create("dm-snapshot-in",
+			      sizeof(struct pending_exception),
+			      __alignof__(struct pending_exception),
+			      0, NULL, NULL);
+	if (!pending_cache) {
+		DMERR("Couldn't create pending cache.");
+		r = -ENOMEM;
+		goto bad4;
+	}
+
+	pending_pool = mempool_create(128, mempool_alloc_slab,
+				      mempool_free_slab, pending_cache);
+	if (!pending_pool) {
+		DMERR("Couldn't create pending pool.");
+		r = -ENOMEM;
+		goto bad5;
+	}
+
+	return 0;
+
+      bad5:
+	kmem_cache_destroy(pending_cache);
+      bad4:
+	kmem_cache_destroy(exception_cache);
+      bad3:
+	exit_origin_hash();
+      bad2:
+	dm_unregister_target(&origin_target);
+      bad1:
+	dm_unregister_target(&snapshot_target);
+	return r;
+}
+
+void dm_snapshot_exit(void)
+{
+	int r;
+
+	r = dm_unregister_target(&snapshot_target);
+	if (r)
+		DMERR("snapshot unregister failed %d", r);
+
+	r = dm_unregister_target(&origin_target);
+	if (r)
+		DMERR("origin unregister failed %d", r);
+
+	exit_origin_hash();
+	mempool_destroy(pending_pool);
+	kmem_cache_destroy(pending_cache);
+	kmem_cache_destroy(exception_cache);
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-snapshot.h linux-2.4.26-leo/drivers/md/dm-snapshot.h
--- linux-2.4.26/drivers/md/dm-snapshot.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-snapshot.h	2004-06-22 21:04:38.000000000 +0100
@@ -0,0 +1,158 @@
+/*
+ * dm-snapshot.c
+ *
+ * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_SNAPSHOT_H
+#define DM_SNAPSHOT_H
+
+#include "dm.h"
+#include <linux/blkdev.h>
+
+struct exception_table {
+	uint32_t hash_mask;
+	struct list_head *table;
+};
+
+/*
+ * The snapshot code deals with largish chunks of the disk at a
+ * time. Typically 64k - 256k.
+ */
+/* FIXME: can we get away with limiting these to a uint32_t ? */
+typedef sector_t chunk_t;
+
+/*
+ * An exception is used where an old chunk of data has been
+ * replaced by a new one.
+ */
+struct exception {
+	struct list_head hash_list;
+
+	chunk_t old_chunk;
+	chunk_t new_chunk;
+};
+
+/*
+ * Abstraction to handle the meta/layout of exception stores (the
+ * COW device).
+ */
+struct exception_store {
+
+	/*
+	 * Destroys this object when you've finished with it.
+	 */
+	void (*destroy) (struct exception_store *store);
+
+	/*
+	 * The target shouldn't read the COW device until this is
+	 * called.
+	 */
+	int (*read_metadata) (struct exception_store *store);
+
+	/*
+	 * Find somewhere to store the next exception.
+	 */
+	int (*prepare_exception) (struct exception_store *store,
+				  struct exception *e);
+
+	/*
+	 * Update the metadata with this exception.
+	 */
+	void (*commit_exception) (struct exception_store *store,
+				  struct exception *e,
+				  void (*callback) (void *, int success),
+				  void *callback_context);
+
+	/*
+	 * The snapshot is invalid, note this in the metadata.
+	 */
+	void (*drop_snapshot) (struct exception_store *store);
+
+	/*
+	 * Return how full the snapshot is.
+	 */
+	void (*fraction_full) (struct exception_store *store,
+			       sector_t *numerator,
+			       sector_t *denominator);
+
+	struct dm_snapshot *snap;
+	void *context;
+};
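+
+/*
+ * Illustrative sketch only: how a (hypothetical) trivial store
+ * might wire up the ops above.  The real implementations are
+ * created by dm_create_persistent() and dm_create_transient()
+ * below, in dm-exception-store.c.
+ */
+#if 0
+static int null_read_metadata(struct exception_store *store)
+{
+	return 0;	/* pretend there is no metadata yet */
+}
+
+static void setup_null_store(struct exception_store *store)
+{
+	memset(store, 0, sizeof(*store));
+	store->read_metadata = null_read_metadata;
+	/* ... remaining ops filled in the same way ... */
+}
+#endif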
+
+struct dm_snapshot {
+	struct rw_semaphore lock;
+	struct dm_table *table;
+
+	struct dm_dev *origin;
+	struct dm_dev *cow;
+
+	/* List of snapshots per Origin */
+	struct list_head list;
+
+	/* Size of data blocks saved - must be a power of 2 */
+	chunk_t chunk_size;
+	chunk_t chunk_mask;
+	chunk_t chunk_shift;
+
+	/* You can't use a snapshot if this is 0 (e.g. if full) */
+	int valid;
+	int have_metadata;
+
+	/* Used for display of table */
+	char type;
+
+	/* The last percentage we notified */
+	int last_percent;
+
+	struct exception_table pending;
+	struct exception_table complete;
+
+	/* The on disk metadata handler */
+	struct exception_store store;
+
+	struct kcopyd_client *kcopyd_client;
+};
+
+/*
+ * Used by the exception stores to load exceptions when
+ * initialising.
+ */
+int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new);
+
+/*
+ * Constructor and destructor for the default persistent
+ * store.
+ */
+int dm_create_persistent(struct exception_store *store, uint32_t chunk_size);
+
+int dm_create_transient(struct exception_store *store,
+			struct dm_snapshot *s, int blocksize);
+
+/*
+ * Return the number of sectors in the device (blk_size[] holds
+ * sizes in 1K blocks, hence the shift by one below).
+ */
+static inline sector_t get_dev_size(kdev_t dev)
+{
+	int *sizes;
+
+	sizes = blk_size[MAJOR(dev)];
+	if (sizes)
+		return sizes[MINOR(dev)] << 1;
+
+	return 0;
+}
+
+static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
+{
+	return (sector & ~s->chunk_mask) >> s->chunk_shift;
+}
+
+static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
+{
+	return chunk << s->chunk_shift;
+}
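+
+/*
+ * Worked example: with a chunk_size of 16 sectors, chunk_mask = 15
+ * and chunk_shift = 4; sector 37 -> (37 & ~15) >> 4 = chunk 2, and
+ * chunk 2 maps back to sector 2 << 4 = 32, the start of the chunk.
+ */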
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-stripe.c linux-2.4.26-leo/drivers/md/dm-stripe.c
--- linux-2.4.26/drivers/md/dm-stripe.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-stripe.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+struct stripe {
+	struct dm_dev *dev;
+	sector_t physical_start;
+};
+
+struct stripe_c {
+	uint32_t stripes;
+
+	/* The size of this target / num. stripes */
+	uint32_t stripe_width;
+
+	/* stripe chunk size */
+	uint32_t chunk_shift;
+	sector_t chunk_mask;
+
+	struct stripe stripe[0];
+};
+
+static inline struct stripe_c *alloc_context(unsigned int stripes)
+{
+	size_t len;
+
+	if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
+			  stripes))
+		return NULL;
+
+	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
+
+	return kmalloc(len, GFP_KERNEL);
+}
+
+/*
+ * Parse a single <dev> <sector> pair
+ */
+static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
+		      unsigned int stripe, char **argv)
+{
+	sector_t start;
+
+	if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1)
+		return -EINVAL;
+
+	if (dm_get_device(ti, argv[0], start, sc->stripe_width,
+			  dm_table_get_mode(ti->table),
+			  &sc->stripe[stripe].dev))
+		return -ENXIO;
+
+	sc->stripe[stripe].physical_start = start;
+	return 0;
+}
+
+/*
+ * FIXME: Nasty function, only present because we can't link
+ * against __moddi3 and __divdi3.
+ *
+ * Sets *n = a / b and returns non-zero iff a is an exact
+ * multiple of b (i.e. a == b * *n).
+ */
+static int multiple(sector_t a, sector_t b, sector_t *n)
+{
+	sector_t acc, prev, i;
+
+	*n = 0;
+	while (a >= b) {
+		for (acc = b, prev = 0, i = 1;
+		     acc <= a;
+		     prev = acc, acc <<= 1, i <<= 1)
+			;
+
+		a -= prev;
+		*n += i >> 1;
+	}
+
+	return a == 0;
+}
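+
+/*
+ * Worked example: multiple(96, 32, &n) sets n = 3 and returns 1;
+ * multiple(100, 32, &n) also sets n = 3 but returns 0, since a
+ * remainder of 4 is left over.
+ */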
+
+/*
+ * Construct a striped mapping.
+ * <number of stripes> <chunk size (2^n)> [<dev_path> <offset>]+
+ */
+static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct stripe_c *sc;
+	sector_t width;
+	uint32_t stripes;
+	uint32_t chunk_size;
+	char *end;
+	int r;
+	unsigned int i;
+
+	if (argc < 2) {
+		ti->error = "dm-stripe: Not enough arguments";
+		return -EINVAL;
+	}
+
+	stripes = simple_strtoul(argv[0], &end, 10);
+	if (*end) {
+		ti->error = "dm-stripe: Invalid stripe count";
+		return -EINVAL;
+	}
+
+	chunk_size = simple_strtoul(argv[1], &end, 10);
+	if (*end) {
+		ti->error = "dm-stripe: Invalid chunk_size";
+		return -EINVAL;
+	}
+
+	/*
+	 * chunk_size must be a power of two
+	 */
+	if (!chunk_size || (chunk_size & (chunk_size - 1))) {
+		ti->error = "dm-stripe: Invalid chunk size";
+		return -EINVAL;
+	}
+
+	if (!multiple(ti->len, stripes, &width)) {
+		ti->error = "dm-stripe: Target length not divisable by "
+		    "number of stripes";
+		return -EINVAL;
+	}
+
+	/*
+	 * Do we have enough arguments for that many stripes ?
+	 */
+	if (argc != (2 + 2 * stripes)) {
+		ti->error = "dm-stripe: Not enough destinations specified";
+		return -EINVAL;
+	}
+
+	sc = alloc_context(stripes);
+	if (!sc) {
+		ti->error = "dm-stripe: Memory allocation for striped context "
+		    "failed";
+		return -ENOMEM;
+	}
+
+	sc->stripes = stripes;
+	sc->stripe_width = width;
+
+	sc->chunk_mask = ((sector_t) chunk_size) - 1;
+	for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
+		chunk_size >>= 1;
+	sc->chunk_shift--;
+
+	/*
+	 * Get the stripe destinations.
+	 */
+	for (i = 0; i < stripes; i++) {
+		argv += 2;
+
+		r = get_stripe(ti, sc, i, argv);
+		if (r < 0) {
+			ti->error = "dm-stripe: Couldn't parse stripe "
+			    "destination";
+			while (i--)
+				dm_put_device(ti, sc->stripe[i].dev);
+			kfree(sc);
+			return r;
+		}
+	}
+
+	ti->private = sc;
+	return 0;
+}
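+
+/*
+ * Example table line (illustrative):
+ *   "0 1024 striped 2 64 /dev/hda 0 /dev/hdb 0"
+ * stripes 1024 sectors across two devices in 64-sector chunks;
+ * stripe_ctr() receives everything after "striped" as argv.
+ */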
+
+static void stripe_dtr(struct dm_target *ti)
+{
+	unsigned int i;
+	struct stripe_c *sc = (struct stripe_c *) ti->private;
+
+	for (i = 0; i < sc->stripes; i++)
+		dm_put_device(ti, sc->stripe[i].dev);
+
+	kfree(sc);
+}
+
+static int stripe_map(struct dm_target *ti, struct buffer_head *bh, int rw,
+		      union map_info *context)
+{
+	struct stripe_c *sc = (struct stripe_c *) ti->private;
+
+	sector_t offset = bh->b_rsector - ti->begin;
+	uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift);
+	uint32_t stripe = chunk % sc->stripes;	/* 32bit modulus */
+	chunk = chunk / sc->stripes;
+
+	bh->b_rdev = sc->stripe[stripe].dev->dev;
+	bh->b_rsector = sc->stripe[stripe].physical_start +
+	    (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
+	return 1;
+}
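+
+/*
+ * Worked example: with 2 stripes and a 64-sector chunk
+ * (chunk_shift = 6, chunk_mask = 63), offset 150 is chunk 2,
+ * which lands on stripe 0 as its chunk 1; the remapped sector is
+ * physical_start + 64 + (150 & 63) = physical_start + 86.
+ */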
+
+static int stripe_status(struct dm_target *ti, status_type_t type,
+			 char *result, unsigned int maxlen)
+{
+	struct stripe_c *sc = (struct stripe_c *) ti->private;
+	int offset;
+	unsigned int i;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		offset = snprintf(result, maxlen, "%d " SECTOR_FORMAT,
+				  sc->stripes, sc->chunk_mask + 1);
+		for (i = 0; i < sc->stripes; i++) {
+			offset +=
+			    snprintf(result + offset, maxlen - offset,
+				     " %s " SECTOR_FORMAT,
+		       dm_kdevname(to_kdev_t(sc->stripe[i].dev->bdev->bd_dev)),
+				     sc->stripe[i].physical_start);
+		}
+		break;
+	}
+	return 0;
+}
+
+static struct target_type stripe_target = {
+	.name   = "striped",
+	.module = THIS_MODULE,
+	.ctr    = stripe_ctr,
+	.dtr    = stripe_dtr,
+	.map    = stripe_map,
+	.status = stripe_status,
+};
+
+int __init dm_stripe_init(void)
+{
+	int r;
+
+	r = dm_register_target(&stripe_target);
+	if (r < 0)
+		DMWARN("striped target registration failed");
+
+	return r;
+}
+
+void dm_stripe_exit(void)
+{
+	if (dm_unregister_target(&stripe_target))
+		DMWARN("striped target unregistration failed");
+
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-table.c linux-2.4.26-leo/drivers/md/dm-table.c
--- linux-2.4.26/drivers/md/dm-table.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-table.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,679 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+
+#define MAX_DEPTH 16
+#define NODE_SIZE L1_CACHE_BYTES
+#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
+#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
+
+struct dm_table {
+	atomic_t holders;
+
+	/* btree table */
+	unsigned int depth;
+	unsigned int counts[MAX_DEPTH];	/* in nodes */
+	sector_t *index[MAX_DEPTH];
+
+	unsigned int num_targets;
+	unsigned int num_allocated;
+	sector_t *highs;
+	struct dm_target *targets;
+
+	/*
+	 * Indicates the rw permissions for the new logical
+	 * device.  This should be a combination of FMODE_READ
+	 * and FMODE_WRITE.
+	 */
+	int mode;
+
+	/* a list of devices used by this table */
+	struct list_head devices;
+
+	/* events get handed up using this callback */
+	void (*event_fn)(void *);
+	void *event_context;
+};
+
+/*
+ * Similar to ceiling(log_size(n))
+ */
+static unsigned int int_log(unsigned long n, unsigned long base)
+{
+	int result = 0;
+
+	while (n > 1) {
+		n = dm_div_up(n, base);
+		result++;
+	}
+
+	return result;
+}
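+
+/*
+ * e.g. int_log(1000, 10) = 3 and int_log(1, n) = 0; used below to
+ * size the depth of the btree.
+ */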
+
+/*
+ * Calculate the index of the child node of the n'th node k'th key.
+ */
+static inline unsigned int get_child(unsigned int n, unsigned int k)
+{
+	return (n * CHILDREN_PER_NODE) + k;
+}
+
+/*
+ * Return the n'th node of level l from table t.
+ */
+static inline sector_t *get_node(struct dm_table *t, unsigned int l,
+				 unsigned int n)
+{
+	return t->index[l] + (n * KEYS_PER_NODE);
+}
+
+/*
+ * Return the highest key that you could lookup from the n'th
+ * node on level l of the btree.
+ */
+static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
+{
+	for (; l < t->depth - 1; l++)
+		n = get_child(n, CHILDREN_PER_NODE - 1);
+
+	if (n >= t->counts[l])
+		return (sector_t) -1;
+
+	return get_node(t, l, n)[KEYS_PER_NODE - 1];
+}
+
+/*
+ * Fills in a level of the btree based on the highs of the level
+ * below it.
+ */
+static int setup_btree_index(unsigned int l, struct dm_table *t)
+{
+	unsigned int n, k;
+	sector_t *node;
+
+	for (n = 0U; n < t->counts[l]; n++) {
+		node = get_node(t, l, n);
+
+		for (k = 0U; k < KEYS_PER_NODE; k++)
+			node[k] = high(t, l + 1, get_child(n, k));
+	}
+
+	return 0;
+}
+
+
+
+int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
+{
+	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+
+	if (!t)
+		return -ENOMEM;
+
+	memset(t, 0, sizeof(*t));
+	INIT_LIST_HEAD(&t->devices);
+	atomic_set(&t->holders, 1);
+
+	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+
+	/* Allocate both the target array and offset array at once. */
+	t->highs = (sector_t *) vcalloc(sizeof(struct dm_target) +
+					sizeof(sector_t), num_targets);
+	if (!t->highs) {
+		kfree(t);
+		return -ENOMEM;
+	}
+
+	memset(t->highs, -1, sizeof(*t->highs) * num_targets);
+
+	t->targets = (struct dm_target *) (t->highs + num_targets);
+	t->num_allocated = num_targets;
+	t->mode = mode;
+	*result = t;
+	return 0;
+}
+
+static void free_devices(struct list_head *devices)
+{
+	struct list_head *tmp, *next;
+
+	for (tmp = devices->next; tmp != devices; tmp = next) {
+		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		next = tmp->next;
+		kfree(dd);
+	}
+}
+
+void table_destroy(struct dm_table *t)
+{
+	unsigned int i;
+
+	/* free the indexes (see dm_table_complete) */
+	if (t->depth >= 2)
+		vfree(t->index[t->depth - 2]);
+
+	/* free the targets */
+	for (i = 0; i < t->num_targets; i++) {
+		struct dm_target *tgt = t->targets + i;
+
+		if (tgt->type->dtr)
+			tgt->type->dtr(tgt);
+
+		dm_put_target_type(tgt->type);
+	}
+
+	vfree(t->highs);
+
+	/* free the device list */
+	if (t->devices.next != &t->devices) {
+		DMWARN("devices still present during destroy: "
+		       "dm_table_remove_device calls missing");
+
+		free_devices(&t->devices);
+	}
+
+	kfree(t);
+}
+
+void dm_table_get(struct dm_table *t)
+{
+	atomic_inc(&t->holders);
+}
+
+void dm_table_put(struct dm_table *t)
+{
+	if (atomic_dec_and_test(&t->holders))
+		table_destroy(t);
+}
+
+/*
+ * Convert a device path to a dev_t.
+ */
+static int lookup_device(const char *path, kdev_t *dev)
+{
+	int r;
+	struct nameidata nd;
+	struct inode *inode;
+
+	if (!path_init(path, LOOKUP_FOLLOW, &nd))
+		return 0;
+
+	if ((r = path_walk(path, &nd)))
+		goto out;
+
+	inode = nd.dentry->d_inode;
+	if (!inode) {
+		r = -ENOENT;
+		goto out;
+	}
+
+	if (!S_ISBLK(inode->i_mode)) {
+		r = -ENOTBLK;
+		goto out;
+	}
+
+	*dev = inode->i_rdev;
+
+      out:
+	path_release(&nd);
+	return r;
+}
+
+/*
+ * See if we've already got a device in the list.
+ */
+static struct dm_dev *find_device(struct list_head *l, kdev_t dev)
+{
+	struct list_head *tmp;
+
+	list_for_each(tmp, l) {
+		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		if (kdev_same(dd->dev, dev))
+			return dd;
+	}
+
+	return NULL;
+}
+
+/*
+ * Open a device so we can use it as a map destination.
+ */
+static int open_dev(struct dm_dev *dd)
+{
+	if (dd->bdev)
+		BUG();
+
+	dd->bdev = bdget(kdev_t_to_nr(dd->dev));
+	if (!dd->bdev)
+		return -ENOMEM;
+
+	return blkdev_get(dd->bdev, dd->mode, 0, BDEV_RAW);
+}
+
+/*
+ * Close a device that we've been using.
+ */
+static void close_dev(struct dm_dev *dd)
+{
+	if (!dd->bdev)
+		return;
+
+	blkdev_put(dd->bdev, BDEV_RAW);
+	dd->bdev = NULL;
+}
+
+/*
+ * If possible (i.e. blk_size[major] is set), this checks that an
+ * area of a destination device is valid.
+ */
+static int check_device_area(kdev_t dev, sector_t start, sector_t len)
+{
+	int *sizes;
+	sector_t dev_size;
+
+	if (!(sizes = blk_size[major(dev)]) || !(dev_size = sizes[minor(dev)]))
+		/* we don't know the device details,
+		 * so give the benefit of the doubt */
+		return 1;
+
+	/* convert to 512-byte sectors */
+	dev_size <<= 1;
+
+	return ((start < dev_size) && (len <= (dev_size - start)));
+}
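+
+/*
+ * Worked example: a 4096-block device is 8192 sectors, so
+ * start = 8000, len = 200 is rejected (8000 < 8192 but only 192
+ * sectors remain), while start = 8000, len = 192 is accepted.
+ */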
+
+/*
+ * This upgrades the mode on an already open dm_dev, being careful
+ * to leave things as they were if we fail to reopen the device.
+ */
+static int upgrade_mode(struct dm_dev *dd, int new_mode)
+{
+	int r;
+	struct dm_dev dd_copy;
+
+	memcpy(&dd_copy, dd, sizeof(dd_copy));
+
+	dd->mode |= new_mode;
+	dd->bdev = NULL;
+	r = open_dev(dd);
+	if (!r)
+		close_dev(&dd_copy);
+	else
+		memcpy(dd, &dd_copy, sizeof(dd_copy));
+
+	return r;
+}
+
+/*
+ * Add a device to the list, or just increment the usage count if
+ * it's already present.
+ */
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+		  sector_t len, int mode, struct dm_dev **result)
+{
+	int r;
+	kdev_t dev;
+	struct dm_dev *dd;
+	unsigned major, minor;
+	struct dm_table *t = ti->table;
+
+	if (!t)
+		BUG();
+
+	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
+		/* Extract the major/minor numbers */
+		dev = mk_kdev(major, minor);
+	} else {
+		/* convert the path to a device */
+		if ((r = lookup_device(path, &dev)))
+			return r;
+	}
+
+	dd = find_device(&t->devices, dev);
+	if (!dd) {
+		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+		if (!dd)
+			return -ENOMEM;
+
+		dd->dev = dev;
+		dd->mode = mode;
+		dd->bdev = NULL;
+
+		if ((r = open_dev(dd))) {
+			kfree(dd);
+			return r;
+		}
+
+		atomic_set(&dd->count, 0);
+		list_add(&dd->list, &t->devices);
+
+	} else if (dd->mode != (mode | dd->mode)) {
+		r = upgrade_mode(dd, mode);
+		if (r)
+			return r;
+	}
+	atomic_inc(&dd->count);
+
+	if (!check_device_area(dd->dev, start, len)) {
+		DMWARN("device %s too small for target", path);
+		dm_put_device(ti, dd);
+		return -EINVAL;
+	}
+
+	*result = dd;
+
+	return 0;
+}
+
+/*
+ * Decrement a device's use count and remove it if necessary.
+ */
+void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+{
+	if (atomic_dec_and_test(&dd->count)) {
+		close_dev(dd);
+		list_del(&dd->list);
+		kfree(dd);
+	}
+}
+
+/*
+ * Checks to see if the target joins onto the end of the table.
+ */
+static int adjoin(struct dm_table *table, struct dm_target *ti)
+{
+	struct dm_target *prev;
+
+	if (!table->num_targets)
+		return !ti->begin;
+
+	prev = &table->targets[table->num_targets - 1];
+	return (ti->begin == (prev->begin + prev->len));
+}
+
+/*
+ * Used to dynamically allocate the arg array.
+ */
+static char **realloc_argv(unsigned *array_size, char **old_argv)
+{
+	char **argv;
+	unsigned new_size;
+
+	new_size = *array_size ? *array_size * 2 : 64;
+	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+	if (argv) {
+		memcpy(argv, old_argv, *array_size * sizeof(*argv));
+		*array_size = new_size;
+	}
+
+	kfree(old_argv);
+	return argv;
+}
+
+/*
+ * Destructively splits up the argument list to pass to ctr.
+ */
+static int split_args(int *argc, char ***argvp, char *input)
+{
+	char *start, *end = input, *out, **argv = NULL;
+	unsigned array_size = 0;
+
+	*argc = 0;
+	argv = realloc_argv(&array_size, argv);
+	if (!argv)
+		return -ENOMEM;
+
+	while (1) {
+		start = end;
+
+		/* Skip whitespace */
+		while (*start && isspace(*start))
+			start++;
+
+		if (!*start)
+			break;	/* success, we hit the end */
+
+		/* 'out' is used to strip backslash escapes */
+		end = out = start;
+		while (*end) {
+			/* Everything apart from '\0' can be quoted */
+			if (*end == '\\' && *(end + 1)) {
+				*out++ = *(end + 1);
+				end += 2;
+				continue;
+			}
+
+			if (isspace(*end))
+				break;	/* end of token */
+
+			*out++ = *end++;
+		}
+
+		/* have we already filled the array ? */
+		if ((*argc + 1) > array_size) {
+			argv = realloc_argv(&array_size, argv);
+			if (!argv)
+				return -ENOMEM;
+		}
+
+		/* we know this is whitespace */
+		if (*end)
+			end++;
+
+		/* terminate the string and put it in the array */
+		*out = '\0';
+		argv[*argc] = start;
+		(*argc)++;
+	}
+
+	*argvp = argv;
+	return 0;
+}
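+
+/*
+ * e.g. params of "/dev/hda 384" yield argc = 2 with argv =
+ * { "/dev/hda", "384" }; a backslash escapes the next character,
+ * so "a\ b" is parsed as the single token "a b".
+ */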
+
+int dm_table_add_target(struct dm_table *t, const char *type,
+			sector_t start, sector_t len, char *params)
+{
+	int r = -EINVAL, argc;
+	char **argv;
+	struct dm_target *tgt;
+
+	if (t->num_targets >= t->num_allocated)
+		return -ENOMEM;
+
+	tgt = t->targets + t->num_targets;
+	memset(tgt, 0, sizeof(*tgt));
+
+	tgt->type = dm_get_target_type(type);
+	if (!tgt->type) {
+		tgt->error = "unknown target type";
+		return -EINVAL;
+	}
+
+	tgt->table = t;
+	tgt->begin = start;
+	tgt->len = len;
+	tgt->error = "Unknown error";
+
+	/*
+	 * Does this target adjoin the previous one ?
+	 */
+	if (!adjoin(t, tgt)) {
+		tgt->error = "Gap in table";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	r = split_args(&argc, &argv, params);
+	if (r) {
+		tgt->error = "couldn't split parameters (insufficient memory)";
+		goto bad;
+	}
+
+	r = tgt->type->ctr(tgt, argc, argv);
+	kfree(argv);
+	if (r)
+		goto bad;
+
+	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
+	return 0;
+
+      bad:
+	printk(KERN_ERR DM_NAME ": %s\n", tgt->error);
+	dm_put_target_type(tgt->type);
+	return r;
+}
+
+static int setup_indexes(struct dm_table *t)
+{
+	int i;
+	unsigned int total = 0;
+	sector_t *indexes;
+
+	/* allocate the space for *all* the indexes */
+	for (i = t->depth - 2; i >= 0; i--) {
+		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
+		total += t->counts[i];
+	}
+
+	indexes = (sector_t *) vcalloc(total, (unsigned long) NODE_SIZE);
+	if (!indexes)
+		return -ENOMEM;
+
+	/* set up internal nodes, bottom-up */
+	for (i = t->depth - 2, total = 0; i >= 0; i--) {
+		t->index[i] = indexes;
+		indexes += (KEYS_PER_NODE * t->counts[i]);
+		setup_btree_index(i, t);
+	}
+
+	return 0;
+}
+
+/*
+ * Builds the btree to index the map.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+	int r = 0;
+	unsigned int leaf_nodes;
+
+	/* how many indexes will the btree have ? */
+	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
+	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
+
+	/* leaf layer has already been set up */
+	t->counts[t->depth - 1] = leaf_nodes;
+	t->index[t->depth - 1] = t->highs;
+
+	if (t->depth >= 2)
+		r = setup_indexes(t);
+
+	return r;
+}
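+
+/*
+ * Worked example (assuming 32-byte cache lines and a 4-byte
+ * sector_t, so KEYS_PER_NODE = 8): 100 targets need 13 leaf
+ * nodes, giving depth = 3 with counts of 1, 2 and 13 nodes per
+ * level.
+ */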
+
+static spinlock_t _event_lock = SPIN_LOCK_UNLOCKED;
+void dm_table_event_callback(struct dm_table *t,
+			     void (*fn)(void *), void *context)
+{
+	spin_lock_irq(&_event_lock);
+	t->event_fn = fn;
+	t->event_context = context;
+	spin_unlock_irq(&_event_lock);
+}
+
+void dm_table_event(struct dm_table *t)
+{
+	spin_lock(&_event_lock);
+	if (t->event_fn)
+		t->event_fn(t->event_context);
+	spin_unlock(&_event_lock);
+}
+
+sector_t dm_table_get_size(struct dm_table *t)
+{
+	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
+}
+
+struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
+{
+	if (index >= t->num_targets)
+		return NULL;
+
+	return t->targets + index;
+}
+
+/*
+ * Search the btree for the correct target.
+ */
+struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+{
+	unsigned int l, n = 0, k = 0;
+	sector_t *node;
+
+	for (l = 0; l < t->depth; l++) {
+		n = get_child(n, k);
+		node = get_node(t, l, n);
+
+		for (k = 0; k < KEYS_PER_NODE; k++)
+			if (node[k] >= sector)
+				break;
+	}
+
+	return &t->targets[(KEYS_PER_NODE * n) + k];
+}
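+
+/*
+ * At each level the first key >= sector selects the child to
+ * descend into; at the leaves, highs[] holds the last sector of
+ * each target, so the first leaf key >= sector belongs to the
+ * target that contains it.
+ */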
+
+unsigned int dm_table_get_num_targets(struct dm_table *t)
+{
+	return t->num_targets;
+}
+
+struct list_head *dm_table_get_devices(struct dm_table *t)
+{
+	return &t->devices;
+}
+
+int dm_table_get_mode(struct dm_table *t)
+{
+	return t->mode;
+}
+
+void dm_table_suspend_targets(struct dm_table *t)
+{
+	int i;
+
+	for (i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = t->targets + i;
+
+		if (ti->type->suspend)
+			ti->type->suspend(ti);
+	}
+}
+
+void dm_table_resume_targets(struct dm_table *t)
+{
+	int i;
+
+	for (i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = t->targets + i;
+
+		if (ti->type->resume)
+			ti->type->resume(ti);
+	}
+}
+
+EXPORT_SYMBOL(dm_get_device);
+EXPORT_SYMBOL(dm_put_device);
+EXPORT_SYMBOL(dm_table_event);
+EXPORT_SYMBOL(dm_table_get_mode);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/dm-target.c linux-2.4.26-leo/drivers/md/dm-target.c
--- linux-2.4.26/drivers/md/dm-target.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/dm-target.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+
+struct tt_internal {
+	struct target_type tt;
+
+	struct list_head list;
+	long use;
+};
+
+static LIST_HEAD(_targets);
+static DECLARE_RWSEM(_lock);
+
+#define DM_MOD_NAME_SIZE 32
+
+static inline struct tt_internal *__find_target_type(const char *name)
+{
+	struct list_head *tih;
+	struct tt_internal *ti;
+
+	list_for_each(tih, &_targets) {
+		ti = list_entry(tih, struct tt_internal, list);
+
+		if (!strcmp(name, ti->tt.name))
+			return ti;
+	}
+
+	return NULL;
+}
+
+static struct tt_internal *get_target_type(const char *name)
+{
+	struct tt_internal *ti;
+
+	down_read(&_lock);
+	ti = __find_target_type(name);
+
+	if (ti) {
+		if (ti->use == 0 && ti->tt.module)
+			__MOD_INC_USE_COUNT(ti->tt.module);
+		ti->use++;
+	}
+	up_read(&_lock);
+
+	return ti;
+}
+
+static void load_module(const char *name)
+{
+	char module_name[DM_MOD_NAME_SIZE] = "dm-";
+
+	/* Length check for strcat() below */
+	if (strlen(name) > (DM_MOD_NAME_SIZE - 4))
+		return;
+
+	strcat(module_name, name);
+	request_module(module_name);
+}
+
+struct target_type *dm_get_target_type(const char *name)
+{
+	struct tt_internal *ti = get_target_type(name);
+
+	if (!ti) {
+		load_module(name);
+		ti = get_target_type(name);
+	}
+
+	return ti ? &ti->tt : NULL;
+}
+
+void dm_put_target_type(struct target_type *t)
+{
+	struct tt_internal *ti = (struct tt_internal *) t;
+
+	down_read(&_lock);
+	if (--ti->use == 0 && ti->tt.module)
+		__MOD_DEC_USE_COUNT(ti->tt.module);
+
+	if (ti->use < 0)
+		BUG();
+	up_read(&_lock);
+
+	return;
+}
+
+static struct tt_internal *alloc_target(struct target_type *t)
+{
+	struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
+
+	if (ti) {
+		memset(ti, 0, sizeof(*ti));
+		ti->tt = *t;
+	}
+
+	return ti;
+}
+
+int dm_register_target(struct target_type *t)
+{
+	int rv = 0;
+	struct tt_internal *ti = alloc_target(t);
+
+	if (!ti)
+		return -ENOMEM;
+
+	down_write(&_lock);
+	if (__find_target_type(t->name)) {
+		kfree(ti);
+		rv = -EEXIST;
+	} else
+		list_add(&ti->list, &_targets);
+
+	up_write(&_lock);
+	return rv;
+}
+
+int dm_unregister_target(struct target_type *t)
+{
+	struct tt_internal *ti;
+
+	down_write(&_lock);
+	if (!(ti = __find_target_type(t->name))) {
+		up_write(&_lock);
+		return -EINVAL;
+	}
+
+	if (ti->use) {
+		up_write(&_lock);
+		return -ETXTBSY;
+	}
+
+	list_del(&ti->list);
+	kfree(ti);
+
+	up_write(&_lock);
+	return 0;
+}
+
+/*
+ * io-err: always fails an io, useful for bringing
+ * up LVs that have holes in them.
+ */
+static int io_err_ctr(struct dm_target *ti, unsigned int argc, char **args)
+{
+	return 0;
+}
+
+static void io_err_dtr(struct dm_target *ti)
+{
+	/* empty */
+}
+
+static int io_err_map(struct dm_target *ti, struct buffer_head *bh, int rw,
+		      union map_info *map_context)
+{
+	return -EIO;
+}
+
+static struct target_type error_target = {
+	.name = "error",
+	.ctr  = io_err_ctr,
+	.dtr  = io_err_dtr,
+	.map  = io_err_map,
+};
+
+int dm_target_init(void)
+{
+	return dm_register_target(&error_target);
+}
+
+void dm_target_exit(void)
+{
+	if (dm_unregister_target(&error_target))
+		DMWARN("error target unregistration failed");
+}
+
+EXPORT_SYMBOL(dm_register_target);
+EXPORT_SYMBOL(dm_unregister_target);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/kcopyd.c linux-2.4.26-leo/drivers/md/kcopyd.c
--- linux-2.4.26/drivers/md/kcopyd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/kcopyd.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,666 @@
+/*
+ * Copyright (C) 2002 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/blkdev.h>
+#include <linux/config.h>
+#include <linux/device-mapper.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/locks.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "kcopyd.h"
+#include "dm-daemon.h"
+
+/* FIXME: this is only needed for the DMERR macros */
+#include "dm.h"
+
+static struct dm_daemon _kcopyd;
+
+#define SECTORS_PER_PAGE (PAGE_SIZE / SECTOR_SIZE)
+#define SUB_JOB_SIZE 128
+#define PAGES_PER_SUB_JOB (SUB_JOB_SIZE / SECTORS_PER_PAGE)
+#define SUB_JOB_COUNT 8
+
+/*-----------------------------------------------------------------
+ * Each kcopyd client has its own little pool of preallocated
+ * pages for kcopyd io.
+ *---------------------------------------------------------------*/
+struct kcopyd_client {
+	struct list_head list;
+
+	spinlock_t lock;
+	struct list_head pages;
+	unsigned int nr_pages;
+	unsigned int nr_free_pages;
+	unsigned int max_split;
+};
+
+static inline void __push_page(struct kcopyd_client *kc, struct page *p)
+{
+	list_add(&p->list, &kc->pages);
+	kc->nr_free_pages++;
+}
+
+static inline struct page *__pop_page(struct kcopyd_client *kc)
+{
+	struct page *p;
+
+	p = list_entry(kc->pages.next, struct page, list);
+	list_del(&p->list);
+	kc->nr_free_pages--;
+
+	return p;
+}
+
+static int kcopyd_get_pages(struct kcopyd_client *kc,
+			    unsigned int nr, struct list_head *pages)
+{
+	struct page *p;
+	INIT_LIST_HEAD(pages);
+
+	spin_lock(&kc->lock);
+	if (kc->nr_free_pages < nr) {
+		spin_unlock(&kc->lock);
+		return -ENOMEM;
+	}
+
+	while (nr--) {
+		p = __pop_page(kc);
+		list_add(&p->list, pages);
+	}
+	spin_unlock(&kc->lock);
+
+	return 0;
+}
+
+static void kcopyd_put_pages(struct kcopyd_client *kc, struct list_head *pages)
+{
+	struct list_head *tmp, *tmp2;
+
+	spin_lock(&kc->lock);
+	list_for_each_safe (tmp, tmp2, pages)
+		__push_page(kc, list_entry(tmp, struct page, list));
+	spin_unlock(&kc->lock);
+}
+
+/*
+ * These three functions resize the page pool.
+ */
+static void release_pages(struct list_head *pages)
+{
+	struct page *p;
+	struct list_head *tmp, *tmp2;
+
+	list_for_each_safe (tmp, tmp2, pages) {
+		p = list_entry(tmp, struct page, list);
+		UnlockPage(p);
+		__free_page(p);
+	}
+}
+
+static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+{
+	unsigned int i;
+	struct page *p;
+	LIST_HEAD(new);
+
+	for (i = 0; i < nr; i++) {
+		p = alloc_page(GFP_KERNEL);
+		if (!p) {
+			release_pages(&new);
+			return -ENOMEM;
+		}
+
+		LockPage(p);
+		list_add(&p->list, &new);
+	}
+
+	kcopyd_put_pages(kc, &new);
+	kc->nr_pages += nr;
+	kc->max_split = kc->nr_pages / PAGES_PER_SUB_JOB;
+	if (kc->max_split > SUB_JOB_COUNT)
+		kc->max_split = SUB_JOB_COUNT;
+
+	return 0;
+}
+
+static void client_free_pages(struct kcopyd_client *kc)
+{
+	BUG_ON(kc->nr_free_pages != kc->nr_pages);
+	release_pages(&kc->pages);
+	kc->nr_free_pages = kc->nr_pages = 0;
+}
+
+/*-----------------------------------------------------------------
+ * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
+ * for this reason we use a mempool to prevent the client from
+ * ever having to do io (which could cause a deadlock).
+ *---------------------------------------------------------------*/
+struct kcopyd_job {
+	struct kcopyd_client *kc;
+	struct list_head list;
+	unsigned int flags;
+
+	/*
+	 * Error state of the job.
+	 */
+	int read_err;
+	unsigned int write_err;
+
+	/*
+	 * Either READ or WRITE
+	 */
+	int rw;
+	struct io_region source;
+
+	/*
+	 * The destinations for the transfer.
+	 */
+	unsigned int num_dests;
+	struct io_region dests[KCOPYD_MAX_REGIONS];
+
+	sector_t offset;
+	unsigned int nr_pages;
+	struct list_head pages;
+
+	/*
+	 * Set this to ensure you are notified when the job has
+	 * completed.  'context' is for callback to use.
+	 */
+	kcopyd_notify_fn fn;
+	void *context;
+
+	/*
+	 * These fields are only used if the job has been split
+	 * into more manageable parts.
+	 */
+	struct semaphore lock;
+	atomic_t sub_jobs;
+	sector_t progress;
+};
+
+/* FIXME: this should scale with the number of pages */
+#define MIN_JOBS 512
+
+static kmem_cache_t *_job_cache;
+static mempool_t *_job_pool;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i)   jobs waiting for pages
+ * ii)  jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED;
+
+static LIST_HEAD(_complete_jobs);
+static LIST_HEAD(_io_jobs);
+static LIST_HEAD(_pages_jobs);
+
+static int jobs_init(void)
+{
+	INIT_LIST_HEAD(&_complete_jobs);
+	INIT_LIST_HEAD(&_io_jobs);
+	INIT_LIST_HEAD(&_pages_jobs);
+
+	_job_cache = kmem_cache_create("kcopyd-jobs",
+				       sizeof(struct kcopyd_job),
+				       __alignof__(struct kcopyd_job),
+				       0, NULL, NULL);
+	if (!_job_cache)
+		return -ENOMEM;
+
+	_job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab,
+				   mempool_free_slab, _job_cache);
+	if (!_job_pool) {
+		kmem_cache_destroy(_job_cache);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void jobs_exit(void)
+{
+	BUG_ON(!list_empty(&_complete_jobs));
+	BUG_ON(!list_empty(&_io_jobs));
+	BUG_ON(!list_empty(&_pages_jobs));
+
+	mempool_destroy(_job_pool);
+	kmem_cache_destroy(_job_cache);
+}
+
+/*
+ * Functions to push and pop a job onto the head of a given job
+ * list.
+ */
+static inline struct kcopyd_job *pop(struct list_head *jobs)
+{
+	struct kcopyd_job *job = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&_job_lock, flags);
+
+	if (!list_empty(jobs)) {
+		job = list_entry(jobs->next, struct kcopyd_job, list);
+		list_del(&job->list);
+	}
+	spin_unlock_irqrestore(&_job_lock, flags);
+
+	return job;
+}
+
+static inline void push(struct list_head *jobs, struct kcopyd_job *job)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&_job_lock, flags);
+	list_add_tail(&job->list, jobs);
+	spin_unlock_irqrestore(&_job_lock, flags);
+}
+
+/*
+ * These three functions process 1 item from the corresponding
+ * job list.
+ *
+ * They return:
+ * < 0: error
+ *   0: success
+ * > 0: can't process yet.
+ */
+static int run_complete_job(struct kcopyd_job *job)
+{
+	void *context = job->context;
+	int read_err = job->read_err;
+	unsigned int write_err = job->write_err;
+	kcopyd_notify_fn fn = job->fn;
+
+	kcopyd_put_pages(job->kc, &job->pages);
+	mempool_free(job, _job_pool);
+	fn(read_err, write_err, context);
+	return 0;
+}
+
+static void complete_io(unsigned int error, void *context)
+{
+	struct kcopyd_job *job = (struct kcopyd_job *) context;
+
+	if (error) {
+		if (job->rw == WRITE)
+			job->write_err &= error;
+		else
+			job->read_err = 1;
+
+		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+			push(&_complete_jobs, job);
+			dm_daemon_wake(&_kcopyd);
+			return;
+		}
+	}
+
+	if (job->rw == WRITE)
+		push(&_complete_jobs, job);
+
+	else {
+		job->rw = WRITE;
+		push(&_io_jobs, job);
+	}
+
+	dm_daemon_wake(&_kcopyd);
+}
+
+/*
+ * Request io on as many buffer heads as we can currently get for
+ * a particular job.
+ */
+static int run_io_job(struct kcopyd_job *job)
+{
+	int r;
+
+	if (job->rw == READ)
+		r = dm_io_async(1, &job->source, job->rw,
+				list_entry(job->pages.next, struct page, list),
+				job->offset, complete_io, job);
+
+	else
+		r = dm_io_async(job->num_dests, job->dests, job->rw,
+				list_entry(job->pages.next, struct page, list),
+				job->offset, complete_io, job);
+
+	return r;
+}
+
+static int run_pages_job(struct kcopyd_job *job)
+{
+	int r;
+
+	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
+				  SECTORS_PER_PAGE);
+	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
+	if (!r) {
+		/* this job is ready for io */
+		push(&_io_jobs, job);
+		return 0;
+	}
+
+	if (r == -ENOMEM)
+		/* can't complete now */
+		return 1;
+
+	return r;
+}
+
+/*
+ * Run through a list for as long as possible.  Returns the count
+ * of successful jobs.
+ */
+static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+{
+	struct kcopyd_job *job;
+	int r, count = 0;
+
+	while ((job = pop(jobs))) {
+
+		r = fn(job);
+
+		if (r < 0) {
+			/* error this rogue job */
+			if (job->rw == WRITE)
+				job->write_err = (unsigned int) -1;
+			else
+				job->read_err = 1;
+			push(&_complete_jobs, job);
+			break;
+		}
+
+		if (r > 0) {
+			/*
+			 * We couldn't service this job ATM, so
+			 * push this job back onto the list.
+			 */
+			push(jobs, job);
+			break;
+		}
+
+		count++;
+	}
+
+	return count;
+}
+
+/*
+ * kcopyd does this every time it's woken up.
+ */
+static void do_work(void)
+{
+	/*
+	 * The order that these are called is *very* important.
+	 * complete jobs can free some pages for pages jobs.
+	 * Pages jobs when successful will jump onto the io jobs
+	 * list.  io jobs call wake when they complete and it all
+	 * starts again.
+	 */
+	process_jobs(&_complete_jobs, run_complete_job);
+	process_jobs(&_pages_jobs, run_pages_job);
+	process_jobs(&_io_jobs, run_io_job);
+	run_task_queue(&tq_disk);
+}
+
+/*
+ * If we are copying a small region we just dispatch a single job
+ * to do the copy, otherwise the io has to be split up into many
+ * jobs.
+ */
+static void dispatch_job(struct kcopyd_job *job)
+{
+	push(&_pages_jobs, job);
+	dm_daemon_wake(&_kcopyd);
+}
+
+static void segment_complete(int read_err,
+			     unsigned int write_err, void *context)
+{
+	/* FIXME: tidy this function */
+	sector_t progress = 0;
+	sector_t count = 0;
+	struct kcopyd_job *job = (struct kcopyd_job *) context;
+
+	down(&job->lock);
+
+	/* update the error */
+	if (read_err)
+		job->read_err = 1;
+
+	if (write_err)
+		job->write_err &= write_err;
+
+	/*
+	 * Only dispatch more work if there hasn't been an error.
+	 */
+	if ((!job->read_err && !job->write_err) ||
+	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+		/* get the next chunk of work */
+		progress = job->progress;
+		count = job->source.count - progress;
+		if (count) {
+			if (count > SUB_JOB_SIZE)
+				count = SUB_JOB_SIZE;
+
+			job->progress += count;
+		}
+	}
+	up(&job->lock);
+
+	if (count) {
+		int i;
+		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+
+		memcpy(sub_job, job, sizeof(*job));
+		sub_job->source.sector += progress;
+		sub_job->source.count = count;
+
+		for (i = 0; i < job->num_dests; i++) {
+			sub_job->dests[i].sector += progress;
+			sub_job->dests[i].count = count;
+		}
+
+		sub_job->fn = segment_complete;
+		sub_job->context = job;
+		dispatch_job(sub_job);
+
+	} else if (atomic_dec_and_test(&job->sub_jobs)) {
+
+		/*
+		 * To avoid a race we must keep the job around
+		 * until after the notify function has completed.
+		 * Otherwise the client may try and stop the job
+		 * after we've completed.
+		 */
+		job->fn(read_err, write_err, job->context);
+		mempool_free(job, _job_pool);
+	}
+}
+
+/*
+ * Break a large copy up into SUB_JOB_SIZE sized sub-jobs that do
+ * the move between source and destinations.
+ */
+static void split_job(struct kcopyd_job *job)
+{
+	int nr;
+
+	nr = dm_div_up(job->source.count, SUB_JOB_SIZE);
+	if (nr > job->kc->max_split)
+		nr = job->kc->max_split;
+
+	atomic_set(&job->sub_jobs, nr);
+	while (nr--)
+		segment_complete(0, 0u, job);
+}
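+
+/*
+ * Worked example: a 300 sector source with SUB_JOB_SIZE = 128
+ * splits into 3 sub-jobs of 128, 128 and 44 sectors
+ * (kc->max_split limits how many are in flight at once); the
+ * final segment_complete() call frees the parent job.
+ */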
+
+int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
+		unsigned int num_dests, struct io_region *dests,
+		unsigned int flags, kcopyd_notify_fn fn, void *context)
+{
+	struct kcopyd_job *job;
+
+	/*
+	 * Allocate a new job.
+	 */
+	job = mempool_alloc(_job_pool, GFP_NOIO);
+
+	/*
+	 * set up for the read.
+	 */
+	job->kc = kc;
+	job->flags = flags;
+	job->read_err = 0;
+	job->write_err = 0;
+	job->rw = READ;
+
+	memcpy(&job->source, from, sizeof(*from));
+
+	job->num_dests = num_dests;
+	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
+
+	job->offset = 0;
+	job->nr_pages = 0;
+	INIT_LIST_HEAD(&job->pages);
+
+	job->fn = fn;
+	job->context = context;
+
+	if (job->source.count < SUB_JOB_SIZE)
+		dispatch_job(job);
+
+	else {
+		init_MUTEX(&job->lock);
+		job->progress = 0;
+		split_job(job);
+	}
+
+	return 0;
+}
+
+/*
+ * Cancels a kcopyd job, e.g. someone might be deactivating a
+ * mirror.
+ */
+int kcopyd_cancel(struct kcopyd_job *job, int block)
+{
+	/* FIXME: finish */
+	return -1;
+}
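+
+/*
+ * Illustrative sketch only, not a definitive API guide: a
+ * hypothetical client copying the first 1024 sectors of one
+ * device to another.  Assumes struct io_region carries
+ * { dev, sector, count } as declared in dm-io.h, and that the
+ * client is kept alive until the callback has run.
+ */
+#if 0
+static void example_done(int read_err, unsigned int write_err, void *context)
+{
+	/* read_err is a boolean, write_err one bit per destination */
+}
+
+static int example_copy(kdev_t src, kdev_t dst)
+{
+	int r;
+	struct kcopyd_client *kc;
+	struct io_region from, to;
+
+	/* the minimum sized pool still covers one SUB_JOB_SIZE chunk */
+	r = kcopyd_client_create(SUB_JOB_SIZE / SECTORS_PER_PAGE, &kc);
+	if (r)
+		return r;
+
+	from.dev = src;
+	from.sector = 0;
+	from.count = 1024;
+
+	to = from;
+	to.dev = dst;
+
+	return kcopyd_copy(kc, &from, 1, &to, 0, example_done, NULL);
+}
+#endif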
+
+/*-----------------------------------------------------------------
+ * Unit setup
+ *---------------------------------------------------------------*/
+static DECLARE_MUTEX(_client_lock);
+static LIST_HEAD(_clients);
+
+static int client_add(struct kcopyd_client *kc)
+{
+	down(&_client_lock);
+	list_add(&kc->list, &_clients);
+	up(&_client_lock);
+	return 0;
+}
+
+static void client_del(struct kcopyd_client *kc)
+{
+	down(&_client_lock);
+	list_del(&kc->list);
+	up(&_client_lock);
+}
+
+int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
+{
+	int r = 0;
+	struct kcopyd_client *kc;
+
+	if (nr_pages * SECTORS_PER_PAGE < SUB_JOB_SIZE) {
+		DMERR("kcopyd client requested %u pages: minimum is %lu",
+		      nr_pages, SUB_JOB_SIZE / SECTORS_PER_PAGE);
+		return -ENOMEM;
+	}
+
+	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
+	if (!kc)
+		return -ENOMEM;
+
+	kc->lock = SPIN_LOCK_UNLOCKED;
+	INIT_LIST_HEAD(&kc->pages);
+	kc->nr_pages = kc->nr_free_pages = 0;
+	r = client_alloc_pages(kc, nr_pages);
+	if (r) {
+		kfree(kc);
+		return r;
+	}
+
+	r = dm_io_get(nr_pages);
+	if (r) {
+		client_free_pages(kc);
+		kfree(kc);
+		return r;
+	}
+
+	r = client_add(kc);
+	if (r) {
+		dm_io_put(nr_pages);
+		client_free_pages(kc);
+		kfree(kc);
+		return r;
+	}
+
+	*result = kc;
+	return 0;
+}
+
+void kcopyd_client_destroy(struct kcopyd_client *kc)
+{
+	dm_io_put(kc->nr_pages);
+	client_free_pages(kc);
+	client_del(kc);
+	kfree(kc);
+}
+
+
+int __init kcopyd_init(void)
+{
+	int r;
+
+	r = jobs_init();
+	if (r)
+		return r;
+
+	r = dm_daemon_start(&_kcopyd, "kcopyd", do_work);
+	if (r)
+		jobs_exit();
+
+	return r;
+}
+
+void kcopyd_exit(void)
+{
+	jobs_exit();
+	dm_daemon_stop(&_kcopyd);
+}
+
+EXPORT_SYMBOL(kcopyd_client_create);
+EXPORT_SYMBOL(kcopyd_client_destroy);
+EXPORT_SYMBOL(kcopyd_copy);
+EXPORT_SYMBOL(kcopyd_cancel);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/kcopyd.h linux-2.4.26-leo/drivers/md/kcopyd.h
--- linux-2.4.26/drivers/md/kcopyd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/kcopyd.h	2004-06-22 21:04:34.000000000 +0100
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2001 Sistina Software
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_KCOPYD_H
+#define DM_KCOPYD_H
+
+/*
+ * Needed for the definition of sector_t.
+ */
+#include <linux/device-mapper.h>
+#include <linux/iobuf.h>
+
+#include "dm-io.h"
+
+int kcopyd_init(void);
+void kcopyd_exit(void);
+
+/* FIXME: make this configurable */
+#define KCOPYD_MAX_REGIONS 8
+
+#define KCOPYD_IGNORE_ERROR 1
+
+/*
+ * To use kcopyd you must first create a kcopyd client object.
+ */
+struct kcopyd_client;
+int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
+void kcopyd_client_destroy(struct kcopyd_client *kc);
+
+/*
+ * Submit a copy job to kcopyd.  This is built on top of the
+ * client create/destroy functions above.
+ *
+ * read_err is a boolean,
+ * write_err is a bitset, with 1 bit for each destination region
+ */
+typedef void (*kcopyd_notify_fn)(int read_err,
+				 unsigned int write_err, void *context);
+
+int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
+		unsigned int num_dests, struct io_region *dests,
+		unsigned int flags, kcopyd_notify_fn fn, void *context);
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/lvm.c linux-2.4.26-leo/drivers/md/lvm.c
--- linux-2.4.26/drivers/md/lvm.c	2004-02-20 14:11:42.000000000 +0000
+++ linux-2.4.26-leo/drivers/md/lvm.c	2004-05-26 23:34:19.000000000 +0100
@@ -236,9 +236,6 @@
 #define DEVICE_OFF(device)
 #define LOCAL_END_REQUEST
 
-/* lvm_do_lv_create calls fsync_dev_lockfs()/unlockfs() */
-/* #define	LVM_VFS_ENHANCEMENT */
-
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -2250,12 +2247,8 @@
 	if (lv_ptr->lv_access & LV_SNAPSHOT) {
 		lv_t *org = lv_ptr->lv_snapshot_org, *last;
 
-		/* sync the original logical volume */
-		fsync_dev(org->lv_dev);
-#ifdef	LVM_VFS_ENHANCEMENT
 		/* VFS function call to sync and lock the filesystem */
 		fsync_dev_lockfs(org->lv_dev);
-#endif
 
 		down_write(&org->lv_lock);
 		org->lv_access |= LV_SNAPSHOT_ORG;
@@ -2281,11 +2274,9 @@
 	else
 		set_device_ro(lv_ptr->lv_dev, 1);
 
-#ifdef	LVM_VFS_ENHANCEMENT
 /* VFS function call to unlock the filesystem */
 	if (lv_ptr->lv_access & LV_SNAPSHOT)
 		unlockfs(lv_ptr->lv_snapshot_org->lv_dev);
-#endif
 
 	lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de =
 	    lvm_fs_create_lv(vg_ptr, lv_ptr);
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/Makefile linux-2.4.26-leo/drivers/md/Makefile
--- linux-2.4.26/drivers/md/Makefile	2001-11-11 18:09:32.000000000 +0000
+++ linux-2.4.26-leo/drivers/md/Makefile	2004-05-26 23:34:19.000000000 +0100
@@ -4,24 +4,41 @@
 
 O_TARGET	:= mddev.o
 
-export-objs	:= md.o xor.o
-list-multi	:= lvm-mod.o
+export-objs	:= md.o xor.o dm-table.o dm-target.o kcopyd.o dm-daemon.o \
+		   dm-log.o dm-io.o dm.o
+
+list-multi	:= lvm-mod.o dm-mod.o dm-mirror-mod.o
 lvm-mod-objs	:= lvm.o lvm-snap.o lvm-fs.o
+dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-ioctl.o \
+		   dm-linear.o dm-stripe.o dm-snapshot.o dm-exception-store.o \
+		   kcopyd.o dm-daemon.o dm-io.o
+dm-mirror-mod-objs := dm-raid1.o dm-log.o
 
 # Note: link order is important.  All raid personalities
 # and xor.o must come before md.o, as they each initialise 
 # themselves, and md.o may use the personalities when it 
 # auto-initialised.
 
-obj-$(CONFIG_MD_LINEAR)		+= linear.o
-obj-$(CONFIG_MD_RAID0)		+= raid0.o
-obj-$(CONFIG_MD_RAID1)		+= raid1.o
-obj-$(CONFIG_MD_RAID5)		+= raid5.o xor.o
-obj-$(CONFIG_MD_MULTIPATH)	+= multipath.o
-obj-$(CONFIG_BLK_DEV_MD)	+= md.o
-obj-$(CONFIG_BLK_DEV_LVM)	+= lvm-mod.o
+obj-$(CONFIG_MD_LINEAR)			+= linear.o
+obj-$(CONFIG_MD_RAID0)			+= raid0.o
+obj-$(CONFIG_MD_RAID1)			+= raid1.o
+obj-$(CONFIG_MD_RAID5)			+= raid5.o xor.o
+obj-$(CONFIG_MD_MULTIPATH)		+= multipath.o
+obj-$(CONFIG_BLK_DEV_MD)		+= md.o
+
+obj-$(CONFIG_BLK_DEV_LVM)		+= lvm-mod.o
+
+obj-$(CONFIG_BLK_DEV_DM)		+= dm-mod.o
+obj-$(CONFIG_BLK_DEV_DM_MIRROR)		+= dm-mirror.o
 
 include $(TOPDIR)/Rules.make
 
 lvm-mod.o: $(lvm-mod-objs)
 	$(LD) -r -o $@ $(lvm-mod-objs)
+
+dm-mod.o: $(dm-mod-objs)
+	$(LD) -r -o $@ $(dm-mod-objs)
+
+dm-mirror.o: $(dm-mirror-mod-objs)
+	$(LD) -r -o $@ $(dm-mirror-mod-objs)
+
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/md.c linux-2.4.26-leo/drivers/md/md.c
--- linux-2.4.26/drivers/md/md.c	2003-08-25 12:44:42.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/md.c	2004-05-26 23:34:27.000000000 +0100
@@ -12,6 +12,7 @@
    - kmod support by: Cyrus Durgin
    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
+   - Verbose startup messages by Paul Evans <nerd@freeuk.com>
 
    - lots of fixes and improvements to the RAID1/RAID5 and generic
      RAID code (such as request based resynchronization):
@@ -524,7 +525,8 @@
 		printk(NO_SB,partition_name(dev));
 		return -EINVAL;
 	}
-	printk(KERN_INFO " [events: %08lx]\n", (unsigned long)rdev->sb->events_lo);
+	VERBMSG(printk(KERN_INFO " [events: %08lx]\n", 
+                        (unsigned long)rdev->sb->events_lo))
 	ret = 0;
 abort:
 	return ret;
@@ -633,7 +635,8 @@
 	md_list_add(&rdev->same_set, &mddev->disks);
 	rdev->mddev = mddev;
 	mddev->nb_dev++;
-	printk(KERN_INFO "md: bind<%s,%d>\n", partition_name(rdev->dev), mddev->nb_dev);
+	VERBMSG(printk(KERN_INFO "md: bind<%s,%d>\n", 
+                partition_name(rdev->dev), mddev->nb_dev))
 }
 
 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
@@ -645,8 +648,8 @@
 	md_list_del(&rdev->same_set);
 	MD_INIT_LIST_HEAD(&rdev->same_set);
 	rdev->mddev->nb_dev--;
-	printk(KERN_INFO "md: unbind<%s,%d>\n", partition_name(rdev->dev),
-						 rdev->mddev->nb_dev);
+	VERBMSG(printk(KERN_INFO "md: unbind<%s,%d>\n", 
+                partition_name(rdev->dev), rdev->mddev->nb_dev))
 	rdev->mddev = NULL;
 }
 
@@ -949,7 +952,8 @@
 		goto skip;
 	}
 
-	printk(KERN_INFO "(write) %s's sb offset: %ld\n", partition_name(dev), sb_offset);
+	VERBMSG(printk(KERN_INFO "(write) %s's sb offset: %ld\n", 
+		partition_name(dev), sb_offset))
 
 	if (!sync_page_io(dev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE)) {
 		printk("md: write_disk_sb failed for device %s\n", partition_name(dev));
@@ -1009,6 +1013,7 @@
 	int err, count = 100;
 	struct md_list_head *tmp;
 	mdk_rdev_t *rdev;
+	char *part_name;
 
 	if (!mddev->sb_dirty) {
 		printk("hm, md_update_sb() called without ->sb_dirty == 1, from %p.\n", __builtin_return_address(0));
@@ -1038,29 +1043,30 @@
 	if (mddev->sb->not_persistent)
 		return 0;
 
-	printk(KERN_INFO "md: updating md%d RAID superblock on device\n",
-					mdidx(mddev));
+	VERBMSG(printk(KERN_INFO "md: updating md%d RAID superblock on device\n",
+					mdidx(mddev)))
 
 	err = 0;
 	ITERATE_RDEV(mddev,rdev,tmp) {
-		printk(KERN_INFO "md: ");
+		part_name = partition_name(rdev->dev);
 		if (rdev->faulty)
-			printk("(skipping faulty ");
+			printk(KERN_INFO "md: (skipping faulty %s)\n", part_name);
 		if (rdev->alias_device)
-			printk("(skipping alias ");
+			printk(KERN_INFO "md: (skipping alias %s)\n", part_name);
 		if (!rdev->faulty && disk_faulty(&rdev->sb->this_disk)) {
-			printk("(skipping new-faulty %s )\n",
-			       partition_name(rdev->dev));
+			printk(KERN_INFO "md: (skipping new-faulty %s)\n",
+			       part_name);
 			continue;
 		}
-		printk("%s ", partition_name(rdev->dev));
+		VERBMSG(printk(KERN_INFO "md: %s ", part_name))
+
 		if (!rdev->faulty && !rdev->alias_device) {
-			printk("[events: %08lx]",
-				(unsigned long)rdev->sb->events_lo);
+			VERBMSG(printk("[events: %08lx]",
+				(unsigned long)rdev->sb->events_lo))
 			err += write_disk_sb(rdev);
-		} else
-			printk(")\n");
+		}
 	}
+	VERBMSG(printk("\n"))
 	if (err) {
 		if (--count) {
 			printk(KERN_ERR "md: errors occurred during superblock update, repeating\n");
@@ -1246,9 +1252,9 @@
 					rdev->sb->events_hi--;
 		}
 
-		printk(KERN_INFO "md: %s's event counter: %08lx\n",
+		VERBMSG(printk(KERN_INFO "md: %s's event counter: %08lx\n",
 		       partition_name(rdev->dev),
-			(unsigned long)rdev->sb->events_lo);
+			(unsigned long)rdev->sb->events_lo))
 		if (!freshest) {
 			freshest = rdev;
 			continue;
@@ -1600,12 +1606,13 @@
 	}
 	md_maxreadahead[mdidx(mddev)] = readahead;
 
-	printk(KERN_INFO "md%d: max total readahead window set to %ldk\n",
-		mdidx(mddev), readahead*(PAGE_SIZE/1024));
+	VERBMSG(printk(KERN_INFO "md%d: max total readahead window set to %ldk\n",
+	        	mdidx(mddev), readahead*(PAGE_SIZE/1024));
+
+        	printk(KERN_INFO
+		        "md%d: %d data-disks, max readahead per data-disk: %ldk\n",
+			mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024)))
 
-	printk(KERN_INFO
-		"md%d: %d data-disks, max readahead per data-disk: %ldk\n",
-			mdidx(mddev), data_disks, readahead/data_disks*(PAGE_SIZE/1024));
 	return 0;
 abort:
 	return 1;
@@ -1684,9 +1691,10 @@
 			return -EINVAL;
 		}
 	} else
-		if (chunk_size)
-			printk(KERN_INFO "md: RAID level %d does not need chunksize! Continuing anyway.\n",
-			       mddev->sb->level);
+		if (chunk_size) {
+			VERBMSG(printk(KERN_INFO "md: RAID level %d does not need chunksize! Continuing anyway.\n",
+			       mddev->sb->level))
+                }
 
 	if (pnum >= MAX_PERSONALITY) {
 		MD_BUG();
@@ -1857,7 +1865,7 @@
 			 * interrupted.
 			 */
 			if (!mddev->recovery_running && !resync_interrupted) {
-				printk(KERN_INFO "md: marking sb clean...\n");
+				VERBMSG(printk(KERN_INFO "md: marking sb clean...\n"))
 				mddev->sb->state |= 1 << MD_SB_CLEAN;
 			}
 			mddev->sb_dirty = 1;
@@ -1871,11 +1879,13 @@
 	 * Free resources if final stop
 	 */
 	if (!ro) {
-		printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev));
+		VERBMSG(printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev)))
 		free_mddev(mddev);
 
-	} else
-		printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev));
+	} else {
+		VERBMSG(printk(KERN_INFO "md: md%d switched to read-only mode.\n", mdidx(mddev)))
+        }
+
 out:
 	return err;
 }
@@ -1898,8 +1908,6 @@
 
 static void autorun_array(mddev_t *mddev)
 {
-	mdk_rdev_t *rdev;
-	struct md_list_head *tmp;
 	int err;
 
 	if (mddev->disks.prev == &mddev->disks) {
@@ -1907,12 +1915,17 @@
 		return;
 	}
 
-	printk(KERN_INFO "md: running: ");
-
-	ITERATE_RDEV(mddev,rdev,tmp) {
-		printk("<%s>", partition_name(rdev->dev));
-	}
-	printk("\n");
+        VERBMSG({       /* remember: VERBMSG is just an #ifdef section */
+                mdk_rdev_t *rdev;
+                struct md_list_head *tmp;
+                
+        	printk(KERN_INFO "md: running: ");
+
+        	ITERATE_RDEV(mddev,rdev,tmp) {
+	        	printk("<%s>", partition_name(rdev->dev));
+        	}
+	        printk("\n");
+        })
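+
+        /*
+         * VERBMSG is presumably defined earlier in this patch as
+         * something like:
+         *
+         *   #ifdef CONFIG_MD_VERBOSE
+         *   #define VERBMSG(x) x
+         *   #else
+         *   #define VERBMSG(x)
+         *   #endif
+         *
+         * (the exact config symbol is a guess; only the compile-out
+         * behaviour is relied on here)
+         */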
 
 	err = do_md_run (mddev);
 	if (err) {
@@ -1946,12 +1959,13 @@
 	kdev_t md_kdev;
 
 
-	printk(KERN_INFO "md: autorun ...\n");
+	VERBMSG(printk(KERN_INFO "md: autorun ...\n"))
 	while (pending_raid_disks.next != &pending_raid_disks) {
 		rdev0 = md_list_entry(pending_raid_disks.next,
 					 mdk_rdev_t, pending);
 
-		printk(KERN_INFO "md: considering %s ...\n", partition_name(rdev0->dev));
+		VERBMSG(printk(KERN_INFO "md: considering %s ...\n", 
+                        partition_name(rdev0->dev)))
 		MD_INIT_LIST_HEAD(&candidates);
 		ITERATE_RDEV_PENDING(rdev,tmp) {
 			if (uuid_equal(rdev0, rdev)) {
@@ -1961,7 +1975,8 @@
 					       partition_name(rdev->dev), partition_name(rdev0->dev));
 					continue;
 				}
-				printk(KERN_INFO "md:  adding %s ...\n", partition_name(rdev->dev));
+				VERBMSG(printk(KERN_INFO "md:  adding %s ...\n", 
+                                        partition_name(rdev->dev)))
 				md_list_del(&rdev->pending);
 				md_list_add(&rdev->pending, &candidates);
 			}
@@ -1995,7 +2010,7 @@
 		}
 		autorun_array(mddev);
 	}
-	printk(KERN_INFO "md: ... autorun DONE.\n");
+	VERBMSG(printk(KERN_INFO "md: ... autorun DONE.\n"))
 }
 
 /*
@@ -3315,7 +3330,7 @@
 	}
 
 	pers[pnum] = p;
-	printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
+	VERBMSG(printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum))
 	return 0;
 }
 
@@ -3326,7 +3341,7 @@
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
+	VERBMSG(printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name))
 	pers[pnum] = NULL;
 	return 0;
 }
@@ -3690,6 +3705,7 @@
 		 */
 		md_mdelay(1000*1);
 	}
+
 	return NOTIFY_DONE;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/raid0.c linux-2.4.26-leo/drivers/md/raid0.c
--- linux-2.4.26/drivers/md/raid0.c	2003-06-13 15:51:34.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/raid0.c	2004-05-26 23:34:27.000000000 +0100
@@ -38,12 +38,12 @@
 	conf->nr_strip_zones = 0;
  
 	ITERATE_RDEV_ORDERED(mddev,rdev1,j1) {
-		printk("raid0: looking at %s\n", partition_name(rdev1->dev));
+                VERBMSG(printk("raid0: looking at %s\n", partition_name(rdev1->dev)))
 		c = 0;
 		ITERATE_RDEV_ORDERED(mddev,rdev2,j2) {
-			printk("raid0:   comparing %s(%ld) with %s(%ld)\n", partition_name(rdev1->dev), rdev1->size, partition_name(rdev2->dev), rdev2->size);
+			VERBMSG(printk("raid0:   comparing %s(%ld) with %s(%ld)\n", partition_name(rdev1->dev), rdev1->size, partition_name(rdev2->dev), rdev2->size))
 			if (rdev2 == rdev1) {
-				printk("raid0:   END\n");
+				VERBMSG(printk("raid0:   END\n"))
 				break;
 			}
 			if (rdev2->size == rdev1->size)
@@ -52,19 +52,19 @@
 				 * Not unique, dont count it as a new
 				 * group
 				 */
-				printk("raid0:   EQUAL\n");
+				VERBMSG(printk("raid0:   EQUAL\n"))
 				c = 1;
 				break;
 			}
-			printk("raid0:   NOT EQUAL\n");
+			VERBMSG(printk("raid0:   NOT EQUAL\n"))
 		}
 		if (!c) {
-			printk("raid0:   ==> UNIQUE\n");
+			VERBMSG(printk("raid0:   ==> UNIQUE\n"))
 			conf->nr_strip_zones++;
-			printk("raid0: %d zones\n", conf->nr_strip_zones);
+			VERBMSG(printk("raid0: %d zones\n", conf->nr_strip_zones))
 		}
 	}
-		printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
+		VERBMSG(printk("raid0: FINAL %d zones\n", conf->nr_strip_zones))
 
 	conf->strip_zone = vmalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones);
@@ -80,30 +80,31 @@
 	{
 		struct strip_zone *zone = conf->strip_zone + i;
 
-		printk("raid0: zone %d\n", i);
+		VERBMSG(printk("raid0: zone %d\n", i))
 		zone->dev_offset = current_offset;
 		smallest = NULL;
 		c = 0;
 
 		ITERATE_RDEV_ORDERED(mddev,rdev,j) {
 
-			printk("raid0: checking %s ...", partition_name(rdev->dev));
+			VERBMSG(printk("raid0: checking %s ...", partition_name(rdev->dev)))
 			if (rdev->size > current_offset)
 			{
-				printk(" contained as device %d\n", c);
+				VERBMSG(printk(" contained as device %d\n", c))
 				zone->dev[c] = rdev;
 				c++;
 				if (!smallest || (rdev->size <smallest->size)) {
 					smallest = rdev;
-					printk("  (%ld) is smallest!.\n", rdev->size);
+					VERBMSG(printk("  (%ld) is smallest!.\n", rdev->size))
 				}
-			} else
-				printk(" nope.\n");
+			} else {
+				VERBMSG(printk(" nope.\n"))
+			}
 		}
 
 		zone->nb_dev = c;
 		zone->size = (smallest->size - current_offset) * c;
-		printk("raid0: zone->nb_dev: %d, size: %ld\n",zone->nb_dev,zone->size);
+		VERBMSG(printk("raid0: zone->nb_dev: %d, size: %ld\n",zone->nb_dev,zone->size))
 
 		if (!conf->smallest || (zone->size < conf->smallest->size))
 			conf->smallest = zone;
@@ -112,9 +113,9 @@
 		curr_zone_offset += zone->size;
 
 		current_offset = smallest->size;
-		printk("raid0: current zone offset: %ld\n", current_offset);
+		VERBMSG(printk("raid0: current zone offset: %ld\n", current_offset))
 	}
-	printk("raid0: done.\n");
+	VERBMSG(printk("raid0: done.\n"))
 	return 0;
 }
 
@@ -138,15 +139,15 @@
 	if (create_strip_zones (mddev)) 
 		goto out_free_conf;
 
-	printk("raid0 : md_size is %d blocks.\n", md_size[mdidx(mddev)]);
-	printk("raid0 : conf->smallest->size is %ld blocks.\n", conf->smallest->size);
+	VERBMSG(printk("raid0: md_size is %d blocks.\n", md_size[mdidx(mddev)]))
+	VERBMSG(printk("raid0: conf->smallest->size is %ld blocks.\n", conf->smallest->size))
 	nb_zone = md_size[mdidx(mddev)]/conf->smallest->size +
 			(md_size[mdidx(mddev)] % conf->smallest->size ? 1 : 0);
-	printk("raid0 : nb_zone is %ld.\n", nb_zone);
+	VERBMSG(printk("raid0: nb_zone is %ld.\n", nb_zone))
 	conf->nr_zones = nb_zone;
 
-	printk("raid0 : Allocating %ld bytes for hash.\n",
-				nb_zone*sizeof(struct raid0_hash));
+	VERBMSG(printk("raid0: Allocating %ld bytes for hash.\n",
+				nb_zone*sizeof(struct raid0_hash)))
 
 	conf->hash_table = vmalloc (sizeof (struct raid0_hash)*nb_zone);
 	if (!conf->hash_table)
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/raid1.c linux-2.4.26-leo/drivers/md/raid1.c
--- linux-2.4.26/drivers/md/raid1.c	2004-05-19 21:34:38.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/raid1.c	2004-05-26 23:34:27.000000000 +0100
@@ -1627,8 +1627,9 @@
 					disk_idx);
 				continue;
 			}
-			printk(OPERATIONAL, partition_name(rdev->dev),
- 					disk_idx);
+			VERBMSG(printk(OPERATIONAL, 
+                                        partition_name(rdev->dev),
+ 					disk_idx))
 			disk->number = descriptor->number;
 			disk->raid_disk = disk_idx;
 			disk->dev = rdev->dev;
@@ -1643,7 +1644,7 @@
 		/*
 		 * Must be a spare disk ..
 		 */
-			printk(SPARE, partition_name(rdev->dev));
+			VERBMSG(printk(SPARE, partition_name(rdev->dev)))
 			disk->number = descriptor->number;
 			disk->raid_disk = disk_idx;
 			disk->dev = rdev->dev;
@@ -1766,7 +1767,8 @@
 		md_recover_arrays();
 
 
-	printk(ARRAY_IS_ACTIVE, mdidx(mddev), sb->active_disks, sb->raid_disks);
+	VERBMSG(printk(ARRAY_IS_ACTIVE, mdidx(mddev), 
+			sb->active_disks, sb->raid_disks))
 	/*
 	 * Ok, everything is just fine now
 	 */
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/md/raid5.c linux-2.4.26-leo/drivers/md/raid5.c
--- linux-2.4.26/drivers/md/raid5.c	2003-08-25 12:44:42.000000000 +0100
+++ linux-2.4.26-leo/drivers/md/raid5.c	2004-05-26 23:34:27.000000000 +0100
@@ -1448,7 +1448,8 @@
 				printk(KERN_ERR "raid5: disabled device %s (device %d already operational)\n", partition_name(rdev->dev), raid_disk);
 				continue;
 			}
-			printk(KERN_INFO "raid5: device %s operational as raid disk %d\n", partition_name(rdev->dev), raid_disk);
+			VERBMSG(printk(KERN_INFO "raid5: device %s operational as raid disk %d\n", 
+                                partition_name(rdev->dev), raid_disk))
 	
 			disk->number = desc->number;
 			disk->raid_disk = raid_disk;
@@ -1461,7 +1462,8 @@
 			/*
 			 * Must be a spare disk ..
 			 */
-			printk(KERN_INFO "raid5: spare disk %s\n", partition_name(rdev->dev));
+			VERBMSG(printk(KERN_INFO "raid5: spare disk %s\n", 
+                                partition_name(rdev->dev)))
 			disk->number = desc->number;
 			disk->raid_disk = raid_disk;
 			disk->dev = rdev->dev;
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/pci/proc.c linux-2.4.26-leo/drivers/pci/proc.c
--- linux-2.4.26/drivers/pci/proc.c	2002-11-28 23:53:14.000000000 +0000
+++ linux-2.4.26-leo/drivers/pci/proc.c	2004-06-22 20:53:46.000000000 +0100
@@ -562,7 +562,15 @@
 		pci_for_each_dev(dev) {
 			pci_proc_attach_device(dev);
 		}
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+		entry = create_proc_entry("pci", S_IRUSR, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		entry = create_proc_entry("pci", S_IRUSR | S_IRGRP, NULL);
+#endif
+#else
 		entry = create_proc_entry("pci", 0, NULL);
+#endif
 		if (entry)
 			entry->proc_fops = &proc_pci_operations;
 	}
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/sound/sound_core.c linux-2.4.26-leo/drivers/sound/sound_core.c
--- linux-2.4.26/drivers/sound/sound_core.c	2001-09-30 20:26:08.000000000 +0100
+++ linux-2.4.26-leo/drivers/sound/sound_core.c	2004-05-26 23:34:34.000000000 +0100
@@ -37,6 +37,7 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/usb/storage/unusual_devs.h linux-2.4.26-leo/drivers/usb/storage/unusual_devs.h
--- linux-2.4.26/drivers/usb/storage/unusual_devs.h	2004-05-19 21:34:39.000000000 +0100
+++ linux-2.4.26-leo/drivers/usb/storage/unusual_devs.h	2004-06-22 21:09:13.000000000 +0100
@@ -322,6 +322,18 @@
 		"USB Hard Disk",
 		US_SC_RBC, US_PR_CB, NULL, 0 ), 
 
+/* Added by Paul Evans <nerd@freeuk.com>
+ * for the A-Mego Dual Slot USB Card Reader/Writer.
+ * From instructions found at:
+ * http://www.qbik.ch/usb/devices/showdev.php?id=1262
+ */
+
+UNUSUAL_DEV(  0x0aec, 0x5010, 0x0100, 0x0100, 
+		"A-Mego", 
+		"CSU-LA1 Dual Slot USB Card Reader / Writer", 
+		US_SC_SCSI, US_PR_BULK, NULL, 
+		US_FL_FIX_INQUIRY ),
+
 #ifdef CONFIG_USB_STORAGE_ISD200
 UNUSUAL_DEV(  0x05ab, 0x0031, 0x0100, 0x0110,
 		"In-System",
diff -urN --exclude-from=diff-exclude linux-2.4.26/drivers/video/vesafb.c linux-2.4.26-leo/drivers/video/vesafb.c
--- linux-2.4.26/drivers/video/vesafb.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/drivers/video/vesafb.c	2004-06-22 20:53:46.000000000 +0100
@@ -546,7 +546,7 @@
 	video_visual = (video_bpp == 8) ?
 		FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
 
-#ifndef __i386__
+#if !defined(__i386__) || defined(CONFIG_GRKERNSEC_PAX_KERNEXEC)
 	screen_info.vesapm_seg = 0;
 #endif
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/adfs/map.c linux-2.4.26-leo/fs/adfs/map.c
--- linux-2.4.26/fs/adfs/map.c	2001-10-25 21:53:53.000000000 +0100
+++ linux-2.4.26-leo/fs/adfs/map.c	2004-05-26 23:34:34.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 #include <linux/adfs_fs.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 
 #include "adfs.h"
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/binfmt_aout.c linux-2.4.26-leo/fs/binfmt_aout.c
--- linux-2.4.26/fs/binfmt_aout.c	2001-11-03 01:39:20.000000000 +0000
+++ linux-2.4.26-leo/fs/binfmt_aout.c	2004-06-22 20:53:46.000000000 +0100
@@ -113,10 +113,12 @@
 /* If the size of the dump file exceeds the rlimit, then see what would happen
    if we wrote the stack, but not the data area.  */
 #ifdef __sparc__
+	gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1);
 	if ((dump.u_dsize+dump.u_ssize) >
 	    current->rlim[RLIMIT_CORE].rlim_cur)
 		dump.u_dsize = 0;
 #else
+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1);
 	if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
 	    current->rlim[RLIMIT_CORE].rlim_cur)
 		dump.u_dsize = 0;
@@ -124,10 +126,12 @@
 
 /* Make sure we have enough room to write the stack and data areas. */
 #ifdef __sparc__
+	gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1);
 	if ((dump.u_ssize) >
 	    current->rlim[RLIMIT_CORE].rlim_cur)
 		dump.u_ssize = 0;
 #else
+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1);
 	if ((dump.u_ssize+1) * PAGE_SIZE >
 	    current->rlim[RLIMIT_CORE].rlim_cur)
 		dump.u_ssize = 0;
@@ -276,6 +280,8 @@
 	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
 	if (rlim >= RLIM_INFINITY)
 		rlim = ~0;
+
+	gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
 	if (ex.a_data + ex.a_bss > rlim)
 		return -ENOMEM;
 
@@ -307,6 +313,24 @@
 	current->mm->mmap = NULL;
 	compute_creds(bprm);
  	current->flags &= ~PF_FORKNOEXEC;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
+		current->flags |= PF_PAX_PAGEEXEC;
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+		if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
+			current->flags |= PF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+		if (!(N_FLAGS(ex) & F_PAX_MPROTECT))        
+			current->flags |= PF_PAX_MPROTECT;
+#endif
+
+	}
+#endif
+
 #ifdef __sparc__
 	if (N_MAGIC(ex) == NMAGIC) {
 		loff_t pos = fd_offset;
@@ -393,7 +417,7 @@
 
 		down_write(&current->mm->mmap_sem);
  		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
-				PROT_READ | PROT_WRITE | PROT_EXEC,
+				PROT_READ | PROT_WRITE,
 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
 				fd_offset + ex.a_text);
 		up_write(&current->mm->mmap_sem);
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/binfmt_elf.c linux-2.4.26-leo/fs/binfmt_elf.c
--- linux-2.4.26/fs/binfmt_elf.c	2004-05-19 21:34:40.000000000 +0100
+++ linux-2.4.26-leo/fs/binfmt_elf.c	2004-06-22 20:53:46.000000000 +0100
@@ -33,10 +33,13 @@
 #include <linux/smp_lock.h>
 #include <linux/compiler.h>
 #include <linux/highmem.h>
+#include <linux/random.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/pgalloc.h>
+#include <asm/system.h>
 
 #define DLINFO_ITEMS 13
 
@@ -86,6 +89,12 @@
 	if (end <= start)
 		return;
 	do_brk(start, end - start);
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	if (current->flags & PF_PAX_RANDEXEC)
+		do_mmap_pgoff(NULL, ELF_PAGEALIGN(start + current->mm->delta_exec), 0UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, start);
+#endif
+
 }
 
 
@@ -414,6 +423,203 @@
 	return elf_entry;
 }
 
+#if (defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS)) && defined(CONFIG_GRKERNSEC_PAX_SOFTMODE)
+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (elf_phdata->p_flags & PF_PAGEEXEC)
+		pax_flags |= PF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (elf_phdata->p_flags & PF_SEGMEXEC) {
+		pax_flags &= ~PF_PAX_PAGEEXEC;
+		pax_flags |= PF_PAX_SEGMEXEC;
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+	if (elf_phdata->p_flags & PF_EMUTRAMP)
+		pax_flags |= PF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	if (elf_phdata->p_flags & PF_MPROTECT)
+		pax_flags |= PF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK)
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (elf_phdata->p_flags & PF_RANDMMAP)
+		pax_flags |= PF_PAX_RANDMMAP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (elf_phdata->p_flags & PF_RANDEXEC)
+		pax_flags |= PF_PAX_RANDEXEC;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS
+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
+		pax_flags |= PF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) {
+		pax_flags &= ~PF_PAX_PAGEEXEC;
+		pax_flags |= PF_PAX_SEGMEXEC;
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+	if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
+		pax_flags |= PF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	if (!(elf_phdata->p_flags & PF_NOMPROTECT))
+		pax_flags |= PF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK)
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (!(elf_phdata->p_flags & PF_NORANDMMAP))
+		pax_flags |= PF_PAX_RANDMMAP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (!(elf_phdata->p_flags & PF_NORANDEXEC))
+		pax_flags |= PF_PAX_RANDEXEC;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EI_PAX
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
+		pax_flags |= PF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) {
+		pax_flags &= ~PF_PAX_PAGEEXEC;
+		pax_flags |= PF_PAX_SEGMEXEC;
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+	if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
+		pax_flags |= PF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
+		pax_flags |= PF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
+		pax_flags |= PF_PAX_RANDMMAP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if ((elf_ex->e_ident[EI_PAX] & EF_PAX_RANDEXEC) && (elf_ex->e_type == ET_EXEC) && (pax_flags & PF_PAX_MPROTECT))
+		pax_flags |= PF_PAX_RANDEXEC;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+#if defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS)
+static int pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS
+	unsigned long i;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EI_PAX
+	pax_flags = pax_parse_ei_pax(elf_ex);
+#endif
+       
+#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS
+	for (i = 0UL; i < elf_ex->e_phnum; i++)
+		if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
+			if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
+			    ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
+			    ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
+			    ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
+			    ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)) ||
+			    ((elf_phdata[i].p_flags & PF_RANDEXEC) && ((elf_phdata[i].p_flags & PF_NORANDEXEC) || elf_ex->e_type == ET_DYN || !(elf_phdata[i].p_flags & PF_MPROTECT))) ||
+			    (!(elf_phdata[i].p_flags & PF_NORANDEXEC) && (elf_ex->e_type == ET_DYN || (elf_phdata[i].p_flags & PF_NOMPROTECT))))
+				return -EINVAL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+			if (pax_softmode)
+				pax_flags = pax_parse_softmode(&elf_phdata[i]);
+			else
+#endif
+
+				pax_flags = pax_parse_hardmode(&elf_phdata[i]);
+			break;
+		}
+#endif
+
+	if (0 > pax_check_flags(&pax_flags))
+		return -EINVAL;
+
+	current->flags |= pax_flags;
+	return 0;
+}
+#endif
+
 /*
  * These are the functions used to load ELF style executables and shared
  * libraries.  There is no binary dependent code anywhere else.
@@ -446,6 +652,11 @@
   	struct exec interp_ex;
 	char passed_fileno[6];
 	struct files_struct *files;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	unsigned long load_addr_random = 0UL;
+	unsigned long load_bias_random = 0UL;
+#endif
 	
 	/* Get the exec-header */
 	elf_ex = *((struct elfhdr *) bprm->buf);
@@ -623,7 +834,46 @@
 	current->mm->end_data = 0;
 	current->mm->end_code = 0;
 	current->mm->mmap = NULL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+	current->mm->delta_mmap = 0UL;
+	current->mm->delta_exec = 0UL;
+	current->mm->delta_stack = 0UL;
+#endif
+
 	current->flags &= ~PF_FORKNOEXEC;
+
+#if defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS)
+	if (0 > pax_parse_elf_flags(&elf_ex, elf_phdata)) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS
+	pax_set_flags(bprm);
+#elif defined(CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS)
+	if (pax_set_flags_func)
+		(*pax_set_flags_func)(bprm);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_DLRESOLVE
+	if (current->flags & PF_PAX_PAGEEXEC)
+		current->mm->call_dl_resolve = 0UL;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+	if (current->flags & PF_PAX_RANDMMAP) {
+#define pax_delta_mask(delta, lsb, len) (((delta) & ((1UL << (len)) - 1)) << (lsb))
+
+		current->mm->delta_mmap = pax_delta_mask(get_random_long(), PAX_DELTA_MMAP_LSB(current), PAX_DELTA_MMAP_LEN(current));
+		current->mm->delta_exec = pax_delta_mask(get_random_long(), PAX_DELTA_EXEC_LSB(current), PAX_DELTA_EXEC_LEN(current));
+		current->mm->delta_stack = pax_delta_mask(get_random_long(), PAX_DELTA_STACK_LSB(current), PAX_DELTA_STACK_LEN(current));
+	}
+#endif
+
+	gr_set_pax_flags(current);	
+
 	elf_entry = (unsigned long) elf_ex.e_entry;
 
 	/* Do this so that we can load the interpreter, if need be.  We will
@@ -632,7 +882,7 @@
 	retval = setup_arg_pages(bprm);
 	if (retval < 0) {
 		send_sig(SIGKILL, current, 0);
-		return retval;
+		goto out_free_dentry;
 	}
 	
 	current->mm->start_stack = bprm->p;
@@ -679,11 +929,84 @@
 			   base, as well as whatever program they might try to exec.  This
 		           is because the brk will follow the loader, and is not movable.  */
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+			/* PaX: randomize base address at the default exe base if requested */
+			if (current->flags & PF_PAX_RANDMMAP) {
+				load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE(current) - vaddr + current->mm->delta_exec);
+				elf_flags |= MAP_FIXED;
+			}
+#endif
+
 		}
 
-		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
-		if (BAD_ADDR(error))
-			continue;
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+		if ((current->flags & PF_PAX_RANDEXEC) && (elf_ex.e_type == ET_EXEC)) {
+			error = -ENOMEM;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+			if (current->flags & PF_PAX_PAGEEXEC)
+				error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot & ~PROT_EXEC, elf_flags);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+			if (current->flags & PF_PAX_SEGMEXEC) {
+				unsigned long addr, len;
+
+				addr = ELF_PAGESTART(load_bias + vaddr);
+				len = elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr);
+				if (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)
+					continue;
+				down_write(&current->mm->mmap_sem);
+				error = do_mmap_pgoff(bprm->file, addr, len, elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT);
+				up_write(&current->mm->mmap_sem);
+			}
+#endif
+
+			if (BAD_ADDR(error))
+				continue;
+
+			/* PaX: mirror at a randomized base */
+			down_write(&current->mm->mmap_sem);
+
+			if (!load_addr_set) {
+				load_addr_random = get_unmapped_area(bprm->file, 0UL, elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT, MAP_PRIVATE);
+				if (BAD_ADDR(load_addr_random)) {
+					up_write(&current->mm->mmap_sem);
+					continue;
+				}
+				load_bias_random = load_addr_random - vaddr;
+			}
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+			if (current->flags & PF_PAX_PAGEEXEC)
+				load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+			if (current->flags & PF_PAX_SEGMEXEC) {
+				if (elf_prot & PROT_EXEC) {
+					load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), PROT_NONE, MAP_PRIVATE | MAP_FIXED, 0UL);
+					if (!BAD_ADDR(load_addr_random)) {
+						load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr + SEGMEXEC_TASK_SIZE), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
+						if (!BAD_ADDR(load_addr_random))
+							load_addr_random -= SEGMEXEC_TASK_SIZE;
+					}
+				} else
+					load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
+			}
+#endif
+
+			up_write(&current->mm->mmap_sem);
+			if (BAD_ADDR(load_addr_random))
+				continue;
+		} else
+#endif
+		{
+			error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+			if (BAD_ADDR(error))
+				continue;
+		}
 
 		if (!load_addr_set) {
 			load_addr_set = 1;
@@ -694,6 +1017,11 @@
 				load_addr += load_bias;
 				reloc_func_desc = load_addr;
 			}
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			current->mm->delta_exec = load_addr_random - load_addr;
+#endif
+
 		}
 		k = elf_ppnt->p_vaddr;
 		if (k < start_code) start_code = k;
@@ -720,6 +1048,24 @@
 	start_data += load_bias;
 	end_data += load_bias;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	if (current->flags & PF_PAX_RANDMMAP)
+		elf_brk += pax_delta_mask(get_random_long(), 4, PAGE_SHIFT);
+#undef pax_delta_mask
+#endif
+
+	/* Calling set_brk effectively mmaps the pages that we need
+	 * for the bss and break sections
+	 */
+	set_brk(elf_bss, elf_brk);
+
+	padzero(elf_bss);
+
 	if (elf_interpreter) {
 		if (interpreter_type == INTERPRETER_AOUT)
 			elf_entry = load_aout_interp(&interp_ex,
@@ -768,13 +1114,6 @@
 	current->mm->end_data = end_data;
 	current->mm->start_stack = bprm->p;
 
-	/* Calling set_brk effectively mmaps the pages that we need
-	 * for the bss and break sections
-	 */
-	set_brk(elf_bss, elf_brk);
-
-	padzero(elf_bss);
-
 #if 0
 	printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
 	printk("(end_code) %lx\n" , (long) current->mm->end_code);
@@ -811,6 +1150,10 @@
 	ELF_PLAT_INIT(regs, reloc_func_desc);
 #endif
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	pax_switch_segments(current);
+#endif
+
 	start_thread(regs, elf_entry, bprm->p);
 	if (current->ptrace & PT_PTRACED)
 		send_sig(SIGTRAP, current, 0);
@@ -1039,8 +1382,11 @@
 #undef DUMP_SEEK
 
 #define DUMP_WRITE(addr, nr)	\
+	do { \
+	gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
-		goto end_coredump;
+		goto end_coredump; \
+	} while (0)
 #define DUMP_SEEK(off)	\
 	if (!dump_seek(file, (off))) \
 		goto end_coredump;
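
The pax_delta_mask() macro used in the ASLR hunk above keeps the low
`len' bits of a random value and shifts them up to bit `lsb', so each
delta is both bounded and suitably aligned. A small userspace
demonstration (12 and 16 stand in for the per-architecture
PAX_DELTA_MMAP_LSB/LEN values, which this excerpt does not define):

	#include <stdio.h>

	#define pax_delta_mask(delta, lsb, len) \
		(((delta) & ((1UL << (len)) - 1)) << (lsb))

	int main(void)
	{
		unsigned long delta = 0x12345678UL; /* stand-in for get_random_long() */
		printf("delta_mmap = %#lx\n", pax_delta_mask(delta, 12, 16));
		/* prints 0x5678000: the low 16 random bits at page granularity */
		return 0;
	}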
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/binfmt_misc.c linux-2.4.26-leo/fs/binfmt_misc.c
--- linux-2.4.26/fs/binfmt_misc.c	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/fs/binfmt_misc.c	2004-06-22 20:53:46.000000000 +0100
@@ -102,9 +102,11 @@
 	int retval;
 
 	retval = -ENOEXEC;
-	if (!enabled)
+	if (!enabled || bprm->misc)
 		goto _ret;
 
+	bprm->misc++;
+
 	/* to keep locking time low, we copy the interpreter string */
 	read_lock(&entries_lock);
 	fmt = check_file(bprm);
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/buffer.c linux-2.4.26-leo/fs/buffer.c
--- linux-2.4.26/fs/buffer.c	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/buffer.c	2004-06-22 20:53:46.000000000 +0100
@@ -419,6 +419,34 @@
 	fsync_dev(dev);
 }
 
+int fsync_dev_lockfs(kdev_t dev)
+{
+	/* you are not allowed to try locking all the filesystems
+	** on the system; your chances of getting through without
+	** total deadlock are slim to none.
+	*/
+	if (!dev)
+		return fsync_dev(dev);
+
+	sync_buffers(dev, 0);
+
+	lock_kernel();
+	/* note: the FS might need to start transactions to
+	** sync the inodes or the quota, so no locking until
+	** after these are done
+	*/
+	sync_inodes(dev);
+	DQUOT_SYNC_DEV(dev);
+	/* if inodes or quotas could be dirtied during the
+	** sync_supers_lockfs call, the FS is responsible for getting
+	** them on disk without deadlocking against the lock
+	*/
+	sync_supers_lockfs(dev);
+	unlock_kernel();
+
+	return sync_buffers(dev, 1);
+}
+
 asmlinkage long sys_sync(void)
 {
 	fsync_dev(0);
@@ -799,6 +827,7 @@
 	bh->b_list = BUF_CLEAN;
 	bh->b_end_io = handler;
 	bh->b_private = private;
+	bh->b_journal_head = NULL;
 }
 
 void end_buffer_io_async(struct buffer_head * bh, int uptodate)
@@ -1863,6 +1892,9 @@
 	int err;
 
 	err = -EFBIG;
+
+	gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1);
+
         limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
 		send_sig(SIGXFSZ, current, 0);
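
fsync_dev_lockfs() exists so that a block-level snapshot (LVM-style)
can quiesce a single filesystem rather than sync the world. A hedged
usage sketch; unlockfs() is assumed to be the counterpart this patch
adds elsewhere, outside this excerpt:

	static int snapshot_one_device(kdev_t dev)
	{
		fsync_dev_lockfs(dev);	/* flush buffers, inodes, quota; hold supers locked */
		/* ... take the block-level snapshot here ... */
		unlockfs(dev);		/* assumed counterpart: let transactions resume */
		return 0;
	}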
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/exec.c linux-2.4.26-leo/fs/exec.c
--- linux-2.4.26/fs/exec.c	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/exec.c	2004-06-22 20:53:46.000000000 +0100
@@ -43,6 +43,9 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
+#include <linux/major.h>
+#include <linux/random.h>
+#include <linux/grsecurity.h>
 
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
@@ -56,6 +59,20 @@
 static struct linux_binfmt *formats;
 static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+
+#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK)
+unsigned int pax_aslr=1;
+#endif
+
+unsigned int pax_softmode;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS
+void (*pax_set_flags_func)(struct linux_binprm *bprm);
+EXPORT_SYMBOL(pax_set_flags_func);
+#endif
+
 int register_binfmt(struct linux_binfmt * fmt)
 {
 	struct linux_binfmt ** tmp = &formats;
@@ -290,7 +307,11 @@
 	struct vm_area_struct *vma; 
 	pgprot_t prot = PAGE_COPY; 
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (page_count(page) != 1 && (!(tsk->flags & PF_PAX_SEGMEXEC) || page_count(page) != 2))
+#else
 	if (page_count(page) != 1)
+#endif
 		printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
 	pgd = pgd_offset(tsk->mm, address);
 
@@ -303,9 +324,19 @@
 		goto out;
 	if (!pte_none(*pte))
 		goto out;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (page_count(page) == 1) {
+#endif
+
 	lru_cache_add(page);
 	flush_dcache_page(page);
 	flush_page_to_ram(page);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	}
+#endif
+
 	/* lookup is cheap because there is only a single entry in the list */
 	vma = find_vma(tsk->mm, address); 
 	if (vma) 
@@ -329,6 +360,10 @@
 	struct vm_area_struct *mpnt;
 	int i;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	struct vm_area_struct *mpnt_m = NULL;
+#endif
+
 	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
 
 	bprm->p += stack_base;
@@ -340,12 +375,27 @@
 	if (!mpnt) 
 		return -ENOMEM; 
 	
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) && (VM_STACK_FLAGS & VM_MAYEXEC)) {
+		mpnt_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		if (!mpnt_m) {
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return -ENOMEM;
+		}
+	}
+#endif
+
 	down_write(&current->mm->mmap_sem);
 	{
 		mpnt->vm_mm = current->mm;
 		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
 		mpnt->vm_end = STACK_TOP;
 		mpnt->vm_flags = VM_STACK_FLAGS;
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+		if (!(current->flags & PF_PAX_PAGEEXEC))
+			mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7];
+		else
+#endif
 		mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
 		mpnt->vm_ops = NULL;
 		mpnt->vm_pgoff = 0;
@@ -353,6 +403,25 @@
 		mpnt->vm_private_data = (void *) 0;
 		insert_vm_struct(current->mm, mpnt);
 		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if (mpnt_m) {
+			*mpnt_m = *mpnt;
+			if (!(VM_STACK_FLAGS & VM_EXEC)) {
+				mpnt_m->vm_flags &= ~(VM_READ | VM_WRITE | VM_EXEC);
+				mpnt_m->vm_page_prot = PAGE_NONE;
+			}
+			mpnt_m->vm_start += SEGMEXEC_TASK_SIZE;
+			mpnt_m->vm_end += SEGMEXEC_TASK_SIZE;
+			mpnt_m->vm_flags |= VM_MIRROR;
+			mpnt->vm_flags |= VM_MIRROR;
+			mpnt_m->vm_private_data = (void *)(mpnt->vm_start - mpnt_m->vm_start);
+			mpnt->vm_private_data = (void *)(mpnt_m->vm_start - mpnt->vm_start);
+			insert_vm_struct(current->mm, mpnt_m);
+			current->mm->total_vm = (mpnt_m->vm_end - mpnt_m->vm_start) >> PAGE_SHIFT;
+		}
+#endif
+
 	} 
 
 	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
@@ -360,6 +429,14 @@
 		if (page) {
 			bprm->page[i] = NULL;
 			put_dirty_page(current,page,stack_base);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+			if (mpnt_m) {
+				page_cache_get(page);
+				put_dirty_page(current, page, stack_base + SEGMEXEC_TASK_SIZE);
+			}
+#endif
+
 		}
 		stack_base += PAGE_SIZE;
 	}
@@ -455,8 +532,8 @@
 		active_mm = current->active_mm;
 		current->mm = mm;
 		current->active_mm = mm;
-		task_unlock(current);
 		activate_mm(active_mm, mm);
+		task_unlock(current);
 		mm_release();
 		if (old_mm) {
 			if (active_mm != old_mm) BUG();
@@ -615,6 +692,30 @@
 	}
 	current->comm[i] = '\0';
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	current->flags &= ~PF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+	current->flags &= ~PF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	current->flags &= ~PF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+	current->flags &= ~PF_PAX_RANDMMAP;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	current->flags &= ~PF_PAX_RANDEXEC;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	current->flags &= ~PF_PAX_SEGMEXEC;
+#endif
+
 	flush_thread();
 
 	de_thread(current);
@@ -714,6 +815,9 @@
 			cap_set_full(bprm->cap_effective);
 	}
 
+	if (gr_handle_ptrace_exec(bprm->file->f_dentry, bprm->file->f_vfsmnt))
+		return -EACCES;
+
 	memset(bprm->buf,0,BINPRM_BUF_SIZE);
 	return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
 }
@@ -776,8 +880,13 @@
 	
         /* AUD: Audit candidate if current->cap_effective is set */
 
-        current->suid = current->euid = current->fsuid = bprm->e_uid;
-        current->sgid = current->egid = current->fsgid = bprm->e_gid;
+	if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
+		current->suid = current->euid = current->fsuid = bprm->e_uid;
+
+	if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
+	        current->sgid = current->egid = current->fsgid = bprm->e_gid;
+
+	gr_handle_chroot_caps(current);
 
 	if(do_unlock)
 		unlock_kernel();
@@ -912,6 +1021,11 @@
 	struct file *file;
 	int retval;
 	int i;
+#ifdef CONFIG_GRKERNSEC
+	struct file *old_exec_file;
+	struct acl_subject_label *old_acl;
+	struct rlimit old_rlim[RLIM_NLIMITS];
+#endif
 
 	file = open_exec(filename);
 
@@ -919,12 +1033,37 @@
 	if (IS_ERR(file))
 		return retval;
 
+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
+
+	if (gr_handle_nproc()) {
+		allow_write_access(file);
+		fput(file);
+		return -EAGAIN;
+	}
+
+	if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
+		allow_write_access(file);
+		fput(file);
+		return -EACCES;
+	}
+
 	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	if (pax_aslr)
+#endif
+
+	bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
+#endif
+
 	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0])); 
 
 	bprm.file = file;
 	bprm.filename = filename;
 	bprm.sh_bang = 0;
+	bprm.misc = 0;
 	bprm.loader = 0;
 	bprm.exec = 0;
 	if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
@@ -943,11 +1082,26 @@
 	if (retval < 0) 
 		goto out; 
 
+	if (!gr_tpe_allow(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	if(gr_check_crash_exec(file)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
 	if (retval < 0) 
 		goto out; 
 
 	bprm.exec = bprm.p;
+
+	gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
+
+	gr_handle_exec_args(&bprm, argv);
+
 	retval = copy_strings(bprm.envc, envp, &bprm);
 	if (retval < 0) 
 		goto out; 
@@ -956,11 +1110,32 @@
 	if (retval < 0) 
 		goto out; 
 
+#ifdef CONFIG_GRKERNSEC
+	old_acl = current->acl;
+	memcpy(old_rlim, current->rlim, sizeof(old_rlim));
+	old_exec_file = current->exec_file;
+	get_file(file);
+	current->exec_file = file;
+#endif
+
+	gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
+
 	retval = search_binary_handler(&bprm,regs);
-	if (retval >= 0)
+	if (retval >= 0) {
+#ifdef CONFIG_GRKERNSEC
+		if (old_exec_file)
+			fput(old_exec_file);
+#endif
 		/* execve success */
 		return retval;
+	}
 
+#ifdef CONFIG_GRKERNSEC
+	current->acl = old_acl;
+	memcpy(current->rlim, old_rlim, sizeof(old_rlim));
+	fput(current->exec_file);
+	current->exec_file = old_exec_file;
+#endif
 out:
 	/* Something went wrong, return the inode and free the argument pages*/
 	allow_write_access(bprm.file);
@@ -1102,6 +1277,130 @@
 	*out_ptr = 0;
 }
 
+int pax_check_flags(unsigned long * flags)
+{
+	int retval = 0;
+
+#if !defined(__i386__) || !defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+	if (*flags & PF_PAX_SEGMEXEC)
+	{
+		*flags &= ~PF_PAX_SEGMEXEC;
+		retval = -EINVAL;
+	}
+#endif
+
+	if ((*flags & PF_PAX_PAGEEXEC)
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	    &&  (*flags & PF_PAX_SEGMEXEC)
+#endif
+
+	   )
+	{
+		*flags &= ~PF_PAX_PAGEEXEC;
+		retval = -EINVAL;
+	}
+
+	if ((*flags & PF_PAX_MPROTECT)
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	    && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
+#endif
+
+	   )
+	{
+		*flags &= ~PF_PAX_MPROTECT;
+		retval = -EINVAL;
+	}
+
+	if ((*flags & PF_PAX_EMUTRAMP)
+
+#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP
+	    && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
+#endif
+
+	   )
+	{
+		*flags &= ~PF_PAX_EMUTRAMP;
+		retval = -EINVAL;
+	}
+
+	if ((*flags & PF_PAX_RANDEXEC)
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+	    && !(*flags & PF_PAX_MPROTECT)
+#endif
+
+	   )
+	{
+		*flags &= ~PF_PAX_RANDEXEC;
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+EXPORT_SYMBOL(pax_check_flags);
+
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = current->mm;
+	char* buffer_exec = (char*)__get_free_page(GFP_ATOMIC);
+	char* buffer_fault = (char*)__get_free_page(GFP_ATOMIC);
+	char* path_exec=NULL;
+	char* path_fault=NULL;
+	unsigned long start=0UL, end=0UL, offset=0UL;
+
+	if (buffer_exec && buffer_fault) {
+		struct vm_area_struct* vma, * vma_exec=NULL, * vma_fault=NULL;
+
+		down_read(&mm->mmap_sem);
+		vma = mm->mmap;
+		while (vma && (!vma_exec || !vma_fault)) {
+			if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
+				vma_exec = vma;
+			if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
+				vma_fault = vma;
+			vma = vma->vm_next;
+		}
+		if (vma_exec) {
+			path_exec = d_path(vma_exec->vm_file->f_dentry, vma_exec->vm_file->f_vfsmnt, buffer_exec, PAGE_SIZE);
+			if (IS_ERR(path_exec))
+				path_exec = "<path too long>";
+		}
+		if (vma_fault) {
+			start = vma_fault->vm_start;
+			end = vma_fault->vm_end;
+			offset = vma_fault->vm_pgoff << PAGE_SHIFT;
+			if (vma_fault->vm_file) {
+				path_fault = d_path(vma_fault->vm_file->f_dentry, vma_fault->vm_file->f_vfsmnt, buffer_fault, PAGE_SIZE);
+				if (IS_ERR(path_fault))
+					path_fault = "<path too long>";
+			} else
+				path_fault = "<anonymous mapping>";
+		}
+		up_read(&mm->mmap_sem);
+	}
+	if (tsk->curr_ip) {
+		printk(KERN_ERR "PAX: From %u.%u.%u.%u: execution attempt in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->curr_ip), path_fault, start, end, offset);
+		printk(KERN_ERR "PAX: From %u.%u.%u.%u: terminating task: %s(%s):%d, uid/euid: %u/%u, "
+				"PC: %p, SP: %p\n", NIPQUAD(tsk->curr_ip), path_exec, tsk->comm, tsk->pid,
+				tsk->uid, tsk->euid, pc, sp);
+	} else {
+		printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
+		printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
+				"PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid,
+				tsk->uid, tsk->euid, pc, sp);
+	}
+	if (buffer_exec) free_page((unsigned long)buffer_exec);
+	if (buffer_fault) free_page((unsigned long)buffer_fault);
+	pax_report_insns(pc);
+	do_coredump(SIGKILL, regs);
+}
+#endif
+
 int do_coredump(long signr, struct pt_regs * regs)
 {
 	struct linux_binfmt * binfmt;
@@ -1122,6 +1421,7 @@
 		current->fsuid = 0;
 	}
 	current->mm->dumpable = 0;
+	gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
 	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
 		goto fail;
 
@@ -1141,7 +1441,7 @@
 		goto close_fail;
 	if (!file->f_op->write)
 		goto close_fail;
-	if (do_truncate(file->f_dentry, 0) != 0)
+	if (do_truncate(file->f_dentry, 0, file->f_vfsmnt) != 0)
 		goto close_fail;
 
 	retval = binfmt->core_dump(signr, regs, file);
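
The RANDUSTACK hunk above lowers bprm.p by a random amount before the
argument pages are copied in: the first mask keeps the offset aligned
to sizeof(void *), the second confines it to less than one page. The
arithmetic in isolation (a 4 KiB PAGE_MASK is assumed here for
illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_mask = ~(0x1000UL - 1);	/* 4 KiB pages assumed */
		unsigned long rnd = 0x7f4a7c15UL;	/* stand-in for get_random_long() */
		unsigned long shift = (rnd & ~(sizeof(void *) - 1)) & ~page_mask;
		printf("stack base lowered by %#lx bytes\n", shift);
		/* always a multiple of sizeof(void *) and strictly below one page */
		return 0;
	}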
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/fat/cache.c linux-2.4.26-leo/fs/fat/cache.c
--- linux-2.4.26/fs/fat/cache.c	2001-10-12 21:48:42.000000000 +0100
+++ linux-2.4.26-leo/fs/fat/cache.c	2004-05-26 23:34:34.000000000 +0100
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/stat.h>
 #include <linux/fat_cvf.h>
+#include <linux/sched.h>
 
 #if 0
 #  define PRINTK(x) printk x
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/fat/inode.c linux-2.4.26-leo/fs/fat/inode.c
--- linux-2.4.26/fs/fat/inode.c	2004-05-19 21:34:40.000000000 +0100
+++ linux-2.4.26-leo/fs/fat/inode.c	2004-05-26 23:34:22.000000000 +0100
@@ -8,6 +8,12 @@
  *  Fixes:
  *
  *  	Max Cohan: Fixed invalid FSINFO offset when info_sector is 0
+ *
+ *  Added:
+ *
+ *      Paul Evans: Added fmode/dmode options for separate file/dir permissions
+ *                  See also include/linux/msdos_fs_sb.h
+ *
  */
 
 #include <linux/module.h>
@@ -75,6 +81,9 @@
 static struct list_head fat_inode_hashtable[FAT_HASH_SIZE];
 spinlock_t fat_inode_lock = SPIN_LOCK_UNLOCKED;
 
+/* mask for inode permission bits - used by fmode/dmode */
+#define MODEMASK (~0777)
+
 void fat_hash_init(void)
 {
 	int i;
@@ -220,6 +229,8 @@
 	opts->fs_uid = current->uid;
 	opts->fs_gid = current->gid;
 	opts->fs_umask = current->fs->umask;
+	opts->fs_fmode = 0666; /* rw-rw-rw- */
+	opts->fs_dmode = 0777; /* rwxrwxrwx */
 	opts->quiet = opts->sys_immutable = opts->dotsOK = opts->showexec = 0;
 	opts->codepage = 0;
 	opts->nocase = 0;
@@ -299,6 +310,20 @@
 				if (*value) ret = 0;
 			}
 		}
+		else if (!strcmp(this_char,"fmode")) {
+			if (!value || !*value) ret = 0;
+			else {
+				opts->fs_fmode = simple_strtoul(value,&value,8);
+				if (*value) ret = 0;
+			}
+		}
+		else if (!strcmp(this_char,"dmode")) {
+			if (!value || !*value) ret = 0;
+			else {
+				opts->fs_dmode = simple_strtoul(value,&value,8);
+				if (*value) ret = 0;
+			}
+		}
 		else if (!strcmp(this_char,"debug")) {
 			if (value) ret = 0;
 			else *debug = 1;
@@ -385,7 +410,7 @@
 	inode->i_gid = sbi->options.fs_gid;
 	inode->i_version = ++event;
 	inode->i_generation = 0;
-	inode->i_mode = (S_IRWXUGO & ~sbi->options.fs_umask) | S_IFDIR;
+	inode->i_mode = (S_IRWXUGO & ~sbi->options.fs_umask & (sbi->options.fs_dmode | MODEMASK)) | S_IFDIR;
 	inode->i_op = sbi->dir_ops;
 	inode->i_fop = &fat_dir_operations;
 	if (sbi->fat_bits == 32) {
@@ -745,9 +770,10 @@
 	if (error || debug) {
 		/* The MSDOS_CAN_BMAP is obsolete, but left just to remember */
 		printk("[MS-DOS FS Rel. 12,FAT %d,check=%c,conv=%c,"
-		       "uid=%d,gid=%d,umask=%03o%s]\n",
+		       "uid=%d,gid=%d,umask=%03o,fmode=%03o,dmode=%03o%s]\n",
 		       sbi->fat_bits,opts.name_check,
 		       opts.conversion,opts.fs_uid,opts.fs_gid,opts.fs_umask,
+		       opts.fs_fmode,opts.fs_dmode,
 		       MSDOS_CAN_BMAP(sbi) ? ",bmap" : "");
 		printk("[me=0x%x,cs=%d,#f=%d,fs=%d,fl=%ld,ds=%ld,de=%d,data=%ld,"
 		       "se=%u,ts=%u,ls=%d,rc=%ld,fc=%u]\n",
@@ -913,7 +939,7 @@
 	if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
 		inode->i_generation &= ~1;
 		inode->i_mode = MSDOS_MKMODE(de->attr,S_IRWXUGO &
-		    ~sbi->options.fs_umask) | S_IFDIR;
+		    ~sbi->options.fs_umask & (sbi->options.fs_dmode | MODEMASK)) | S_IFDIR;
 		inode->i_op = sbi->dir_ops;
 		inode->i_fop = &fat_dir_operations;
 
@@ -946,7 +972,7 @@
 		    ((sbi->options.showexec &&
 		       !is_exec(de->ext))
 		    	? S_IRUGO|S_IWUGO : S_IRWXUGO)
-		    & ~sbi->options.fs_umask) | S_IFREG;
+		    & ~sbi->options.fs_umask & (sbi->options.fs_fmode | MODEMASK)) | S_IFREG;
 		MSDOS_I(inode)->i_start = CF_LE_W(de->start);
 		if (sbi->fat_bits == 32)
 			MSDOS_I(inode)->i_start |= (CF_LE_W(de->starthi) << 16);
@@ -1037,6 +1063,7 @@
 	struct super_block *sb = dentry->d_sb;
 	struct inode *inode = dentry->d_inode;
 	int error;
+	unsigned int mode;
 
 	/* FAT cannot truncate to a longer file */
 	if (attr->ia_valid & ATTR_SIZE) {
@@ -1063,12 +1090,17 @@
 	if (error)
 		return error;
 
-	if (S_ISDIR(inode->i_mode))
+	if (S_ISDIR(inode->i_mode)) {
 		inode->i_mode |= S_IXUGO;
+		mode = ~MSDOS_SB(sb)->options.fs_umask & (MSDOS_SB(sb)->options.fs_dmode | MODEMASK);
+	}
+	else
+		mode = ~MSDOS_SB(sb)->options.fs_umask & (MSDOS_SB(sb)->options.fs_fmode | MODEMASK);
+
+	inode->i_mode = ((inode->i_mode & S_IFMT) | 
+			 ((((inode->i_mode & S_IRWXU & mode) | S_IRUSR) >> 6)*S_IXUGO)
+			) & mode;
 
-	inode->i_mode = ((inode->i_mode & S_IFMT) | ((((inode->i_mode & S_IRWXU
-	    & ~MSDOS_SB(sb)->options.fs_umask) | S_IRUSR) >> 6)*S_IXUGO)) &
-	    ~MSDOS_SB(sb)->options.fs_umask;
 	return 0;
 }
 MODULE_LICENSE("GPL");
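
A worked example of the permission arithmetic the fmode/dmode hunks
introduce: MODEMASK (~0777) widens the option so it can only ever
restrict the low nine permission bits, leaving the effective bits as
S_IRWXUGO & ~umask & dmode. With umask 022 and dmode 0705 (values
chosen for illustration, not defaults):

	#include <stdio.h>

	#define S_IRWXUGO 0777
	#define MODEMASK (~0777)	/* as defined in the hunk above */

	int main(void)
	{
		unsigned int umask = 0022, dmode = 0705;	/* illustrative */
		unsigned int bits = S_IRWXUGO & ~umask & (dmode | MODEMASK);
		printf("directory mode bits: %o\n", bits);	/* prints 705 */
		return 0;
	}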
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/fcntl.c linux-2.4.26-leo/fs/fcntl.c
--- linux-2.4.26/fs/fcntl.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/fcntl.c	2004-06-22 20:53:46.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/iobuf.h>
 #include <linux/ptrace.h>
+#include <linux/grsecurity.h>
 
 #include <asm/poll.h>
 #include <asm/siginfo.h>
@@ -65,6 +66,8 @@
 	int error;
 	int start;
 
+	gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0);
+
 	write_lock(&files->file_lock);
 	
 	error = -EINVAL;
@@ -87,6 +90,7 @@
 	}
 	
 	error = -EMFILE;
+	gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
 	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
 		goto out;
 
@@ -142,6 +146,8 @@
 	struct file * file, *tofree;
 	struct files_struct * files = current->files;
 
+	gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
+
 	write_lock(&files->file_lock);
 	if (!(file = fcheck(oldfd)))
 		goto out_unlock;
@@ -450,6 +456,10 @@
 			match = -p->pgrp;
 		if (pid != match)
 			continue;
+		if (gr_check_protected_task(p))
+			continue;
+		if (gr_pid_is_chrooted(p))
+			continue;
 		send_sigio_to_task(p, fown, fd, band);
 	}
 out:
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/jbd/journal.c linux-2.4.26-leo/fs/jbd/journal.c
--- linux-2.4.26/fs/jbd/journal.c	2004-05-19 21:34:40.000000000 +0100
+++ linux-2.4.26-leo/fs/jbd/journal.c	2004-05-26 23:34:19.000000000 +0100
@@ -1804,9 +1804,9 @@
 
 		if (buffer_jbd(bh)) {
 			/* Someone did it for us! */
-			J_ASSERT_BH(bh, bh->b_private != NULL);
+ 			J_ASSERT_BH(bh, bh->b_journal_head != NULL);
 			journal_free_journal_head(jh);
-			jh = bh->b_private;
+ 			jh = bh->b_journal_head;
 		} else {
 			/*
 			 * We actually don't need jh_splice_lock when
@@ -1814,7 +1814,7 @@
 			 */
 			spin_lock(&jh_splice_lock);
 			set_bit(BH_JBD, &bh->b_state);
-			bh->b_private = jh;
+			bh->b_journal_head = jh;
 			jh->b_bh = bh;
 			atomic_inc(&bh->b_count);
 			spin_unlock(&jh_splice_lock);
@@ -1823,7 +1823,7 @@
 	}
 	jh->b_jcount++;
 	spin_unlock(&journal_datalist_lock);
-	return bh->b_private;
+	return bh->b_journal_head;
 }
 
 /*
@@ -1856,7 +1856,7 @@
 			J_ASSERT_BH(bh, jh2bh(jh) == bh);
 			BUFFER_TRACE(bh, "remove journal_head");
 			spin_lock(&jh_splice_lock);
-			bh->b_private = NULL;
+			bh->b_journal_head = NULL;
 			jh->b_bh = NULL;	/* debug, really */
 			clear_bit(BH_JBD, &bh->b_state);
 			__brelse(bh);
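
The jbd hunks above stop multiplexing the journal_head through
bh->b_private, freeing b_private for other users (the fs/buffer.c hunk
earlier already initialises the new field to NULL). They assume a
companion change to struct buffer_head in include/linux/fs.h that is
not shown in this excerpt; roughly:

	struct buffer_head {
		/* ... existing 2.4 fields ... */
		void *b_private;	/* now exclusively the owner's */
		void *b_journal_head;	/* assumed new field, was carried in b_private */
	};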
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/Makefile linux-2.4.26-leo/fs/Makefile
--- linux-2.4.26/fs/Makefile	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/Makefile	2004-06-22 20:53:46.000000000 +0100
@@ -7,7 +7,7 @@
 
 O_TARGET := fs.o
 
-export-objs :=	filesystems.o open.o dcache.o buffer.o dquot.o
+export-objs :=	filesystems.o open.o dcache.o buffer.o dquot.o exec.o
 mod-subdirs :=	nls
 
 obj-y :=	open.o read_write.o devices.o file_table.o buffer.o \
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/namei.c linux-2.4.26-leo/fs/namei.c
--- linux-2.4.26/fs/namei.c	2003-08-25 12:44:43.000000000 +0100
+++ linux-2.4.26-leo/fs/namei.c	2004-06-22 20:53:46.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/dnotify.h>
 #include <linux/smp_lock.h>
 #include <linux/personality.h>
+#include <linux/grsecurity.h>
 
 #include <asm/namei.h>
 #include <asm/uaccess.h>
@@ -343,6 +344,13 @@
 		current->state = TASK_RUNNING;
 		schedule();
 	}
+
+	if (gr_handle_follow_link(dentry->d_parent->d_inode,
+				  dentry->d_inode, dentry, nd->mnt)) {
+		path_release(nd);
+		return -EACCES;
+	}
+
 	current->link_count++;
 	current->total_link_count++;
 	UPDATE_ATIME(dentry->d_inode);
@@ -643,6 +651,10 @@
 			}
 		}
 return_base:
+		if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) {
+			path_release(nd);
+			return -ENOENT;
+		}
 		return 0;
 out_dput:
 		dput(dentry);
@@ -1005,7 +1017,7 @@
 	struct dentry *dentry;
 	struct dentry *dir;
 	int count = 0;
 
 	acc_mode = ACC_MODE(flag);
 
 	/*
@@ -1015,7 +1027,19 @@
 		error = path_lookup(pathname, lookup_flags(flag), nd);
 		if (error)
 			return error;
+
+		if (gr_handle_rawio(nd->dentry->d_inode)) {
+			error = -EPERM;
+			goto exit;
+		}
+	
+		if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) {
+			error = -EACCES;
+			goto exit;
+		}
+
 		dentry = nd->dentry;
+
 		goto ok;
 	}
 
@@ -1048,8 +1072,22 @@
 
 	/* Negative dentry, just create the file */
 	if (!dentry->d_inode) {
+		if (gr_handle_chroot_chmod(dentry, nd->mnt, mode)) {
+			error = -EACCES;
+			up(&dir->d_inode->i_sem);
+			goto exit_dput;
+		}
+		if (!gr_acl_handle_creat(dentry, nd->dentry, nd->mnt, flag, mode)) {
+			error = -EACCES;
+			up(&dir->d_inode->i_sem);
+			goto exit_dput;
+		}
+
 		error = vfs_create(dir->d_inode, dentry,
 				   mode & ~current->fs->umask);
+		if (!error)
+			gr_handle_create(dentry, nd->mnt);
+
 		up(&dir->d_inode->i_sem);
 		dput(nd->dentry);
 		nd->dentry = dentry;
@@ -1058,12 +1096,34 @@
 		/* Don't check for write permission, don't truncate */
 		acc_mode = 0;
 		flag &= ~O_TRUNC;
+
 		goto ok;
 	}
 
 	/*
 	 * It already exists.
 	 */
+
+	if (gr_handle_rawio(dentry->d_inode)) {
+		error = -EPERM;
+		up(&dir->d_inode->i_sem);
+		goto exit_dput;
+	}
+
+	if (!gr_acl_handle_open(dentry, nd->mnt, flag)) {
+		error = -EACCES;
+		up(&dir->d_inode->i_sem);
+		goto exit_dput;
+	}
+
+	inode = dentry->d_inode;
+
+	if (gr_handle_fifo(dentry, nd->mnt, dir, flag, acc_mode)) {
+		up(&dir->d_inode->i_sem);
+		error = -EACCES;
+		goto exit_dput;
+	}
+
 	up(&dir->d_inode->i_sem);
 
 	error = -EEXIST;
@@ -1153,7 +1213,7 @@
 		if (!error) {
 			DQUOT_INIT(inode);
 			
-			error = do_truncate(dentry, 0);
+			error = do_truncate(dentry,0,nd->mnt);
 		}
 		put_write_access(inode);
 		if (error)
@@ -1184,6 +1244,13 @@
 	 * stored in nd->last.name and we will have to putname() it when we
 	 * are done. Procfs-like symlinks just set LAST_BIND.
 	 */
+
+	if (gr_handle_follow_link(dentry->d_parent->d_inode, dentry->d_inode,
+				  dentry, nd->mnt)) {
+		error = -EACCES;
+		goto exit_dput;
+	}
+
 	UPDATE_ATIME(dentry->d_inode);
 	error = dentry->d_inode->i_op->follow_link(dentry, nd);
 	dput(dentry);
@@ -1282,6 +1349,19 @@
 
 	mode &= ~current->fs->umask;
 	if (!IS_ERR(dentry)) {
+		if (gr_handle_chroot_mknod(dentry, nd.mnt, mode) ||
+		    gr_handle_chroot_chmod(dentry, nd.mnt, mode)) {
+			error = -EPERM;
+			dput(dentry);
+			goto out_dput;
+		}
+
+		if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
+			error = -EACCES;
+			dput(dentry);
+			goto out_dput;
+		}
+	
 		switch (mode & S_IFMT) {
 		case 0: case S_IFREG:
 			error = vfs_create(nd.dentry->d_inode,dentry,mode);
@@ -1295,8 +1375,13 @@
 		default:
 			error = -EINVAL;
 		}
+
+		if(!error)
+			gr_handle_create(dentry, nd.mnt);
+
 		dput(dentry);
 	}
+out_dput:
 	up(&nd.dentry->d_inode->i_sem);
 	path_release(&nd);
 out:
@@ -1348,8 +1433,17 @@
 		dentry = lookup_create(&nd, 1);
 		error = PTR_ERR(dentry);
 		if (!IS_ERR(dentry)) {
-			error = vfs_mkdir(nd.dentry->d_inode, dentry,
+			error = 0;
+
+			if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt))
+				error = -EACCES;
+
+			if(!error)
+				error = vfs_mkdir(nd.dentry->d_inode, dentry,
 					  mode & ~current->fs->umask);
+			if(!error)
+				gr_handle_create(dentry, nd.mnt);
+			
 			dput(dentry);
 		}
 		up(&nd.dentry->d_inode->i_sem);
@@ -1433,6 +1527,8 @@
 	char * name;
 	struct dentry *dentry;
 	struct nameidata nd;
+	ino_t saved_ino = 0;
+	kdev_t saved_dev = 0;
 
 	name = getname(pathname);
 	if(IS_ERR(name))
@@ -1457,7 +1553,22 @@
 	dentry = lookup_hash(&nd.last, nd.dentry);
 	error = PTR_ERR(dentry);
 	if (!IS_ERR(dentry)) {
-		error = vfs_rmdir(nd.dentry->d_inode, dentry);
+		error = 0;
+		if (dentry->d_inode) {
+			if (dentry->d_inode->i_nlink <= 1) {
+				saved_ino = dentry->d_inode->i_ino;
+				saved_dev = dentry->d_inode->i_dev;
+			}
+
+			if (!gr_acl_handle_rmdir(dentry, nd.mnt))
+				error = -EACCES;
+		}
+
+		if (!error)
+			error = vfs_rmdir(nd.dentry->d_inode, dentry);
+		if (!error && (saved_dev || saved_ino))
+			gr_handle_delete(saved_ino,saved_dev);
+
 		dput(dentry);
 	}
 	up(&nd.dentry->d_inode->i_sem);
@@ -1501,6 +1612,8 @@
 	char * name;
 	struct dentry *dentry;
 	struct nameidata nd;
+	ino_t saved_ino = 0;
+	kdev_t saved_dev = 0;
 
 	name = getname(pathname);
 	if(IS_ERR(name))
@@ -1519,7 +1632,21 @@
 		/* Why not before? Because we want correct error value */
 		if (nd.last.name[nd.last.len])
 			goto slashes;
-		error = vfs_unlink(nd.dentry->d_inode, dentry);
+		error = 0;
+		if (dentry->d_inode) {
+			if (dentry->d_inode->i_nlink <= 1) {
+				saved_ino = dentry->d_inode->i_ino;
+				saved_dev = dentry->d_inode->i_dev;
+			}
+
+			if (!gr_acl_handle_unlink(dentry, nd.mnt))
+				error = -EACCES;
+		}
+
+		if (!error)
+			error = vfs_unlink(nd.dentry->d_inode, dentry);
+		if (!error && (saved_ino || saved_dev))
+			gr_handle_delete(saved_ino,saved_dev);
 	exit2:
 		dput(dentry);
 	}
@@ -1583,7 +1710,15 @@
 		dentry = lookup_create(&nd, 0);
 		error = PTR_ERR(dentry);
 		if (!IS_ERR(dentry)) {
-			error = vfs_symlink(nd.dentry->d_inode, dentry, from);
+			error = 0;
+
+			if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from))
+				error = -EACCES;
+
+			if(!error)	
+				error = vfs_symlink(nd.dentry->d_inode, dentry, from);
+			if (!error)
+				gr_handle_create(dentry, nd.mnt);
 			dput(dentry);
 		}
 		up(&nd.dentry->d_inode->i_sem);
@@ -1667,7 +1802,27 @@
 		new_dentry = lookup_create(&nd, 0);
 		error = PTR_ERR(new_dentry);
 		if (!IS_ERR(new_dentry)) {
-			error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
+			error = 0;
+
+			if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt,
+					       old_nd.dentry->d_inode,
+					       old_nd.dentry->d_inode->i_mode, to)) {
+				error = -EPERM;
+				goto out_error;
+			}
+
+			if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt,
+						 old_nd.dentry, old_nd.mnt, to)) {
+				error = -EACCES;
+				goto out_error;
+			}
+
+			error = vfs_link(old_nd.dentry, 
+					nd.dentry->d_inode, new_dentry);
+
+			if (!error)
+				gr_handle_create(new_dentry, nd.mnt);
+out_error:
 			dput(new_dentry);
 		}
 		up(&nd.dentry->d_inode->i_sem);
@@ -1898,10 +2053,15 @@
 	if (IS_ERR(new_dentry))
 		goto exit4;
 
-	lock_kernel();
-	error = vfs_rename(old_dir->d_inode, old_dentry,
+	error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt,
+				     old_dentry, old_dir->d_inode, oldnd.mnt, newname);
+
+	if (error == 1) {
+		lock_kernel();
+		error = vfs_rename(old_dir->d_inode, old_dentry,
 				   new_dir->d_inode, new_dentry);
-	unlock_kernel();
+		unlock_kernel();
+	}
 
 	dput(new_dentry);
 exit4:
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/namespace.c linux-2.4.26-leo/fs/namespace.c
--- linux-2.4.26/fs/namespace.c	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/namespace.c	2004-06-22 20:53:47.000000000 +0100
@@ -15,6 +15,7 @@
 #include <linux/quotaops.h>
 #include <linux/acct.h>
 #include <linux/module.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -325,6 +326,8 @@
 			lock_kernel();
 			retval = do_remount_sb(sb, MS_RDONLY, 0);
 			unlock_kernel();
+
+			gr_log_remount(mnt->mnt_devname, retval);
 		}
 		up_write(&sb->s_umount);
 		return retval;
@@ -350,6 +353,9 @@
 	}
 	spin_unlock(&dcache_lock);
 	up_write(&current->namespace->sem);
+
+	gr_log_unmount(mnt->mnt_devname, retval);
+
 	return retval;
 }
 
@@ -732,6 +738,12 @@
 	if (retval)
 		return retval;
 
+	if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) {
+		retval = -EPERM;
+		path_release(&nd);
+		return retval;
+	}
+
 	if (flags & MS_REMOUNT)
 		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
 				    data_page);
@@ -743,6 +755,9 @@
 		retval = do_add_mount(&nd, type_page, flags, mnt_flags,
 				      dev_name, data_page);
 	path_release(&nd);
+
+	gr_log_mount(dev_name, dir_name, retval);
+
 	return retval;
 }
 
@@ -912,6 +927,9 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	if (gr_handle_chroot_pivot())
+		return -EPERM;
+
 	lock_kernel();
 
 	error = __user_walk(new_root, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/nfsd/nfssvc.c linux-2.4.26-leo/fs/nfsd/nfssvc.c
--- linux-2.4.26/fs/nfsd/nfssvc.c	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/fs/nfsd/nfssvc.c	2004-05-26 23:34:34.000000000 +0100
@@ -250,6 +250,7 @@
 	svc_exit_thread(rqstp);
 
 	/* Release module */
+	unlock_kernel();
 	MOD_DEC_USE_COUNT;
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/nls/nls_base.c linux-2.4.26-leo/fs/nls/nls_base.c
--- linux-2.4.26/fs/nls/nls_base.c	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/fs/nls/nls_base.c	2004-05-26 23:34:34.000000000 +0100
@@ -18,6 +18,7 @@
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 
 static struct nls_table *tables;
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/open.c linux-2.4.26-leo/fs/open.c
--- linux-2.4.26/fs/open.c	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/open.c	2004-06-22 20:53:47.000000000 +0100
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/tty.h>
 #include <linux/iobuf.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -95,7 +96,7 @@
 	write_unlock(&files->file_lock);
 }
 
-int do_truncate(struct dentry *dentry, loff_t length)
+int do_truncate(struct dentry *dentry, loff_t length, struct vfsmount *mnt)
 {
 	struct inode *inode = dentry->d_inode;
 	int error;
@@ -105,6 +106,9 @@
 	if (length < 0)
 		return -EINVAL;
 
+	if (!gr_acl_handle_truncate(dentry, mnt))
+		return -EACCES;
+
 	down_write(&inode->i_alloc_sem);
 	down(&inode->i_sem);
 	newattrs.ia_size = length;
@@ -165,7 +169,7 @@
 	error = locks_verify_truncate(inode, NULL, length);
 	if (!error) {
 		DQUOT_INIT(inode);
-		error = do_truncate(nd.dentry, length);
+		error = do_truncate(nd.dentry, length, nd.mnt);
 	}
 	put_write_access(inode);
 
@@ -217,7 +221,7 @@
 
 	error = locks_verify_truncate(inode, file, length);
 	if (!error)
-		error = do_truncate(dentry, length);
+		error = do_truncate(dentry, length, file->f_vfsmnt);
 out_putf:
 	fput(file);
 out:
@@ -292,6 +296,12 @@
 		    (error = permission(inode,MAY_WRITE)) != 0)
 			goto dput_and_out;
 	}
+
+	if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
+		error = -EACCES;
+		goto dput_and_out;
+	}
+
 	error = notify_change(nd.dentry, &newattrs);
 dput_and_out:
 	path_release(&nd);
@@ -344,6 +354,12 @@
 		    (error = permission(inode,MAY_WRITE)) != 0)
 			goto dput_and_out;
 	}
+
+	if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
+		error = -EACCES;
+		goto dput_and_out;
+	}
+
 	error = notify_change(nd.dentry, &newattrs);
 dput_and_out:
 	path_release(&nd);
@@ -386,6 +402,10 @@
 		if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
 		   && !special_file(nd.dentry->d_inode->i_mode))
 			res = -EROFS;
+
+		if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode))
+			res = -EACCES;
+
 		path_release(&nd);
 	}
 
@@ -409,6 +429,8 @@
 	if (error)
 		goto dput_and_out;
 
+	gr_log_chdir(nd.dentry, nd.mnt);
+
 	set_fs_pwd(current->fs, nd.mnt, nd.dentry);
 
 dput_and_out:
@@ -439,6 +461,13 @@
 		goto out_putf;
 
 	error = permission(inode, MAY_EXEC);
+
+	if (!error && !gr_chroot_fchdir(dentry, mnt))
+		error = -EPERM;
+
+	if (!error)
+		gr_log_chdir(dentry, mnt);
+
 	if (!error)
 		set_fs_pwd(current->fs, mnt, dentry);
 out_putf:
@@ -465,8 +494,16 @@
 	if (!capable(CAP_SYS_CHROOT))
 		goto dput_and_out;
 
+	if (gr_handle_chroot_chroot(nd.dentry, nd.mnt))
+		goto dput_and_out;
+
 	set_fs_root(current->fs, nd.mnt, nd.dentry);
 	set_fs_altroot();
+
+	gr_handle_chroot_caps(current);
+
+	gr_handle_chroot_chdir(nd.dentry, nd.mnt);
+
 	error = 0;
 dput_and_out:
 	path_release(&nd);
@@ -495,8 +532,20 @@
 	err = -EPERM;
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto out_putf;
+
+	if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
+		err = -EACCES;
+		goto out_putf;
+	}
+
 	if (mode == (mode_t) -1)
 		mode = inode->i_mode;
+
+	if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
+		err = -EPERM;
+		goto out_putf;
+	}
+
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 	err = notify_change(dentry, &newattrs);
@@ -527,8 +576,19 @@
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto dput_and_out;
 
+	if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) {
+		error = -EACCES;
+		goto dput_and_out;
+	}
+
 	if (mode == (mode_t) -1)
 		mode = inode->i_mode;
+
+	if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) {
+		error = -EACCES;
+		goto dput_and_out;
+	}
+
 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 	error = notify_change(nd.dentry, &newattrs);
@@ -539,7 +599,7 @@
 	return error;
 }
 
-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
 {
 	struct inode * inode;
 	int error;
@@ -556,6 +616,12 @@
 	error = -EPERM;
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto out;
+
+	if (!gr_acl_handle_chown(dentry, mnt)) {
+		error = -EACCES;
+		goto out;
+	}
+
 	if (user == (uid_t) -1)
 		user = inode->i_uid;
 	if (group == (gid_t) -1)
@@ -606,7 +672,7 @@
 
 	error = user_path_walk(filename, &nd);
 	if (!error) {
-		error = chown_common(nd.dentry, user, group);
+		error = chown_common(nd.dentry, user, group, nd.mnt);
 		path_release(&nd);
 	}
 	return error;
@@ -619,7 +685,7 @@
 
 	error = user_path_walk_link(filename, &nd);
 	if (!error) {
-		error = chown_common(nd.dentry, user, group);
+		error = chown_common(nd.dentry, user, group, nd.mnt);
 		path_release(&nd);
 	}
 	return error;
@@ -633,7 +699,8 @@
 
 	file = fget(fd);
 	if (file) {
-		error = chown_common(file->f_dentry, user, group);
+		error = chown_common(file->f_dentry, user,
+				group, file->f_vfsmnt);
 		fput(file);
 	}
 	return error;
@@ -753,6 +820,7 @@
 	 * N.B. For clone tasks sharing a files structure, this test
 	 * will limit the total number of files that can be opened.
 	 */
+	gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
 	if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
 		goto out;
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/array.c linux-2.4.26-leo/fs/proc/array.c
--- linux-2.4.26/fs/proc/array.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/proc/array.c	2004-06-22 20:53:47.000000000 +0100
@@ -273,6 +273,18 @@
 			    cap_t(p->cap_effective));
 }
 
+#if defined(CONFIG_GRKERNSEC_PAX_NOEXEC) || defined(CONFIG_GRKERNSEC_PAX_ASLR)
+static inline char *task_pax(struct task_struct *p, char *buffer)
+{
+	return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c%c\n",
+				p->flags & PF_PAX_PAGEEXEC ? 'P' : 'p',
+				p->flags & PF_PAX_EMUTRAMP ? 'E' : 'e',
+				p->flags & PF_PAX_MPROTECT ? 'M' : 'm',
+				p->flags & PF_PAX_RANDMMAP ? 'R' : 'r',
+				p->flags & PF_PAX_RANDEXEC ? 'X' : 'x',
+				p->flags & PF_PAX_SEGMEXEC ? 'S' : 's');
+}
+#endif
 
 int proc_pid_status(struct task_struct *task, char * buffer)
 {
@@ -295,9 +307,20 @@
 #if defined(CONFIG_ARCH_S390)
 	buffer = task_show_regs(task, buffer);
 #endif
+
+#if defined(CONFIG_GRKERNSEC_PAX_NOEXEC) || defined(CONFIG_GRKERNSEC_PAX_ASLR)
+	buffer = task_pax(task, buffer);
+#endif
+
 	return buffer - orig;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \
+			task->flags & PF_PAX_SEGMEXEC || \
+			task->flags & PF_PAX_RANDEXEC)
+#endif
+
 int proc_pid_stat(struct task_struct *task, char * buffer)
 {
 	unsigned long vsize, eip, esp, wchan;
@@ -335,6 +358,19 @@
 
 	wchan = get_wchan(task);
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (PAX_RAND_FLAGS) {
+		eip = 0;
+		esp = 0;
+		wchan = 0;
+	}
+#endif
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	wchan = 0;
+	eip = 0;
+	esp = 0;
+#endif
+
 	collect_sigign_sigcatch(task, &sigign, &sigcatch);
 
 	/* scale priority and nice values from timeslices to -20..20 */
@@ -374,9 +410,15 @@
 		vsize,
 		mm ? mm->rss : 0, /* you might want to shift this left 3 */
 		task->rlim[RLIMIT_RSS].rlim_cur,
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		PAX_RAND_FLAGS ? 0 : (mm ? mm->start_code : 0),
+		PAX_RAND_FLAGS ? 0 : (mm ? mm->end_code : 0),
+		PAX_RAND_FLAGS ? 0 : (mm ? mm->start_stack : 0),
+#else
 		mm ? mm->start_code : 0,
 		mm ? mm->end_code : 0,
 		mm ? mm->start_stack : 0,
+#endif
 		esp,
 		eip,
 		/* The signal information here is obsolete.
@@ -514,6 +556,7 @@
 
 static int show_map(struct seq_file *m, void *v)
 {
+	struct task_struct *task = m->private;
 	struct vm_area_struct *map = v;
 	struct file *file = map->vm_file;
 	int flags = map->vm_flags;
@@ -528,8 +571,13 @@
 	}
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+			PAX_RAND_FLAGS ? 0UL : map->vm_start,
+			PAX_RAND_FLAGS ? 0UL : map->vm_end,
+#else
 			map->vm_start,
 			map->vm_end,
+#endif
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
@@ -602,6 +650,16 @@
 	.show	= show_map
 };
 
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
+{
+	int len;
+
+	len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->curr_ip));
+	return len;
+}
+#endif
+
 #ifdef CONFIG_SMP
 int proc_pid_cpu(struct task_struct *task, char * buffer)
 {
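
task_pax() above adds a "PaX:" line to /proc/<pid>/status: six letters, uppercase when the corresponding flag is set, in the order PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP, RANDEXEC, SEGMEXEC. A sketch of a userspace decoder, assuming a kernel built with this patch (stock kernels emit no such line):

/* pax_status.c - decode the PaX: line emitted by task_pax(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		const char *fl;

		if (strncmp(line, "PaX:", 4) != 0)
			continue;
		fl = line + 5;		/* six flag letters follow "PaX:\t" */
		printf("PAGEEXEC %s\n", fl[0] == 'P' ? "on" : "off");
		printf("EMUTRAMP %s\n", fl[1] == 'E' ? "on" : "off");
		printf("MPROTECT %s\n", fl[2] == 'M' ? "on" : "off");
		printf("RANDMMAP %s\n", fl[3] == 'R' ? "on" : "off");
		printf("RANDEXEC %s\n", fl[4] == 'X' ? "on" : "off");
		printf("SEGMEXEC %s\n", fl[5] == 'S' ? "on" : "off");
	}
	fclose(f);
	return 0;
}
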
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/base.c linux-2.4.26-leo/fs/proc/base.c
--- linux-2.4.26/fs/proc/base.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/proc/base.c	2004-06-22 20:53:47.000000000 +0100
@@ -25,6 +25,7 @@
 #include <linux/string.h>
 #include <linux/seq_file.h>
 #include <linux/namespace.h>
+#include <linux/grsecurity.h>
 
 /*
  * For hysterical raisins we keep the same inumbers as in the old procfs.
@@ -40,6 +41,9 @@
 int proc_pid_status(struct task_struct*,char*);
 int proc_pid_statm(struct task_struct*,char*);
 int proc_pid_cpu(struct task_struct*,char*);
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+int proc_pid_ipaddr(struct task_struct*,char*);
+#endif
 
 static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
 {
@@ -126,7 +130,8 @@
 #define MAY_PTRACE(task) \
 	(task == current || \
 	(task->p_pptr == current && \
-	(task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED))
+	(task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED && \
+	!gr_handle_proc_ptrace(task)))
 
 static int may_ptrace_attach(struct task_struct *task)
 {
@@ -145,6 +150,8 @@
 	rmb();
 	if (!is_dumpable(task) && !capable(CAP_SYS_PTRACE))
 		goto out;
+	if (gr_handle_proc_ptrace(task))
+		goto out;
 
 	retval = 1;
 
@@ -263,9 +270,22 @@
 
 static int proc_permission(struct inode *inode, int mask)
 {
+	int ret;
+	struct task_struct *task;
+
 	if (vfs_permission(inode, mask) != 0)
 		return -EACCES;
-	return proc_check_root(inode);
+	ret = proc_check_root(inode);
+
+	if (ret)
+		return ret;
+
+	task = inode->u.proc_i.task;
+
+	if (!task)
+		return 0;
+
+	return gr_acl_handle_procpidmem(task);
 }
 
 extern struct seq_operations proc_pid_maps_op;
@@ -598,6 +618,9 @@
 	PROC_PID_STATM,
 	PROC_PID_MAPS,
 	PROC_PID_CPU,
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+	PROC_PID_IPADDR,
+#endif
 	PROC_PID_MOUNTS,
 	PROC_PID_FD_DIR = 0x8000,	/* 0x8000-0xffff */
 };
@@ -613,6 +636,9 @@
 #ifdef CONFIG_SMP
   E(PROC_PID_CPU,	"cpu",		S_IFREG|S_IRUGO),
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+  E(PROC_PID_IPADDR,	"ipaddr",	S_IFREG|S_IRUSR),
+#endif
   E(PROC_PID_MAPS,	"maps",		S_IFREG|S_IRUGO),
   E(PROC_PID_MEM,	"mem",		S_IFREG|S_IRUSR|S_IWUSR),
   E(PROC_PID_CWD,	"cwd",		S_IFLNK|S_IRWXUGO),
@@ -769,10 +795,17 @@
 	get_task_struct(task);
 	inode->u.proc_i.task = task;
 	inode->i_uid = 0;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+	inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
+#else
 	inode->i_gid = 0;
+#endif
+
 	if (ino == PROC_PID_INO || task_dumpable(task)) {
 		inode->i_uid = task->euid;
+#ifndef CONFIG_GRKERNSEC_PROC_USERGROUP
 		inode->i_gid = task->egid;
+#endif
 	}
 
 out:
@@ -980,6 +1013,12 @@
 			inode->u.proc_i.op.proc_read = proc_pid_cpu;
 			break;
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+		case PROC_PID_IPADDR:
+			inode->i_fop = &proc_info_file_operations;
+			inode->u.proc_i.op.proc_read = proc_pid_ipaddr;
+			break;
+#endif
 		case PROC_PID_MEM:
 			inode->i_op = &proc_mem_inode_operations;
 			inode->i_fop = &proc_mem_operations;
@@ -1078,13 +1117,35 @@
 	if (!task)
 		goto out;
 
+	if (gr_check_hidden_task(task)) {
+		free_task_struct(task);
+		goto out;
+	}
+
+#ifdef CONFIG_GRKERNSEC_PROC
+	if (current->uid && (task->uid != current->uid)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+	    && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
+#endif
+	) {
+		free_task_struct(task);
+		goto out;
+	}
+#endif
 	inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO);
 
 	free_task_struct(task);
 
 	if (!inode)
 		goto out;
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP;
+	inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
+#else
 	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+#endif
 	inode->i_op = &proc_base_inode_operations;
 	inode->i_fop = &proc_base_operations;
 	inode->i_nlink = 3;
@@ -1124,6 +1185,18 @@
 		int pid = p->pid;
 		if (!pid)
 			continue;
+		if (gr_pid_is_chrooted(p))
+			continue;
+		if (gr_check_hidden_task(p))
+			continue;
+#ifdef CONFIG_GRKERNSEC_PROC
+		if (current->uid && (p->uid != current->uid)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+		    && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
+#endif
+		)
+			continue;
+#endif
 		if (--index >= 0)
 			continue;
 		pids[nr_pids] = pid;
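
Taken together, the base.c hunks implement the /proc visibility policy: proc_pid_make_inode() forces the owner/group, proc_pid_lookup() makes per-pid directories mode 0500 (PROC_USER) or 0550 with gid CONFIG_GRKERNSEC_PROC_GID (PROC_USERGROUP), and both lookup and get_pid_list() skip chrooted or hidden tasks outright, so restricted processes never appear in a directory listing. A quick userspace check of which variant is in effect, stat'ing init's entry (on a restricted kernel the stat itself may fail for an unprivileged caller):

/* proc_mode.c - inspect the mode grsecurity gave /proc/1. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/proc/1", &st) < 0) {
		perror("stat /proc/1");	/* hidden entries fail outright */
		return 1;
	}
	/* 0555 = unrestricted, 0500 = PROC_USER, 0550 = PROC_USERGROUP */
	printf("mode %04o uid %d gid %d\n",
	       st.st_mode & 07777, (int) st.st_uid, (int) st.st_gid);
	return 0;
}
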
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/generic.c linux-2.4.26-leo/fs/proc/generic.c
--- linux-2.4.26/fs/proc/generic.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/proc/generic.c	2004-06-22 20:53:47.000000000 +0100
@@ -504,6 +504,32 @@
 	return ent;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC
+struct proc_dir_entry *proc_priv_mkdir(const char *name, struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry *ent;
+	mode_t mode = 0;
+
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	mode = S_IFDIR | S_IRUSR | S_IXUSR;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	mode = S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP;
+#endif
+
+	ent = proc_create(&parent, name, mode, 2);
+	if (ent) {
+		ent->proc_fops = &proc_dir_operations;
+		ent->proc_iops = &proc_dir_inode_operations;
+
+		if (proc_register(parent, ent) < 0) {
+			kfree(ent);
+			ent = NULL;
+		}
+	}
+	return ent;
+}
+#endif
+
 struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
 					 struct proc_dir_entry *parent)
 {
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/inode.c linux-2.4.26-leo/fs/proc/inode.c
--- linux-2.4.26/fs/proc/inode.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/proc/inode.c	2004-06-22 20:53:47.000000000 +0100
@@ -152,7 +152,11 @@
 		if (de->mode) {
 			inode->i_mode = de->mode;
 			inode->i_uid = de->uid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+			inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
+#else
 			inode->i_gid = de->gid;
+#endif
 		}
 		if (de->size)
 			inode->i_size = de->size;
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/proc_misc.c linux-2.4.26-leo/fs/proc/proc_misc.c
--- linux-2.4.26/fs/proc/proc_misc.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/fs/proc/proc_misc.c	2004-06-22 20:53:47.000000000 +0100
@@ -590,6 +590,7 @@
 void __init proc_misc_init(void)
 {
 	struct proc_dir_entry *entry;
+	int gr_mode = 0;
 	static struct {
 		char *name;
 		int (*read_proc)(char*,char**,off_t,int,int*,void*);
@@ -604,17 +605,21 @@
 #ifdef CONFIG_STRAM_PROC
 		{"stram",	stram_read_proc},
 #endif
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && !defined(CONFIG_GRKERNSEC_PROC)
 		{"modules",	modules_read_proc},
 #endif
 		{"stat",	kstat_read_proc},
+#ifndef CONFIG_GRKERNSEC_PROC_ADD
 		{"devices",	devices_read_proc},
-#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86)
+#endif
+#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 		{"interrupts",	interrupts_read_proc},
 #endif
 		{"filesystems",	filesystems_read_proc},
+#ifndef CONFIG_GRKERNSEC_PROC_ADD
 		{"dma",		dma_read_proc},
 		{"cmdline",	cmdline_read_proc},
+#endif
 #ifdef CONFIG_SGI_DS1286
 		{"rtc",		ds1286_read_proc},
 #endif
@@ -626,29 +631,60 @@
 	for (p = simple_ones; p->name; p++)
 		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);
 
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	gr_mode = S_IRUSR;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	gr_mode = S_IRUSR | S_IRGRP;
+#endif
+#if defined(CONFIG_GRKERNSEC_PROC) && defined(CONFIG_MODULES)
+	create_proc_read_entry("modules", gr_mode, NULL, &modules_read_proc, NULL);
+#endif
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	create_proc_read_entry("devices", gr_mode, NULL, &devices_read_proc, NULL);
+	create_proc_read_entry("dma", gr_mode, NULL, &dma_read_proc, NULL);
+	create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL);
+#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86)
+	create_proc_read_entry("interrupts", gr_mode, NULL, &interrupts_read_proc, NULL);
+#endif
+#endif
+
 	proc_symlink("mounts", NULL, "self/mounts");
 
 	/* And now for trickier ones */
 	entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
 	if (entry)
 		entry->proc_fops = &proc_kmsg_operations;
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	create_seq_entry("cpuinfo", gr_mode, &proc_cpuinfo_operations);
+#else
 	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
-#if defined(CONFIG_X86)
+#endif
+#if defined(CONFIG_X86) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+#elif defined(CONFIG_X86)
+	create_seq_entry("interrupts", gr_mode, &proc_interrupts_operations);
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	create_seq_entry("ioports", gr_mode, &proc_ioports_operations);
+	create_seq_entry("iomem", gr_mode, &proc_iomem_operations);
+	create_seq_entry("slabinfo",gr_mode,&proc_slabinfo_operations);
+#else
 	create_seq_entry("ioports", 0, &proc_ioports_operations);
 	create_seq_entry("iomem", 0, &proc_iomem_operations);
-	create_seq_entry("partitions", 0, &proc_partitions_operations);
 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#endif
+	create_seq_entry("partitions", 0, &proc_partitions_operations);
 #ifdef CONFIG_MODULES
-	create_seq_entry("ksyms", 0, &proc_ksyms_operations);
+	create_seq_entry("ksyms", gr_mode, &proc_ksyms_operations);
 #endif
+#ifndef CONFIG_GRKERNSEC_PROC_ADD
 	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
 	if (proc_root_kcore) {
 		proc_root_kcore->proc_fops = &proc_kcore_operations;
 		proc_root_kcore->size =
 				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
 	}
+#endif
 	if (prof_shift) {
 		entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL);
 		if (entry) {
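
proc_misc_init() now registers the sensitive global files (modules, devices, dma, cmdline, interrupts, cpuinfo, ioports, iomem, slabinfo, ksyms) with gr_mode, i.e. 0400 or 0440 depending on the PROC_USER/PROC_USERGROUP split, and omits kcore entirely under PROC_ADD. A small survey program, not part of the patch, showing the effect from an unprivileged shell:

/* proc_survey.c - report which restricted /proc files remain readable. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *files[] = {
		"/proc/modules", "/proc/devices", "/proc/dma",
		"/proc/cmdline", "/proc/kcore", "/proc/slabinfo", NULL
	};
	int i;

	for (i = 0; files[i]; i++)
		printf("%-16s %s\n", files[i],
		       access(files[i], R_OK) == 0 ? "readable" : "restricted");
	return 0;
}
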
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/proc_tty.c linux-2.4.26-leo/fs/proc/proc_tty.c
--- linux-2.4.26/fs/proc/proc_tty.c	2000-04-21 23:17:57.000000000 +0100
+++ linux-2.4.26-leo/fs/proc/proc_tty.c	2004-06-22 20:53:47.000000000 +0100
@@ -174,7 +174,11 @@
 	if (!proc_mkdir("tty", 0))
 		return;
 	proc_tty_ldisc = proc_mkdir("tty/ldisc", 0);
+#ifdef CONFIG_GRKERNSEC_PROC
+	proc_tty_driver = proc_priv_mkdir("tty/driver", 0);
+#else
 	proc_tty_driver = proc_mkdir("tty/driver", 0);
+#endif
 
 	create_proc_read_entry("tty/ldiscs", 0, 0, tty_ldiscs_read_proc,NULL);
 	create_proc_read_entry("tty/drivers", 0, 0, tty_drivers_read_proc,NULL);
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/proc/root.c linux-2.4.26-leo/fs/proc/root.c
--- linux-2.4.26/fs/proc/root.c	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/fs/proc/root.c	2004-06-22 20:53:47.000000000 +0100
@@ -37,13 +37,21 @@
 		return;
 	}
 	proc_misc_init();
+#ifdef CONFIG_GRKERNSEC_PROC
+	proc_net = proc_priv_mkdir("net", 0);
+#else
 	proc_net = proc_mkdir("net", 0);
+#endif
 #ifdef CONFIG_SYSVIPC
 	proc_mkdir("sysvipc", 0);
 #endif
 #ifdef CONFIG_SYSCTL
+#ifdef CONFIG_GRKERNSEC_PROC
+	proc_sys_root = proc_priv_mkdir("sys", 0);
+#else
 	proc_sys_root = proc_mkdir("sys", 0);
 #endif
+#endif
 #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
 	proc_mkdir("sys/fs", 0);
 	proc_mkdir("sys/fs/binfmt_misc", 0);
@@ -67,7 +75,12 @@
 #ifdef CONFIG_PPC_RTAS
 	proc_rtas_init();
 #endif
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	proc_bus = proc_priv_mkdir("bus", 0);
+#else
 	proc_bus = proc_mkdir("bus", 0);
+#endif
 }
 
 static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry)
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/readdir.c linux-2.4.26-leo/fs/readdir.c
--- linux-2.4.26/fs/readdir.c	2004-02-20 14:11:44.000000000 +0000
+++ linux-2.4.26-leo/fs/readdir.c	2004-06-22 20:53:47.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/stat.h>
 #include <linux/file.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -182,6 +183,7 @@
 struct readdir_callback {
 	struct old_linux_dirent * dirent;
 	int count;
+	struct nameidata nd;
 };
 
 static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
@@ -192,6 +194,10 @@
 
 	if (buf->count)
 		return -EINVAL;
+
+	if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
+		return 0;
+
 	buf->count++;
 	dirent = buf->dirent;
 	put_user(ino, &dirent->d_ino);
@@ -216,6 +222,9 @@
 	buf.count = 0;
 	buf.dirent = dirent;
 
+	buf.nd.dentry = file->f_dentry;
+	buf.nd.mnt = file->f_vfsmnt;
+
 	error = vfs_readdir(file, fillonedir, &buf);
 	if (error >= 0)
 		error = buf.count;
@@ -243,6 +252,7 @@
 	struct linux_dirent * previous;
 	int count;
 	int error;
+	struct nameidata nd;
 };
 
 static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
@@ -255,6 +265,10 @@
 	buf->error = -EINVAL;	/* only used if we fail.. */
 	if (reclen > buf->count)
 		return -EINVAL;
+
+	if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
+		return 0;
+
 	dirent = buf->previous;
 	if (dirent)
 		put_user(offset, &dirent->d_off);
@@ -287,6 +301,9 @@
 	buf.count = count;
 	buf.error = 0;
 
+	buf.nd.dentry = file->f_dentry;
+	buf.nd.mnt = file->f_vfsmnt;
+
 	error = vfs_readdir(file, filldir, &buf);
 	if (error < 0)
 		goto out_putf;
@@ -321,6 +338,7 @@
 	struct linux_dirent64 * previous;
 	int count;
 	int error;
+	struct nameidata nd;
 };
 
 static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
@@ -333,6 +351,10 @@
 	buf->error = -EINVAL;	/* only used if we fail.. */
 	if (reclen > buf->count)
 		return -EINVAL;
+
+	if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
+		return 0;
+
 	dirent = buf->previous;
 	if (dirent) {
 		d.d_off = offset;
@@ -370,6 +392,9 @@
 	buf.count = count;
 	buf.error = 0;
 
+	buf.nd.mnt = file->f_vfsmnt;
+	buf.nd.dentry = file->f_dentry;
+
 	error = vfs_readdir(file, filldir64, &buf);
 	if (error < 0)
 		goto out_putf;
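
All three getdents paths follow one pattern: the directory's dentry/mnt pair is stashed in the callback buffer, and the fill function returns 0, not an error, for entries gr_acl_handle_filldir() hides, so a hidden name is skipped with no indication to the reader. A userspace model of that callback contract (names invented for illustration; strings stand in for the kernel types):

/* filldir_model.c - model of the "return 0 to skip silently" contract. */
#include <stdio.h>

/* stand-in for gr_acl_handle_filldir(): hide dotfiles */
static int may_see(const char *name)
{
	return name[0] != '.';
}

/* stand-in for filldir(): 0 = consumed or skipped, <0 = abort the walk */
static int fill_one(const char *name)
{
	if (!may_see(name))
		return 0;	/* skipped: the reader never learns it exists */
	printf("%s\n", name);
	return 0;
}

int main(void)
{
	const char *entries[] = { "passwd", ".hidden", "motd", NULL };
	int i;

	for (i = 0; entries[i]; i++)
		if (fill_one(entries[i]) < 0)
			break;
	return 0;
}
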
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/reiserfs/super.c linux-2.4.26-leo/fs/reiserfs/super.c
--- linux-2.4.26/fs/reiserfs/super.c	2003-08-25 12:44:43.000000000 +0100
+++ linux-2.4.26-leo/fs/reiserfs/super.c	2004-05-26 23:34:19.000000000 +0100
@@ -73,7 +73,7 @@
     reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
     journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
     reiserfs_block_writes(&th) ;
-    journal_end(&th, s, 1) ;
+    journal_end_sync(&th, s, 1) ;
   }
   s->s_dirt = dirty;
   unlock_kernel() ;
diff -urN --exclude-from=diff-exclude linux-2.4.26/fs/super.c linux-2.4.26-leo/fs/super.c
--- linux-2.4.26/fs/super.c	2003-08-25 12:44:43.000000000 +0100
+++ linux-2.4.26-leo/fs/super.c	2004-05-26 23:34:19.000000000 +0100
@@ -39,6 +39,12 @@
 spinlock_t sb_lock = SPIN_LOCK_UNLOCKED;
 
 /*
+ * stub of a filesystem used to make sure an FS isn't mounted
+ * in the middle of a lockfs call
+ */
+static DECLARE_FSTYPE_DEV(lockfs_fs_type, "lockfs", NULL);
+
+/*
  * Handling of filesystem drivers list.
  * Rules:
  *	Inclusion to/removals from/scanning of list are protected by spinlock.
@@ -436,6 +442,25 @@
 	put_super(sb);
 }
 
+static void write_super_lockfs(struct super_block *sb)
+{
+	lock_super(sb);
+	if (sb->s_root && sb->s_op) {
+		if (sb->s_dirt && sb->s_op->write_super)
+			sb->s_op->write_super(sb);
+		if (sb->s_op->write_super_lockfs)
+			sb->s_op->write_super_lockfs(sb);
+	}
+	unlock_super(sb);
+
+	/* 
+	 * if no lockfs call is provided, use the sync_fs call instead.
+	 * this must be done without the super lock held
+	 */
+	if (!sb->s_op->write_super_lockfs && sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb);
+}
+
 static inline void write_super(struct super_block *sb)
 {
 	lock_super(sb);
@@ -483,6 +508,119 @@
 	spin_unlock(&sb_lock);
 }
 
+static struct super_block *find_super_for_lockfs(kdev_t dev)
+{
+	struct super_block *lockfs_sb, *s;
+
+	if (!dev)
+		return NULL;
+	lockfs_sb = alloc_super();
+restart:
+	spin_lock(&sb_lock);
+	s = find_super(dev);
+	if (s) {
+		spin_unlock(&sb_lock);
+		down_read(&s->s_umount);
+		if (s->s_root) {
+			destroy_super(lockfs_sb);
+			return s;
+		}
+		drop_super(s);
+		goto restart;
+	}
+	/* if (s) we either return or goto, so we know s == NULL here.
+	 * At this point, there are no mounted filesystems on this device,
+	 * so we pretend to mount one.
+	 */
+	if (!lockfs_sb) {
+		spin_unlock(&sb_lock);
+		return NULL;
+	}
+	s = lockfs_sb;
+	s->s_dev = dev;
+	if (lockfs_fs_type.fs_supers.prev == NULL)
+		INIT_LIST_HEAD(&lockfs_fs_type.fs_supers);
+	insert_super(s, &lockfs_fs_type);
+	s->s_root = (struct dentry *)1;
+	/* alloc_super gives us a write lock on s_umount, this
+	 * way we know there are no concurrent lockfs holders for this dev.  
+	 * It allows us to remove the temp super from the list of supers 
+	 * immediately when unlockfs is called
+	 */
+	return s;
+}
+/*
+ * Note: don't check the dirty flag before waiting, we want the lock
+ * to happen every time this is called.  dev must be non-zero
+ */
+void sync_supers_lockfs(kdev_t dev)
+{
+	struct super_block *sb;
+	sb = find_super_for_lockfs(dev);
+	if (sb) {
+		write_super_lockfs(sb);
+		/* the drop_super is done by unlockfs */
+	}
+}
+
+static void drop_super_lockfs(struct super_block *s)
+{
+	if (s->s_type == &lockfs_fs_type) {
+		struct file_system_type *fs = s->s_type;
+
+		/* 
+		 * nobody else is allowed to grab_super() on our temp
+		 */
+		if (!deactivate_super(s))
+			BUG();
+
+		spin_lock(&sb_lock);
+		s->s_root = NULL;
+		list_del(&s->s_list);
+		list_del(&s->s_instances);
+		spin_unlock(&sb_lock);
+
+		up_write(&s->s_umount);
+		put_super(s);
+		put_filesystem(fs);
+	} else
+		drop_super(s);
+}
+
+void unlockfs(kdev_t dev)
+{
+	struct super_block *s;
+	if (!dev)
+		return;
+
+	spin_lock(&sb_lock);
+	s = find_super(dev);
+	if (s) {
+		/* 
+		 * find_super and the original lockfs call both incremented
+		 * the reference count.  drop one of them
+		 */
+		s->s_count--;
+		spin_unlock(&sb_lock);
+		if (s->s_root) {
+			if (s->s_op->unlockfs)
+				s->s_op->unlockfs(s);
+			drop_super_lockfs(s);
+			goto out;
+		} else {
+			printk("unlockfs: no s_root, dev %s\n", kdevname(dev));
+			BUG();
+		}
+	} else {
+		printk("unlockfs: no super found, dev %s\n", kdevname(dev));
+		BUG();
+	}
+
+	spin_unlock(&sb_lock);
+out:
+	return;
+}
+
 /**
  *	get_super	-	get the superblock of a device
  *	@dev: device to get the superblock for
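
sync_supers_lockfs()/unlockfs() bracket a device freeze. If nothing is mounted on the device, find_super_for_lockfs() inserts a placeholder superblock under lockfs_fs_type (s_root pointed at a dummy non-NULL value) so a real mount cannot race in while the device is frozen; drop_super_lockfs() tears the stub down on thaw. A single-threaded userspace model of the placeholder idea (all names invented):

/* lockfs_model.c - placeholder-entry model of the freeze/thaw dance. */
#include <stdio.h>
#include <string.h>

#define NDEV 4

static char owner[NDEV][16];	/* "" = free, else who holds the slot */

static void lockfs(int dev)
{
	if (!owner[dev][0])
		strcpy(owner[dev], "placeholder");	/* stub superblock */
	printf("dev %d frozen by %s\n", dev, owner[dev]);
}

static int try_mount(int dev)
{
	if (owner[dev][0]) {
		printf("dev %d busy (%s)\n", dev, owner[dev]);
		return -1;
	}
	strcpy(owner[dev], "real-fs");
	return 0;
}

static void unlockfs(int dev)
{
	if (!strcmp(owner[dev], "placeholder"))
		owner[dev][0] = '\0';	/* drop the stub, as drop_super_lockfs does */
	printf("dev %d thawed\n", dev);
}

int main(void)
{
	lockfs(1);
	try_mount(1);	/* refused while frozen */
	unlockfs(1);
	try_mount(1);	/* now succeeds */
	return 0;
}
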
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/Config.in linux-2.4.26-leo/grsecurity/Config.in
--- linux-2.4.26/grsecurity/Config.in	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/Config.in	2004-06-22 20:59:48.000000000 +0100
@@ -0,0 +1,397 @@
+define_bool CONFIG_CRYPTO y
+define_bool CONFIG_CRYPTO_SHA256 y
+choice 'Security level' \
+        "Low		CONFIG_GRKERNSEC_LOW \
+         Medium		CONFIG_GRKERNSEC_MID \
+         High		CONFIG_GRKERNSEC_HI \
+         Customized	CONFIG_GRKERNSEC_CUSTOM" Customized
+if [ "$CONFIG_GRKERNSEC_LOW" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_RANDSRC n
+define_bool CONFIG_GRKERNSEC_RANDRPC n
+define_bool CONFIG_GRKERNSEC_FORKFAIL n
+define_bool CONFIG_GRKERNSEC_TIME n
+define_bool CONFIG_GRKERNSEC_SIGNAL n
+define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT n
+define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT n
+define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR n
+define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE n
+define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT n
+define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD n
+define_bool CONFIG_GRKERNSEC_PROC n
+define_bool CONFIG_GRKERNSEC_PROC_IPADDR n
+define_bool CONFIG_GRKERNSEC_PROC_MEMMAP n
+define_bool CONFIG_GRKERNSEC_HIDESYM n
+define_bool CONFIG_GRKERNSEC_CHROOT_CAPS n
+define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL n
+define_bool CONFIG_GRKERNSEC_PROC_USERGROUP n
+define_bool CONFIG_GRKERNSEC_KMEM n
+define_bool CONFIG_GRKERNSEC_PORT n
+define_bool CONFIG_GRKERNSEC_PROC_ADD n
+define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD n
+define_bool CONFIG_GRKERNSEC_CHROOT_NICE n
+define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK n
+define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK n
+define_bool CONFIG_GRKERNSEC_PAX_ASLR n
+define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP n
+define_bool CONFIG_GRKERNSEC_PAX_NOEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_MPROTECT n
+define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n
+define_bool CONFIG_GRKERNSEC_PAX_EI_PAX n
+define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS n
+define_bool CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS n
+define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n
+define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n
+if [ "$CONFIG_X86" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK n
+define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC n
+define_bool CONFIG_GRKERNSEC_IO n
+define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC n
+fi
+define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n
+define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n
+define_bool CONFIG_GRKERNSEC_RESLOG n
+define_int CONFIG_GRKERNSEC_ACL_MAXTRIES 3
+define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30
+
+define_int  CONFIG_GRKERNSEC_FLOODTIME 10
+define_int  CONFIG_GRKERNSEC_FLOODBURST 4
+define_bool CONFIG_GRKERNSEC_LINK y
+define_bool CONFIG_GRKERNSEC_FIFO y
+define_bool CONFIG_GRKERNSEC_RANDPID y
+define_bool CONFIG_GRKERNSEC_EXECVE y
+define_bool CONFIG_GRKERNSEC_RANDNET y
+define_bool CONFIG_GRKERNSEC_RANDISN n
+define_bool CONFIG_GRKERNSEC_DMESG y
+define_bool CONFIG_GRKERNSEC_RANDID y
+define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y
+fi
+if [ "$CONFIG_GRKERNSEC_MID" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_KMEM n
+define_bool CONFIG_GRKERNSEC_PROC_IPADDR n
+define_bool CONFIG_GRKERNSEC_HIDESYM n
+define_bool CONFIG_GRKERNSEC_PROC_ADD n
+define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD n
+define_bool CONFIG_GRKERNSEC_CHROOT_NICE n
+define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK n
+define_bool CONFIG_GRKERNSEC_PAX_NOEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_MPROTECT n
+define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n
+define_bool CONFIG_GRKERNSEC_PAX_EI_PAX y
+define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS y
+define_bool CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS y
+define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n
+define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n
+if [ "$CONFIG_X86" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_IO n
+define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC n
+fi
+define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n
+define_bool CONFIG_GRKERNSEC_CHROOT_CAPS n
+define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR n
+define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n
+define_bool CONFIG_GRKERNSEC_RESLOG n
+define_int CONFIG_GRKERNSEC_ACL_MAXTRIES 3
+define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30
+
+define_int  CONFIG_GRKERNSEC_FLOODTIME 10
+define_int  CONFIG_GRKERNSEC_FLOODBURST 4
+define_bool CONFIG_GRKERNSEC_PROC_MEMMAP y
+define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL y
+define_bool CONFIG_GRKERNSEC_LINK y
+define_bool CONFIG_GRKERNSEC_FIFO y
+define_bool CONFIG_GRKERNSEC_RANDPID y
+define_bool CONFIG_GRKERNSEC_EXECVE y
+define_bool CONFIG_GRKERNSEC_DMESG y
+define_bool CONFIG_GRKERNSEC_RANDID y
+define_bool CONFIG_GRKERNSEC_RANDNET y
+define_bool CONFIG_GRKERNSEC_RANDISN y
+define_bool CONFIG_GRKERNSEC_RANDSRC y
+define_bool CONFIG_GRKERNSEC_RANDRPC y
+define_bool CONFIG_GRKERNSEC_FORKFAIL y
+define_bool CONFIG_GRKERNSEC_TIME y
+define_bool CONFIG_GRKERNSEC_SIGNAL y
+define_bool CONFIG_GRKERNSEC_CHROOT y
+define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT n
+define_bool CONFIG_GRKERNSEC_CHROOT_UNIX y
+define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT y
+define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT y
+define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE y
+define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y
+define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD y
+define_bool CONFIG_GRKERNSEC_PROC y
+define_bool CONFIG_GRKERNSEC_PROC_USERGROUP y
+define_int  CONFIG_GRKERNSEC_PROC_GID 10
+define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK y
+define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK n
+define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_ASLR y
+define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP y
+fi
+if [ "$CONFIG_GRKERNSEC_HI" = "y" ]; then
+define_int CONFIG_GRKERNSEC_FLOODTIME 10
+define_int  CONFIG_GRKERNSEC_FLOODBURST 4
+define_bool CONFIG_GRKERNSEC_LINK y
+define_bool CONFIG_GRKERNSEC_FIFO y
+define_bool CONFIG_GRKERNSEC_RANDPID y
+define_bool CONFIG_GRKERNSEC_EXECVE y
+define_bool CONFIG_GRKERNSEC_DMESG y
+define_bool CONFIG_GRKERNSEC_RANDID y
+define_bool CONFIG_GRKERNSEC_RANDSRC y
+define_bool CONFIG_GRKERNSEC_RANDRPC y
+define_bool CONFIG_GRKERNSEC_FORKFAIL y
+define_bool CONFIG_GRKERNSEC_TIME y
+define_bool CONFIG_GRKERNSEC_SIGNAL y
+define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT y
+define_bool CONFIG_GRKERNSEC_CHROOT_UNIX y
+define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT y
+define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR y
+define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT y
+define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE y
+define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y
+define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD y
+define_bool CONFIG_GRKERNSEC_CHROOT_CAPS y
+define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL y
+define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK y
+define_bool CONFIG_GRKERNSEC_PROC y
+define_bool CONFIG_GRKERNSEC_PROC_IPADDR n
+define_bool CONFIG_GRKERNSEC_PROC_MEMMAP y
+define_bool CONFIG_GRKERNSEC_HIDESYM y
+define_bool CONFIG_GRKERNSEC_PROC_USERGROUP y
+define_int  CONFIG_GRKERNSEC_PROC_GID 10
+define_bool CONFIG_GRKERNSEC_KMEM y
+define_bool CONFIG_GRKERNSEC_PORT y
+define_bool CONFIG_GRKERNSEC_RESLOG y
+define_bool CONFIG_GRKERNSEC_RANDNET y
+define_bool CONFIG_GRKERNSEC_RANDISN y
+
+define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n
+define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n
+define_int CONFIG_GRKERNSEC_ACL_MAXTRIES 3
+define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30
+
+define_bool CONFIG_GRKERNSEC_PROC_ADD y
+define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD y
+define_bool CONFIG_GRKERNSEC_CHROOT_NICE y
+define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK y
+define_bool CONFIG_GRKERNSEC_PAX_ASLR y
+define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP y
+define_bool CONFIG_GRKERNSEC_PAX_NOEXEC y
+define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n
+define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_MPROTECT y
+define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n
+define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n
+define_bool CONFIG_GRKERNSEC_PAX_EI_PAX y
+define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS y
+define_bool CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS y
+if [ "$CONFIG_X86" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_IO n
+if [ "$CONFIG_MODULES" != "y" -a "$CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM" != "y" ]; then
+define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC y
+fi
+define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK y
+define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC y
+define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC y
+define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n
+define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n
+fi
+if [ "$CONFIG_PARISC" = "y" ]; then
+define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP y
+define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT y
+fi
+define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT y
+fi
+if [ "$CONFIG_GRKERNSEC_CUSTOM" = "y" ]; then
+mainmenu_option next_comment
+comment 'PaX Control'
+bool 'Support soft mode' CONFIG_GRKERNSEC_PAX_SOFTMODE
+bool 'Use legacy ELF header marking' CONFIG_GRKERNSEC_PAX_EI_PAX
+bool 'Use ELF program header marking' CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS
+choice 'MAC system integration' \
+	"none		CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS \
+	 direct		CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS \
+	 hook		CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS" none
+endmenu
+mainmenu_option next_comment
+comment 'Address Space Protection'
+if [ "$CONFIG_GRKERNSEC_PAX_EI_PAX" = "y" -o \
+     "$CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS" = "y" -o \
+     "$CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS" = "y" -o \
+     "$CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS" = "y" ]; then
+   bool 'Enforce Non-executable pages' CONFIG_GRKERNSEC_PAX_NOEXEC
+   if [ "$CONFIG_GRKERNSEC_PAX_NOEXEC" = "y" ]; then
+     bool 'Paging based non-executable pages' CONFIG_GRKERNSEC_PAX_PAGEEXEC
+     if [ "$CONFIG_X86" = "y" ]; then
+       bool 'Segmentation based non-executable pages' CONFIG_GRKERNSEC_PAX_SEGMEXEC
+     fi
+     if [ "$CONFIG_X86" = "y" -o "$CONFIG_PARISC" = "y" -o "$CONFIG_PPC32" = "y" ]; then
+       if [ "$CONFIG_GRKERNSEC_PAX_PAGEEXEC" = "y" -o "$CONFIG_GRKERNSEC_PAX_SEGMEXEC" = "y" ]; then
+         bool '   Emulate trampolines' CONFIG_GRKERNSEC_PAX_EMUTRAMP
+         if [ "$CONFIG_GRKERNSEC_PAX_EMUTRAMP" = "y" ]; then
+           bool '    Automatically emulate sigreturn trampolines' CONFIG_GRKERNSEC_PAX_EMUSIGRT
+         fi
+       fi
+     fi
+     bool '   Restrict mprotect()' CONFIG_GRKERNSEC_PAX_MPROTECT
+     if [ "$CONFIG_GRKERNSEC_PAX_MPROTECT" = "y" ]; then
+       if [ "$CONFIG_X86" = "y" ]; then
+         bool '    Disallow ELF text relocations (DANGEROUS)' CONFIG_GRKERNSEC_PAX_NOELFRELOCS
+       else
+       if [ "$CONFIG_ALPHA" = "y" -o "$CONFIG_PARISC" = "y" ]; then
+         bool '    Allow ELF ET_EXEC text relocations' CONFIG_GRKERNSEC_PAX_ETEXECRELOCS
+       fi
+       if [ "$CONFIG_PPC32" = "y" ]; then
+         define_bool CONFIG_GRKERNSEC_PAX_SYSCALL y
+       fi
+       if [ "$CONFIG_ALPHA" = "y" -o "$CONFIG_PARISC" = "y" -o "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" -o "$CONFIG_PPC32" = "y" ]; then
+         bool '    Automatically emulate ELF PLT' CONFIG_GRKERNSEC_PAX_EMUPLT
+         if [ "$CONFIG_GRKERNSEC_PAX_EMUPLT" = "y" ]; then
+           if [ "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" ]; then
+             define_bool CONFIG_GRKERNSEC_PAX_DLRESOLVE y
+           fi
+         fi
+       fi
+       fi
+     fi
+   fi
+   if [ "$CONFIG_X86" = "y" -a "$CONFIG_MODULES" != "y" -a "$CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM" != "y" ]; then
+     bool 'Enforce non-executable kernel pages' CONFIG_GRKERNSEC_PAX_KERNEXEC
+   fi
+   bool 'Address Space Layout Randomization' CONFIG_GRKERNSEC_PAX_ASLR
+   if [ "$CONFIG_GRKERNSEC_PAX_ASLR" = "y" ]; then
+     if [ "$CONFIG_X86_TSC" = "y" ]; then
+       bool '  Randomize kernel stack base' CONFIG_GRKERNSEC_PAX_RANDKSTACK
+     fi
+     bool '  Randomize user stack base' CONFIG_GRKERNSEC_PAX_RANDUSTACK
+     bool '  Randomize mmap() base' CONFIG_GRKERNSEC_PAX_RANDMMAP
+     if [ "$CONFIG_GRKERNSEC_PAX_RANDMMAP" = "y" -a "$CONFIG_GRKERNSEC_PAX_MPROTECT" = "y" ]; then
+       bool '  Randomize ET_EXEC base' CONFIG_GRKERNSEC_PAX_RANDEXEC
+     fi
+   fi
+fi
+
+bool 'Deny writing to /dev/kmem and /dev/mem' CONFIG_GRKERNSEC_KMEM
+bool 'Deny access to /dev/port' CONFIG_GRKERNSEC_PORT
+if [ "$CONFIG_X86" = "y" ]; then
+  bool 'Disable privileged I/O' CONFIG_GRKERNSEC_IO
+  if [ "$CONFIG_GRKERNSEC_IO" = "y" ]; then
+    define_bool CONFIG_RTC y
+  fi
+fi
+bool 'Remove addresses from /proc/pid/[maps|stat]' CONFIG_GRKERNSEC_PROC_MEMMAP
+bool 'Hide kernel symbols' CONFIG_GRKERNSEC_HIDESYM
+endmenu
+mainmenu_option next_comment
+comment 'Role Based Access Control Options'
+bool 'Hide kernel processes' CONFIG_GRKERNSEC_ACL_HIDEKERN
+int 'Maximum tries before password lockout' CONFIG_GRKERNSEC_ACL_MAXTRIES 3
+int 'Time to wait after max password tries, in seconds' CONFIG_GRKERNSEC_ACL_TIMEOUT 30
+endmenu
+mainmenu_option next_comment
+comment 'Filesystem Protections'
+bool 'Proc restrictions' CONFIG_GRKERNSEC_PROC
+if [ "$CONFIG_GRKERNSEC_PROC" != "n" ]; then
+ bool '   Restrict to user only' CONFIG_GRKERNSEC_PROC_USER
+ if [ "$CONFIG_GRKERNSEC_PROC_USER" != "y" ]; then
+  bool '   Allow special group' CONFIG_GRKERNSEC_PROC_USERGROUP
+  if [ "$CONFIG_GRKERNSEC_PROC_USERGROUP" != "n" ]; then
+   int  '   GID for special group' CONFIG_GRKERNSEC_PROC_GID 1001
+  fi
+ fi
+ if [ "$CONFIG_GRKERNSEC_PROC_USER" != "n" -o "$CONFIG_GRKERNSEC_PROC_USERGROUP" != "n" ]; then
+  bool '   Additional restrictions' CONFIG_GRKERNSEC_PROC_ADD
+ fi
+fi
+bool 'Linking restrictions' CONFIG_GRKERNSEC_LINK
+bool 'FIFO restrictions' CONFIG_GRKERNSEC_FIFO
+bool 'Chroot jail restrictions' CONFIG_GRKERNSEC_CHROOT
+if [ "$CONFIG_GRKERNSEC_CHROOT" != "n" ]; then
+bool '   Deny mounts' CONFIG_GRKERNSEC_CHROOT_MOUNT
+bool '   Deny double-chroots' CONFIG_GRKERNSEC_CHROOT_DOUBLE
+bool '   Deny pivot_root in chroot' CONFIG_GRKERNSEC_CHROOT_PIVOT
+bool '   Enforce chdir("/") on all chroots' CONFIG_GRKERNSEC_CHROOT_CHDIR
+bool '   Deny (f)chmod +s' CONFIG_GRKERNSEC_CHROOT_CHMOD
+bool '   Deny fchdir out of chroot' CONFIG_GRKERNSEC_CHROOT_FCHDIR
+bool '   Deny mknod' CONFIG_GRKERNSEC_CHROOT_MKNOD
+bool '   Deny shmat() out of chroot' CONFIG_GRKERNSEC_CHROOT_SHMAT
+bool '   Deny access to abstract AF_UNIX sockets out of chroot' CONFIG_GRKERNSEC_CHROOT_UNIX
+bool '   Protect outside processes' CONFIG_GRKERNSEC_CHROOT_FINDTASK
+bool '   Restrict priority changes' CONFIG_GRKERNSEC_CHROOT_NICE
+bool '   Deny sysctl writes in chroot' CONFIG_GRKERNSEC_CHROOT_SYSCTL
+bool '   Capability restrictions within chroot' CONFIG_GRKERNSEC_CHROOT_CAPS
+fi
+endmenu
+mainmenu_option next_comment
+comment 'Kernel Auditing'
+bool 'Single group for auditing' CONFIG_GRKERNSEC_AUDIT_GROUP
+if [ "$CONFIG_GRKERNSEC_AUDIT_GROUP" != "n" ]; then
+int  '   GID for auditing' CONFIG_GRKERNSEC_AUDIT_GID 1007
+fi
+bool 'Exec logging' CONFIG_GRKERNSEC_EXECLOG
+bool 'Resource logging' CONFIG_GRKERNSEC_RESLOG
+bool 'Log execs within chroot' CONFIG_GRKERNSEC_CHROOT_EXECLOG
+bool 'Chdir logging' CONFIG_GRKERNSEC_AUDIT_CHDIR
+bool '(Un)Mount logging' CONFIG_GRKERNSEC_AUDIT_MOUNT
+bool 'IPC logging' CONFIG_GRKERNSEC_AUDIT_IPC
+bool 'Signal logging' CONFIG_GRKERNSEC_SIGNAL
+bool 'Fork failure logging' CONFIG_GRKERNSEC_FORKFAIL
+bool 'Time change logging' CONFIG_GRKERNSEC_TIME
+bool '/proc/<pid>/ipaddr support' CONFIG_GRKERNSEC_PROC_IPADDR
+if [ "$CONFIG_GRKERNSEC_PAX_MPROTECT" != "n" ]; then
+  bool 'ELF text relocations logging (READ HELP)' CONFIG_GRKERNSEC_AUDIT_TEXTREL
+fi
+endmenu
+mainmenu_option next_comment
+comment 'Executable Protections'
+bool 'Enforce RLIMIT_NPROC on execs' CONFIG_GRKERNSEC_EXECVE
+bool 'Dmesg(8) restriction' CONFIG_GRKERNSEC_DMESG
+bool 'Randomized PIDs' CONFIG_GRKERNSEC_RANDPID
+bool 'Trusted path execution' CONFIG_GRKERNSEC_TPE
+if [ "$CONFIG_GRKERNSEC_TPE" != "n" ]; then
+bool '   Partially restrict non-root users' CONFIG_GRKERNSEC_TPE_ALL
+int  '   GID for untrusted users:' CONFIG_GRKERNSEC_TPE_GID 1005
+fi
+endmenu
+mainmenu_option next_comment
+comment 'Network Protections'
+bool 'Larger entropy pools' CONFIG_GRKERNSEC_RANDNET
+bool 'Truly random TCP ISN selection' CONFIG_GRKERNSEC_RANDISN
+bool 'Randomized IP IDs' CONFIG_GRKERNSEC_RANDID
+bool 'Randomized TCP source ports' CONFIG_GRKERNSEC_RANDSRC
+bool 'Randomized RPC XIDs' CONFIG_GRKERNSEC_RANDRPC
+bool 'Socket restrictions' CONFIG_GRKERNSEC_SOCKET
+if [ "$CONFIG_GRKERNSEC_SOCKET" != "n" ]; then
+bool '  Deny any sockets to group' CONFIG_GRKERNSEC_SOCKET_ALL
+if [ "$CONFIG_GRKERNSEC_SOCKET_ALL" != "n" ]; then
+int  '   GID to deny all sockets for:' CONFIG_GRKERNSEC_SOCKET_ALL_GID 1004
+fi
+bool '  Deny client sockets to group' CONFIG_GRKERNSEC_SOCKET_CLIENT
+if [ "$CONFIG_GRKERNSEC_SOCKET_CLIENT" != "n" ]; then
+int  '   GID to deny client sockets for:' CONFIG_GRKERNSEC_SOCKET_CLIENT_GID 1003
+fi
+bool '  Deny server sockets to group' CONFIG_GRKERNSEC_SOCKET_SERVER
+if [ "$CONFIG_GRKERNSEC_SOCKET_SERVER" != "n" ]; then
+int  '   GID to deny server sockets for:' CONFIG_GRKERNSEC_SOCKET_SERVER_GID 1002
+fi
+fi
+endmenu
+if [ "$CONFIG_SYSCTL" != "n" ]; then
+mainmenu_option next_comment
+comment 'Sysctl support'
+bool 'Sysctl support' CONFIG_GRKERNSEC_SYSCTL
+endmenu
+fi
+mainmenu_option next_comment
+comment 'Logging options'
+int 'Seconds in between log messages (minimum)' CONFIG_GRKERNSEC_FLOODTIME 10
+int 'Number of messages in a burst (maximum)' CONFIG_GRKERNSEC_FLOODBURST 4
+endmenu
+fi
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_alloc.c linux-2.4.26-leo/grsecurity/gracl_alloc.c
--- linux-2.4.26/grsecurity/gracl_alloc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_alloc.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,93 @@
+/* stack-based acl allocation tracking (c) Brad Spengler 2002,2003 */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+
+static unsigned long alloc_stack_next = 1;
+static unsigned long alloc_stack_size = 1;
+static void **alloc_stack;
+
+static __inline__ int
+alloc_pop(void)
+{
+	if (alloc_stack_next == 1)
+		return 0;
+
+	kfree(alloc_stack[alloc_stack_next - 2]);
+
+	alloc_stack_next--;
+
+	return 1;
+}
+
+static __inline__ void
+alloc_push(void *buf)
+{
+	if (alloc_stack_next >= alloc_stack_size)
+		BUG();
+
+	alloc_stack[alloc_stack_next - 1] = buf;
+
+	alloc_stack_next++;
+
+	return;
+}
+
+void *
+acl_alloc(unsigned long len)
+{
+	void *ret;
+
+	if (len > PAGE_SIZE)
+		BUG();
+
+	ret = kmalloc(len, GFP_KERNEL);
+
+	if (ret)
+		alloc_push(ret);
+
+	return ret;
+}
+
+void
+acl_free_all(void)
+{
+	if (gr_acl_is_enabled() || !alloc_stack)
+		return;
+
+	while (alloc_pop()) ;
+
+	if (alloc_stack) {
+		if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
+			kfree(alloc_stack);
+		else
+			vfree(alloc_stack);
+	}
+
+	alloc_stack = NULL;
+	alloc_stack_size = 1;
+	alloc_stack_next = 1;
+
+	return;
+}
+
+int
+acl_alloc_stack_init(unsigned long size)
+{
+	if ((size * sizeof (void *)) <= PAGE_SIZE)
+		alloc_stack =
+		    (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
+	else
+		alloc_stack = (void **) vmalloc(size * sizeof (void *));
+
+	alloc_stack_size = size;
+
+	if (!alloc_stack)
+		return 0;
+	else
+		return 1;
+}
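
gracl_alloc.c is a one-shot arena: each acl_alloc() result is pushed onto a preallocated pointer stack sized by acl_alloc_stack_init(), and acl_free_all() pops and kfree()s everything when a policy is torn down, so the ACL parser never tracks individual object lifetimes. A userspace model of the scheme, with malloc/free standing in for kmalloc/kfree and an arbitrary fixed capacity:

/* acl_alloc_model.c - userspace model of the stack-based alloc tracker. */
#include <stdlib.h>
#include <string.h>

#define STACK_CAP 128

static void *stack[STACK_CAP];
static unsigned long next;

static void *acl_alloc(size_t len)
{
	void *p = malloc(len);

	if (p) {
		if (next >= STACK_CAP)
			abort();	/* the kernel version BUG()s here */
		stack[next++] = p;
	}
	return p;
}

static void acl_free_all(void)
{
	while (next)
		free(stack[--next]);	/* bulk release, as on ACL reload */
}

int main(void)
{
	char *a = acl_alloc(32), *b = acl_alloc(64);

	if (a && b) {
		strcpy(a, "subject");
		strcpy(b, "object");
	}
	acl_free_all();
	return 0;
}
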
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl.c linux-2.4.26-leo/grsecurity/gracl.c
--- linux-2.4.26/grsecurity/gracl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,3305 @@
+/* 
+ * grsecurity/gracl.c
+ * Copyright Brad Spengler 2001, 2002, 2003
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/sysctl.h>
+#include <linux/gracl.h>
+#include <linux/gralloc.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/mman.h>
+
+static struct acl_role_db acl_role_set;
+static struct acl_role_label *role_list_head;
+static struct name_db name_set;
+static struct name_db inodev_set;
+
+/* for keeping track of userspace pointers used for subjects, so we
+   can share references in the kernel as well
+*/
+static struct acl_subj_map_db subj_map_set;
+
+
+static struct acl_role_label *default_role;
+
+static u16 acl_sp_role_value;
+
+static DECLARE_MUTEX(gr_dev_sem);
+rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED;
+
+extern char *gr_shared_page[4][NR_CPUS];
+struct gr_arg *gr_usermode;
+
+static unsigned long gr_status = GR_STATUS_INIT;
+
+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
+extern void gr_clear_learn_entries(void);
+
+#ifdef CONFIG_GRKERNSEC_RESLOG
+extern void gr_log_resource(const struct task_struct *task,
+			    const int res, const unsigned long wanted, const int gt);
+#endif
+
+unsigned char *gr_system_salt;
+unsigned char *gr_system_sum;
+
+static struct sprole_pw **acl_special_roles = NULL;
+static __u16 num_sprole_pws = 0;
+
+static struct acl_role_label *kernel_role = NULL;
+
+/* The following are used to keep a place held in the hash table when we move
+   entries around.  They can be replaced during insert. */
+
+static struct acl_subject_label *deleted_subject;
+static struct acl_object_label *deleted_object;
+static struct name_entry *deleted_inodev;
+
+/* for keeping track of the last and final allocated subjects, since
+   nested subject parsing is tricky
+*/
+static struct acl_subject_label *s_last = NULL;
+static struct acl_subject_label *s_final = NULL;
+
+static unsigned int gr_auth_attempts = 0;
+static unsigned long gr_auth_expires = 0UL;
+
+extern int gr_init_uidset(void);
+extern void gr_free_uidset(void);
+extern void gr_remove_uid(uid_t uid);
+extern int gr_find_uid(uid_t uid);
+
+__inline__ int
+gr_acl_is_enabled(void)
+{
+	return (gr_status & GR_READY);
+}
+
+__inline__ int
+gr_acl_tpe_check(void)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+	if (current->role->roletype & GR_ROLE_TPE)
+		return 1;
+	else
+		return 0;
+}
+
+int
+gr_handle_rawio(const struct inode *inode)
+{
+	if (inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO) &&
+	    ((gr_status & GR_READY)
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	     || (grsec_enable_chroot_caps && proc_is_chrooted(current))
+#endif
+	    ))
+		return 1;
+	return 0;
+}
+
+
+static __inline__ int
+gr_streq(const char *a, const char *b, const __u16 lena, const __u16 lenb)
+{
+	int i;
+	unsigned long *l1;
+	unsigned long *l2;
+	unsigned char *c1;
+	unsigned char *c2;
+	int num_longs;
+
+	if (likely(lena != lenb))
+		return 0;
+
+	l1 = (unsigned long *)a;
+	l2 = (unsigned long *)b;
+
+	num_longs = lena / sizeof(unsigned long);
+
+	for (i = num_longs; i--; l1++, l2++) {
+		if (unlikely(*l1 != *l2))
+			return 0;
+	}
+
+	c1 = (unsigned char *) l1;
+	c2 = (unsigned char *) l2;
+
+	i = lena - (num_longs * sizeof(unsigned long));
+
+	for (; i--; c1++, c2++) {
+		if (unlikely(*c1 != *c2))
+			return 0;
+	}
+
+	return 1;
+}
+
+static __inline__ char *
+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
+	    char *buf, int buflen)
+{
+	char *res;
+	struct dentry *our_dentry;
+	struct vfsmount *our_mount;
+	struct vfsmount *rootmnt;
+	struct dentry *root;
+
+	our_dentry = (struct dentry *) dentry;
+	our_mount = (struct vfsmount *) vfsmnt;
+
+	read_lock(&child_reaper->fs->lock);
+	rootmnt = mntget(child_reaper->fs->rootmnt);
+	root = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+
+	spin_lock(&dcache_lock);
+	res = __d_path(our_dentry, our_mount, root, rootmnt, buf, buflen);
+	spin_unlock(&dcache_lock);
+	if (unlikely(IS_ERR(res)))
+		res = strcpy(buf, "<path too long>");
+	dput(root);
+	mntput(rootmnt);
+	return res;
+}
+
+static __inline__ char *
+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
+	    char *buf, int buflen)
+{
+	char *res;
+	struct dentry *our_dentry;
+	struct vfsmount *our_mount;
+	struct vfsmount *rootmnt;
+	struct dentry *root;
+
+	our_dentry = (struct dentry *) dentry;
+	our_mount = (struct vfsmount *) vfsmnt;
+
+	read_lock(&child_reaper->fs->lock);
+	rootmnt = mntget(child_reaper->fs->rootmnt);
+	root = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+
+	res = __d_path(our_dentry, our_mount, root, rootmnt, buf, buflen);
+	if (unlikely(IS_ERR(res)))
+		res = strcpy(buf, "<path too long>");
+	dput(root);
+	mntput(rootmnt);
+	return res;
+}
+
+char *
+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return __d_real_path(dentry, mnt, gr_shared_page[0][smp_processor_id()],
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, gr_shared_page[0][smp_processor_id()],
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, gr_shared_page[1][smp_processor_id()],
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, gr_shared_page[2][smp_processor_id()],
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, gr_shared_page[3][smp_processor_id()],
+			   PAGE_SIZE);
+}
+
+__inline__ __u32
+to_gr_audit(const __u32 reqmode)
+{
+	__u32 retmode = 0;
+
+	retmode |= (reqmode & GR_READ) ? GR_AUDIT_READ : 0;
+	retmode |= (reqmode & GR_WRITE) ? GR_AUDIT_WRITE | GR_AUDIT_APPEND : 0;
+	retmode |= (reqmode & GR_APPEND) ? GR_AUDIT_APPEND : 0;
+	retmode |= (reqmode & GR_EXEC) ? GR_AUDIT_EXEC : 0;
+	retmode |= (reqmode & GR_INHERIT) ? GR_AUDIT_INHERIT : 0;
+	retmode |= (reqmode & GR_FIND) ? GR_AUDIT_FIND : 0;
+	retmode |= (reqmode & GR_SETID) ? GR_AUDIT_SETID : 0;
+	retmode |= (reqmode & GR_CREATE) ? GR_AUDIT_CREATE : 0;
+	retmode |= (reqmode & GR_DELETE) ? GR_AUDIT_DELETE : 0;
+
+	return retmode;
+}
+
+__inline__ struct acl_subject_label *
+lookup_subject_map(const struct acl_subject_label *userp)
+{
+	unsigned long index = shash(userp, subj_map_set.s_size);
+	struct subject_map *match;
+	__u8 i = 0;
+
+	match = subj_map_set.s_hash[index];
+
+	while (match && match->user != userp) {
+		index = (index + (1 << i)) % subj_map_set.s_size;
+		match = subj_map_set.s_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match)
+		return match->kernel;
+	else
+		return NULL;
+}
+
+static void
+insert_subj_map_entry(struct subject_map *subjmap)
+{
+	unsigned long index = shash(subjmap->user, subj_map_set.s_size);
+	struct subject_map **curr;
+	__u8 i = 0;
+
+	curr = &subj_map_set.s_hash[index];
+
+	while (*curr) {
+		index = (index + (1 << i)) % subj_map_set.s_size;
+		curr = &subj_map_set.s_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	*curr = subjmap;
+
+	return;
+}
+
+__inline__ struct acl_role_label *
+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
+		      const gid_t gid)
+{
+	unsigned long index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
+	struct acl_role_label *match;
+	struct role_allowed_ip *ipp;
+	__u8 i = 0;
+
+	match = acl_role_set.r_hash[index];
+
+	while (match
+	       && (match->uidgid != uid || !(match->roletype & GR_ROLE_USER))) {
+		index = (index + (1 << i)) % acl_role_set.r_size;
+		match = acl_role_set.r_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match == NULL) {
+	      try_group:
+		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
+		match = acl_role_set.r_hash[index];
+		i = 0;
+
+		while (match && (match->uidgid != gid
+			   || !(match->roletype & GR_ROLE_GROUP))) {
+			index = (index + (1 << i)) % acl_role_set.r_size;
+			match = acl_role_set.r_hash[index];
+			i = (i + 1) % 32;
+		}
+
+		if (match == NULL)
+			match = default_role;
+		if (match->allowed_ips == NULL)
+			return match;
+		else {
+			for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
+				if (likely
+				    ((task->curr_ip & ipp->netmask) ==
+				     (ipp->addr & ipp->netmask)))
+					return match;
+			}
+			match = default_role;
+		}
+	} else if (match->allowed_ips == NULL) {
+		return match;
+	} else {
+		for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
+			if (likely
+			    ((task->curr_ip & ipp->netmask) ==
+			     (ipp->addr & ipp->netmask)))
+				return match;
+		}
+		goto try_group;
+	}
+
+	return match;
+}
+
+__inline__ struct acl_subject_label *
+lookup_acl_subj_label(const ino_t ino, const kdev_t dev,
+		      const struct acl_role_label *role)
+{
+	unsigned long subj_size = role->subj_hash_size;
+	struct acl_subject_label **s_hash = role->subj_hash;
+	unsigned long index = fhash(ino, dev, subj_size);
+	struct acl_subject_label *match;
+	__u8 i = 0;
+
+	match = s_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % subj_size;
+		match = s_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match && (match != deleted_subject) && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+static __inline__ struct acl_object_label *
+lookup_acl_obj_label(const ino_t ino, const kdev_t dev,
+		     const struct acl_subject_label *subj)
+{
+	unsigned long obj_size = subj->obj_hash_size;
+	struct acl_object_label **o_hash = subj->obj_hash;
+	unsigned long index = fhash(ino, dev, obj_size);
+	struct acl_object_label *match;
+	__u8 i = 0;
+
+	match = o_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % obj_size;
+		match = o_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+static __inline__ struct acl_object_label *
+lookup_acl_obj_label_create(const ino_t ino, const kdev_t dev,
+		     const struct acl_subject_label *subj)
+{
+	unsigned long obj_size = subj->obj_hash_size;
+	struct acl_object_label **o_hash = subj->obj_hash;
+	unsigned long index = fhash(ino, dev, obj_size);
+	struct acl_object_label *match;
+	__u8 i = 0;
+
+	match = o_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       !(match->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % obj_size;
+		match = o_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match && (match != deleted_object) && (match->mode & GR_DELETED))
+		return match;
+
+	i = 0;
+	index = fhash(ino, dev, obj_size);
+	match = o_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % obj_size;
+		match = o_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+static __inline__ struct name_entry *
+lookup_name_entry(const char *name)
+{
+	__u16 len = strlen(name);
+	unsigned long index = nhash(name, len, name_set.n_size);
+	struct name_entry *match;
+	__u8 i = 0;
+
+	match = name_set.n_hash[index];
+
+	while (match && !gr_streq(match->name, name, match->len, len)) {
+		index = (index + (1 << i)) % name_set.n_size;
+		match = name_set.n_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	return match;
+}
+
+static __inline__ struct name_entry *
+lookup_inodev_entry(const ino_t ino, const kdev_t dev)
+{
+	unsigned long index = fhash(ino, dev, inodev_set.n_size);
+	struct name_entry *match;
+	__u8 i = 0;
+
+	match = inodev_set.n_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev)) {
+		index = (index + (1 << i)) % inodev_set.n_size;
+		match = inodev_set.n_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (match && (match != deleted_inodev))
+		return match;
+	else
+		return NULL;
+}
+
+static void
+insert_inodev_entry(struct name_entry *nentry)
+{
+	unsigned long index = fhash(nentry->inode, nentry->device,
+				    inodev_set.n_size);
+	struct name_entry **curr;
+	__u8 i = 0;
+
+	curr = &inodev_set.n_hash[index];
+
+	while (*curr && *curr != deleted_inodev) {
+		index = (index + (1 << i)) % inodev_set.n_size;
+		curr = &inodev_set.n_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	*curr = nentry;
+
+	return;
+}
+
+static void
+insert_acl_role_label(struct acl_role_label *role)
+{
+	unsigned long index =
+	    rhash(role->uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
+	struct acl_role_label **curr;
+	__u8 i = 0;
+
+	curr = &acl_role_set.r_hash[index];
+
+	while (*curr) {
+		index = (index + (1 << i)) % acl_role_set.r_size;
+		curr = &acl_role_set.r_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	*curr = role;
+
+	return;
+}
+
+static int
+insert_name_entry(char *name, const ino_t inode, const kdev_t device)
+{
+	struct name_entry **curr;
+	__u8 i = 0;
+	__u16 len = strlen(name);
+	unsigned long index = nhash(name, len, name_set.n_size);
+
+	curr = &name_set.n_hash[index];
+
+	while (*curr && !gr_streq((*curr)->name, name, (*curr)->len, len)) {
+		index = (index + (1 << i)) % name_set.n_size;
+		curr = &name_set.n_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (!(*curr)) {
+		struct name_entry *nentry =
+		    acl_alloc(sizeof (struct name_entry));
+		if (!nentry)
+			return 0;
+		nentry->name = name;
+		nentry->inode = inode;
+		nentry->device = device;
+		nentry->len = len;
+		*curr = nentry;
+		/* insert us into the table searchable by inode/dev */
+		insert_inodev_entry(nentry);
+	}
+
+	return 1;
+}
+
+static void
+insert_acl_obj_label(struct acl_object_label *obj,
+		     struct acl_subject_label *subj)
+{
+	unsigned long index =
+	    fhash(obj->inode, obj->device, subj->obj_hash_size);
+	struct acl_object_label **curr;
+	__u8 i = 0;
+
+	curr = &subj->obj_hash[index];
+
+	while (*curr && *curr != deleted_object) {
+		index = (index + (1 << i)) % subj->obj_hash_size;
+		curr = &subj->obj_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	*curr = obj;
+
+	return;
+}
+
+static void
+insert_acl_subj_label(struct acl_subject_label *obj,
+		      struct acl_role_label *role)
+{
+	unsigned long subj_size = role->subj_hash_size;
+	struct acl_subject_label **s_hash = role->subj_hash;
+	unsigned long index = fhash(obj->inode, obj->device, subj_size);
+	struct acl_subject_label **curr;
+	__u8 i = 0;
+
+	curr = &s_hash[index];
+
+	while (*curr && *curr != deleted_subject) {
+		index = (index + (1 << i)) % subj_size;
+		curr = &s_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	*curr = obj;
+
+	return;
+}
+
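+/* Allocate a hash table for *len entries.  The candidate sizes are
+ * primes that roughly double at each step; we pick the first one larger
+ * than twice the requested entry count, which keeps every table no more
+ * than about 50% full.  Tables that fit in a page come from kmalloc,
+ * larger ones from vmalloc, and *len is updated to the prime actually
+ * selected.
+ */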
+static void **
+create_table(__u32 * len)
+{
+	unsigned long table_sizes[] = {
+		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
+		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
+		4194301, 8388593, 16777213, 33554393, 67108859, 134217689,
+		268435399, 536870909, 1073741789, 2147483647
+	};
+	void *newtable = NULL;
+	unsigned int pwr = 0;
+
+	while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
+	       table_sizes[pwr] <= (2 * (*len)))
+		pwr++;
+
+	if (table_sizes[pwr] <= (2 * (*len)))
+		return newtable;
+
+	if ((table_sizes[pwr] * sizeof (void *)) <= PAGE_SIZE)
+		newtable =
+		    kmalloc(table_sizes[pwr] * sizeof (void *), GFP_KERNEL);
+	else
+		newtable = vmalloc(table_sizes[pwr] * sizeof (void *));
+
+	*len = table_sizes[pwr];
+
+	return newtable;
+}
+
+static int
+init_variables(const unsigned long acl_obj_size,
+	       const unsigned long acl_glob_size,
+	       const unsigned long acl_subj_size,
+	       const unsigned long acl_ip_size,
+	       const unsigned long acl_role_size,
+	       const unsigned long allowed_ip_size,
+	       const unsigned long acl_trans_size,
+	       const __u16 num_sprole_pws)
+{
+	unsigned long stacksize;
+
+	subj_map_set.s_size = acl_subj_size;
+	acl_role_set.r_size = acl_role_size;
+	name_set.n_size = (acl_obj_size + acl_subj_size);
+	inodev_set.n_size = (acl_obj_size + acl_subj_size);
+
+	if (!gr_init_uidset())
+		return 1;
+
+	/* set up the stack that holds allocation info */
+
+	stacksize = (3 * acl_obj_size) + (3 * acl_role_size) +
+	    (6 * acl_subj_size) + acl_ip_size + (2 * acl_trans_size) +
+	    allowed_ip_size + (2 * num_sprole_pws) + (2 * acl_glob_size) + 5;
+
+	if (!acl_alloc_stack_init(stacksize))
+		return 1;
+
+	/* create our empty, fake deleted acls */
+	deleted_subject =
+	    (struct acl_subject_label *)
+	    acl_alloc(sizeof (struct acl_subject_label));
+	deleted_object =
+	    (struct acl_object_label *)
+	    acl_alloc(sizeof (struct acl_object_label));
+	deleted_inodev =
+	    (struct name_entry *) acl_alloc(sizeof (struct name_entry));
+
+	if (!deleted_subject || !deleted_object || !deleted_inodev)
+		return 1;
+
+	memset(deleted_subject, 0, sizeof (struct acl_subject_label));
+	memset(deleted_object, 0, sizeof (struct acl_object_label));
+	memset(deleted_inodev, 0, sizeof (struct name_entry));
+
+	/* We only want 50% full tables for now */
+
+	subj_map_set.s_hash =
+	    (struct subject_map **) create_table(&subj_map_set.s_size);
+	acl_role_set.r_hash =
+	    (struct acl_role_label **) create_table(&acl_role_set.r_size);
+	name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size);
+	inodev_set.n_hash =
+	    (struct name_entry **) create_table(&inodev_set.n_size);
+
+	if (!subj_map_set.s_hash || !acl_role_set.r_hash || 
+	    !name_set.n_hash || !inodev_set.n_hash)
+		return 1;
+
+	memset(subj_map_set.s_hash, 0,
+	       sizeof(struct subject_map *) * subj_map_set.s_size);
+	memset(acl_role_set.r_hash, 0,
+	       sizeof (struct acl_role_label *) * acl_role_set.r_size);
+	memset(name_set.n_hash, 0,
+	       sizeof (struct name_entry *) * name_set.n_size);
+	memset(inodev_set.n_hash, 0,
+	       sizeof (struct name_entry *) * inodev_set.n_size);
+
+	return 0;
+}
+
+/* free information not needed after startup;
+   currently this contains the user->kernel pointer mappings for subjects
+*/
+
+static void
+free_init_variables(void)
+{
+	__u32 i;
+
+	if (subj_map_set.s_hash) {
+		for (i = 0; i < subj_map_set.s_size; i++) {
+			if (subj_map_set.s_hash[i]) {
+				kfree(subj_map_set.s_hash[i]);
+				subj_map_set.s_hash[i] = NULL;
+			}
+		}
+
+		if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
+		    PAGE_SIZE)
+			kfree(subj_map_set.s_hash);
+		else
+			vfree(subj_map_set.s_hash);
+	}
+
+	return;
+}
+
+static void
+free_variables(void)
+{
+	struct acl_subject_label *s;
+	struct acl_role_label *r;
+	struct task_struct *task;
+
+	gr_clear_learn_entries();
+
+	read_lock(&tasklist_lock);
+	for_each_task(task) {
+		task->acl_sp_role = 0;
+		task->acl_role_id = 0;
+		task->acl = NULL;
+		task->role = NULL;
+	}
+	read_unlock(&tasklist_lock);
+
+	/* free all object hash tables */
+
+	if (role_list_head) {
+		for (r = role_list_head; r; r = r->next) {
+			if (!r->subj_hash)
+				break;
+			for (s = r->hash->first; s; s = s->next) {
+				if (!s->obj_hash)
+					break;
+				if ((s->obj_hash_size *
+				     sizeof (struct acl_object_label *)) <=
+				    PAGE_SIZE)
+					kfree(s->obj_hash);
+				else
+					vfree(s->obj_hash);
+			}
+			if ((r->subj_hash_size *
+			     sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
+				kfree(r->subj_hash);
+			else
+				vfree(r->subj_hash);
+		}
+	}
+
+	acl_free_all();
+
+	if (acl_role_set.r_hash) {
+		if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
+		    PAGE_SIZE)
+			kfree(acl_role_set.r_hash);
+		else
+			vfree(acl_role_set.r_hash);
+	}
+	if (name_set.n_hash) {
+		if ((name_set.n_size * sizeof (struct name_entry *)) <=
+		    PAGE_SIZE)
+			kfree(name_set.n_hash);
+		else
+			vfree(name_set.n_hash);
+	}
+
+	if (inodev_set.n_hash) {
+		if ((inodev_set.n_size * sizeof (struct name_entry *)) <=
+		    PAGE_SIZE)
+			kfree(inodev_set.n_hash);
+		else
+			vfree(inodev_set.n_hash);
+	}
+
+	gr_free_uidset();
+
+	memset(&name_set, 0, sizeof (struct name_db));
+	memset(&inodev_set, 0, sizeof (struct name_db));
+	memset(&acl_role_set, 0, sizeof (struct acl_role_db));
+	memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
+
+	role_list_head = NULL;
+	default_role = NULL;
+
+	return;
+}
+
+static __u32
+count_user_objs(struct acl_object_label *userp)
+{
+	struct acl_object_label o_tmp;
+	__u32 num = 0;
+
+	while (userp) {
+		if (copy_from_user(&o_tmp, userp,
+				   sizeof (struct acl_object_label)))
+			break;
+
+		userp = o_tmp.prev;
+		num++;
+	}
+
+	return num;
+}
+
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
+
+static int
+copy_user_glob(struct acl_object_label *obj)
+{
+	struct acl_object_label *g_tmp, **guser, *glast = NULL;
+	unsigned int len;
+	char *tmp;
+
+	if (obj->globbed == NULL)
+		return 0;
+
+	guser = &obj->globbed;
+	while (*guser) {
+		g_tmp = (struct acl_object_label *)
+			acl_alloc(sizeof (struct acl_object_label));
+		if (g_tmp == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(g_tmp, *guser,
+				   sizeof (struct acl_object_label)))
+			return -EFAULT;
+
+		len = strnlen_user(g_tmp->filename, PATH_MAX);
+
+		if (!len || len >= PATH_MAX)
+			return -EINVAL;
+
+		if ((tmp = (char *) acl_alloc(len)) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(tmp, g_tmp->filename, len))
+			return -EFAULT;
+
+		g_tmp->filename = tmp;
+
+		if (glast)
+			glast->next = g_tmp;
+		g_tmp->prev = glast;
+		*guser = g_tmp;
+		glast = g_tmp;
+		guser = &((*guser)->next);
+	}
+
+	return 0;
+}
+
+static int
+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
+	       struct acl_role_label *role)
+{
+	struct acl_object_label *o_tmp;
+	unsigned int len;
+	int ret;
+	char *tmp;
+
+	while (userp) {
+		if ((o_tmp = (struct acl_object_label *)
+		     acl_alloc(sizeof (struct acl_object_label))) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(o_tmp, userp,
+				   sizeof (struct acl_object_label)))
+			return -EFAULT;
+
+		userp = o_tmp->prev;
+
+		len = strnlen_user(o_tmp->filename, PATH_MAX);
+
+		if (!len || len >= PATH_MAX)
+			return -EINVAL;
+
+		if ((tmp = (char *) acl_alloc(len)) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(tmp, o_tmp->filename, len))
+			return -EFAULT;
+
+		o_tmp->filename = tmp;
+
+		insert_acl_obj_label(o_tmp, subj);
+		if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
+				       o_tmp->device))
+			return -ENOMEM;
+
+		ret = copy_user_glob(o_tmp);
+		if (ret)
+			return ret;
+
+		if (o_tmp->nested) {
+			o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
+			if (IS_ERR(o_tmp->nested))
+				return PTR_ERR(o_tmp->nested);
+
+			s_final = o_tmp->nested;
+		}
+	}
+
+	return 0;
+}
+
+static __u32
+count_user_subjs(struct acl_subject_label *userp)
+{
+	struct acl_subject_label s_tmp;
+	__u32 num = 0;
+
+	while (userp) {
+		if (copy_from_user(&s_tmp, userp,
+				   sizeof (struct acl_subject_label)))
+			break;
+
+		userp = s_tmp.prev;
+		/* do not count nested subjects against this count, since
+		   they are not included in the hash table, but are
+		   attached to objects.  We have already counted the
+		   subjects in userspace for the allocation stack.
+		*/
+		if (!(s_tmp.mode & GR_NESTED))
+			num++;
+	}
+
+	return num;
+}
+
+static int
+copy_user_allowedips(struct acl_role_label *rolep)
+{
+	struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
+
+	ruserip = rolep->allowed_ips;
+
+	while (ruserip) {
+		rlast = rtmp;
+
+		if ((rtmp = (struct role_allowed_ip *)
+		     acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(rtmp, ruserip,
+				   sizeof (struct role_allowed_ip)))
+			return -EFAULT;
+
+		ruserip = rtmp->prev;
+
+		if (!rlast) {
+			rtmp->prev = NULL;
+			rolep->allowed_ips = rtmp;
+		} else {
+			rlast->next = rtmp;
+			rtmp->prev = rlast;
+		}
+
+		if (!ruserip)
+			rtmp->next = NULL;
+	}
+
+	return 0;
+}
+
+static int
+copy_user_transitions(struct acl_role_label *rolep)
+{
+	struct role_transition *rusertp, *rtmp = NULL, *rlast;
+	unsigned int len;
+	char *tmp;
+
+	rusertp = rolep->transitions;
+
+	while (rusertp) {
+		rlast = rtmp;
+
+		if ((rtmp = (struct role_transition *)
+		     acl_alloc(sizeof (struct role_transition))) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(rtmp, rusertp,
+				   sizeof (struct role_transition)))
+			return -EFAULT;
+
+		rusertp = rtmp->prev;
+
+		len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
+
+		if (!len || len >= GR_SPROLE_LEN)
+			return -EINVAL;
+
+		if ((tmp = (char *) acl_alloc(len)) == NULL)
+			return -ENOMEM;
+
+		if (copy_from_user(tmp, rtmp->rolename, len))
+			return -EFAULT;
+
+		rtmp->rolename = tmp;
+
+		if (!rlast) {
+			rtmp->prev = NULL;
+			rolep->transitions = rtmp;
+		} else {
+			rlast->next = rtmp;
+			rtmp->prev = rlast;
+		}
+
+		if (!rusertp)
+			rtmp->next = NULL;
+	}
+
+	return 0;
+}
+
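+/* Copy a single subject label in from userspace.  The user->kernel
+ * address pair is registered in the subject map before the contents are
+ * copied, so recursive calls for parent or nested subjects that reference
+ * the same userspace label resolve to this kernel copy instead of
+ * duplicating it.
+ */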
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
+{
+	struct acl_subject_label *s_tmp = NULL, *s_tmp2;
+	unsigned int len;
+	char *tmp;
+	__u32 num_objs;
+	struct acl_ip_label **i_tmp, *i_utmp2;
+	struct gr_hash_struct ghash;
+	struct subject_map *subjmap;
+	unsigned long i_num;
+	int err;
+
+	s_tmp = lookup_subject_map(userp);
+
+	/* we've already copied this subject into the kernel, just return
+	   the reference to it, and don't copy it over again
+	*/
+	if (s_tmp)
+		return(s_tmp);
+
+	if ((s_tmp = (struct acl_subject_label *)
+	    acl_alloc(sizeof (struct acl_subject_label))) == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
+	if (subjmap == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	subjmap->user = userp;
+	subjmap->kernel = s_tmp;
+	insert_subj_map_entry(subjmap);
+
+	if (copy_from_user(s_tmp, userp,
+			   sizeof (struct acl_subject_label)))
+		return ERR_PTR(-EFAULT);
+
+	if (!s_last) {
+		s_tmp->prev = NULL;
+		role->hash->first = s_tmp;
+	} else {
+		s_last->next = s_tmp;
+		s_tmp->prev = s_last;
+	}
+
+	s_last = s_tmp;
+
+	len = strnlen_user(s_tmp->filename, PATH_MAX);
+
+	if (!len || len >= PATH_MAX)
+		return ERR_PTR(-EINVAL);
+
+	if ((tmp = (char *) acl_alloc(len)) == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(tmp, s_tmp->filename, len))
+		return ERR_PTR(-EFAULT);
+
+	s_tmp->filename = tmp;
+
+	if (!strcmp(s_tmp->filename, "/"))
+		role->root_label = s_tmp;
+
+	if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
+		return ERR_PTR(-EFAULT);
+
+	/* copy user and group transition tables */
+
+	if (s_tmp->user_trans_num) {
+		uid_t *uidlist;
+
+		uidlist = (uid_t *)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t));
+		if (uidlist == NULL)
+			return ERR_PTR(-ENOMEM);
+		if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
+			return ERR_PTR(-EFAULT);
+
+		s_tmp->user_transitions = uidlist;
+	}
+
+	if (s_tmp->group_trans_num) {
+		gid_t *gidlist;
+
+		gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t));
+		if (gidlist == NULL)
+			return ERR_PTR(-ENOMEM);
+		if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
+			return ERR_PTR(-EFAULT);
+
+		s_tmp->group_transitions = gidlist;
+	}
+
+	/* set up object hash table */
+	num_objs = count_user_objs(ghash.first);
+
+	s_tmp->obj_hash_size = num_objs;
+	s_tmp->obj_hash =
+	    (struct acl_object_label **)
+	    create_table(&(s_tmp->obj_hash_size));
+
+	if (!s_tmp->obj_hash)
+		return ERR_PTR(-ENOMEM);
+
+	memset(s_tmp->obj_hash, 0,
+	       s_tmp->obj_hash_size *
+	       sizeof (struct acl_object_label *));
+
+	/* record this subject as the final one before adding in
+	   objects, since a nested acl could be found during the
+	   object copy and become the final subject copied
+	*/
+
+	s_final = s_tmp;
+
+	/* add in objects */
+	err = copy_user_objs(ghash.first, s_tmp, role);
+
+	if (err)
+		return ERR_PTR(err);
+
+	/* set pointer for parent subject */
+	if (s_tmp->parent_subject) {
+		s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
+
+		if (IS_ERR(s_tmp2))
+			return s_tmp2;
+
+		s_tmp->parent_subject = s_tmp2;
+	}
+
+	/* add in ip acls */
+
+	if (!s_tmp->ip_num) {
+		s_tmp->ips = NULL;
+		goto insert;
+	}
+
+	i_tmp =
+	    (struct acl_ip_label **) acl_alloc(s_tmp->ip_num *
+					       sizeof (struct
+						       acl_ip_label *));
+
+	if (!i_tmp)
+		return ERR_PTR(-ENOMEM);
+
+	for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
+		*(i_tmp + i_num) =
+		    (struct acl_ip_label *)
+		    acl_alloc(sizeof (struct acl_ip_label));
+		if (!*(i_tmp + i_num))
+			return ERR_PTR(-ENOMEM);
+
+		if (copy_from_user
+		    (&i_utmp2, s_tmp->ips + i_num,
+		     sizeof (struct acl_ip_label *)))
+			return ERR_PTR(-EFAULT);
+
+		if (copy_from_user
+		    (*(i_tmp + i_num), i_utmp2,
+		     sizeof (struct acl_ip_label)))
+			return ERR_PTR(-EFAULT);
+	}
+
+	s_tmp->ips = i_tmp;
+
+insert:
+	if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
+			       s_tmp->device))
+		return ERR_PTR(-ENOMEM);
+
+	return s_tmp;
+}
+
+static int
+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
+{
+	struct acl_subject_label s_pre;
+	struct acl_subject_label * ret;
+	int err;
+
+	while (userp) {
+		if (copy_from_user(&s_pre, userp,
+				   sizeof (struct acl_subject_label)))
+			return -EFAULT;
+
+		/* do not add nested subjects here; they are added
+		   while parsing objects
+		*/
+
+		if (s_pre.mode & GR_NESTED) {
+			userp = s_pre.prev;
+			continue;
+		}
+
+		ret = do_copy_user_subj(userp, role);
+
+		err = PTR_ERR(ret);
+		if (IS_ERR(ret))
+			return err;
+
+		insert_acl_subj_label(ret, role);
+
+		userp = s_pre.prev;
+	}
+
+	s_final->next = NULL;
+
+	return 0;
+}
+
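+/* Top-level copy of the policy from userspace: the special role
+ * authentication blocks come first, then each role is copied along with
+ * its allowed IPs, role transitions and its full subject/object tree.
+ */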
+static int
+copy_user_acl(struct gr_arg *arg)
+{
+	struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2, *r_last;
+	struct sprole_pw *sptmp;
+	struct gr_hash_struct *ghash;
+	unsigned long r_num;
+	unsigned int len;
+	char *tmp;
+	int err = 0;
+	__u16 i;
+	__u32 num_subjs;
+
+	/* we need a default and kernel role */
+	if (arg->role_db.r_entries < 2)
+		return -EINVAL;
+
+	/* copy special role authentication info from userspace */
+
+	num_sprole_pws = arg->num_sprole_pws;
+	acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *));
+
+	if (!acl_special_roles) {
+		err = -ENOMEM;
+		goto cleanup;
+	}
+
+	for (i = 0; i < num_sprole_pws; i++) {
+		sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
+		if (!sptmp) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+		if (copy_from_user(sptmp, arg->sprole_pws + i,
+				   sizeof (struct sprole_pw))) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+
+		len =
+		    strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
+
+		if (!len || len >= GR_SPROLE_LEN) {
+			err = -EINVAL;
+			goto cleanup;
+		}
+
+		if ((tmp = (char *) acl_alloc(len)) == NULL) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (copy_from_user(tmp, sptmp->rolename, len)) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+
+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
+		printk(KERN_ALERT "Copying special role %s\n", tmp);
+#endif
+		sptmp->rolename = tmp;
+		acl_special_roles[i] = sptmp;
+	}
+
+	r_utmp = (struct acl_role_label **) arg->role_db.r_table;
+
+	for (r_num = 0; r_num < arg->role_db.r_entries; r_num++) {
+		r_last = r_tmp;
+
+		r_tmp = acl_alloc(sizeof (struct acl_role_label));
+
+		if (!r_tmp) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (copy_from_user(&r_utmp2, r_utmp + r_num,
+				   sizeof (struct acl_role_label *))) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+
+		if (copy_from_user(r_tmp, r_utmp2,
+				   sizeof (struct acl_role_label))) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+
+		if (!r_last) {
+			r_tmp->prev = NULL;
+			role_list_head = r_tmp;
+		} else {
+			r_last->next = r_tmp;
+			r_tmp->prev = r_last;
+		}
+
+		if (r_num == (arg->role_db.r_entries - 1))
+			r_tmp->next = NULL;
+
+		len = strnlen_user(r_tmp->rolename, PATH_MAX);
+
+		if (!len || len >= PATH_MAX) {
+			err = -EINVAL;
+			goto cleanup;
+		}
+
+		if ((tmp = (char *) acl_alloc(len)) == NULL) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+		if (copy_from_user(tmp, r_tmp->rolename, len)) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+		r_tmp->rolename = tmp;
+
+		if (!strcmp(r_tmp->rolename, "default")
+		    && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
+			default_role = r_tmp;
+		} else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
+			kernel_role = r_tmp;
+		}
+
+		if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+		if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
+			err = -EFAULT;
+			goto cleanup;
+		}
+
+		r_tmp->hash = ghash;
+
+		num_subjs = count_user_subjs(r_tmp->hash->first);
+
+		r_tmp->subj_hash_size = num_subjs;
+		r_tmp->subj_hash =
+		    (struct acl_subject_label **)
+		    create_table(&(r_tmp->subj_hash_size));
+
+		if (!r_tmp->subj_hash) {
+			err = -ENOMEM;
+			goto cleanup;
+		}
+
+		err = copy_user_allowedips(r_tmp);
+		if (err)
+			goto cleanup;
+
+		err = copy_user_transitions(r_tmp);
+		if (err)
+			goto cleanup;
+
+		memset(r_tmp->subj_hash, 0,
+		       r_tmp->subj_hash_size *
+		       sizeof (struct acl_subject_label *));
+
+		s_last = NULL;
+
+		err = copy_user_subjs(r_tmp->hash->first, r_tmp);
+
+		if (err)
+			goto cleanup;
+
+		insert_acl_role_label(r_tmp);
+	}
+
+      cleanup:
+	return err;
+
+}
+
+static int
+gracl_init(struct gr_arg *args)
+{
+	int error = 0;
+
+	memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
+	memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
+
+	if (init_variables(args->role_db.o_entries, args->role_db.g_entries,
+			   args->role_db.s_entries, args->role_db.i_entries,
+			   args->role_db.r_entries, args->role_db.a_entries,
+			   args->role_db.t_entries, args->num_sprole_pws)) {
+		security_alert_good(GR_INITF_ACL_MSG, GR_VERSION);
+		error = -ENOMEM;
+		free_variables();
+		goto out;
+	}
+
+	error = copy_user_acl(args);
+	free_init_variables();
+	if (error) {
+		free_variables();
+		goto out;
+	}
+
+	if ((error = gr_set_acls(0))) {
+		free_variables();
+		goto out;
+	}
+
+	gr_status |= GR_READY;
+      out:
+	return error;
+}
+
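+/* Minimal glob matcher supporting '?' (any one character) and '*' (any
+ * run of characters).  Note the strcmp-style return convention: 0 means
+ * the pattern matched, non-zero means it did not, so callers test with
+ * !glob_match().
+ */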
+static int
+glob_match(char *pattern, char *string)
+{
+	char *p1, *p2;
+
+	p1 = pattern;
+	p2 = string;
+
+	while (*p1 != '\0' && *p2 != '\0' && *p1 != '*') {
+		if (*p1 == *p2 || *p1 == '?') {
+			p1++;
+			p2++;
+		} else
+			break;
+	}
+	if (*p1 == '*') {
+		p1++;
+		while (*p2 != '\0') {
+			if (!glob_match(p1, p2))
+				return 0;
+			else
+				p2++;
+		}
+	}
+
+	if (*p2 == '\0' && *p1 == '*')
+		while (*p1 == '*')
+			p1++;
+
+	if (*p1 == '\0' && *p2 == '\0')
+		return 0;
+	else
+		return 1;
+}
+
+static struct acl_object_label *
+chk_glob_label(struct acl_object_label *globbed,
+		struct dentry *dentry, struct vfsmount *mnt, char **path)
+{
+	struct acl_object_label *tmp;
+
+	if (*path == NULL)
+		*path = gr_to_filename_nolock(dentry, mnt);
+
+	tmp = globbed;
+
+	while (tmp) {
+		if (!glob_match(tmp->filename, *path))
+			return tmp;
+		tmp = tmp->next;
+	}
+
+	return NULL;
+}
+
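+/* Look up the object label for an inode under a subject, walking up the
+ * parent_subject chain until one of the subjects supplies a label.  If
+ * the label carries glob patterns, a matching globbed entry overrides it.
+ */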
+static __inline__ struct acl_object_label *
+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
+	    struct dentry *curr_dentry,
+	    const struct acl_subject_label *subj, char **path)
+{
+	struct acl_subject_label *tmpsubj;
+	struct acl_object_label *retval;
+	struct acl_object_label *retval2;
+
+	tmpsubj = (struct acl_subject_label *) subj;
+	read_lock(&gr_inode_lock);
+	do {
+		retval =
+		    	lookup_acl_obj_label(curr_dentry->d_inode->i_ino,
+					 curr_dentry->d_inode->i_dev, tmpsubj);
+		if (retval) {
+			if (retval->globbed) {
+				retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
+						  (struct vfsmount *)orig_mnt, path);
+				if (retval2)
+					retval = retval2;
+			}
+			break;
+		}
+	} while ((tmpsubj = tmpsubj->parent_subject));
+	read_unlock(&gr_inode_lock);
+
+	return retval;
+}
+
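+/* Find the object label governing a path by walking the dentry from the
+ * object itself up toward the root (crossing mount points), so the most
+ * specific label along the path wins; the root label is the final
+ * fallback.  chk_obj_create_label and chk_subj_label below follow the
+ * same walk.
+ */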
+static struct acl_object_label *
+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	      const struct acl_subject_label *subj)
+{
+	struct dentry *dentry = (struct dentry *) l_dentry;
+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
+	struct dentry *root;
+	struct vfsmount *rootmnt;
+	struct acl_object_label *retval;
+	char *path = NULL;
+
+	read_lock(&child_reaper->fs->lock);
+	rootmnt = mntget(child_reaper->fs->rootmnt);
+	root = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+	spin_lock(&dcache_lock);
+
+	for (;;) {
+		if (dentry == root && mnt == rootmnt)
+			break;
+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
+			if (mnt->mnt_parent == mnt)
+				break;
+
+			retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+			if (retval != NULL)
+				goto out;
+
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			continue;
+		}
+
+		retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+		if (retval != NULL)
+			goto out;
+
+		dentry = dentry->d_parent;
+	}
+
+	retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+
+	if (retval == NULL)
+		retval = full_lookup(l_dentry, l_mnt, root, subj, &path);
+      out:
+	spin_unlock(&dcache_lock);
+	dput(root);
+	mntput(rootmnt);
+
+	return retval;
+}
+
+static struct acl_object_label *
+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	      const struct acl_subject_label *subj, char *path)
+{
+	struct dentry *dentry = (struct dentry *) l_dentry;
+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
+	struct dentry *root;
+	struct vfsmount *rootmnt;
+	struct acl_object_label *retval;
+
+	read_lock(&child_reaper->fs->lock);
+	rootmnt = mntget(child_reaper->fs->rootmnt);
+	root = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+	spin_lock(&dcache_lock);
+
+	for (;;) {
+		if (dentry == root && mnt == rootmnt)
+			break;
+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
+			if (mnt->mnt_parent == mnt)
+				break;
+
+			retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+			if (retval != NULL)
+				goto out;
+
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			continue;
+		}
+
+		retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+		if (retval != NULL)
+			goto out;
+
+		dentry = dentry->d_parent;
+	}
+
+	retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
+
+	if (retval == NULL)
+		retval = full_lookup(l_dentry, l_mnt, root, subj, &path);
+      out:
+	spin_unlock(&dcache_lock);
+	dput(root);
+	mntput(rootmnt);
+
+	return retval;
+}
+
+static struct acl_subject_label *
+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	       const struct acl_role_label *role)
+{
+	struct dentry *dentry = (struct dentry *) l_dentry;
+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
+	struct dentry *root;
+	struct vfsmount *rootmnt;
+	struct acl_subject_label *retval;
+
+	read_lock(&child_reaper->fs->lock);
+	rootmnt = mntget(child_reaper->fs->rootmnt);
+	root = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+	spin_lock(&dcache_lock);
+
+	for (;;) {
+		if (unlikely(dentry == root && mnt == rootmnt))
+			break;
+		if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
+			if (mnt->mnt_parent == mnt)
+				break;
+
+			read_lock(&gr_inode_lock);
+			retval =
+			    lookup_acl_subj_label(dentry->d_inode->i_ino,
+						  dentry->d_inode->i_dev, role);
+			read_unlock(&gr_inode_lock);
+			if (unlikely(retval != NULL))
+				goto out;
+
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			continue;
+		}
+
+		read_lock(&gr_inode_lock);
+		retval =
+		    lookup_acl_subj_label(dentry->d_inode->i_ino,
+					  dentry->d_inode->i_dev, role);
+		read_unlock(&gr_inode_lock);
+		if (unlikely(retval != NULL))
+			goto out;
+
+		dentry = dentry->d_parent;
+	}
+
+	read_lock(&gr_inode_lock);
+	retval =
+	    lookup_acl_subj_label(dentry->d_inode->i_ino,
+				  dentry->d_inode->i_dev, role);
+	read_unlock(&gr_inode_lock);
+
+	if (unlikely(retval == NULL)) {
+		read_lock(&gr_inode_lock);
+		retval =
+		    lookup_acl_subj_label(root->d_inode->i_ino,
+					  root->d_inode->i_dev, role);
+		read_unlock(&gr_inode_lock);
+	}
+      out:
+	spin_unlock(&dcache_lock);
+	dput(root);
+	mntput(rootmnt);
+
+	return retval;
+}
+
+static __inline__ void
+gr_log_learn(const struct acl_role_label *role, const uid_t uid, const gid_t gid,
+	     const struct task_struct *task, const char *pathname,
+	     const __u32 mode)
+{
+	security_learn(GR_LEARN_AUDIT_MSG, role->rolename, role->roletype,
+		       uid, gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
+		       task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
+		       1, 1, pathname, (unsigned long) mode, NIPQUAD(task->curr_ip));
+
+	return;
+}
+
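+/* Hardlink check: creating a link must not grant more access than the
+ * subject already has on the source.  The link is allowed only when the
+ * mode permitted on the new path includes everything permitted on the
+ * old one.
+ */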
+__u32
+gr_check_link(const struct dentry * new_dentry,
+	      const struct dentry * parent_dentry,
+	      const struct vfsmount * parent_mnt,
+	      const struct dentry * old_dentry, const struct vfsmount * old_mnt)
+{
+	struct acl_object_label *obj;
+	__u32 oldmode, newmode;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (GR_WRITE | GR_CREATE);
+
+	obj = chk_obj_label(old_dentry, old_mnt, current->acl);
+	oldmode = obj->mode;
+
+	if (current->acl->mode & GR_LEARN)
+		oldmode |= (GR_WRITE | GR_CREATE);
+	newmode =
+	    gr_check_create(new_dentry, parent_dentry, parent_mnt,
+			    oldmode | GR_CREATE | GR_AUDIT_CREATE |
+			    GR_AUDIT_WRITE | GR_SUPPRESS);
+
+	if ((newmode & oldmode) == oldmode)
+		return newmode;
+	else if (current->acl->mode & GR_LEARN) {
+		gr_log_learn(current->role, current->uid, current->gid,
+			current, gr_to_filename(old_dentry, old_mnt), oldmode);
+		return (GR_WRITE | GR_CREATE);
+	} else if (newmode & GR_SUPPRESS)
+		return GR_SUPPRESS;
+	else
+		return 0;
+}
+
+__u32
+gr_search_file(const struct dentry * dentry, const __u32 mode,
+	       const struct vfsmount * mnt)
+{
+	__u32 retval = mode;
+	struct acl_subject_label *curracl;
+	struct acl_object_label *currobj;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (mode & ~GR_AUDITS);
+
+	curracl = current->acl;
+
+	currobj = chk_obj_label(dentry, mnt, curracl);
+	retval = currobj->mode & mode;
+
+	if (unlikely
+	    ((curracl->mode & GR_LEARN) && !(mode & GR_NOPTRACE)
+	     && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
+		__u32 new_mode = mode;
+
+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+		retval = new_mode;
+
+		if (!(mode & GR_NOLEARN))
+			gr_log_learn(current->role, current->uid, current->gid,
+				     current, gr_to_filename(dentry, mnt), new_mode);
+	}
+
+	return retval;
+}
+
+__u32
+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
+		const struct vfsmount * mnt, const __u32 mode)
+{
+	struct name_entry *match;
+	struct acl_object_label *matchpo;
+	struct acl_subject_label *curracl;
+	char *path;
+	__u32 retval;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (mode & ~GR_AUDITS);
+
+	path = gr_to_filename(new_dentry, mnt);
+	match = lookup_name_entry(path);
+
+	if (!match)
+		goto check_parent;
+
+	curracl = current->acl;
+
+	read_lock(&gr_inode_lock);
+	matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
+	read_unlock(&gr_inode_lock);
+
+	if (matchpo) {
+		if ((matchpo->mode & mode) !=
+		    (mode & ~(GR_AUDITS | GR_SUPPRESS))
+		    && curracl->mode & GR_LEARN) {
+			__u32 new_mode = mode;
+
+			new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+			gr_log_learn(current->role, current->uid, current->gid,
+				     current, gr_to_filename(new_dentry, mnt), new_mode);
+
+			return new_mode;
+		}
+		return (matchpo->mode & mode);
+	}
+
+      check_parent:
+	curracl = current->acl;
+
+	matchpo = chk_obj_create_label(parent, mnt, curracl, path);
+	retval = matchpo->mode & mode;
+
+	if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
+	    && (curracl->mode & GR_LEARN)) {
+		__u32 new_mode = mode;
+
+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+		gr_log_learn(current->role, current->uid, current->gid, 
+			     current, gr_to_filename(new_dentry, mnt), new_mode);
+		return new_mode;
+	}
+
+	return retval;
+}
+
+int
+gr_check_hidden_task(const struct task_struct *task)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (!(task->acl->mode & GR_FIND) && !(current->acl->mode & GR_VIEW))
+		return 1;
+
+	return 0;
+}
+
+int
+gr_check_protected_task(const struct task_struct *task)
+{
+	if (unlikely(!(gr_status & GR_READY) || !task))
+		return 0;
+
+	if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL))
+		return 1;
+
+	return 0;
+}
+
+__inline__ void
+gr_copy_label(struct task_struct *tsk)
+{
+	tsk->used_accept = 0;
+	tsk->acl_sp_role = 0;
+	tsk->acl_role_id = current->acl_role_id;
+	tsk->acl = current->acl;
+	tsk->role = current->role;
+	tsk->curr_ip = current->curr_ip;
+	if (current->exec_file)
+		get_file(current->exec_file);
+	tsk->exec_file = current->exec_file;
+	tsk->is_writable = current->is_writable;
+	if (unlikely(current->used_accept))
+		current->curr_ip = 0;
+
+	return;
+}
+
+static __inline__ void
+gr_set_proc_res(void)
+{
+	struct acl_subject_label *proc;
+	unsigned short i;
+
+	proc = current->acl;
+
+	if (proc->mode & GR_LEARN)
+		return;
+
+	for (i = 0; i < RLIM_NLIMITS; i++) {
+		if (!(proc->resmask & (1 << i)))
+			continue;
+
+		current->rlim[i].rlim_cur = proc->res[i].rlim_cur;
+		current->rlim[i].rlim_max = proc->res[i].rlim_max;
+	}
+
+	return;
+}
+
+void
+gr_set_pax_flags(struct task_struct *task)
+{
+	struct acl_subject_label *proc;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	proc = task->acl;
+
+	if (proc->mode & GR_PAXPAGE)
+		task->flags &= ~PF_PAX_PAGEEXEC;
+	if (proc->mode & GR_PAXSEGM)
+		task->flags &= ~PF_PAX_SEGMEXEC;
+	if (proc->mode & GR_PAXGCC)
+		task->flags |= PF_PAX_EMUTRAMP;
+	if (proc->mode & GR_PAXMPROTECT)
+		task->flags &= ~PF_PAX_MPROTECT;
+	if (proc->mode & GR_PAXRANDMMAP)
+		task->flags &= ~PF_PAX_RANDMMAP;
+	if (proc->mode & GR_PAXRANDEXEC)
+		task->flags |= PF_PAX_RANDEXEC;
+
+	return;
+}
+
+static __inline__ void
+do_set_role_label(struct task_struct *task, const uid_t uid, const gid_t gid)
+{
+	task->role = lookup_acl_role_label(task, uid, gid);
+
+	return;
+}
+
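+/* Enforce the subject's uid transition table: with GR_ID_ALLOW every
+ * requested id must appear in the list, with GR_ID_DENY none may.  An id
+ * of -1 means "unchanged" and is always permitted.  Returns 0 if the
+ * change is allowed, 1 (after logging an alert) if it is denied.
+ * gr_check_group_change below is the gid counterpart.
+ */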
+int
+gr_check_user_change(int real, int effective, int fs)
+{
+	unsigned int i;
+	__u16 num;
+	uid_t *uidlist;
+	int curuid;
+	int realok = 0;
+	int effectiveok = 0;
+	int fsok = 0;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	num = current->acl->user_trans_num;
+	uidlist = current->acl->user_transitions;
+
+	if (uidlist == NULL)
+		return 0;
+
+	if (real == -1)
+		realok = 1;
+	if (effective == -1)
+		effectiveok = 1;
+	if (fs == -1)
+		fsok = 1;
+
+	if (current->acl->user_trans_type & GR_ID_ALLOW) {
+		for (i = 0; i < num; i++) {
+			curuid = (int)uidlist[i];
+			if (real == curuid)
+				realok = 1;
+			if (effective == curuid)
+				effectiveok = 1;
+			if (fs == curuid)
+				fsok = 1;
+		}
+	} else if (current->acl->user_trans_type & GR_ID_DENY) {
+		for (i = 0; i < num; i++) {
+			curuid = (int)uidlist[i];
+			if (real == curuid)
+				break;
+			if (effective == curuid)
+				break;
+			if (fs == curuid)
+				break;
+		}
+		/* not in deny list */
+		if (i == num) {
+			realok = 1;
+			effectiveok = 1;
+			fsok = 1;
+		}
+	}
+
+	if (realok && effectiveok && fsok)
+		return 0;
+	else {
+		security_alert(GR_USRCHANGE_ACL_MSG,
+			realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
+		return 1;
+	}
+}
+
+int
+gr_check_group_change(int real, int effective, int fs)
+{
+	unsigned int i;
+	__u16 num;
+	gid_t *gidlist;
+	int curgid;
+	int realok = 0;
+	int effectiveok = 0;
+	int fsok = 0;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	num = current->acl->group_trans_num;
+	gidlist = current->acl->group_transitions;
+
+	if (gidlist == NULL)
+		return 0;
+
+	if (real == -1)
+		realok = 1;
+	if (effective == -1)
+		effectiveok = 1;
+	if (fs == -1)
+		fsok = 1;
+
+	if (current->acl->group_trans_type & GR_ID_ALLOW) {
+		for (i = 0; i < num; i++) {
+			curgid = (int)gidlist[i];
+			if (real == curgid)
+				realok = 1;
+			if (effective == curgid)
+				effectiveok = 1;
+			if (fs == curgid)
+				fsok = 1;
+		}
+	} else if (current->acl->group_trans_type & GR_ID_DENY) {
+		for (i = 0; i < num; i++) {
+			curgid = (int)gidlist[i];
+			if (real == curgid)
+				break;
+			if (effective == curgid)
+				break;
+			if (fs == curgid)
+				break;
+		}
+		/* not in deny list */
+		if (i == num) {
+			realok = 1;
+			effectiveok = 1;
+			fsok = 1;
+		}
+	}
+
+	if (realok && effectiveok && fsok)
+		return 0;
+	else {
+		security_alert(GR_GRPCHANGE_ACL_MSG,
+			realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
+		return 1;
+	}
+}
+
+void
+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
+{
+	struct acl_object_label *obj;
+	struct file *filp;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	filp = task->exec_file;
+
+	/* kernel process, we'll give them the kernel role */
+	if (unlikely(!filp)) {
+		task->role = kernel_role;
+		task->acl = kernel_role->root_label;
+		return;
+	} else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
+		do_set_role_label(task, uid, gid);
+
+	task->acl =
+	    chk_subj_label(filp->f_dentry, filp->f_vfsmnt, task->role);
+
+	task->is_writable = 0;
+
+	/* ignore additional mmap checks for processes that are writable 
+	   by the default ACL */
+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+
+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
+	printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
+#endif
+
+	gr_set_proc_res();
+
+	return;
+}
+
+void
+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	struct acl_subject_label *newacl;
+	struct acl_object_label *obj;
+	__u32 retmode;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	newacl = chk_subj_label(dentry, mnt, current->role);
+
+	obj = chk_obj_label(dentry, mnt, current->acl);
+	retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
+
+	if ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT)) {
+		if (obj->nested)
+			current->acl = obj->nested;
+		else
+			current->acl = newacl;
+	} else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
+		security_audit(GR_INHERIT_ACL_MSG, current->acl->filename,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+
+	current->is_writable = 0;
+
+	/* ignore additional mmap checks for processes that are writable 
+	   by the default ACL */
+	obj = chk_obj_label(dentry, mnt, default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		current->is_writable = 1;
+	obj = chk_obj_label(dentry, mnt, current->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		current->is_writable = 1;
+
+	gr_set_proc_res();
+
+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
+	printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", current->comm, current->pid, current->role->rolename, current->acl->filename);
+#endif
+	return;
+}
+
+static __inline__ void
+do_handle_delete(const ino_t ino, const kdev_t dev)
+{
+	struct acl_object_label *matchpo;
+	struct acl_subject_label *matchps;
+	struct acl_subject_label *i;
+	struct acl_role_label *role;
+
+	for (role = role_list_head; role; role = role->next) {
+		for (i = role->hash->first; i; i = i->next) {
+			if (unlikely((i->mode & GR_NESTED) &&
+				     (i->inode == ino) &&
+				     (i->device == dev)))
+				i->mode |= GR_DELETED;
+			if (unlikely((matchpo =
+			     lookup_acl_obj_label(ino, dev, i)) != NULL))
+				matchpo->mode |= GR_DELETED;
+		}
+
+		if (unlikely((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL))
+			matchps->mode |= GR_DELETED;
+	}
+
+	return;
+}
+
+void
+gr_handle_delete(const ino_t ino, const kdev_t dev)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	write_lock(&gr_inode_lock);
+	if (unlikely((unsigned long)lookup_inodev_entry(ino, dev)))
+		do_handle_delete(ino, dev);
+	write_unlock(&gr_inode_lock);
+
+	return;
+}
+
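+/* Rename support: since the hash index is derived from inode and device,
+ * a renamed entry cannot be updated in place.  The old slot is replaced
+ * with a tombstone and the entry, rewritten with the new inode/device,
+ * is re-inserted into the table.
+ */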
+static __inline__ void
+update_acl_obj_label(const ino_t oldinode, const kdev_t olddevice,
+		     const ino_t newinode, const kdev_t newdevice,
+		     struct acl_subject_label *subj)
+{
+	unsigned long index = fhash(oldinode, olddevice, subj->obj_hash_size);
+	struct acl_object_label **match;
+	struct acl_object_label *tmp;
+	__u8 i = 0;
+
+	match = &subj->obj_hash[index];
+
+	while (*match && ((*match)->inode != oldinode ||
+	       (*match)->device != olddevice ||
+	       !((*match)->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % subj->obj_hash_size;
+		match = &subj->obj_hash[index];
+		i = (i + 1) % 32;
+	}
+
+	if (*match && ((*match) != deleted_object)
+	    && ((*match)->inode == oldinode)
+	    && ((*match)->device == olddevice)
+	    && ((*match)->mode & GR_DELETED)) {
+		tmp = *match;
+		tmp->inode = newinode;
+		tmp->device = newdevice;
+		tmp->mode &= ~GR_DELETED;
+
+		*match = deleted_object;
+
+		insert_acl_obj_label(tmp, subj);
+	}
+
+	return;
+}
+
+static __inline__ void
+update_acl_subj_label(const ino_t oldinode, const kdev_t olddevice,
+		      const ino_t newinode, const kdev_t newdevice,
+		      struct acl_role_label *role)
+{
+	struct acl_subject_label **s_hash = role->subj_hash;
+	unsigned long subj_size = role->subj_hash_size;
+	unsigned long index = fhash(oldinode, olddevice, subj_size);
+	struct acl_subject_label **match;
+	struct acl_subject_label *tmp;
+	__u8 i = 0;
+
+	match = &s_hash[index];
+
+	while (*match && ((*match)->inode != oldinode ||
+	       (*match)->device != olddevice ||
+	       !((*match)->mode & GR_DELETED))) {
+		index = (index + (1 << i)) % subj_size;
+		i = (i + 1) % 32;
+		match = &s_hash[index];
+	}
+
+	if (*match && (*match != deleted_subject)
+	    && ((*match)->inode == oldinode)
+	    && ((*match)->device == olddevice)
+	    && ((*match)->mode & GR_DELETED)) {
+		tmp = *match;
+
+		tmp->inode = newinode;
+		tmp->device = newdevice;
+		tmp->mode &= ~GR_DELETED;
+
+		*match = deleted_subject;
+
+		insert_acl_subj_label(tmp, role);
+	}
+
+	return;
+}
+
+static __inline__ void
+update_inodev_entry(const ino_t oldinode, const kdev_t olddevice,
+		    const ino_t newinode, const kdev_t newdevice)
+{
+	unsigned long index = fhash(oldinode, olddevice, inodev_set.n_size);
+	struct name_entry **match;
+	struct name_entry *tmp;
+	__u8 i = 0;
+
+	match = &inodev_set.n_hash[index];
+
+	while (*match
+	       && ((*match)->inode != oldinode
+		   || (*match)->device != olddevice)) {
+		index = (index + (1 << i)) % inodev_set.n_size;
+		i = (i + 1) % 32;
+		match = &inodev_set.n_hash[index];
+	}
+
+	if (*match && (*match != deleted_inodev)
+	    && ((*match)->inode == oldinode)
+	    && ((*match)->device == olddevice)) {
+		tmp = *match;
+
+		tmp->inode = newinode;
+		tmp->device = newdevice;
+
+		*match = deleted_inodev;
+
+		insert_inodev_entry(tmp);
+	}
+
+	return;
+}
+
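+/* A path named in the loaded policy has just been (re)created: migrate
+ * every subject and object label recorded under the old inode/device in
+ * matchn to the inode/device of the new dentry.
+ */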
+static __inline__ void
+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
+		 const struct vfsmount *mnt)
+{
+	struct acl_subject_label *i;
+	struct acl_role_label *role;
+
+	for (role = role_list_head; role; role = role->next) {
+		update_acl_subj_label(matchn->inode, matchn->device,
+				      dentry->d_inode->i_ino,
+				      dentry->d_inode->i_dev, role);
+
+		for (i = role->hash->first; i; i = i->next) {
+			if (unlikely((i->mode & GR_NESTED) &&
+				     (i->inode == dentry->d_inode->i_ino) &&
+				     (i->device == dentry->d_inode->i_dev))) {
+				i->inode = dentry->d_inode->i_ino;
+				i->device = dentry->d_inode->i_dev;
+			}
+			update_acl_obj_label(matchn->inode, matchn->device,
+					     dentry->d_inode->i_ino,
+					     dentry->d_inode->i_dev, i);
+		}
+	}
+
+	update_inodev_entry(matchn->inode, matchn->device,
+			    dentry->d_inode->i_ino, dentry->d_inode->i_dev);
+
+	return;
+}
+
+void
+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	struct name_entry *matchn;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	matchn = lookup_name_entry(gr_to_filename(dentry, mnt));
+
+	if (unlikely((unsigned long)matchn)) {
+		write_lock(&gr_inode_lock);
+		do_handle_create(matchn, dentry, mnt);
+		write_unlock(&gr_inode_lock);
+	}
+
+	return;
+}
+
+int
+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+		 struct dentry *old_dentry,
+		 struct dentry *new_dentry,
+		 struct vfsmount *mnt, const __u8 replace)
+{
+	struct name_entry *matchn;
+	int error = 0;
+
+	matchn = lookup_name_entry(gr_to_filename(new_dentry, mnt));
+
+	lock_kernel();
+	error = vfs_rename(old_dir, old_dentry, new_dir, new_dentry);
+	unlock_kernel();
+
+	if (unlikely(error))
+		return error;
+
+	/* we wouldn't have to check d_inode if it weren't for
+	   NFS silly-renaming
+	 */
+
+	write_lock(&gr_inode_lock);
+	if (unlikely(replace && new_dentry->d_inode)) {
+		if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino,
+					new_dentry->d_inode->i_dev) &&
+		    (old_dentry->d_inode->i_nlink <= 1)))
+			do_handle_delete(new_dentry->d_inode->i_ino,
+					 new_dentry->d_inode->i_dev);
+	}
+
+	if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino,
+				old_dentry->d_inode->i_dev) &&
+	    (old_dentry->d_inode->i_nlink <= 1)))
+		do_handle_delete(old_dentry->d_inode->i_ino,
+				 old_dentry->d_inode->i_dev);
+
+	if (unlikely((unsigned long)matchn))
+		do_handle_create(matchn, old_dentry, mnt);
+	write_unlock(&gr_inode_lock);
+
+	return error;
+}
+
+static int
+lookup_special_role_auth(const char *rolename, unsigned char **salt,
+			 unsigned char **sum)
+{
+	struct acl_role_label *r;
+	struct role_transition *trans;
+	__u16 i;
+	int found = 0;
+
+	/* check transition table */
+
+	for (trans = current->role->transitions; trans; trans = trans->next) {
+		if (!strcmp(rolename, trans->rolename)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+
+	/* handle special roles that do not require authentication */
+
+	for (r = role_list_head; r; r = r->next) {
+		if (!strcmp(rolename, r->rolename)
+		    && (r->roletype & GR_ROLE_NOPW)) {
+			*salt = NULL;
+			*sum = NULL;
+			return 1;
+		}
+	}
+
+	for (i = 0; i < num_sprole_pws; i++) {
+		if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
+			*salt = acl_special_roles[i]->salt;
+			*sum = acl_special_roles[i]->sum;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+assign_special_role(char *rolename)
+{
+	struct acl_object_label *obj;
+	struct acl_role_label *r;
+	struct acl_role_label *assigned = NULL;
+	struct task_struct *tsk;
+	struct file *filp;
+
+	for (r = role_list_head; r; r = r->next)
+		if (!strcmp(rolename, r->rolename) &&
+		    (r->roletype & GR_ROLE_SPECIAL))
+			assigned = r;
+
+	if (!assigned)
+		return;
+
+	tsk = current->p_pptr;
+	filp = tsk->exec_file;
+
+	if (tsk && filp) {
+		tsk->is_writable = 0;
+
+		acl_sp_role_value = (acl_sp_role_value % 65535) + 1;
+		tsk->acl_sp_role = 1;
+		tsk->acl_role_id = acl_sp_role_value;
+		tsk->role = assigned;
+		tsk->acl =
+		    chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role);
+
+		/* ignore additional mmap checks for processes that are writable 
+		   by the default ACL */
+		obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
+		if (unlikely(obj->mode & GR_WRITE))
+			tsk->is_writable = 1;
+		obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label);
+		if (unlikely(obj->mode & GR_WRITE))
+			tsk->is_writable = 1;
+
+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
+		printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
+#endif
+	}
+
+	return;
+}
+
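+/* Entry point for the /dev/grsec interface: userspace writes exactly one
+ * struct gr_arg.  The requested mode (STATUS, SHUTDOWN, ENABLE, RELOAD,
+ * SEGVMOD, SPROLE, UNSPROLE) is dispatched, most modes only after the
+ * password is verified with chkpw().  Failed attempts are counted, and
+ * after CONFIG_GRKERNSEC_ACL_MAXTRIES failures the interface is locked
+ * for CONFIG_GRKERNSEC_ACL_TIMEOUT seconds.
+ */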
+ssize_t
+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
+{
+	struct gr_arg *arg;
+	unsigned char *sprole_salt;
+	unsigned char *sprole_sum;
+	int error = sizeof (struct gr_arg);
+	int error2 = 0;
+
+	down(&gr_dev_sem);
+
+	arg = (struct gr_arg *) buf;
+
+	if (count != sizeof (struct gr_arg)) {
+		security_alert_good(GR_DEV_ACL_MSG, count,
+				    (int) sizeof (struct gr_arg));
+		error = -EINVAL;
+		goto out;
+	}
+
+	if ((gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
+	    && time_before_eq(gr_auth_expires, jiffies)) {
+		gr_auth_expires = 0;
+		gr_auth_attempts = 0;
+	}
+
+	if (copy_from_user(gr_usermode, arg, sizeof (struct gr_arg))) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	if (gr_usermode->mode != SPROLE && time_after(gr_auth_expires, jiffies)) {
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* if a non-root user is trying to do anything other than use a
+	   special role, do not attempt authentication and do not count
+	   the attempt towards authentication locking
+	 */
+
+	if (gr_usermode->mode != SPROLE && current->uid) {
+		error = -EPERM;
+		goto out;
+	}
+
+	/* ensure pw and special role name are null terminated */
+
+	gr_usermode->pw[GR_PW_LEN - 1] = '\0';
+	gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
+
+	/* Okay.
+	 * We have enough of the argument structure (we have yet to
+	 * copy_from_user the tables themselves).  Copy the tables
+	 * only if we need them, i.e. for loading operations. */
+
+	switch (gr_usermode->mode) {
+	case STATUS:
+		if (gr_status & GR_READY)
+			error = 1;
+		else
+			error = 2;
+		goto out;
+	case SHUTDOWN:
+		if ((gr_status & GR_READY)
+		    && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
+			gr_status &= ~GR_READY;
+			security_alert_good(GR_SHUTS_ACL_MSG, DEFAULTSECARGS);
+			free_variables();
+			memset(gr_usermode, 0, sizeof (struct gr_arg));
+			memset(gr_system_salt, 0, GR_SALT_LEN);
+			memset(gr_system_sum, 0, GR_SHA_LEN);
+		} else if (gr_status & GR_READY) {
+			security_alert(GR_SHUTF_ACL_MSG, DEFAULTSECARGS);
+			error = -EPERM;
+		} else {
+			security_alert_good(GR_SHUTI_ACL_MSG, DEFAULTSECARGS);
+			error = -EAGAIN;
+		}
+		break;
+	case ENABLE:
+		if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
+			security_alert_good(GR_ENABLE_ACL_MSG, GR_VERSION);
+		else {
+			if (gr_status & GR_READY)
+				error = -EAGAIN;
+			else
+				error = error2;
+			security_alert(GR_ENABLEF_ACL_MSG, GR_VERSION,
+				       DEFAULTSECARGS);
+		}
+		break;
+	case RELOAD:
+		if (!(gr_status & GR_READY)) {
+			security_alert_good(GR_RELOADI_ACL_MSG);
+			error = -EAGAIN;
+		} else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
+			lock_kernel();
+			gr_status &= ~GR_READY;
+			free_variables();
+			if (!(error2 = gracl_init(gr_usermode))) {
+				unlock_kernel();
+				security_alert_good(GR_RELOAD_ACL_MSG,
+						    GR_VERSION);
+			} else {
+				unlock_kernel();
+				error = error2;
+				security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
+					       DEFAULTSECARGS);
+			}
+		} else {
+			security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
+				       DEFAULTSECARGS);
+			error = -EPERM;
+		}
+		break;
+	case SEGVMOD:
+		if (unlikely(!(gr_status & GR_READY))) {
+			security_alert_good(GR_SEGVMODI_ACL_MSG,
+					    DEFAULTSECARGS);
+			error = -EAGAIN;
+			break;
+		}
+
+		if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
+			security_alert_good(GR_SEGVMODS_ACL_MSG,
+					    DEFAULTSECARGS);
+			if (gr_usermode->segv_device && gr_usermode->segv_inode) {
+				struct acl_subject_label *segvacl;
+				segvacl =
+				    lookup_acl_subj_label(gr_usermode->segv_inode,
+							  gr_usermode->segv_device,
+							  current->role);
+				if (segvacl) {
+					segvacl->crashes = 0;
+					segvacl->expires = 0;
+				}
+			} else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
+				gr_remove_uid(gr_usermode->segv_uid);
+			}
+		} else {
+			security_alert(GR_SEGVMODF_ACL_MSG, DEFAULTSECARGS);
+			error = -EPERM;
+		}
+		break;
+	case SPROLE:
+		if (unlikely(!(gr_status & GR_READY))) {
+			security_alert_good(GR_SPROLEI_ACL_MSG, DEFAULTSECARGS);
+			error = -EAGAIN;
+			break;
+		}
+
+		if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
+		    && time_before_eq(current->role->expires, jiffies)) {
+			current->role->expires = 0;
+			current->role->auth_attempts = 0;
+		}
+
+		if (time_after(current->role->expires, jiffies)) {
+			error = -EBUSY;
+			goto out;
+		}
+
+		if (lookup_special_role_auth
+		    (gr_usermode->sp_role, &sprole_salt, &sprole_sum)
+		    && ((!sprole_salt && !sprole_sum)
+			|| !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
+			assign_special_role(gr_usermode->sp_role);
+			security_alert_good(GR_SPROLES_ACL_MSG,
+					    (current->p_pptr) ? current->
+					    p_pptr->role->rolename : "",
+					    acl_sp_role_value, DEFAULTSECARGS);
+		} else {
+			security_alert(GR_SPROLEF_ACL_MSG, gr_usermode->sp_role,
+				       DEFAULTSECARGS);
+			error = -EPERM;
+			current->role->auth_attempts++;
+			if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
+				current->role->expires =
+				    jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ;
+				security_alert(GR_MAXROLEPW_ACL_MSG,
+				       CONFIG_GRKERNSEC_ACL_MAXTRIES,
+				       gr_usermode->sp_role, DEFAULTSECARGS);
+			}
+
+			goto out;
+		}
+		break;
+	case UNSPROLE:
+		if (unlikely(!(gr_status & GR_READY))) {
+			security_alert_good(GR_UNSPROLEI_ACL_MSG, DEFAULTSECARGS);
+			error = -EAGAIN;
+			break;
+		}
+
+		if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
+		    && time_before_eq(current->role->expires, jiffies)) {
+			current->role->expires = 0;
+			current->role->auth_attempts = 0;
+		}
+
+		if (time_after(current->role->expires, jiffies)) {
+			error = -EBUSY;
+			goto out;
+		}
+
+		if ((current->role->roletype & GR_ROLE_SPECIAL) && 
+		    lookup_special_role_auth
+		    (current->role->rolename, &sprole_salt, &sprole_sum)
+		    && ((!sprole_salt && !sprole_sum)
+			|| !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
+			security_alert_good(GR_UNSPROLES_ACL_MSG,
+					    (current->p_pptr) ? current->
+					    p_pptr->role->rolename : "",
+					    (current->p_pptr) ? current->
+					    p_pptr->acl_role_id : 0, DEFAULTSECARGS);
+			gr_set_acls(1);
+			if (current->p_pptr)
+				current->p_pptr->acl_sp_role = 0;
+		} else {
+			security_alert(GR_UNSPROLEF_ACL_MSG, current->role->rolename,
+				       DEFAULTSECARGS);
+			error = -EPERM;
+			current->role->auth_attempts++;
+			if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
+				current->role->expires =
+				    jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ;
+				security_alert(GR_MAXROLEPW_ACL_MSG,
+				       CONFIG_GRKERNSEC_ACL_MAXTRIES,
+				       current->role->rolename, DEFAULTSECARGS);
+			}
+
+			goto out;
+		}
+		break;
+	default:
+		security_alert(GR_INVMODE_ACL_MSG, gr_usermode->mode,
+			       DEFAULTSECARGS);
+		error = -EINVAL;
+		break;
+	}
+
+	if (error != -EPERM)
+		goto out;
+
+	gr_auth_attempts++;
+
+	if (gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
+		security_alert(GR_MAXPW_ACL_MSG, CONFIG_GRKERNSEC_ACL_MAXTRIES);
+		gr_auth_expires = jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ;
+	}
+
+      out:
+	up(&gr_dev_sem);
+	return error;
+}
+
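+/* Relabel every task on the system.  With type == 0 (policy load) all
+ * tasks are relabeled; with type != 0 (leaving a special role) only
+ * tasks still carrying the caller's role and role id are touched.
+ * Kernel threads, which have no exec_file, get the kernel role.
+ */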
+int
+gr_set_acls(const int type)
+{
+	struct acl_object_label *obj;
+	struct task_struct *task;
+	struct file *filp;
+	unsigned short i;
+
+	read_lock(&tasklist_lock);
+	for_each_task(task) {
+		/* check to see if we're called from the exit handler;
+		   if so, only replace ACLs that have inherited the admin
+		   ACL */
+
+		if (type && (task->role != current->role ||
+			     task->acl_role_id != current->acl_role_id))
+			continue;
+
+		task->acl_role_id = 0;
+
+		if ((filp = task->exec_file)) {
+			do_set_role_label(task, task->uid, task->gid);
+
+			task->acl =
+			    chk_subj_label(filp->f_dentry, filp->f_vfsmnt,
+					   task->role);
+			if (task->acl) {
+				struct acl_subject_label *curr;
+				curr = task->acl;
+
+				task->is_writable = 0;
+				/* ignore additional mmap checks for processes that are writable 
+				   by the default ACL */
+				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
+				if (unlikely(obj->mode & GR_WRITE))
+					task->is_writable = 1;
+				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
+				if (unlikely(obj->mode & GR_WRITE))
+					task->is_writable = 1;
+
+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
+				printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
+#endif
+				if (!(curr->mode & GR_LEARN))
+					for (i = 0; i < RLIM_NLIMITS; i++) {
+						if (!(curr->resmask & (1 << i)))
+							continue;
+
+						task->rlim[i].rlim_cur =
+						    curr->res[i].rlim_cur;
+						task->rlim[i].rlim_max =
+						    curr->res[i].rlim_max;
+					}
+			} else {
+				read_unlock(&tasklist_lock);
+				security_alert_good(GR_DEFACL_MSG, task->comm,
+						    task->pid);
+				return 1;
+			}
+		} else {
+			// it's a kernel process
+			task->role = kernel_role;
+			task->acl = kernel_role->root_label;
+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
+			task->acl->mode &= ~GR_FIND;
+#endif
+		}
+	}
+	read_unlock(&tasklist_lock);
+	return 0;
+}
+
+void
+gr_learn_resource(const struct task_struct *task,
+		  const int res, const unsigned long wanted, const int gt)
+{
+	struct acl_subject_label *acl;
+
+	if (unlikely((gr_status & GR_READY) &&
+		     task->acl && (task->acl->mode & GR_LEARN)))
+		goto skip_reslog;
+
+#ifdef CONFIG_GRKERNSEC_RESLOG
+	gr_log_resource(task, res, wanted, gt);
+#endif
+      skip_reslog:
+
+	if (unlikely(!(gr_status & GR_READY) || !wanted))
+		return;
+
+	acl = task->acl;
+
+	if (likely(!acl || !(acl->mode & GR_LEARN) ||
+		   !(acl->resmask & (1 << (unsigned short) res))))
+		return;
+
+	if (wanted >= acl->res[res].rlim_cur) {
+		unsigned long res_add;
+
+		res_add = wanted;
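+		/* pad the learned limit with a per-resource bump so the
+		   generated policy leaves the subject some headroom */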
+		switch (res) {
+		case RLIMIT_CPU:
+			res_add += GR_RLIM_CPU_BUMP;
+			break;
+		case RLIMIT_FSIZE:
+			res_add += GR_RLIM_FSIZE_BUMP;
+			break;
+		case RLIMIT_DATA:
+			res_add += GR_RLIM_DATA_BUMP;
+			break;
+		case RLIMIT_STACK:
+			res_add += GR_RLIM_STACK_BUMP;
+			break;
+		case RLIMIT_CORE:
+			res_add += GR_RLIM_CORE_BUMP;
+			break;
+		case RLIMIT_RSS:
+			res_add += GR_RLIM_RSS_BUMP;
+			break;
+		case RLIMIT_NPROC:
+			res_add += GR_RLIM_NPROC_BUMP;
+			break;
+		case RLIMIT_NOFILE:
+			res_add += GR_RLIM_NOFILE_BUMP;
+			break;
+		case RLIMIT_MEMLOCK:
+			res_add += GR_RLIM_MEMLOCK_BUMP;
+			break;
+		case RLIMIT_AS:
+			res_add += GR_RLIM_AS_BUMP;
+			break;
+		case RLIMIT_LOCKS:
+			res_add += GR_RLIM_LOCKS_BUMP;
+			break;
+		}
+
+		acl->res[res].rlim_cur = res_add;
+
+		if (wanted > acl->res[res].rlim_max)
+			acl->res[res].rlim_max = res_add;
+
+		security_learn(GR_LEARN_AUDIT_MSG, current->role->rolename,
+			       current->role->roletype, acl->filename,
+			       acl->res[res].rlim_cur, acl->res[res].rlim_max,
+			       "", (unsigned long) res);
+	}
+
+	return;
+}
+
+#ifdef CONFIG_SYSCTL
+extern struct proc_dir_entry *proc_sys_root;
+
+__u32
+gr_handle_sysctl(const struct ctl_table *table, const void *oldval,
+		 const void *newval)
+{
+	struct proc_dir_entry *tmp;
+	struct nameidata nd;
+	const char *proc_sys = "/proc/sys";
+	char *path = gr_shared_page[0][smp_processor_id()];
+	struct acl_object_label *obj;
+	unsigned short len = 0, pos = 0, depth = 0, i;
+	__u32 err = 0;
+	__u32 mode = 0;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 1;
+
+	if (oldval)
+		mode |= GR_READ;
+	if (newval)
+		mode |= GR_WRITE;
+
+	/* convert the requested sysctl entry into a pathname */
+
+	for (tmp = table->de; tmp != proc_sys_root; tmp = tmp->parent) {
+		len += strlen(tmp->name);
+		len++;
+		depth++;
+	}
+
+	if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE)
+		return 0;	// deny
+
+	memset(path, 0, PAGE_SIZE);
+
+	memcpy(path, proc_sys, strlen(proc_sys));
+
+	pos += strlen(proc_sys);
+
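+	/* rebuild the path one component per pass: each pass re-walks the
+	   parent chain to find the component at the current depth, which
+	   avoids needing a temporary stack */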
+	for (; depth > 0; depth--) {
+		path[pos] = '/';
+		pos++;
+		for (i = 1, tmp = table->de; tmp != proc_sys_root;
+		     tmp = tmp->parent) {
+			if (depth == i) {
+				memcpy(path + pos, tmp->name,
+				       strlen(tmp->name));
+				pos += strlen(tmp->name);
+			}
+			i++;
+		}
+	}
+
+	if (path_init(path, LOOKUP_FOLLOW, &nd))
+		err = path_walk(path, &nd);
+
+	if (err)
+		goto out;
+
+	obj = chk_obj_label(nd.dentry, nd.mnt, current->acl);
+	err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
+
+	if (unlikely((current->acl->mode & GR_LEARN) && ((err & mode) != mode))) {
+		__u32 new_mode = mode;
+
+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+		err = new_mode;
+		gr_log_learn(current->role, current->uid, current->gid,
+			     current, path, new_mode);
+	} else if ((err & mode) != mode && !(err & GR_SUPPRESS)) {
+		security_alert(GR_SYSCTL_ACL_MSG, "denied", path,
+			       (mode & GR_READ) ? " reading" : "",
+			       (mode & GR_WRITE) ? " writing" : "",
+			       DEFAULTSECARGS);
+		err = 0;
+	} else if ((err & mode) != mode) {
+		err = 0;
+	} else if (((err & mode) == mode) && (err & GR_AUDITS)) {
+		security_audit(GR_SYSCTL_ACL_MSG, "successful",
+			       path, (mode & GR_READ) ? " reading" : "",
+			       (mode & GR_WRITE) ? " writing" : "",
+			       DEFAULTSECARGS);
+	}
+
+	path_release(&nd);
+
+      out:
+	return err;
+}
+#endif
+
+int
+gr_handle_proc_ptrace(struct task_struct *task)
+{
+	struct file *filp;
+	struct task_struct *tmp = task;
+	struct task_struct *curtemp = current;
+	__u32 retmode;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	filp = task->exec_file;
+
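+	/* walk the target's parent chain; if we never reach current, the
+	   target is not one of our descendants */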
+	read_lock(&tasklist_lock);
+	while (tmp->pid > 0) {
+		if (tmp == curtemp)
+			break;
+		tmp = tmp->p_pptr;
+	}
+	read_unlock(&tasklist_lock);
+
+	if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))
+		return 1;
+
+	retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt);
+
+	if (retmode & GR_NOPTRACE)
+		return 1;
+
+	if (!(current->acl->mode & GR_OVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
+	    && (current->acl != task->acl || (current->acl != current->role->root_label
+	    && current->pid != task->pid)))
+		return 1;
+
+	return 0;
+}
+
+int
+gr_handle_ptrace(struct task_struct *task, const long request)
+{
+	struct file *filp;
+	struct task_struct *tmp = task;
+	struct task_struct *curtemp = current;
+	__u32 retmode;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	filp = task->exec_file;
+
+	if (task->acl->mode & GR_NOPTRACE) {
+		security_alert(GR_PTRACE_ACL_MSG, filp ? 
+			       gr_to_filename(filp->f_dentry, filp->f_vfsmnt)
+			       : "(none)", task->comm, task->pid, 
+			       DEFAULTSECARGS);
+		return 1;
+	}
+
+	read_lock(&tasklist_lock);
+	while (tmp->pid > 0) {
+		if (tmp == curtemp)
+			break;
+		tmp = tmp->p_pptr;
+	}
+	read_unlock(&tasklist_lock);
+
+	if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) {
+		security_alert(GR_PTRACE_ACL_MSG, filp ? 
+			       gr_to_filename(filp->f_dentry, filp->f_vfsmnt)
+			       : "(none)", task->comm, task->pid, 
+			       DEFAULTSECARGS);
+		return 1;
+	}
+
+	if (unlikely(!filp))
+		return 0;
+
+	retmode = gr_search_file(filp->f_dentry, GR_PTRACERD | GR_NOPTRACE, filp->f_vfsmnt);
+
+	if (retmode & GR_NOPTRACE) {
+		security_alert(GR_PTRACE_ACL_MSG, gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
+			       task->comm, task->pid, DEFAULTSECARGS);
+		return 1;
+	}
+
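+	/* GR_PTRACERD grants read-only tracing: requests that would
+	   modify the target (pokes and register writes) are refused */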
+	if (retmode & GR_PTRACERD) {
+		switch (request) {
+		case PTRACE_POKETEXT:
+		case PTRACE_POKEDATA:
+		case PTRACE_POKEUSR:
+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA)
+		case PTRACE_SETREGS:
+		case PTRACE_SETFPREGS:
+#endif
+#ifdef CONFIG_X86
+		case PTRACE_SETFPXREGS:
+#endif
+#ifdef CONFIG_ALTIVEC
+		case PTRACE_SETVRREGS:
+#endif
+			return 1;
+		default:
+			return 0;
+		}
+	} else if (!(current->acl->mode & GR_OVERRIDE) &&
+		   !(current->role->roletype & GR_ROLE_GOD)
+		   && (current->acl != task->acl
+		       || (current->acl != current->role->root_label
+			   && current->pid != task->pid))) {
+		security_alert(GR_PTRACE_ACL_MSG,
+			       gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
+			       task->comm, task->pid, DEFAULTSECARGS);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+gr_handle_ptrace_exec(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	__u32 retmode;
+	struct acl_subject_label *subj;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (unlikely
+	    ((current->ptrace & PT_PTRACED)
+	     && !(current->acl->mode & GR_OVERRIDE)))
+		retmode = gr_search_file(dentry, GR_PTRACERD, mnt);
+	else
+		return 0;
+
+	subj = chk_subj_label(dentry, mnt, current->role);
+
+	if (!(retmode & GR_PTRACERD) &&
+	    !(current->role->roletype & GR_ROLE_GOD) &&
+	    (current->acl != subj)) {
+		security_alert(GR_PTRACE_EXEC_ACL_MSG,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+gr_handle_mmap(const struct file *filp, const unsigned long prot)
+{
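+	/* deny PROT_EXEC mappings of files left writable by the default
+	   or role root ACLs; tasks whose own binary is already writable
+	   (is_writable) are exempt */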
+	struct acl_object_label *obj, *obj2;
+
+	if (unlikely(!(gr_status & GR_READY) ||
+		     (current->acl->mode & GR_OVERRIDE) || !filp ||
+		     !(prot & PROT_EXEC)))
+		return 0;
+
+	if (unlikely(current->is_writable))
+		return 0;
+
+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
+	obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt,
+			     current->role->root_label);
+	if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
+		security_alert(GR_WRITLIB_ACL_MSG,
+			       gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
+{
+	__u32 mode;
+
+	if (unlikely(!file || !(prot & PROT_EXEC)))
+		return 1;
+
+	mode =
+	    gr_search_file(file->f_dentry,
+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
+			   file->f_vfsmnt);
+
+	if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
+		security_alert(GR_MMAP_ACL_MSG, "denied",
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
+		return 0;
+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
+		security_audit(GR_MMAP_ACL_MSG, "successful",
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 1;
+	}
+
+	return 1;
+}
+
+int
+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
+{
+	__u32 mode;
+
+	if (unlikely(!file || !(prot & PROT_EXEC)))
+		return 1;
+
+	mode =
+	    gr_search_file(file->f_dentry,
+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
+			   file->f_vfsmnt);
+
+	if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
+		security_alert(GR_MPROTECT_ACL_MSG, "denied",
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
+		return 0;
+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
+		security_audit(GR_MPROTECT_ACL_MSG, "successful",
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 1;
+	}
+
+	return 1;
+}
+
+void
+gr_acl_handle_psacct(struct task_struct *task, const long code)
+{
+	unsigned long runtime;
+	unsigned long cputime;
+	unsigned int wday, cday;
+	__u8 whr, chr;
+	__u8 wmin, cmin;
+	__u8 wsec, csec;
+	char cur_tty[64] = { 0 };
+	char parent_tty[64] = { 0 };
+
+	if (unlikely(!(gr_status & GR_READY) || !task->acl ||
+		     !(task->acl->mode & GR_PROCACCT)))
+		return;
+
+	runtime = (jiffies - task->start_time) / HZ;
+	wday = runtime / (3600 * 24);
+	runtime -= wday * (3600 * 24);
+	whr = runtime / 3600;
+	runtime -= whr * 3600;
+	wmin = runtime / 60;
+	runtime -= wmin * 60;
+	wsec = runtime;
+
+	cputime = (task->times.tms_utime + task->times.tms_stime) / HZ;
+	cday = cputime / (3600 * 24);
+	cputime -= cday * (3600 * 24);
+	chr = cputime / 3600;
+	cputime -= chr * 3600;
+	cmin = cputime / 60;
+	cputime -= cmin * 60;
+	csec = cputime;
+
+	security_audit(GR_ACL_PROCACCT_MSG, gr_task_fullpath(task), task->comm,
+		       task->pid, NIPQUAD(task->curr_ip), tty_name(task->tty,
+								   cur_tty),
+		       task->uid, task->euid, task->gid, task->egid, wday, whr,
+		       wmin, wsec, cday, chr, cmin, csec,
+		       (task->flags & PF_SIGNALED) ?
+		       "killed by signal" : "exited",
+		       code, gr_parent_task_fullpath(task), 
+		       task->p_pptr->comm, task->p_pptr->pid,
+		       NIPQUAD(task->p_pptr->curr_ip),
+		       tty_name(task->p_pptr->tty, parent_tty),
+		       task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid,
+		       task->p_pptr->egid);
+
+	return;
+}
+
+void gr_set_kernel_label(struct task_struct *task)
+{
+	if (gr_status & GR_READY) {
+		task->role = kernel_role;
+		task->acl = kernel_role->root_label;
+	}
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_cap.c linux-2.4.26-leo/grsecurity/gracl_cap.c
--- linux-2.4.26/grsecurity/gracl_cap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_cap.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,86 @@
+/* capability handling routines, (c) Brad Spengler 2002,2003 */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
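+/* capability names for logging, indexed by capability number
+   (CAP_CHOWN == 0 through CAP_LEASE == 28) */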
+static const char *captab_log[29] = {
+	"CAP_CHOWN",
+	"CAP_DAC_OVERRIDE",
+	"CAP_DAC_READ_SEARCH",
+	"CAP_FOWNER",
+	"CAP_FSETID",
+	"CAP_KILL",
+	"CAP_SETGID",
+	"CAP_SETUID",
+	"CAP_SETPCAP",
+	"CAP_LINUX_IMMUTABLE",
+	"CAP_NET_BIND_SERVICE",
+	"CAP_NET_BROADCAST",
+	"CAP_NET_ADMIN",
+	"CAP_NET_RAW",
+	"CAP_IPC_LOCK",
+	"CAP_IPC_OWNER",
+	"CAP_SYS_MODULE",
+	"CAP_SYS_RAWIO",
+	"CAP_SYS_CHROOT",
+	"CAP_SYS_PTRACE",
+	"CAP_SYS_PACCT",
+	"CAP_SYS_ADMIN",
+	"CAP_SYS_BOOT",
+	"CAP_SYS_NICE",
+	"CAP_SYS_RESOURCE",
+	"CAP_SYS_TIME",
+	"CAP_SYS_TTY_CONFIG",
+	"CAP_MKNOD",
+	"CAP_LEASE"
+};
+
+int
+gr_task_is_capable(struct task_struct *task, const int cap)
+{
+	struct acl_subject_label *curracl;
+	__u32 cap_drop = 0, cap_mask = 0;
+
+	if (!gr_acl_is_enabled())
+		return 1;
+
+	curracl = task->acl;
+
+	cap_drop = curracl->cap_lower;
+	cap_mask = curracl->cap_mask;
+
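+	/* walk up the subject hierarchy, folding in capabilities lowered
+	   by ancestor subjects according to each level's capability mask */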
+	while ((curracl = curracl->parent_subject)) {
+		cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask);
+		cap_mask |= curracl->cap_mask;
+	}
+
+	if (!cap_raised(cap_drop, cap))
+		return 1;
+
+	curracl = task->acl;
+
+	if ((curracl->mode & GR_LEARN)
+	    && cap_raised(task->cap_effective, cap)) {
+		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
+			       task->role->roletype, task->uid,
+			       task->gid, task->exec_file ?
+			       gr_to_filename(task->exec_file->f_dentry,
+			       task->exec_file->f_vfsmnt) : curracl->filename,
+			       curracl->filename, 0UL,
+			       0UL, "", (unsigned long) cap, NIPQUAD(task->curr_ip));
+		return 1;
+	}
+
+	if ((cap >= 0) && (cap < 29) && cap_raised(task->cap_effective, cap))
+		security_alert(GR_CAP_ACL_MSG, captab_log[cap], 
+				gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, 
+				task->gid, task->egid, gr_parent_task_fullpath(task), 
+				task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, 
+				task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid);
+
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_fs.c linux-2.4.26-leo/grsecurity/gracl_fs.c
--- linux-2.4.26/grsecurity/gracl_fs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_fs.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,469 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/gracl.h>
+
+__u32
+gr_acl_handle_hidden_file(const struct dentry * dentry,
+			  const struct vfsmount * mnt)
+{
+	__u32 mode;
+
+	if (unlikely(!dentry->d_inode))
+		return GR_FIND;
+
+	mode =
+	    gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
+
+	if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
+		security_audit(GR_HIDDEN_ACL_MSG, "successful",
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return mode;
+	} else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
+		security_alert(GR_HIDDEN_ACL_MSG, "denied",
+			       gr_to_filename(dentry, mnt),
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely(!(mode & GR_FIND)))
+		return 0;
+
+	return GR_FIND;
+}
+
+__u32
+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
+		   const int fmode)
+{
+	__u32 reqmode = GR_FIND;
+	__u32 mode;
+
+	if (unlikely(!dentry->d_inode))
+		return reqmode;
+
+	if (unlikely(fmode & O_APPEND))
+		reqmode |= GR_APPEND;
+	else if (unlikely(fmode & FMODE_WRITE))
+		reqmode |= GR_WRITE;
+	if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
+		reqmode |= GR_READ;
+
+	mode =
+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
+			   mnt);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		security_audit(GR_OPEN_ACL_MSG, "successful",
+			       gr_to_filename(dentry, mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" :
+			       reqmode & GR_APPEND ? " appending" : "",
+			       DEFAULTSECARGS);
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		security_alert(GR_OPEN_ACL_MSG, "denied",
+			       gr_to_filename(dentry, mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "", DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+__u32
+gr_acl_handle_creat(const struct dentry * dentry,
+		    const struct dentry * p_dentry,
+		    const struct vfsmount * p_mnt, const int fmode,
+		    const int imode)
+{
+	__u32 reqmode = GR_WRITE | GR_CREATE;
+	__u32 mode;
+
+	if (unlikely(fmode & O_APPEND))
+		reqmode |= GR_APPEND;
+	if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
+		reqmode |= GR_READ;
+	if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
+		reqmode |= GR_SETID;
+
+	mode =
+	    gr_check_create(dentry, p_dentry, p_mnt,
+			    reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		security_audit(GR_CREATE_ACL_MSG, "successful",
+			       gr_to_filename(dentry, p_mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" :
+			       reqmode & GR_APPEND ? " appending" : "",
+			       DEFAULTSECARGS);
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		security_alert(GR_CREATE_ACL_MSG, "denied",
+			       gr_to_filename(dentry, p_mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "", DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+__u32
+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
+		     const int fmode)
+{
+	__u32 mode, reqmode = GR_FIND;
+
+	if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
+		reqmode |= GR_EXEC;
+	if (fmode & S_IWOTH)
+		reqmode |= GR_WRITE;
+	if (fmode & S_IROTH)
+		reqmode |= GR_READ;
+
+	mode =
+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
+			   mnt);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		security_audit(GR_ACCESS_ACL_MSG, "successful",
+			       gr_to_filename(dentry, mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : "",
+			       reqmode & GR_EXEC ? " executing" : "",
+			       DEFAULTSECARGS);
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		security_alert(GR_ACCESS_ACL_MSG, "denied",
+			       gr_to_filename(dentry, mnt),
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : "",
+			       reqmode & GR_EXEC ? " executing" : "",
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+#define generic_fs_handler(dentry, mnt, reqmode, fmt) \
+{ \
+	__u32 mode; \
+	\
+	mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); \
+	\
+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
+		security_audit(fmt, "successful", \
+				gr_to_filename(dentry, mnt), DEFAULTSECARGS); \
+		return mode; \
+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
+		security_alert(fmt, "denied", gr_to_filename(dentry, mnt), \
+				DEFAULTSECARGS); \
+		return 0; \
+	} else if (unlikely((mode & (reqmode)) != (reqmode))) \
+		return 0; \
+	\
+	return (reqmode); \
+}
+
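+/* each wrapper below uses generic_fs_handler as its entire body: the
+   macro looks up the subject's ACL mode for the dentry, emits an audit
+   or alert as configured, and returns the granted mode (0 on denial) */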
+__u32
+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
+		     mode_t mode)
+{
+	if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
+		generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
+				   GR_FCHMOD_ACL_MSG);
+	} else {
+		generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
+	}
+}
+
+__u32
+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
+		    mode_t mode)
+{
+	if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
+		generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
+				   GR_CHMOD_ACL_MSG);
+	} else {
+		generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
+	}
+}
+
+__u32
+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
+			   GR_UNIXCONNECT_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_filldir(const struct dentry *dentry, const struct vfsmount *mnt,
+		      const ino_t ino)
+{
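+	/* fake up a dentry/inode pair on the stack that carries the
+	   directory entry's inode number, so the ACL lookup can match the
+	   child without a full dentry lookup */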
+	if (likely((unsigned long)(dentry->d_inode))) {
+		struct dentry d = *dentry;
+		struct inode inode = *(dentry->d_inode);
+
+		inode.i_ino = ino;
+		d.d_inode = &inode;
+
+		if (unlikely(!gr_search_file(&d, GR_FIND | GR_NOLEARN, mnt)))
+			return 0;
+	}
+
+	return 1;
+}
+
+__u32
+gr_acl_handle_link(const struct dentry * new_dentry,
+		   const struct dentry * parent_dentry,
+		   const struct vfsmount * parent_mnt,
+		   const struct dentry * old_dentry,
+		   const struct vfsmount * old_mnt, const char *to)
+{
+	__u32 needmode = GR_WRITE | GR_CREATE;
+	__u32 mode;
+
+	mode =
+	    gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
+			  old_mnt);
+
+	if (unlikely(((mode & needmode) == needmode) && mode & GR_AUDITS)) {
+		security_audit(GR_LINK_ACL_MSG, "successful",
+			       gr_to_filename(old_dentry, old_mnt), to,
+			       DEFAULTSECARGS);
+		return mode;
+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
+		security_alert(GR_LINK_ACL_MSG, "denied",
+			       gr_to_filename(old_dentry, old_mnt), to,
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely((mode & needmode) != needmode))
+		return 0;
+
+	return (GR_WRITE | GR_CREATE);
+}
+
+__u32
+gr_acl_handle_symlink(const struct dentry * new_dentry,
+		      const struct dentry * parent_dentry,
+		      const struct vfsmount * parent_mnt, const char *from)
+{
+	__u32 needmode = GR_WRITE | GR_CREATE;
+	__u32 mode;
+
+	mode =
+	    gr_check_create(new_dentry, parent_dentry, parent_mnt,
+			    GR_CREATE | GR_AUDIT_CREATE |
+			    GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
+
+	if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
+		security_audit(GR_SYMLINK_ACL_MSG, "successful",
+			       from, gr_to_filename(new_dentry, parent_mnt),
+			       DEFAULTSECARGS);
+		return mode;
+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
+		security_alert(GR_SYMLINK_ACL_MSG, "denied",
+			       from, gr_to_filename(new_dentry, parent_mnt),
+			       DEFAULTSECARGS);
+		return 0;
+	} else if (unlikely((mode & needmode) != needmode))
+		return 0;
+
+	return (GR_WRITE | GR_CREATE);
+}
+
+#define generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, reqmode, fmt) \
+{ \
+	__u32 mode; \
+	\
+	mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); \
+	\
+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
+		security_audit(fmt, "successful", \
+				gr_to_filename(new_dentry, parent_mnt), \
+				DEFAULTSECARGS); \
+		return mode; \
+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
+		security_alert(fmt, "denied", \
+				gr_to_filename(new_dentry, parent_mnt), \
+				DEFAULTSECARGS); \
+		return 0; \
+	} else if (unlikely((mode & (reqmode)) != (reqmode))) \
+		return 0; \
+	\
+	return (reqmode); \
+}
+
+__u32
+gr_acl_handle_mknod(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt,
+		    const int mode)
+{
+	__u32 reqmode = GR_WRITE | GR_CREATE;
+	if (unlikely(mode & (S_ISUID | S_ISGID)))
+		reqmode |= GR_SETID;
+
+	generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
+				  reqmode, GR_MKNOD_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_mkdir(const struct dentry *new_dentry,
+		    const struct dentry *parent_dentry,
+		    const struct vfsmount *parent_mnt)
+{
+	generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
+				  GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
+}
+
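+/* a rename is permitted only when both the source-side and target-side
+   checks granted read and write */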
+#define RENAME_CHECK_SUCCESS(old, new) \
+	(((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
+	 ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
+
+int
+gr_acl_handle_rename(struct dentry *new_dentry,
+		     struct dentry *parent_dentry,
+		     const struct vfsmount *parent_mnt,
+		     struct dentry *old_dentry,
+		     struct inode *old_parent_inode,
+		     struct vfsmount *old_mnt, const char *newname)
+{
+	__u8 gr_replace = 1;
+	__u32 comp1, comp2;
+	int error = 0;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 1;
+
+	if (!new_dentry->d_inode) {
+		gr_replace = 0;
+
+		comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
+					GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
+					GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
+		comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
+				       GR_DELETE | GR_AUDIT_DELETE |
+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
+				       GR_SUPPRESS, old_mnt);
+	} else {
+		comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
+				       GR_CREATE | GR_DELETE |
+				       GR_AUDIT_CREATE | GR_AUDIT_DELETE |
+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
+				       GR_SUPPRESS, parent_mnt);
+		comp2 =
+		    gr_search_file(old_dentry,
+				   GR_READ | GR_WRITE | GR_AUDIT_READ |
+				   GR_DELETE | GR_AUDIT_DELETE |
+				   GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
+	}
+
+	if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
+	    ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
+		security_audit(GR_RENAME_ACL_MSG, "successful",
+			       gr_to_filename(old_dentry, old_mnt),
+			       newname, DEFAULTSECARGS);
+	else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
+		 && !(comp2 & GR_SUPPRESS)) {
+		security_alert(GR_RENAME_ACL_MSG, "denied",
+			       gr_to_filename(old_dentry, old_mnt), newname,
+			       DEFAULTSECARGS);
+		error = -EACCES;
+	} else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
+		error = -EACCES;
+
+	if (error)
+		return error;
+
+	error = gr_handle_rename(old_parent_inode, parent_dentry->d_inode,
+				 old_dentry, new_dentry, old_mnt, gr_replace);
+
+	return error;
+}
+
+void
+gr_acl_handle_exit(void)
+{
+	u16 id;
+	char *rolename;
+
+	if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
+		id = current->acl_role_id;
+		rolename = current->role->rolename;
+		gr_set_acls(1);
+		security_alert_good(GR_SPROLEL_ACL_MSG,
+				    rolename, id, DEFAULTSECARGS);
+	}
+
+	if (current->exec_file) {
+		fput(current->exec_file);
+		current->exec_file = NULL;
+	}
+}
+
+int
+gr_acl_handle_procpidmem(const struct task_struct *task)
+{
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	if (task->acl->mode & GR_PROTPROCFD)
+		return -EACCES;
+
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_ip.c linux-2.4.26-leo/grsecurity/gracl_ip.c
--- linux-2.4.26/grsecurity/gracl_ip.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_ip.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,235 @@
+/* 
+ * grsecurity/gracl_ip.c
+ * Copyright Brad Spengler 2002, 2003
+ *
+ */
+
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <net/sock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/smp_lock.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+#define GR_BIND 	0x01
+#define GR_CONNECT 	0x02
+
+static const char * gr_protocols[256] = {
+	"ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
+	"egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
+	"chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
+	"trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
+	"merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
+	"il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
+	"mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
+	"tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
+	"sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
+	"cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
+	"iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
+	"eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
+	"scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
+	"aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
+	"vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
+	"uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
+	"unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
+	"unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
+	"unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
+	"unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
+	"unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
+	"unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
+	"unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
+	"unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
+	"unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
+	"unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
+	"unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
+	"unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
+	"unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
+	"unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
+	"unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
+	};
+
+static const char * gr_socktypes[11] = {
+	"unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
+	"unknown:7", "unknown:8", "unknown:9", "packet"
+	};
+
+__inline__ const char *
+gr_proto_to_name(unsigned char proto)
+{
+	return gr_protocols[proto];
+}
+
+__inline__ const char *
+gr_socktype_to_name(unsigned char type)
+{
+	return gr_socktypes[type];
+}
+
+int
+gr_search_socket(const int domain, const int type, const int protocol)
+{
+	struct acl_subject_label *curr;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		goto exit;
+
+	if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
+	    || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
+		goto exit;	// let the kernel handle it
+
+	curr = current->acl;
+
+	if (!curr->ips)
+		goto exit;
+
+	if ((curr->ip_type & (1 << type)) &&
+	    (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
+		goto exit;
+
+	if (curr->mode & GR_LEARN) {
+		/* we don't place ACLs on raw sockets, and sometimes
+		   dgram/ip sockets are opened for ioctl rather than
+		   bind/connect, so we fake a connect or bind learn entry */
+		if (type == SOCK_RAW || type == SOCK_PACKET) {
+			__u32 fakeip = 0;
+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+				       current->role->roletype, current->uid,
+				       current->gid, current->exec_file ?
+				       gr_to_filename(current->exec_file->f_dentry,
+				       current->exec_file->f_vfsmnt) :
+				       curr->filename, curr->filename,
+				       NIPQUAD(fakeip), 0, type,
+				       protocol, GR_CONNECT, NIPQUAD(current->curr_ip));
+		} else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
+			__u32 fakeip = 0;
+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+				       current->role->roletype, current->uid,
+				       current->gid, current->exec_file ?
+				       gr_to_filename(current->exec_file->f_dentry,
+				       current->exec_file->f_vfsmnt) :
+				       curr->filename, curr->filename,
+				       NIPQUAD(fakeip), 0, type,
+				       protocol, GR_BIND, NIPQUAD(current->curr_ip));
+		}
+		/* we'll log when they use connect or bind */
+		goto exit;
+	}
+
+	security_alert(GR_SOCK_MSG, "inet", gr_socktype_to_name(type),
+		       gr_proto_to_name(protocol), DEFAULTSECARGS);
+
+	return 0;
+      exit:
+	return 1;
+}
+
+static __inline__ int
+gr_search_connectbind(const int mode, const struct sock *sk,
+		      const struct sockaddr_in *addr, const int type)
+{
+	struct acl_subject_label *curr;
+	struct acl_ip_label *ip;
+	unsigned long i;
+	__u32 ip_addr = 0;
+	__u16 ip_port = 0;
+
+	if (unlikely(!gr_acl_is_enabled() || sk->family != PF_INET))
+		return 1;
+
+	curr = current->acl;
+
+	if (!curr->ips)
+		return 1;
+
+	ip_addr = addr->sin_addr.s_addr;
+	ip_port = ntohs(addr->sin_port);
+
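+	/* permit the call if any ACL IP entry matches the requested mode,
+	   port range, masked address, protocol and socket type */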
+	for (i = 0; i < curr->ip_num; i++) {
+		ip = *(curr->ips + i);
+		if ((ip->mode & mode) &&
+		    (ip_port >= ip->low) &&
+		    (ip_port <= ip->high) &&
+		    ((ntohl(ip_addr) & ip->netmask) ==
+		     (ntohl(ip->addr) & ip->netmask))
+		    && (ip->proto[sk->protocol / 32] &
+			(1 << (sk->protocol % 32)))
+		    && (ip->type & (1 << type)))
+			return 1;
+	}
+
+	if (curr->mode & GR_LEARN) {
+		security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+			       current->role->roletype, current->uid,
+			       current->gid, current->exec_file ?
+			       gr_to_filename(current->exec_file->f_dentry,
+			       current->exec_file->f_vfsmnt) :
+			       curr->filename, curr->filename,
+			       NIPQUAD(ip_addr), ip_port, type,
+			       sk->protocol, mode, NIPQUAD(current->curr_ip));
+		return 1;
+	}
+
+	if (mode == GR_BIND)
+		security_alert(GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port,
+			       gr_socktype_to_name(type), gr_proto_to_name(sk->protocol),
+			       DEFAULTSECARGS);
+	else if (mode == GR_CONNECT)
+		security_alert(GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port,
+			       gr_socktype_to_name(type), gr_proto_to_name(sk->protocol),
+			       DEFAULTSECARGS);
+
+	return 0;
+}
+
+int
+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type);
+}
+
+int
+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type);
+}
+
+int
+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
+{
+	if (addr)
+		return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
+	else {
+		struct sockaddr_in sin;
+
+		sin.sin_addr.s_addr = sk->daddr;
+		sin.sin_port = sk->dport;
+
+		return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
+	}
+}
+
+int
+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
+{
+	struct sockaddr_in sin;
+
+	if (unlikely(skb->len < sizeof (struct udphdr)))
+		return 1;	// skip this packet
+
+	sin.sin_addr.s_addr = skb->nh.iph->saddr;
+	sin.sin_port = skb->h.uh->source;
+
+	return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_learn.c linux-2.4.26-leo/grsecurity/gracl_learn.c
--- linux-2.4.26/grsecurity/gracl_learn.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_learn.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,170 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/string.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/grinternal.h>
+
+extern ssize_t write_grsec_handler(struct file * file, const char * buf,
+				   size_t count, loff_t *ppos);
+extern int gr_acl_is_enabled(void);
+
+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
+static int gr_learn_attached;
+
+/* use a 1MB buffer */
+#define LEARN_BUFFER_SIZE (1024 * 1024)
+
+static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED;
+static char *learn_buffer;
+static int learn_buffer_len;
+
+static ssize_t
+read_learn(struct file *file, char * buf, size_t count, loff_t * ppos)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t retval = 0;
+
+	add_wait_queue(&learn_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
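+	/* classic wait-queue sleep loop: recheck learn_buffer_len under
+	   the lock and sleep until a writer wakes us; note that we hold
+	   gr_learn_lock when we break out with data pending */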
+	do {
+		spin_lock(&gr_learn_lock);
+		if (learn_buffer_len)
+			break;
+		spin_unlock(&gr_learn_lock);
+
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			goto out;
+		}
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			goto out;
+		}
+
+		schedule();
+	} while (1);
+
+	if (copy_to_user(buf, learn_buffer, learn_buffer_len)) {
+		retval = -EFAULT;
+		spin_unlock(&gr_learn_lock);
+		goto out;
+	}
+
+	retval = learn_buffer_len;
+	learn_buffer_len = 0;
+
+	spin_unlock(&gr_learn_lock);
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&learn_wait, &wait);
+	return retval;
+}
+
+static unsigned int
+poll_learn(struct file * file, poll_table * wait)
+{
+	poll_wait(file, &learn_wait, wait);
+
+	if (learn_buffer_len)
+		return (POLLIN | POLLRDNORM);
+
+	return 0;
+}
+
+void
+gr_clear_learn_entries(void)
+{
+	spin_lock(&gr_learn_lock);
+	learn_buffer_len = 0;
+	if (learn_buffer != NULL) {
+		vfree(learn_buffer);
+		learn_buffer = NULL;
+	}
+	spin_unlock(&gr_learn_lock);
+
+	return;
+}
+
+void
+gr_add_learn_entry(const char *fmt, ...)
+{
+	va_list args;
+	unsigned int len;
+
+	if (!gr_learn_attached)
+		return;
+
+	spin_lock(&gr_learn_lock);
+
+	/* leave a gap at the end so we know when it's "full" but don't have to
+	   compute the exact length of the string we're trying to append
+	*/
+	if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
+		spin_unlock(&gr_learn_lock);
+		wake_up_interruptible(&learn_wait);
+		return;
+	}
+	if (learn_buffer == NULL) {
+		spin_unlock(&gr_learn_lock);
+		return;
+	}
+
+	va_start(args, fmt);
+	len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
+	va_end(args);
+
+	learn_buffer_len += len + 1;
+
+	spin_unlock(&gr_learn_lock);
+	wake_up_interruptible(&learn_wait);
+
+	return;
+}
+
+static int
+open_learn(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ && gr_learn_attached)
+		return -EBUSY;
+	if (file->f_mode & FMODE_READ) {
+		spin_lock(&gr_learn_lock);
+		if (learn_buffer == NULL)
+			learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
+		if (learn_buffer == NULL) {
+			/* don't return with gr_learn_lock held */
+			spin_unlock(&gr_learn_lock);
+			return -ENOMEM;
+		}
+		learn_buffer_len = 0;
+		gr_learn_attached = 1;
+		spin_unlock(&gr_learn_lock);
+	}
+	return 0;
+}
+
+static int
+close_learn(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		spin_lock(&gr_learn_lock);
+		if (learn_buffer != NULL) {
+			vfree(learn_buffer);
+			learn_buffer = NULL;
+		}
+		learn_buffer_len = 0;
+		gr_learn_attached = 0;
+		spin_unlock(&gr_learn_lock);
+	}
+
+	return 0;
+}
+
+struct file_operations grsec_fops = {
+	read:		read_learn,
+	write:		write_grsec_handler,
+	open:		open_learn,
+	release:	close_learn,
+	poll:		poll_learn,
+};
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_res.c linux-2.4.26-leo/grsecurity/gracl_res.c
--- linux-2.4.26/grsecurity/gracl_res.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_res.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,46 @@
+/* resource handling routines (c) Brad Spengler 2002, 2003 */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/gracl.h>
+#include <linux/grinternal.h>
+
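+/* resource names for logging, indexed by RLIMIT_* number
+   (RLIMIT_CPU == 0 through RLIMIT_LOCKS == 10) */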
+static const char *restab_log[11] = {
+	"RLIMIT_CPU",
+	"RLIMIT_FSIZE",
+	"RLIMIT_DATA",
+	"RLIMIT_STACK",
+	"RLIMIT_CORE",
+	"RLIMIT_RSS",
+	"RLIMIT_NPROC",
+	"RLIMIT_NOFILE",
+	"RLIMIT_MEMLOCK",
+	"RLIMIT_AS",
+	"RLIMIT_LOCKS"
+};
+
+__inline__ void
+gr_log_resource(const struct task_struct *task,
+		const int res, const unsigned long wanted, const int gt)
+{
+	if (unlikely(res == RLIMIT_NPROC && 
+	    (cap_raised(task->cap_effective, CAP_SYS_ADMIN) || 
+	     cap_raised(task->cap_effective, CAP_SYS_RESOURCE))))
+		return;
+
+	if (unlikely(((gt && wanted > task->rlim[res].rlim_cur) ||
+		      (!gt && wanted >= task->rlim[res].rlim_cur)) &&
+		     task->rlim[res].rlim_cur != RLIM_INFINITY))
+		security_alert(GR_RESOURCE_MSG, wanted, restab_log[res],
+			       task->rlim[res].rlim_cur,
+			       gr_task_fullpath(task), task->comm,
+			       task->pid, task->uid, task->euid,
+			       task->gid, task->egid,
+			       gr_parent_task_fullpath(task),
+			       task->p_pptr->comm,
+			       task->p_pptr->pid, task->p_pptr->uid, 
+			       task->p_pptr->euid, task->p_pptr->gid,
+			       task->p_pptr->egid);
+
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_segv.c linux-2.4.26-leo/grsecurity/gracl_segv.c
--- linux-2.4.26/grsecurity/gracl_segv.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_segv.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,323 @@
+/* 
+ * grsecurity/gracl_segv.c
+ * Copyright Brad Spengler 2002, 2003
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/mman.h>
+#include <net/sock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+static struct crash_uid *uid_set;
+static unsigned short uid_used;
+static rwlock_t gr_uid_lock = RW_LOCK_UNLOCKED;
+extern rwlock_t gr_inode_lock;
+extern struct acl_subject_label *
+	lookup_acl_subj_label(const ino_t inode, const kdev_t dev,
+			      struct acl_role_label *role);
+
+int
+gr_init_uidset(void)
+{
+	uid_set =
+	    kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
+	uid_used = 0;
+
+	return uid_set ? 1 : 0;
+}
+
+void
+gr_free_uidset(void)
+{
+	if (uid_set)
+		kfree(uid_set);
+
+	return;
+}
+
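+/* binary search of uid_set, which gr_insertsort() keeps ordered by
+   uid; returns the entry's index, or -1 if the uid is not present */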
+int
+gr_find_uid(const uid_t uid)
+{
+	struct crash_uid *tmp = uid_set;
+	uid_t buid;
+	int low = 0, high = uid_used - 1, mid;
+
+	while (high >= low) {
+		mid = (low + high) >> 1;
+		buid = tmp[mid].uid;
+		if (buid == uid)
+			return mid;
+		if (buid > uid)
+			high = mid - 1;
+		if (buid < uid)
+			low = mid + 1;
+	}
+
+	return -1;
+}
+
+static __inline__ void
+gr_insertsort(void)
+{
+	unsigned short i, j;
+	struct crash_uid index;
+
+	for (i = 1; i < uid_used; i++) {
+		index = uid_set[i];
+		j = i;
+		while ((j > 0) && uid_set[j - 1].uid > index.uid) {
+			uid_set[j] = uid_set[j - 1];
+			j--;
+		}
+		uid_set[j] = index;
+	}
+
+	return;
+}
+
+static __inline__ void
+gr_insert_uid(const uid_t uid, const unsigned long expires)
+{
+	int loc;
+
+	if (uid_used == GR_UIDTABLE_MAX)
+		return;
+
+	loc = gr_find_uid(uid);
+
+	if (loc >= 0) {
+		uid_set[loc].expires = expires;
+		return;
+	}
+
+	uid_set[uid_used].uid = uid;
+	uid_set[uid_used].expires = expires;
+	uid_used++;
+
+	gr_insertsort();
+
+	return;
+}
+
+void
+gr_remove_uid(const unsigned short loc)
+{
+	unsigned short i;
+
+	for (i = loc + 1; i < uid_used; i++)
+		uid_set[i - 1] = uid_set[i];
+
+	uid_used--;
+
+	return;
+}
+
+int
+gr_check_crash_uid(const uid_t uid)
+{
+	int loc;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	read_lock(&gr_uid_lock);
+	loc = gr_find_uid(uid);
+	read_unlock(&gr_uid_lock);
+
+	if (loc < 0)
+		return 0;
+
+	write_lock(&gr_uid_lock);
+	if (time_before_eq(uid_set[loc].expires, jiffies))
+		gr_remove_uid(loc);
+	else {
+		write_unlock(&gr_uid_lock);
+		return 1;
+	}
+
+	write_unlock(&gr_uid_lock);
+	return 0;
+}
+
+static __inline__ int
+proc_is_setxid(const struct task_struct *task)
+{
+	if (task->uid != task->euid || task->uid != task->suid ||
+	    task->uid != task->fsuid)
+		return 1;
+	if (task->gid != task->egid || task->gid != task->sgid ||
+	    task->gid != task->fsgid)
+		return 1;
+
+	return 0;
+}
+
+static __inline__ int
+gr_fake_force_sig(int sig, struct task_struct *t)
+{
+	unsigned long int flags;
+
+	spin_lock_irqsave(&t->sigmask_lock, flags);
+	if (t->sig == NULL) {
+		spin_unlock_irqrestore(&t->sigmask_lock, flags);
+		return -ESRCH;
+	}
+
+	if (t->sig->action[sig - 1].sa.sa_handler == SIG_IGN)
+		t->sig->action[sig - 1].sa.sa_handler = SIG_DFL;
+	sigdelset(&t->blocked, sig);
+	recalc_sigpending(t);
+	spin_unlock_irqrestore(&t->sigmask_lock, flags);
+
+	return send_sig_info(sig, (void *) 1L, t);
+}
+
+void
+gr_handle_crash(struct task_struct *task, const int sig)
+{
+	struct acl_subject_label *curr;
+	struct acl_subject_label *curr2;
+	struct task_struct *tsk;
+
+	if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
+		return;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return;
+
+	curr = task->acl;
+
+	if (!(curr->resmask & (1 << GR_CRASH_RES)))
+		return;
+
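+	/* GR_CRASH_RES implements a rate limit: rlim_cur crashes within
+	   rlim_max jiffies; an expired window resets the counters before
+	   this crash is counted */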
+	if (time_before_eq(curr->expires, jiffies)) {
+		curr->expires = 0;
+		curr->crashes = 0;
+	}
+
+	curr->crashes++;
+
+	if (!curr->expires)
+		curr->expires = jiffies + curr->res[GR_CRASH_RES].rlim_max;
+
+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
+	    time_after(curr->expires, jiffies)) {
+		if (task->uid && proc_is_setxid(task)) {
+			security_alert(GR_SEGVSTART_ACL_MSG,
+				       gr_task_fullpath(task), task->comm,
+				       task->pid, task->uid, task->euid,
+				       task->gid, task->egid,
+				       gr_parent_task_fullpath(task),
+				       task->p_pptr->comm, task->p_pptr->pid,
+				       task->p_pptr->uid, task->p_pptr->euid,
+				       task->p_pptr->gid, task->p_pptr->egid,
+				       task->uid,
+				       curr->res[GR_CRASH_RES].rlim_max / HZ);
+			write_lock(&gr_uid_lock);
+			gr_insert_uid(task->uid, curr->expires);
+			write_unlock(&gr_uid_lock);
+			curr->expires = 0;
+			curr->crashes = 0;
+			read_lock(&tasklist_lock);
+			for_each_task(tsk) {
+				if (tsk != task && tsk->uid == task->uid)
+					gr_fake_force_sig(SIGKILL, tsk);
+			}
+			read_unlock(&tasklist_lock);
+		} else {
+			security_alert(GR_SEGVNOSUID_ACL_MSG,
+				       gr_task_fullpath(task), task->comm,
+				       task->pid, task->uid, task->euid,
+				       task->gid, task->egid,
+				       gr_parent_task_fullpath(task),
+				       task->p_pptr->comm, task->p_pptr->pid,
+				       task->p_pptr->uid, task->p_pptr->euid,
+				       task->p_pptr->gid, task->p_pptr->egid,
+				       kdevname(curr->device), curr->inode,
+				       curr->res[GR_CRASH_RES].rlim_max / HZ);
+			read_lock(&tasklist_lock);
+			for_each_task(tsk) {
+				if (likely(tsk != task)) {
+					curr2 = tsk->acl;
+
+					if (curr2->device == curr->device &&
+					    curr2->inode == curr->inode)
+						gr_fake_force_sig(SIGKILL, tsk);
+				}
+			}
+			read_unlock(&tasklist_lock);
+		}
+	}
+
+	return;
+}
+
+int
+gr_check_crash_exec(const struct file *filp)
+{
+	struct acl_subject_label *curr;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	read_lock(&gr_inode_lock);
+	curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino,
+				     filp->f_dentry->d_inode->i_dev,
+				     current->role);
+	read_unlock(&gr_inode_lock);
+
+	if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
+	    (!curr->crashes && !curr->expires))
+		return 0;
+
+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
+	    time_after(curr->expires, jiffies))
+		return 1;
+	else if (time_before_eq(curr->expires, jiffies)) {
+		curr->crashes = 0;
+		curr->expires = 0;
+	}
+
+	return 0;
+}
+
+void
+gr_handle_alertkill(void)
+{
+	struct acl_subject_label *curracl;
+	__u32 curr_ip;
+	struct task_struct *task;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return;
+
+	curracl = current->acl;
+	curr_ip = current->curr_ip;
+
+	if ((curracl->mode & GR_KILLIPPROC) && curr_ip &&
+	    (curr_ip != 0xffffffff)) {
+		read_lock(&tasklist_lock);
+		for_each_task(task) {
+			if (task->curr_ip == curr_ip)
+				gr_fake_force_sig(SIGKILL, task);
+		}
+		read_unlock(&tasklist_lock);
+	} else if (curracl->mode & GR_KILLPROC)
+		gr_fake_force_sig(SIGKILL, current);
+
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/gracl_shm.c linux-2.4.26-leo/grsecurity/gracl_shm.c
--- linux-2.4.26/grsecurity/gracl_shm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/gracl_shm.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,36 @@
+/* shared memory handling routines, (c) Brad Spengler 2002, 2003 */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime, const uid_t cuid, const int shmid)
+{
+	struct task_struct *task;
+
+	if (!gr_acl_is_enabled())
+		return 1;
+
+	task = find_task_by_pid(shm_cprid);
+
+	if (unlikely(!task))
+		task = find_task_by_pid(shm_lapid);
+
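+	/* the start_time test guards against pid reuse: only a task older
+	   than the segment can be its creator */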
+	if (unlikely(task && ((task->start_time < shm_createtime) ||
+			      (task->pid == shm_lapid)) &&
+		     (task->acl->mode & GR_PROTSHM) &&
+		     (task->acl != current->acl))) {
+		security_alert(GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid,
+			       DEFAULTSECARGS);
+		return 0;
+	}
+
+	return 1;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_chdir.c linux-2.4.26-leo/grsecurity/grsec_chdir.c
--- linux-2.4.26/grsecurity/grsec_chdir.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_chdir.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,20 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	if ((grsec_enable_chdir && grsec_enable_group &&
+	     in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
+					      !grsec_enable_group)) {
+		security_audit(GR_CHDIR_AUDIT_MSG, gr_to_filename(dentry, mnt),
+			       DEFAULTSECARGS);
+	}
+#endif
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_chroot.c linux-2.4.26-leo/grsecurity/grsec_chroot.c
--- linux-2.4.26/grsecurity/grsec_chroot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_chroot.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,339 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_chroot_unix(const pid_t pid)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	struct task_struct *p, **htable;
+
+	if (unlikely(!grsec_enable_chroot_unix))
+		return 1;
+
+	if (likely(!proc_is_chrooted(current)))
+		return 1;
+
+	read_lock(&tasklist_lock);
+
+	htable = &pidhash[pid_hashfn(pid)];
+
+	for (p = *htable; p && p->pid != pid; p = p->pidhash_next) ;
+
+	if (unlikely(p && !have_same_root(current, p))) {
+		read_unlock(&tasklist_lock);
+		security_alert(GR_UNIX_CHROOT_MSG, DEFAULTSECARGS);
+		return 0;
+	}
+	read_unlock(&tasklist_lock);
+#endif
+	return 1;
+}
+
+int
+gr_handle_chroot_nice(void)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
+		security_alert(GR_NICE_CHROOT_MSG, DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_setpriority(const struct task_struct *p, const int niceval)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	if (grsec_enable_chroot_nice && (!have_same_root(p, current)
+					 || (have_same_root(p, current)
+					     && (niceval < p->nice)
+					     && proc_is_chrooted(current)))) {
+		security_alert(GR_PRIORITY_CHROOT_MSG, p->comm, p->pid,
+			       DEFAULTSECARGS);
+		return -ESRCH;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_capset(const struct task_struct *target)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
+	    !have_same_root(current, target)) {
+		security_alert(GR_CAPSET_CHROOT_MSG, target->comm, target->pid,
+			       DEFAULTSECARGS);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_rawio(const struct inode *inode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (grsec_enable_chroot_caps && proc_is_chrooted(current) && 
+	    inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
+		return 1;
+#endif
+	return 0;
+}
+
+int
+gr_pid_is_chrooted(const struct task_struct *p)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	if (!grsec_enable_chroot_findtask || (current->pid <= 1))
+		return 0;
+
+	if (p && p->fs && p->fs->root && p->fs->root->d_inode &&
+	    child_reaper && child_reaper->fs && child_reaper->fs->root &&
+	    child_reaper->fs->root->d_inode && current && current->fs &&
+	    current->fs->root && current->fs->root->d_inode) {
+		if (proc_is_chrooted(current) && !have_same_root(current, p))
+			return 1;
+	}
+#endif
+	return 0;
+}
+
+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
+{
+	struct dentry *dentry = (struct dentry *)u_dentry;
+	struct vfsmount *mnt = (struct vfsmount *)u_mnt;
+	struct dentry *realroot;
+	struct vfsmount *realrootmnt;
+	struct dentry *currentroot;
+	struct vfsmount *currentmnt;
+
+	read_lock(&child_reaper->fs->lock);
+	realrootmnt = mntget(child_reaper->fs->rootmnt);
+	realroot = dget(child_reaper->fs->root);
+	read_unlock(&child_reaper->fs->lock);
+
+	read_lock(&current->fs->lock);
+	currentmnt = mntget(current->fs->rootmnt);
+	currentroot = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+
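+	/* walk upward from the dentry, crossing mount points as needed,
+	   until we reach either the real root (outside the chroot) or the
+	   current task's root (inside it) */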
+	spin_lock(&dcache_lock);
+	for (;;) {
+		if (unlikely((dentry == realroot && mnt == realrootmnt)
+		     || (dentry == currentroot && mnt == currentmnt)))
+			break;
+		if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
+			if (mnt->mnt_parent == mnt)
+				break;
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			continue;
+		}
+		dentry = dentry->d_parent;
+	}
+	spin_unlock(&dcache_lock);
+
+	dput(currentroot);
+	mntput(currentmnt);
+
+	if (dentry == realroot && mnt == realrootmnt) {
+		/* access is outside of chroot */
+		dput(realroot);
+		mntput(realrootmnt);
+		return 0;
+	}
+
+	dput(realroot);
+	mntput(realrootmnt);
+	return 1;
+}
+#endif
+
+int
+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	if (!grsec_enable_chroot_fchdir)
+		return 1;
+
+	if (!proc_is_chrooted(current))
+		return 1;
+	else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
+		security_alert(GR_CHROOT_FCHDIR_MSG,
+			       gr_to_filename(u_dentry, u_mnt),
+			       DEFAULTSECARGS);
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+int
+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	struct task_struct *p, **htable;
+
+	if (unlikely(!grsec_enable_chroot_shmat))
+		return 1;
+
+	if (likely(!proc_is_chrooted(current)))
+		return 1;
+
+	read_lock(&tasklist_lock);
+
+	htable = &pidhash[pid_hashfn(shm_cprid)];
+
+	for (p = *htable; p && p->pid != shm_cprid; p = p->pidhash_next) ;
+
+	if (unlikely(p && !have_same_root(current, p) &&
+		     (p->start_time < shm_createtime))) {
+		read_unlock(&tasklist_lock);
+		security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
+		return 0;
+	}
+
+	if (unlikely(!p)) {
+		htable = &pidhash[pid_hashfn(shm_lapid)];
+		for (p = *htable; p && p->pid != shm_lapid;
+		     p = p->pidhash_next) ;
+
+		if (unlikely(p && !have_same_root(current, p))) {
+			read_unlock(&tasklist_lock);
+			security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
+			return 0;
+		}
+	}
+
+	read_unlock(&tasklist_lock);
+#endif
+	return 1;
+}
+
+void
+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
+		security_audit(GR_EXEC_CHROOT_MSG, gr_to_filename(dentry, mnt),
+			       DEFAULTSECARGS);
+#endif
+	return;
+}
+
+int
+gr_handle_chroot_mknod(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const int mode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
+	    proc_is_chrooted(current)) {
+		security_alert(GR_MKNOD_CHROOT_MSG,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_mount(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const char *dev_name)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
+		security_alert(GR_MOUNT_CHROOT_MSG, dev_name,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_pivot(void)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
+		security_alert(GR_PIVOT_CHROOT_MSG, DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
+	    !gr_is_outside_chroot(dentry, mnt)) {
+		security_alert(GR_CHROOT_CHROOT_MSG,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+void
+gr_handle_chroot_caps(struct task_struct *task)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
+		task->cap_permitted =
+		    cap_drop(task->cap_permitted, GR_CHROOT_CAPS);
+		task->cap_inheritable =
+		    cap_drop(task->cap_inheritable, GR_CHROOT_CAPS);
+		task->cap_effective =
+		    cap_drop(task->cap_effective, GR_CHROOT_CAPS);
+	}
+#endif
+	return;
+}
+
+int
+gr_handle_chroot_sysctl(const int op)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
+	    && (op & 002))	/* 002: write access */
+		return -EACCES;
+#endif
+	return 0;
+}
+
+void
+gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	if (grsec_enable_chroot_chdir)
+		set_fs_pwd(current->fs, mnt, dentry);
+#endif
+	return;
+}
+
+int
+gr_handle_chroot_chmod(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const int mode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	if (grsec_enable_chroot_chmod &&
+	    ((mode & S_ISUID) || (mode & S_ISGID)) &&
+	    proc_is_chrooted(current)) {
+		security_alert(GR_CHMOD_CHROOT_MSG,
+			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
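The heart of the double-chroot and fchdir checks is the walk in
gr_is_outside_chroot() above: starting from the target (dentry, vfsmount)
pair it repeatedly steps to the parent dentry, hopping across a mount
boundary whenever it reaches a mount root, until it arrives at either the
caller's root or the real root kept by child_reaper (init). Reaching the
real root first means the target lies outside the chroot. A minimal
userspace sketch of the same walk, with hypothetical node/mount structs
standing in for dentry/vfsmount:

struct node  { struct node *parent; };		/* stand-in for dentry   */
struct mount { struct mount *parent;		/* stand-in for vfsmount */
	       struct node *root, *mountpoint; };

/* Same convention as gr_is_outside_chroot(): returns 0 when the walk hits
 * the real root first (the path is outside the chroot), 1 otherwise. */
static int walk_to_root(struct node *d, struct mount *m,
			struct node *realroot, struct mount *realmnt,
			struct node *chrootroot, struct mount *chrootmnt)
{
	for (;;) {
		if ((d == realroot && m == realmnt) ||
		    (d == chrootroot && m == chrootmnt))
			break;
		if (d == m->root || d->parent == d) {
			if (m->parent == m)	/* global root: nowhere higher */
				break;
			d = m->mountpoint;	/* cross into the parent mount */
			m = m->parent;
			continue;
		}
		d = d->parent;
	}
	return !(d == realroot && m == realmnt);
}

The kernel version additionally pins both roots with dget()/mntget() and
holds dcache_lock across the walk; the sketch leaves all refcounting out.
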
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_disabled.c linux-2.4.26-leo/grsecurity/grsec_disabled.c
--- linux-2.4.26/grsecurity/grsec_disabled.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_disabled.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,386 @@
+/* 
+ * when grsecurity is disabled, compile every external hook into a permissive no-op stub
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/sysctl.h>
+
+#ifdef CONFIG_SYSCTL
+__inline__ __u32
+gr_handle_sysctl(const struct ctl_table * table, __u32 mode)
+{
+	return mode;
+}
+#endif
+
+__inline__ int
+gr_acl_is_enabled(void)
+{
+	return 0;
+}
+
+__inline__ int
+gr_handle_rawio(const struct inode *inode)
+{
+	return 0;
+}
+
+__inline__ void
+gr_acl_handle_psacct(struct task_struct *task, const long code)
+{
+	return;
+}
+
+__inline__ int
+gr_handle_ptrace_exec(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return 0;
+}
+
+__inline__ int
+gr_handle_mmap(const struct file *filp, const unsigned long prot)
+{
+	return 0;
+}
+
+__inline__ int
+gr_handle_ptrace(struct task_struct *task, const long request)
+{
+	return 0;
+}
+
+__inline__ int
+gr_handle_proc_ptrace(struct task_struct *task)
+{
+	return 0;
+}
+
+__inline__ void
+gr_learn_resource(const struct task_struct *task,
+		  const int res, const unsigned long wanted, const int gt)
+{
+	return;
+}
+
+__inline__ int
+gr_set_acls(const int type)
+{
+	return 0;
+}
+
+__inline__ int
+gr_check_hidden_task(const struct task_struct *tsk)
+{
+	return 0;
+}
+
+__inline__ int
+gr_check_protected_task(const struct task_struct *task)
+{
+	return 0;
+}
+
+__inline__ void
+gr_copy_label(struct task_struct *tsk)
+{
+	return;
+}
+
+__inline__ void
+gr_set_pax_flags(struct task_struct *task)
+{
+	return;
+}
+
+__inline__ void
+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return;
+}
+
+__inline__ void
+gr_handle_delete(const ino_t ino, const kdev_t dev)
+{
+	return;
+}
+
+__inline__ void
+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return;
+}
+
+__inline__ void
+gr_handle_crash(struct task_struct *task, const int sig)
+{
+	return;
+}
+
+__inline__ int
+gr_check_crash_exec(const struct file *filp)
+{
+	return 0;
+}
+
+__inline__ int
+gr_check_crash_uid(const uid_t uid)
+{
+	return 0;
+}
+
+__inline__ int
+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+		 struct dentry *old_dentry,
+		 struct dentry *new_dentry,
+		 struct vfsmount *mnt, const __u8 replace)
+{
+	return 0;
+}
+
+__inline__ int
+gr_search_socket(const int family, const int type, const int protocol)
+{
+	return 1;
+}
+
+__inline__ int
+gr_search_connectbind(const int mode, const struct socket *sock,
+		      const struct sockaddr_in *addr)
+{
+	return 1;
+}
+
+__inline__ int
+gr_task_is_capable(struct task_struct *task, const int cap)
+{
+	return 1;
+}
+
+__inline__ void
+gr_handle_alertkill(void)
+{
+	return;
+}
+
+__inline__ __u32
+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_hidden_file(const struct dentry * dentry,
+			  const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
+		   const int fmode)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ int
+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
+		   unsigned int *vm_flags)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_truncate(const struct dentry * dentry,
+		       const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_access(const struct dentry * dentry,
+		     const struct vfsmount * mnt, const int fmode)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
+		     mode_t mode)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
+		    mode_t mode)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ void
+grsecurity_init(void)
+{
+	return;
+}
+
+__inline__ __u32
+gr_acl_handle_mknod(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt,
+		    const int mode)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_mkdir(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_symlink(const struct dentry * new_dentry,
+		      const struct dentry * parent_dentry,
+		      const struct vfsmount * parent_mnt, const char *from)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_link(const struct dentry * new_dentry,
+		   const struct dentry * parent_dentry,
+		   const struct vfsmount * parent_mnt,
+		   const struct dentry * old_dentry,
+		   const struct vfsmount * old_mnt, const char *to)
+{
+	return 1;
+}
+
+__inline__ int
+gr_acl_handle_rename(const struct dentry *new_dentry,
+		     const struct dentry *parent_dentry,
+		     const struct vfsmount *parent_mnt,
+		     const struct dentry *old_dentry,
+		     const struct inode *old_parent_inode,
+		     const struct vfsmount *old_mnt, const char *newname)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_filldir(const struct dentry * dentry,
+		      const struct vfsmount * mnt, const ino_t ino)
+{
+	return 1;
+}
+
+__inline__ int
+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime, const uid_t cuid, const int shmid)
+{
+	return 1;
+}
+
+__inline__ int
+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return 1;
+}
+
+__inline__ int
+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__inline__ __u32
+gr_acl_handle_creat(const struct dentry * dentry,
+		    const struct dentry * p_dentry,
+		    const struct vfsmount * p_mnt, const int fmode,
+		    const int imode)
+{
+	return 1;
+}
+
+__inline__ void
+gr_acl_handle_exit(void)
+{
+	return;
+}
+
+__inline__ int
+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
+{
+	return 1;
+}
+
+__inline__ void
+gr_set_role_label(const uid_t uid, const gid_t gid)
+{
+	return;
+}
+
+__inline__ int
+gr_acl_handle_procpidmem(const struct task_struct *task)
+{
+	return 0;
+}
+
+__inline__ int
+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
+{
+	return 1;
+}
+
+__inline__ int
+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
+{
+	return 1;
+}
+
+__inline__ void
+gr_set_kernel_label(struct task_struct *task)
+{
+	return;
+}
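Every hook above has a matching real implementation elsewhere in the
patch; this file supplies permissive defaults so the hooks scattered
through the kernel never need an #ifdef CONFIG_GRKERNSEC at the call
site. The convention is consistent: deny-style gr_handle_*() hooks return
0 ("no objection"), while the gr_acl_handle_*() and gr_search_*() checks
grant with 1. A hypothetical miniature of the pattern (all names invented
for illustration):

extern int check_policy(void);	/* the real decision, assumed elsewhere */

int hook_allows_op(void);	/* shared declaration seen by callers */

#ifdef CONFIG_FEATURE
int hook_allows_op(void)
{
	return check_policy();
}
#else
int hook_allows_op(void)
{
	return 1;		/* permissive stub: always allow */
}
#endif

Callers then write "if (!hook_allows_op()) return -EPERM;" unconditionally,
and the disabled build costs one trivial call.
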
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_exec.c linux-2.4.26-leo/grsecurity/grsec_exec.c
--- linux-2.4.26/grsecurity/grsec_exec.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_exec.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,70 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/grdefs.h>
+#include <linux/grinternal.h>
+#include <linux/capability.h>
+
+#include <asm/uaccess.h>
+
+int
+gr_handle_nproc(void)
+{
+#ifdef CONFIG_GRKERNSEC_EXECVE
+	if (grsec_enable_execve && current->user &&
+	    (atomic_read(&current->user->processes) >
+	     current->rlim[RLIMIT_NPROC].rlim_cur) &&
+	    !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
+		security_alert(GR_NPROC_MSG, DEFAULTSECARGS);
+		return -EAGAIN;
+	}
+#endif
+	return 0;
+}
+
+void
+gr_handle_exec_args(struct linux_binprm *bprm, char **argv)
+{
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	char grarg[64] = { 0 };
+	__u8 execlen = 0;
+	unsigned int i;
+
+	if (!((grsec_enable_execlog && grsec_enable_group &&
+	       in_group_p(grsec_audit_gid))
+	      || (grsec_enable_execlog && !grsec_enable_group)))
+		return;
+
+	if (unlikely(!argv))
+		goto log;
+
+	for (i = 0; i < bprm->argc && execlen < 62; i++) {
+		char *p;
+		__u8 len;
+
+		if (get_user(p, argv + i))
+			goto log;
+		if (!p)
+			goto log;
+		len = strnlen_user(p, 62 - execlen);	/* length includes the NUL */
+		if (len > 62 - execlen)
+			len = 62 - execlen;
+		else if (len > 0)
+			len--;	/* drop the trailing NUL */
+		if (copy_from_user(grarg + execlen, p, len))
+			goto log;
+		execlen += len;
+		*(grarg + execlen) = ' ';
+		*(grarg + execlen + 1) = '\0';
+		execlen++;
+	}
+
+      log:
+	security_audit(GR_EXEC_AUDIT_MSG, gr_to_filename(bprm->file->f_dentry,
+							 bprm->file->f_vfsmnt),
+		       grarg, DEFAULTSECARGS);
+#endif
+	return;
+}
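gr_handle_exec_args() folds up to 62 bytes of the exec'd program's
arguments into the fixed 64-byte grarg buffer: strnlen_user() reports the
string length including its NUL (which the len-- drops), every copied
argument is followed by a space, and the buffer always stays
NUL-terminated. A self-contained userspace analogue of the same
truncation logic (hypothetical helper; plain libc calls stand in for
get_user()/copy_from_user()):

#include <stdio.h>
#include <string.h>

static void concat_args(char *dst, int argc, char **argv)
{
	unsigned char used = 0;
	int i;

	dst[0] = '\0';
	for (i = 0; i < argc && used < 62; i++) {
		/* strnlen() does not count the NUL, so no len-- here */
		size_t len = strnlen(argv[i], 62 - used);

		memcpy(dst + used, argv[i], len);
		used += len;
		dst[used++] = ' ';	/* argument separator */
		dst[used] = '\0';	/* worst case writes index 63 */
	}
}

int main(int argc, char **argv)
{
	char buf[64] = { 0 };

	concat_args(buf, argc, argv);
	printf("%s\n", buf);
	return 0;
}
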
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_fifo.c linux-2.4.26-leo/grsecurity/grsec_fifo.c
--- linux-2.4.26/grsecurity/grsec_fifo.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_fifo.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,24 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
+	       const struct dentry *dir, const int flag, const int acc_mode)
+{
+#ifdef CONFIG_GRKERNSEC_FIFO
+	if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
+	    !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
+	    (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
+	    (current->fsuid != dentry->d_inode->i_uid)) {
+		if (!permission(dentry->d_inode, acc_mode))	/* alert only if DAC would have allowed it */
+			security_alert(GR_FIFO_MSG, gr_to_filename(dentry, mnt),
+				       dentry->d_inode->i_uid,
+				       dentry->d_inode->i_gid, DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_fork.c linux-2.4.26-leo/grsecurity/grsec_fork.c
--- linux-2.4.26/grsecurity/grsec_fork.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_fork.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,14 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_forkfail(const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	if (grsec_enable_forkfail)
+		security_alert(GR_FAILFORK_MSG, retval, DEFAULTSECARGS);
+#endif
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_init.c linux-2.4.26-leo/grsecurity/grsec_init.c
--- linux-2.4.26/grsecurity/grsec_init.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_init.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,226 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/gracl.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+int grsec_enable_link;
+int grsec_enable_dmesg;
+int grsec_enable_fifo;
+int grsec_enable_execve;
+int grsec_enable_execlog;
+int grsec_enable_signal;
+int grsec_enable_forkfail;
+int grsec_enable_time;
+int grsec_enable_audit_textrel;
+int grsec_enable_group;
+int grsec_audit_gid;
+int grsec_enable_chdir;
+int grsec_enable_audit_ipc;
+int grsec_enable_mount;
+int grsec_enable_chroot_findtask;
+int grsec_enable_chroot_mount;
+int grsec_enable_chroot_shmat;
+int grsec_enable_chroot_fchdir;
+int grsec_enable_chroot_double;
+int grsec_enable_chroot_pivot;
+int grsec_enable_chroot_chdir;
+int grsec_enable_chroot_chmod;
+int grsec_enable_chroot_mknod;
+int grsec_enable_chroot_nice;
+int grsec_enable_chroot_execlog;
+int grsec_enable_chroot_caps;
+int grsec_enable_chroot_sysctl;
+int grsec_enable_chroot_unix;
+int grsec_enable_tpe;
+int grsec_tpe_gid;
+int grsec_enable_tpe_all;
+int grsec_enable_randpid;
+int grsec_enable_randid;
+int grsec_enable_randisn;
+int grsec_enable_randsrc;
+int grsec_enable_randrpc;
+int grsec_enable_socket_all;
+int grsec_socket_all_gid;
+int grsec_enable_socket_client;
+int grsec_socket_client_gid;
+int grsec_enable_socket_server;
+int grsec_socket_server_gid;
+int grsec_lock;
+
+spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED;
+unsigned long grsec_alert_wtime = 0;
+unsigned long grsec_alert_fyet = 0;
+
+spinlock_t grsec_alertgood_lock = SPIN_LOCK_UNLOCKED;
+unsigned long grsec_alertgood_wtime = 0;
+unsigned long grsec_alertgood_fyet = 0;
+
+spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED;
+
+char *gr_shared_page[4][NR_CPUS];
+extern struct gr_arg *gr_usermode;
+extern unsigned char *gr_system_salt;
+extern unsigned char *gr_system_sum;
+extern struct task_struct **gr_conn_table;
+extern const unsigned int gr_conn_table_size;
+
+void
+grsecurity_init(void)
+{
+	int i, j;
+	/* create the per-cpu shared pages */
+
+	for (j = 0; j < 4; j++) {
+		for (i = 0; i < NR_CPUS; i++) {
+			gr_shared_page[j][i] = (char *) get_zeroed_page(GFP_KERNEL);
+			if (!gr_shared_page[j][i]) {
+				panic("Unable to allocate grsecurity shared page");
+				return;
+			}
+		}
+	}
+
+	/* create hash tables for ip tagging */
+
+	gr_conn_table = (struct task_struct **) vmalloc(gr_conn_table_size * sizeof(struct task_struct *));
+	if (gr_conn_table == NULL) {
+		panic("Unable to allocate grsecurity IP tagging table");
+		return;
+	}
+	memset(gr_conn_table, 0, gr_conn_table_size * sizeof(struct task_struct *));
+
+	/* allocate memory for authentication structure */
+	gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
+	gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
+	gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
+
+	if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
+		panic("Unable to allocate grsecurity authentication structure");
+		return;
+	}
+
+#ifndef CONFIG_GRKERNSEC_SYSCTL
+	grsec_lock = 1;	/* no sysctl interface: the settings below are fixed at boot */
+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
+	grsec_enable_audit_textrel = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
+	grsec_enable_group = 1;
+	grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	grsec_enable_chdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	grsec_enable_audit_ipc = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	grsec_enable_mount = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_LINK
+	grsec_enable_link = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
+	grsec_enable_dmesg = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
+	grsec_enable_fifo = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECVE
+	grsec_enable_execve = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	grsec_enable_execlog = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	grsec_enable_signal = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	grsec_enable_forkfail = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
+	grsec_enable_time = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	grsec_enable_chroot_findtask = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	grsec_enable_chroot_unix = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	grsec_enable_chroot_mount = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	grsec_enable_chroot_fchdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	grsec_enable_chroot_shmat = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	grsec_enable_chroot_double = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	grsec_enable_chroot_pivot = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	grsec_enable_chroot_chdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	grsec_enable_chroot_chmod = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	grsec_enable_chroot_mknod = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	grsec_enable_chroot_nice = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	grsec_enable_chroot_execlog = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	grsec_enable_chroot_caps = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	grsec_enable_chroot_sysctl = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
+	grsec_enable_tpe = 1;
+	grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	grsec_enable_tpe_all = 1;
+#endif
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDPID
+	grsec_enable_randpid = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDID
+	grsec_enable_randid = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDISN
+	grsec_enable_randisn = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+	grsec_enable_randsrc = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+	grsec_enable_randrpc = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	grsec_enable_socket_all = 1;
+	grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	grsec_enable_socket_client = 1;
+	grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	grsec_enable_socket_server = 1;
+	grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
+#endif
+#endif
+
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_ipc.c linux-2.4.26-leo/grsecurity/grsec_ipc.c
--- linux-2.4.26/grsecurity/grsec_ipc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_ipc.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,81 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/ipc.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_msgget(const int ret, const int msgflg)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
+					  !grsec_enable_group)) && (ret >= 0)
+	    && (msgflg & IPC_CREAT))
+		security_audit(GR_MSGQ_AUDIT_MSG, DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_msgrm(const uid_t uid, const uid_t cuid)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	     grsec_enable_audit_ipc) ||
+	    (grsec_enable_audit_ipc && !grsec_enable_group))
+		security_audit(GR_MSGQR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_semget(const int err, const int semflg)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
+					  !grsec_enable_group)) && (err >= 0)
+	    && (semflg & IPC_CREAT))
+		security_audit(GR_SEM_AUDIT_MSG, DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_semrm(const uid_t uid, const uid_t cuid)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	     grsec_enable_audit_ipc) ||
+	    (grsec_enable_audit_ipc && !grsec_enable_group))
+		security_audit(GR_SEMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_shmget(const int err, const int shmflg, const size_t size)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
+					  !grsec_enable_group)) && (err >= 0)
+	    && (shmflg & IPC_CREAT))
+		security_audit(GR_SHM_AUDIT_MSG, size, DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_shmrm(const uid_t uid, const uid_t cuid)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
+	     grsec_enable_audit_ipc) ||
+	    (grsec_enable_audit_ipc && !grsec_enable_group))
+		security_audit(GR_SHMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
+#endif
+	return;
+}
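The same three-way predicate gates all five audit hooks above; it reduces
to "IPC auditing is enabled, and either group filtering is off or the
caller belongs to the audit group". A hypothetical helper (not part of
the patch) makes that plainer:

static inline int gr_should_audit_ipc(void)
{
	return grsec_enable_audit_ipc &&
	       (!grsec_enable_group || in_group_p(grsec_audit_gid));
}

With it, gr_log_msgget()'s condition would read simply
"if (gr_should_audit_ipc() && (ret >= 0) && (msgflg & IPC_CREAT))".
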
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_link.c linux-2.4.26-leo/grsecurity/grsec_link.c
--- linux-2.4.26/grsecurity/grsec_link.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_link.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,41 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_follow_link(const struct inode *parent,
+		      const struct inode *inode,
+		      const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_LINK
+	if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
+	    (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
+	    (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) {
+		security_alert(GR_SYMLINK_MSG, gr_to_filename(dentry, mnt),
+			       inode->i_uid, inode->i_gid, DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_hardlink(const struct dentry *dentry,
+		   const struct vfsmount *mnt,
+		   struct inode *inode, const int mode, const char *to)
+{
+#ifdef CONFIG_GRKERNSEC_LINK
+	if (grsec_enable_link && current->fsuid != inode->i_uid &&
+	    (!S_ISREG(mode) || (mode & S_ISUID) ||
+	     ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
+	     (permission(inode, MAY_READ | MAY_WRITE))) &&
+	    !capable(CAP_FOWNER) && current->uid) {
+		security_alert(GR_HARDLINK_MSG, gr_to_filename(dentry, mnt),
+			       inode->i_uid, inode->i_gid, to, DEFAULTSECARGS);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
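gr_handle_follow_link() targets the classic /tmp symlink attack:
following a symlink that sits in a sticky, world-writable directory is
refused unless the link is owned by the follower or by the directory
owner. The condition reads more easily with the bit tests named; a
hypothetical predicate equivalent to the check above (minus the
grsec_enable_link switch):

static inline int deny_follow(const struct inode *parent,
			      const struct inode *inode, uid_t fsuid)
{
	return S_ISLNK(inode->i_mode) &&	/* it is a symlink         */
	       (parent->i_mode & S_ISVTX) &&	/* dir is sticky...        */
	       (parent->i_mode & S_IWOTH) &&	/* ...and world-writable   */
	       parent->i_uid != inode->i_uid &&	/* link owner != dir owner */
	       fsuid != inode->i_uid;		/* follower != link owner  */
}
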
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_mem.c linux-2.4.26-leo/grsecurity/grsec_mem.c
--- linux-2.4.26/grsecurity/grsec_mem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_mem.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,53 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/grinternal.h>
+
+void
+gr_handle_ioperm(void)
+{
+	security_alert(GR_IOPERM_MSG, DEFAULTSECARGS);
+	return;
+}
+
+void
+gr_handle_iopl(void)
+{
+	security_alert(GR_IOPL_MSG, DEFAULTSECARGS);
+	return;
+}
+
+void
+gr_handle_mem_write(void)
+{
+	security_alert(GR_MEM_WRITE_MSG, DEFAULTSECARGS);
+	return;
+}
+
+void
+gr_handle_kmem_write(void)
+{
+	security_alert(GR_KMEM_MSG, DEFAULTSECARGS);
+	return;
+}
+
+void
+gr_handle_open_port(void)
+{
+	security_alert(GR_PORT_OPEN_MSG, DEFAULTSECARGS);
+	return;
+}
+
+int
+gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
+{
+	if (offset < __pa(high_memory) && (vma->vm_flags & VM_WRITE) &&
+	    !(offset == 0xf0000 && ((vma->vm_end - vma->vm_start) <= 0x10000)) && /* 64K BIOS ROM window */
+	    !(offset == 0xa0000 && ((vma->vm_end - vma->vm_start) <= 0x20000))) { /* 128K legacy VGA window */
+		security_alert(GR_MEM_MMAP_MSG, DEFAULTSECARGS);
+		return -EPERM;
+	} else if (offset < __pa(high_memory))
+		vma->vm_flags &= ~VM_MAYWRITE;
+
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_mount.c linux-2.4.26-leo/grsecurity/grsec_mount.c
--- linux-2.4.26/grsecurity/grsec_mount.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_mount.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,34 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_remount(const char *devname, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		security_audit(GR_REMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_unmount(const char *devname, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		security_audit(GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
+#endif
+	return;
+}
+
+void
+gr_log_mount(const char *from, const char *to, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		security_audit(GR_MOUNT_AUDIT_MSG, from, to, DEFAULTSECARGS);
+#endif
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_rand.c linux-2.4.26-leo/grsecurity/grsec_rand.c
--- linux-2.4.26/grsecurity/grsec_rand.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_rand.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,36 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+extern int last_pid;
+
+int
+gr_random_pid(spinlock_t * pid_lock)
+{
+#ifdef CONFIG_GRKERNSEC_RANDPID
+	struct task_struct *p;
+	int pid;
+
+	if (grsec_enable_randpid && current->fs->root) {
+		read_lock(&tasklist_lock);
+		spin_lock(pid_lock);
+
+	      repeater:
+
+		pid = 1 + (get_random_long() % PID_MAX);
+
+		for_each_task(p) {
+			if (p->pid == pid || p->pgrp == pid ||
+			    p->tgid == pid || p->session == pid)
+				goto repeater;
+		}
+		last_pid = pid;
+		spin_unlock(pid_lock);
+		read_unlock(&tasklist_lock);
+		return pid;
+	}
+#endif
+	return 0;
+}
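gr_random_pid() is plain rejection sampling: draw a random candidate and
retry while any existing task already uses the value as a pid, pgrp, tgid
or session id (the scan runs under tasklist_lock with the caller's pid
lock held, and like the original it keeps the small modulo bias of
"% PID_MAX"). The same loop in miniature, as a self-contained userspace
sketch with a hypothetical is_taken() standing in for the task-list scan:

#include <stdio.h>
#include <stdlib.h>

#define ID_MAX 32768

static const int taken[] = { 1, 2, 42, 100, 4097 };

static int is_taken(int id)
{
	unsigned int i;

	for (i = 0; i < sizeof(taken) / sizeof(taken[0]); i++)
		if (taken[i] == id)
			return 1;
	return 0;
}

static int random_unique_id(void)
{
	int id;

	do {
		id = 1 + (rand() % ID_MAX);
	} while (is_taken(id));
	return id;
}

int main(void)
{
	printf("%d\n", random_unique_id());
	return 0;
}
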
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_sig.c linux-2.4.26-leo/grsecurity/grsec_sig.c
--- linux-2.4.26/grsecurity/grsec_sig.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_sig.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_signal(const int sig, const struct task_struct *t)
+{
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
+				    (sig == SIGABRT) || (sig == SIGBUS))) {
+		if (t->pid == current->pid) {
+			security_alert_good(GR_UNISIGLOG_MSG, sig,
+					    DEFAULTSECARGS);
+		} else {
+			security_alert_good(GR_DUALSIGLOG_MSG, sig,
+					    gr_task_fullpath0(t), t->comm,
+					    t->pid, t->uid, t->euid, t->gid,
+					    t->egid, gr_parent_task_fullpath0(t),
+					    t->p_pptr->comm,
+					    t->p_pptr->pid, t->p_pptr->uid,
+					    t->p_pptr->euid, t->p_pptr->gid,
+					    t->p_pptr->egid, DEFAULTSECARGS);
+		}
+	}
+#endif
+	return;
+}
+
+int
+gr_handle_signal(const struct task_struct *p, const int sig)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (current->pid > 1 && gr_check_protected_task(p)) {
+		security_alert(GR_SIG_ACL_MSG, sig, gr_task_fullpath0(p),
+			       p->comm, p->pid, p->uid,
+			       p->euid, p->gid, p->egid,
+			       gr_parent_task_fullpath0(p), p->p_pptr->comm,
+			       p->p_pptr->pid, p->p_pptr->uid,
+			       p->p_pptr->euid, p->p_pptr->gid,
+			       p->p_pptr->egid, DEFAULTSECARGS);
+		return -EPERM;
+	} else if (gr_pid_is_chrooted(p)) {
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_sock.c linux-2.4.26-leo/grsecurity/grsec_sock.c
--- linux-2.4.26/grsecurity/grsec_sock.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_sock.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,199 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <net/sock.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/gracl.h>
+
+#ifdef CONFIG_GRKERNSEC
+struct task_struct **gr_conn_table;
+const unsigned int gr_conn_table_size = 65521;	/* largest prime below 2^16 */
+struct task_struct *deleted_conn = (struct task_struct *)~0;
+spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED;
+
+extern __inline__ const char * gr_socktype_to_name(unsigned char type);
+extern __inline__ const char * gr_proto_to_name(unsigned char proto);
+
+static __inline__ int 
+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
+{
+	return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
+}
+
+static __inline__ int
+conn_match(const struct task_struct *task, __u32 saddr, __u32 daddr, 
+	   __u16 sport, __u16 dport)
+{
+	if (unlikely(task != deleted_conn && task->gr_saddr == saddr && 
+		     task->gr_daddr == daddr && task->gr_sport == sport &&
+		     task->gr_dport == dport))
+		return 1;
+	else
+		return 0;
+}
+
+void gr_add_to_task_ip_table(struct task_struct *task)
+{
+	unsigned int index;
+
+	if (unlikely(gr_conn_table == NULL))
+		return;
+
+	index = conn_hash(task->gr_saddr, task->gr_daddr,
+			  task->gr_sport, task->gr_dport, 
+			  gr_conn_table_size);
+
+	spin_lock(&gr_conn_table_lock);
+
+	while (gr_conn_table[index] && gr_conn_table[index] != deleted_conn) {
+		index = (index + 1) % gr_conn_table_size;
+	}
+
+	gr_conn_table[index] = task;
+
+	spin_unlock(&gr_conn_table_lock);
+
+	return;
+}
+
+void gr_del_task_from_ip_table_nolock(struct task_struct *task)
+{
+	unsigned int index;
+
+	if (unlikely(gr_conn_table == NULL))
+		return;
+
+	index = conn_hash(task->gr_saddr, task->gr_daddr, 
+			  task->gr_sport, task->gr_dport, 
+			  gr_conn_table_size);
+
+	while (gr_conn_table[index] && !conn_match(gr_conn_table[index], 
+		task->gr_saddr, task->gr_daddr, task->gr_sport, 
+		task->gr_dport)) {
+		index = (index + 1) % gr_conn_table_size;
+	}
+
+	if (gr_conn_table[index]) {
+		if (gr_conn_table[(index + 1) % gr_conn_table_size])
+			gr_conn_table[index] = deleted_conn;
+		else
+			gr_conn_table[index] = NULL;
+	}
+
+	return;
+}
+
+struct task_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
+					     __u16 sport, __u16 dport)
+{
+	unsigned int index;
+
+	if (unlikely(gr_conn_table == NULL))
+		return NULL;
+
+	index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
+
+	while (gr_conn_table[index] && !conn_match(gr_conn_table[index], 
+		saddr, daddr, sport, dport)) {
+		index = (index + 1) % gr_conn_table_size;
+	}
+
+	return gr_conn_table[index];
+}
+
+#endif
+
+void gr_del_task_from_ip_table(struct task_struct *task)
+{
+#ifdef CONFIG_GRKERNSEC
+	spin_lock(&gr_conn_table_lock);
+	gr_del_task_from_ip_table_nolock(task);
+	spin_unlock(&gr_conn_table_lock);
+#endif
+	return;
+}
+
+void
+gr_attach_curr_ip(const struct sock *sk)
+{
+#ifdef CONFIG_GRKERNSEC
+	struct task_struct *p;
+
+	if (unlikely(sk->protocol != IPPROTO_TCP))
+		return;
+
+	spin_lock(&gr_conn_table_lock);
+	p = gr_lookup_task_ip_table(sk->daddr, sk->rcv_saddr,
+				    sk->dport, sk->sport);
+	if (unlikely(p != NULL)) {
+		current->curr_ip = p->curr_ip;
+		current->used_accept = 1;
+		gr_del_task_from_ip_table_nolock(p);
+		spin_unlock(&gr_conn_table_lock);
+		return;
+	}
+	spin_unlock(&gr_conn_table_lock);
+
+	current->curr_ip = sk->daddr;
+	current->used_accept = 1;
+#endif
+	return;
+}
+
+int
+gr_handle_sock_all(const int family, const int type, const int protocol)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
+	    (family != AF_UNIX) && (family != AF_LOCAL) && (type < SOCK_MAX)) {
+		security_alert(GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol),
+			       DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_sock_server(const struct sockaddr *sck)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	if (grsec_enable_socket_server &&
+	    in_group_p(grsec_socket_server_gid) &&
+	    sck && (sck->sa_family != AF_UNIX) &&
+	    (sck->sa_family != AF_LOCAL)) {
+		security_alert(GR_BIND_MSG, DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_sock_client(const struct sockaddr *sck)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
+	    sck && (sck->sa_family != AF_UNIX) &&
+	    (sck->sa_family != AF_LOCAL)) {
+		security_alert(GR_CONNECT_MSG, DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+__u32
+gr_cap_rtnetlink(void)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (!gr_acl_is_enabled())
+		return current->cap_effective;
+	else
+		return (current->cap_effective & ~(current->acl->cap_lower));
+#else
+	return current->cap_effective;
+#endif
+}
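gr_conn_table is an open-addressed hash table with linear probing:
inserts probe forward past occupied slots (reusing tombstones), and
deletes leave the deleted_conn tombstone whenever the next slot is
occupied, so later probe chains do not stop early at the hole. A
self-contained sketch of the same scheme, with a hypothetical int-keyed
table standing in for the task_struct pointers:

#include <stddef.h>

#define TBL_SIZE 13
static int tombstone;			/* its address marks deleted slots */
static int *table[TBL_SIZE];

static void tbl_insert(int *key, unsigned int hash)
{
	unsigned int i = hash % TBL_SIZE;

	while (table[i] && table[i] != &tombstone)	/* reuse tombstones */
		i = (i + 1) % TBL_SIZE;
	table[i] = key;
}

static void tbl_delete(int *key, unsigned int hash)
{
	unsigned int i = hash % TBL_SIZE;

	while (table[i] && table[i] != key)	/* tombstones never match */
		i = (i + 1) % TBL_SIZE;
	if (table[i]) {
		/* keep a tombstone only if the probe chain continues */
		if (table[(i + 1) % TBL_SIZE])
			table[i] = &tombstone;
		else
			table[i] = NULL;
	}
}

As in the kernel code, a lookup for an absent key in a table with no empty
slot would probe forever; the real table has 65521 slots, so in practice
the probe chains stay short.
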
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_sysctl.c linux-2.4.26-leo/grsecurity/grsec_sysctl.c
--- linux-2.4.26/grsecurity/grsec_sysctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_sysctl.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,16 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
+{
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+	if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) {
+		security_alert(GR_SYSCTL_MSG, name, DEFAULTSECARGS);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_textrel.c linux-2.4.26-leo/grsecurity/grsec_textrel.c
--- linux-2.4.26/grsecurity/grsec_textrel.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_textrel.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,19 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+#include <linux/grsecurity.h>
+
+void
+gr_log_textrel(struct vm_area_struct * vma)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
+	if (grsec_enable_audit_textrel)
+		security_audit(GR_TEXTREL_AUDIT_MSG, vma->vm_file ? 
+				gr_to_filename(vma->vm_file->f_dentry, vma->vm_file->f_vfsmnt)
+				: "<anonymous mapping>", vma->vm_start, 
+				vma->vm_pgoff, DEFAULTSECARGS);
+#endif
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_time.c linux-2.4.26-leo/grsecurity/grsec_time.c
--- linux-2.4.26/grsecurity/grsec_time.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_time.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,13 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_timechange(void)
+{
+#ifdef CONFIG_GRKERNSEC_TIME
+	if (grsec_enable_time)
+		security_alert_good(GR_TIME_MSG, DEFAULTSECARGS);
+#endif
+	return;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsec_tpe.c linux-2.4.26-leo/grsecurity/grsec_tpe.c
--- linux-2.4.26/grsecurity/grsec_tpe.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsec_tpe.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,35 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/grinternal.h>
+
+extern int gr_acl_tpe_check(void);
+
+int
+gr_tpe_allow(const struct file *file)
+{
+#ifdef CONFIG_GRKERNSEC
+	struct inode *inode = file->f_dentry->d_parent->d_inode;	/* the containing directory */
+
+	if (current->uid && ((grsec_enable_tpe && in_group_p(grsec_tpe_gid)) || gr_acl_tpe_check()) &&
+	    (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
+						(inode->i_mode & S_IWOTH))))) {
+		security_alert(GR_EXEC_TPE_MSG,
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 0;
+	}
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
+	    ((inode->i_uid && (inode->i_uid != current->uid)) ||
+	     (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
+		security_alert(GR_EXEC_TPE_MSG,
+			       gr_to_filename(file->f_dentry, file->f_vfsmnt),
+			       DEFAULTSECARGS);
+		return 0;
+	}
+#endif
+#endif
+	return 1;
+}
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/grsum.c linux-2.4.26-leo/grsecurity/grsum.c
--- linux-2.4.26/grsecurity/grsum.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/grsum.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,59 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/gracl.h>
+
+
+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
+#error "crypto and sha256 must be built into the kernel"
+#endif
+
+int
+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
+{
+	char *p;
+	struct crypto_tfm *tfm;
+	unsigned char temp_sum[GR_SHA_LEN];
+	struct scatterlist sg[2];
+	volatile int retval = 0;	/* volatile so the compiler cannot */
+	volatile int dummy = 0;		/* short-circuit the compare below */
+	unsigned int i;
+
+	tfm = crypto_alloc_tfm("sha256", 0);
+	if (tfm == NULL) {
+		/* should never happen, since sha256 should be built in */
+		return 1;
+	}
+
+	crypto_digest_init(tfm);
+
+	p = salt;
+	sg[0].page = virt_to_page(p);
+	sg[0].offset = ((long) p & ~PAGE_MASK);
+	sg[0].length = GR_SALT_LEN;
+	
+	crypto_digest_update(tfm, sg, 1);
+
+	p = entry->pw;
+	sg[0].page = virt_to_page(p);
+	sg[0].offset = ((long) p & ~PAGE_MASK);
+	sg[0].length = strlen(entry->pw);
+
+	crypto_digest_update(tfm, sg, 1);
+
+	crypto_digest_final(tfm, temp_sum);
+
+	memset(entry->pw, 0, GR_PW_LEN);
+
+	for (i = 0; i < GR_SHA_LEN; i++)
+		if (sum[i] != temp_sum[i])
+			retval = 1;
+		else
+			dummy = 1;	/* waste a cycle */
+
+	crypto_free_tfm(tfm);
+
+	return retval;
+}
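chkpw() deliberately compares every byte of the two digests: the volatile
retval/dummy pair keeps the compiler from collapsing the loop into an
early-exit memcmp(), so the running time does not reveal the position of
the first mismatching byte. The more common idiom accumulates XOR
differences instead; a self-contained sketch for comparison (not part of
the patch):

#include <stddef.h>

/* returns 0 when equal, nonzero otherwise -- chkpw()'s convention */
static int ct_memcmp(const unsigned char *a, const unsigned char *b,
		     size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* no data-dependent branches */
	return diff != 0;
}
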
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/Makefile linux-2.4.26-leo/grsecurity/Makefile
--- linux-2.4.26/grsecurity/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/Makefile	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,24 @@
+# grsecurity's ACL system was originally written in 2001 by Michael Dalton;
+# during 2001, 2002, and 2003 it was completely redesigned by
+# Brad Spengler
+#
+# All code in this directory and various hooks inserted throughout the kernel
+# are copyright Brad Spengler, and released under the GPL, unless otherwise
+# noted (as in obsd_rand.c)
+
+O_TARGET := grsec.o
+
+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
+	grsec_mount.o grsec_rand.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
+	grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o
+
+ifeq ($(CONFIG_GRKERNSEC),y)
+obj-y += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o obsd_rand.o \
+	gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
+	gracl_learn.o grsec_textrel.o
+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
+else
+obj-y += grsec_disabled.o
+endif
+
+include $(TOPDIR)/Rules.make
diff -urN --exclude-from=diff-exclude linux-2.4.26/grsecurity/obsd_rand.c linux-2.4.26-leo/grsecurity/obsd_rand.c
--- linux-2.4.26/grsecurity/obsd_rand.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/grsecurity/obsd_rand.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,185 @@
+
+/*
+ * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
+ * 
+ * Version 1.89, last modified 19-Sep-99
+ *    
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
+ * All rights reserved.
+ *
+ * Copyright 1998 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ * Theo de Raadt <deraadt@openbsd.org> came up with the idea of using
+ * such a mathematical system to generate more random (yet non-repeating)
+ * ids to solve the resolver/named problem.  But Niels designed the
+ * actual system based on the constraints.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/smp_lock.h>
+#include <linux/random.h>
+#include <linux/grsecurity.h>
+
+#define RU_OUT 180
+#define RU_MAX 30000
+#define RU_GEN 2
+#define RU_N 32749
+#define RU_AGEN 7
+#define RU_M 31104
+#define PFAC_N 3
+static const __u16 pfacts[PFAC_N] = { 2, 3, 2729 };	/* prime factors of RU_N - 1 */
+
+static __u16 ru_x;
+static __u16 ru_seed, ru_seed2;
+static __u16 ru_a, ru_b;
+static __u16 ru_g;
+static __u16 ru_counter = 0;
+static __u16 ru_msb = 0;
+static unsigned long ru_reseed = 0;
+static __u32 tmp;
+
+#define TCP_RNDISS_ROUNDS	15
+#define TCP_RNDISS_OUT		7200
+#define TCP_RNDISS_MAX		30000
+
+static __u8 tcp_rndiss_sbox[128];
+static __u16 tcp_rndiss_msb;
+static __u16 tcp_rndiss_cnt;
+static unsigned long tcp_rndiss_reseed;
+
+static __u16 pmod(__u16, __u16, __u16);
+static void ip_initid(void);
+__u16 ip_randomid(void);
+
+static __u16
+pmod(__u16 gen, __u16 exp, __u16 mod)
+{
+	__u16 s, t, u;
+
+	s = 1;
+	t = gen;
+	u = exp;
+
+	while (u) {
+		if (u & 1)
+			s = (s * t) % mod;
+		u >>= 1;
+		t = (t * t) % mod;
+	}
+	return (s);
+}
+
+static void
+ip_initid(void)
+{
+	__u16 j, i;
+	int noprime = 1;
+
+	ru_x = ((tmp = get_random_long()) & 0xFFFF) % RU_M;
+
+	ru_seed = (tmp >> 16) & 0x7FFF;
+	ru_seed2 = get_random_long() & 0x7FFF;
+
+	ru_b = ((tmp = get_random_long()) & 0xfffe) | 1;
+	ru_a = pmod(RU_AGEN, (tmp >> 16) & 0xfffe, RU_M);
+	while (ru_b % 3 == 0)
+		ru_b += 2;
+
+	j = (tmp = get_random_long()) % RU_N;
+	tmp = tmp >> 16;
+
+	while (noprime) {
+		for (i = 0; i < PFAC_N; i++)
+			if (j % pfacts[i] == 0)
+				break;
+
+		if (i >= PFAC_N)
+			noprime = 0;
+		else
+			j = (j + 1) % RU_N;
+	}
+
+	ru_g = pmod(RU_GEN, j, RU_N);
+	ru_counter = 0;
+
+	ru_reseed = xtime.tv_sec + RU_OUT;
+	ru_msb = ru_msb == 0x8000 ? 0 : 0x8000;
+}
+
+__u16
+ip_randomid(void)
+{
+	int i, n;
+
+	if (ru_counter >= RU_MAX || time_after(xtime.tv_sec, ru_reseed))
+		ip_initid();
+
+	if (!tmp)
+		tmp = get_random_long();
+
+	n = tmp & 0x3;
+	tmp = tmp >> 2;
+	if (ru_counter + n >= RU_MAX)
+		ip_initid();
+	for (i = 0; i <= n; i++)
+		ru_x = (ru_a * ru_x + ru_b) % RU_M;
+	ru_counter += i;
+
+	return ((ru_seed ^ pmod(ru_g, ru_seed2 ^ ru_x, RU_N)) | ru_msb);
+}
+
+__u16
+tcp_rndiss_encrypt(__u16 val)
+{
+	__u16 sum = 0, i;
+
+	for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
+		sum += 0x79b9;
+		val ^= ((__u16) tcp_rndiss_sbox[(val ^ sum) & 0x7f]) << 7;
+		val = ((val & 0xff) << 7) | (val >> 8);
+	}
+
+	return val;
+}
+
+static void
+tcp_rndiss_init(void)
+{
+	get_random_bytes(tcp_rndiss_sbox, sizeof (tcp_rndiss_sbox));
+	tcp_rndiss_reseed = xtime.tv_sec + TCP_RNDISS_OUT;
+	tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
+	tcp_rndiss_cnt = 0;
+}
+
+__u32
+ip_randomisn(void)
+{
+	if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
+	    time_after(xtime.tv_sec, tcp_rndiss_reseed))
+		tcp_rndiss_init();
+
+	return (((tcp_rndiss_encrypt(tcp_rndiss_cnt++) |
+		  tcp_rndiss_msb) << 16) | (get_random_long() & 0x7fff));
+}
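pmod() is textbook square-and-multiply: it computes gen^exp mod m in
O(log exp) multiplies by decomposing the exponent over its bits, and the
intermediate products fit comfortably in int since both factors stay
below RU_N = 32749. Note also that pfacts[] is exactly the prime
factorization of RU_N - 1 = 32748 = 2^2 * 3 * 2729, so ip_initid() picks
an exponent j coprime to the group order and ru_g = RU_GEN^j stays a
generator mod the prime RU_N (assuming RU_GEN itself is one). A quick
self-contained check of the algorithm against naive repeated
multiplication (hypothetical test harness, not part of the patch):

#include <assert.h>

typedef unsigned short u16;

static u16 pmod_sm(u16 gen, u16 exp, u16 mod)	/* same algorithm as pmod() */
{
	unsigned s = 1, t = gen, u = exp;

	while (u) {
		if (u & 1)
			s = (s * t) % mod;
		u >>= 1;
		t = (t * t) % mod;
	}
	return (u16) s;
}

static u16 pmod_naive(u16 gen, u16 exp, u16 mod)
{
	unsigned r = 1;

	while (exp--)
		r = (r * gen) % mod;
	return (u16) r;
}

int main(void)
{
	u16 e;

	for (e = 0; e < 1000; e++)
		assert(pmod_sm(7, e, 31104) == pmod_naive(7, e, 31104));
	return 0;
}
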
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-alpha/a.out.h linux-2.4.26-leo/include/asm-alpha/a.out.h
--- linux-2.4.26/include/asm-alpha/a.out.h	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/include/asm-alpha/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -98,7 +98,7 @@
 	set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000 \
 			   ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
 
-#define STACK_TOP \
+#define __STACK_TOP \
   (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
 
 #endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-alpha/elf.h linux-2.4.26-leo/include/asm-alpha/elf.h
--- linux-2.4.26/include/asm-alpha/elf.h	2004-02-20 14:11:45.000000000 +0000
+++ linux-2.4.26-leo/include/asm-alpha/elf.h	2004-06-22 20:53:47.000000000 +0100
@@ -41,6 +41,18 @@
 
 #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)        ((tsk)->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LSB(tsk)         PAGE_SHIFT
+#define PAX_DELTA_MMAP_LEN(tsk)         ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_EXEC_LSB(tsk)         PAGE_SHIFT
+#define PAX_DELTA_EXEC_LEN(tsk)         ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LSB(tsk)        PAGE_SHIFT
+#define PAX_DELTA_STACK_LEN(tsk)        ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
+
 /* $0 is set by ld.so to a pointer to a function which might be 
    registered using atexit.  This provides a mean for the dynamic
    linker to call DT_FINI functions for shared libraries that have
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-alpha/mman.h linux-2.4.26-leo/include/asm-alpha/mman.h
--- linux-2.4.26/include/asm-alpha/mman.h	2000-03-16 22:07:09.000000000 +0000
+++ linux-2.4.26-leo/include/asm-alpha/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -24,6 +24,10 @@
 #define MAP_LOCKED	0x8000		/* lock the mapping */
 #define MAP_NORESERVE	0x10000		/* don't check for reservations */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+#define MAP_MIRROR	0x20000
+#endif
+
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_SYNC		2		/* synchronous memory sync */
 #define MS_INVALIDATE	4		/* invalidate the caches */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-alpha/pgtable.h linux-2.4.26-leo/include/asm-alpha/pgtable.h
--- linux-2.4.26/include/asm-alpha/pgtable.h	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/include/asm-alpha/pgtable.h	2004-06-22 20:53:47.000000000 +0100
@@ -96,6 +96,17 @@
 #define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+#define PAGE_COPY_NOEXEC      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+#define PAGE_SHARED_NOEXEC    PAGE_SHARED
+#define PAGE_COPY_NOEXEC      PAGE_COPY
+#define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/a.out.h linux-2.4.26-leo/include/asm-i386/a.out.h
--- linux-2.4.26/include/asm-i386/a.out.h	1995-06-16 19:33:06.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -19,7 +19,11 @@
 
 #ifdef __KERNEL__
 
-#define STACK_TOP	TASK_SIZE
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+#define __STACK_TOP ((current->flags & PF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE)
+#else
+#define __STACK_TOP	TASK_SIZE
+#endif
 
 #endif
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/desc.h linux-2.4.26-leo/include/asm-i386/desc.h
--- linux-2.4.26/include/asm-i386/desc.h	2004-02-20 14:11:45.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/desc.h	2004-06-22 20:53:47.000000000 +0100
@@ -46,7 +46,8 @@
 };
 
 extern struct desc_struct gdt_table[];
-extern struct desc_struct *idt, *gdt;
+extern struct desc_struct gdt_table2[];
+extern struct desc_struct *idt, *gdt, *gdt2;
 
 struct Xgt_desc_struct {
 	unsigned short size;
@@ -55,6 +56,7 @@
 
 #define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
 #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
+#define gdt_descr2 (*(struct Xgt_desc_struct *)((char *)&gdt2 - 2))
 
 #define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3))
 
@@ -64,16 +66,20 @@
  * This is the ldt that every process will get unless we need
  * something other than this.
  */
-extern struct desc_struct default_ldt[];
+extern const struct desc_struct default_ldt[];
 extern void set_intr_gate(unsigned int irq, void * addr);
-extern void set_ldt_desc(unsigned int n, void *addr, unsigned int size);
-extern void set_tss_desc(unsigned int n, void *addr);
+extern void set_ldt_desc(unsigned int n, const void *addr, unsigned int size);
+extern void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size);
+extern void set_tss_desc(unsigned int n, const void *addr);
 
 static inline void clear_LDT(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
+	preempt_disable();
+	cpu = smp_processor_id();
 	set_ldt_desc(cpu, &default_ldt[0], 5);
 	__load_LDT(cpu);
+	preempt_enable();
 }
 
 /*
@@ -82,7 +88,7 @@
 static inline void load_LDT (mm_context_t *pc)
 {
 	int cpu = smp_processor_id();
-	void *segments = pc->ldt;
+	const void *segments = pc->ldt;
 	int count = pc->size;
 
 	if (!count) {
@@ -94,6 +100,34 @@
 	__load_LDT(cpu);
 }
 
+static inline void _load_LDT (mm_context_t *pc)
+{
+	int cpu = smp_processor_id();
+	const void *segments = pc->ldt;
+	int count = pc->size;
+
+	if (!count) {
+		segments = &default_ldt[0];
+		count = 5;
+	}
+
+	__set_ldt_desc(cpu, segments, count);
+	__load_LDT(cpu);
+}
+
+#define pax_open_kernel(flags, cr3)		\
+do {						\
+	local_irq_save(flags);			\
+	asm("movl %%cr3,%0":"=r" (cr3));	\
+	load_cr3(kernexec_pg_dir);		\
+} while(0)
+
+#define pax_close_kernel(flags, cr3)		\
+do {						\
+	asm("movl %0,%%cr3": :"r" (cr3));	\
+	local_irq_restore(flags);		\
+} while(0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
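pax_open_kernel()/pax_close_kernel() above implement the KERNEXEC write
window: open saves the active page directory and switches CR3 to
kernexec_pg_dir (presumably a mapping in which the normally read-only
kernel pages are writable) with interrupts disabled, and close restores
both. A hedged usage sketch -- the surrounding function is hypothetical,
the real call sites live elsewhere in the patch:

	unsigned long flags, cr3;

	pax_open_kernel(flags, cr3);
	/* ... modify otherwise read-only kernel data here ... */
	pax_close_kernel(flags, cr3);
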
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/elf.h linux-2.4.26-leo/include/asm-i386/elf.h
--- linux-2.4.26/include/asm-i386/elf.h	2004-05-26 23:43:46.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/elf.h	2004-06-22 21:05:29.000000000 +0100
@@ -55,7 +55,22 @@
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+#define ELF_ET_DYN_BASE		((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3*2:TASK_SIZE/3*2)
+#else
 #define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)	0x08048000UL
+
+#define PAX_DELTA_MMAP_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_MMAP_LEN(tsk)		16
+#define PAX_DELTA_EXEC_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_EXEC_LEN(tsk)		16
+#define PAX_DELTA_STACK_LSB(tsk)	PAGE_SHIFT  
+#define PAX_DELTA_STACK_LEN(tsk)	((tsk)->flags & PF_PAX_SEGMEXEC ? 15 : 16)
+#endif
 
 /* Wow, the "main" arch needs arch dependent functions too.. :) */
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/hardirq.h linux-2.4.26-leo/include/asm-i386/hardirq.h
--- linux-2.4.26/include/asm-i386/hardirq.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/hardirq.h	2004-06-22 21:02:57.000000000 +0100
@@ -19,12 +19,16 @@
 
 /*
  * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * or hardware interrupt processing?  Note the preempt check,
+ * this is both a bugfix and an optimization.  If we are
+ * preemptible, we cannot be in an interrupt.
  */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+#define in_interrupt() (preempt_is_disabled() && \
+	({unsigned long __cpu = smp_processor_id(); \
+	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
 
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define in_irq() (preempt_is_disabled() && \
+        (local_irq_count(smp_processor_id()) != 0))
 
 #ifndef CONFIG_SMP
 
@@ -36,6 +40,8 @@
 
 #define synchronize_irq()	barrier()
 
+#define release_irqlock(cpu)	do { } while (0)
+
 #else
 
 #include <asm/atomic.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/highmem.h linux-2.4.26-leo/include/asm-i386/highmem.h
--- linux-2.4.26/include/asm-i386/highmem.h	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/highmem.h	2004-06-22 21:02:58.000000000 +0100
@@ -91,6 +91,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -112,8 +113,10 @@
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIXADDR_START) // FIXME
+	if (vaddr < FIXADDR_START) { // FIXME
+		preempt_enable();
 		return;
+	}
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		out_of_line_bug();
@@ -125,6 +128,8 @@
 	pte_clear(kmap_pte-idx);
 	__flush_tlb_one(vaddr);
 #endif
+
+	preempt_enable();
 }
 
 #endif /* __KERNEL__ */
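
The disable/enable pairing above makes the usual 2.4 atomic-kmap pattern
preempt-safe; a sketch, assuming the standard KM_USER0 slot:

static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables preemption */

	memset(vaddr, 0, PAGE_SIZE);	/* must not sleep in this window */
	kunmap_atomic(vaddr, KM_USER0);	/* re-enables preemption */
}

The fixmap slot is per-cpu, so the task must not migrate between the two
calls -- exactly what the added preempt_disable()/preempt_enable() guarantees.
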
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/hw_irq.h linux-2.4.26-leo/include/asm-i386/hw_irq.h
--- linux-2.4.26/include/asm-i386/hw_irq.h	2004-02-20 13:06:53.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/hw_irq.h	2004-06-22 21:02:57.000000000 +0100
@@ -95,6 +95,18 @@
 #define __STR(x) #x
 #define STR(x) __STR(x)
 
+#define GET_CURRENT \
+	"movl %esp, %ebx\n\t" \
+	"andl $-8192, %ebx\n\t"
+
+#ifdef CONFIG_PREEMPT
+#define BUMP_LOCK_COUNT \
+	GET_CURRENT \
+	"incl 4(%ebx)\n\t"
+#else
+#define BUMP_LOCK_COUNT
+#endif
+
 #define SAVE_ALL \
 	"cld\n\t" \
 	"pushl %es\n\t" \
@@ -108,15 +120,12 @@
 	"pushl %ebx\n\t" \
 	"movl $" STR(__KERNEL_DS) ",%edx\n\t" \
 	"movl %edx,%ds\n\t" \
-	"movl %edx,%es\n\t"
+	"movl %edx,%es\n\t" \
+	BUMP_LOCK_COUNT
 
 #define IRQ_NAME2(nr) nr##_interrupt(void)
 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
 
-#define GET_CURRENT \
-	"movl %esp, %ebx\n\t" \
-	"andl $-8192, %ebx\n\t"
-
 /*
  *	SMP has a few special interrupts for IPI messages
  */
@@ -128,6 +137,7 @@
 asmlinkage void x(void); \
 asmlinkage void call_##x(void); \
 __asm__( \
+"\n .text" \
 "\n"__ALIGN_STR"\n" \
 SYMBOL_NAME_STR(x) ":\n\t" \
 	"pushl $"#v"-256\n\t" \
@@ -141,6 +151,7 @@
 asmlinkage void x(struct pt_regs * regs); \
 asmlinkage void call_##x(void); \
 __asm__( \
+"\n .text" \
 "\n"__ALIGN_STR"\n" \
 SYMBOL_NAME_STR(x) ":\n\t" \
 	"pushl $"#v"-256\n\t" \
@@ -155,6 +166,7 @@
 #define BUILD_COMMON_IRQ() \
 asmlinkage void call_do_IRQ(void); \
 __asm__( \
+	"\n .text" \
 	"\n" __ALIGN_STR"\n" \
 	"common_interrupt:\n\t" \
 	SAVE_ALL \
@@ -175,6 +187,7 @@
 #define BUILD_IRQ(nr) \
 asmlinkage void IRQ_NAME(nr); \
 __asm__( \
+"\n .text" \
 "\n"__ALIGN_STR"\n" \
 SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
 	"pushl $"#nr"-256\n\t" \
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/i387.h linux-2.4.26-leo/include/asm-i386/i387.h
--- linux-2.4.26/include/asm-i386/i387.h	2004-02-20 13:10:24.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/i387.h	2004-06-22 21:07:49.000000000 +0100
@@ -12,6 +12,7 @@
 #define __ASM_I386_I387_H
 
 #include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -24,7 +25,7 @@
 extern void restore_fpu( struct task_struct *tsk );
 
 extern void kernel_fpu_begin(void);
-#define kernel_fpu_end() stts()
+#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
 
 
 #define unlazy_fpu( tsk ) do { \
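
With kernel_fpu_end() now also dropping the preempt count, in-kernel FPU use
follows the usual bracketed pattern; a sketch, assuming the matching
preempt_disable() was added to kernel_fpu_begin() in the arch code:

static void example_fpu_work(void *dst, const void *src, size_t len)
{
	kernel_fpu_begin();	/* save user FPU state, preemption off */
	memcpy(dst, src, len);	/* stands in for SSE/MMX-accelerated work */
	kernel_fpu_end();	/* stts() + preempt_enable() */
}

Being preempted between begin and end would let another task clobber the FPU
registers, which is what the new pairing prevents.
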
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/mman.h linux-2.4.26-leo/include/asm-i386/mman.h
--- linux-2.4.26/include/asm-i386/mman.h	2000-03-15 01:45:20.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -18,6 +18,10 @@
 #define MAP_LOCKED	0x2000		/* pages are locked */
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+#define MAP_MIRROR	0x8000
+#endif
+
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
 #define MS_SYNC		4		/* synchronous memory sync */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/page.h linux-2.4.26-leo/include/asm-i386/page.h
--- linux-2.4.26/include/asm-i386/page.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/page.h	2004-06-22 21:02:57.000000000 +0100
@@ -80,6 +80,12 @@
 
 #define __PAGE_OFFSET		(0xC0000000)
 
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+#define __KERNEL_TEXT_OFFSET	(0xC0400000)
+#else
+#define __KERNEL_TEXT_OFFSET	(0)
+#endif
+
 /*
  * This much address space is reserved for vmalloc() and iomap()
  * as well as fixmap mappings.
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/pgalloc.h linux-2.4.26-leo/include/asm-i386/pgalloc.h
--- linux-2.4.26/include/asm-i386/pgalloc.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/pgalloc.h	2004-06-22 21:02:57.000000000 +0100
@@ -14,6 +14,9 @@
 #define pmd_populate(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 
+#define pmd_populate_kernel(mm, pmd, pte) \
+		set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
 /*
  * Allocate and free page tables.
  */
@@ -75,20 +78,26 @@
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = pgd_quicklist) != NULL) {
 		pgd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
-	} else
+		preempt_enable();
+	} else {
+		preempt_enable();
 		ret = (unsigned long *)get_pgd_slow();
+	}
 	return (pgd_t *)ret;
 }
 
 static inline void free_pgd_fast(pgd_t *pgd)
 {
+	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 static inline void free_pgd_slow(pgd_t *pgd)
@@ -119,19 +128,23 @@
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
 		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = ret[1];
 		pgtable_cache_size--;
 	}
+	preempt_enable();
 	return (pte_t *)ret;
 }
 
 static inline void pte_free_fast(pte_t *pte)
 {
+	preempt_disable();
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 static __inline__ void pte_free_slow(pte_t *pte)
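
The quicklists touched above live in per-cpu data, so the real invariant is
"no migration between reading the list head and writing it back"; a
stripped-down sketch of the same pattern with a hypothetical per-cpu
freelist:

static unsigned long *example_freelist[NR_CPUS];

static void example_push(unsigned long *node)
{
	preempt_disable();	/* pin the task to this cpu */
	*node = (unsigned long)example_freelist[smp_processor_id()];
	example_freelist[smp_processor_id()] = node;
	preempt_enable();
}

Without the pair, a preemption between the load and the store could move the
task to another cpu and corrupt both cpus' lists.
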
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/pgtable.h linux-2.4.26-leo/include/asm-i386/pgtable.h
--- linux-2.4.26/include/asm-i386/pgtable.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/pgtable.h	2004-06-22 21:02:57.000000000 +0100
@@ -22,6 +22,11 @@
 #endif
 
 extern pgd_t swapper_pg_dir[1024];
+
+#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC
+extern pgd_t kernexec_pg_dir[1024];
+#endif
+
 extern void paging_init(void);
 
 /* Caches aren't brain-dead on the intel. */
@@ -205,6 +210,16 @@
 #define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define PAGE_SHARED_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 
+#else
+#define PAGE_SHARED_NOEXEC    PAGE_SHARED
+#define PAGE_COPY_NOEXEC      PAGE_COPY
+#define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 #define __PAGE_KERNEL \
 	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define __PAGE_KERNEL_NOCACHE \
@@ -237,18 +252,18 @@
  * This is the closest we can get..
  */
 #define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
+#define __P001  PAGE_READONLY_NOEXEC
+#define __P010  PAGE_COPY_NOEXEC
+#define __P011  PAGE_COPY_NOEXEC
 #define __P100	PAGE_READONLY
 #define __P101	PAGE_READONLY
 #define __P110	PAGE_COPY
 #define __P111	PAGE_COPY
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
+#define __S001  PAGE_READONLY_NOEXEC
+#define __S010  PAGE_SHARED_NOEXEC
+#define __S011  PAGE_SHARED_NOEXEC
 #define __S100	PAGE_READONLY
 #define __S101	PAGE_READONLY
 #define __S110	PAGE_SHARED
@@ -324,7 +339,7 @@
 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 /* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
 #define __pgd_offset(address) pgd_index(address)
 
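
The __P/__S tables above are what mm code folds into protection_map[]; the
index is built from the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits, so under
PAGEEXEC a PROT_READ|PROT_WRITE mapping picks PAGE_COPY_NOEXEC while adding
PROT_EXEC selects the executable PAGE_COPY.  A sketch of the lookup (the
PROT_* values happen to match the low three index bits):

static pgprot_t example_vm_prot(unsigned long prot, int shared)
{
	extern pgprot_t protection_map[16];

	return protection_map[(prot & 0x7) | (shared ? 0x8 : 0)];
}
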
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/processor.h linux-2.4.26-leo/include/asm-i386/processor.h
--- linux-2.4.26/include/asm-i386/processor.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/processor.h	2004-06-22 21:02:57.000000000 +0100
@@ -261,10 +261,19 @@
  */
 #define TASK_SIZE	(PAGE_OFFSET)
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+#define SEGMEXEC_TASK_SIZE	((PAGE_OFFSET) / 2)
+#endif
+
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+#define TASK_UNMAPPED_BASE	((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3:TASK_SIZE/3)
+#else
 #define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
+#endif
 
 /*
  * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/smplock.h linux-2.4.26-leo/include/asm-i386/smplock.h
--- linux-2.4.26/include/asm-i386/smplock.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/smplock.h	2004-06-22 21:02:57.000000000 +0100
@@ -14,7 +14,15 @@
 extern spinlock_cacheline_t kernel_flag_cacheline;  
 #define kernel_flag kernel_flag_cacheline.lock      
 
+#ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
+#else
+#ifdef CONFIG_PREEMPT
+#define kernel_locked()		preempt_get_count()
+#else
+#define kernel_locked()		1
+#endif
+#endif
 
 /*
  * Release global kernel lock and global interrupt lock
@@ -46,6 +54,11 @@
  */
 static __inline__ void lock_kernel(void)
 {
+#ifdef CONFIG_PREEMPT
+	if (current->lock_depth == -1)
+		spin_lock(&kernel_flag);
+	++current->lock_depth;
+#else
 #if 1
 	if (!++current->lock_depth)
 		spin_lock(&kernel_flag);
@@ -58,6 +71,7 @@
 		:"=m" (__dummy_lock(&kernel_flag)),
 		 "=m" (current->lock_depth));
 #endif
+#endif
 }
 
 static __inline__ void unlock_kernel(void)
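
A sketch of the resulting BKL recursion semantics under CONFIG_PREEMPT:
lock_depth starts at -1 and only the outermost acquisition touches
kernel_flag.  The ordering matters because the preemptive scheduler uses
lock_depth to drop and retake the BKL across a preemption, so the lock must
be held before the depth is raised:

static void example_nested_bkl(void)
{
	lock_kernel();		/* depth -1 -> 0, takes kernel_flag */
	lock_kernel();		/* depth  0 -> 1, no lock operation */
	unlock_kernel();	/* depth  1 -> 0, still held */
	unlock_kernel();	/* depth  0 -> -1, drops kernel_flag */
}
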
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/softirq.h linux-2.4.26-leo/include/asm-i386/softirq.h
--- linux-2.4.26/include/asm-i386/softirq.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/softirq.h	2004-06-22 21:02:57.000000000 +0100
@@ -5,14 +5,15 @@
 #include <asm/hardirq.h>
 
 #define __cpu_bh_enable(cpu) \
-		do { barrier(); local_bh_count(cpu)--; } while (0)
+		do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
 #define cpu_bh_disable(cpu) \
-		do { local_bh_count(cpu)++; barrier(); } while (0)
+		do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
 
 #define local_bh_disable()	cpu_bh_disable(smp_processor_id())
 #define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
 
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+#define in_softirq() (preempt_is_disabled() && \
+			(local_bh_count(smp_processor_id()) != 0))
 
 /*
  * NOTE: this assembly code assumes:
@@ -22,7 +23,7 @@
  * If you change the offsets in irq_stat then you have to
  * update this code as well.
  */
-#define local_bh_enable()						\
+#define _local_bh_enable()						\
 do {									\
 	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
 									\
@@ -45,4 +46,6 @@
 		/* no registers clobbered */ );				\
 } while (0)
 
+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
+
 #endif	/* __ASM_SOFTIRQ_H */
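
A sketch of the resulting bottom-half exclusion pattern: after this change
local_bh_disable() implies preempt_disable(), so data shared with softirqs
stays cpu-local for the whole critical section:

static void example_touch_bh_data(void)
{
	local_bh_disable();	/* preempt_disable() + local_bh_count++ */
	/* ... modify per-cpu state also used from softirq context ... */
	local_bh_enable();	/* runs pending softirqs, then preempt_enable() */
}
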
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/spinlock.h linux-2.4.26-leo/include/asm-i386/spinlock.h
--- linux-2.4.26/include/asm-i386/spinlock.h	2004-02-20 13:06:53.000000000 +0000
+++ linux-2.4.26-leo/include/asm-i386/spinlock.h	2004-06-22 21:02:57.000000000 +0100
@@ -77,7 +77,7 @@
 		:"=m" (lock->lock) : : "memory"
 
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 #if SPINLOCK_DEBUG
 	if (lock->magic != SPINLOCK_MAGIC)
@@ -97,7 +97,7 @@
 		:"=q" (oldval), "=m" (lock->lock) \
 		:"0" (oldval) : "memory"
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 	char oldval = 1;
 #if SPINLOCK_DEBUG
@@ -113,7 +113,7 @@
 
 #endif
 
-static inline int spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(spinlock_t *lock)
 {
 	char oldval;
 	__asm__ __volatile__(
@@ -123,7 +123,7 @@
 	return oldval > 0;
 }
 
-static inline void spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(spinlock_t *lock)
 {
 #if SPINLOCK_DEBUG
 	__label__ here;
@@ -179,7 +179,7 @@
  */
 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
 
-static inline void read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(rwlock_t *rw)
 {
 #if SPINLOCK_DEBUG
 	if (rw->magic != RWLOCK_MAGIC)
@@ -188,7 +188,7 @@
 	__build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(rwlock_t *rw)
 {
 #if SPINLOCK_DEBUG
 	if (rw->magic != RWLOCK_MAGIC)
@@ -197,10 +197,10 @@
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
 
-static inline int write_trylock(rwlock_t *lock)
+static inline int _raw_write_trylock(rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
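
The _raw_ renaming above is the arch half of the preempt patch; the generic
wrappers (in linux/spinlock.h, not part of this hunk) then look roughly like
this, so that every held spinlock implies a non-zero preempt count:

#define spin_lock(lock) \
do { \
	preempt_disable(); \
	_raw_spin_lock(lock); \
} while (0)

#define spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable(); \
} while (0)
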
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-i386/system.h linux-2.4.26-leo/include/asm-i386/system.h
--- linux-2.4.26/include/asm-i386/system.h	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/include/asm-i386/system.h	2004-06-22 21:02:57.000000000 +0100
@@ -12,6 +12,8 @@
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
+void pax_switch_segments(struct task_struct *);
+
 #define prepare_to_switch()	do { } while(0)
 #define switch_to(prev,next,last) do {					\
 	asm volatile("pushl %%esi\n\t"					\
@@ -323,6 +325,13 @@
 #define __save_and_cli(x)	do { __save_flags(x); __cli(); } while(0);
 #define __save_and_sti(x)	do { __save_flags(x); __sti(); } while(0);
 
+#define irqs_disabled()			\
+({					\
+	unsigned long flags;		\
+	__save_flags(flags);		\
+	!(flags & (1<<9));		\
+})
+
 /* For spinlocks etc */
 #if 0
 #define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
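
Bit 9 of EFLAGS is IF, so irqs_disabled() is true exactly when interrupts are
off.  A sketch of the kind of sanity check it enables, illustrative only:

static void example_might_sleep(void)
{
	if (in_interrupt() || irqs_disabled())
		BUG();		/* sleeping here would deadlock */
	/* ... blocking work ... */
}
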
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-mips/smplock.h linux-2.4.26-leo/include/asm-mips/smplock.h
--- linux-2.4.26/include/asm-mips/smplock.h	2004-02-20 14:11:46.000000000 +0000
+++ linux-2.4.26-leo/include/asm-mips/smplock.h	2004-05-26 23:34:34.000000000 +0100
@@ -8,12 +8,21 @@
 #ifndef __ASM_SMPLOCK_H
 #define __ASM_SMPLOCK_H
 
+#include <linux/config.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
 extern spinlock_t kernel_flag;
 
+#ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
+#else
+#ifdef CONFIG_PREEMPT
+#define kernel_locked()         preempt_get_count()
+#else
+#define kernel_locked()         1
+#endif
+#endif
 
 /*
  * Release global kernel lock and global interrupt lock
@@ -45,8 +54,14 @@
  */
 static __inline__ void lock_kernel(void)
 {
+#ifdef CONFIG_PREEMPT
+	if (current->lock_depth == -1)
+		spin_lock(&kernel_flag);
+	++current->lock_depth;
+#else
 	if (!++current->lock_depth)
 		spin_lock(&kernel_flag);
+#endif
 }
 
 static __inline__ void unlock_kernel(void)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-mips/softirq.h linux-2.4.26-leo/include/asm-mips/softirq.h
--- linux-2.4.26/include/asm-mips/softirq.h	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/include/asm-mips/softirq.h	2004-05-26 23:34:34.000000000 +0100
@@ -15,6 +15,7 @@
 
 static inline void cpu_bh_disable(int cpu)
 {
+	preempt_disable();
 	local_bh_count(cpu)++;
 	barrier();
 }
@@ -23,6 +24,7 @@
 {
 	barrier();
 	local_bh_count(cpu)--;
+	preempt_enable();
 }
 
 
@@ -36,6 +38,7 @@
 	cpu = smp_processor_id();				\
 	if (!--local_bh_count(cpu) && softirq_pending(cpu))	\
 		do_softirq();					\
+	preempt_enable();                                       \
 } while (0)
 
 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-mips/system.h linux-2.4.26-leo/include/asm-mips/system.h
--- linux-2.4.26/include/asm-mips/system.h	2004-02-20 14:11:46.000000000 +0000
+++ linux-2.4.26-leo/include/asm-mips/system.h	2004-05-26 23:34:34.000000000 +0100
@@ -333,4 +333,18 @@
 #define die_if_kernel(msg, regs)					\
 	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
 
+extern __inline__ int intr_on(void)
+{
+	unsigned long flags;
+	save_flags(flags);
+	return flags & 1;
+}
+
+extern __inline__ int intr_off(void)
+{
+	return ! intr_on();
+}
+
+#define irqs_disabled()	intr_off()
+
 #endif /* _ASM_SYSTEM_H */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-parisc/a.out.h linux-2.4.26-leo/include/asm-parisc/a.out.h
--- linux-2.4.26/include/asm-parisc/a.out.h	2000-12-05 20:29:39.000000000 +0000
+++ linux-2.4.26-leo/include/asm-parisc/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -22,7 +22,7 @@
 /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
  * prumpf */
 
-#define STACK_TOP	TASK_SIZE
+#define __STACK_TOP	TASK_SIZE
 
 #endif
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-parisc/elf.h linux-2.4.26-leo/include/asm-parisc/elf.h
--- linux-2.4.26/include/asm-parisc/elf.h	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/include/asm-parisc/elf.h	2004-06-22 20:53:47.000000000 +0100
@@ -135,6 +135,17 @@
 
 #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
 
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)        0x10000UL
+
+#define PAX_DELTA_MMAP_LSB(tsk)         PAGE_SHIFT
+#define PAX_DELTA_MMAP_LEN(tsk)         16
+#define PAX_DELTA_EXEC_LSB(tsk)         PAGE_SHIFT
+#define PAX_DELTA_EXEC_LEN(tsk)         16
+#define PAX_DELTA_STACK_LSB(tsk)        PAGE_SHIFT
+#define PAX_DELTA_STACK_LEN(tsk)        16
+#endif
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-parisc/mman.h linux-2.4.26-leo/include/asm-parisc/mman.h
--- linux-2.4.26/include/asm-parisc/mman.h	2000-12-05 20:29:39.000000000 +0000
+++ linux-2.4.26-leo/include/asm-parisc/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -18,6 +18,10 @@
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 #define MAP_GROWSDOWN	0x8000		/* stack-like segment */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+#define MAP_MIRROR	0x0400
+#endif
+
 #define MS_SYNC		1		/* synchronous memory sync */
 #define MS_ASYNC	2		/* sync memory asynchronously */
 #define MS_INVALIDATE	4		/* invalidate the caches */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-parisc/pgtable.h linux-2.4.26-leo/include/asm-parisc/pgtable.h
--- linux-2.4.26/include/asm-parisc/pgtable.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-parisc/pgtable.h	2004-06-22 20:53:47.000000000 +0100
@@ -167,6 +167,17 @@
 #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#else
+#define PAGE_SHARED_NOEXEC    PAGE_SHARED
+#define PAGE_COPY_NOEXEC      PAGE_COPY
+#define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/a.out.h linux-2.4.26-leo/include/asm-ppc/a.out.h
--- linux-2.4.26/include/asm-ppc/a.out.h	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -2,7 +2,7 @@
 #define __PPC_A_OUT_H__
 
 /* grabbed from the intel stuff  */
-#define STACK_TOP TASK_SIZE
+#define __STACK_TOP TASK_SIZE
 
 
 struct exec
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/dma.h linux-2.4.26-leo/include/asm-ppc/dma.h
--- linux-2.4.26/include/asm-ppc/dma.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/dma.h	2004-05-26 23:34:34.000000000 +0100
@@ -11,6 +11,7 @@
 #include <linux/config.h>
 #include <asm/io.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <asm/system.h>
 
 /*
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/elf.h linux-2.4.26-leo/include/asm-ppc/elf.h
--- linux-2.4.26/include/asm-ppc/elf.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/elf.h	2004-06-22 20:53:47.000000000 +0100
@@ -46,6 +46,17 @@
 
 #define ELF_ET_DYN_BASE         (0x08000000)
 
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)	0x10000000UL
+
+#define PAX_DELTA_MMAP_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_MMAP_LEN(tsk)		15
+#define PAX_DELTA_EXEC_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_EXEC_LEN(tsk)		15
+#define PAX_DELTA_STACK_LSB(tsk)	PAGE_SHIFT
+#define PAX_DELTA_STACK_LEN(tsk)	15
+#endif
+
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE	4096
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/hardirq.h linux-2.4.26-leo/include/asm-ppc/hardirq.h
--- linux-2.4.26/include/asm-ppc/hardirq.h	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/hardirq.h	2004-05-26 23:34:34.000000000 +0100
@@ -31,10 +31,12 @@
  * Are we in an interrupt context? Either doing bottom half
  * or hardware interrupt processing?
  */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+#define in_interrupt() (preempt_is_disabled() && \
+	({ unsigned long __cpu = smp_processor_id(); \
+	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
 
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define in_irq() (preempt_is_disabled() && \
+	(local_irq_count(smp_processor_id()) != 0))
 
 #ifndef CONFIG_SMP
 
@@ -45,6 +47,7 @@
 #define hardirq_exit(cpu)	(local_irq_count(cpu)--)
 
 #define synchronize_irq()	do { } while (0)
+#define release_irqlock(cpu)	do { } while (0)
 
 #else /* CONFIG_SMP */
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/highmem.h linux-2.4.26-leo/include/asm-ppc/highmem.h
--- linux-2.4.26/include/asm-ppc/highmem.h	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/include/asm-ppc/highmem.h	2004-05-26 23:34:34.000000000 +0100
@@ -84,6 +84,7 @@
 	unsigned int idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -105,8 +106,10 @@
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < KMAP_FIX_BEGIN) // FIXME
+	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+		preempt_enable();
 		return;
+	}
 
 	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -118,6 +121,7 @@
 	pte_clear(kmap_pte+idx);
 	flush_tlb_page(0, vaddr);
 #endif
+	preempt_enable();
 }
 
 #endif /* __KERNEL__ */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/hw_irq.h linux-2.4.26-leo/include/asm-ppc/hw_irq.h
--- linux-2.4.26/include/asm-ppc/hw_irq.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/hw_irq.h	2004-05-26 23:34:34.000000000 +0100
@@ -20,6 +20,12 @@
 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
 #define __save_and_sti(flags) ({__save_flags(flags);__sti();})
 
+#define mfmsr()		({unsigned int rval; \
+			asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+
+#define irqs_disabled()	((mfmsr() & MSR_EE) == 0)
+
 extern void do_lost_interrupts(unsigned long);
 
 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/mman.h linux-2.4.26-leo/include/asm-ppc/mman.h
--- linux-2.4.26/include/asm-ppc/mman.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -19,6 +19,10 @@
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
 #define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+#define MAP_MIRROR	0x0200
+#endif
+
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
 #define MS_SYNC		4		/* synchronous memory sync */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/mmu_context.h linux-2.4.26-leo/include/asm-ppc/mmu_context.h
--- linux-2.4.26/include/asm-ppc/mmu_context.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/mmu_context.h	2004-05-26 23:34:34.000000000 +0100
@@ -155,6 +155,10 @@
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk, int cpu)
 {
+#ifdef CONFIG_PREEMPT
+	if (preempt_get_count() == 0)
+		BUG();
+#endif
 	tsk->thread.pgdir = next->pgd;
 	get_mmu_context(next);
 	set_context(next->context, next->pgd);
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/pgalloc.h linux-2.4.26-leo/include/asm-ppc/pgalloc.h
--- linux-2.4.26/include/asm-ppc/pgalloc.h	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/include/asm-ppc/pgalloc.h	2004-05-26 23:34:34.000000000 +0100
@@ -72,20 +72,25 @@
 {
         unsigned long *ret;
 
+	preempt_disable();
         if ((ret = pgd_quicklist) != NULL) {
                 pgd_quicklist = (unsigned long *)(*ret);
                 ret[0] = 0;
                 pgtable_cache_size--;
+		preempt_enable();
-        } else
+        } else {
+		preempt_enable();
                 ret = (unsigned long *)get_pgd_slow();
+        }
         return (pgd_t *)ret;
 }
 
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
+	preempt_disable();
         *(unsigned long **)pgd = pgd_quicklist;
         pgd_quicklist = (unsigned long *) pgd;
         pgtable_cache_size++;
+	preempt_enable();
 }
 
 extern __inline__ void free_pgd_slow(pgd_t *pgd)
@@ -124,19 +129,23 @@
 {
         unsigned long *ret;
 
+	preempt_disable();
         if ((ret = pte_quicklist) != NULL) {
                 pte_quicklist = (unsigned long *)(*ret);
                 ret[0] = 0;
                 pgtable_cache_size--;
 	}
+	preempt_enable();
         return (pte_t *)ret;
 }
 
 extern __inline__ void pte_free_fast(pte_t *pte)
 {
+	preempt_disable();
         *(unsigned long **)pte = pte_quicklist;
         pte_quicklist = (unsigned long *) pte;
         pgtable_cache_size++;
+	preempt_enable();
 }
 
 extern __inline__ void pte_free_slow(pte_t *pte)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/pgtable.h linux-2.4.26-leo/include/asm-ppc/pgtable.h
--- linux-2.4.26/include/asm-ppc/pgtable.h	2004-02-20 14:11:46.000000000 +0000
+++ linux-2.4.26-leo/include/asm-ppc/pgtable.h	2004-06-22 20:53:47.000000000 +0100
@@ -394,11 +394,21 @@
 
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x)
+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED)
+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+#endif
 
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
@@ -411,21 +421,21 @@
  * This is the closest we can get..
  */
 #define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_X
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY_X
-#define __P100	PAGE_READONLY
+#define __P001	PAGE_READONLY_NOEXEC
+#define __P010	PAGE_COPY_NOEXEC
+#define __P011	PAGE_COPY_NOEXEC
+#define __P100	PAGE_READONLY_X
 #define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY
+#define __P110	PAGE_COPY_X
 #define __P111	PAGE_COPY_X
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_X
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED_X
-#define __S100	PAGE_READONLY
+#define __S001	PAGE_READONLY_NOEXEC
+#define __S010	PAGE_SHARED_NOEXEC
+#define __S011	PAGE_SHARED_NOEXEC
+#define __S100	PAGE_READONLY_X
 #define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED
+#define __S110	PAGE_SHARED_X
 #define __S111	PAGE_SHARED_X
 
 #ifndef __ASSEMBLY__
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/smplock.h linux-2.4.26-leo/include/asm-ppc/smplock.h
--- linux-2.4.26/include/asm-ppc/smplock.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/smplock.h	2004-05-26 23:34:34.000000000 +0100
@@ -12,7 +12,15 @@
 
 extern spinlock_t kernel_flag;
 
+#ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
+#else
+#ifdef CONFIG_PREEMPT
+#define kernel_locked()		preempt_get_count()
+#else
+#define kernel_locked()		1
+#endif
+#endif
 
 /*
  * Release global kernel lock and global interrupt lock
@@ -44,8 +52,14 @@
  */
 static __inline__ void lock_kernel(void)
 {
+#ifdef CONFIG_PREEMPT
+	if (current->lock_depth == -1)
+		spin_lock(&kernel_flag);
+	++current->lock_depth;
+#else
 	if (!++current->lock_depth)
 		spin_lock(&kernel_flag);
+#endif
 }
 
 static __inline__ void unlock_kernel(void)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-ppc/softirq.h linux-2.4.26-leo/include/asm-ppc/softirq.h
--- linux-2.4.26/include/asm-ppc/softirq.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-ppc/softirq.h	2004-05-26 23:34:34.000000000 +0100
@@ -7,6 +7,7 @@
 
 #define local_bh_disable()			\
 do {						\
+	preempt_disable();			\
 	local_bh_count(smp_processor_id())++;	\
 	barrier();				\
 } while (0)
@@ -15,9 +16,10 @@
 do {						\
 	barrier();				\
 	local_bh_count(smp_processor_id())--;	\
+	preempt_enable();			\
 } while (0)
 
-#define local_bh_enable()				\
+#define _local_bh_enable()				\
 do {							\
 	if (!--local_bh_count(smp_processor_id())	\
 	    && softirq_pending(smp_processor_id())) {	\
@@ -25,7 +27,14 @@
 	}						\
 } while (0)
 
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
+#define local_bh_enable()			\
+do {						\
+	_local_bh_enable();			\
+	preempt_enable();			\
+} while (0)
+
+#define in_softirq() (preempt_is_disabled() && \
+			(local_bh_count(smp_processor_id()) != 0))
 
 #endif	/* __ASM_SOFTIRQ_H */
 #endif /* __KERNEL__ */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/a.out.h linux-2.4.26-leo/include/asm-sparc/a.out.h
--- linux-2.4.26/include/asm-sparc/a.out.h	2000-01-13 20:03:00.000000000 +0000
+++ linux-2.4.26-leo/include/asm-sparc/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -91,7 +91,7 @@
 
 #include <asm/page.h>
 
-#define STACK_TOP	(PAGE_OFFSET - PAGE_SIZE)
+#define __STACK_TOP	(PAGE_OFFSET - PAGE_SIZE)
 
 #endif /* __KERNEL__ */
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/elf.h linux-2.4.26-leo/include/asm-sparc/elf.h
--- linux-2.4.26/include/asm-sparc/elf.h	2000-07-12 03:02:37.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc/elf.h	2004-06-22 20:53:47.000000000 +0100
@@ -83,6 +83,18 @@
 
 #define ELF_ET_DYN_BASE         (0x08000000)
 
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)	0x10000UL
+
+#define PAX_DELTA_MMAP_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_MMAP_LEN(tsk)		16
+#define PAX_DELTA_EXEC_LSB(tsk)		PAGE_SHIFT
+#define PAX_DELTA_EXEC_LEN(tsk)		16
+#define PAX_DELTA_STACK_LSB(tsk)	PAGE_SHIFT
+#define PAX_DELTA_STACK_LEN(tsk)	16
+#endif
+
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This can NOT be done in userspace
    on Sparc.  */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/mman.h linux-2.4.26-leo/include/asm-sparc/mman.h
--- linux-2.4.26/include/asm-sparc/mman.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -24,6 +24,10 @@
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
 #define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+#define MAP_MIRROR	0x0400
+#endif
+
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
 #define MS_SYNC		4		/* synchronous memory sync */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/pgtable.h linux-2.4.26-leo/include/asm-sparc/pgtable.h
--- linux-2.4.26/include/asm-sparc/pgtable.h	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc/pgtable.h	2004-06-22 20:53:47.000000000 +0100
@@ -97,6 +97,13 @@
 BTFIXUPDEF_INT(page_shared)
 BTFIXUPDEF_INT(page_copy)
 BTFIXUPDEF_INT(page_readonly)
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+BTFIXUPDEF_INT(page_shared_noexec)
+BTFIXUPDEF_INT(page_copy_noexec)
+BTFIXUPDEF_INT(page_readonly_noexec)
+#endif
+
 BTFIXUPDEF_INT(page_kernel)
 
 #define PMD_SHIFT       	BTFIXUP_SIMM13(pmd_shift)
@@ -118,6 +125,16 @@
 #define PAGE_COPY      __pgprot(BTFIXUP_INT(page_copy))
 #define PAGE_READONLY  __pgprot(BTFIXUP_INT(page_readonly))
 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define PAGE_SHARED_NOEXEC    __pgprot(BTFIXUP_INT(page_shared_noexec))
+#define PAGE_COPY_NOEXEC      __pgprot(BTFIXUP_INT(page_copy_noexec))
+#define PAGE_READONLY_NOEXEC  __pgprot(BTFIXUP_INT(page_readonly_noexec))
+#else
+#define PAGE_SHARED_NOEXEC    PAGE_SHARED
+#define PAGE_COPY_NOEXEC      PAGE_COPY
+#define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 extern unsigned long page_kernel;
 
 #ifdef MODULE
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/pgtsrmmu.h linux-2.4.26-leo/include/asm-sparc/pgtsrmmu.h
--- linux-2.4.26/include/asm-sparc/pgtsrmmu.h	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/include/asm-sparc/pgtsrmmu.h	2004-06-22 20:53:47.000000000 +0100
@@ -76,6 +76,15 @@
 				    SRMMU_EXEC | SRMMU_REF)
 #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
 				    SRMMU_EXEC | SRMMU_REF)
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define SRMMU_PAGE_SHARED_NOEXEC  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+					   SRMMU_WRITE | SRMMU_REF)
+#define SRMMU_PAGE_COPY_NOEXEC    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+					   SRMMU_REF)
+#define SRMMU_PAGE_RDONLY_NOEXEC  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+					   SRMMU_REF)
+#endif
+
 #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
 				    SRMMU_DIRTY | SRMMU_REF)
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc/uaccess.h linux-2.4.26-leo/include/asm-sparc/uaccess.h
--- linux-2.4.26/include/asm-sparc/uaccess.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc/uaccess.h	2004-06-22 20:53:47.000000000 +0100
@@ -39,7 +39,7 @@
  * No one can read/write anything from userland in the kernel space by setting
  * large size and address near to PAGE_OFFSET - a fault will break his intentions.
  */
-#define __user_ok(addr,size) ((addr) < STACK_TOP)
+#define __user_ok(addr,size) ((addr) < __STACK_TOP)
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc64/a.out.h linux-2.4.26-leo/include/asm-sparc64/a.out.h
--- linux-2.4.26/include/asm-sparc64/a.out.h	2001-04-27 06:17:26.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc64/a.out.h	2004-06-22 20:53:47.000000000 +0100
@@ -95,7 +95,7 @@
 
 #ifdef __KERNEL__
 
-#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L)
+#define __STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L)
 
 #endif
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc64/elf.h linux-2.4.26-leo/include/asm-sparc64/elf.h
--- linux-2.4.26/include/asm-sparc64/elf.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc64/elf.h	2004-06-22 20:53:47.000000000 +0100
@@ -82,6 +82,17 @@
 #define ELF_ET_DYN_BASE         0x0000010000000000UL
 #endif
 
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE(tsk)       ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 0x10000UL : 0x100000UL)
+
+#define PAX_DELTA_MMAP_LSB(tsk)         (PAGE_SHIFT + 1)
+#define PAX_DELTA_MMAP_LEN(tsk)         ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 )
+#define PAX_DELTA_EXEC_LSB(tsk)         (PAGE_SHIFT + 1)
+#define PAX_DELTA_EXEC_LEN(tsk)         ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 )
+#define PAX_DELTA_STACK_LSB(tsk)        PAGE_SHIFT
+#define PAX_DELTA_STACK_LEN(tsk)        ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 15 : 29 )
+#endif
+
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc64/mman.h linux-2.4.26-leo/include/asm-sparc64/mman.h
--- linux-2.4.26/include/asm-sparc64/mman.h	2003-06-13 15:51:38.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc64/mman.h	2004-06-22 20:53:47.000000000 +0100
@@ -24,6 +24,10 @@
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
 #define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+#define MAP_MIRROR	0x0400
+#endif
+
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
 #define MS_SYNC		4		/* synchronous memory sync */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/asm-sparc64/pgtable.h linux-2.4.26-leo/include/asm-sparc64/pgtable.h
--- linux-2.4.26/include/asm-sparc64/pgtable.h	2002-08-03 01:39:45.000000000 +0100
+++ linux-2.4.26-leo/include/asm-sparc64/pgtable.h	2004-06-22 20:53:47.000000000 +0100
@@ -122,7 +122,8 @@
 #define _PAGE_G		0x0000000000000001	/* Global                             */
 
 /* Here are the SpitFire software bits we use in the TTE's. */
-#define _PAGE_MODIFIED	0x0000000000000800	/* Modified Page (ie. dirty)          */
+#define _PAGE_MODIFIED	0x0000000000001000	/* Modified Page (ie. dirty)          */
+#define _PAGE_EXEC	0x0000000000000800	/* Executable SW bit		      */
 #define _PAGE_ACCESSED	0x0000000000000400	/* Accessed Page (ie. referenced)     */
 #define _PAGE_READ	0x0000000000000200	/* Readable SW Bit                    */
 #define _PAGE_WRITE	0x0000000000000100	/* Writable SW Bit                    */
@@ -150,16 +151,30 @@
 
 /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
 #define PAGE_SHARED	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_WRITE)
+				  __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
 
 #define PAGE_COPY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS)
+				  __ACCESS_BITS | _PAGE_EXEC)
 
 #define PAGE_READONLY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS)
+				  __ACCESS_BITS | _PAGE_EXEC)
 
 #define PAGE_KERNEL	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)
+				  __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS | \
+				  _PAGE_EXEC)
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+#define PAGE_SHARED_NOEXEC    __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+					__ACCESS_BITS | _PAGE_WRITE)
+#define PAGE_COPY_NOEXEC      __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+					__ACCESS_BITS)
+#define PAGE_READONLY_NOEXEC  __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+					__ACCESS_BITS)
+#else
+#define PAGE_SHARED_NOEXEC     PAGE_SHARED
+#define PAGE_COPY_NOEXEC       PAGE_COPY
+#define PAGE_READONLY_NOEXEC   PAGE_READONLY
+#endif
 
 #define PAGE_INVALID	__pgprot (0)
 
@@ -170,18 +185,18 @@
 #define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)
 
 #define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
+#define __P001	PAGE_READONLY_NOEXEC
+#define __P010	PAGE_COPY_NOEXEC
+#define __P011	PAGE_COPY_NOEXEC
 #define __P100	PAGE_READONLY
 #define __P101	PAGE_READONLY
 #define __P110	PAGE_COPY
 #define __P111	PAGE_COPY
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
+#define __S001	PAGE_READONLY_NOEXEC
+#define __S010	PAGE_SHARED_NOEXEC
+#define __S011	PAGE_SHARED_NOEXEC
 #define __S100	PAGE_READONLY
 #define __S101	PAGE_READONLY
 #define __S110	PAGE_SHARED
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/a.out.h linux-2.4.26-leo/include/linux/a.out.h
--- linux-2.4.26/include/linux/a.out.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/linux/a.out.h	2004-06-22 21:02:57.000000000 +0100
@@ -7,6 +7,16 @@
 
 #include <asm/a.out.h>
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK
+#define __DELTA_STACK (current->mm->delta_stack)
+#else
+#define __DELTA_STACK 0UL
+#endif
+
+#ifndef STACK_TOP
+#define STACK_TOP	(__STACK_TOP - __DELTA_STACK)
+#endif
+
 #endif /* __STRUCT_EXEC_OVERRIDE__ */
 
 /* these go in the N_MACHTYPE field */
@@ -37,6 +47,14 @@
   M_MIPS2 = 152		/* MIPS R6000/R4000 binary */
 };
 
+/* Constants for the N_FLAGS field */
+#define F_PAX_PAGEEXEC 1	/* Paging based non-executable pages */
+#define F_PAX_EMUTRAMP 2	/* Emulate trampolines */
+#define F_PAX_MPROTECT 4	/* Restrict mprotect() */
+#define F_PAX_RANDMMAP 8	/* Randomize mmap() base */   
+#define F_PAX_RANDEXEC 16	/* Randomize ET_EXEC base */
+#define F_PAX_SEGMEXEC 32	/* Segmentation based non-executable pages */
+
 #if !defined (N_MAGIC)
 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
 #endif
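
A sketch of how the new F_PAX_* bits would be tested, assuming a marking tool
has set them in the a.out header and using the standard N_FLAGS() accessor
from this header:

static int example_wants_mprotect(struct exec *ex)
{
	return (N_FLAGS(*ex) & F_PAX_MPROTECT) != 0;
}
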
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/binfmts.h linux-2.4.26-leo/include/linux/binfmts.h
--- linux-2.4.26/include/linux/binfmts.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/linux/binfmts.h	2004-06-22 21:02:57.000000000 +0100
@@ -30,6 +30,7 @@
 	int argc, envc;
 	char * filename;	/* Name of binary */
 	unsigned long loader, exec;
+	int misc;
 };
 
 /*
@@ -59,6 +60,8 @@
 extern int do_coredump(long signr, struct pt_regs * regs);
 extern void set_binfmt(struct linux_binfmt *new);
 
+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+void pax_report_insns(void *pc);
 
 #if 0
 /* this went away now */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/brlock.h linux-2.4.26-leo/include/linux/brlock.h
--- linux-2.4.26/include/linux/brlock.h	2004-02-20 13:07:07.000000000 +0000
+++ linux-2.4.26-leo/include/linux/brlock.h	2004-06-22 21:03:09.000000000 +0100
@@ -171,11 +171,11 @@
 }
 
 #else
-# define br_read_lock(idx)	((void)(idx))
-# define br_read_unlock(idx)	((void)(idx))
-# define br_write_lock(idx)	((void)(idx))
-# define br_write_unlock(idx)	((void)(idx))
-#endif
+# define br_read_lock(idx)	({ (void)(idx); preempt_disable(); })
+# define br_read_unlock(idx)	({ (void)(idx); preempt_enable(); })
+# define br_write_lock(idx)	({ (void)(idx); preempt_disable(); })
+# define br_write_unlock(idx)	({ (void)(idx); preempt_enable(); })
+#endif	/* CONFIG_SMP */
 
 /*
  * Now enumerate all of the possible sw/hw IRQ protected
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/dcache.h linux-2.4.26-leo/include/linux/dcache.h
--- linux-2.4.26/include/linux/dcache.h	2004-02-20 13:06:53.000000000 +0000
+++ linux-2.4.26-leo/include/linux/dcache.h	2004-06-22 21:02:57.000000000 +0100
@@ -127,31 +127,6 @@
 
 extern spinlock_t dcache_lock;
 
-/**
- * d_drop - drop a dentry
- * @dentry: dentry to drop
- *
- * d_drop() unhashes the entry from the parent
- * dentry hashes, so that it won't be found through
- * a VFS lookup any more. Note that this is different
- * from deleting the dentry - d_delete will try to
- * mark the dentry negative if possible, giving a
- * successful _negative_ lookup, while d_drop will
- * just make the cache lookup fail.
- *
- * d_drop() is used mainly for stuff that wants
- * to invalidate a dentry for some reason (NFS
- * timeouts or autofs deletes).
- */
-
-static __inline__ void d_drop(struct dentry * dentry)
-{
-	spin_lock(&dcache_lock);
-	list_del(&dentry->d_hash);
-	INIT_LIST_HEAD(&dentry->d_hash);
-	spin_unlock(&dcache_lock);
-}
-
 static __inline__ int dname_external(struct dentry *d)
 {
 	return d->d_name.name != d->d_iname; 
@@ -276,3 +251,34 @@
 #endif /* __KERNEL__ */
 
 #endif	/* __LINUX_DCACHE_H */
+
+#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
+#define __LINUX_DCACHE_H_INLINES
+
+#ifdef __KERNEL__
+/**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+ *
+ * d_drop() unhashes the entry from the parent
+ * dentry hashes, so that it won't be found through
+ * a VFS lookup any more. Note that this is different
+ * from deleting the dentry - d_delete will try to
+ * mark the dentry negative if possible, giving a
+ * successful _negative_ lookup, while d_drop will
+ * just make the cache lookup fail.
+ *
+ * d_drop() is used mainly for stuff that wants
+ * to invalidate a dentry for some reason (NFS
+ * timeouts or autofs deletes).
+ */
+
+static __inline__ void d_drop(struct dentry * dentry)
+{
+	spin_lock(&dcache_lock);
+	list_del(&dentry->d_hash);
+	INIT_LIST_HEAD(&dentry->d_hash);
+	spin_unlock(&dcache_lock);
+}
+#endif
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/device-mapper.h linux-2.4.26-leo/include/linux/device-mapper.h
--- linux-2.4.26/include/linux/device-mapper.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/device-mapper.h	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef _LINUX_DEVICE_MAPPER_H
+#define _LINUX_DEVICE_MAPPER_H
+
+typedef unsigned long sector_t;
+
+struct dm_target;
+struct dm_table;
+struct dm_dev;
+
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+
+union map_info {
+	void *ptr;
+	unsigned long long ll;
+};
+
+/*
+ * In the constructor the target parameter will already have the
+ * table, type, begin and len fields filled in.
+ */
+typedef int (*dm_ctr_fn) (struct dm_target * target, unsigned int argc,
+			  char **argv);
+
+/*
+ * The destructor doesn't need to free the dm_target, just
+ * anything hidden in ti->private.
+ */
+typedef void (*dm_dtr_fn) (struct dm_target * ti);
+
+/*
+ * The map function must return:
+ * < 0: error
+ * = 0: The target will handle the io by resubmitting it later
+ * > 0: simple remap complete
+ */
+typedef int (*dm_map_fn) (struct dm_target * ti, struct buffer_head * bh,
+			  int rw, union map_info *map_context);
+
+/*
+ * Returns:
+ * < 0 : error (currently ignored)
+ * 0   : ended successfully
+ * 1   : for some reason the io has still not completed (eg,
+ *       multipath target might want to requeue a failed io).
+ */
+typedef int (*dm_endio_fn) (struct dm_target * ti,
+			    struct buffer_head * bh, int rw, int error,
+			    union map_info *map_context);
+typedef void (*dm_suspend_fn) (struct dm_target *ti);
+typedef void (*dm_resume_fn) (struct dm_target *ti);
+typedef int (*dm_status_fn) (struct dm_target * ti, status_type_t status_type,
+			     char *result, unsigned int maxlen);
+
+void dm_error(const char *message);
+
+/*
+ * Constructors should call these functions to ensure destination devices
+ * are opened/closed correctly.
+ * FIXME: too many arguments.
+ */
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+		  sector_t len, int mode, struct dm_dev **result);
+void dm_put_device(struct dm_target *ti, struct dm_dev *d);
+
+/*
+ * Information about a target type
+ */
+struct target_type {
+	const char *name;
+	struct module *module;
+	dm_ctr_fn ctr;
+	dm_dtr_fn dtr;
+	dm_map_fn map;
+	dm_endio_fn end_io;
+	dm_suspend_fn suspend;
+	dm_resume_fn resume;
+	dm_status_fn status;
+};
+
+struct dm_target {
+	struct dm_table *table;
+	struct target_type *type;
+
+	/* target limits */
+	sector_t begin;
+	sector_t len;
+
+	/* target specific data */
+	void *private;
+
+	/* Used to provide an error string from the ctr */
+	char *error;
+};
+
+int dm_register_target(struct target_type *t);
+int dm_unregister_target(struct target_type *t);
+
+#endif				/* _LINUX_DEVICE_MAPPER_H */
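
A minimal sketch against the interface above: a hypothetical pass-through
target that completes every request as a "simple remap" without modifying
it.  A real target would claim its destination device with dm_get_device()
in the constructor and release it in the destructor:

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->private = NULL;	/* no per-target state */
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
}

static int example_map(struct dm_target *ti, struct buffer_head *bh,
		       int rw, union map_info *map_context)
{
	return 1;		/* > 0: simple remap complete */
}

static struct target_type example_target = {
	"example",		/* name */
	THIS_MODULE,		/* module */
	example_ctr,
	example_dtr,
	example_map,
	NULL, NULL, NULL, NULL	/* end_io, suspend, resume, status */
};

Module init would call dm_register_target(&example_target), and module exit
dm_unregister_target(&example_target).
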
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/dm-ioctl.h linux-2.4.26-leo/include/linux/dm-ioctl.h
--- linux-2.4.26/include/linux/dm-ioctl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/dm-ioctl.h	2004-06-22 21:04:36.000000000 +0100
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef _LINUX_DM_IOCTL_H
+#define _LINUX_DM_IOCTL_H
+
+#include <linux/types.h>
+
+#define DM_DIR "mapper"		/* Slashes not supported */
+#define DM_MAX_TYPE_NAME 16
+#define DM_NAME_LEN 128
+#define DM_UUID_LEN 129
+
+/*
+ * A traditional ioctl interface for the device mapper.
+ *
+ * Each device can have two tables associated with it, an
+ * 'active' table which is the one currently used by io passing
+ * through the device, and an 'inactive' one which is a table
+ * that is being prepared as a replacement for the 'active' one.
+ *
+ * DM_VERSION:
+ * Just get the version information for the ioctl interface.
+ *
+ * DM_REMOVE_ALL:
+ * Remove all dm devices, destroy all tables.  Only really used
+ * for debug.
+ *
+ * DM_LIST_DEVICES:
+ * Get a list of all the dm device names.
+ *
+ * DM_DEV_CREATE:
+ * Create a new device; neither the 'active' nor the 'inactive'
+ * table slot will be filled.  The device will be in a suspended
+ * state after creation; however, any io to the device will fail
+ * since it will be out-of-bounds.
+ *
+ * DM_DEV_REMOVE:
+ * Remove a device, destroy any tables.
+ *
+ * DM_DEV_RENAME:
+ * Rename a device.
+ *
+ * DM_SUSPEND:
+ * This performs both suspend and resume, depending which flag is
+ * passed in.
+ * Suspend: This command will not return until all pending io to
+ * the device has completed.  Further io will be deferred until
+ * the device is resumed.
+ * Resume: It is no longer an error to issue this command on an
+ * unsuspended device.  If a table is present in the 'inactive'
+ * slot, it will be moved to the active slot, then the old table
+ * from the active slot will be _destroyed_.  Finally the device
+ * is resumed.
+ *
+ * DM_DEV_STATUS:
+ * Retrieves the status for the table in the 'active' slot.
+ *
+ * DM_DEV_WAIT:
+ * Wait for a significant event to occur to the device.  This
+ * could either be caused by an event triggered by one of the
+ * targets of the table in the 'active' slot, or a table change.
+ *
+ * DM_TABLE_LOAD:
+ * Load a table into the 'inactive' slot for the device.  The
+ * device does _not_ need to be suspended prior to this command.
+ *
+ * DM_TABLE_CLEAR:
+ * Destroy any table in the 'inactive' slot (ie. abort).
+ *
+ * DM_TABLE_DEPS:
+ * Return a set of device dependencies for the 'active' table.
+ *
+ * DM_TABLE_STATUS:
+ * Return the targets status for the 'active' table.
+ */
+
+/*
+ * All ioctl arguments consist of a single chunk of memory, with
+ * this structure at the start.  If a uuid is specified any
+ * lookup (eg. for a DM_INFO) will be done on that, *not* the
+ * name.
+ */
+struct dm_ioctl {
+	/*
+	 * The version number is made up of three parts:
+	 * major - no backward or forward compatibility,
+	 * minor - only backwards compatible,
+	 * patch - both backwards and forwards compatible.
+	 *
+	 * All clients of the ioctl interface should fill in the
+	 * version number of the interface that they were
+	 * compiled with.
+	 *
+	 * All recognised ioctl commands (ie. those that don't
+	 * return -ENOTTY) fill out this field, even if the
+	 * command failed.
+	 */
+	uint32_t version[3];	/* in/out */
+	uint32_t data_size;	/* total size of data passed in
+				 * including this struct */
+
+	uint32_t data_start;	/* offset to start of data
+				 * relative to start of this struct */
+
+	uint32_t target_count;	/* in/out */
+	int32_t open_count;	/* out */
+	uint32_t flags;		/* in/out */
+	uint32_t event_nr;      /* in/out */
+	uint32_t padding;
+
+	uint64_t dev;		/* in/out */
+
+	char name[DM_NAME_LEN];	/* device name */
+	char uuid[DM_UUID_LEN];	/* unique identifier for
+				 * the block device */
+};
+
+/*
+ * Used to specify tables.  These structures appear after the
+ * dm_ioctl.
+ */
+struct dm_target_spec {
+	uint64_t sector_start;
+	uint64_t length;
+	int32_t status;		/* used when reading from kernel only */
+
+	/*
+	 * Offset in bytes (from the start of this struct) to
+	 * next target_spec.
+	 */
+	uint32_t next;
+
+	char target_type[DM_MAX_TYPE_NAME];
+
+	/*
+	 * Parameter string starts immediately after this object.
+	 * Be careful to add padding after string to ensure correct
+	 * alignment of subsequent dm_target_spec.
+	 */
+};
+
+/*
+ * Used to retrieve the target dependencies.
+ */
+struct dm_target_deps {
+	uint32_t count;		/* Array size */
+	uint32_t padding;	/* unused */
+	uint64_t dev[0];	/* out */
+};
+
+/*
+ * Used to get a list of all dm devices.
+ */
+struct dm_name_list {
+	uint64_t dev;
+	uint32_t next;		/* offset to the next record from
+				   the _start_ of this */
+	char name[0];
+};
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to dm-ioctl.c:lookup_ioctl()
+ */
+enum {
+	/* Top level cmds */
+	DM_VERSION_CMD = 0,
+	DM_REMOVE_ALL_CMD,
+	DM_LIST_DEVICES_CMD,
+
+	/* device level cmds */
+	DM_DEV_CREATE_CMD,
+	DM_DEV_REMOVE_CMD,
+	DM_DEV_RENAME_CMD,
+	DM_DEV_SUSPEND_CMD,
+	DM_DEV_STATUS_CMD,
+	DM_DEV_WAIT_CMD,
+
+	/* Table level cmds */
+	DM_TABLE_LOAD_CMD,
+	DM_TABLE_CLEAR_CMD,
+	DM_TABLE_DEPS_CMD,
+	DM_TABLE_STATUS_CMD,
+};
+
+#define DM_IOCTL 0xfd
+
+#define DM_VERSION       _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl)
+#define DM_REMOVE_ALL    _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl)
+#define DM_LIST_DEVICES  _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl)
+
+#define DM_DEV_CREATE    _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl)
+#define DM_DEV_REMOVE    _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl)
+#define DM_DEV_RENAME    _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl)
+#define DM_DEV_SUSPEND   _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl)
+#define DM_DEV_STATUS    _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl)
+#define DM_DEV_WAIT      _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl)
+
+#define DM_TABLE_LOAD    _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl)
+#define DM_TABLE_CLEAR   _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl)
+#define DM_TABLE_DEPS    _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, struct dm_ioctl)
+#define DM_TABLE_STATUS  _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl)
+
+#define DM_VERSION_MAJOR	4
+#define DM_VERSION_MINOR	0
+#define DM_VERSION_PATCHLEVEL	5
+#define DM_VERSION_EXTRA	"-ioctl (2003-11-18)"
+
+/* Status bits */
+#define DM_READONLY_FLAG	(1 << 0) /* In/Out */
+#define DM_SUSPEND_FLAG		(1 << 1) /* In/Out */
+#define DM_PERSISTENT_DEV_FLAG	(1 << 3) /* In */
+
+/*
+ * Flag passed into ioctl STATUS command to get table information
+ * rather than current status.
+ */
+#define DM_STATUS_TABLE_FLAG	(1 << 4) /* In */
+
+/*
+ * Flags that indicate whether a table is present in either of
+ * the two table slots that a device has.
+ */
+#define DM_ACTIVE_PRESENT_FLAG   (1 << 5) /* Out */
+#define DM_INACTIVE_PRESENT_FLAG (1 << 6) /* Out */
+
+/*
+ * Indicates that the buffer passed in wasn't big enough for the
+ * results.
+ */
+#define DM_BUFFER_FULL_FLAG	(1 << 8) /* Out */
+
+#endif				/* _LINUX_DM_IOCTL_H */
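
For reference, a userspace client is expected to fill in version[] and
negotiate before issuing anything else; the kernel writes its own version
back even when the command fails, so DM_VERSION is the natural first call.
A minimal sketch (the control node path and the bare-bones error handling
are illustrative; libdevmapper performs the same handshake):

	/* Illustrative sketch: query the dm ioctl interface version.
	 * Assumes the control node exists at /dev/mapper/control. */
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dm-ioctl.h>

	int main(void)
	{
		struct dm_ioctl dmi;
		int fd = open("/dev/mapper/control", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&dmi, 0, sizeof(dmi));
		dmi.version[0] = DM_VERSION_MAJOR;	/* what we were built against */
		dmi.version[1] = DM_VERSION_MINOR;
		dmi.version[2] = DM_VERSION_PATCHLEVEL;
		dmi.data_size = sizeof(dmi);		/* no payload beyond the header */

		if (ioctl(fd, DM_VERSION, &dmi) == 0)
			printf("dm interface %u.%u.%u\n",
			       dmi.version[0], dmi.version[1], dmi.version[2]);
		close(fd);
		return 0;
	}
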
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/elf.h linux-2.4.26-leo/include/linux/elf.h
--- linux-2.4.26/include/linux/elf.h	2004-05-26 23:43:46.000000000 +0100
+++ linux-2.4.26-leo/include/linux/elf.h	2004-06-22 21:05:29.000000000 +0100
@@ -34,6 +34,10 @@
 #define PT_MIPS_REGINFO		0x70000000
 #define PT_MIPS_OPTIONS		0x70000001
 
+#define PT_LOOS			0x60000000
+#define PT_GNU_STACK		(PT_LOOS + 0x474e551)
+#define PT_PAX_FLAGS		(PT_LOOS + 0x5041580)
+
 /* Flags in the e_flags field of the header */
 #define EF_MIPS_NOREORDER 0x00000001
 #define EF_MIPS_PIC       0x00000002
@@ -122,6 +126,8 @@
 #define DT_DEBUG	21
 #define DT_TEXTREL	22
 #define DT_JMPREL	23
+#define DT_FLAGS	30
+#define DF_TEXTREL	0x00000004
 #define DT_LOPROC	0x70000000
 #define DT_HIPROC	0x7fffffff
 #define DT_MIPS_RLD_VERSION	0x70000001
@@ -260,6 +266,13 @@
 #define R_MIPS_LOVENDOR		100
 #define R_MIPS_HIVENDOR		127
 
+/* Constants for the e_flags field */
+#define EF_PAX_PAGEEXEC               1       /* Paging based non-executable pages */
+#define EF_PAX_EMUTRAMP               2       /* Emulate trampolines */
+#define EF_PAX_MPROTECT               4       /* Restrict mprotect() */
+#define EF_PAX_RANDMMAP               8       /* Randomize mmap() base */ 
+#define EF_PAX_RANDEXEC		      16      /* Randomize ET_EXEC base */
+#define EF_PAX_SEGMEXEC		      32      /* Segmentation based non-executable pages */
 
 /*
  * Sparc ELF relocation types
@@ -458,6 +471,19 @@
 #define PF_W		0x2
 #define PF_X		0x1
 
+#define PF_PAGEEXEC	(1 << 4)	/* Enable  PAGEEXEC */
+#define PF_NOPAGEEXEC	(1 << 5)	/* Disable PAGEEXEC */
+#define PF_SEGMEXEC	(1 << 6)	/* Enable  SEGMEXEC */
+#define PF_NOSEGMEXEC	(1 << 7)	/* Disable SEGMEXEC */
+#define PF_MPROTECT	(1 << 8)	/* Enable  MPROTECT */
+#define PF_NOMPROTECT	(1 << 9)	/* Disable MPROTECT */
+#define PF_RANDEXEC	(1 << 10)	/* Enable  RANDEXEC */
+#define PF_NORANDEXEC	(1 << 11)	/* Disable RANDEXEC */
+#define PF_EMUTRAMP	(1 << 12)	/* Enable  EMUTRAMP */
+#define PF_NOEMUTRAMP	(1 << 13)	/* Disable EMUTRAMP */
+#define PF_RANDMMAP	(1 << 14)	/* Enable  RANDMMAP */
+#define PF_NORANDMMAP	(1 << 15)	/* Disable RANDMMAP */
+
 typedef struct elf32_phdr{
   Elf32_Word	p_type;
   Elf32_Off	p_offset;
@@ -555,6 +581,8 @@
 #define	EI_VERSION	6
 #define	EI_PAD		7
 
+#define EI_PAX		14
+
 #define	ELFMAG0		0x7f		/* EI_MAG */
 #define	ELFMAG1		'E'
 #define	ELFMAG2		'L'
@@ -602,6 +630,7 @@
 #define elfhdr		elf32_hdr
 #define elf_phdr	elf32_phdr
 #define elf_note	elf32_note
+#define elf_dyn		Elf32_Dyn
 
 #else
 
@@ -609,6 +638,7 @@
 #define elfhdr		elf64_hdr
 #define elf_phdr	elf64_phdr
 #define elf_note	elf64_note
+#define elf_dyn		Elf64_Dyn
 
 #endif
 
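
Note that the enable/disable bits come in pairs, so a PT_PAX_FLAGS header
can express three states per feature: explicitly on, explicitly off, or
(both bits clear) fall back to the system default.  A sketch of how a
loader might decode one feature (the helper name is made up for
illustration):

	/* Illustrative: decode the MPROTECT marking from a PT_PAX_FLAGS
	 * program header.  Returns 1 for explicitly enabled, 0 for
	 * explicitly disabled, -1 for unmarked (use the default). */
	static int pax_mprotect_marking(const struct elf32_phdr *phdr)
	{
		if (phdr->p_type != PT_PAX_FLAGS)
			return -1;
		if (phdr->p_flags & PF_MPROTECT)
			return 1;
		if (phdr->p_flags & PF_NOMPROTECT)
			return 0;
		return -1;
	}
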
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/fs.h linux-2.4.26-leo/include/linux/fs.h
--- linux-2.4.26/include/linux/fs.h	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/include/linux/fs.h	2004-06-22 21:02:57.000000000 +0100
@@ -266,7 +266,7 @@
 	struct page *b_page;		/* the page this bh is mapped to */
 	void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */
  	void *b_private;		/* reserved for b_end_io */
-
+ 	void *b_journal_head;		/* ext3 journal_heads */
 	unsigned long b_rsector;	/* Real buffer location on disk */
 	wait_queue_head_t b_wait;
 
@@ -1090,7 +1090,7 @@
 
 asmlinkage long sys_open(const char *, int, int);
 asmlinkage long sys_close(unsigned int);	/* yes, it's really unsigned */
-extern int do_truncate(struct dentry *, loff_t start);
+extern int do_truncate(struct dentry *, loff_t start, struct vfsmount *);
 
 extern struct file *filp_open(const char *, int, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
@@ -1277,6 +1277,7 @@
 extern int sync_buffers(kdev_t, int);
 extern void sync_dev(kdev_t);
 extern int fsync_dev(kdev_t);
+extern int fsync_dev_lockfs(kdev_t);
 extern int fsync_super(struct super_block *);
 extern int fsync_no_super(kdev_t);
 extern void sync_inodes_sb(struct super_block *);
@@ -1295,6 +1296,8 @@
 extern int filemap_fdatasync(struct address_space *);
 extern int filemap_fdatawait(struct address_space *);
 extern void sync_supers(kdev_t dev, int wait);
+extern void sync_supers_lockfs(kdev_t);
+extern void unlockfs(kdev_t);
 extern int bmap(struct inode *, int);
 extern int notify_change(struct dentry *, struct iattr *);
 extern int permission(struct inode *, int);
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/fs_struct.h linux-2.4.26-leo/include/linux/fs_struct.h
--- linux-2.4.26/include/linux/fs_struct.h	2001-07-13 23:10:44.000000000 +0100
+++ linux-2.4.26-leo/include/linux/fs_struct.h	2004-05-26 23:34:34.000000000 +0100
@@ -20,6 +20,15 @@
 extern void exit_fs(struct task_struct *);
 extern void set_fs_altroot(void);
 
+struct fs_struct *copy_fs_struct(struct fs_struct *old);
+void put_fs_struct(struct fs_struct *fs);
+
+#endif
+#endif
+
+#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
+#define _LINUX_FS_STRUCT_H_INLINES
+#ifdef __KERNEL__
 /*
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
  * It can block. Requires the big lock held.
@@ -65,9 +74,5 @@
 		mntput(old_pwdmnt);
 	}
 }
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old);
-void put_fs_struct(struct fs_struct *fs);
-
 #endif
 #endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/gracl.h linux-2.4.26-leo/include/linux/gracl.h
--- linux-2.4.26/include/linux/gracl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/gracl.h	2004-06-22 21:07:31.000000000 +0100
@@ -0,0 +1,248 @@
+#ifndef GR_ACL_H
+#define GR_ACL_H
+
+#include <linux/grdefs.h>
+#include <linux/resource.h>
+
+#include <asm/resource.h>
+
+/* * * * * * * * * * * * * * * * * * * * *
+ * grsecurity ACL System
+ * Main header file
+ * Purpose: define most gracl data structures 
+ * * * * * * * * * * * * * * * * * * * * */
+
+/* Major status information */
+
+#define GR_VERSION  "grsecurity 2.0"
+
+enum {
+
+	SHUTDOWN = 0,
+	ENABLE = 1,
+	SPROLE = 2,
+	RELOAD = 3,
+	SEGVMOD = 4,
+	STATUS = 5,
+	UNSPROLE = 6
+};
+
+/* Password setup definitions
+ * kernel/grhash.c */
+enum {
+	GR_PW_LEN = 128,
+	GR_SALT_LEN = 16,
+	GR_SHA_LEN = 32,
+};
+
+enum {
+	GR_SPROLE_LEN = 64,
+};
+
+/* Begin Data Structures */
+
+struct sprole_pw {
+	unsigned char *rolename;
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];	/* 256-bit SHA hash of the password */
+};
+
+struct name_entry {
+	ino_t inode;
+	kdev_t device;
+	char *name;
+	__u16 len;
+};
+
+struct acl_role_db {
+	struct acl_role_label **r_hash;
+	__u32 r_size;
+};
+
+struct name_db {
+	struct name_entry **n_hash;
+	__u32 n_size;
+};
+
+struct crash_uid {
+	uid_t uid;
+	unsigned long expires;
+};
+
+struct gr_hash_struct {
+	void **table;
+	void **nametable;
+	void *first;
+	__u32 table_size;
+	__u32 used_size;
+	int type;
+};
+
+/* Userspace Grsecurity ACL data structures */
+struct acl_subject_label {
+	char *filename;
+	ino_t inode;
+	kdev_t device;
+	__u32 mode;
+	__u32 cap_mask;
+	__u32 cap_lower;
+
+	struct rlimit res[RLIM_NLIMITS + 1];
+	__u16 resmask;
+
+	__u8 user_trans_type;
+	__u8 group_trans_type;
+	uid_t *user_transitions;
+	gid_t *group_transitions;
+	__u16 user_trans_num;
+	__u16 group_trans_num;
+
+	__u32 ip_proto[8];
+	__u32 ip_type;
+	struct acl_ip_label **ips;
+	__u32 ip_num;
+
+	__u32 crashes;
+	unsigned long expires;
+
+	struct acl_subject_label *parent_subject;
+	struct gr_hash_struct *hash;
+	struct acl_ip_label *ip_object;
+	struct acl_subject_label *prev;
+	struct acl_subject_label *next;
+
+	struct acl_object_label **obj_hash;
+	__u32 obj_hash_size;
+};
+
+struct role_allowed_ip {
+	__u32 addr;
+	__u32 netmask;
+
+	struct role_allowed_ip *prev;
+	struct role_allowed_ip *next;
+};
+
+struct role_transition {
+	char *rolename;
+
+	struct role_transition *prev;
+	struct role_transition *next;
+};
+
+struct acl_role_label {
+	char *rolename;
+	uid_t uidgid;
+	__u16 roletype;
+
+	__u16 auth_attempts;
+	unsigned long expires;
+
+	struct acl_subject_label *root_label;
+	struct gr_hash_struct *hash;
+
+	struct acl_role_label *prev;
+	struct acl_role_label *next;
+
+	struct role_transition *transitions;
+	struct role_allowed_ip *allowed_ips;
+	struct acl_subject_label **subj_hash;
+	__u32 subj_hash_size;
+};
+
+struct user_acl_role_db {
+	struct acl_role_label **r_table;
+	__u32 r_entries;	/* number of entries in table */
+	__u32 s_entries;	/* total number of subject acls */
+	__u32 i_entries;	/* total number of ip acls */
+	__u32 o_entries;	/* Total number of object acls */
+	__u32 g_entries;	/* total number of globbed objects */
+	__u32 a_entries;	/* total number of allowed ips */
+	__u32 t_entries;	/* total number of transitions */
+};
+
+struct acl_object_label {
+	char *filename;
+	ino_t inode;
+	kdev_t device;
+	__u32 mode;
+
+	struct acl_subject_label *nested;
+	struct acl_object_label *globbed;
+
+	/* next two pointers not used */
+
+	struct acl_object_label *prev;
+	struct acl_object_label *next;
+};
+
+struct acl_ip_label {
+	__u32 addr;
+	__u32 netmask;
+	__u16 low, high;
+	__u8 mode;
+	__u32 type;
+	__u32 proto[8];
+
+	/* next two pointers not used */
+
+	struct acl_ip_label *prev;
+	struct acl_ip_label *next;
+};
+
+struct gr_arg {
+	struct user_acl_role_db role_db;
+	unsigned char pw[GR_PW_LEN];
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];
+	unsigned char sp_role[GR_SPROLE_LEN];
+	struct sprole_pw *sprole_pws;
+	kdev_t segv_device;
+	ino_t segv_inode;
+	uid_t segv_uid;
+	__u16 num_sprole_pws;
+	__u16 mode;
+};
+
+struct subject_map {
+	struct acl_subject_label *user;
+	struct acl_subject_label *kernel;
+};
+
+struct acl_subj_map_db {
+	struct subject_map **s_hash;
+	__u32 s_size;
+};
+
+/* End Data Structures Section */
+
+/* Hash functions generated by empirical testing by Brad Spengler.
+   They make good use of the low bits of the inode: generally 0-1 loop
+   iterations for a successful match, 0-3 for an unsuccessful match.
+   Shift/add algorithm with modulus of table size and an XOR. */
+
+static __inline__ unsigned long
+rhash(const uid_t uid, const __u16 type, const unsigned long sz)
+{
+	return (((uid << type) + (uid ^ type)) % sz);
+}
+
+static __inline__ unsigned long
+shash(const struct acl_subject_label *userp, const unsigned long sz)
+{
+	return ((const unsigned long)userp % sz);
+}
+
+static __inline__ unsigned long
+fhash(const ino_t ino, const kdev_t dev, const unsigned long sz)
+{
+	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
+}
+
+static __inline__ unsigned long
+nhash(const char *name, const __u16 len, const unsigned long sz)
+{
+	return full_name_hash(name, len) % sz;
+}
+
+#endif
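
All four hashes are used the same way: the result indexes the matching
*_hash table and collisions are resolved by walking a chain.  A sketch of
an object lookup, assuming (as the kernel-side tables are built) that the
chains are linked through the next pointers:

	/* Illustrative sketch: find an object label for an inode/device
	 * pair in a subject's object hash table. */
	static struct acl_object_label *
	lookup_obj(const struct acl_subject_label *subj, ino_t ino, kdev_t dev)
	{
		unsigned long index = fhash(ino, dev, subj->obj_hash_size);
		struct acl_object_label *obj = subj->obj_hash[index];

		while (obj && (obj->inode != ino || obj->device != dev))
			obj = obj->next;

		return obj;	/* NULL if no label matches */
	}
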
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/gralloc.h linux-2.4.26-leo/include/linux/gralloc.h
--- linux-2.4.26/include/linux/gralloc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/gralloc.h	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,8 @@
+#ifndef __GRALLOC_H
+#define __GRALLOC_H
+
+void acl_free_all(void);
+int acl_alloc_stack_init(unsigned long size);
+void *acl_alloc(unsigned long len);
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/grdefs.h linux-2.4.26-leo/include/linux/grdefs.h
--- linux-2.4.26/include/linux/grdefs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/grdefs.h	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,116 @@
+#ifndef GRDEFS_H
+#define GRDEFS_H
+
+/* Begin grsecurity status declarations */
+
+enum {
+	GR_READY = 0x01,
+	GR_STATUS_INIT = 0x00	/* disabled state */
+};
+
+/* Begin  ACL declarations */
+
+/* Role flags */
+
+enum {
+	GR_ROLE_USER = 0x0001,
+	GR_ROLE_GROUP = 0x0002,
+	GR_ROLE_DEFAULT = 0x0004,
+	GR_ROLE_SPECIAL = 0x0008,
+	GR_ROLE_AUTH = 0x0010,
+	GR_ROLE_NOPW = 0x0020,
+	GR_ROLE_GOD = 0x0040,
+	GR_ROLE_LEARN = 0x0080,
+	GR_ROLE_TPE = 0x0100
+};
+
+/* ACL Subject and Object mode flags */
+enum {
+	GR_DELETED = 0x00000080
+};
+
+/* ACL Object-only mode flags */
+enum {
+	GR_READ 	= 0x00000001,
+	GR_APPEND 	= 0x00000002,
+	GR_WRITE 	= 0x00000004,
+	GR_EXEC 	= 0x00000008,
+	GR_FIND 	= 0x00000010,
+	GR_INHERIT 	= 0x00000040,
+	GR_PTRACERD 	= 0x00000100,
+	GR_SETID 	= 0x00000200,
+	GR_CREATE 	= 0x00000400,
+	GR_DELETE 	= 0x00000800,
+	GR_NOPTRACE	= 0x00001000,
+	GR_AUDIT_READ 	= 0x00002000,
+	GR_AUDIT_APPEND = 0x00004000,
+	GR_AUDIT_WRITE 	= 0x00008000,
+	GR_AUDIT_EXEC 	= 0x00010000,
+	GR_AUDIT_FIND 	= 0x00020000,
+	GR_AUDIT_INHERIT= 0x00040000,
+	GR_AUDIT_SETID 	= 0x00080000,
+	GR_AUDIT_CREATE = 0x00100000,
+	GR_AUDIT_DELETE = 0x00200000,
+	GR_SUPPRESS 	= 0x00400000,
+	GR_NOLEARN 	= 0x00800000
+};
+
+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
+		   GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
+		   GR_AUDIT_CREATE | GR_AUDIT_DELETE)
+
+/* ACL subject-only mode flags */
+enum {
+	GR_KILL 	= 0x00000001,
+	GR_VIEW 	= 0x00000002,
+	GR_PROTECTED 	= 0x00000100,
+	GR_LEARN 	= 0x00000200,
+	GR_OVERRIDE 	= 0x00000400,
+	/* just a placeholder, this mode is only used in userspace */
+	GR_DUMMY 	= 0x00000800,
+	GR_PAXPAGE 	= 0x00001000,
+	GR_PAXSEGM 	= 0x00002000,
+	GR_PAXGCC 	= 0x00004000,
+	GR_PAXRANDMMAP 	= 0x00008000,
+	GR_PAXRANDEXEC 	= 0x00010000,
+	GR_PAXMPROTECT 	= 0x00020000,
+	GR_PROTSHM 	= 0x00040000,
+	GR_KILLPROC 	= 0x00080000,
+	GR_KILLIPPROC	= 0x00100000,
+	/* just a placeholder, this mode is only used in userspace */
+	GR_NOTROJAN 	= 0x00200000,
+	GR_PROTPROCFD 	= 0x00400000,
+	GR_PROCACCT 	= 0x00800000,
+	GR_RELAXPTRACE	= 0x01000000,
+	GR_NESTED	= 0x02000000
+};
+
+enum {
+	GR_ID_USER      = 0x01,
+	GR_ID_GROUP     = 0x02,
+};
+
+enum {
+	GR_ID_ALLOW     = 0x01,
+	GR_ID_DENY      = 0x02,
+};
+
+#define GR_CRASH_RES	11
+#define GR_UIDTABLE_MAX 500
+
+/* begin resource learning section */
+enum {
+	GR_RLIM_CPU_BUMP = 60,
+	GR_RLIM_FSIZE_BUMP = 50000,
+	GR_RLIM_DATA_BUMP = 10000,
+	GR_RLIM_STACK_BUMP = 1000,
+	GR_RLIM_CORE_BUMP = 10000,
+	GR_RLIM_RSS_BUMP = 500000,
+	GR_RLIM_NPROC_BUMP = 1,
+	GR_RLIM_NOFILE_BUMP = 5,
+	GR_RLIM_MEMLOCK_BUMP = 50000,
+	GR_RLIM_AS_BUMP = 500000,
+	GR_RLIM_LOCKS_BUMP = 2
+};
+
+#endif
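
The *_BUMP values give learning mode some headroom: an observed high-water
mark is padded by the bump for its resource before being emitted, so the
generated policy does not sit exactly at the edge of what the process was
seen to use.  Roughly (the actual rounding lives in the grsecurity/
learning code, not in this header):

	/* Illustrative only: pad an observed RLIMIT_FSIZE peak before
	 * recording it as a learned limit. */
	unsigned long learned_fsize_limit(unsigned long observed)
	{
		return observed + GR_RLIM_FSIZE_BUMP;
	}
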
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/grinternal.h linux-2.4.26-leo/include/linux/grinternal.h
--- linux-2.4.26/include/linux/grinternal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/grinternal.h	2004-06-22 21:03:04.000000000 +0100
@@ -0,0 +1,201 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
+
+#ifdef CONFIG_GRKERNSEC
+
+#include <linux/grdefs.h>
+#include <linux/grmsg.h>
+
+extern void gr_add_learn_entry(const char *fmt, ...);
+extern __u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
+			    const struct vfsmount *mnt);
+extern __u32 gr_check_create(const struct dentry *new_dentry,
+			     const struct dentry *parent,
+			     const struct vfsmount *mnt, const __u32 mode);
+extern int gr_check_protected_task(const struct task_struct *task);
+extern __u32 to_gr_audit(const __u32 reqmode);
+extern int gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+			    struct dentry *old_dentry,
+			    struct dentry *new_dentry,
+			    struct vfsmount *mnt, const __u8 replace);
+extern int gr_set_acls(const int type);
+
+extern void gr_handle_alertkill(void);
+extern char *gr_to_filename(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+extern char *gr_to_filename1(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+extern char *gr_to_filename2(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+extern char *gr_to_filename3(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+
+extern int grsec_enable_link;
+extern int grsec_enable_fifo;
+extern int grsec_enable_execve;
+extern int grsec_enable_forkbomb;
+extern int grsec_forkbomb_gid;
+extern int grsec_forkbomb_sec;
+extern int grsec_forkbomb_max;
+extern int grsec_enable_execlog;
+extern int grsec_enable_signal;
+extern int grsec_enable_forkfail;
+extern int grsec_enable_time;
+extern int grsec_enable_chroot_shmat;
+extern int grsec_enable_chroot_findtask;
+extern int grsec_enable_chroot_mount;
+extern int grsec_enable_chroot_double;
+extern int grsec_enable_chroot_pivot;
+extern int grsec_enable_chroot_chdir;
+extern int grsec_enable_chroot_chmod;
+extern int grsec_enable_chroot_mknod;
+extern int grsec_enable_chroot_fchdir;
+extern int grsec_enable_chroot_nice;
+extern int grsec_enable_chroot_execlog;
+extern int grsec_enable_chroot_caps;
+extern int grsec_enable_chroot_sysctl;
+extern int grsec_enable_chroot_unix;
+extern int grsec_enable_tpe;
+extern int grsec_tpe_gid;
+extern int grsec_enable_tpe_all;
+extern int grsec_enable_randpid;
+extern int grsec_enable_socket_all;
+extern int grsec_socket_all_gid;
+extern int grsec_enable_socket_client;
+extern int grsec_socket_client_gid;
+extern int grsec_enable_socket_server;
+extern int grsec_socket_server_gid;
+extern int grsec_audit_gid;
+extern int grsec_enable_group;
+extern int grsec_enable_audit_ipc;
+extern int grsec_enable_audit_textrel;
+extern int grsec_enable_mount;
+extern int grsec_enable_chdir;
+extern int grsec_lock;
+
+extern struct task_struct *child_reaper;
+
+extern spinlock_t grsec_alert_lock;
+extern unsigned long grsec_alert_wtime;
+extern unsigned long grsec_alert_fyet;
+
+extern spinlock_t grsec_alertgood_lock;
+extern unsigned long grsec_alertgood_wtime;
+extern unsigned long grsec_alertgood_fyet;
+
+extern spinlock_t grsec_audit_lock;
+
+#define gr_task_fullpath(tsk) (tsk->exec_file ? \
+			gr_to_filename2(tsk->exec_file->f_dentry, \
+			tsk->exec_file->f_vfsmnt) : "/")
+
+#define gr_parent_task_fullpath(tsk) (tsk->p_pptr->exec_file ? \
+			gr_to_filename3(tsk->p_pptr->exec_file->f_dentry, \
+			tsk->p_pptr->exec_file->f_vfsmnt) : "/")
+
+#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
+			gr_to_filename(tsk->exec_file->f_dentry, \
+			tsk->exec_file->f_vfsmnt) : "/")
+
+#define gr_parent_task_fullpath0(tsk) (tsk->p_pptr->exec_file ? \
+			gr_to_filename1(tsk->p_pptr->exec_file->f_dentry, \
+			tsk->p_pptr->exec_file->f_vfsmnt) : "/")
+
+#define proc_is_chrooted(tsk_a)  ((tsk_a->pid > 1) && \
+			  ((tsk_a->fs->root->d_inode->i_dev != \
+			  child_reaper->fs->root->d_inode->i_dev) || \
+			  (tsk_a->fs->root->d_inode->i_ino != \
+			  child_reaper->fs->root->d_inode->i_ino)))
+
+#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs->root->d_inode->i_dev == \
+			  tsk_b->fs->root->d_inode->i_dev) && \
+			  (tsk_a->fs->root->d_inode->i_ino == \
+			  tsk_b->fs->root->d_inode->i_ino))
+
+#define DEFAULTSECARGS gr_task_fullpath(current), current->comm, \
+		       current->pid, current->uid, \
+		       current->euid, current->gid, current->egid, \
+		       gr_parent_task_fullpath(current), \
+		       current->p_pptr->comm, current->p_pptr->pid, \
+		       current->p_pptr->uid, current->p_pptr->euid, \
+		       current->p_pptr->gid, current->p_pptr->egid
+
+#define GR_CHROOT_CAPS ( \
+	CAP_TO_MASK(CAP_FOWNER) | \
+	CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
+	CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
+	CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
+	CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
+	CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
+	CAP_TO_MASK(CAP_IPC_OWNER))
+
+#define security_alert_good(normal_msg,args...) \
+({ \
+	spin_lock(&grsec_alertgood_lock); \
+	\
+	if (!grsec_alertgood_wtime || jiffies - grsec_alertgood_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) { \
+	    grsec_alertgood_wtime = jiffies; grsec_alertgood_fyet = 0; \
+	    if (current->curr_ip) \
+		printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
+	    else \
+	    	printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
+	} else if((jiffies - grsec_alertgood_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alertgood_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { \
+	    grsec_alertgood_fyet++; \
+	    if (current->curr_ip) \
+		printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
+	    else \
+	    	printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
+	} else if (grsec_alertgood_fyet == CONFIG_GRKERNSEC_FLOODBURST) { \
+	    grsec_alertgood_wtime = jiffies; grsec_alertgood_fyet++; \
+	    printk(KERN_ALERT "grsec: more alerts, logging disabled for " \
+		    "%d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); \
+	} \
+	\
+	spin_unlock(&grsec_alertgood_lock); \
+})
+
+#define security_alert(normal_msg,args...) \
+({ \
+	spin_lock(&grsec_alert_lock); \
+	\
+	if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) { \
+	    grsec_alert_wtime = jiffies; grsec_alert_fyet = 0; \
+	    if (current->curr_ip) \
+		printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
+	    else \
+	    	printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
+	} else if((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { \
+	    grsec_alert_fyet++; \
+	    if (current->curr_ip) \
+		printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
+	    else \
+	    	printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
+	} else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { \
+	    grsec_alert_wtime = jiffies; grsec_alert_fyet++; \
+	    printk(KERN_ALERT "grsec: more alerts, logging disabled for " \
+		    "%d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); \
+	} \
+	\
+	gr_handle_alertkill(); \
+	spin_unlock(&grsec_alert_lock); \
+})
+
+#define security_audit(normal_msg,args...) \
+({ \
+	spin_lock(&grsec_audit_lock); \
+	if (current->curr_ip) \
+		printk(KERN_INFO "grsec: From %u.%u.%u.%u: " normal_msg "\n", \
+		       NIPQUAD(current->curr_ip) , ## args); \
+	else \
+		printk(KERN_INFO "grsec: " normal_msg "\n", ## args); \
+	spin_unlock(&grsec_audit_lock); \
+})
+
+#define security_learn(normal_msg,args...) \
+({ \
+	gr_add_learn_entry(normal_msg "\n", ## args); \
+})
+
+#endif
+
+#endif
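
Both alert macros implement the same burst limiter: up to
CONFIG_GRKERNSEC_FLOODBURST messages per CONFIG_GRKERNSEC_FLOODTIME-second
window, then a single "logging disabled" notice, then silence until a full
window has elapsed.  The same logic as a standalone userspace model
(seconds in place of jiffies, one message stream):

	/* Standalone model of the alert rate limiter above.  wtime and
	 * fyet mirror grsec_alert_wtime and grsec_alert_fyet. */
	#include <stdio.h>
	#include <time.h>

	#define FLOODTIME  10	/* stands in for CONFIG_GRKERNSEC_FLOODTIME */
	#define FLOODBURST 4	/* stands in for CONFIG_GRKERNSEC_FLOODBURST */

	static time_t wtime;
	static unsigned long fyet;

	static void alert(const char *msg)
	{
		time_t now = time(NULL);

		if (!wtime || now - wtime > FLOODTIME) {
			wtime = now;	/* fresh window; fyet restarts at 0 */
			fyet = 0;
			printf("grsec: %s\n", msg);
		} else if (fyet < FLOODBURST) {
			fyet++;
			printf("grsec: %s\n", msg);
		} else if (fyet == FLOODBURST) {
			wtime = now;	/* restart the window and go quiet */
			fyet++;
			printf("grsec: more alerts, logging disabled for %d seconds\n",
			       FLOODTIME);
		}
		/* fyet > FLOODBURST: dropped silently until the window expires */
	}
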
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/grmsg.h linux-2.4.26-leo/include/linux/grmsg.h
--- linux-2.4.26/include/linux/grmsg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/grmsg.h	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,108 @@
+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d, parent %.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d"
+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d"
+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " DEFAULTSECMSG
+#define GR_IOPERM_MSG "denied use of ioperm() by " DEFAULTSECMSG
+#define GR_IOPL_MSG "denied use of iopl() by " DEFAULTSECMSG
+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " DEFAULTSECMSG
+#define GR_UNIX_CHROOT_MSG "denied connect to abstract AF_UNIX socket outside of chroot by " DEFAULTSECMSG
+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " DEFAULTSECMSG
+#define GR_KMEM_MSG "attempted write to /dev/kmem by " DEFAULTSECMSG
+#define GR_PORT_OPEN_MSG "attempted open of /dev/port by " DEFAULTSECMSG
+#define GR_MEM_WRITE_MSG "attempted write of /dev/mem by " DEFAULTSECMSG
+#define GR_MEM_MMAP_MSG "attempted mmap write of /dev/[k]mem by " DEFAULTSECMSG
+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " DEFAULTSECMSG
+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u"
+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " DEFAULTSECMSG
+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " DEFAULTSECMSG
+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " DEFAULTSECMSG
+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " DEFAULTSECMSG
+#define GR_MKNOD_CHROOT_MSG "refused attempt to mknod %.950s from chroot by " DEFAULTSECMSG
+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " DEFAULTSECMSG
+#define GR_UNIXCONNECT_ACL_MSG "%s connect to the unix domain socket %.950s by " DEFAULTSECMSG
+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " DEFAULTSECMSG
+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " DEFAULTSECMSG
+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " DEFAULTSECMSG
+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " DEFAULTSECMSG
+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " DEFAULTSECMSG
+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " DEFAULTSECMSG
+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " DEFAULTSECMSG
+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " DEFAULTSECMSG
+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " DEFAULTSECMSG
+#define GR_NPROC_MSG "attempt to overstep process limit by " DEFAULTSECMSG
+#define GR_EXEC_ACL_MSG "%s execution of %.950s by " DEFAULTSECMSG
+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " DEFAULTSECMSG
+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning uid %u from login for %lu seconds"
+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning execution of [%.16s:%lu] for %lu seconds"
+#define GR_MOUNT_CHROOT_MSG "denied attempt to mount %.30s as %.930s from chroot by " DEFAULTSECMSG
+#define GR_PIVOT_CHROOT_MSG "denied attempt to pivot_root from chroot by " DEFAULTSECMSG
+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " DEFAULTSECMSG
+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " DEFAULTSECMSG
+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " DEFAULTSECMSG
+#define GR_CHROOT_CHROOT_MSG "denied attempt to double chroot to %.950s by " DEFAULTSECMSG
+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " DEFAULTSECMSG
+#define GR_CHMOD_CHROOT_MSG "denied attempt to chmod +s %.950s by " DEFAULTSECMSG
+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " DEFAULTSECMSG
+#define GR_CHROOT_FCHDIR_MSG "attempted fchdir outside of chroot to %.950s by " DEFAULTSECMSG
+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " DEFAULTSECMSG
+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " DEFAULTSECMSG
+#define GR_INITF_ACL_MSG "init_variables() failed %s"
+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
+#define GR_DEV_ACL_MSG "/dev/grsec: being fed garbage %d bytes sent %d required"
+#define GR_SHUTS_ACL_MSG "shutdown auth success for " DEFAULTSECMSG
+#define GR_SHUTF_ACL_MSG "shutdown auth failure for " DEFAULTSECMSG
+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " DEFAULTSECMSG
+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " DEFAULTSECMSG
+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " DEFAULTSECMSG
+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " DEFAULTSECMSG
+#define GR_ENABLE_ACL_MSG "Loaded %s"
+#define GR_ENABLEF_ACL_MSG "Unable to load %s for " DEFAULTSECMSG " RBAC system may already be enabled."
+#define GR_RELOADI_ACL_MSG "Ignoring reload request for disabled RBAC system"
+#define GR_RELOAD_ACL_MSG "Reloaded %s"
+#define GR_RELOADF_ACL_MSG "Failed reload of %s for " DEFAULTSECMSG
+#define GR_SPROLEI_ACL_MSG "Ignoring change to special role for disabled RBAC system for " DEFAULTSECMSG
+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " DEFAULTSECMSG
+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " DEFAULTSECMSG
+#define GR_SPROLEF_ACL_MSG "special role %s failure for " DEFAULTSECMSG
+#define GR_UNSPROLEI_ACL_MSG "Ignoring unauth of special role for disabled RBAC system for " DEFAULTSECMSG
+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " DEFAULTSECMSG
+#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for " DEFAULTSECMSG
+#define GR_INVMODE_ACL_MSG "Invalid mode %d by " DEFAULTSECMSG
+#define GR_MAXPW_ACL_MSG "Maximum pw attempts reached (%d), locking password authentication"
+#define GR_MAXROLEPW_ACL_MSG "Maximum pw attempts reached (%d) trying to auth to special role %s, locking auth for role of " DEFAULTSECMSG
+#define GR_PRIORITY_CHROOT_MSG "attempted priority change of process (%.16s:%d) by " DEFAULTSECMSG
+#define GR_CAPSET_CHROOT_MSG "denied capset of (%.16s:%d) within chroot by " DEFAULTSECMSG
+#define GR_FAILFORK_MSG "failed fork with errno %d by " DEFAULTSECMSG
+#define GR_NICE_CHROOT_MSG "attempted priority change by " DEFAULTSECMSG
+#define GR_UNISIGLOG_MSG "signal %d sent to " DEFAULTSECMSG
+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " DEFAULTSECMSG
+#define GR_SIG_ACL_MSG "Attempted send of signal %d to protected task " DEFAULTSECMSG " by " DEFAULTSECMSG
+#define GR_SYSCTL_MSG "attempt to modify grsecurity sysctl value : %.32s by " DEFAULTSECMSG
+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " DEFAULTSECMSG
+#define GR_TIME_MSG "time set by " DEFAULTSECMSG
+#define GR_DEFACL_MSG "Fatal: Unable to find ACL for (%.16s:%d)"
+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " DEFAULTSECMSG
+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " DEFAULTSECMSG
+#define GR_SOCK_MSG "attempted socket(%.16s,%.16s,%.16s) by " DEFAULTSECMSG
+#define GR_SOCK2_MSG "attempted socket(%d,%.16s,%.16s) by " DEFAULTSECMSG
+#define GR_BIND_MSG "attempted bind() by " DEFAULTSECMSG
+#define GR_CONNECT_MSG "attempted connect by " DEFAULTSECMSG
+#define GR_BIND_ACL_MSG "attempted bind to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
+#define GR_CONNECT_ACL_MSG "attempted connect to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u"
+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " DEFAULTSECMSG
+#define GR_CAP_ACL_MSG "use of %s denied for " DEFAULTSECMSG
+#define GR_USRCHANGE_ACL_MSG "change to uid %d denied for " DEFAULTSECMSG
+#define GR_GRPCHANGE_ACL_MSG "change to gid %d denied for " DEFAULTSECMSG
+#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by " DEFAULTSECMSG
+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by " DEFAULTSECMSG
+#define GR_MOUNT_AUDIT_MSG "mount %.30s to %.64s by " DEFAULTSECMSG
+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " DEFAULTSECMSG
+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.63s) by " DEFAULTSECMSG
+#define GR_MSGQ_AUDIT_MSG "message queue created by " DEFAULTSECMSG
+#define GR_MSGQR_AUDIT_MSG "message queue of uid:%d euid:%d removed by " DEFAULTSECMSG
+#define GR_SEM_AUDIT_MSG "semaphore created by " DEFAULTSECMSG
+#define GR_SEMR_AUDIT_MSG "semaphore of uid:%d euid:%d removed by " DEFAULTSECMSG
+#define GR_SHM_AUDIT_MSG "shared memory of size %d created by " DEFAULTSECMSG
+#define GR_SHMR_AUDIT_MSG "shared memory of uid:%d euid:%d removed by " DEFAULTSECMSG
+#define GR_RESOURCE_MSG "attempted resource overstep by requesting %lu for %.16s against limit %lu by " DEFAULTSECMSG
+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " DEFAULTSECMSG
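
These format strings always travel with the argument macros from
grinternal.h; the DEFAULTSECMSG conversions line up one-for-one with
DEFAULTSECARGS.  A call site therefore has the shape:

	security_alert(GR_NICE_CHROOT_MSG, DEFAULTSECARGS);

Messages with extra conversions (GR_SIG_ACL_MSG and friends) simply supply
their own leading arguments ahead of DEFAULTSECARGS.
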
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/grsecurity.h linux-2.4.26-leo/include/linux/grsecurity.h
--- linux-2.4.26/include/linux/grsecurity.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/grsecurity.h	2004-06-22 21:02:57.000000000 +0100
@@ -0,0 +1,182 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+
+extern int gr_check_user_change(int real, int effective, int fs);
+extern int gr_check_group_change(int real, int effective, int fs);
+
+extern void gr_add_to_task_ip_table(struct task_struct *p);
+extern void gr_del_task_from_ip_table(struct task_struct *p);
+
+extern int gr_pid_is_chrooted(const struct task_struct *p);
+extern int gr_handle_chroot_nice(void);
+extern int gr_handle_chroot_sysctl(const int op);
+extern int gr_handle_chroot_capset(const struct task_struct *target);
+extern int gr_handle_chroot_setpriority(const struct task_struct *p,
+					const int niceval);
+extern int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
+extern int gr_handle_chroot_chroot(const struct dentry *dentry,
+				   const struct vfsmount *mnt);
+extern void gr_handle_chroot_caps(struct task_struct *task);
+extern void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt);
+extern int gr_handle_chroot_chmod(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int mode);
+extern int gr_handle_chroot_mknod(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int mode);
+extern int gr_handle_chroot_mount(const struct dentry *dentry,
+				  const struct vfsmount *mnt,
+				  const char *dev_name);
+extern int gr_handle_chroot_pivot(void);
+extern int gr_handle_chroot_unix(const pid_t pid);
+
+extern int gr_handle_rawio(const struct inode *inode);
+extern int gr_handle_nproc(void);
+
+extern void gr_handle_ioperm(void);
+extern void gr_handle_iopl(void);
+
+extern int gr_tpe_allow(const struct file *file);
+
+extern int gr_random_pid(spinlock_t * pid_lock);
+
+extern void gr_log_forkfail(const int retval);
+extern void gr_log_timechange(void);
+extern void gr_log_signal(const int sig, const struct task_struct *t);
+extern void gr_log_chdir(const struct dentry *dentry,
+			 const struct vfsmount *mnt);
+extern void gr_log_chroot_exec(const struct dentry *dentry,
+			       const struct vfsmount *mnt);
+extern void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
+extern void gr_log_remount(const char *devname, const int retval);
+extern void gr_log_unmount(const char *devname, const int retval);
+extern void gr_log_mount(const char *from, const char *to, const int retval);
+extern void gr_log_msgget(const int ret, const int msgflg);
+extern void gr_log_msgrm(const uid_t uid, const uid_t cuid);
+extern void gr_log_semget(const int err, const int semflg);
+extern void gr_log_semrm(const uid_t uid, const uid_t cuid);
+extern void gr_log_shmget(const int err, const int shmflg, const size_t size);
+extern void gr_log_shmrm(const uid_t uid, const uid_t cuid);
+extern void gr_log_textrel(struct vm_area_struct *vma);
+
+extern int gr_handle_follow_link(const struct inode *parent,
+				 const struct inode *inode,
+				 const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+extern int gr_handle_fifo(const struct dentry *dentry,
+			  const struct vfsmount *mnt,
+			  const struct dentry *dir, const int flag,
+			  const int acc_mode);
+extern int gr_handle_hardlink(const struct dentry *dentry,
+			      const struct vfsmount *mnt,
+			      struct inode *inode,
+			      const int mode, const char *to);
+
+extern int gr_task_is_capable(struct task_struct *task, const int cap);
+extern void gr_learn_resource(const struct task_struct *task, const int limit,
+			      const unsigned long wanted, const int gt);
+extern void gr_copy_label(struct task_struct *tsk);
+extern void gr_handle_crash(struct task_struct *task, const int sig);
+extern int gr_handle_signal(const struct task_struct *p, const int sig);
+extern int gr_check_crash_uid(const uid_t uid);
+extern int gr_check_protected_task(const struct task_struct *task);
+extern int gr_acl_handle_mmap(const struct file *file,
+			      const unsigned long prot);
+extern int gr_acl_handle_mprotect(const struct file *file,
+				  const unsigned long prot);
+extern int gr_check_hidden_task(const struct task_struct *tsk);
+extern __u32 gr_acl_handle_truncate(const struct dentry *dentry,
+				    const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_utime(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_access(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int fmode);
+extern __u32 gr_acl_handle_fchmod(const struct dentry *dentry,
+				  const struct vfsmount *mnt, mode_t mode);
+extern __u32 gr_acl_handle_chmod(const struct dentry *dentry,
+				 const struct vfsmount *mnt, mode_t mode);
+extern __u32 gr_acl_handle_chown(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+extern int gr_handle_ptrace_exec(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+extern int gr_handle_ptrace(struct task_struct *task, const long request);
+extern int gr_handle_proc_ptrace(struct task_struct *task);
+extern int gr_handle_mmap(const struct file *filp, const unsigned long prot);
+extern __u32 gr_acl_handle_execve(const struct dentry *dentry,
+				  const struct vfsmount *mnt);
+extern int gr_check_crash_exec(const struct file *filp);
+extern int gr_acl_is_enabled(void);
+extern void gr_set_kernel_label(struct task_struct *task);
+extern void gr_set_role_label(struct task_struct *task, const uid_t uid,
+			      const gid_t gid);
+extern void gr_set_proc_label(const struct dentry *dentry,
+			      const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
+				       const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_open(const struct dentry *dentry,
+				const struct vfsmount *mnt, const int fmode);
+extern __u32 gr_acl_handle_creat(const struct dentry *dentry,
+				 const struct dentry *p_dentry,
+				 const struct vfsmount *p_mnt, const int fmode,
+				 const int imode);
+extern void gr_handle_create(const struct dentry *dentry,
+			     const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
+				 const struct dentry *parent_dentry,
+				 const struct vfsmount *parent_mnt,
+				 const int mode);
+extern __u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
+				 const struct dentry *parent_dentry,
+				 const struct vfsmount *parent_mnt);
+extern __u32 gr_acl_handle_rmdir(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+extern void gr_handle_delete(const ino_t ino, const kdev_t dev);
+extern __u32 gr_acl_handle_unlink(const struct dentry *dentry,
+				  const struct vfsmount *mnt);
+extern __u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
+				   const struct dentry *parent_dentry,
+				   const struct vfsmount *parent_mnt,
+				   const char *from);
+extern __u32 gr_acl_handle_link(const struct dentry *new_dentry,
+				const struct dentry *parent_dentry,
+				const struct vfsmount *parent_mnt,
+				const struct dentry *old_dentry,
+				const struct vfsmount *old_mnt, const char *to);
+extern int gr_acl_handle_rename(struct dentry *new_dentry,
+				struct dentry *parent_dentry,
+				const struct vfsmount *parent_mnt,
+				struct dentry *old_dentry,
+				struct inode *old_parent_inode,
+				struct vfsmount *old_mnt, const char *newname);
+extern __u32 gr_check_link(const struct dentry *new_dentry,
+			   const struct dentry *parent_dentry,
+			   const struct vfsmount *parent_mnt,
+			   const struct dentry *old_dentry,
+			   const struct vfsmount *old_mnt);
+extern __u32 gr_acl_handle_filldir(const struct dentry *dentry,
+				   const struct vfsmount *mnt, const ino_t ino);
+extern __u32 gr_acl_handle_unix(const struct dentry *dentry,
+				const struct vfsmount *mnt);
+extern void gr_set_pax_flags(struct task_struct *task);
+extern void gr_acl_handle_exit(void);
+extern void gr_acl_handle_psacct(struct task_struct *task, const long code);
+extern int gr_acl_handle_procpidmem(const struct task_struct *task);
+extern __u32 gr_cap_rtnetlink(void);
+
+#ifdef CONFIG_GRKERNSEC
+extern void gr_handle_mem_write(void);
+extern void gr_handle_kmem_write(void);
+extern void gr_handle_open_port(void);
+extern int gr_handle_mem_mmap(const unsigned long offset,
+			      struct vm_area_struct *vma);
+
+extern __u16 ip_randomid(void);
+extern __u32 ip_randomisn(void);
+extern unsigned long get_random_long(void);
+
+extern int grsec_enable_dmesg;
+extern int grsec_enable_randid;
+extern int grsec_enable_randisn;
+extern int grsec_enable_randsrc;
+extern int grsec_enable_randrpc;
+#endif
+
+#endif
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/highmem.h linux-2.4.26-leo/include/linux/highmem.h
--- linux-2.4.26/include/linux/highmem.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/linux/highmem.h	2004-06-22 21:02:58.000000000 +0100
@@ -33,18 +33,8 @@
 {
 	unsigned long addr;
 
-	__save_flags(*flags);
+	local_irq_save(*flags);
 
-	/*
-	 * could be low
-	 */
-	if (!PageHighMem(bh->b_page))
-		return bh->b_data;
-
-	/*
-	 * it's a highmem page
-	 */
-	__cli();
 	addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);
 
 	if (addr & ~PAGE_MASK)
@@ -58,7 +48,7 @@
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
 	kunmap_atomic((void *) ptr, KM_BH_IRQ);
-	__restore_flags(*flags);
+	local_irq_restore(*flags);
 }
 
 #else /* CONFIG_HIGHMEM */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/jbd.h linux-2.4.26-leo/include/linux/jbd.h
--- linux-2.4.26/include/linux/jbd.h	2003-12-02 14:30:15.000000000 +0000
+++ linux-2.4.26-leo/include/linux/jbd.h	2004-06-22 21:05:45.000000000 +0100
@@ -311,7 +311,7 @@
 
 static inline struct journal_head *bh2jh(struct buffer_head *bh)
 {
-	return bh->b_private;
+	return bh->b_journal_head;
 }
 
 #define HAVE_JOURNAL_CALLBACK_STATUS
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/kernel.h linux-2.4.26-leo/include/linux/kernel.h
--- linux-2.4.26/include/linux/kernel.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/linux/kernel.h	2004-06-22 21:02:57.000000000 +0100
@@ -71,14 +71,17 @@
 extern long long simple_strtoll(const char *,char **,unsigned int);
 extern int sprintf(char * buf, const char * fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int vsprintf(char *buf, const char *, va_list);
+extern int vsprintf(char *buf, const char *, va_list)
+	__attribute__ ((format (printf, 2, 0)));
 extern int snprintf(char * buf, size_t size, const char * fmt, ...)
 	__attribute__ ((format (printf, 3, 4)));
-extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+	__attribute__ ((format (printf, 3, 0)));
 
 extern int sscanf(const char *, const char *, ...)
 	__attribute__ ((format (scanf,2,3)));
-extern int vsscanf(const char *, const char *, va_list);
+extern int vsscanf(const char *, const char *, va_list)
+	__attribute__ ((format (scanf, 2, 0)));
 
 extern int get_option(char **str, int *pint);
 extern char *get_options(char *str, int nints, int *ints);
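
The third attribute argument of 0 tells gcc the function consumes a
va_list rather than its own variadic arguments, so the format string is
still checked against whatever a wrapper's callers pass.  Any helper built
on these gains the same checking, e.g. (a hypothetical wrapper):

	#include <stdio.h>
	#include <stdarg.h>

	/* Hypothetical wrapper: with the attribute, gcc warns when a
	 * call site's arguments disagree with fmt. */
	static int log_into(char *buf, size_t size, const char *fmt, ...)
		__attribute__ ((format (printf, 3, 4)));

	static int log_into(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int len;

		va_start(args, fmt);
		len = vsnprintf(buf, size, fmt, args);	/* checked via (printf, 3, 0) */
		va_end(args);
		return len;
	}
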
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/loop.h linux-2.4.26-leo/include/linux/loop.h
--- linux-2.4.26/include/linux/loop.h	2003-12-02 14:32:20.000000000 +0000
+++ linux-2.4.26-leo/include/linux/loop.h	2004-05-26 23:34:14.000000000 +0100
@@ -16,6 +16,14 @@
 #define LO_KEY_SIZE	32
 
 #ifdef __KERNEL__
+typedef int sector_t; /* in 2.6 this is defined in <asm/types.h> and
+			 is most likely a u64; but since cryptoloop uses
+			 only the lower 32 bits of the block number
+			 passed, let's just use a 32-bit int for now */
+
+/* definitions for IV metric */
+#define LOOP_IV_SECTOR_BITS 9
+#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
 
 /* Possible states of device */
 enum {
@@ -24,6 +32,12 @@
 	Lo_rundown,
 };
 
+struct loop_device;
+
+typedef	int (* transfer_proc_t)(struct loop_device *, int cmd,
+				char *raw_buf, char *loop_buf, int size,
+				sector_t real_block);
+
 struct loop_device {
 	int		lo_number;
 	int		lo_refcnt;
@@ -32,9 +46,7 @@
 	int		lo_encrypt_type;
 	int		lo_encrypt_key_size;
 	int		lo_flags;
-	int		(*transfer)(struct loop_device *, int cmd,
-				    char *raw_buf, char *loop_buf, int size,
-				    int real_block);
+	transfer_proc_t transfer;
 	char		lo_name[LO_NAME_SIZE];
 	char		lo_encrypt_key[LO_KEY_SIZE];
 	__u32           lo_init[2];
@@ -58,17 +70,13 @@
 	atomic_t		lo_pending;
 };
 
-typedef	int (* transfer_proc_t)(struct loop_device *, int cmd,
-				char *raw_buf, char *loop_buf, int size,
-				int real_block);
-
 static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
-				 char *lbuf, int size, int rblock)
+				 char *lbuf, int size, sector_t real_block)
 {
 	if (!lo->transfer)
 		return 0;
 
-	return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
+	return lo->transfer(lo, cmd, rbuf, lbuf, size, real_block);
 }
 #endif /* __KERNEL__ */
 
@@ -122,6 +130,7 @@
 #define LO_CRYPT_IDEA     6
 #define LO_CRYPT_DUMMY    9
 #define LO_CRYPT_SKIPJACK 10
+#define LO_CRYPT_CRYPTOAPI 18
 #define MAX_LO_CRYPT	20
 
 #ifdef __KERNEL__
@@ -129,7 +138,7 @@
 struct loop_func_table {
 	int number; 	/* filter type */ 
 	int (*transfer)(struct loop_device *lo, int cmd, char *raw_buf,
-			char *loop_buf, int size, int real_block);
+			char *loop_buf, int size, sector_t real_block);
 	int (*init)(struct loop_device *, struct loop_info *); 
 	/* release is called from loop_unregister_transfer or clr_fd */
 	int (*release)(struct loop_device *); 
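
Everything that moves data through the loop device, including the
cryptoapi filter this patch wires in, plugs in through transfer_proc_t.
The driver's built-in XOR filter is the simplest real example of the
shape such a function takes; a sketch along those lines (ignoring
real_block, as the XOR filter does):

	/* Sketch of a transfer_proc_t-shaped filter, modelled on the loop
	 * driver's built-in XOR transfer.  cmd is READ or WRITE; data moves
	 * raw_buf -> loop_buf on READ and loop_buf -> raw_buf on WRITE. */
	static int xor_transfer(struct loop_device *lo, int cmd,
				char *raw_buf, char *loop_buf, int size,
				sector_t real_block)
	{
		char *in = (cmd == READ) ? raw_buf : loop_buf;
		char *out = (cmd == READ) ? loop_buf : raw_buf;
		char *key = lo->lo_encrypt_key;
		int keysize = lo->lo_encrypt_key_size;
		int i;

		for (i = 0; i < size; i++)
			*out++ = *in++ ^ key[i % keysize];
		return 0;
	}
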
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/mempool.h linux-2.4.26-leo/include/linux/mempool.h
--- linux-2.4.26/include/linux/mempool.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/include/linux/mempool.h	2004-06-22 21:04:34.000000000 +0100
@@ -0,0 +1,31 @@
+/*
+ * memory buffer pool support
+ */
+#ifndef _LINUX_MEMPOOL_H
+#define _LINUX_MEMPOOL_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct mempool_s;
+typedef struct mempool_s mempool_t;
+
+typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
+typedef void (mempool_free_t)(void *element, void *pool_data);
+
+extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+				 mempool_free_t *free_fn, void *pool_data);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
+extern void mempool_destroy(mempool_t *pool);
+extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
+extern void mempool_free(void *element, mempool_t *pool);
+
+/*
+ * A mempool_alloc_t and mempool_free_t that get the memory from
+ * a slab that is passed in through pool_data.
+ */
+void *mempool_alloc_slab(int gfp_mask, void *pool_data);
+void mempool_free_slab(void *element, void *pool_data);
+
+
+#endif /* _LINUX_MEMPOOL_H */
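
This is the 2.6 mempool API carried back for the device-mapper code
elsewhere in this patch.  The common pattern is a pool over an ordinary
slab cache, which keeps a minimum number of objects in reserve so that
mempool_alloc with a blocking gfp_mask cannot fail.  A sketch (names are
illustrative):

	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct io_ctx {
		int result;
	};

	static kmem_cache_t *io_cache;
	static mempool_t *io_pool;

	static int io_pool_init(void)
	{
		/* 2.4 slab API: name, size, offset, flags, ctor, dtor */
		io_cache = kmem_cache_create("io_ctx", sizeof(struct io_ctx),
					     0, 0, NULL, NULL);
		if (!io_cache)
			return -ENOMEM;

		/* keep at least 16 objects in reserve */
		io_pool = mempool_create(16, mempool_alloc_slab,
					 mempool_free_slab, io_cache);
		if (!io_pool) {
			kmem_cache_destroy(io_cache);
			return -ENOMEM;
		}
		return 0;
	}

In the I/O path the pair is then mempool_alloc(io_pool, GFP_NOIO) and
mempool_free(io, io_pool).
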
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/mm.h linux-2.4.26-leo/include/linux/mm.h
--- linux-2.4.26/include/linux/mm.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/linux/mm.h	2004-06-22 21:02:57.000000000 +0100
@@ -22,9 +22,13 @@
 extern struct list_head active_list;
 extern struct list_head inactive_list;
 
+extern void gr_learn_resource(const struct task_struct * task, const int limit,
+			      const unsigned long wanted, const int gt);
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/atomic.h>
+#include <asm/mman.h>
 
 /*
  * Linux kernel virtual memory manager primitives.
@@ -104,6 +108,33 @@
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 #define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+#define VM_MIRROR	0x00100000	/* vma is mirroring another */
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+#define VM_MAYNOTWRITE	0x00200000	/* vma cannot be granted VM_WRITE any more */
+#endif
+
+#ifdef ARCH_STACK_GROWSUP
+#define __VM_STACK_FLAGS	0x00000233
+#else
+#define __VM_STACK_FLAGS	0x00000133
+#endif
+
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+#define VM_STACK_FLAGS	(__VM_STACK_FLAGS | \
+			((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
+			((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
+#else
+#define VM_STACK_FLAGS	(__VM_STACK_FLAGS | VM_MAYEXEC | \
+			((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
+#endif
+#else
+#define VM_STACK_FLAGS (__VM_STACK_FLAGS | VM_EXEC | VM_MAYEXEC)
+#endif
+
 #ifndef VM_STACK_FLAGS
 #define VM_STACK_FLAGS	0x00000177
 #endif
@@ -492,6 +523,7 @@
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
 extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -556,21 +588,47 @@
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long pgoff);
 
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long offset)
 {
 	unsigned long ret = -EINVAL;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) &&
+	   (len > SEGMEXEC_TASK_SIZE || (addr && addr > SEGMEXEC_TASK_SIZE-len)))
+		goto out;
+#endif
+
 	if ((offset + PAGE_ALIGN(len)) < offset)
 		goto out;
 	if (!(offset & ~PAGE_MASK))
 		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) && ret < TASK_SIZE && ((flag & MAP_TYPE) == MAP_PRIVATE)
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	    && (!(current->flags & PF_PAX_MPROTECT) || ((prot & PROT_EXEC) && file && !(prot & PROT_WRITE)))
+#endif
+	   )
+	{
+		unsigned long ret_m;
+		prot = prot & PROT_EXEC ? prot : PROT_NONE;
+		ret_m = do_mmap_pgoff(NULL, ret + SEGMEXEC_TASK_SIZE, 0UL, prot, flag | MAP_MIRROR | MAP_FIXED, ret);
+		if (ret_m >= TASK_SIZE) {
+			do_munmap(current->mm, ret, len);
+			ret = ret_m;
+		}
+	}
+#endif
+
 out:
 	return ret;
 }
 
-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
-
 extern unsigned long do_brk(unsigned long, unsigned long);
 
 static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
@@ -583,6 +641,12 @@
 
 static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
 {
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if ((vma->vm_flags | vm_flags) & VM_MIRROR)
+		return 0;
+#endif
+
 	if (!vma->vm_file && vma->vm_flags == vm_flags)
 		return 1;
 	else
@@ -636,7 +700,12 @@
 
 	return gfp_mask;
 }
-	
+
+/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+					     struct vm_area_struct **pprev);
+
 /* vma is the first one with  address < vma->vm_end,
  * and even  address < vma->vm_start. Have to extend vma. */
 static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
@@ -651,11 +720,51 @@
 	address &= PAGE_MASK;
  	spin_lock(&vma->vm_mm->page_table_lock);
 	grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+	gr_learn_resource(current, RLIMIT_STACK, vma->vm_end - address, 1);
+	gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1);
+	gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (vma->vm_flags & VM_MIRROR) {
+		struct vm_area_struct * vma_m;
+		unsigned long address_m;
+
+		address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+		vma_m = find_vma(vma->vm_mm, address_m);
+		if (!vma_m || vma_m->vm_start != address_m || !(vma_m->vm_flags & VM_MIRROR) ||
+		    vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start) {
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+			       address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+			return -ENOMEM;
+		}
+
+		address_m = address + (unsigned long)vma->vm_private_data;
+		if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
+		    ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur ||
+		    ((vma_m->vm_flags & VM_LOCKED) && ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
+		     current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			return -ENOMEM;
+		}
+
+		vma_m->vm_start = address_m;
+		vma_m->vm_pgoff -= grow;
+		vma_m->vm_mm->total_vm += grow;
+		if (vma_m->vm_flags & VM_LOCKED)
+			vma_m->vm_mm->locked_vm += grow;
+	} else
+#endif
+
 	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
-	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
+	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur ||
+	    ((vma->vm_flags & VM_LOCKED) && ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+	     current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
 		spin_unlock(&vma->vm_mm->page_table_lock);
 		return -ENOMEM;
 	}
+
 	vma->vm_start = address;
 	vma->vm_pgoff -= grow;
 	vma->vm_mm->total_vm += grow;
@@ -665,11 +774,6 @@
 	return 0;
 }
 
-/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
-extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
-					     struct vm_area_struct **pprev);
-
 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    NULL if none.  Assume start_addr < end_addr. */
 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
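
For the VM_MIRROR case, the distance to the twin vma is stashed in
vm_private_data, which is why both do_mmap() and expand_stack() above can
locate the counterpart with plain address arithmetic:

	/* Illustrative: given one end of a mirrored pair, the other end is
	 * a fixed offset away.  Under SEGMEXEC on i386 that offset is
	 * SEGMEXEC_TASK_SIZE (half of the usual 3GB user address space). */
	static unsigned long mirror_address(const struct vm_area_struct *vma,
					    unsigned long addr)
	{
		return addr + (unsigned long) vma->vm_private_data;
	}
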
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/msdos_fs_sb.h linux-2.4.26-leo/include/linux/msdos_fs_sb.h
--- linux-2.4.26/include/linux/msdos_fs_sb.h	2001-10-12 21:48:42.000000000 +0100
+++ linux-2.4.26-leo/include/linux/msdos_fs_sb.h	2004-05-26 23:34:22.000000000 +0100
@@ -9,7 +9,9 @@
 struct fat_mount_options {
 	uid_t fs_uid;
 	gid_t fs_gid;
-	unsigned short fs_umask;
+	unsigned short fs_umask;  /* global mask that masks off bits from either of the two fields below */
+	unsigned short fs_fmode;  /* mode bits for files for fdmode */
+	unsigned short fs_dmode;  /* mode bits for directories for fdmode */
 	unsigned short codepage;  /* Codepage for shortname conversions */
 	char *iocharset;          /* Charset used for filename input/display */
 	unsigned short shortname; /* flags for shortname display/create rule */
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/proc_fs.h linux-2.4.26-leo/include/linux/proc_fs.h
--- linux-2.4.26/include/linux/proc_fs.h	2004-05-26 23:40:33.000000000 +0100
+++ linux-2.4.26-leo/include/linux/proc_fs.h	2004-06-22 21:03:04.000000000 +0100
@@ -143,6 +143,9 @@
 extern struct proc_dir_entry *proc_mknod(const char *,mode_t,
 		struct proc_dir_entry *,kdev_t);
 extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
+#ifdef CONFIG_GRKERNSEC_PROC
+extern struct proc_dir_entry *proc_priv_mkdir(const char *, struct proc_dir_entry *);
+#endif
 
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
 	mode_t mode, struct proc_dir_entry *base, 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/raid/md.h linux-2.4.26-leo/include/linux/raid/md.h
--- linux-2.4.26/include/linux/raid/md.h	2004-02-20 13:07:46.000000000 +0000
+++ linux-2.4.26-leo/include/linux/raid/md.h	2004-06-22 21:04:27.000000000 +0100
@@ -18,6 +18,8 @@
 #ifndef _MD_H
 #define _MD_H
 
+#include <linux/config.h>
+
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
@@ -88,5 +90,15 @@
 
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
+/* Verbose startup message option
+ *  Added by Paul Evans, <nerd@freeuk.com>
+ */
+
+#ifdef CONFIG_MD_VERBMSG
+# define VERBMSG(c) c;
+#else
+# define VERBMSG(c)
+#endif
+
 #endif 
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/sched.h linux-2.4.26-leo/include/linux/sched.h
--- linux-2.4.26/include/linux/sched.h	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/include/linux/sched.h	2004-06-22 21:02:57.000000000 +0100
@@ -27,6 +27,9 @@
 #include <linux/securebits.h>
 #include <linux/fs_struct.h>
 
+extern int gr_task_is_capable(struct task_struct *task, const int cap);
+extern int gr_pid_is_chrooted(const struct task_struct *p);
+
 struct exec_domain;
 
 /*
@@ -91,6 +94,7 @@
 #define TASK_UNINTERRUPTIBLE	2
 #define TASK_ZOMBIE		4
 #define TASK_STOPPED		8
+#define PREEMPT_ACTIVE		0x4000000
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -147,6 +151,9 @@
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
 asmlinkage void schedule(void);
+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void);
+#endif
 
 extern int schedule_task(struct tq_struct *task);
 extern void flush_scheduled_tasks(void);
@@ -227,10 +234,24 @@
 	unsigned long cpu_vm_mask;
 	unsigned long swap_address;
 
-	unsigned dumpable:1;
+ 	unsigned dumpable:1;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
+
+#ifdef CONFIG_GRKERNSEC_PAX_DLRESOLVE
+	unsigned long call_dl_resolve;
+#endif
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_GRKERNSEC_PAX_EMUSIGRT)
+	unsigned long call_syscall;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_ASLR
+	unsigned long delta_mmap;		/* PaX: randomized offset */
+	unsigned long delta_exec;		/* PaX: randomized offset */
+	unsigned long delta_stack;		/* PaX: randomized offset */
+#endif
 };
 
 extern int mmlist_nr;
@@ -285,7 +306,7 @@
 	 * offsets of these are hardcoded elsewhere - touch with care
 	 */
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
-	unsigned long flags;	/* per process flags, defined below */
+	int preempt_count;	/* 0 => preemptable, <0 => BUG */
 	int sigpending;
 	mm_segment_t addr_limit;	/* thread address space:
 					 	0-0xBFFFFFFF for user-thead
@@ -327,6 +348,7 @@
 	struct mm_struct *active_mm;
 	struct list_head local_pages;
 	unsigned int allocation_order, nr_local_pages;
+	unsigned long flags;
 
 /* task state */
 	struct linux_binfmt *binfmt;
@@ -406,7 +428,7 @@
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-	
+
 /* Thread group tracking */
    	u32 parent_exec_id;
    	u32 self_exec_id;
@@ -415,6 +437,22 @@
 
 /* journalling filesystem info */
 	void *journal_info;
+
+#ifdef CONFIG_GRKERNSEC
+/* added by grsecurity's ACL system */	
+	struct acl_subject_label *acl;
+	struct acl_role_label *role;
+	struct file *exec_file;
+	u32 curr_ip;
+	u32 gr_saddr;
+	u32 gr_daddr;
+	u16 gr_sport;
+	u16 gr_dport;
+	u16 acl_role_id;	
+	u8 acl_sp_role:1;	
+	u8 used_accept:1;	
+	u8 is_writable:1;
+#endif
 };
 
 /*
@@ -436,6 +474,29 @@
 
 #define PF_USEDFPU	0x00100000	/* task used FPU this quantum (SMP) */
 
+#define PF_PAX_PAGEEXEC       0x01000000      /* Paging based non-executable pages */
+#define PF_PAX_EMUTRAMP       0x02000000      /* Emulate trampolines */
+#define PF_PAX_MPROTECT       0x04000000      /* Restrict mprotect() */
+#define PF_PAX_RANDMMAP       0x08000000      /* Randomize mmap() base */
+#define PF_PAX_RANDEXEC	      0x10000000      /* Randomize ET_EXEC base */
+#define PF_PAX_SEGMEXEC	      0x20000000      /* Segmentation based non-executable pages */
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK)
+extern unsigned int pax_aslr;
+#endif
+
+extern unsigned int pax_softmode;
+#endif
+
+extern int pax_check_flags(unsigned long *);
+
+#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS
+extern void pax_set_flags(struct linux_binprm * bprm);
+#elif defined(CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS)
+extern void (*pax_set_flags_func)(struct linux_binprm * bprm);
+#endif
+
 /*
  * Ptrace flags
  */
@@ -550,6 +611,8 @@
 	*p->pidhash_pprev = p->pidhash_next;
 }
 
+#include <asm/current.h>
+
 static inline struct task_struct *find_task_by_pid(int pid)
 {
 	struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
@@ -557,6 +620,8 @@
 	for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
 		;
 
+	if (gr_pid_is_chrooted(p)) p = NULL;
+
 	return p;
 }
 
@@ -578,8 +643,6 @@
 extern void free_uid(struct user_struct *);
 extern void switch_uid(struct user_struct *);
 
-#include <asm/current.h>
-
 extern unsigned long volatile jiffies;
 extern unsigned long itimer_ticks;
 extern unsigned long itimer_next;
@@ -743,7 +806,7 @@
 static inline int capable(int cap)
 {
 #if 1 /* ok now */
-	if (cap_raised(current->cap_effective, cap))
+	if (cap_raised(current->cap_effective, cap) && gr_task_is_capable(current, cap))
 #else
 	if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
 #endif
@@ -958,5 +1021,10 @@
 		__cond_resched();
 }
 
+#define _TASK_STRUCT_DEFINED
+#include <linux/dcache.h>
+#include <linux/tqueue.h>
+#include <linux/fs_struct.h>
+
 #endif /* __KERNEL__ */
 #endif
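With the capable() hunk above, a capability is granted only when both the ordinary capability bit and grsecurity's per-task ACL agree. The effective logic, as a sketch (capable_sketch() is an illustrative rename, and the PF_SUPERPRIV bookkeeping follows the surrounding 2.4 code as an assumption):

static inline int capable_sketch(int cap)
{
	if (cap_raised(current->cap_effective, cap) &&
	    gr_task_is_capable(current, cap)) {
		current->flags |= PF_SUPERPRIV;	/* record privilege use */
		return 1;
	}
	return 0;	/* refused by either the bit or the ACL */
}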
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/smp_lock.h linux-2.4.26-leo/include/linux/smp_lock.h
--- linux-2.4.26/include/linux/smp_lock.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/linux/smp_lock.h	2004-06-22 21:02:57.000000000 +0100
@@ -3,7 +3,7 @@
 
 #include <linux/config.h>
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
 
 #define lock_kernel()				do { } while(0)
 #define unlock_kernel()				do { } while(0)
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/spinlock.h linux-2.4.26-leo/include/linux/spinlock.h
--- linux-2.4.26/include/linux/spinlock.h	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/include/linux/spinlock.h	2004-06-22 21:02:57.000000000 +0100
@@ -2,6 +2,7 @@
 #define __LINUX_SPINLOCK_H
 
 #include <linux/config.h>
+#include <linux/compiler.h>
 
 #include <asm/system.h>
 
@@ -64,8 +65,10 @@
 
 #if (DEBUG_SPINLOCKS < 1)
 
+#ifndef CONFIG_PREEMPT
 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
 #define ATOMIC_DEC_AND_LOCK
+#endif
 
 /*
  * Your basic spinlocks, allowing only a single CPU anywhere
@@ -82,11 +85,11 @@
 #endif
 
 #define spin_lock_init(lock)	do { } while(0)
-#define spin_lock(lock)		(void)(lock) /* Not "unused variable". */
+#define _raw_spin_lock(lock)	(void)(lock) /* Not "unused variable". */
 #define spin_is_locked(lock)	(0)
-#define spin_trylock(lock)	({1; })
+#define _raw_spin_trylock(lock)	({1; })
 #define spin_unlock_wait(lock)	do { } while(0)
-#define spin_unlock(lock)	do { } while(0)
+#define _raw_spin_unlock(lock)	do { } while(0)
 
 #elif (DEBUG_SPINLOCKS < 2)
 
@@ -146,13 +149,78 @@
 #endif
 
 #define rwlock_init(lock)	do { } while(0)
-#define read_lock(lock)		(void)(lock) /* Not "unused variable". */
-#define read_unlock(lock)	do { } while(0)
-#define write_lock(lock)	(void)(lock) /* Not "unused variable". */
-#define write_unlock(lock)	do { } while(0)
+#define _raw_read_lock(lock)	(void)(lock) /* Not "unused variable". */
+#define _raw_read_unlock(lock)	do { } while(0)
+#define _raw_write_lock(lock)	(void)(lock) /* Not "unused variable". */
+#define _raw_write_unlock(lock)	do { } while(0)
 
 #endif /* !SMP */
 
+#ifdef CONFIG_PREEMPT
+
+#define preempt_get_count()	(current->preempt_count)
+#define preempt_is_disabled()	(preempt_get_count() != 0)
+
+#define preempt_disable() \
+do { \
+	++current->preempt_count; \
+	barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+	--current->preempt_count; \
+	barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+	--current->preempt_count; \
+	barrier(); \
+	if (unlikely(current->preempt_count < current->need_resched)) \
+		preempt_schedule(); \
+} while (0)
+
+#define spin_lock(lock)	\
+do { \
+	preempt_disable(); \
+	_raw_spin_lock(lock); \
+} while(0)
+
+#define spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
+				1 : ({preempt_enable(); 0;});})
+#define spin_unlock(lock) \
+do { \
+	_raw_spin_unlock(lock); \
+	preempt_enable(); \
+} while (0)
+
+#define read_lock(lock)		({preempt_disable(); _raw_read_lock(lock);})
+#define read_unlock(lock)	({_raw_read_unlock(lock); preempt_enable();})
+#define write_lock(lock)	({preempt_disable(); _raw_write_lock(lock);})
+#define write_unlock(lock)	({_raw_write_unlock(lock); preempt_enable();})
+#define write_trylock(lock)	({preempt_disable();_raw_write_trylock(lock) ? \
+				1 : ({preempt_enable(); 0;});})
+
+#else
+
+#define preempt_get_count()	(0)
+#define preempt_is_disabled()	(1)
+#define preempt_disable()	do { } while (0)
+#define preempt_enable_no_resched()	do { } while (0)
+#define preempt_enable()	do { } while (0)
+
+#define spin_lock(lock)		_raw_spin_lock(lock)
+#define spin_trylock(lock)	_raw_spin_trylock(lock)
+#define spin_unlock(lock)	_raw_spin_unlock(lock)
+
+#define read_lock(lock)		_raw_read_lock(lock)
+#define read_unlock(lock)	_raw_read_unlock(lock)
+#define write_lock(lock)	_raw_write_lock(lock)
+#define write_unlock(lock)	_raw_write_unlock(lock)
+#define write_trylock(lock)	_raw_write_trylock(lock)
+#endif
+
 /* "lock on reference count zero" */
 #ifndef ATOMIC_DEC_AND_LOCK
 #include <asm/atomic.h>
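The preempt_count field turns preemption control into a nesting counter: every spin_lock() implies a preempt_disable(), and only the outermost preempt_enable(), which drops the count back to zero, can trigger preempt_schedule(). The nesting, sketched (nesting_example() is hypothetical):

static void nesting_example(spinlock_t *lock)
{
	preempt_disable();	/* preempt_count: 0 -> 1 */
	spin_lock(lock);	/* preempt_count: 1 -> 2, then takes the lock */
	/* critical section: no kernel preemption can occur here */
	spin_unlock(lock);	/* preempt_count: 2 -> 1, still non-preemptible */
	preempt_enable();	/* preempt_count: 1 -> 0, may call preempt_schedule() */
}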
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/sysctl.h linux-2.4.26-leo/include/linux/sysctl.h
--- linux-2.4.26/include/linux/sysctl.h	2004-05-26 23:40:38.000000000 +0100
+++ linux-2.4.26-leo/include/linux/sysctl.h	2004-06-22 21:02:59.000000000 +0100
@@ -128,8 +128,20 @@
 	KERN_PPC_L3CR=57,       /* l3cr register on PPC */
 	KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */
  	KERN_CORE_SETUID=59,	/* int: set to allow core dumps of setuid apps */
+	KERN_GRSECURITY=68,	/* grsecurity */
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	KERN_PAX=69,            /* PaX control */
+#endif
+
 };
 
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+enum {
+	PAX_ASLR=1,		/* PaX: disable/enable all randomization features */
+	PAX_SOFTMODE=2,		/* PaX: disable/enable soft mode */
+};
+#endif
 
 /* CTL_VM names: */
 enum
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/tqueue.h linux-2.4.26-leo/include/linux/tqueue.h
--- linux-2.4.26/include/linux/tqueue.h	2004-02-20 13:06:56.000000000 +0000
+++ linux-2.4.26-leo/include/linux/tqueue.h	2004-06-22 21:02:57.000000000 +0100
@@ -94,6 +94,22 @@
 extern spinlock_t tqueue_lock;
 
 /*
+ * Call all "bottom halves" on a given list.
+ */
+
+extern void __run_task_queue(task_queue *list);
+
+static inline void run_task_queue(task_queue *list)
+{
+	if (TQ_ACTIVE(*list))
+		__run_task_queue(list);
+}
+
+#endif /* _LINUX_TQUEUE_H */
+
+#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
+#define _LINUX_TQUEUE_H_INLINES
+/*
  * Queue a task on a tq.  Return non-zero if it was successfully
  * added.
  */
@@ -109,17 +125,4 @@
 	}
 	return ret;
 }
-
-/*
- * Call all "bottom halfs" on a given list.
- */
-
-extern void __run_task_queue(task_queue *list);
-
-static inline void run_task_queue(task_queue *list)
-{
-	if (TQ_ACTIVE(*list))
-		__run_task_queue(list);
-}
-
-#endif /* _LINUX_TQUEUE_H */
+#endif
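The reshuffle above breaks an include cycle: with CONFIG_PREEMPT, queue_task()'s spin_lock() now touches current->preempt_count, but tqueue.h is included long before sched.h has defined task_struct. The header therefore exports its lock-free parts on the first pass and defers the locking inline until sched.h, having defined task_struct, sets _TASK_STRUCT_DEFINED and re-includes it. The pattern in miniature (foo.h and its contents are hypothetical):

/* foo.h */
#ifndef _FOO_H
#define _FOO_H
extern void foo_slow_path(void);	/* needs no task_struct */
#endif /* _FOO_H */

#if !defined(_FOO_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
#define _FOO_H_INLINES
static inline void foo_fast_path(void)
{
	/* compiled only on the second pass, once task_struct exists,
	 * so a preemptible spin_lock() would be safe here */
	foo_slow_path();
}
#endif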
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/linux/vmalloc.h linux-2.4.26-leo/include/linux/vmalloc.h
--- linux-2.4.26/include/linux/vmalloc.h	2003-12-02 14:28:19.000000000 +0000
+++ linux-2.4.26-leo/include/linux/vmalloc.h	2004-06-22 21:02:58.000000000 +0100
@@ -29,6 +29,7 @@
 extern void vmfree_area_pages(unsigned long address, unsigned long size);
 extern int vmalloc_area_pages(unsigned long address, unsigned long size,
                               int gfp_mask, pgprot_t prot);
+extern void *vcalloc(unsigned long nmemb, unsigned long elem_size);
 
 /*
  *	Allocate any pages
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/net/inetpeer.h linux-2.4.26-leo/include/net/inetpeer.h
--- linux-2.4.26/include/net/inetpeer.h	2004-05-26 23:42:13.000000000 +0100
+++ linux-2.4.26-leo/include/net/inetpeer.h	2004-06-22 21:04:27.000000000 +0100
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
+
 #include <asm/atomic.h>
 
 struct inet_peer
@@ -34,6 +35,11 @@
 /* can be called with or without local BH being disabled */
 struct inet_peer	*inet_getpeer(__u32 daddr, int create);
 
+#ifdef CONFIG_GRKERNSEC_RANDID
+extern int grsec_enable_randid;
+extern __u16 ip_randomid(void);
+#endif
+
 extern spinlock_t inet_peer_unused_lock;
 extern struct inet_peer *inet_peer_unused_head;
 extern struct inet_peer **inet_peer_unused_tailp;
@@ -58,7 +64,14 @@
 	__u16 id;
 
 	spin_lock_bh(&inet_peer_idlock);
-	id = p->ip_id_count++;
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if (grsec_enable_randid)
+		id = htons(ip_randomid());
+	else
+#endif
+		id = p->ip_id_count++;
+
 	spin_unlock_bh(&inet_peer_idlock);
 	return id;
 }
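The branch above swaps a per-destination counter for a random draw: sequential IP IDs let a remote observer infer how many packets a host has sent (the basis of idle scans), while randomized IDs carry no such signal. A user-space analogue of the two policies (select_ip_id() and its counter are hypothetical stand-ins):

#include <stdint.h>
#include <stdlib.h>

static uint16_t peer_id_count;		/* stand-in for p->ip_id_count */

static uint16_t select_ip_id(int randomize)
{
	if (randomize)
		return (uint16_t)(rand() & 0xffff);	/* ~ ip_randomid() */
	return peer_id_count++;		/* predictable sequence */
}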
diff -urN --exclude-from=diff-exclude linux-2.4.26/include/net/ip.h linux-2.4.26-leo/include/net/ip.h
--- linux-2.4.26/include/net/ip.h	2004-05-26 23:42:13.000000000 +0100
+++ linux-2.4.26-leo/include/net/ip.h	2004-06-22 21:04:27.000000000 +0100
@@ -64,6 +64,11 @@
 	void			(*destructor)(struct sock *);
 };
 
+#ifdef CONFIG_GRKERNSEC_RANDID
+extern int grsec_enable_randid;
+extern __u16 ip_randomid(void);
+#endif
+
 extern struct ip_ra_chain *ip_ra_chain;
 extern rwlock_t ip_ra_lock;
 
@@ -197,7 +202,13 @@
 		 * does not change, they drop every other packet in
 		 * a TCP stream using header compression.
 		 */
-		iph->id = ((sk && sk->daddr) ? htons(sk->protinfo.af_inet.id++) : 0);
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+		if (grsec_enable_randid)
+			iph->id = htons(ip_randomid());
+		else
+#endif
+			iph->id = ((sk && sk->daddr) ? htons(sk->protinfo.af_inet.id++) : 0);
 	} else
 		__ip_select_ident(iph, dst);
 }
diff -urN --exclude-from=diff-exclude linux-2.4.26/init/main.c linux-2.4.26-leo/init/main.c
--- linux-2.4.26/init/main.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/init/main.c	2004-06-22 20:53:47.000000000 +0100
@@ -28,6 +28,7 @@
 #include <linux/bootmem.h>
 #include <linux/file.h>
 #include <linux/tty.h>
+#include <linux/grsecurity.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -112,6 +113,8 @@
 extern void ipc_init(void);
 #endif
 
+extern void grsecurity_init(void);
+
 /*
  * Boot command-line arguments
  */
@@ -560,6 +563,7 @@
 	do_basic_setup();
 
 	prepare_namespace();
+	grsecurity_init();
 
 	/*
 	 * Ok, we have completed the initial bootup, and
diff -urN --exclude-from=diff-exclude linux-2.4.26/ipc/msg.c linux-2.4.26-leo/ipc/msg.c
--- linux-2.4.26/ipc/msg.c	2003-06-13 15:51:39.000000000 +0100
+++ linux-2.4.26-leo/ipc/msg.c	2004-06-22 20:53:47.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
+#include <linux/grsecurity.h>
 #include <asm/uaccess.h>
 #include "util.h"
 
@@ -326,6 +327,9 @@
 		msg_unlock(id);
 	}
 	up(&msg_ids.sem);
+
+	gr_log_msgget(ret, msgflg);
+
 	return ret;
 }
 
@@ -560,6 +564,8 @@
 		break;
 	}
 	case IPC_RMID:
+		gr_log_msgrm(ipcp->uid, ipcp->cuid);
+
 		freeque (msqid); 
 		break;
 	}
diff -urN --exclude-from=diff-exclude linux-2.4.26/ipc/sem.c linux-2.4.26-leo/ipc/sem.c
--- linux-2.4.26/ipc/sem.c	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/ipc/sem.c	2004-06-22 20:53:47.000000000 +0100
@@ -63,6 +63,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/time.h>
+#include <linux/grsecurity.h>
 #include <asm/uaccess.h>
 #include "util.h"
 
@@ -182,6 +183,9 @@
 	}
 
 	up(&sem_ids.sem);
+
+	gr_log_semget(err, semflg);
+
 	return err;
 }
 
@@ -724,6 +728,8 @@
 
 	switch(cmd){
 	case IPC_RMID:
+		gr_log_semrm(ipcp->uid, ipcp->cuid);
+
 		freeary(semid);
 		err = 0;
 		break;
diff -urN --exclude-from=diff-exclude linux-2.4.26/ipc/shm.c linux-2.4.26-leo/ipc/shm.c
--- linux-2.4.26/ipc/shm.c	2002-08-03 01:39:46.000000000 +0100
+++ linux-2.4.26-leo/ipc/shm.c	2004-06-22 20:53:47.000000000 +0100
@@ -23,6 +23,7 @@
 #include <linux/mman.h>
 #include <linux/proc_fs.h>
 #include <asm/uaccess.h>
+#include <linux/grsecurity.h>
 
 #include "util.h"
 
@@ -38,8 +39,21 @@
 	time_t			shm_ctim;
 	pid_t			shm_cprid;
 	pid_t			shm_lprid;
+
+#ifdef CONFIG_GRKERNSEC
+	time_t			shm_createtime;
+	pid_t			shm_lapid;
+#endif
 };
 
+#ifdef CONFIG_GRKERNSEC
+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+        	            const time_t shm_createtime, const uid_t cuid,
+                	    const int shmid);
+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+                    const time_t shm_createtime);
+#endif
+
 #define shm_flags	shm_perm.mode
 
 static struct file_operations shm_file_operations;
@@ -209,6 +223,9 @@
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
 	shp->shm_ctim = CURRENT_TIME;
+#ifdef CONFIG_GRKERNSEC
+	shp->shm_createtime = CURRENT_TIME;
+#endif
 	shp->shm_segsz = size;
 	shp->shm_nattch = 0;
 	shp->id = shm_buildid(id,shp->shm_perm.seq);
@@ -254,6 +271,9 @@
 		shm_unlock(id);
 	}
 	up(&shm_ids.sem);
+
+	gr_log_shmget(err, shmflg, size);
+
 	return err;
 }
 
@@ -509,6 +529,9 @@
 			err=-EPERM;
 			goto out_unlock_up;
 		}
+
+		gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid);
+
 		if (shp->shm_nattch){
 			shp->shm_flags |= SHM_DEST;
 			/* Do not find it any more */
@@ -622,9 +645,28 @@
 		shm_unlock(shmid);
 		return -EACCES;
 	}
+
+#ifdef CONFIG_GRKERNSEC
+	if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
+			     shp->shm_perm.cuid, shmid)) {
+		shm_unlock(shmid);
+		return -EACCES;
+	}
+
+	if (!gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
+		shm_unlock(shmid);
+		return -EACCES;
+	}
+#endif
+
 	file = shp->shm_file;
 	size = file->f_dentry->d_inode->i_size;
 	shp->shm_nattch++;
+
+#ifdef CONFIG_GRKERNSEC
+	shp->shm_lapid = current->pid;
+#endif
+
 	shm_unlock(shmid);
 
 	down_write(&current->mm->mmap_sem);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/capability.c linux-2.4.26-leo/kernel/capability.c
--- linux-2.4.26/kernel/capability.c	2000-06-24 05:06:37.000000000 +0100
+++ linux-2.4.26-leo/kernel/capability.c	2004-06-22 20:53:47.000000000 +0100
@@ -7,6 +7,7 @@
 
 #include <linux/mm.h>
 #include <asm/uaccess.h>
+#include <linux/grsecurity.h>
 
 kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
 
@@ -168,6 +169,10 @@
              target = current;
      }
 
+     if (gr_handle_chroot_capset(target)) {
+	     error = -ESRCH;
+	     goto out;
+     }
 
      /* verify restrictions on target's new Inheritable set */
      if (!cap_issubset(inheritable,
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/exit.c linux-2.4.26-leo/kernel/exit.c
--- linux-2.4.26/kernel/exit.c	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/kernel/exit.c	2004-06-22 20:53:47.000000000 +0100
@@ -16,6 +16,7 @@
 #ifdef CONFIG_BSD_PROCESS_ACCT
 #include <linux/acct.h>
 #endif
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -282,7 +283,9 @@
 	current->mm = NULL;
 	/* active_mm is still 'mm' */
 	atomic_inc(&mm->mm_count);
+	preempt_disable();
 	enter_lazy_tlb(mm, current, smp_processor_id());
+	preempt_enable();
 	return mm;
 }
 
@@ -313,8 +316,8 @@
 		/* more a memory barrier than a real lock */
 		task_lock(tsk);
 		tsk->mm = NULL;
-		task_unlock(tsk);
 		enter_lazy_tlb(mm, current, smp_processor_id());
+		task_unlock(tsk);
 		mmput(mm);
 	}
 }
@@ -435,10 +438,20 @@
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);
 
+	if (unlikely(preempt_get_count()))
+		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
+				current->comm, current->pid,
+				preempt_get_count());
+
 fake_volatile:
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	acct_process(code);
 #endif
+
+	gr_acl_handle_psacct(tsk, code);
+	gr_acl_handle_exit();
+	gr_del_task_from_ip_table(tsk);
+
 	__exit_mm(tsk);
 
 	lock_kernel();
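The new diagnostic above flags tasks that exit while preemption is still disabled, which almost always means a lock/unlock imbalance earlier in the task's life. A minimal example of the kind of bug it catches (buggy_ioctl() is hypothetical):

static spinlock_t buggy_lock = SPIN_LOCK_UNLOCKED;

static int buggy_ioctl(int arg)
{
	spin_lock(&buggy_lock);		/* preempt_count++ */
	if (arg < 0)
		return -EINVAL;		/* leak: never unlocks, count stays raised */
	spin_unlock(&buggy_lock);	/* preempt_count-- */
	return 0;
}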
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/fork.c linux-2.4.26-leo/kernel/fork.c
--- linux-2.4.26/kernel/fork.c	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/kernel/fork.c	2004-06-22 20:53:47.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/namespace.h>
 #include <linux/personality.h>
 #include <linux/compiler.h>
+#include <linux/grsecurity.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -93,6 +94,10 @@
 	if (flags & CLONE_PID)
 		return current->pid;
 
+	pid = gr_random_pid(&lastpid_lock);
+	if (pid)
+		return pid;
+
 	spin_lock(&lastpid_lock);
 	beginpid = last_pid;
 	if((++last_pid) & 0xffff8000) {
@@ -670,6 +675,9 @@
 	 * friends to set the per-user process limit to something lower
 	 * than the amount of processes root is running. -- Rik
 	 */
+
+	gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0);
+
 	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur
 		      && p->user != &root_user
 	              && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
@@ -691,6 +699,13 @@
 	if (p->binfmt && p->binfmt->module)
 		__MOD_INC_USE_COUNT(p->binfmt->module);
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * Continue with preemption disabled as part of the context
+	 * switch, so start with preempt_count set to 1.
+	 */
+	p->preempt_count = 1;
+#endif
 	p->did_exec = 0;
 	p->swappable = 0;
 	p->state = TASK_UNINTERRUPTIBLE;
@@ -756,6 +771,7 @@
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
 		goto bad_fork_cleanup_namespace;
+	gr_copy_label(p);
 	p->semundo = NULL;
 	
 	/* Our parent execution domain becomes current domain
@@ -843,6 +859,9 @@
 	free_uid(p->user);
 bad_fork_free:
 	free_task_struct(p);
+
+	gr_log_forkfail(retval);
+
 	goto fork_out;
 }
 
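The preempt_count = 1 initialisation above pairs with schedule_tail(): the child resumes execution inside the context switch, where preemption must remain off, and the first C code it runs performs the matching decrement (see the kernel/sched.c hunk later in this patch). Sketched with simplified control flow:

/* do_fork(): p->preempt_count = 1 -- the child is born non-preemptible. */
asmlinkage void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);	/* finish the runqueue handoff */
	preempt_enable();	/* preempt_count: 1 -> 0 */
}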
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/ksyms.c linux-2.4.26-leo/kernel/ksyms.c
--- linux-2.4.26/kernel/ksyms.c	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/kernel/ksyms.c	2004-06-22 20:53:47.000000000 +0100
@@ -50,6 +50,7 @@
 #include <linux/dnotify.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
+#include <linux/grsecurity.h>
 #include <asm/checksum.h>
 
 #if defined(CONFIG_PROC_FS)
@@ -115,6 +116,7 @@
 EXPORT_SYMBOL(__vmalloc);
 EXPORT_SYMBOL(vmap);
 EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vcalloc);
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(remap_page_range);
 EXPORT_SYMBOL(max_mapnr);
@@ -194,6 +196,8 @@
 EXPORT_SYMBOL(invalidate_inode_pages);
 EXPORT_SYMBOL(truncate_inode_pages);
 EXPORT_SYMBOL(fsync_dev);
+EXPORT_SYMBOL(fsync_dev_lockfs);
+EXPORT_SYMBOL(unlockfs);
 EXPORT_SYMBOL(fsync_no_super);
 EXPORT_SYMBOL(permission);
 EXPORT_SYMBOL(vfs_permission);
@@ -468,6 +472,9 @@
 EXPORT_SYMBOL(interruptible_sleep_on);
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 EXPORT_SYMBOL(schedule);
+#ifdef CONFIG_PREEMPT
+EXPORT_SYMBOL(preempt_schedule);
+#endif
 EXPORT_SYMBOL(schedule_timeout);
 #if CONFIG_SMP
 EXPORT_SYMBOL(set_cpus_allowed);
@@ -622,3 +629,9 @@
 /* To match ksyms with System.map */
 extern const char _end[];
 EXPORT_SYMBOL(_end);
+
+/* grsecurity */
+EXPORT_SYMBOL(gr_task_is_capable);
+EXPORT_SYMBOL(gr_pid_is_chrooted);
+EXPORT_SYMBOL(gr_learn_resource);
+EXPORT_SYMBOL(gr_set_kernel_label);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/module.c linux-2.4.26-leo/kernel/module.c
--- linux-2.4.26/kernel/module.c	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/kernel/module.c	2004-06-22 20:53:47.000000000 +0100
@@ -900,6 +900,11 @@
 	struct module *mod;
 	int err;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	if (!capable(CAP_SYS_MODULE))
+		return -EPERM;
+#endif
+
 	lock_kernel();
 	if (name_user == NULL)
 		mod = &kernel_module;
@@ -969,6 +974,11 @@
 	int i;
 	struct kernel_sym ksym;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	if (!capable(CAP_SYS_MODULE))
+		return 0;
+#endif
+
 	lock_kernel();
 	for (mod = module_list, i = 0; mod; mod = mod->next) {
 		/* include the count for the module name! */
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/printk.c linux-2.4.26-leo/kernel/printk.c
--- linux-2.4.26/kernel/printk.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/kernel/printk.c	2004-06-22 20:53:47.000000000 +0100
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>			/* For in_interrupt() */
 #include <linux/config.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -298,6 +299,11 @@
 
 asmlinkage long sys_syslog(int type, char * buf, int len)
 {
+#ifdef CONFIG_GRKERNSEC_DMESG
+	if (!capable(CAP_SYS_ADMIN) && grsec_enable_dmesg)
+		return -EPERM;
+	else
+#endif
 	if ((type != 3) && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 	return do_syslog(type, buf, len);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/sched.c linux-2.4.26-leo/kernel/sched.c
--- linux-2.4.26/kernel/sched.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/kernel/sched.c	2004-06-22 20:53:47.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/file.h>
 #include <linux/smp_lock.h>
 #include <linux/nmi.h>
 #include <linux/interrupt.h>
@@ -29,6 +30,7 @@
 #include <linux/completion.h>
 #include <linux/prefetch.h>
 #include <linux/compiler.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -489,7 +491,7 @@
 	task_lock(prev);
 	task_release_cpu(prev);
 	mb();
-	if (prev->state == TASK_RUNNING)
+	if (task_on_runqueue(prev))
 		goto needs_resched;
 
 out_unlock:
@@ -519,7 +521,7 @@
 			goto out_unlock;
 
 		spin_lock_irqsave(&runqueue_lock, flags);
-		if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
+		if (task_on_runqueue(prev) && !task_has_cpu(prev))
 			reschedule_idle(prev);
 		spin_unlock_irqrestore(&runqueue_lock, flags);
 		goto out_unlock;
@@ -532,6 +534,7 @@
 asmlinkage void schedule_tail(struct task_struct *prev)
 {
 	__schedule_tail(prev);
+	preempt_enable();
 }
 
 /*
@@ -551,9 +554,10 @@
 	struct list_head *tmp;
 	int this_cpu, c;
 
-
 	spin_lock_prefetch(&runqueue_lock);
 
+	preempt_disable();
+
 	BUG_ON(!current->active_mm);
 need_resched_back:
 	prev = current;
@@ -581,6 +585,14 @@
 			move_last_runqueue(prev);
 		}
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * entering from preempt_schedule, off a kernel preemption,
+	 * go straight to picking the next task.
+	 */
+	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
+		goto treat_like_run;
+#endif
 	switch (prev->state) {
 		case TASK_INTERRUPTIBLE:
 			if (signal_pending(prev)) {
@@ -591,6 +603,9 @@
 			del_from_runqueue(prev);
 		case TASK_RUNNING:;
 	}
+#ifdef CONFIG_PREEMPT
+	treat_like_run:
+#endif
 	prev->need_resched = 0;
 
 	/*
@@ -699,9 +714,31 @@
 	reacquire_kernel_lock(current);
 	if (current->need_resched)
 		goto need_resched_back;
+	preempt_enable_no_resched();
 	return;
 }
 
+#ifdef CONFIG_PREEMPT
+/*
+ * this is the entry point to schedule() from in-kernel preemption
+ */
+asmlinkage void preempt_schedule(void)
+{
+	if (unlikely(irqs_disabled()))
+		return;
+
+need_resched:
+	current->preempt_count += PREEMPT_ACTIVE;
+	schedule();
+	current->preempt_count -= PREEMPT_ACTIVE;
+
+	/* we could miss a preemption opportunity between schedule and now */
+	barrier();
+	if (unlikely(current->need_resched))
+		goto need_resched;
+}
+#endif /* CONFIG_PREEMPT */
+
 /*
  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just wake everything
  * up.  If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
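PREEMPT_ACTIVE closes a state race: a task may be preempted after setting ->state to TASK_INTERRUPTIBLE but before calling schedule() itself. Entering schedule() with PREEMPT_ACTIVE set takes the treat_like_run: shortcut above, skipping del_from_runqueue(), so the task stays runnable and still reaches its own schedule() call. Illustrated with a hypothetical wait loop (wait_for_event() and cond are not from the patch):

static void wait_for_event(volatile int *cond)
{
	current->state = TASK_INTERRUPTIBLE;
	/* a kernel preemption here enters schedule() via preempt_schedule();
	 * without PREEMPT_ACTIVE the non-running state would get the task
	 * removed from the runqueue before its wakeup ever arrives */
	while (!*cond) {
		schedule();
		current->state = TASK_INTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
}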
@@ -910,6 +947,9 @@
 			return -EPERM;
 		if (increment < -40)
 			increment = -40;
+
+		if (gr_handle_chroot_nice())
+			return -EPERM;
 	}
 	if (increment > 40)
 		increment = 40;
@@ -1288,12 +1328,21 @@
 
 	write_lock_irq(&tasklist_lock);
 
+#ifdef CONFIG_GRKERNSEC
+	if (this_task->exec_file) {
+		fput(this_task->exec_file);
+		this_task->exec_file = NULL;
+	}
+#endif
+
 	/* Reparent to init */
 	REMOVE_LINKS(this_task);
 	this_task->p_pptr = child_reaper;
 	this_task->p_opptr = child_reaper;
 	SET_LINKS(this_task);
 
+	gr_set_kernel_label(this_task);
+
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	this_task->exit_signal = SIGCHLD;
 
@@ -1327,6 +1376,13 @@
 {
 	struct fs_struct *fs;
 
+#ifdef CONFIG_GRKERNSEC
+	if (current->exec_file) {
+		fput(current->exec_file);
+		current->exec_file = NULL;
+	}
+#endif
+	gr_set_kernel_label(current);
 
 	/*
 	 * If we were started as result of loading a module, close all of the
@@ -1365,6 +1421,13 @@
 	sched_data->curr = current;
 	sched_data->last_schedule = get_cycles();
 	clear_bit(current->processor, &wait_init_idle);
+#ifdef CONFIG_PREEMPT
+	/*
+	 * fix up the preempt_count for non-CPU0 idle threads
+	 */
+	if (current->processor)
+		current->preempt_count = 0;
+#endif
 }
 
 extern void init_timervecs (void);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/signal.c linux-2.4.26-leo/kernel/signal.c
--- linux-2.4.26/kernel/signal.c	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/kernel/signal.c	2004-06-22 20:53:47.000000000 +0100
@@ -13,6 +13,8 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -554,6 +556,8 @@
 	if (!sig || !t->sig)
 		goto out_nolock;
 
+	gr_log_signal(sig, t);
+
 	spin_lock_irqsave(&t->sigmask_lock, flags);
 	handle_stop_signal(sig, t);
 
@@ -603,6 +607,8 @@
 	recalc_sigpending(t);
 	spin_unlock_irqrestore(&t->sigmask_lock, flags);
 
+	gr_handle_crash(t, sig);
+
 	return send_sig_info(sig, info, t);
 }
 
@@ -622,9 +628,13 @@
 		read_lock(&tasklist_lock);
 		for_each_task(p) {
 			if (p->pgrp == pgrp && thread_group_leader(p)) {
-				int err = send_sig_info(sig, info, p);
-				if (retval)
-					retval = err;
+				if (gr_handle_signal(p, sig))
+					retval = -EPERM;
+				else {
+					int err = send_sig_info(sig, info, p);
+					if (retval)
+						retval = err;
+				}
 			}
 		}
 		read_unlock(&tasklist_lock);
@@ -675,7 +685,10 @@
                        if (tg)
                                p = tg;
                 }
-		error = send_sig_info(sig, info, p);
+		if (gr_handle_signal(p, sig))
+			error = -EPERM;
+		else
+			error = send_sig_info(sig, info, p);
 	}
 	read_unlock(&tasklist_lock);
 	return error;
@@ -700,10 +713,14 @@
 		read_lock(&tasklist_lock);
 		for_each_task(p) {
 			if (p->pid > 1 && p != current && thread_group_leader(p)) {
-				int err = send_sig_info(sig, info, p);
-				++count;
-				if (err != -EPERM)
-					retval = err;
+				if (gr_handle_signal(p, sig))
+					retval = -EPERM;
+				else {
+					int err = send_sig_info(sig, info, p);
+					++count;
+					if (err != -EPERM)
+						retval = err;
+				}
 			}
 		}
 		read_unlock(&tasklist_lock);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/softirq.c linux-2.4.26-leo/kernel/softirq.c
--- linux-2.4.26/kernel/softirq.c	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/kernel/softirq.c	2004-05-26 23:34:34.000000000 +0100
@@ -60,7 +60,7 @@
 
 asmlinkage void do_softirq()
 {
-	int cpu = smp_processor_id();
+	int cpu;
 	__u32 pending;
 	unsigned long flags;
 	__u32 mask;
@@ -70,6 +70,8 @@
 
 	local_irq_save(flags);
 
+	cpu = smp_processor_id();
+
 	pending = softirq_pending(cpu);
 
 	if (pending) {
@@ -151,10 +153,11 @@
 
 void __tasklet_schedule(struct tasklet_struct *t)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 	unsigned long flags;
 
 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	t->next = tasklet_vec[cpu].list;
 	tasklet_vec[cpu].list = t;
 	cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
@@ -175,10 +178,11 @@
 
 static void tasklet_action(struct softirq_action *a)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 	struct tasklet_struct *list;
 
 	local_irq_disable();
+	cpu = smp_processor_id();
 	list = tasklet_vec[cpu].list;
 	tasklet_vec[cpu].list = NULL;
 	local_irq_enable();
@@ -209,10 +213,11 @@
 
 static void tasklet_hi_action(struct softirq_action *a)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 	struct tasklet_struct *list;
 
 	local_irq_disable();
+	cpu = smp_processor_id();
 	list = tasklet_hi_vec[cpu].list;
 	tasklet_hi_vec[cpu].list = NULL;
 	local_irq_enable();
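Each hunk above moves the smp_processor_id() read inside the irq-disabled region: with CONFIG_PREEMPT a task can be preempted and migrated to another CPU between sampling the id and using per-CPU state. The hazard in miniature (use_per_cpu() is a hypothetical placeholder):

extern void use_per_cpu(int cpu);	/* hypothetical per-CPU access */

static void racy(void)
{
	int cpu = smp_processor_id();	/* may be preempted and migrated here */
	unsigned long flags;

	local_irq_save(flags);
	use_per_cpu(cpu);		/* 'cpu' can be stale by now */
	local_irq_restore(flags);
}

static void safe(void)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);		/* irqs off, so no preemption */
	cpu = smp_processor_id();	/* id is stable from here on */
	use_per_cpu(cpu);
	local_irq_restore(flags);
}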
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/sys.c linux-2.4.26-leo/kernel/sys.c
--- linux-2.4.26/kernel/sys.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/kernel/sys.c	2004-06-22 20:53:47.000000000 +0100
@@ -4,6 +4,7 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
+#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/utsname.h>
@@ -14,6 +15,7 @@
 #include <linux/prctl.h>
 #include <linux/init.h>
 #include <linux/highuid.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -239,6 +241,12 @@
 		}
 		if (error == -ESRCH)
 			error = 0;
+
+		if (gr_handle_chroot_setpriority(p, niceval)) {
+			read_unlock(&tasklist_lock);
+			return -ESRCH;
+		}
+
 		if (niceval < p->nice && !capable(CAP_SYS_NICE))
 			error = -EACCES;
 		else
@@ -320,6 +328,7 @@
 		notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
 		printk(KERN_EMERG "System halted.\n");
 		machine_halt();
+		unlock_kernel();
 		do_exit(0);
 		break;
 
@@ -327,6 +336,7 @@
 		notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
 		printk(KERN_EMERG "Power down.\n");
 		machine_power_off();
+		unlock_kernel();
 		do_exit(0);
 		break;
 
@@ -417,6 +427,10 @@
 			return -EPERM;
 		}
 	}
+
+	if (gr_check_group_change(new_rgid, new_egid, -1))
+		return -EPERM;
+
 	if (new_egid != old_egid)
 	{
 		current->mm->dumpable = 0;
@@ -425,6 +439,9 @@
 	if (rgid != (gid_t) -1 ||
 	    (egid != (gid_t) -1 && egid != old_rgid))
 		current->sgid = new_egid;
+
+	gr_set_role_label(current, current->uid, new_rgid);
+
 	current->fsgid = new_egid;
 	current->egid = new_egid;
 	current->gid = new_rgid;
@@ -440,6 +457,9 @@
 {
 	int old_egid = current->egid;
 
+	if (gr_check_group_change(gid, gid, gid))
+		return -EPERM;
+
 	if (capable(CAP_SETGID))
 	{
 		if(old_egid != gid)
@@ -447,6 +467,9 @@
 			current->mm->dumpable=0;
 			wmb();
 		}
+
+		gr_set_role_label(current, current->uid, gid);
+
 		current->gid = current->egid = current->sgid = current->fsgid = gid;
 	}
 	else if ((gid == current->gid) || (gid == current->sgid))
@@ -523,6 +546,9 @@
 		current->mm->dumpable = 0;
 		wmb();
 	}
+
+	gr_set_role_label(current, new_ruid, current->gid);
+
 	current->uid = new_ruid;
 	return 0;
 }
@@ -567,6 +593,9 @@
 			return -EPERM;
 	}
 
+	if (gr_check_user_change(new_ruid, new_euid, -1))
+		return -EPERM;
+
 	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
 		return -EAGAIN;
 
@@ -610,6 +639,12 @@
 	old_suid = current->suid;
 	new_suid = old_suid;
 	
+	if (gr_check_crash_uid(uid))
+		return -EPERM;
+
+	if (gr_check_user_change(uid, uid, uid))
+		return -EPERM;
+
 	if (capable(CAP_SETUID)) {
 		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
 			return -EAGAIN;
@@ -654,6 +689,10 @@
 		    (suid != current->euid) && (suid != current->suid))
 			return -EPERM;
 	}
+
+	if (gr_check_user_change(ruid, euid, -1))
+		return -EPERM;
+
 	if (ruid != (uid_t) -1) {
 		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
 			return -EAGAIN;
@@ -704,6 +743,10 @@
 		    (sgid != current->egid) && (sgid != current->sgid))
 			return -EPERM;
 	}
+
+	if (gr_check_group_change(rgid, egid, -1))
+		return -EPERM;
+
 	if (egid != (gid_t) -1) {
 		if (egid != current->egid)
 		{
@@ -713,8 +756,10 @@
 		current->egid = egid;
 	}
 	current->fsgid = current->egid;
-	if (rgid != (gid_t) -1)
+	if (rgid != (gid_t) -1) {
+		gr_set_role_label(current, current->uid, rgid);
 		current->gid = rgid;
+	}
 	if (sgid != (gid_t) -1)
 		current->sgid = sgid;
 	return 0;
@@ -747,6 +792,9 @@
 	    uid == current->suid || uid == current->fsuid || 
 	    capable(CAP_SETUID))
 	{
+		if (gr_check_user_change(-1, -1, uid))
+			return -EPERM;
+
 		if (uid != old_fsuid)
 		{
 			current->mm->dumpable = 0;
@@ -789,6 +837,9 @@
 	    gid == current->sgid || gid == current->fsgid || 
 	    capable(CAP_SETGID))
 	{
+		if (gr_check_group_change(-1, -1, gid))
+			return -EPERM;
+
 		if (gid != old_fsgid)
 		{
 			current->mm->dumpable = 0;
@@ -1137,6 +1188,10 @@
        if (new_rlim.rlim_cur > new_rlim.rlim_max)
                return -EINVAL;
 	old_rlim = current->rlim + resource;
+
+	if (old_rlim->rlim_max < old_rlim->rlim_cur)
+		return -EINVAL;
+
 	if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
 	     (new_rlim.rlim_max > old_rlim->rlim_max)) &&
 	    !capable(CAP_SYS_RESOURCE))
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/sysctl.c linux-2.4.26-leo/kernel/sysctl.c
--- linux-2.4.26/kernel/sysctl.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/kernel/sysctl.c	2004-06-22 20:53:47.000000000 +0100
@@ -39,6 +39,13 @@
 #endif
 
 #if defined(CONFIG_SYSCTL)
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+extern __u32 gr_handle_sysctl(const ctl_table * table, const void *oldval,
+			      const void *newval);
+extern int gr_handle_sysctl_mod(const char *dirname, const char *name, const int op);
+extern int gr_handle_chroot_sysctl(const int op);
 
 /* External variables not in a header file. */
 extern int panic_timeout;
@@ -127,6 +134,19 @@
 static ctl_table dev_table[];
 extern ctl_table random_table[];
 
+static ctl_table grsecurity_table[];
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+static ctl_table pax_table[] = {
+#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK)
+	{PAX_ASLR, "aslr", &pax_aslr, sizeof(unsigned int), 0600, NULL, &proc_dointvec},
+#endif
+
+	{PAX_SOFTMODE, "softmode", &pax_softmode, sizeof(unsigned int), 0600, NULL, &proc_dointvec},
+	{0}
+};
+#endif
+
 /* /proc declarations: */
 
 #ifdef CONFIG_PROC_FS
@@ -275,9 +295,201 @@
 	{KERN_EXCEPTION_TRACE,"exception-trace",
 	 &exception_trace,sizeof(int),0644,NULL,&proc_dointvec},
 #endif	
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+	{KERN_GRSECURITY, "grsecurity", NULL, 0, 0500, grsecurity_table},
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE
+	{KERN_PAX, "pax", NULL, 0, 0500, pax_table},
+#endif
+
 	{0}
 };
 
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL, 
+GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT,
+GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM,
+GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS, 
+GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL,
+GS_RANDPID, GS_RANDID, GS_RANDSRC, GS_RANDISN,
+GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT,
+GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID,
+GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG, GS_RANDRPC,
+GS_TEXTREL, GS_FINDTASK, GS_LOCK};
+
+static ctl_table grsecurity_table[] = {
+#ifdef CONFIG_GRKERNSEC_LINK
+	{GS_LINK, "linking_restrictions", &grsec_enable_link, sizeof (int),
+	 0600, NULL, &proc_dointvec}, 
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
+	{GS_FIFO, "fifo_restrictions", &grsec_enable_fifo, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECVE
+	{GS_EXECVE, "execve_limiting", &grsec_enable_execve, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	{GS_EXECLOG, "exec_logging", &grsec_enable_execlog, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	{GS_SIGNAL, "signal_logging", &grsec_enable_signal, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	{GS_FORKFAIL, "forkfail_logging", &grsec_enable_forkfail, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
+	{GS_TIME, "timechange_logging", &grsec_enable_time, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	{GS_CHROOT_SHMAT, "chroot_deny_shmat", &grsec_enable_chroot_shmat, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	{GS_CHROOT_UNIX, "chroot_deny_unix", &grsec_enable_chroot_unix, sizeof(int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	{GS_CHROOT_MNT, "chroot_deny_mount", &grsec_enable_chroot_mount, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	{GS_CHROOT_FCHDIR, "chroot_deny_fchdir", &grsec_enable_chroot_fchdir, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	{GS_CHROOT_DBL, "chroot_deny_chroot", &grsec_enable_chroot_double, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	{GS_CHROOT_PVT, "chroot_deny_pivot", &grsec_enable_chroot_pivot, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	{GS_CHROOT_CD, "chroot_enforce_chdir", &grsec_enable_chroot_chdir, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	{GS_CHROOT_CM, "chroot_deny_chmod", &grsec_enable_chroot_chmod, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	{GS_CHROOT_MK, "chroot_deny_mknod", &grsec_enable_chroot_mknod, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	{GS_CHROOT_NI, "chroot_restrict_nice", &grsec_enable_chroot_nice, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	{GS_CHROOT_EXECLOG, "chroot_execlog",
+	 &grsec_enable_chroot_execlog, sizeof (int),
+         0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	{GS_CHROOT_CAPS, "chroot_caps", &grsec_enable_chroot_caps, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	{GS_CHROOT_SYSCTL, "chroot_deny_sysctl", &grsec_enable_chroot_sysctl, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
+	{GS_TPE, "tpe", &grsec_enable_tpe, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+	{GS_TPE_GID, "tpe_gid", &grsec_tpe_gid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	{GS_TPE_ALL, "tpe_restrict_all", &grsec_enable_tpe_all, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDPID
+	{GS_RANDPID, "rand_pids", &grsec_enable_randpid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDID
+	{GS_RANDID, "rand_ip_ids", &grsec_enable_randid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+	{GS_RANDSRC, "rand_tcp_src_ports", &grsec_enable_randsrc, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDISN
+	{GS_RANDISN, "rand_isns", &grsec_enable_randisn, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	{GS_SOCKET_ALL, "socket_all", &grsec_enable_socket_all, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+	{GS_SOCKET_ALL_GID, "socket_all_gid",
+	 &grsec_socket_all_gid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	{GS_SOCKET_CLIENT, "socket_client", 
+	 &grsec_enable_socket_client, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+	{GS_SOCKET_CLIENT_GID, "socket_client_gid", 
+	 &grsec_socket_client_gid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	{GS_SOCKET_SERVER, "socket_server", 
+	 &grsec_enable_socket_server, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+	{GS_SOCKET_SERVER_GID, "socket_server_gid",
+	 &grsec_socket_server_gid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
+	{GS_GROUP, "audit_group", &grsec_enable_group, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+	{GS_GID, "audit_gid",
+	 &grsec_audit_gid, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	{GS_ACHDIR, "audit_chdir", &grsec_enable_chdir, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	{GS_AMOUNT, "audit_mount", &grsec_enable_mount, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
+	{GS_TEXTREL, "audit_textrel", &grsec_enable_audit_textrel, sizeof(int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
+	{GS_AIPC, "audit_ipc", &grsec_enable_audit_ipc, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
+	{GS_DMSG, "dmesg", &grsec_enable_dmesg, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+	{GS_RANDRPC, "rand_rpc", &grsec_enable_randrpc, sizeof (int),
+	 0600, NULL, &proc_dointvec},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	{GS_FINDTASK, "chroot_findtask", &grsec_enable_chroot_findtask, 
+	 sizeof (int), 0600, NULL, &proc_dointvec},
+#endif
+	{GS_LOCK, "grsec_lock", &grsec_lock, sizeof (int), 0600, NULL,
+	 &proc_dointvec},
+	{0}
+};
+#endif
+
 static ctl_table vm_table[] = {
 	{VM_GFP_DEBUG, "vm_gfp_debug", 
 	 &vm_gfp_debug, sizeof(int), 0644, NULL, &proc_dointvec},
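Every row in grsecurity_table follows the 2.4-era ctl_table layout, wiring an int behind a root-only /proc/sys file. The anatomy of one entry, annotated (example_toggle and its table are hypothetical; the field order follows linux/sysctl.h of this era):

static int example_toggle;

static ctl_table example_table[] = {
	{1,			/* ctl_name: numeric sysctl id      */
	 "example_toggle",	/* procname: name under /proc/sys   */
	 &example_toggle,	/* data: backing variable           */
	 sizeof (int),		/* maxlen                           */
	 0600,			/* mode: root read/write only       */
	 NULL,			/* child: no subdirectory           */
	 &proc_dointvec},	/* proc_handler: int read/write     */
	{0}			/* terminator                       */
};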
@@ -425,6 +637,11 @@
 
 static inline int ctl_perm(ctl_table *table, int op)
 {
+	if (table->de && gr_handle_sysctl_mod(table->de->parent->name, table->de->name, op))
+		return -EACCES;
+	if (gr_handle_chroot_sysctl(op))
+		return -EACCES;
+
 	return test_perm(table->mode, op);
 }
 
@@ -458,6 +675,10 @@
 				table = table->child;
 				goto repeat;
 			}
+
+			if (!gr_handle_sysctl(table, oldval, newval))
+				return -EACCES;
+
 			error = do_sysctl_strategy(table, name, nlen,
 						   oldval, oldlenp,
 						   newval, newlen, context);
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/time.c linux-2.4.26-leo/kernel/time.c
--- linux-2.4.26/kernel/time.c	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/kernel/time.c	2004-06-22 20:53:47.000000000 +0100
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/timex.h>
 #include <linux/smp_lock.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
@@ -89,6 +90,9 @@
 	time_maxerror = NTP_PHASE_LIMIT;
 	time_esterror = NTP_PHASE_LIMIT;
 	write_unlock_irq(&xtime_lock);
+
+	gr_log_timechange();
+
 	return 0;
 }
 
@@ -167,6 +171,8 @@
 		 * globally block out interrupts when it runs.
 		 */
 		do_settimeofday(tv);
+
+		gr_log_timechange();
 	}
 	return 0;
 }
diff -urN --exclude-from=diff-exclude linux-2.4.26/kernel/timer.c linux-2.4.26-leo/kernel/timer.c
--- linux-2.4.26/kernel/timer.c	2002-11-28 23:53:15.000000000 +0000
+++ linux-2.4.26-leo/kernel/timer.c	2004-06-22 20:53:47.000000000 +0100
@@ -541,6 +541,9 @@
 
 	psecs = (p->times.tms_utime += user);
 	psecs += (p->times.tms_stime += system);
+
+	gr_learn_resource(p, RLIMIT_CPU, psecs / HZ, 1);
+
 	if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
 		/* Send SIGXCPU every second.. */
 		if (!(psecs % HZ))
diff -urN --exclude-from=diff-exclude linux-2.4.26/lib/crc32table.h linux-2.4.26-leo/lib/crc32table.h
--- linux-2.4.26/lib/crc32table.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/lib/crc32table.h	2004-06-22 21:11:08.000000000 +0100
@@ -0,0 +1,134 @@
+/* this file is generated - do not edit */
+
+static const u32 crc32table_le[] = {
+tole(0x00000000L), tole(0x77073096L), tole(0xee0e612cL), tole(0x990951baL), 
+tole(0x076dc419L), tole(0x706af48fL), tole(0xe963a535L), tole(0x9e6495a3L), 
+tole(0x0edb8832L), tole(0x79dcb8a4L), tole(0xe0d5e91eL), tole(0x97d2d988L), 
+tole(0x09b64c2bL), tole(0x7eb17cbdL), tole(0xe7b82d07L), tole(0x90bf1d91L), 
+tole(0x1db71064L), tole(0x6ab020f2L), tole(0xf3b97148L), tole(0x84be41deL), 
+tole(0x1adad47dL), tole(0x6ddde4ebL), tole(0xf4d4b551L), tole(0x83d385c7L), 
+tole(0x136c9856L), tole(0x646ba8c0L), tole(0xfd62f97aL), tole(0x8a65c9ecL), 
+tole(0x14015c4fL), tole(0x63066cd9L), tole(0xfa0f3d63L), tole(0x8d080df5L), 
+tole(0x3b6e20c8L), tole(0x4c69105eL), tole(0xd56041e4L), tole(0xa2677172L), 
+tole(0x3c03e4d1L), tole(0x4b04d447L), tole(0xd20d85fdL), tole(0xa50ab56bL), 
+tole(0x35b5a8faL), tole(0x42b2986cL), tole(0xdbbbc9d6L), tole(0xacbcf940L), 
+tole(0x32d86ce3L), tole(0x45df5c75L), tole(0xdcd60dcfL), tole(0xabd13d59L), 
+tole(0x26d930acL), tole(0x51de003aL), tole(0xc8d75180L), tole(0xbfd06116L), 
+tole(0x21b4f4b5L), tole(0x56b3c423L), tole(0xcfba9599L), tole(0xb8bda50fL), 
+tole(0x2802b89eL), tole(0x5f058808L), tole(0xc60cd9b2L), tole(0xb10be924L), 
+tole(0x2f6f7c87L), tole(0x58684c11L), tole(0xc1611dabL), tole(0xb6662d3dL), 
+tole(0x76dc4190L), tole(0x01db7106L), tole(0x98d220bcL), tole(0xefd5102aL), 
+tole(0x71b18589L), tole(0x06b6b51fL), tole(0x9fbfe4a5L), tole(0xe8b8d433L), 
+tole(0x7807c9a2L), tole(0x0f00f934L), tole(0x9609a88eL), tole(0xe10e9818L), 
+tole(0x7f6a0dbbL), tole(0x086d3d2dL), tole(0x91646c97L), tole(0xe6635c01L), 
+tole(0x6b6b51f4L), tole(0x1c6c6162L), tole(0x856530d8L), tole(0xf262004eL), 
+tole(0x6c0695edL), tole(0x1b01a57bL), tole(0x8208f4c1L), tole(0xf50fc457L), 
+tole(0x65b0d9c6L), tole(0x12b7e950L), tole(0x8bbeb8eaL), tole(0xfcb9887cL), 
+tole(0x62dd1ddfL), tole(0x15da2d49L), tole(0x8cd37cf3L), tole(0xfbd44c65L), 
+tole(0x4db26158L), tole(0x3ab551ceL), tole(0xa3bc0074L), tole(0xd4bb30e2L), 
+tole(0x4adfa541L), tole(0x3dd895d7L), tole(0xa4d1c46dL), tole(0xd3d6f4fbL), 
+tole(0x4369e96aL), tole(0x346ed9fcL), tole(0xad678846L), tole(0xda60b8d0L), 
+tole(0x44042d73L), tole(0x33031de5L), tole(0xaa0a4c5fL), tole(0xdd0d7cc9L), 
+tole(0x5005713cL), tole(0x270241aaL), tole(0xbe0b1010L), tole(0xc90c2086L), 
+tole(0x5768b525L), tole(0x206f85b3L), tole(0xb966d409L), tole(0xce61e49fL), 
+tole(0x5edef90eL), tole(0x29d9c998L), tole(0xb0d09822L), tole(0xc7d7a8b4L), 
+tole(0x59b33d17L), tole(0x2eb40d81L), tole(0xb7bd5c3bL), tole(0xc0ba6cadL), 
+tole(0xedb88320L), tole(0x9abfb3b6L), tole(0x03b6e20cL), tole(0x74b1d29aL), 
+tole(0xead54739L), tole(0x9dd277afL), tole(0x04db2615L), tole(0x73dc1683L), 
+tole(0xe3630b12L), tole(0x94643b84L), tole(0x0d6d6a3eL), tole(0x7a6a5aa8L), 
+tole(0xe40ecf0bL), tole(0x9309ff9dL), tole(0x0a00ae27L), tole(0x7d079eb1L), 
+tole(0xf00f9344L), tole(0x8708a3d2L), tole(0x1e01f268L), tole(0x6906c2feL), 
+tole(0xf762575dL), tole(0x806567cbL), tole(0x196c3671L), tole(0x6e6b06e7L), 
+tole(0xfed41b76L), tole(0x89d32be0L), tole(0x10da7a5aL), tole(0x67dd4accL), 
+tole(0xf9b9df6fL), tole(0x8ebeeff9L), tole(0x17b7be43L), tole(0x60b08ed5L), 
+tole(0xd6d6a3e8L), tole(0xa1d1937eL), tole(0x38d8c2c4L), tole(0x4fdff252L), 
+tole(0xd1bb67f1L), tole(0xa6bc5767L), tole(0x3fb506ddL), tole(0x48b2364bL), 
+tole(0xd80d2bdaL), tole(0xaf0a1b4cL), tole(0x36034af6L), tole(0x41047a60L), 
+tole(0xdf60efc3L), tole(0xa867df55L), tole(0x316e8eefL), tole(0x4669be79L), 
+tole(0xcb61b38cL), tole(0xbc66831aL), tole(0x256fd2a0L), tole(0x5268e236L), 
+tole(0xcc0c7795L), tole(0xbb0b4703L), tole(0x220216b9L), tole(0x5505262fL), 
+tole(0xc5ba3bbeL), tole(0xb2bd0b28L), tole(0x2bb45a92L), tole(0x5cb36a04L), 
+tole(0xc2d7ffa7L), tole(0xb5d0cf31L), tole(0x2cd99e8bL), tole(0x5bdeae1dL), 
+tole(0x9b64c2b0L), tole(0xec63f226L), tole(0x756aa39cL), tole(0x026d930aL), 
+tole(0x9c0906a9L), tole(0xeb0e363fL), tole(0x72076785L), tole(0x05005713L), 
+tole(0x95bf4a82L), tole(0xe2b87a14L), tole(0x7bb12baeL), tole(0x0cb61b38L), 
+tole(0x92d28e9bL), tole(0xe5d5be0dL), tole(0x7cdcefb7L), tole(0x0bdbdf21L), 
+tole(0x86d3d2d4L), tole(0xf1d4e242L), tole(0x68ddb3f8L), tole(0x1fda836eL), 
+tole(0x81be16cdL), tole(0xf6b9265bL), tole(0x6fb077e1L), tole(0x18b74777L), 
+tole(0x88085ae6L), tole(0xff0f6a70L), tole(0x66063bcaL), tole(0x11010b5cL), 
+tole(0x8f659effL), tole(0xf862ae69L), tole(0x616bffd3L), tole(0x166ccf45L), 
+tole(0xa00ae278L), tole(0xd70dd2eeL), tole(0x4e048354L), tole(0x3903b3c2L), 
+tole(0xa7672661L), tole(0xd06016f7L), tole(0x4969474dL), tole(0x3e6e77dbL), 
+tole(0xaed16a4aL), tole(0xd9d65adcL), tole(0x40df0b66L), tole(0x37d83bf0L), 
+tole(0xa9bcae53L), tole(0xdebb9ec5L), tole(0x47b2cf7fL), tole(0x30b5ffe9L), 
+tole(0xbdbdf21cL), tole(0xcabac28aL), tole(0x53b39330L), tole(0x24b4a3a6L), 
+tole(0xbad03605L), tole(0xcdd70693L), tole(0x54de5729L), tole(0x23d967bfL), 
+tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L), 
+tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL)
+};
+static const u32 crc32table_be[] = {
+tobe(0x00000000L), tobe(0x04c11db7L), tobe(0x09823b6eL), tobe(0x0d4326d9L), 
+tobe(0x130476dcL), tobe(0x17c56b6bL), tobe(0x1a864db2L), tobe(0x1e475005L), 
+tobe(0x2608edb8L), tobe(0x22c9f00fL), tobe(0x2f8ad6d6L), tobe(0x2b4bcb61L), 
+tobe(0x350c9b64L), tobe(0x31cd86d3L), tobe(0x3c8ea00aL), tobe(0x384fbdbdL), 
+tobe(0x4c11db70L), tobe(0x48d0c6c7L), tobe(0x4593e01eL), tobe(0x4152fda9L), 
+tobe(0x5f15adacL), tobe(0x5bd4b01bL), tobe(0x569796c2L), tobe(0x52568b75L), 
+tobe(0x6a1936c8L), tobe(0x6ed82b7fL), tobe(0x639b0da6L), tobe(0x675a1011L), 
+tobe(0x791d4014L), tobe(0x7ddc5da3L), tobe(0x709f7b7aL), tobe(0x745e66cdL), 
+tobe(0x9823b6e0L), tobe(0x9ce2ab57L), tobe(0x91a18d8eL), tobe(0x95609039L), 
+tobe(0x8b27c03cL), tobe(0x8fe6dd8bL), tobe(0x82a5fb52L), tobe(0x8664e6e5L), 
+tobe(0xbe2b5b58L), tobe(0xbaea46efL), tobe(0xb7a96036L), tobe(0xb3687d81L), 
+tobe(0xad2f2d84L), tobe(0xa9ee3033L), tobe(0xa4ad16eaL), tobe(0xa06c0b5dL), 
+tobe(0xd4326d90L), tobe(0xd0f37027L), tobe(0xddb056feL), tobe(0xd9714b49L), 
+tobe(0xc7361b4cL), tobe(0xc3f706fbL), tobe(0xceb42022L), tobe(0xca753d95L), 
+tobe(0xf23a8028L), tobe(0xf6fb9d9fL), tobe(0xfbb8bb46L), tobe(0xff79a6f1L), 
+tobe(0xe13ef6f4L), tobe(0xe5ffeb43L), tobe(0xe8bccd9aL), tobe(0xec7dd02dL), 
+tobe(0x34867077L), tobe(0x30476dc0L), tobe(0x3d044b19L), tobe(0x39c556aeL), 
+tobe(0x278206abL), tobe(0x23431b1cL), tobe(0x2e003dc5L), tobe(0x2ac12072L), 
+tobe(0x128e9dcfL), tobe(0x164f8078L), tobe(0x1b0ca6a1L), tobe(0x1fcdbb16L), 
+tobe(0x018aeb13L), tobe(0x054bf6a4L), tobe(0x0808d07dL), tobe(0x0cc9cdcaL), 
+tobe(0x7897ab07L), tobe(0x7c56b6b0L), tobe(0x71159069L), tobe(0x75d48ddeL), 
+tobe(0x6b93dddbL), tobe(0x6f52c06cL), tobe(0x6211e6b5L), tobe(0x66d0fb02L), 
+tobe(0x5e9f46bfL), tobe(0x5a5e5b08L), tobe(0x571d7dd1L), tobe(0x53dc6066L), 
+tobe(0x4d9b3063L), tobe(0x495a2dd4L), tobe(0x44190b0dL), tobe(0x40d816baL), 
+tobe(0xaca5c697L), tobe(0xa864db20L), tobe(0xa527fdf9L), tobe(0xa1e6e04eL), 
+tobe(0xbfa1b04bL), tobe(0xbb60adfcL), tobe(0xb6238b25L), tobe(0xb2e29692L), 
+tobe(0x8aad2b2fL), tobe(0x8e6c3698L), tobe(0x832f1041L), tobe(0x87ee0df6L), 
+tobe(0x99a95df3L), tobe(0x9d684044L), tobe(0x902b669dL), tobe(0x94ea7b2aL), 
+tobe(0xe0b41de7L), tobe(0xe4750050L), tobe(0xe9362689L), tobe(0xedf73b3eL), 
+tobe(0xf3b06b3bL), tobe(0xf771768cL), tobe(0xfa325055L), tobe(0xfef34de2L), 
+tobe(0xc6bcf05fL), tobe(0xc27dede8L), tobe(0xcf3ecb31L), tobe(0xcbffd686L), 
+tobe(0xd5b88683L), tobe(0xd1799b34L), tobe(0xdc3abdedL), tobe(0xd8fba05aL), 
+tobe(0x690ce0eeL), tobe(0x6dcdfd59L), tobe(0x608edb80L), tobe(0x644fc637L), 
+tobe(0x7a089632L), tobe(0x7ec98b85L), tobe(0x738aad5cL), tobe(0x774bb0ebL), 
+tobe(0x4f040d56L), tobe(0x4bc510e1L), tobe(0x46863638L), tobe(0x42472b8fL), 
+tobe(0x5c007b8aL), tobe(0x58c1663dL), tobe(0x558240e4L), tobe(0x51435d53L), 
+tobe(0x251d3b9eL), tobe(0x21dc2629L), tobe(0x2c9f00f0L), tobe(0x285e1d47L), 
+tobe(0x36194d42L), tobe(0x32d850f5L), tobe(0x3f9b762cL), tobe(0x3b5a6b9bL), 
+tobe(0x0315d626L), tobe(0x07d4cb91L), tobe(0x0a97ed48L), tobe(0x0e56f0ffL), 
+tobe(0x1011a0faL), tobe(0x14d0bd4dL), tobe(0x19939b94L), tobe(0x1d528623L), 
+tobe(0xf12f560eL), tobe(0xf5ee4bb9L), tobe(0xf8ad6d60L), tobe(0xfc6c70d7L), 
+tobe(0xe22b20d2L), tobe(0xe6ea3d65L), tobe(0xeba91bbcL), tobe(0xef68060bL), 
+tobe(0xd727bbb6L), tobe(0xd3e6a601L), tobe(0xdea580d8L), tobe(0xda649d6fL), 
+tobe(0xc423cd6aL), tobe(0xc0e2d0ddL), tobe(0xcda1f604L), tobe(0xc960ebb3L), 
+tobe(0xbd3e8d7eL), tobe(0xb9ff90c9L), tobe(0xb4bcb610L), tobe(0xb07daba7L), 
+tobe(0xae3afba2L), tobe(0xaafbe615L), tobe(0xa7b8c0ccL), tobe(0xa379dd7bL), 
+tobe(0x9b3660c6L), tobe(0x9ff77d71L), tobe(0x92b45ba8L), tobe(0x9675461fL), 
+tobe(0x8832161aL), tobe(0x8cf30badL), tobe(0x81b02d74L), tobe(0x857130c3L), 
+tobe(0x5d8a9099L), tobe(0x594b8d2eL), tobe(0x5408abf7L), tobe(0x50c9b640L), 
+tobe(0x4e8ee645L), tobe(0x4a4ffbf2L), tobe(0x470cdd2bL), tobe(0x43cdc09cL), 
+tobe(0x7b827d21L), tobe(0x7f436096L), tobe(0x7200464fL), tobe(0x76c15bf8L), 
+tobe(0x68860bfdL), tobe(0x6c47164aL), tobe(0x61043093L), tobe(0x65c52d24L), 
+tobe(0x119b4be9L), tobe(0x155a565eL), tobe(0x18197087L), tobe(0x1cd86d30L), 
+tobe(0x029f3d35L), tobe(0x065e2082L), tobe(0x0b1d065bL), tobe(0x0fdc1becL), 
+tobe(0x3793a651L), tobe(0x3352bbe6L), tobe(0x3e119d3fL), tobe(0x3ad08088L), 
+tobe(0x2497d08dL), tobe(0x2056cd3aL), tobe(0x2d15ebe3L), tobe(0x29d4f654L), 
+tobe(0xc5a92679L), tobe(0xc1683bceL), tobe(0xcc2b1d17L), tobe(0xc8ea00a0L), 
+tobe(0xd6ad50a5L), tobe(0xd26c4d12L), tobe(0xdf2f6bcbL), tobe(0xdbee767cL), 
+tobe(0xe3a1cbc1L), tobe(0xe760d676L), tobe(0xea23f0afL), tobe(0xeee2ed18L), 
+tobe(0xf0a5bd1dL), tobe(0xf464a0aaL), tobe(0xf9278673L), tobe(0xfde69bc4L), 
+tobe(0x89b8fd09L), tobe(0x8d79e0beL), tobe(0x803ac667L), tobe(0x84fbdbd0L), 
+tobe(0x9abc8bd5L), tobe(0x9e7d9662L), tobe(0x933eb0bbL), tobe(0x97ffad0cL), 
+tobe(0xafb010b1L), tobe(0xab710d06L), tobe(0xa6322bdfL), tobe(0xa2f33668L), 
+tobe(0xbcb4666dL), tobe(0xb8757bdaL), tobe(0xb5365d03L), tobe(0xb1f740b4L)
+};
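
The block above is the tail of a 256-entry big-endian CRC32 lookup table
(regenerated by gen_crc32table, whose binary differs below); the entries are
consistent with the standard 0x04c11db7 polynomial. For orientation only, not
as part of the patch, a byte-at-a-time update loop that consumes such a table
looks roughly like this (function name and types are illustrative):

#include <stdint.h>
#include <stddef.h>

/* Fold a buffer into a running big-endian CRC32: the top byte of the CRC
 * selects the table entry, and the remaining CRC bits shift up one byte
 * per input byte. */
static uint32_t crc32_be_update(uint32_t crc, const uint8_t *p, size_t len,
                                const uint32_t table[256])
{
	while (len--)
		crc = (crc << 8) ^ table[(crc >> 24) ^ *p++];
	return crc;
}
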
diff -urN --exclude-from=diff-exclude linux-2.4.26/lib/dec_and_lock.c linux-2.4.26-leo/lib/dec_and_lock.c
--- linux-2.4.26/lib/dec_and_lock.c	2001-10-03 17:11:26.000000000 +0100
+++ linux-2.4.26-leo/lib/dec_and_lock.c	2004-05-26 23:34:34.000000000 +0100
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 
 /*
Binary files linux-2.4.26/lib/gen_crc32table and linux-2.4.26-leo/lib/gen_crc32table differ
diff -urN --exclude-from=diff-exclude linux-2.4.26/MAINTAINERS linux-2.4.26-leo/MAINTAINERS
--- linux-2.4.26/MAINTAINERS	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/MAINTAINERS	2004-05-26 23:34:34.000000000 +0100
@@ -587,6 +587,13 @@
 W:	http://www.debian.org/~dz/i8k/
 S:	Maintained
 
+DEVICE MAPPER
+P:	Joe Thornber
+M:	dm@uk.sistina.com
+L:	linux-LVM@sistina.com
+W:	http://www.sistina.com/lvm
+S:	Maintained
+
 DEVICE NUMBER REGISTRY
 P:	H. Peter Anvin
 M:	hpa@zytor.com
@@ -1508,6 +1515,14 @@
 M:	mostrows@styx.uwaterloo.ca
 S:	Maintained
 
+PREEMPTIBLE KERNEL
+P:	Robert M. Love
+M:	rml@tech9.net
+L:	linux-kernel@vger.kernel.org
+L:	kpreempt-tech@lists.sourceforge.net
+W:	http://tech9.net/rml/linux
+S:	Supported
+
 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
 P:	Peter Denison
 M:	promise@pnd-pc.demon.co.uk
diff -urN --exclude-from=diff-exclude linux-2.4.26/Makefile linux-2.4.26-leo/Makefile
--- linux-2.4.26/Makefile	2004-05-19 21:34:36.000000000 +0100
+++ linux-2.4.26-leo/Makefile	2004-06-22 20:55:32.000000000 +0100
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 26
-EXTRAVERSION =
+EXTRAVERSION = -leo
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
@@ -126,9 +126,10 @@
 
 CORE_FILES	=kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o
 NETWORKS	=net/network.o
+GRSECURITY	=grsecurity/grsec.o
 
 LIBS		=$(TOPDIR)/lib/lib.a
-SUBDIRS		=kernel drivers mm fs net ipc lib crypto
+SUBDIRS		=kernel drivers mm fs net ipc lib crypto grsecurity
 
 DRIVERS-n :=
 DRIVERS-y :=
@@ -272,7 +273,7 @@
 
 export	CPPFLAGS CFLAGS CFLAGS_KERNEL AFLAGS AFLAGS_KERNEL
 
-export	NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS
+export	NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS GRSECURITY
 
 .S.s:
 	$(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -o $*.s $<
@@ -291,6 +292,7 @@
 		$(CORE_FILES) \
 		$(DRIVERS) \
 		$(NETWORKS) \
+		$(GRSECURITY) \
 		$(LIBS) \
 		--end-group \
 		-o vmlinux
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/filemap.c linux-2.4.26-leo/mm/filemap.c
--- linux-2.4.26/mm/filemap.c	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/mm/filemap.c	2004-06-22 20:53:47.000000000 +0100
@@ -1809,7 +1809,8 @@
 			retval = do_generic_direct_read(filp, buf, count, ppos);
 		up(&inode->i_sem);
 		up_read(&inode->i_alloc_sem);
-		UPDATE_ATIME(filp->f_dentry->d_inode);
+		if (!S_ISBLK(inode->i_mode))
+			UPDATE_ATIME(filp->f_dentry->d_inode);
 		goto out;
 	}
 }
@@ -2324,6 +2325,12 @@
 	}
 	if (!mapping->a_ops->readpage)
 		return -ENOEXEC;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (current->flags & PF_PAX_PAGEEXEC)
+		vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
+#endif
+
 	UPDATE_ATIME(inode);
 	vma->vm_ops = &generic_file_vm_ops;
 	return 0;
@@ -2553,8 +2560,42 @@
  * We can potentially split a vm area into separate
  * areas, each area with its own behavior.
  */
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+static long __madvise_behavior(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, int behavior);
+
+static long madvise_behavior(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, int behavior)
+{
+	if (vma->vm_flags & VM_MIRROR) {
+		struct vm_area_struct * vma_m, * prev_m;
+		unsigned long start_m, end_m;
+		int error;
+
+		start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+		vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
+		if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
+			start_m = start + (unsigned long)vma->vm_private_data;
+			end_m = end + (unsigned long)vma->vm_private_data;
+			error = __madvise_behavior(vma_m, start_m, end_m, behavior);
+			if (error)
+				return error;
+		} else {
+			printk(KERN_ERR "PAX: VMMIRROR: madvise bug in %s, %08lx\n", current->comm, vma->vm_start);
+			return -ENOMEM;
+		}
+	}
+
+	return __madvise_behavior(vma, start, end, behavior);
+}
+
+static long __madvise_behavior(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, int behavior)
+#else
 static long madvise_behavior(struct vm_area_struct * vma,
 	unsigned long start, unsigned long end, int behavior)
+#endif
 {
 	int error = 0;
 
@@ -2608,6 +2649,7 @@
 	error = -EIO;
 	rlim_rss = current->rlim ?  current->rlim[RLIMIT_RSS].rlim_cur :
 				LONG_MAX; /* default: see resource.h */
+	gr_learn_resource(current, RLIMIT_RSS, vma->vm_mm->rss + (end - start), 1);
 	if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
 		return error;
 
@@ -3083,6 +3125,7 @@
 	err = -EFBIG;
 	
 	if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) {
+		gr_learn_resource(current, RLIMIT_FSIZE, pos, 0);
 		if (pos >= limit) {
 			send_sig(SIGXFSZ, current, 0);
 			goto out;
@@ -3118,6 +3161,7 @@
 	 */
 	 
 	if (!S_ISBLK(inode->i_mode)) {
+		gr_learn_resource(current, RLIMIT_FSIZE, *count + (u32)pos, 0);
 		if (pos >= inode->i_sb->s_maxbytes)
 		{
 			if (*count || pos > inode->i_sb->s_maxbytes) {
@@ -3187,8 +3231,12 @@
 		goto out;
 
 	remove_suid(inode);
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-	mark_inode_dirty_sync(inode);
+
+	/* Don't update times for block devices using O_DIRECT */
+	if (!(file->f_flags & O_DIRECT) || !S_ISBLK(inode->i_mode)) {
+		inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+		mark_inode_dirty_sync(inode);
+	}
 
 	do {
 		unsigned long index, offset;
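
The madvise_behavior() wrapper just added to mm/filemap.c (and its siblings in
mm/mlock.c and mm/mprotect.c later in this patch) all rely on the same PaX
bookkeeping: a VM_MIRROR vma stores in vm_private_data the unsigned distance
from its own start to its twin's start, so either side recovers the other with
a single addition. A minimal restatement of the invariant, under a
hypothetical helper name:

#include <linux/mm.h>

/* Hypothetical helper, not in the patch.  The patch stores
 * (vma_m->vm_start - vma->vm_start) in vma->vm_private_data and the
 * negated (wrapping) offset in vma_m->vm_private_data, so the relation
 * is symmetric: pax_mirror_start(vma) == vma_m->vm_start and vice versa. */
static inline unsigned long pax_mirror_start(const struct vm_area_struct *vma)
{
	return vma->vm_start + (unsigned long)vma->vm_private_data;
}

Every wrapper still validates the result with find_vma() and a VM_MIRROR test
before trusting it, since the offset alone cannot prove the twin exists.
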
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/Makefile linux-2.4.26-leo/mm/Makefile
--- linux-2.4.26/mm/Makefile	2002-08-03 01:39:46.000000000 +0100
+++ linux-2.4.26-leo/mm/Makefile	2004-05-26 23:34:19.000000000 +0100
@@ -9,12 +9,12 @@
 
 O_TARGET := mm.o
 
-export-objs := shmem.o filemap.o memory.o page_alloc.o
+export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
 
 obj-y	 := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
 	    vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
 	    page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
-	    shmem.o
+	    shmem.o mempool.o
 
 obj-$(CONFIG_HIGHMEM) += highmem.o
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/memory.c linux-2.4.26-leo/mm/memory.c
--- linux-2.4.26/mm/memory.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/mm/memory.c	2004-06-22 20:53:47.000000000 +0100
@@ -925,6 +925,64 @@
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
 }
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+/* PaX: if vma is mirrored, synchronize the mirror's PTE
+ *
+ * mm->page_table_lock is held on entry and is not released on exit or inside
+ * to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
+ */
+static void pax_mirror_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+       unsigned long address, pte_t *pte)
+{
+	unsigned long address_m;
+	struct vm_area_struct * vma_m = NULL;
+	pte_t * pte_m, entry_m;
+	struct page * page_m;
+
+	if (!(vma->vm_flags & VM_MIRROR))
+		return;
+
+	address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+	vma_m = find_vma(mm, address_m);
+	BUG_ON(!vma_m || vma_m->vm_start != address_m);
+
+	address_m = address + (unsigned long)vma->vm_private_data;
+
+	{
+		pgd_t *pgd_m;
+		pmd_t *pmd_m;
+
+		pgd_m = pgd_offset(mm, address_m);
+		pmd_m = pmd_offset(pgd_m, address_m);
+		pte_m = pte_offset(pmd_m, address_m);
+	}
+
+	if (pte_present(*pte_m)) {
+		flush_cache_page(vma_m, address_m);
+		flush_icache_page(vma_m, pte_page(*pte_m));
+	}
+	entry_m = ptep_get_and_clear(pte_m);
+	if (pte_present(entry_m))
+		flush_tlb_page(vma_m, address_m);  
+
+	if (pte_none(entry_m)) {
+		++mm->rss;
+	} else if (pte_present(entry_m)) {
+		page_cache_release(pte_page(entry_m));
+	} else {
+		free_swap_and_cache(pte_to_swp_entry(entry_m));
+		++mm->rss;
+	}
+
+	page_m = pte_page(*pte);
+	page_cache_get(page_m);
+	entry_m = mk_pte(page_m, vma_m->vm_page_prot);
+	if (pte_write(*pte))
+		entry_m = pte_mkdirty(pte_mkwrite(entry_m));
+	establish_pte(vma_m, address_m, pte_m, entry_m);  
+}                      
+#endif
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -988,6 +1046,11 @@
 
 		/* Free the old page.. */
 		new_page = old_page;
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		pax_mirror_fault(mm, vma, address, page_table);
+#endif
+
 	}
 	spin_unlock(&mm->page_table_lock);
 	page_cache_release(new_page);
@@ -1065,6 +1128,7 @@
 
 do_expand:
 	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+	gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
 	if (limit != RLIM_INFINITY && offset > limit)
 		goto out_sig;
 	if (offset > inode->i_sb->s_maxbytes)
@@ -1178,6 +1242,11 @@
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, pte);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	pax_mirror_fault(mm, vma, address, page_table);
+#endif
+
 	spin_unlock(&mm->page_table_lock);
 	return ret;
 }
@@ -1223,6 +1292,11 @@
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, entry);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	pax_mirror_fault(mm, vma, addr, page_table);
+#endif
+
 	spin_unlock(&mm->page_table_lock);
 	return 1;	/* Minor fault */
 
@@ -1304,6 +1378,11 @@
 
 	/* no need to invalidate: a not-present page shouldn't be cached */
 	update_mmu_cache(vma, address, entry);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	pax_mirror_fault(mm, vma, address, page_table);
+#endif
+
 	spin_unlock(&mm->page_table_lock);
 	return 2;	/* Major fault */
 }
@@ -1368,6 +1447,11 @@
 	pgd_t *pgd;
 	pmd_t *pmd;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	unsigned long address_m = 0UL;
+	struct vm_area_struct * vma_m = NULL;
+#endif
+
 	current->state = TASK_RUNNING;
 	pgd = pgd_offset(mm, address);
 
@@ -1376,6 +1460,47 @@
 	 * and the SMP-safe atomic PTE updates.
 	 */
 	spin_lock(&mm->page_table_lock);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (vma->vm_flags & VM_MIRROR) {
+		pgd_t *pgd_m;
+		pmd_t *pmd_m;
+		pte_t *pte_m;
+
+		address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+		vma_m = find_vma(mm, address_m);
+
+		/* PaX: sanity checks */
+		if (!vma_m) {
+			spin_unlock(&mm->page_table_lock);
+			printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n",
+			       address, vma, address_m, vma_m);
+			return 0;
+		} else if (!(vma_m->vm_flags & VM_MIRROR) ||
+			   vma_m->vm_start != address_m ||
+			   vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start)
+		{
+			spin_unlock(&mm->page_table_lock);
+			printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+				address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+			return 0;
+		}
+
+		address_m = address + (unsigned long)vma->vm_private_data;
+		pgd_m = pgd_offset(mm, address_m);
+		pmd_m = pmd_alloc(mm, pgd_m, address_m);
+		if (!pmd_m) {
+			spin_unlock(&mm->page_table_lock);
+			return -1;
+		}
+		pte_m = pte_alloc(mm, pmd_m, address_m);
+		if (!pte_m) {
+			spin_unlock(&mm->page_table_lock);
+			return -1;
+		}
+	}			
+#endif
+
 	pmd = pmd_alloc(mm, pgd, address);
 
 	if (pmd) {
@@ -1460,6 +1585,36 @@
 	return pte_offset(pmd, address);
 }
 
+#ifndef pmd_populate_kernel
+#define pmd_populate_kernel(mm,pmd,new) pmd_populate(mm,pmd,new)
+#endif
+
+pte_t *pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *new;
+
+		spin_unlock(&mm->page_table_lock);
+		new = pte_alloc_one(mm, address);
+		spin_lock(&mm->page_table_lock);
+		if (!new)
+			return NULL;
+
+		/*
+		 * Because we dropped the lock, we should re-check the
+		 * entry, as somebody else could have populated it..
+		 */
+		if (!pmd_none(*pmd)) {
+			pte_free(new);
+			check_pgt_cache();
+			goto out;
+		}
+		pmd_populate_kernel(mm, pmd, new);
+	}
+out:
+	return pte_offset(pmd, address);
+}
+
 int make_pages_present(unsigned long addr, unsigned long end)
 {
 	int ret, len, write;
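
pte_alloc_kernel() above is a textbook drop-and-recheck: page_table_lock may
not be held across the sleeping pte_alloc_one(), so the lock is released, the
page allocated, and the pmd re-tested because another thread may have
populated it in the window. The same idiom in isolation, under invented names
(a sketch, not patch code):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct slot {
	spinlock_t lock;
	void *obj;
};

/* Called and returns with s->lock held, like pte_alloc_kernel() above. */
static void *slot_get_locked(struct slot *s)
{
	if (!s->obj) {
		void *new;

		spin_unlock(&s->lock);
		new = kmalloc(64, GFP_KERNEL);	/* may sleep */
		spin_lock(&s->lock);
		if (!new)
			return NULL;
		if (s->obj)
			kfree(new);	/* raced: someone else filled it */
		else
			s->obj = new;
	}
	return s->obj;
}
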
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/mempool.c linux-2.4.26-leo/mm/mempool.c
--- linux-2.4.26/mm/mempool.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/mm/mempool.c	2004-05-26 23:34:19.000000000 +0100
@@ -0,0 +1,299 @@
+/*
+ *  linux/mm/mempool.c
+ *
+ *  memory buffer pool support. Such pools are mostly used
+ *  for guaranteed, deadlock-free memory allocations during
+ *  extreme VM load.
+ *
+ *  started by Ingo Molnar, Copyright (C) 2001
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+
+struct mempool_s {
+	spinlock_t lock;
+	int min_nr;		/* nr of elements at *elements */
+	int curr_nr;		/* Current nr of elements at *elements */
+	void **elements;
+
+	void *pool_data;
+	mempool_alloc_t *alloc;
+	mempool_free_t *free;
+	wait_queue_head_t wait;
+};
+
+static void add_element(mempool_t *pool, void *element)
+{
+	BUG_ON(pool->curr_nr >= pool->min_nr);
+	pool->elements[pool->curr_nr++] = element;
+}
+
+static void *remove_element(mempool_t *pool)
+{
+	BUG_ON(pool->curr_nr <= 0);
+	return pool->elements[--pool->curr_nr];
+}
+
+static void free_pool(mempool_t *pool)
+{
+	while (pool->curr_nr) {
+		void *element = remove_element(pool);
+		pool->free(element, pool->pool_data);
+	}
+	kfree(pool->elements);
+	kfree(pool);
+}
+
+/**
+ * mempool_create - create a memory pool
+ * @min_nr:    the minimum number of elements guaranteed to be
+ *             allocated for this pool.
+ * @alloc_fn:  user-defined element-allocation function.
+ * @free_fn:   user-defined element-freeing function.
+ * @pool_data: optional private data available to the user-defined functions.
+ *
+ * this function creates and allocates a guaranteed size, preallocated
+ * memory pool. The pool can be used from the mempool_alloc and mempool_free
+ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
+ * functions might sleep - as long as the mempool_alloc function is not called
+ * from IRQ contexts.
+ */
+mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+				mempool_free_t *free_fn, void *pool_data)
+{
+	mempool_t *pool;
+
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+	memset(pool, 0, sizeof(*pool));
+	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
+	if (!pool->elements) {
+		kfree(pool);
+		return NULL;
+	}
+	spin_lock_init(&pool->lock);
+	pool->min_nr = min_nr;
+	pool->pool_data = pool_data;
+	init_waitqueue_head(&pool->wait);
+	pool->alloc = alloc_fn;
+	pool->free = free_fn;
+
+	/*
+	 * First pre-allocate the guaranteed number of buffers.
+	 */
+	while (pool->curr_nr < pool->min_nr) {
+		void *element;
+
+		element = pool->alloc(GFP_KERNEL, pool->pool_data);
+		if (unlikely(!element)) {
+			free_pool(pool);
+			return NULL;
+		}
+		add_element(pool, element);
+	}
+	return pool;
+}
+
+/**
+ * mempool_resize - resize an existing memory pool
+ * @pool:       pointer to the memory pool which was allocated via
+ *              mempool_create().
+ * @new_min_nr: the new minimum number of elements guaranteed to be
+ *              allocated for this pool.
+ * @gfp_mask:   the usual allocation bitmask.
+ *
+ * This function shrinks/grows the pool. In the case of growing,
+ * it cannot be guaranteed that the pool will be grown to the new
+ * size immediately, but new mempool_free() calls will refill it.
+ *
+ * Note, the caller must guarantee that no mempool_destroy is called
+ * while this function is running. mempool_alloc() & mempool_free()
+ * might be called (eg. from IRQ contexts) while this function executes.
+ */
+int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
+{
+	void *element;
+	void **new_elements;
+	unsigned long flags;
+
+	BUG_ON(new_min_nr <= 0);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	if (new_min_nr < pool->min_nr) {
+		while (pool->curr_nr > new_min_nr) {
+			element = remove_element(pool);
+			spin_unlock_irqrestore(&pool->lock, flags);
+			pool->free(element, pool->pool_data);
+			spin_lock_irqsave(&pool->lock, flags);
+		}
+		pool->min_nr = new_min_nr;
+		goto out_unlock;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	/* Grow the pool */
+	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+	if (!new_elements)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	memcpy(new_elements, pool->elements,
+			pool->curr_nr * sizeof(*new_elements));
+	kfree(pool->elements);
+	pool->elements = new_elements;
+	pool->min_nr = new_min_nr;
+
+	while (pool->curr_nr < pool->min_nr) {
+		spin_unlock_irqrestore(&pool->lock, flags);
+		element = pool->alloc(gfp_mask, pool->pool_data);
+		if (!element)
+			goto out;
+		spin_lock_irqsave(&pool->lock, flags);
+		if (pool->curr_nr < pool->min_nr)
+			add_element(pool, element);
+		else
+			pool->free(element, pool->pool_data);	/* Raced */
+	}
+out_unlock:
+	spin_unlock_irqrestore(&pool->lock, flags);
+out:
+	return 0;
+}
+
+/**
+ * mempool_destroy - deallocate a memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * this function only sleeps if the free_fn() function sleeps. The caller
+ * has to guarantee that all elements have been returned to the pool (ie:
+ * freed) prior to calling mempool_destroy().
+ */
+void mempool_destroy(mempool_t *pool)
+{
+	if (pool->curr_nr != pool->min_nr)
+		BUG();		/* There were outstanding elements */
+	free_pool(pool);
+}
+
+/**
+ * mempool_alloc - allocate an element from a specific memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ * @gfp_mask:  the usual allocation bitmask.
+ *
+ * this function only sleeps if the alloc_fn function sleeps or
+ * returns NULL. Note that due to preallocation, this function
+ * *never* fails when called from process contexts. (it might
+ * fail if called from an IRQ context.)
+ */
+void * mempool_alloc(mempool_t *pool, int gfp_mask)
+{
+	void *element;
+	unsigned long flags;
+	int curr_nr;
+	DECLARE_WAITQUEUE(wait, current);
+	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
+repeat_alloc:
+	element = pool->alloc(gfp_nowait, pool->pool_data);
+	if (likely(element != NULL))
+		return element;
+
+	/*
+	 * If the pool is less than 50% full then try harder
+	 * to allocate an element:
+	 */
+	if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
+		element = pool->alloc(gfp_mask, pool->pool_data);
+		if (likely(element != NULL))
+			return element;
+	}
+
+	/*
+	 * Kick the VM at this point.
+	 */
+	wakeup_bdflush();
+
+	spin_lock_irqsave(&pool->lock, flags);
+	if (likely(pool->curr_nr)) {
+		element = remove_element(pool);
+		spin_unlock_irqrestore(&pool->lock, flags);
+		return element;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	/* We must not sleep in the GFP_ATOMIC case */
+	if (gfp_mask == gfp_nowait)
+		return NULL;
+
+	run_task_queue(&tq_disk);
+
+	add_wait_queue_exclusive(&pool->wait, &wait);
+	set_task_state(current, TASK_UNINTERRUPTIBLE);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	curr_nr = pool->curr_nr;
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (!curr_nr)
+		schedule();
+
+	current->state = TASK_RUNNING;
+	remove_wait_queue(&pool->wait, &wait);
+
+	goto repeat_alloc;
+}
+
+/**
+ * mempool_free - return an element to the pool.
+ * @element:   pool element pointer.
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * this function only sleeps if the free_fn() function sleeps.
+ */
+void mempool_free(void *element, mempool_t *pool)
+{
+	unsigned long flags;
+
+	if (pool->curr_nr < pool->min_nr) {
+		spin_lock_irqsave(&pool->lock, flags);
+		if (pool->curr_nr < pool->min_nr) {
+			add_element(pool, element);
+			spin_unlock_irqrestore(&pool->lock, flags);
+			wake_up(&pool->wait);
+			return;
+		}
+		spin_unlock_irqrestore(&pool->lock, flags);
+	}
+	pool->free(element, pool->pool_data);
+}
+
+/*
+ * A commonly used alloc and free fn.
+ */
+void *mempool_alloc_slab(int gfp_mask, void *pool_data)
+{
+	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
+	return kmem_cache_alloc(mem, gfp_mask);
+}
+
+void mempool_free_slab(void *element, void *pool_data)
+{
+	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
+	kmem_cache_free(mem, element);
+}
+
+
+EXPORT_SYMBOL(mempool_create);
+EXPORT_SYMBOL(mempool_resize);
+EXPORT_SYMBOL(mempool_destroy);
+EXPORT_SYMBOL(mempool_alloc);
+EXPORT_SYMBOL(mempool_free);
+EXPORT_SYMBOL(mempool_alloc_slab);
+EXPORT_SYMBOL(mempool_free_slab);
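
Typical use of the mempool API added above, as an illustrative sketch only
(the cache name, object size and reserve count are invented): a pool built
over a slab cache with the exported mempool_alloc_slab/mempool_free_slab
helpers guarantees that mempool_alloc(..., GFP_KERNEL) never fails in process
context.

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/errno.h>

#define MY_POOL_MIN 16			/* elements held in reserve */

static kmem_cache_t *my_cache;
static mempool_t *my_pool;

static int my_pool_init(void)
{
	my_cache = kmem_cache_create("my_objs", 128, 0, 0, NULL, NULL);
	if (!my_cache)
		return -ENOMEM;
	my_pool = mempool_create(MY_POOL_MIN, mempool_alloc_slab,
				 mempool_free_slab, my_cache);
	if (!my_pool) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_pool_use(void)
{
	/* falls back to the preallocated reserve under memory pressure */
	void *obj = mempool_alloc(my_pool, GFP_KERNEL);

	/* ... use obj for work that must make forward progress ... */
	mempool_free(obj, my_pool);
}
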
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/mlock.c linux-2.4.26-leo/mm/mlock.c
--- linux-2.4.26/mm/mlock.c	2001-09-17 23:30:23.000000000 +0100
+++ linux-2.4.26-leo/mm/mlock.c	2004-06-22 20:53:47.000000000 +0100
@@ -114,9 +114,40 @@
 	return 0;
 }
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+static int __mlock_fixup(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, unsigned int newflags);
 static int mlock_fixup(struct vm_area_struct * vma, 
 	unsigned long start, unsigned long end, unsigned int newflags)
 {
+	if (vma->vm_flags & VM_MIRROR) {
+		struct vm_area_struct * vma_m;
+		unsigned long start_m, end_m;
+		int error;
+
+		start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+		vma_m = find_vma(vma->vm_mm, start_m);
+		if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
+			start_m = start + (unsigned long)vma->vm_private_data;
+			end_m = end + (unsigned long)vma->vm_private_data;
+			error = __mlock_fixup(vma_m, start_m, end_m, newflags);
+			if (error)
+				return error;
+		} else {
+			printk(KERN_ERR "PAX: VMMIRROR: mlock bug in %s, %08lx\n", current->comm, vma->vm_start);
+			return -ENOMEM;
+		}
+	}
+	return __mlock_fixup(vma, start, end, newflags);
+}
+
+static int __mlock_fixup(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, unsigned int newflags)
+#else
+static int mlock_fixup(struct vm_area_struct * vma,
+	unsigned long start, unsigned long end, unsigned int newflags)
+#endif
+{
 	int pages, retval;
 
 	if (newflags == vma->vm_flags)
@@ -159,6 +190,17 @@
 		return -EINVAL;
 	if (end == start)
 		return 0;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (current->flags & PF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return -EINVAL;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	vma = find_vma(current->mm, start);
 	if (!vma || vma->vm_start > start)
 		return -ENOMEM;
@@ -209,6 +251,7 @@
 	lock_limit >>= PAGE_SHIFT;
 
 	/* check against resource limits */
+	gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
 	if (locked > lock_limit)
 		goto out;
 
@@ -253,6 +296,16 @@
 	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
 		unsigned int newflags;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if (current->flags & PF_PAX_SEGMEXEC) {
+			if (vma->vm_end > SEGMEXEC_TASK_SIZE)
+				break;
+		} else
+#endif
+
+		if (vma->vm_end > TASK_SIZE)
+			break;
+
 		newflags = vma->vm_flags | VM_LOCKED;
 		if (!(flags & MCL_CURRENT))
 			newflags &= ~VM_LOCKED;
@@ -276,6 +329,7 @@
 	lock_limit >>= PAGE_SHIFT;
 
 	ret = -ENOMEM;
+	gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
 	if (current->mm->total_vm > lock_limit)
 		goto out;
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/mmap.c linux-2.4.26-leo/mm/mmap.c
--- linux-2.4.26/mm/mmap.c	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/mm/mmap.c	2004-06-22 20:53:47.000000000 +0100
@@ -15,6 +15,8 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/mount.h>
+#include <linux/random.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -169,6 +171,7 @@
 
 	/* Check against rlimit.. */
 	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+	gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1);
 	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
 		goto out;
 
@@ -206,6 +209,11 @@
 		_trans(prot, PROT_WRITE, VM_WRITE) |
 		_trans(prot, PROT_EXEC, VM_EXEC);
 	flag_bits =
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		_trans(flags, MAP_MIRROR, VM_MIRROR) |
+#endif
+
 		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
 		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
 		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
@@ -401,6 +409,28 @@
 	int error;
 	rb_node_t ** rb_link, * rb_parent;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	struct vm_area_struct * vma_m = NULL;
+
+	if (flags & MAP_MIRROR) {
+		/* PaX: sanity checks, to be removed when proved to be stable */
+		if (file || len || ((flags & MAP_TYPE) != MAP_PRIVATE))
+			return -EINVAL;
+
+		vma_m = find_vma(mm, pgoff);
+
+		if (!vma_m ||
+		    vma_m->vm_start != pgoff ||
+		    (vma_m->vm_flags & VM_MIRROR) ||
+		    (!(vma_m->vm_flags & VM_WRITE) && (prot & PROT_WRITE)))
+			return -EINVAL;
+
+		file = vma_m->vm_file;
+		pgoff = vma_m->vm_pgoff;
+		len = vma_m->vm_end - vma_m->vm_start;
+	}
+#endif
+
 	if (file) {
 		if (!file->f_op || !file->f_op->mmap)
 			return -ENODEV;
@@ -438,10 +468,35 @@
 	 */
 	vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+	if (file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
+		vm_flags &= ~VM_MAYEXEC;
+
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+	if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+		if (current->flags & PF_PAX_MPROTECT) {
+			if (!file || (prot & PROT_WRITE))
+				vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+			else
+				vm_flags &= ~VM_MAYWRITE;
+
+#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC
+			if (file && (flags & MAP_MIRROR) && (vm_flags & VM_EXEC))
+				vma_m->vm_flags &= ~VM_MAYWRITE;
+#endif
+
+		}
+#endif
+
+	}
+#endif
+
 	/* mlock MCL_FUTURE? */
 	if (vm_flags & VM_LOCKED) {
 		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
 		locked += len;
+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
 		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
 			return -EAGAIN;
 	}
@@ -486,6 +541,9 @@
 		}
 	}
 
+	if (!gr_acl_handle_mmap(file, prot))
+		return -EACCES;
+
 	/* Clear old maps */
 munmap_back:
 	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
@@ -496,10 +554,16 @@
 	}
 
 	/* Check against address space limit. */
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (!(vm_flags & VM_MIRROR)) {
+#endif
+	gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
 	if ((mm->total_vm << PAGE_SHIFT) + len
 	    > current->rlim[RLIMIT_AS].rlim_cur)
 		return -ENOMEM;
-
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	}
+#endif
 	/* Private writable mapping? Check memory availability.. */
 	if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
 	    !(flags & MAP_NORESERVE)				 &&
@@ -523,6 +587,13 @@
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if ((file || !(current->flags & PF_PAX_PAGEEXEC)) && (vm_flags & (VM_READ|VM_WRITE)))
+		vma->vm_page_prot = protection_map[(vm_flags | VM_EXEC) & 0x0f];
+	else
+#endif
+
 	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 	vma->vm_ops = NULL;
 	vma->vm_pgoff = pgoff;
@@ -551,6 +622,14 @@
 			goto free_vma;
 	}
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (flags & MAP_MIRROR) {
+		vma_m->vm_flags |= VM_MIRROR;
+		vma_m->vm_private_data = (void *)(vma->vm_start - vma_m->vm_start);
+		vma->vm_private_data = (void *)(vma_m->vm_start - vma->vm_start);
+	}
+#endif
+
 	/* Can addr have changed??
 	 *
 	 * Answer: Yes, several device drivers can do it in their
@@ -586,6 +665,9 @@
 		atomic_inc(&file->f_dentry->d_inode->i_writecount);
 
 out:	
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (!(flags & MAP_MIRROR))
+#endif
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += len >> PAGE_SHIFT;
@@ -622,20 +704,49 @@
 {
 	struct vm_area_struct *vma;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) && len > SEGMEXEC_TASK_SIZE)
+		return -ENOMEM;
+	else
+#endif
+
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
+#endif
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if ((current->flags & PF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE-len < addr)		
+			return -ENOMEM;
+#endif
+
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
 
+#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP
+	/* PaX: randomize base address if requested */
+	if (current->flags & PF_PAX_RANDMMAP)
+		addr += current->mm->delta_mmap;
+#endif
+
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if ((current->flags & PF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE-len < addr)
+			return -ENOMEM;
+		else
+#endif
+
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
 		if (!vma || addr + len <= vma->vm_start)
@@ -797,6 +908,9 @@
 	struct vm_area_struct *mpnt;
 	unsigned long end = addr + len;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (!(area->vm_flags & VM_MIRROR))
+#endif
 	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
 	if (area->vm_flags & VM_LOCKED)
 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
@@ -922,6 +1036,83 @@
 	}
 }
 
+static inline struct vm_area_struct *unmap_vma(struct mm_struct *mm,
+	unsigned long addr, size_t len, struct vm_area_struct *mpnt,
+	struct vm_area_struct *extra)
+{
+	unsigned long st, end, size;
+	struct file *file = NULL;
+
+	st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
+	end = addr+len;
+	end = end > mpnt->vm_end ? mpnt->vm_end : end;
+	size = end - st;
+
+	if (mpnt->vm_flags & VM_DENYWRITE &&
+	    (st != mpnt->vm_start || end != mpnt->vm_end) &&
+	    (file = mpnt->vm_file) != NULL) {
+		atomic_dec(&file->f_dentry->d_inode->i_writecount);
+	}
+	remove_shared_vm_struct(mpnt);
+	zap_page_range(mm, st, size);
+
+	/*
+	 * Fix the mapping, and free the old area if it wasn't reused.
+	 */
+	extra = unmap_fixup(mm, mpnt, st, size, extra);
+	if (file)
+		atomic_inc(&file->f_dentry->d_inode->i_writecount);
+	return extra;
+}
+       
+static struct vm_area_struct *unmap_vma_list(struct mm_struct *mm,
+	unsigned long addr, size_t len, struct vm_area_struct *free,
+	struct vm_area_struct *extra, struct vm_area_struct *prev)
+{
+	struct vm_area_struct *mpnt;
+
+	/* Ok - we have the memory areas we should free on the 'free' list,
+	 * so release them, and unmap the page range..
+	 * If one of the segments is only being partially unmapped,
+	 * it will put new vm_area_struct(s) into the address space.
+	 * In that case we have to be careful with VM_DENYWRITE.
+	 */
+	while ((mpnt = free) != NULL) {
+		free = free->vm_next;
+		extra = unmap_vma(mm, addr, len, mpnt, extra);
+	}
+
+	free_pgtables(mm, prev, addr, addr+len);
+
+	return extra;
+}
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+static struct vm_area_struct *unmap_vma_mirror_list(struct mm_struct *mm,
+	unsigned long addr, size_t len, struct vm_area_struct *free_m,
+	struct vm_area_struct *extra_m)
+{
+	struct vm_area_struct *mpnt, *prev;
+
+	while ((mpnt = free_m) != NULL) {
+		unsigned long addr_m, start, end;
+
+		free_m = free_m->vm_next;
+
+		addr_m = addr - (unsigned long)mpnt->vm_private_data;
+		start = addr_m < mpnt->vm_start ? mpnt->vm_start : addr_m;
+		end = addr_m+len;
+		end = end > mpnt->vm_end ? mpnt->vm_end : end;
+		find_vma_prev(mm, mpnt->vm_start, &prev);
+		extra_m = unmap_vma(mm, addr_m, len, mpnt, extra_m);
+
+		free_pgtables(mm, prev, start, end);
+	}               
+
+	return extra_m;
+}
+#endif
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -931,6 +1122,10 @@
 {
 	struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	struct vm_area_struct *free_m, *extra_m;
+#endif
+
 	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
 		return -EINVAL;
 
@@ -963,60 +1158,69 @@
 	if (!extra)
 		return -ENOMEM;
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) { 
+		extra_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		if (!extra_m) {
+			kmem_cache_free(vm_area_cachep, extra);
+			return -ENOMEM;
+		}
+	} else
+		extra_m = NULL;
+
+	free_m = NULL;
+#endif
+
 	npp = (prev ? &prev->vm_next : &mm->mmap);
 	free = NULL;
 	spin_lock(&mm->page_table_lock);
 	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
+		mm->map_count--;
 		*npp = mpnt->vm_next;
 		mpnt->vm_next = free;
 		free = mpnt;
 		rb_erase(&mpnt->vm_rb, &mm->mm_rb);
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		if (free->vm_flags & VM_MIRROR) {
+			struct vm_area_struct *mpnt_m, *prev_m, **npp_m;
+			unsigned long addr_m = free->vm_start + (unsigned long)free->vm_private_data;
+
+			mm->mmap_cache = NULL;  /* Kill the cache. */
+			mpnt_m = find_vma_prev(mm, addr_m, &prev_m);
+			if (mpnt_m && mpnt_m->vm_start == addr_m && (mpnt_m->vm_flags & VM_MIRROR)) {
+				mm->map_count--;
+				npp_m = (prev_m ? &prev_m->vm_next : &mm->mmap);
+				*npp_m = mpnt_m->vm_next;
+				mpnt_m->vm_next = free_m;
+				free_m = mpnt_m;
+				rb_erase(&mpnt_m->vm_rb, &mm->mm_rb);
+			} else
+				printk(KERN_ERR "PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, free->vm_start);
+		}
+#endif
+
 	}
 	mm->mmap_cache = NULL;	/* Kill the cache. */
 	spin_unlock(&mm->page_table_lock);
 
-	/* Ok - we have the memory areas we should free on the 'free' list,
-	 * so release them, and unmap the page range..
-	 * If the one of the segments is only being partially unmapped,
-	 * it will put new vm_area_struct(s) into the address space.
-	 * In that case we have to be careful with VM_DENYWRITE.
-	 */
-	while ((mpnt = free) != NULL) {
-		unsigned long st, end, size;
-		struct file *file = NULL;
-
-		free = free->vm_next;
-
-		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
-		end = addr+len;
-		end = end > mpnt->vm_end ? mpnt->vm_end : end;
-		size = end - st;
-
-		if (mpnt->vm_flags & VM_DENYWRITE &&
-		    (st != mpnt->vm_start || end != mpnt->vm_end) &&
-		    (file = mpnt->vm_file) != NULL) {
-			atomic_dec(&file->f_dentry->d_inode->i_writecount);
-		}
-		remove_shared_vm_struct(mpnt);
-		mm->map_count--;
+	extra = unmap_vma_list(mm, addr, len, free, extra, prev);
 
-		zap_page_range(mm, st, size);
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	extra_m = unmap_vma_mirror_list(mm, addr, len, free_m, extra_m);
+#endif
 
-		/*
-		 * Fix the mapping, and free the old area if it wasn't reused.
-		 */
-		extra = unmap_fixup(mm, mpnt, st, size, extra);
-		if (file)
-			atomic_inc(&file->f_dentry->d_inode->i_writecount);
-	}
 	validate_mm(mm);
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (extra_m)
+		kmem_cache_free(vm_area_cachep, extra_m);
+#endif
+
 	/* Release the extra vma struct if it wasn't used */
 	if (extra)
 		kmem_cache_free(vm_area_cachep, extra);
 
-	free_pgtables(mm, prev, addr, addr+len);
-
 	return 0;
 }
 
@@ -1025,8 +1229,15 @@
 	int ret;
 	struct mm_struct *mm = current->mm;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if ((current->flags & PF_PAX_SEGMEXEC) &&
+	    (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
+		return -EINVAL;
+#endif
+
 	down_write(&mm->mmap_sem);
 	ret = do_munmap(mm, addr, len);
+
 	up_write(&mm->mmap_sem);
 	return ret;
 }
@@ -1036,8 +1247,32 @@
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) && defined(CONFIG_GRKERNSEC_PAX_MPROTECT)
+unsigned long __do_brk(unsigned long addr, unsigned long len);
+
 unsigned long do_brk(unsigned long addr, unsigned long len)
 {
+	unsigned long ret;
+
+	ret = __do_brk(addr, len);
+	if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) {
+		unsigned long ret_m;
+
+		ret_m = do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
+		if (ret_m > TASK_SIZE) {
+			do_munmap(current->mm, addr, len);
+			ret = ret_m;
+		}
+	}
+
+	return ret;
+}
+
+unsigned long __do_brk(unsigned long addr, unsigned long len)
+#else
+unsigned long do_brk(unsigned long addr, unsigned long len)
+#endif
+{
 	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
 	unsigned long flags;
@@ -1047,6 +1282,13 @@
 	if (!len)
 		return addr;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (current->flags & PF_PAX_SEGMEXEC) {
+		if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
+			return -EINVAL;
+	} else
+#endif
+
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
@@ -1056,6 +1298,7 @@
 	if (mm->def_flags & VM_LOCKED) {
 		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
 		locked += len;
+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
 		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
 			return -EAGAIN;
 	}
@@ -1072,6 +1315,7 @@
 	}
 
 	/* Check against address space limits *after* clearing old maps... */
+	gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
 	if ((mm->total_vm << PAGE_SHIFT) + len
 	    > current->rlim[RLIMIT_AS].rlim_cur)
 		return -ENOMEM;
@@ -1084,6 +1328,17 @@
 
 	flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
 
+#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC)
+	if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
+		flags &= ~VM_EXEC;
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+		if (current->flags & PF_PAX_MPROTECT)
+			flags &= ~VM_MAYEXEC;
+#endif
+
+	}
+#endif
 	/* Can we just expand an old anonymous mapping? */
 	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
 		goto out;
@@ -1099,6 +1354,12 @@
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = flags;
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
+		vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
+	else
+#endif
 	vma->vm_page_prot = protection_map[flags & 0x0f];
 	vma->vm_ops = NULL;
 	vma->vm_pgoff = 0;
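
The MAP_MIRROR path through do_mmap_pgoff() above reinterprets the syscall
arguments: file must be NULL, len must be 0, and pgoff carries the user
address of the vma to be mirrored (whose file, offset and length are then
copied over). The do_brk() wrapper is the in-tree caller; distilled into a
sketch (the helper name is invented; the caller holds mm->mmap_sem, as
do_brk() does):

#include <linux/mm.h>
#include <linux/mman.h>

static unsigned long pax_mirror_brk(unsigned long addr)
{
	/* len == 0 and pgoff == addr: under MAP_MIRROR these mean "copy
	 * geometry from the vma at this address", not a length and a
	 * file offset. */
	return do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE,
			     MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
}
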
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/mprotect.c linux-2.4.26-leo/mm/mprotect.c
--- linux-2.4.26/mm/mprotect.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/mm/mprotect.c	2004-06-22 20:53:47.000000000 +0100
@@ -7,6 +7,12 @@
 #include <linux/smp_lock.h>
 #include <linux/shm.h>
 #include <linux/mman.h>
+#include <linux/grsecurity.h>
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+#include <linux/elf.h>
+#include <linux/fs.h>
+#endif
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -236,6 +242,44 @@
 	return 0;
 }
 
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+	unsigned long start, unsigned long end, unsigned int newflags);
+
+static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+	unsigned long start, unsigned long end, unsigned int newflags)
+{
+	if (vma->vm_flags & VM_MIRROR) {
+		struct vm_area_struct * vma_m, * prev_m;
+		unsigned long start_m, end_m;
+		int error;
+
+		start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
+		vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
+		if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
+			start_m = start + (unsigned long)vma->vm_private_data;
+			end_m = end + (unsigned long)vma->vm_private_data;
+			if ((current->flags & PF_PAX_SEGMEXEC) && !(newflags & VM_EXEC))
+				error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
+			else
+				error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags);
+			if (error)
+				return error;
+		} else {
+			printk(KERN_ERR "PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start);
+			return -ENOMEM;
+		}
+	}
+
+	return __mprotect_fixup(vma, pprev, start, end, newflags);
+}
+
+static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+	unsigned long start, unsigned long end, unsigned int newflags)
+{
+	pgprot_t newprot;
+	int error;
+#else
 static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
 	unsigned long start, unsigned long end, unsigned int newflags)
 {
@@ -246,6 +290,13 @@
 		*pprev = vma;
 		return 0;
 	}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC
+	if (!(current->flags & PF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
+		newprot = protection_map[(newflags | VM_EXEC) & 0xf];
+	else
+#endif
 	newprot = protection_map[newflags & 0xf];
 	if (start == vma->vm_start) {
 		if (end == vma->vm_end)
@@ -264,6 +315,69 @@
 	return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+/* PaX: non-PIC ELF libraries need relocations on their executable segments
+ * therefore we'll grant them VM_MAYWRITE once during their life.
+ *
+ * The checks favor ld-linux.so behaviour which operates on a per ELF segment
+ * basis because we want to allow the common case and not the special ones.
+ */
+static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start)
+{
+	struct elfhdr elf_h;
+	struct elf_phdr elf_p, p_dyn;
+	elf_dyn dyn;
+	unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
+
+#ifndef CONFIG_GRKERNSEC_PAX_NOELFRELOCS
+	if ((vma->vm_start != start) ||
+	    !vma->vm_file ||
+	    !(vma->vm_flags & VM_MAYEXEC) ||
+	    (vma->vm_flags & VM_MAYNOTWRITE))
+#endif
+
+		return;
+
+	if (0 > kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) ||
+	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+
+#ifdef CONFIG_GRKERNSEC_PAX_ETEXECRELOCS
+	    (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
+#else
+	    elf_h.e_type != ET_DYN ||
+#endif
+
+	    !elf_check_arch(&elf_h) ||
+	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+	    elf_h.e_phnum > j)
+		return;
+
+	for (i = 0UL; i < elf_h.e_phnum; i++) {
+		if (0 > kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p)))
+			return;
+		if (elf_p.p_type == PT_DYNAMIC) {
+			p_dyn = elf_p;
+			j = i;
+		}
+	}
+	if (elf_h.e_phnum <= j)
+		return;
+
+	i = 0UL;
+	do {
+		if (0 > kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn)))
+			return;
+		if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
+			vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
+			gr_log_textrel(vma);
+			return;
+		}
+		i++;
+	} while (dyn.d_tag != DT_NULL);
+	return;
+}
+#endif
+
 asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 {
 	unsigned long nstart, end, tmp;
@@ -276,6 +390,17 @@
 	end = start + len;
 	if (end < start)
 		return -ENOMEM;
+
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (current->flags & PF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return -EINVAL;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
 		return -EINVAL;
 	if (end == start)
@@ -288,6 +413,16 @@
 	if (!vma || vma->vm_start > start)
 		goto out;
 
+	if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+		error = -EACCES;
+		goto out;
+	}
+
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+	if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE))
+		pax_handle_maywrite(vma, start);
+#endif
+
 	for (nstart = start ; ; ) {
 		unsigned int newflags;
 		int last = 0;
@@ -300,6 +435,12 @@
 			goto out;
 		}
 
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT
+		/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
+		if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
+			newflags &= ~VM_MAYWRITE;
+#endif
+
 		if (vma->vm_end > end) {
 			error = mprotect_fixup(vma, &prev, nstart, end, newflags);
 			goto out;
@@ -332,6 +473,7 @@
 		prev->vm_mm->map_count--;
 	}
 out:
+
 	up_write(&current->mm->mmap_sem);
 	return error;
 }
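
pax_handle_maywrite() above grants a one-shot VM_MAYWRITE to objects whose
dynamic section declares text relocations. The same DT_TEXTREL / DF_TEXTREL
test can be run from userspace to audit which libraries will trigger it; a
minimal 32-bit-only sketch (names invented, error handling abbreviated):

#include <elf.h>
#include <stdio.h>
#include <string.h>

static int elf_has_textrel(const char *path)
{
	Elf32_Ehdr eh;
	Elf32_Phdr ph;
	Elf32_Dyn dyn;
	long dyn_off = -1;
	int i, found = 0;
	FILE *f = fopen(path, "rb");

	if (!f || fread(&eh, sizeof eh, 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG))
		goto out;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, eh.e_phoff + (long)i * sizeof ph, SEEK_SET) ||
		    fread(&ph, sizeof ph, 1, f) != 1)
			goto out;
		if (ph.p_type == PT_DYNAMIC)
			dyn_off = ph.p_offset;
	}
	if (dyn_off < 0 || fseek(f, dyn_off, SEEK_SET))
		goto out;
	while (fread(&dyn, sizeof dyn, 1, f) == 1 && dyn.d_tag != DT_NULL)
		if (dyn.d_tag == DT_TEXTREL ||
		    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
			found = 1;
out:
	if (f)
		fclose(f);
	return found;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %s\n", argv[1],
		       elf_has_textrel(argv[1]) ? "TEXTREL" : "clean");
	return 0;
}
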
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/mremap.c linux-2.4.26-leo/mm/mremap.c
--- linux-2.4.26/mm/mremap.c	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/mm/mremap.c	2004-06-22 20:53:47.000000000 +0100
@@ -197,7 +197,9 @@
 		}
 
 		do_munmap(current->mm, addr, old_len);
-
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+		if (!(new_vma->vm_flags & VM_MIRROR))
+#endif
 		current->mm->total_vm += new_len >> PAGE_SHIFT;
 		if (vm_locked) {
 			current->mm->locked_vm += new_len >> PAGE_SHIFT;
@@ -236,6 +238,18 @@
 	old_len = PAGE_ALIGN(old_len);
 	new_len = PAGE_ALIGN(new_len);
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+	if (current->flags & PF_PAX_SEGMEXEC) {
+		if (new_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-new_len ||
+		    old_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-old_len)
+			goto out;
+	} else
+#endif
+
+	if (new_len > TASK_SIZE || addr > TASK_SIZE-new_len ||
+	    old_len > TASK_SIZE || addr > TASK_SIZE-old_len)
+		goto out;
+
 	/* new_addr is only valid if MREMAP_FIXED is specified */
 	if (flags & MREMAP_FIXED) {
 		if (new_addr & ~PAGE_MASK)
@@ -243,6 +257,13 @@
 		if (!(flags & MREMAP_MAYMOVE))
 			goto out;
 
+#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC
+		if (current->flags & PF_PAX_SEGMEXEC) {
+			if (new_len > SEGMEXEC_TASK_SIZE || new_addr > SEGMEXEC_TASK_SIZE-new_len)
+				goto out;
+		} else
+#endif
+
 		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
 			goto out;
 		/*
@@ -287,6 +308,16 @@
 	vma = find_vma(current->mm, addr);
 	if (!vma || vma->vm_start > addr)
 		goto out;
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if ((current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) &&
+	    (vma->vm_flags & VM_MIRROR))
+	{
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
+
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
@@ -298,13 +329,22 @@
 		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
 		locked += new_len - old_len;
 		ret = -EAGAIN;
+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
 		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
 			goto out;
 	}
 	ret = -ENOMEM;
+
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	if (!(vma->vm_flags & VM_MIRROR)) {
+#endif   
+	gr_learn_resource(current, RLIMIT_AS, (current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len), 1);
 	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
 	    > current->rlim[RLIMIT_AS].rlim_cur)
 		goto out;
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+	}
+#endif
 	/* Private writable mapping? Check memory availability.. */
 	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
 	    !(flags & MAP_NORESERVE)				 &&
@@ -326,6 +366,9 @@
 			spin_lock(&vma->vm_mm->page_table_lock);
 			vma->vm_end = addr + new_len;
 			spin_unlock(&vma->vm_mm->page_table_lock);
+#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC)
+			if (!(vma->vm_flags & VM_MIRROR))
+#endif   
 			current->mm->total_vm += pages;
 			if (vma->vm_flags & VM_LOCKED) {
 				current->mm->locked_vm += pages;
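
A note on the recurring construction in the SEGMEXEC limit checks above (in
mm/mlock.c, mm/mmap.c, mm/mprotect.c and mm/mremap.c): the "} else" placed
immediately before "#endif" deliberately chains into the unguarded "if" that
follows it, so exactly one limit is tested either way. Condensed:

/* With CONFIG_GRKERNSEC_PAX_SEGMEXEC=y the preprocessor leaves:
 *
 *	if (current->flags & PF_PAX_SEGMEXEC) {
 *		if (end > SEGMEXEC_TASK_SIZE)
 *			return -EINVAL;
 *	} else if (end > TASK_SIZE)
 *		return -EINVAL;
 *
 * and with the option unset, only the generic test survives:
 *
 *	if (end > TASK_SIZE)
 *		return -EINVAL;
 */
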
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/slab.c linux-2.4.26-leo/mm/slab.c
--- linux-2.4.26/mm/slab.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/mm/slab.c	2004-05-26 23:34:34.000000000 +0100
@@ -49,7 +49,8 @@
  *  constructors and destructors are called without any locking.
  *  Several members in kmem_cache_t and slab_t never change, they
  *	are accessed without any locking.
- *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
+ *  The per-cpu arrays are never accessed from the wrong cpu and need no
+ *  	locking; local interrupts are disabled, so slab code is preempt-safe.
  *  The non-constant members are protected with a per-cache irq spinlock.
  *
  * Further notes from the original documentation:
@@ -858,12 +859,14 @@
  */
 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 {
+	preempt_disable();
 	local_irq_disable();
 	func(arg);
 	local_irq_enable();
 
 	if (smp_call_function(func, arg, 1, 1))
 		BUG();
+	preempt_enable();
 }
 typedef struct ccupdate_struct_s
 {
diff -urN --exclude-from=diff-exclude linux-2.4.26/mm/vmalloc.c linux-2.4.26-leo/mm/vmalloc.c
--- linux-2.4.26/mm/vmalloc.c	2004-05-19 21:34:41.000000000 +0100
+++ linux-2.4.26-leo/mm/vmalloc.c	2004-06-22 20:53:47.000000000 +0100
@@ -140,7 +140,7 @@
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
 	do {
-		pte_t * pte = pte_alloc(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		if (alloc_area_pte(pte, address, end - address,
@@ -382,3 +382,22 @@
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+void *vcalloc(unsigned long nmemb, unsigned long elem_size)
+{
+	unsigned long size;
+	void *addr;
+
+	/*
+	 * Check that we're not going to overflow.
+	 */
+	if (nmemb > (ULONG_MAX / elem_size))
+		return NULL;
+
+	size = nmemb * elem_size;
+	addr = vmalloc(size);
+	if (addr)
+		memset(addr, 0, size);
+
+	return addr;
+}
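
The overflow guard is the point of vcalloc() above: a naive
vmalloc(nmemb * elem_size) lets the multiplication wrap on 32-bit hosts and
silently returns a zeroed mapping far smaller than the caller computed with.
Intended use, sketched (the struct, and the declaration assumed to accompany
this patch in linux/vmalloc.h, are illustrative):

#include <linux/vmalloc.h>

struct entry {
	unsigned long key;
	unsigned long val;
};

static struct entry *alloc_table(unsigned long nr_entries)
{
	/* nr_entries may be attacker-influenced: vcalloc() returns NULL
	 * when nr_entries * sizeof(struct entry) would overflow, rather
	 * than an undersized mapping. */
	return vcalloc(nr_entries, sizeof(struct entry));
}
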
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/core/dev.c linux-2.4.26-leo/net/core/dev.c
--- linux-2.4.26/net/core/dev.c	2004-05-19 21:34:42.000000000 +0100
+++ linux-2.4.26-leo/net/core/dev.c	2004-05-26 23:34:34.000000000 +0100
@@ -1093,9 +1093,15 @@
 		int cpu = smp_processor_id();
 
 		if (dev->xmit_lock_owner != cpu) {
+			/*
+			 * The spin_lock effectively disables preemption, but
+			 * we are about to drop that...
+			 */
+			preempt_disable();
 			spin_unlock(&dev->queue_lock);
 			spin_lock(&dev->xmit_lock);
 			dev->xmit_lock_owner = cpu;
+			preempt_enable();
 
 			if (!netif_queue_stopped(dev)) {
 				if (netdev_nit)
@@ -1274,7 +1280,7 @@
 
 int netif_rx(struct sk_buff *skb)
 {
-	int this_cpu = smp_processor_id();
+	int this_cpu;
 	struct softnet_data *queue;
 	unsigned long flags;
 
@@ -1284,9 +1290,10 @@
 	/* The code is rearranged so that the path is the most
 	   short when CPU is congested, but is still operating.
 	 */
-	queue = &softnet_data[this_cpu];
 
 	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	queue = &softnet_data[this_cpu];
 
 	netdev_rx_stat[this_cpu].total++;
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
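
The netif_rx() hunk above fixes a classic preemption hazard: sampling
smp_processor_id() while still preemptible lets the task migrate before
interrupts are disabled, after which it manipulates another CPU's softnet
queue without that CPU's protection. Before/after shapes, condensed into
illustrative functions (not patch code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>

static void enqueue_racy(struct sk_buff *skb)
{
	/* BAD under CONFIG_PREEMPT: a migration may occur between the
	 * smp_processor_id() read and local_irq_save(). */
	int cpu = smp_processor_id();
	struct softnet_data *queue = &softnet_data[cpu];
	unsigned long flags;

	local_irq_save(flags);
	__skb_queue_tail(&queue->input_pkt_queue, skb);
	local_irq_restore(flags);
}

static void enqueue_safe(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;
	int cpu;

	/* GOOD: with interrupts off the CPU id can no longer change */
	local_irq_save(flags);
	cpu = smp_processor_id();
	queue = &softnet_data[cpu];
	__skb_queue_tail(&queue->input_pkt_queue, skb);
	local_irq_restore(flags);
}
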
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/core/skbuff.c linux-2.4.26-leo/net/core/skbuff.c
--- linux-2.4.26/net/core/skbuff.c	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/net/core/skbuff.c	2004-05-26 23:34:34.000000000 +0100
@@ -111,33 +111,37 @@
 
 static __inline__ struct sk_buff *skb_head_from_pool(void)
 {
-	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+	struct sk_buff_head *list;
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
 
-	if (skb_queue_len(list)) {
-		struct sk_buff *skb;
-		unsigned long flags;
+	local_irq_save(flags);
 
-		local_irq_save(flags);
+	list = &skb_head_pool[smp_processor_id()].list;
+
+	if (skb_queue_len(list))
 		skb = __skb_dequeue(list);
-		local_irq_restore(flags);
-		return skb;
-	}
-	return NULL;
+
+	local_irq_restore(flags);
+	return skb;
 }
 
 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
 {
-	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+	struct sk_buff_head *list;
+	unsigned long flags;
 
-	if (skb_queue_len(list) < sysctl_hot_list_len) {
-		unsigned long flags;
+	local_irq_save(flags);
+	list = &skb_head_pool[smp_processor_id()].list;
 
-		local_irq_save(flags);
+	if (skb_queue_len(list) < sysctl_hot_list_len) {
 		__skb_queue_head(list, skb);
 		local_irq_restore(flags);
 
 		return;
 	}
+
+	local_irq_restore(flags);
 	kmem_cache_free(skbuff_head_cache, skb);
 }
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/af_inet.c linux-2.4.26-leo/net/ipv4/af_inet.c
--- linux-2.4.26/net/ipv4/af_inet.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/net/ipv4/af_inet.c	2004-06-22 20:53:47.000000000 +0100
@@ -83,6 +83,7 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -374,7 +375,12 @@
 	else
 		sk->protinfo.af_inet.pmtudisc = IP_PMTUDISC_WANT;
 
-	sk->protinfo.af_inet.id = 0;
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if (grsec_enable_randid)
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		sk->protinfo.af_inet.id = 0;
 
 	sock_init_data(sock,sk);
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/ip_output.c linux-2.4.26-leo/net/ipv4/ip_output.c
--- linux-2.4.26/net/ipv4/ip_output.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/net/ipv4/ip_output.c	2004-06-22 20:53:47.000000000 +0100
@@ -77,6 +77,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/mroute.h>
 #include <linux/netlink.h>
+#include <linux/grsecurity.h>
 
 /*
  *      Shall we try to damage output packets if routing dev changes?
@@ -514,7 +515,13 @@
 	 *	Begin outputting the bytes.
 	 */
 
-	id = sk->protinfo.af_inet.id++;
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if (grsec_enable_randid) {
+		id = htons(ip_randomid());
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	} else
+#endif
+		id = sk->protinfo.af_inet.id++;
 
 	do {
 		char *data;
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/netfilter/Config.in linux-2.4.26-leo/net/ipv4/netfilter/Config.in
--- linux-2.4.26/net/ipv4/netfilter/Config.in	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/net/ipv4/netfilter/Config.in	2004-06-22 20:53:47.000000000 +0100
@@ -33,6 +33,7 @@
   dep_tristate '  LENGTH match support' CONFIG_IP_NF_MATCH_LENGTH $CONFIG_IP_NF_IPTABLES
   dep_tristate '  TTL match support' CONFIG_IP_NF_MATCH_TTL $CONFIG_IP_NF_IPTABLES
   dep_tristate '  tcpmss match support' CONFIG_IP_NF_MATCH_TCPMSS $CONFIG_IP_NF_IPTABLES
+  dep_tristate '  stealth match support' CONFIG_IP_NF_MATCH_STEALTH $CONFIG_IP_NF_IPTABLES
   if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then
     dep_tristate '  Helper match support' CONFIG_IP_NF_MATCH_HELPER $CONFIG_IP_NF_IPTABLES
   fi
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/netfilter/ipt_stealth.c linux-2.4.26-leo/net/ipv4/netfilter/ipt_stealth.c
--- linux-2.4.26/net/ipv4/netfilter/ipt_stealth.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.4.26-leo/net/ipv4/netfilter/ipt_stealth.c	2004-06-22 20:53:47.000000000 +0100
@@ -0,0 +1,109 @@
+/* Kernel module to add stealth support.
+ *
+ * Copyright (C) 2002 Brad Spengler  <spender@grsecurity.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/sched.h>
+#include <linux/inet.h>
+#include <linux/stddef.h>
+
+#include <net/ip.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/route.h>
+#include <net/inet_common.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+MODULE_LICENSE("GPL");
+
+extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      const void *hdr,
+      u_int16_t datalen,
+      int *hotdrop)
+{
+	struct iphdr *ip = skb->nh.iph;
+	struct tcphdr *th = (struct tcphdr *) hdr;
+	struct udphdr *uh = (struct udphdr *) hdr;
+	struct sock *sk = NULL;
+
+	if (!ip || !hdr || offset) return 0;
+
+	switch(ip->protocol) {
+	case IPPROTO_TCP:
+		if (datalen < sizeof(struct tcphdr)) {
+			*hotdrop = 1;
+			return 0;
+		}
+		if (!(th->syn && !th->ack)) return 0;
+		sk = tcp_v4_lookup_listener(ip->daddr, ntohs(th->dest), ((struct rtable*)skb->dst)->rt_iif);
+		break;
+	case IPPROTO_UDP:
+		if (datalen < sizeof(struct udphdr)) {
+			*hotdrop = 1;
+			return 0;
+		}
+		sk = udp_v4_lookup(ip->saddr, uh->source, ip->daddr, uh->dest, skb->dev->ifindex);
+		break;
+	default:
+		return 0;
+	}
+
+	if (!sk) /* no socket is listening on this port -- match it */
+		return 1;
+	else {
+		sock_put(sk);
+		return 0;
+	}
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+checkentry(const char *tablename,
+           const struct ipt_ip *ip,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+	if (matchsize != IPT_ALIGN(0))
+		return 0;
+
+	if ((ip->proto == IPPROTO_TCP || ip->proto == IPPROTO_UDP) &&
+	    !(ip->invflags & IPT_INV_PROTO) &&
+	    (hook_mask & (1 << NF_IP_LOCAL_IN)))
+		return 1;
+
+	printk("stealth: Only works on TCP and UDP for the INPUT chain.\n");
+
+	return 0;
+}
+
+
+static struct ipt_match stealth_match
+= { { NULL, NULL }, "stealth", &match, &checkentry, NULL, THIS_MODULE };
+
+static int __init init(void)
+{
+	return ipt_register_match(&stealth_match);
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_match(&stealth_match);
+}
+
+module_init(init);
+module_exit(fini);
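As checkentry() enforces, the stealth match is only accepted in the INPUT
chain, for non-inverted TCP or UDP rules, and carries no option payload
(matchsize must equal IPT_ALIGN(0)). Semantically it fires when nothing is
listening on the destination port -- for TCP only on initial SYNs, so
established flows are never touched -- letting a DROP target silence the RST or
ICMP port-unreachable the stack would otherwise send. With the companion
userspace extension loaded (it is not part of this kernel patch), the intended
usage is presumably a rule pair along the lines of
"iptables -A INPUT -p tcp -m stealth -j DROP" plus the same for -p udp.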
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/netfilter/Makefile linux-2.4.26-leo/net/ipv4/netfilter/Makefile
--- linux-2.4.26/net/ipv4/netfilter/Makefile	2003-08-25 12:44:44.000000000 +0100
+++ linux-2.4.26-leo/net/ipv4/netfilter/Makefile	2004-06-22 20:53:47.000000000 +0100
@@ -86,6 +86,7 @@
 obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
 obj-$(CONFIG_IP_NF_MATCH_UNCLEAN) += ipt_unclean.o
 obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
+obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/tcp_ipv4.c linux-2.4.26-leo/net/ipv4/tcp_ipv4.c
--- linux-2.4.26/net/ipv4/tcp_ipv4.c	2004-05-19 21:34:42.000000000 +0100
+++ linux-2.4.26-leo/net/ipv4/tcp_ipv4.c	2004-06-22 20:53:48.000000000 +0100
@@ -67,6 +67,7 @@
 #include <linux/inet.h>
 #include <linux/stddef.h>
 #include <linux/ipsec.h>
+#include <linux/grsecurity.h>
 
 extern int sysctl_ip_dynaddr;
 extern int sysctl_ip_default_ttl;
@@ -223,9 +224,18 @@
 
 		spin_lock(&tcp_portalloc_lock);
 		rover = tcp_port_rover;
-		do {	rover++;
-			if ((rover < low) || (rover > high))
-				rover = low;
+		do {
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+			if (grsec_enable_randsrc && (high > low)) {
+				rover = low + (get_random_long() % (high - low));
+			} else
+#endif
+			{
+				rover++;
+				if ((rover < low) || (rover > high))
+					rover = low;
+			}
+
 			head = &tcp_bhash[tcp_bhashfn(rover)];
 			spin_lock(&head->lock);
 			for (tb = head->chain; tb; tb = tb->next)
@@ -548,6 +558,11 @@
 
 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_GRKERNSEC_RANDISN
+	if (likely(grsec_enable_randisn))
+		return ip_randomisn();
+	else
+#endif
 	return secure_tcp_sequence_number(skb->nh.iph->daddr,
 					  skb->nh.iph->saddr,
 					  skb->h.th->dest,
@@ -683,9 +698,16 @@
 		rover = tcp_port_rover;
 
 		do {
-			rover++;
-			if ((rover < low) || (rover > high))
-				rover = low;
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+			if (grsec_enable_randsrc && (high > low)) {
+				rover = low + (get_random_long() % (high - low));
+			} else
+#endif
+			{
+				rover++;
+				if ((rover < low) || (rover > high))
+					rover = low;
+			}
 			head = &tcp_bhash[tcp_bhashfn(rover)];
 			spin_lock(&head->lock);		
 
@@ -734,6 +756,15 @@
 		}
 		spin_unlock(&head->lock);
 
+#ifdef CONFIG_GRKERNSEC
+		gr_del_task_from_ip_table(current);
+		current->gr_saddr = sk->rcv_saddr;
+		current->gr_daddr = sk->daddr;
+		current->gr_sport = sk->sport;
+		current->gr_dport = sk->dport;
+		gr_add_to_task_ip_table(current);
+#endif
+
 		if (tw) {
 			tcp_tw_deschedule(tw);
 			tcp_timewait_kill(tw);
@@ -846,11 +877,22 @@
 	if (err)
 		goto failure;
 
-	if (!tp->write_seq)
+	if (!tp->write_seq) {
+#ifdef CONFIG_GRKERNSEC_RANDISN
+		if (likely(grsec_enable_randisn))
+			tp->write_seq = ip_randomisn();
+		else
+#endif
 		tp->write_seq = secure_tcp_sequence_number(sk->saddr, sk->daddr,
 							   sk->sport, usin->sin_port);
+	}
 
-	sk->protinfo.af_inet.id = tp->write_seq^jiffies;
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		sk->protinfo.af_inet.id = tp->write_seq^jiffies;
 
 	err = tcp_connect(sk);
 	if (err)
@@ -1572,7 +1614,13 @@
 	newtp->ext_header_len = 0;
 	if (newsk->protinfo.af_inet.opt)
 		newtp->ext_header_len = newsk->protinfo.af_inet.opt->optlen;
-	newsk->protinfo.af_inet.id = newtp->write_seq^jiffies;
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		newsk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		newsk->protinfo.af_inet.id = newtp->write_seq^jiffies;
 
 	tcp_sync_mss(newsk, dst->pmtu);
 	newtp->advmss = dst->advmss;
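A note on the RANDSRC branches above: low + (get_random_long() % (high - low))
draws from [low, high - 1] (the value 'high' itself stays reachable only via
the fallback increment), and a colliding draw simply costs another pass through
the existing retry loop. A standalone userspace model of the selection in the
two port-rover loops, with rand() standing in for the kernel's
get_random_long() and arbitrary demo bounds:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Userspace model of the randomized ephemeral-port pick above. */
static unsigned int pick_port(unsigned int low, unsigned int high)
{
	return low + ((unsigned int)rand() % (high - low)); /* [low, high-1] */
}

int main(void)
{
	int i;

	srand(time(NULL));
	for (i = 0; i < 5; i++)
		printf("%u\n", pick_port(32768, 61000));
	return 0;
}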
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/ipv4/udp.c linux-2.4.26-leo/net/ipv4/udp.c
--- linux-2.4.26/net/ipv4/udp.c	2004-05-19 21:34:42.000000000 +0100
+++ linux-2.4.26-leo/net/ipv4/udp.c	2004-06-22 20:53:48.000000000 +0100
@@ -91,6 +91,7 @@
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
+#include <linux/grsecurity.h>
 #include <net/sock.h>
 #include <net/udp.h>
 #include <net/icmp.h>
@@ -98,6 +99,11 @@
 #include <net/inet_common.h>
 #include <net/checksum.h>
 
+extern int gr_search_udp_recvmsg(const struct sock *sk,
+					const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk,
+					const struct sockaddr_in *addr);
+
 /*
  *	Snmp MIB for the UDP layer
  */
@@ -480,9 +486,16 @@
 		ufh.uh.dest = usin->sin_port;
 		if (ufh.uh.dest == 0)
 			return -EINVAL;
+
+		if (!gr_search_udp_sendmsg(sk, usin))
+			return -EPERM;
 	} else {
 		if (sk->state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
+
+		if (!gr_search_udp_sendmsg(sk, NULL))
+			return -EPERM;
+
 		ufh.daddr = sk->daddr;
 		ufh.uh.dest = sk->dport;
 		/* Open fast path for connected socket.
@@ -662,6 +675,11 @@
 	if (!skb)
 		goto out;
   
+	if (!gr_search_udp_recvmsg(sk, skb)) {
+		err = -EPERM;
+		goto out_free;
+	}
+
   	copied = skb->len - sizeof(struct udphdr);
 	if (copied > len) {
 		copied = len;
@@ -768,7 +786,13 @@
 	sk->daddr = rt->rt_dst;
 	sk->dport = usin->sin_port;
 	sk->state = TCP_ESTABLISHED;
-	sk->protinfo.af_inet.id = jiffies;
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		sk->protinfo.af_inet.id = jiffies;
 
 	sk_dst_set(sk, &rt->u.dst);
 	return(0);
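The two UDP hooks above are placed asymmetrically: gr_search_udp_sendmsg() runs
before any packet is built, on both the explicit-destination and the
connected-socket paths, while gr_search_udp_recvmsg() can only run after
skb_recv_datagram(), presumably because the policy decision needs the source
address carried in the received skb. A rejected datagram is released through
the existing out_free path and the caller sees -EPERM.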
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/netlink/af_netlink.c linux-2.4.26-leo/net/netlink/af_netlink.c
--- linux-2.4.26/net/netlink/af_netlink.c	2004-02-20 14:11:47.000000000 +0000
+++ linux-2.4.26-leo/net/netlink/af_netlink.c	2004-06-22 20:53:48.000000000 +0100
@@ -40,6 +40,7 @@
 #include <linux/proc_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/notifier.h>
+#include <linux/grsecurity.h>
 #include <net/sock.h>
 #include <net/scm.h>
 
@@ -626,7 +627,8 @@
 	   check them, when this message will be delivered
 	   to corresponding kernel module.   --ANK (980802)
 	 */
-	NETLINK_CB(skb).eff_cap = current->cap_effective;
+
+	NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink();
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/netsyms.c linux-2.4.26-leo/net/netsyms.c
--- linux-2.4.26/net/netsyms.c	2004-05-19 21:34:42.000000000 +0100
+++ linux-2.4.26-leo/net/netsyms.c	2004-06-22 20:53:48.000000000 +0100
@@ -25,6 +25,7 @@
 #include <net/checksum.h>
 #include <linux/etherdevice.h>
 #include <net/route.h>
+#include <linux/grsecurity.h>
 #ifdef CONFIG_HIPPI
 #include <linux/hippidevice.h>
 #endif
@@ -612,6 +613,49 @@
 
 EXPORT_SYMBOL(softnet_data);
 
+#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE)
+#if !defined (CONFIG_IPV6_MODULE) && !defined (CONFIG_KHTTPD) && !defined (CONFIG_KHTTPD_MODULE) && !defined (CONFIG_IP_SCTP_MODULE)
+EXPORT_SYMBOL(tcp_v4_lookup_listener);
+#endif
+extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
+EXPORT_SYMBOL(udp_v4_lookup);
+#endif
+
+#if defined(CONFIG_GRKERNSEC_RANDID)
+EXPORT_SYMBOL(ip_randomid);
+#endif
+#if defined(CONFIG_GRKERNSEC_RANDSRC) || defined(CONFIG_GRKERNSEC_RANDRPC)
+EXPORT_SYMBOL(get_random_long);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDISN
+EXPORT_SYMBOL(ip_randomisn);
+EXPORT_SYMBOL(grsec_enable_randisn);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDID
+EXPORT_SYMBOL(grsec_enable_randid);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+EXPORT_SYMBOL(grsec_enable_randsrc);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+EXPORT_SYMBOL(grsec_enable_randrpc);
+#endif
+
+EXPORT_SYMBOL(gr_cap_rtnetlink);
+
+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
+
+EXPORT_SYMBOL(gr_search_udp_recvmsg);
+EXPORT_SYMBOL(gr_search_udp_sendmsg);
+
+#ifdef CONFIG_UNIX_MODULE
+EXPORT_SYMBOL(gr_acl_handle_unix);
+EXPORT_SYMBOL(gr_acl_handle_mknod);
+EXPORT_SYMBOL(gr_handle_chroot_unix);
+EXPORT_SYMBOL(gr_handle_create);
+#endif
+
 #if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
 #include <net/iw_handler.h>
 EXPORT_SYMBOL(wireless_send_event);
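The conditional around EXPORT_SYMBOL(tcp_v4_lookup_listener) avoids a duplicate
definition: when IPv6, kHTTPd or SCTP are configured, stock netsyms.c evidently
already exports that symbol, and exporting it a second time breaks the build.
udp_v4_lookup is not normally exported at all, hence the unconditional extern
declaration plus export. The gr_* exports keep the hooks reachable from modular
code -- note the CONFIG_UNIX_MODULE guard for the helpers used by af_unix.c.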
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/socket.c linux-2.4.26-leo/net/socket.c
--- linux-2.4.26/net/socket.c	2004-02-20 14:11:48.000000000 +0000
+++ linux-2.4.26-leo/net/socket.c	2004-06-22 20:53:48.000000000 +0100
@@ -85,6 +85,18 @@
 #include <net/scm.h>
 #include <linux/netfilter.h>
 
+extern void gr_attach_curr_ip(const struct sock *sk);
+extern int gr_handle_sock_all(const int family, const int type,
+			      const int protocol);
+extern int gr_handle_sock_server(const struct sockaddr *sck);  
+extern int gr_handle_sock_client(const struct sockaddr *sck);
+extern int gr_search_connect(const struct socket * sock,
+			     const struct sockaddr_in * addr);  
+extern int gr_search_bind(const struct socket * sock,
+			  const struct sockaddr_in * addr);
+extern int gr_search_socket(const int domain, const int type,
+			    const int protocol);
+
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_read(struct file *file, char *buf,
 			 size_t size, loff_t *ppos);
@@ -132,7 +144,7 @@
 
 static struct net_proto_family *net_families[NPROTO];
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 static atomic_t net_family_lockct = ATOMIC_INIT(0);
 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
 
@@ -903,6 +915,16 @@
 	int retval;
 	struct socket *sock;
 
+	if(!gr_search_socket(family, type, protocol)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	if (gr_handle_sock_all(family, type, protocol)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	retval = sock_create(family, type, protocol, &sock);
 	if (retval < 0)
 		goto out;
@@ -998,12 +1020,26 @@
 {
 	struct socket *sock;
 	char address[MAX_SOCK_ADDR];
+	struct sockaddr * sck;
 	int err;
 
 	if((sock = sockfd_lookup(fd,&err))!=NULL)
 	{
-		if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0)
+		if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0) {
+			sck = (struct sockaddr *) address;
+
+			if(!gr_search_bind(sock, (struct sockaddr_in *) sck)) {
+				sockfd_put(sock);
+				return -EACCES;
+			}
+
+			if (gr_handle_sock_server(sck)) {
+				sockfd_put(sock);
+				return -EACCES;
+			}
+
 			err = sock->ops->bind(sock, (struct sockaddr *)address, addrlen);
+		}
 		sockfd_put(sock);
 	}			
 	return err;
@@ -1081,6 +1117,8 @@
 	if ((err = sock_map_fd(newsock)) < 0)
 		goto out_release;
 
+	gr_attach_curr_ip(newsock->sk);
+
 out_put:
 	sockfd_put(sock);
 out:
@@ -1108,6 +1146,7 @@
 {
 	struct socket *sock;
 	char address[MAX_SOCK_ADDR];
+	struct sockaddr * sck;
 	int err;
 
 	sock = sockfd_lookup(fd, &err);
@@ -1116,6 +1155,19 @@
 	err = move_addr_to_kernel(uservaddr, addrlen, address);
 	if (err < 0)
 		goto out_put;
+
+	sck = (struct sockaddr *) address;
+
+	if (!gr_search_connect(sock, (struct sockaddr_in *) sck)) {
+		err = -EACCES;
+		goto out_put;
+	}
+
+	if (gr_handle_sock_client(sck)) {
+		err = -EACCES;
+		goto out_put;
+	}
+
 	err = sock->ops->connect(sock, (struct sockaddr *) address, addrlen,
 				 sock->file->f_flags);
 out_put:
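Two things worth noting in the socket.c changes. First, widening the
net_families lock to CONFIG_SMP || CONFIG_PREEMPT: the read-side counting it
implements was previously compiled out on uniprocessor kernels, but once the
kernel can preempt, the race it guards against exists on UP as well. Second,
the gr_search_bind()/gr_search_connect() and gr_handle_sock_*() checks run
after move_addr_to_kernel(), so policy is evaluated against the kernel's
private copy of the address and cannot be raced by userspace rewriting the
sockaddr buffer after the check.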
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/sunrpc/pmap_clnt.c linux-2.4.26-leo/net/sunrpc/pmap_clnt.c
--- linux-2.4.26/net/sunrpc/pmap_clnt.c	2002-08-03 01:39:46.000000000 +0100
+++ linux-2.4.26-leo/net/sunrpc/pmap_clnt.c	2004-05-26 23:34:34.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/config.h>
 #include <linux/types.h>
 #include <linux/socket.h>
+#include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/uio.h>
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/sunrpc/xprt.c linux-2.4.26-leo/net/sunrpc/xprt.c
--- linux-2.4.26/net/sunrpc/xprt.c	2003-11-28 18:26:21.000000000 +0000
+++ linux-2.4.26-leo/net/sunrpc/xprt.c	2004-06-22 20:53:48.000000000 +0100
@@ -59,6 +59,7 @@
 #include <linux/unistd.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/file.h>
+#include <linux/grsecurity.h>
 
 #include <net/sock.h>
 #include <net/checksum.h>
@@ -1297,6 +1298,12 @@
 	}
 	ret = xid++;
 	spin_unlock(&xid_lock);
+
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+	if (grsec_enable_randrpc)
+		ret = (u32) get_random_long();
+#endif
+
 	return ret;
 }
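Note the randomized path still performs the xid++ under xid_lock and only then
overwrites ret; that keeps the hunk minimal at the price of one wasted
increment when CONFIG_GRKERNSEC_RANDRPC is active. Randomizing the XID does for
RPC what RANDISN does for TCP initial sequence numbers: an off-path attacker
can no longer predict the transaction ID needed to forge a reply.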
 
diff -urN --exclude-from=diff-exclude linux-2.4.26/net/unix/af_unix.c linux-2.4.26-leo/net/unix/af_unix.c
--- linux-2.4.26/net/unix/af_unix.c	2002-11-28 23:53:16.000000000 +0000
+++ linux-2.4.26-leo/net/unix/af_unix.c	2004-06-22 20:53:48.000000000 +0100
@@ -109,6 +109,7 @@
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
 #include <linux/rtnetlink.h>
+#include <linux/grsecurity.h>
 
 #include <asm/checksum.h>
 
@@ -599,6 +600,11 @@
 		if (err)
 			goto put_fail;
 
+		if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) {
+			err = -EACCES;
+			goto put_fail;
+		}
+
 		err = -ECONNREFUSED;
 		if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
 			goto put_fail;
@@ -622,6 +628,13 @@
 		if (u) {
 			struct dentry *dentry;
 			dentry = u->protinfo.af_unix.dentry;
+
+			if (!gr_handle_chroot_unix(u->peercred.pid)) {
+				err = -EPERM;
+				sock_put(u);
+				goto fail;
+			}
+
 			if (dentry)
 				UPDATE_ATIME(dentry->d_inode);
 		} else
@@ -720,9 +733,19 @@
 		 * All right, let's create it.
 		 */
 		mode = S_IFSOCK | (sock->inode->i_mode & ~current->fs->umask);
+
+		if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
+			err = -EACCES;
+			goto out_mknod_dput;
+		}
+
 		err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+
 		if (err)
 			goto out_mknod_dput;
+
+		gr_handle_create(dentry, nd.mnt);
+
 		up(&nd.dentry->d_inode->i_sem);
 		dput(nd.dentry);
 		nd.dentry = dentry;
@@ -740,6 +763,10 @@
 			goto out_unlock;
 		}
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+		sk->peercred.pid = current->pid;
+#endif
+
 		list = &unix_socket_table[addr->hash];
 	} else {
 		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
@@ -866,6 +893,9 @@
 	int st;
 	int err;
 	long timeo;
+#ifdef CONFIG_GRKERNSEC
+	struct task_struct *p, **htable;
+#endif
 
 	err = unix_mkname(sunaddr, addr_len, &hash);
 	if (err < 0)
@@ -989,6 +1019,17 @@
 	/* Set credentials */
 	sk->peercred = other->peercred;
 
+#ifdef CONFIG_GRKERNSEC
+	read_lock(&tasklist_lock);
+	htable = &pidhash[pid_hashfn(other->peercred.pid)];
+	for (p = *htable; p && p->pid != other->peercred.pid; p = p->pidhash_next);
+	if (p) {
+		p->curr_ip = current->curr_ip;
+		p->used_accept = 1;
+	}
+	read_unlock(&tasklist_lock);
+#endif
+
 	sock_hold(newsk);
 	unix_peer(sk)=newsk;
 	sock->state=SS_CONNECTED;
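The CONFIG_GRKERNSEC block above walks pidhash under tasklist_lock to find the
task that owns the peer credentials of the listening socket, then stamps it
with the connecting task's curr_ip and sets used_accept -- evidently so that
grsecurity's logging can attribute activity performed over a local UNIX socket
back to the remote IP of the client that originally connected to the daemon.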
