--- linux.orig/arch/alpha/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/alpha/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -509,7 +509,7 @@
 	depends on SMP
 	default "64"
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool "Discontiguous Memory Support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
 	help
@@ -518,6 +518,8 @@
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
+source "mm/Kconfig"
+
 config NUMA
 	bool "NUMA Support (EXPERIMENTAL)"
 	depends on DISCONTIGMEM
--- linux.orig/arch/alpha/defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/alpha/defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -96,7 +96,7 @@
 CONFIG_ALPHA_BROKEN_IRQ_MASK=y
 CONFIG_EISA=y
 # CONFIG_SMP is not set
-# CONFIG_DISCONTIGMEM is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
 CONFIG_VERBOSE_MCHECK=y
 CONFIG_VERBOSE_MCHECK_ON=1
 CONFIG_PCI_LEGACY_PROC=y
--- linux.orig/arch/alpha/mm/numa.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/alpha/mm/numa.c	2005-05-31 12:41:33.000000000 -0700
@@ -327,8 +327,6 @@
 	extern char _text, _etext, _data, _edata;
 	extern char __init_begin, __init_end;
 	unsigned long nid, i;
-	struct page * lmem_map;
-
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
 	reservedpages = 0;
@@ -338,10 +336,10 @@
 		 */
 		totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
 
-		lmem_map = node_mem_map(nid);
 		pfn = NODE_DATA(nid)->node_start_pfn;
 		for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
-			if (page_is_ram(pfn) && PageReserved(lmem_map+i))
+			if (page_is_ram(pfn) &&
+			    PageReserved(nid_page_nr(nid, i)))
 				reservedpages++;
 	}
 
@@ -373,18 +371,18 @@
 	show_free_areas();
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_online_node(nid) {
-		struct page * lmem_map = node_mem_map(nid);
 		i = node_spanned_pages(nid);
 		while (i-- > 0) {
+			struct page *page = nid_page_nr(nid, i);
 			total++;
-			if (PageReserved(lmem_map+i))
+			if (PageReserved(page))
 				reserved++;
-			else if (PageSwapCache(lmem_map+i))
+			else if (PageSwapCache(page))
 				cached++;
-			else if (!page_count(lmem_map+i))
+			else if (!page_count(page))
 				free++;
 			else
-				shared += page_count(lmem_map + i) - 1;
+				shared += page_count(page) - 1;
 		}
 	}
 	printk("%ld pages of RAM\n",total);
--- linux.orig/arch/arm/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/arm/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -342,7 +342,7 @@
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool
 	default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM) || ARCH_SA1100
 	help
@@ -351,6 +351,8 @@
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
+source "mm/Kconfig"
+
 config LEDS
 	bool "Timer and CPU usage LEDs"
 	depends on ARCH_CDB89712 || ARCH_CO285 || ARCH_EBSA110 || \
--- linux.orig/arch/arm26/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/arm26/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -179,6 +179,8 @@
 	  time by entering them here. As a minimum, you should specify the
 	  memory size and the root device (e.g., mem=64M root=/dev/nfs).
 
+source "mm/Kconfig"
+
 endmenu
 
 source "drivers/base/Kconfig"
--- linux.orig/arch/cris/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/cris/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -74,6 +74,8 @@
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
+source "mm/Kconfig"
+
 endmenu
 
 menu "Hardware setup"
--- linux.orig/arch/frv/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/frv/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -74,6 +74,8 @@
 	  with a lot of RAM, this can be wasteful of precious low memory.
 	  Setting this option will put user-space page tables in high memory.
 
+source "mm/Kconfig"
+
 choice
 	prompt "uClinux kernel load address"
 	depends on !MMU
--- linux.orig/arch/h8300/Kconfig.cpu~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/h8300/Kconfig.cpu	2005-05-31 11:38:56.000000000 -0700
@@ -180,4 +180,7 @@
 config PREEMPT
 	bool "Preemptible Kernel"
 	default n
+
+source "mm/Kconfig"
+
 endmenu
--- linux.orig/arch/i386/Kconfig~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-simple-numa-remap-space-allocator	2005-05-31 11:38:52.000000000 -0700
+++ linux/arch/i386/Kconfig	2005-05-31 12:41:34.000000000 -0700
@@ -68,7 +68,6 @@
 
 config X86_NUMAQ
 	bool "NUMAQ (IBM/Sequent)"
-	select DISCONTIGMEM
 	select NUMA
 	help
 	  This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -783,24 +782,46 @@
 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 	depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
 
-config DISCONTIGMEM
-	bool
-	depends on NUMA
-	default y
-
 config HAVE_ARCH_BOOTMEM_NODE
 	bool
 	depends on NUMA
 	default y
 
-config HAVE_MEMORY_PRESENT
+config ARCH_HAVE_MEMORY_PRESENT
 	bool
 	depends on DISCONTIGMEM
 	default y
 
 config NEED_NODE_MEMMAP_SIZE
 	bool
-	depends on DISCONTIGMEM
+	depends on DISCONTIGMEM || SPARSEMEM
+	default y
+
+config HAVE_ARCH_ALLOC_REMAP
+	bool
+	depends on NUMA
+	default y
+
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+	depends on NUMA
+
+config ARCH_DISCONTIGMEM_DEFAULT
+	def_bool y
+	depends on NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	depends on NUMA
+
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+	depends on ARCH_SPARSEMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+	bool
 	default y
 
 config HIGHPTE
--- linux.orig/arch/i386/kernel/setup.c~FROM-2.6.12-rc5-mm1-sparsemem-memory-model-for-i386	2005-05-31 11:39:03.000000000 -0700
+++ linux/arch/i386/kernel/setup.c	2005-05-31 12:41:32.000000000 -0700
@@ -25,6 +25,7 @@
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/mmzone.h>
 #include <linux/tty.h>
 #include <linux/ioport.h>
 #include <linux/acpi.h>
@@ -1022,7 +1023,7 @@
 		reserve_bootmem(addr, PAGE_SIZE);	
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init setup_bootmem_allocator(void);
 static unsigned long __init setup_memory(void)
 {
@@ -1072,9 +1073,9 @@
 	free_area_init(zones_size);
 }
 #else
-extern unsigned long setup_memory(void);
+extern unsigned long __init setup_memory(void);
 extern void zone_sizes_init(void);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
 void __init setup_bootmem_allocator(void)
 {
@@ -1475,6 +1476,7 @@
 #endif
 	paging_init();
 	remapped_pgdat_init();
+	sparse_init();
 	zone_sizes_init();
 
 	/*
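
The sparse_init() call added above, and the memory_present() calls added for ppc64
and x86_64 later in this patch, rely on the interface from the base sparsemem
patches, which is not shown in this section.  The assumed contract, as a sketch
(the exact prototypes are an assumption here):

	/* Architectures mark every pfn range that actually exists while they
	 * scan physical memory; sparse_init() then allocates a per-section
	 * mem_map covering only the ranges marked present.  Both are expected
	 * to be no-op stubs when CONFIG_SPARSEMEM is disabled.
	 */
	extern void memory_present(int nid, unsigned long start, unsigned long end);
	extern void sparse_init(void);
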
--- linux.orig/arch/i386/kernel/sys_i386.c~AA-PM-22-vm_immovable	2005-05-31 12:42:07.000000000 -0700
+++ linux/arch/i386/kernel/sys_i386.c	2005-05-31 12:42:07.000000000 -0700
@@ -70,7 +70,7 @@
 	unsigned long prot, unsigned long flags,
 	unsigned long fd, unsigned long pgoff)
 {
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	return do_mmap2(addr, len, prot, flags & ~MAP_IMMOVABLE, fd, pgoff);
 }
 
 /*
@@ -101,7 +101,8 @@
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	err = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	err = do_mmap2(a.addr, a.len, a.prot, a.flags & ~MAP_IMMOVABLE,
+	    a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return err;
 }
--- linux.orig/arch/i386/mm/Makefile~FROM-2.6.12-rc5-mm1-sparsemem-memory-model-for-i386	2005-05-31 11:39:03.000000000 -0700
+++ linux/arch/i386/mm/Makefile	2005-05-31 11:39:03.000000000 -0700
@@ -4,7 +4,7 @@
 
 obj-y	:= init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
 
-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
+obj-$(CONFIG_NUMA) += discontig.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
 obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
--- linux.orig/arch/i386/mm/discontig.c~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-early_pfn_to_nid-works-before-sparse-is-initialized	2005-05-31 11:38:51.000000000 -0700
+++ linux/arch/i386/mm/discontig.c	2005-05-31 12:41:33.000000000 -0700
@@ -42,12 +42,16 @@
  *                  populated the following initialisation.
  *
  * 1) node_online_map  - the map of all nodes configured (online) in the system
- * 2) physnode_map     - the mapping between a pfn and owning node
- * 3) node_start_pfn   - the starting page frame number for a node
+ * 2) node_start_pfn   - the starting page frame number for a node
  * 3) node_end_pfn     - the ending page fram number for a node
  */
+unsigned long node_start_pfn[MAX_NUMNODES];
+unsigned long node_end_pfn[MAX_NUMNODES];
+
 
+#ifdef CONFIG_DISCONTIGMEM
 /*
+ * 4) physnode_map     - the mapping between a pfn and owning node
  * physnode_map keeps track of the physical memory layout of a generic
  * numa node on a 256Mb break (each element of the array will
  * represent 256Mb of memory and will be marked by the node id.  so,
@@ -85,9 +89,7 @@
 
 	return (nr_pages + 1) * sizeof(struct page);
 }
-
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+#endif
 
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
@@ -108,6 +110,9 @@
 void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
+void *node_remap_end_vaddr[MAX_NUMNODES];
+void *node_remap_alloc_vaddr[MAX_NUMNODES];
+
 /*
  * FLAT - support for basic PC memory model with discontig enabled, essentially
  *        a single node with all available processors in it with a flat
@@ -146,6 +151,21 @@
 		BUG();
 }
 
+/* Find the owning node for a pfn. */
+int early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
+
+	for_each_node(nid) {
+		if (node_end_pfn[nid] == 0)
+			break;
+		if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
+			return nid;
+	}
+
+	return 0;
+}
+
 /* 
  * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
  * method.  For node zero take this from the bottom of memory, for
@@ -163,6 +183,21 @@
 	}
 }
 
+void *alloc_remap(int nid, unsigned long size)
+{
+	void *allocation = node_remap_alloc_vaddr[nid];
+
+	size = ALIGN(size, L1_CACHE_BYTES);
+
+	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+		return 0;
+
+	node_remap_alloc_vaddr[nid] += size;
+	memset(allocation, 0, size);
+
+	return allocation;
+}
+
 void __init remap_numa_kva(void)
 {
 	void *vaddr;
@@ -170,8 +205,6 @@
 	int node;
 
 	for_each_online_node(node) {
-		if (node == 0)
-			continue;
 		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
 			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
 			set_pmd_pfn((ulong) vaddr, 
@@ -185,13 +218,9 @@
 {
 	int nid;
 	unsigned long size, reserve_pages = 0;
+	unsigned long pfn;
 
 	for_each_online_node(nid) {
-		if (nid == 0)
-			continue;
-		if (!node_remap_size[nid])
-			continue;
-
 		/*
 		 * The acpi/srat node info can show hot-add memroy zones
 		 * where memory could be added but not currently present.
@@ -208,11 +237,24 @@
 		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
+
+		/*
+		 * Validate that the region we are allocating only
+		 * contains valid pages.
+		 */
+		for (pfn = node_end_pfn[nid] - size;
+		     pfn < node_end_pfn[nid]; pfn++)
+			if (!page_is_ram(pfn))
+				break;
+
+		if (pfn != node_end_pfn[nid])
+			size = 0;
+
 		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
 				size, nid);
 		node_remap_size[nid] = size;
-		reserve_pages += size;
 		node_remap_offset[nid] = reserve_pages;
+		reserve_pages += size;
 		printk("Shrinking node %d from %ld pages to %ld pages\n",
 			nid, node_end_pfn[nid], node_end_pfn[nid] - size);
 		node_end_pfn[nid] -= size;
@@ -265,12 +307,18 @@
 			(ulong) pfn_to_kaddr(max_low_pfn));
 	for_each_online_node(nid) {
 		node_remap_start_vaddr[nid] = pfn_to_kaddr(
-			(highstart_pfn + reserve_pages) - node_remap_offset[nid]);
+				highstart_pfn + node_remap_offset[nid]);
+		/* Init the node remap allocator */
+		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+			(node_remap_size[nid] * PAGE_SIZE);
+		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+			ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
 		allocate_pgdat(nid);
 		printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
 			(ulong) node_remap_start_vaddr[nid],
-			(ulong) pfn_to_kaddr(highstart_pfn + reserve_pages
-			    - node_remap_offset[nid] + node_remap_size[nid]));
+			(ulong) pfn_to_kaddr(highstart_pfn
+			   + node_remap_offset[nid] + node_remap_size[nid]));
 	}
 	printk("High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
@@ -333,23 +381,9 @@
 		}
 
 		zholes_size = get_zholes_size(nid);
-		/*
-		 * We let the lmem_map for node 0 be allocated from the
-		 * normal bootmem allocator, but other nodes come from the
-		 * remapped KVA area - mbligh
-		 */
-		if (!nid)
-			free_area_init_node(nid, NODE_DATA(nid),
-					zones_size, start, zholes_size);
-		else {
-			unsigned long lmem_map;
-			lmem_map = (unsigned long)node_remap_start_vaddr[nid];
-			lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
-			lmem_map &= PAGE_MASK;
-			NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
-			free_area_init_node(nid, NODE_DATA(nid), zones_size,
-				start, zholes_size);
-		}
+
+		free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
+				zholes_size);
 	}
 	return;
 }
@@ -358,24 +392,26 @@
 {
 #ifdef CONFIG_HIGHMEM
 	struct zone *zone;
+	struct page *page;
 
 	for_each_zone(zone) {
-		unsigned long node_pfn, node_high_size, zone_start_pfn;
-		struct page * zone_mem_map;
-		
+		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+
 		if (!is_highmem(zone))
 			continue;
 
-		printk("Initializing %s for node %d\n", zone->name,
-			zone->zone_pgdat->node_id);
-
-		node_high_size = zone->spanned_pages;
-		zone_mem_map = zone->zone_mem_map;
 		zone_start_pfn = zone->zone_start_pfn;
+		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
-		for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
-			one_highpage_init((struct page *)(zone_mem_map + node_pfn),
-					  zone_start_pfn + node_pfn, bad_ppro);
+		printk("Initializing %s for node %d (%08lx:%08lx)\n",
+				zone->name, zone->zone_pgdat->node_id,
+				zone_start_pfn, zone_end_pfn);
+
+		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			one_highpage_init(page, node_pfn, bad_ppro);
 		}
 	}
 	totalram_pages += totalhigh_pages;
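
The alloc_remap() function added above backs the new HAVE_ARCH_ALLOC_REMAP option:
each node's pg_data_t and mem_map can now be carved out of the remapped KVA area
without per-node special cases.  A sketch of how the generic node setup code is
assumed to consume it (the alloc_node_mem_map() name and body below are an
assumption, not part of this hunk):

	/* Sketch of the assumed caller in mm/page_alloc.c */
	static void __init alloc_node_mem_map(struct pglist_data *pgdat)
	{
		unsigned long size;
		struct page *map;

		if (!pgdat->node_spanned_pages || pgdat->node_mem_map)
			return;

		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);	/* try remapped KVA first */
		if (!map)
			map = alloc_bootmem_node(pgdat, size);	/* fall back to bootmem */
		pgdat->node_mem_map = map;
	}

When a node's remap area has been shrunk to zero (as the validation loop above does
for regions containing non-RAM pages), alloc_remap() returns NULL and the mem_map
simply comes from bootmem again.
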
--- linux.orig/arch/i386/mm/init.c~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-teach-discontig-about-sparse-ranges	2005-05-31 11:38:53.000000000 -0700
+++ linux/arch/i386/mm/init.c	2005-05-31 12:41:33.000000000 -0700
@@ -191,7 +191,7 @@
 
 extern int is_available_memory(efi_memory_desc_t *);
 
-static inline int page_is_ram(unsigned long pagenr)
+int page_is_ram(unsigned long pagenr)
 {
 	int i;
 	unsigned long addr, end;
@@ -277,7 +277,9 @@
 		SetPageReserved(page);
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
+extern void set_highmem_pages_init(int);
+#else
 static void __init set_highmem_pages_init(int bad_ppro)
 {
 	int pfn;
@@ -285,9 +287,7 @@
 		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
 	totalram_pages += totalhigh_pages;
 }
-#else
-extern void set_highmem_pages_init(int);
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_FLATMEM */
 
 #else
 #define kmap_init() do { } while (0)
@@ -298,10 +298,10 @@
 unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
 unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
-#ifndef CONFIG_DISCONTIGMEM
-#define remap_numa_kva() do {} while (0)
-#else
+#ifdef CONFIG_NUMA
 extern void __init remap_numa_kva(void);
+#else
+#define remap_numa_kva() do {} while (0)
 #endif
 
 static void __init pagetable_init (void)
@@ -526,7 +526,7 @@
 #else
 	num_physpages = max_low_pfn;
 #endif
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	max_mapnr = num_physpages;
 #endif
 }
@@ -540,7 +540,7 @@
 	int tmp;
 	int bad_ppro;
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	if (!mem_map)
 		BUG();
 #endif
--- linux.orig/arch/i386/mm/pgtable.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/i386/mm/pgtable.c	2005-05-31 12:41:33.000000000 -0700
@@ -36,7 +36,7 @@
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			page = pgdat->node_mem_map + i;
+			page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageHighMem(page))
 				highmem++;
--- linux.orig/arch/ia64/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/ia64/Kconfig	2005-05-31 12:41:34.000000000 -0700
@@ -193,7 +193,7 @@
 	bool
 	default y if VIRTUAL_MEM_MAP
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool "Discontiguous memory support"
 	depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP
 	default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
@@ -296,6 +296,8 @@
           Say Y here if you are building a kernel for a desktop, embedded
           or real-time system.  Say N if you are unsure.
 
+source "mm/Kconfig"
+
 config HAVE_DEC_LOCK
 	bool
 	depends on (SMP || PREEMPT)
--- linux.orig/arch/ia64/configs/sn2_defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/ia64/configs/sn2_defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -78,7 +78,7 @@
 CONFIG_NUMA=y
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 # CONFIG_IA64_CYCLONE is not set
 CONFIG_IOSAPIC=y
 CONFIG_IA64_SGI_SN_SIM=y
--- linux.orig/arch/ia64/defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/ia64/defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -77,7 +77,7 @@
 CONFIG_IA64_L1_CACHE_SHIFT=7
 CONFIG_NUMA=y
 CONFIG_VIRTUAL_MEM_MAP=y
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_IA64_CYCLONE=y
 CONFIG_IOSAPIC=y
 CONFIG_FORCE_MAX_ZONEORDER=18
--- linux.orig/arch/ia64/mm/discontig.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/ia64/mm/discontig.c	2005-05-31 12:41:34.000000000 -0700
@@ -560,14 +560,15 @@
 		int shared = 0, cached = 0, reserved = 0;
 		printk("Node ID: %d\n", pgdat->node_id);
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page = pgdat_page_nr(pgdat, i);
 			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
 				continue;
-			if (PageReserved(pgdat->node_mem_map+i))
+			if (PageReserved(page))
 				reserved++;
-			else if (PageSwapCache(pgdat->node_mem_map+i))
+			else if (PageSwapCache(page))
 				cached++;
-			else if (page_count(pgdat->node_mem_map+i))
-				shared += page_count(pgdat->node_mem_map+i)-1;
+			else if (page_count(page))
+				shared += page_count(page)-1;
 		}
 		total_present += present;
 		total_reserved += reserved;
--- linux.orig/arch/m32r/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/m32r/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -167,11 +167,13 @@
 	bool
 	default y
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool "Internal RAM Support"
 	depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP
 	default y
 
+source "mm/Kconfig"
+
 config IRAM_START
 	hex "Internal memory start address (hex)"
 	default "00f00000"
--- linux.orig/arch/m32r/mm/init.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/m32r/mm/init.c	2005-05-31 12:41:33.000000000 -0700
@@ -49,7 +49,7 @@
 	printk("Free swap:       %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			page = pgdat->node_mem_map + i;
+			page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageHighMem(page))
 				highmem++;
@@ -152,7 +152,7 @@
 	reservedpages = 0;
 	for_each_online_node(nid)
 		for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
-			if (PageReserved(NODE_DATA(nid)->node_mem_map + i))
+			if (PageReserved(nid_page_nr(nid, i)))
 				reservedpages++;
 
 	return reservedpages;
--- linux.orig/arch/m68k/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/m68k/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -357,6 +357,8 @@
 	  is hardwired on.  The 53c710 SCSI driver is known to suffer from
 	  this problem.
 
+source "mm/Kconfig"
+
 endmenu
 
 menu "General setup"
--- linux.orig/arch/m68knommu/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/m68knommu/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -532,6 +532,8 @@
 
 endchoice
 
+source "mm/Kconfig"
+
 endmenu
 
 config ISA_DMA_API
--- linux.orig/arch/mips/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/mips/Kconfig	2005-05-31 11:38:58.000000000 -0700
@@ -492,7 +492,7 @@
 	  which allows for more memory.  Your system is most probably
 	  running in M-Mode, so you should say N here.
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool
 	default y if SGI_IP27
 	help
--- linux.orig/arch/mips/configs/ip27_defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/mips/configs/ip27_defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -82,7 +82,7 @@
 # CONFIG_SGI_IP22 is not set
 CONFIG_SGI_IP27=y
 # CONFIG_SGI_SN0_N_MODE is not set
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_NUMA=y
 # CONFIG_MAPPED_KERNEL is not set
 # CONFIG_REPLICATE_KTEXT is not set
--- linux.orig/arch/mips/sgi-ip27/ip27-memory.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/mips/sgi-ip27/ip27-memory.c	2005-05-31 11:38:50.000000000 -0700
@@ -549,9 +549,8 @@
 		 */
 		numslots = node_getlastslot(node);
 		for (slot = 1; slot <= numslots; slot++) {
-			p = NODE_DATA(node)->node_mem_map +
-				(slot_getbasepfn(node, slot) -
-				 slot_getbasepfn(node, 0));
+			p = nid_page_nr(node, slot_getbasepfn(node, slot) -
+					      slot_getbasepfn(node, 0));
 
 			/*
 			 * Free valid memory in current slot.
--- linux.orig/arch/parisc/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/parisc/Kconfig	2005-05-31 11:38:58.000000000 -0700
@@ -148,7 +148,7 @@
 	default y if SMP
 	select HOTPLUG
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool "Discontiguous memory support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
 	help
@@ -157,6 +157,8 @@
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
+source "mm/Kconfig"
+
 config PREEMPT
 	bool
 #	bool "Preemptible Kernel"
--- linux.orig/arch/parisc/mm/init.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/parisc/mm/init.c	2005-05-31 12:41:33.000000000 -0700
@@ -506,7 +506,7 @@
 		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
 			struct page *p;
 
-			p = node_mem_map(i) + j - node_start_pfn(i);
+			p = nid_page_nr(i, j) - node_start_pfn(i);
 
 			total++;
 			if (PageReserved(p))
--- linux.orig/arch/ppc/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/ppc/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -910,6 +910,8 @@
 config HIGHMEM
 	bool "High memory support"
 
+source "mm/Kconfig"
+
 source "fs/Kconfig.binfmt"
 
 config PROC_DEVICETREE
--- linux.orig/arch/ppc64/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/ppc64/Kconfig	2005-05-31 12:41:31.000000000 -0700
@@ -198,13 +198,42 @@
 	  This option enables hardware multithreading on RS64 cpus.
 	  pSeries systems p620 and p660 have such a cpu type.
 
-config DISCONTIGMEM
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+	depends on !NUMA
+
+config ARCH_DISCONTIGMEM_ENABLE
 	bool "Discontiguous Memory Support"
 	depends on SMP && PPC_PSERIES
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	depends on ARCH_DISCONTIGMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+	def_bool y
+	depends on NEED_MULTIPLE_NODES
+
+# Some NUMA nodes have memory ranges that span
+# other nodes.  Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.
+#
+# This is a relatively temporary hack that should
+# be able to go away when sparsemem is fully in
+# place
+config NODES_SPAN_OTHER_NODES
+	def_bool y
+	depends on NEED_MULTIPLE_NODES
+
 config NUMA
 	bool "NUMA support"
-	depends on DISCONTIGMEM
+	default y if DISCONTIGMEM || SPARSEMEM
 
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
--- linux.orig/arch/ppc64/configs/pSeries_defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/ppc64/configs/pSeries_defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -82,7 +82,7 @@
 CONFIG_IOMMU_VMERGE=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=128
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_NUMA=y
 CONFIG_SCHED_SMT=y
 # CONFIG_PREEMPT is not set
--- linux.orig/arch/ppc64/defconfig~FROM-2.6.12-rc5-mm1-update-all-defconfigs-for-arch_discontigmem_enable	2005-05-31 11:38:56.000000000 -0700
+++ linux/arch/ppc64/defconfig	2005-05-31 11:38:56.000000000 -0700
@@ -84,7 +84,7 @@
 CONFIG_IOMMU_VMERGE=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=32
-CONFIG_DISCONTIGMEM=y
+CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 # CONFIG_NUMA is not set
 # CONFIG_SCHED_SMT is not set
 # CONFIG_PREEMPT is not set
--- linux.orig/arch/ppc64/kernel/setup.c~FROM-2.6.12-rc5-mm1-ppc64-sparsemem-memory-model	2005-05-31 11:39:07.000000000 -0700
+++ linux/arch/ppc64/kernel/setup.c	2005-05-31 12:41:30.000000000 -0700
@@ -1059,6 +1059,7 @@
 
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
+	sparse_init();
 
 	/* initialize the syscall map in systemcfg */
 	setup_syscall_map();
--- linux.orig/arch/ppc64/mm/Makefile~FROM-2.6.12-rc5-mm1-ppc64-sparsemem-memory-model	2005-05-31 11:39:07.000000000 -0700
+++ linux/arch/ppc64/mm/Makefile	2005-05-31 11:39:07.000000000 -0700
@@ -6,6 +6,6 @@
 
 obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
 	slb_low.o slb.o stab.o mmap.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
--- linux.orig/arch/ppc64/mm/init.c~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/arch/ppc64/mm/init.c	2005-05-31 12:41:33.000000000 -0700
@@ -101,7 +101,7 @@
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			page = pgdat->node_mem_map + i;
+			page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageReserved(page))
 				reserved++;
@@ -606,7 +606,7 @@
  * Initialize the bootmem system and give it all the memory we
  * have available.
  */
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
 	unsigned long i;
@@ -628,12 +628,20 @@
 
 	max_pfn = max_low_pfn;
 
-	/* add all physical memory to the bootmem map. Also find the first */
+	/* Add all physical memory to the bootmem map, mark each area
+	 * present.
+	 */
 	for (i=0; i < lmb.memory.cnt; i++) {
 		unsigned long physbase, size;
+		unsigned long start_pfn, end_pfn;
 
 		physbase = lmb.memory.region[i].physbase;
 		size = lmb.memory.region[i].size;
+
+		start_pfn = physbase >> PAGE_SHIFT;
+		end_pfn = start_pfn + (size >> PAGE_SHIFT);
+		memory_present(0, start_pfn, end_pfn);
+
 		free_bootmem(physbase, size);
 	}
 
@@ -672,7 +680,7 @@
 	free_area_init_node(0, NODE_DATA(0), zones_size,
 			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 }
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
 static struct kcore_list kcore_vmem;
 
@@ -703,7 +711,7 @@
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 	int nid;
 #endif
 	pg_data_t *pgdat;
@@ -714,7 +722,7 @@
 	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
         for_each_online_node(nid) {
 		if (NODE_DATA(nid)->node_spanned_pages != 0) {
 			printk("freeing bootmem node %x\n", nid);
@@ -729,7 +737,7 @@
 
 	for_each_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			page = pgdat->node_mem_map + i;
+			page = pgdat_page_nr(pgdat, i);
 			if (PageReserved(page))
 				reservedpages++;
 		}
--- linux.orig/arch/ppc64/mm/numa.c~FROM-2.6.12-rc5-mm1-ppc64-add-memory-present	2005-05-31 11:39:07.000000000 -0700
+++ linux/arch/ppc64/mm/numa.c	2005-05-31 11:39:07.000000000 -0700
@@ -440,6 +440,8 @@
 		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
 			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
 				numa_domain;
+		memory_present(numa_domain, start >> PAGE_SHIFT,
+						(start + size) >> PAGE_SHIFT);
 
 		if (--ranges)
 			goto new_range;
@@ -481,6 +483,7 @@
 
 	for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
 		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
+	memory_present(0, 0, init_node_data[0].node_end_pfn);
 }
 
 static void __init dump_numa_topology(void)
--- linux.orig/arch/s390/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/s390/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -226,6 +226,8 @@
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+source "mm/Kconfig"
+
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
--- linux.orig/arch/sh/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/sh/Kconfig	2005-05-31 11:38:58.000000000 -0700
@@ -486,7 +486,7 @@
        depends on CPU_SUBTYPE_ST40STB1 || CPU_SUBTYPE_ST40GX1
        default y
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
 	bool
 	depends on SH_HP690
 	default y
@@ -496,6 +496,8 @@
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
+source "mm/Kconfig"
+
 config ZERO_PAGE_OFFSET
 	hex "Zero page offset"
 	default "0x00001000" if !(SH_MPC1211 || SH_SH03)
--- linux.orig/arch/sh64/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/sh64/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -217,6 +217,8 @@
 	bool "Preemptible Kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
 
+source "mm/Kconfig"
+
 endmenu
 
 menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
--- linux.orig/arch/sparc/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/sparc/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -291,6 +291,8 @@
 	  If you have more than 8 printers, you need to increase the LP_NO
 	  macro in lp.c and the PARPORT_MAX macro in parport.h.
 
+source "mm/Kconfig"
+
 endmenu
 
 source "drivers/base/Kconfig"
--- linux.orig/arch/sparc64/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/sparc64/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -484,6 +484,8 @@
 
 	  NOTE: This option WILL override the PROM bootargs setting!
 
+source "mm/Kconfig"
+
 endmenu
 
 source "drivers/base/Kconfig"
--- linux.orig/arch/um/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/um/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -69,6 +69,7 @@
 	option will shrink the UML binary slightly.
 
 source "arch/um/Kconfig_arch"
+source "mm/Kconfig"
 
 config LD_SCRIPT_STATIC
 	bool
--- linux.orig/arch/v850/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/v850/Kconfig	2005-05-31 11:38:54.000000000 -0700
@@ -218,6 +218,8 @@
 	     a lot of RAM, and you need to able to allocate very large
 	     contiguous chunks. If unsure, say N.
 
+source "mm/Kconfig"
+
 endmenu
 
 
--- linux.orig/arch/x86_64/Kconfig~FROM-2.6.12-rc5-mm1-make-each-arch-use-mm-kconfig	2005-05-31 11:38:54.000000000 -0700
+++ linux/arch/x86_64/Kconfig	2005-05-31 12:41:32.000000000 -0700
@@ -265,7 +265,7 @@
 	  into virtual nodes when booted with "numa=fake=N", where N is the
 	  number of nodes. This is only useful for debugging.
 
-config DISCONTIGMEM
+config ARCH_DISCONTIGMEM_ENABLE
        bool
        depends on NUMA
        default y
@@ -274,6 +274,27 @@
        bool
        default n
 
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+	depends on NUMA
+
+config ARCH_DISCONTIGMEM_DEFAULT
+	def_bool y
+	depends on NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	depends on NUMA
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+	depends on !NUMA
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+	def_bool y
+
 config HAVE_DEC_LOCK
 	bool
 	depends on SMP
--- linux.orig/arch/x86_64/kernel/aperture.c~FROM-2.6.12-rc5-mm1-remove-direct-ref-to-contig_page_data-for-x86-64	2005-05-31 11:39:09.000000000 -0700
+++ linux/arch/x86_64/kernel/aperture.c	2005-05-31 11:39:09.000000000 -0700
@@ -42,11 +42,7 @@
 
 static u32 __init allocate_aperture(void) 
 {
-#ifdef CONFIG_DISCONTIGMEM
 	pg_data_t *nd0 = NODE_DATA(0);
-#else
-	pg_data_t *nd0 = &contig_page_data;
-#endif	
 	u32 aper_size;
 	void *p; 
 
--- linux.orig/arch/x86_64/kernel/head64.c~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/arch/x86_64/kernel/head64.c	2005-05-31 11:39:10.000000000 -0700
@@ -94,7 +94,7 @@
 	s = strstr(saved_command_line, "earlyprintk=");
 	if (s != NULL)
 		setup_early_printk(s);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	s = strstr(saved_command_line, "numa=");
 	if (s != NULL)
 		numa_setup(s+5);
--- linux.orig/arch/x86_64/kernel/setup.c~FROM-2.6.12-rc5-mm1-remove-direct-ref-to-contig_page_data-for-x86-64	2005-05-31 11:39:09.000000000 -0700
+++ linux/arch/x86_64/kernel/setup.c	2005-05-31 11:39:12.000000000 -0700
@@ -40,6 +40,8 @@
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/edd.h>
+#include <linux/mmzone.h>
+
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -348,7 +350,7 @@
 		if (!memcmp(from, "mem=", 4))
 			parse_memopt(from+4, &from); 
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 		if (!memcmp(from, "numa=", 5))
 			numa_setup(from+5); 
 #endif
@@ -377,17 +379,20 @@
 	*cmdline_p = command_line;
 }
 
-#ifndef CONFIG_DISCONTIGMEM
-static void __init contig_initmem_init(void)
-{
-        unsigned long bootmap_size, bootmap; 
-        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-        if (bootmap == -1L) 
-                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-        e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT); 
-        reserve_bootmem(bootmap, bootmap_size);
+#ifndef CONFIG_NUMA
+static void __init
+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long bootmap_size, bootmap;
+
+	memory_present(0, start_pfn, end_pfn);
+	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+	if (bootmap == -1L)
+		panic("Cannot find bootmem map of size %ld\n",bootmap_size);
+	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
+	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
+	reserve_bootmem(bootmap, bootmap_size);
 } 
 #endif
 
@@ -554,10 +559,10 @@
 	acpi_numa_init();
 #endif
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	numa_initmem_init(0, end_pfn); 
 #else
-	contig_initmem_init(); 
+	contig_initmem_init(0, end_pfn);
 #endif
 
 	/* Reserve direct mapping */
@@ -618,6 +623,8 @@
 		}
 	}
 #endif
+
+	sparse_init();
 	paging_init();
 
 	check_ioapic();
--- linux.orig/arch/x86_64/mm/Makefile~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/arch/x86_64/mm/Makefile	2005-05-31 11:39:10.000000000 -0700
@@ -4,7 +4,7 @@
 
 obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_K8_NUMA) += k8topology.o
 obj-$(CONFIG_ACPI_NUMA) += srat.o
 
--- linux.orig/arch/x86_64/mm/init.c~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/arch/x86_64/mm/init.c	2005-05-31 12:41:32.000000000 -0700
@@ -318,7 +318,7 @@
 	flush_tlb_all();
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
 	{
@@ -427,13 +427,16 @@
 	reservedpages = 0;
 
 	/* this will put all low memory onto the freelists */
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	totalram_pages += numa_free_all_bootmem();
 	tmp = 0;
 	/* should count reserved pages here for all nodes */ 
 #else
+
+#ifdef CONFIG_FLATMEM
 	max_mapnr = end_pfn;
 	if (!mem_map) BUG();
+#endif
 
 	totalram_pages += free_all_bootmem();
 
@@ -515,7 +518,7 @@
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
 { 
 	/* Should check here against the e820 map to avoid double free */ 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
   	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 #else       		
--- linux.orig/arch/x86_64/mm/ioremap.c~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/arch/x86_64/mm/ioremap.c	2005-05-31 11:39:10.000000000 -0700
@@ -178,7 +178,7 @@
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
--- linux.orig/arch/x86_64/mm/numa.c~FROM-2.6.12-rc5-mm1-add-x86-64-specific-support-for-sparsemem	2005-05-31 11:39:11.000000000 -0700
+++ linux/arch/x86_64/mm/numa.c	2005-05-31 11:39:11.000000000 -0700
@@ -66,6 +66,13 @@
 	return -1; 
 }
 
+#ifdef CONFIG_SPARSEMEM
+int early_pfn_to_nid(unsigned long pfn)
+{
+	return phys_to_nid(pfn << PAGE_SHIFT);
+}
+#endif
+
 /* Initialize bootmem allocator for a node */
 void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 { 
@@ -80,6 +87,7 @@
 	start_pfn = start >> PAGE_SHIFT;
 	end_pfn = end >> PAGE_SHIFT;
 
+	memory_present(nodeid, start_pfn, end_pfn);
 	nodedata_phys = find_e820_area(start, end, pgdat_size); 
 	if (nodedata_phys == -1L) 
 		panic("Cannot find memory pgdat in node %d\n", nodeid);
--- linux.orig/fs/aio.c~AA-PM-24-aio	2005-05-31 12:42:08.000000000 -0700
+++ linux/fs/aio.c	2005-05-31 12:42:08.000000000 -0700
@@ -130,7 +130,8 @@
 	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
 	down_write(&ctx->mm->mmap_sem);
 	info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
-				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
+				  PROT_READ|PROT_WRITE,
+				  MAP_ANON|MAP_PRIVATE|MAP_IMMOVABLE,
 				  0);
 	if (IS_ERR((void *)info->mmap_base)) {
 		up_write(&ctx->mm->mmap_sem);
--- linux.orig/fs/buffer.c~AA-PM-20.0-nowriteback	2005-05-31 12:42:05.000000000 -0700
+++ linux/fs/buffer.c	2005-05-31 12:42:05.000000000 -0700
@@ -3021,6 +3021,50 @@
 	return 0;
 }
 
+void
+generic_move_buffer(struct page *page, struct page *newpage)
+{
+	struct buffer_head *bh, *head;
+
+	spin_lock(&page->mapping->private_lock);
+	bh = head = page_buffers(page);
+	do {
+		get_bh(bh);
+		lock_buffer(bh);
+	} while ((bh = bh->b_this_page) != head);
+
+	newpage->private = page->private;
+	page->private = 0;
+	page_cache_release(page);
+	page_cache_get(newpage);
+
+	/* XXX */
+	ClearPagePrivate(page);
+	SetPagePrivate(newpage);
+
+	bh = head;
+	do {
+		BUG_ON(bh->b_page != page);
+		set_bh_page(bh, newpage, (unsigned long)bh->b_data & (PAGE_SIZE - 1));
+	} while ((bh = bh->b_this_page) != head);
+	spin_unlock(&page->mapping->private_lock);
+	/* buffers are unlocked when remapping is complete */
+}
+
+void
+unlock_page_buffer(struct page *page)
+{
+	struct buffer_head *bh, *head;
+
+	spin_lock(&page->mapping->private_lock);
+	bh = head = page_buffers(page);
+	do {
+		put_bh(bh);
+		unlock_buffer(bh);
+	} while ((bh = bh->b_this_page) != head);
+	spin_unlock(&page->mapping->private_lock);
+}
+
 /*
  * Buffer-head allocation
  */
@@ -3145,6 +3189,7 @@
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_move_buffer);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
 EXPORT_SYMBOL(ll_rw_block);
--- linux.orig/fs/ext2/inode.c~AA-PM-21-nowriteback-ext2	2005-05-31 12:42:05.000000000 -0700
+++ linux/fs/ext2/inode.c	2005-05-31 12:42:05.000000000 -0700
@@ -31,6 +31,7 @@
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
 #include <linux/mpage.h>
+#include <linux/mmigrate.h>
 #include "ext2.h"
 #include "acl.h"
 
@@ -679,6 +680,12 @@
 	return mpage_writepages(mapping, wbc, ext2_get_block);
 }
 
+static int
+ext2_migrate_page(struct page *from, struct page *to)
+{
+	return generic_migrate_page(from, to, migrate_page_buffer);
+}
+
 struct address_space_operations ext2_aops = {
 	.readpage		= ext2_readpage,
 	.readpages		= ext2_readpages,
@@ -689,6 +696,7 @@
 	.bmap			= ext2_bmap,
 	.direct_IO		= ext2_direct_IO,
 	.writepages		= ext2_writepages,
+	.migrate_page		= ext2_migrate_page,
 };
 
 struct address_space_operations ext2_nobh_aops = {
@@ -701,6 +709,7 @@
 	.bmap			= ext2_bmap,
 	.direct_IO		= ext2_direct_IO,
 	.writepages		= ext2_writepages,
+	.migrate_page		= ext2_migrate_page,
 };
 
 /*
--- linux.orig/fs/ext3/inode.c~AA-PM-21-nowriteback-ext3	2005-05-31 12:42:06.000000000 -0700
+++ linux/fs/ext3/inode.c	2005-05-31 12:42:06.000000000 -0700
@@ -35,6 +35,7 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
+#include <linux/mmigrate.h>
 #include <linux/uio.h>
 #include "xattr.h"
 #include "acl.h"
@@ -1537,6 +1538,12 @@
 	return __set_page_dirty_nobuffers(page);
 }
 
+static int
+ext3_migrate_page(struct page *from, struct page *to)
+{
+	return generic_migrate_page(from, to, migrate_page_buffer);
+}
+
 static struct address_space_operations ext3_ordered_aops = {
 	.readpage	= ext3_readpage,
 	.readpages	= ext3_readpages,
@@ -1548,6 +1555,7 @@
 	.invalidatepage	= ext3_invalidatepage,
 	.releasepage	= ext3_releasepage,
 	.direct_IO	= ext3_direct_IO,
+	.migrate_page	= ext3_migrate_page,
 };
 
 static struct address_space_operations ext3_writeback_aops = {
@@ -1561,6 +1569,7 @@
 	.invalidatepage	= ext3_invalidatepage,
 	.releasepage	= ext3_releasepage,
 	.direct_IO	= ext3_direct_IO,
+	.migrate_page	= ext3_migrate_page,
 };
 
 static struct address_space_operations ext3_journalled_aops = {
@@ -1574,6 +1583,7 @@
 	.bmap		= ext3_bmap,
 	.invalidatepage	= ext3_invalidatepage,
 	.releasepage	= ext3_releasepage,
+	.migrate_page	= ext3_migrate_page,
 };
 
 void ext3_set_aops(struct inode *inode)
--- linux.orig/fs/namei.c~AA-PM-27-symlink	2005-05-31 12:42:09.000000000 -0700
+++ linux/fs/namei.c	2005-05-31 12:42:09.000000000 -0700
@@ -2382,10 +2382,19 @@
 int page_symlink(struct inode *inode, const char *symname, int len)
 {
 	struct address_space *mapping = inode->i_mapping;
-	struct page *page = grab_cache_page(mapping, 0);
+	struct page *page;
 	int err = -ENOMEM;
 	char *kaddr;
 
+	/* XXXX:
+	 * This is temporary code.  It should be replaced with a proper
+	 * implementation once the scheme for specifying hot-removable memory
+	 * regions is defined, or removed if symlink pages become hot-pluggable.
+	 *				5/Oct/2004	-- taka
+	 */
+	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+
+	page = grab_cache_page(mapping, 0);
 	if (!page)
 		goto fail;
 	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
--- linux.orig/include/asm-alpha/mman.h~AA-PM-98-MAP_IMMOVABLE-lots-o-arches	2005-05-31 12:42:11.000000000 -0700
+++ linux/include/asm-alpha/mman.h	2005-05-31 12:42:11.000000000 -0700
@@ -28,6 +28,7 @@
 #define MAP_NORESERVE	0x10000		/* don't check for reservations */
 #define MAP_POPULATE	0x20000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x40000		/* do not block on IO */
+#define MAP_IMMOVABLE	0x80000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_SYNC		2		/* synchronous memory sync */
--- linux.orig/include/asm-alpha/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-alpha/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -57,7 +57,6 @@
  * Given a kernel address, find the home node of the underlying memory.
  */
 #define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 
 #define local_mapnr(kvaddr) \
@@ -108,7 +107,7 @@
 #define pfn_to_page(pfn)						\
 ({									\
  	unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT);	\
-	(node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr));	\
+	(NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr));	\
 })
 
 #define page_to_pfn(page)						\
--- linux.orig/include/asm-arm/mman.h~AA-PM-98-MAP_IMMOVABLE-lots-o-arches	2005-05-31 12:42:11.000000000 -0700
+++ linux/include/asm-arm/mman.h	2005-05-31 12:42:11.000000000 -0700
@@ -22,6 +22,7 @@
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 #define MAP_POPULATE	0x8000		/* populate (prefault) page tables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_IMMOVABLE   0x20000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
--- linux.orig/include/asm-arm26/mman.h~AA-PM-98-MAP_IMMOVABLE-lots-o-arches	2005-05-31 12:42:11.000000000 -0700
+++ linux/include/asm-arm26/mman.h	2005-05-31 12:42:11.000000000 -0700
@@ -22,6 +22,7 @@
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 #define MAP_POPULATE    0x8000          /* populate (prefault) page tables */
 #define MAP_NONBLOCK    0x10000         /* do not block on IO */
+#define MAP_IMMOVABLE	0x20000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
--- linux.orig/include/asm-i386/mman.h~AA-PM-22-vm_immovable	2005-05-31 12:42:07.000000000 -0700
+++ linux/include/asm-i386/mman.h	2005-05-31 12:42:07.000000000 -0700
@@ -22,6 +22,7 @@
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_IMMOVABLE	0x20000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
--- linux.orig/include/asm-i386/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-i386/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -8,7 +8,9 @@
 
 #include <asm/smp.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid)	(node_data[nid])
 
 #ifdef CONFIG_NUMA
 	#ifdef CONFIG_X86_NUMAQ
@@ -21,8 +23,28 @@
 	#define get_zholes_size(n) (0)
 #endif /* CONFIG_NUMA */
 
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid)		(node_data[nid])
+extern int get_memcfg_numa_flat(void );
+/*
+ * This allows any one NUMA architecture to be compiled
+ * for, and still fall back to the flat function if it
+ * fails.
+ */
+static inline void get_memcfg_numa(void)
+{
+#ifdef CONFIG_X86_NUMAQ
+	if (get_memcfg_numaq())
+		return;
+#elif CONFIG_ACPI_SRAT
+	if (get_memcfg_from_srat())
+		return;
+#endif
+
+	get_memcfg_numa_flat();
+}
+
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_DISCONTIGMEM
 
 /*
  * generic node memory support, the following assumptions apply:
@@ -48,26 +70,6 @@
 #endif
 }
 
-/*
- * Following are macros that are specific to this numa platform.
- */
-#define reserve_bootmem(addr, size) \
-	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
-#define alloc_bootmem(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-
 #define node_localnr(pfn, nid)		((pfn) - node_data[nid]->node_start_pfn)
 
 /*
@@ -79,7 +81,6 @@
  */
 #define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)						\
 ({									\
@@ -100,7 +101,7 @@
 ({									\
 	unsigned long __pfn = pfn;					\
 	int __node  = pfn_to_nid(__pfn);				\
-	&node_mem_map(__node)[node_localnr(__pfn,__node)];		\
+	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)];	\
 })
 
 #define page_to_pfn(pg)							\
@@ -122,26 +123,34 @@
 		return (pfn < node_end_pfn(nid));
 	return 0;
 }
-#endif
+#endif /* CONFIG_X86_NUMAQ */
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 
-extern int get_memcfg_numa_flat(void );
 /*
- * This allows any one NUMA architecture to be compiled
- * for, and still fall back to the flat function if it
- * fails.
+ * Following are macros that are specific to this numa platform.
  */
-static inline void get_memcfg_numa(void)
-{
-#ifdef CONFIG_X86_NUMAQ
-	if (get_memcfg_numaq())
-		return;
-#elif CONFIG_ACPI_SRAT
-	if (get_memcfg_from_srat())
-		return;
-#endif
+#define reserve_bootmem(addr, size) \
+	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
+#define alloc_bootmem(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_pages(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
+#define alloc_bootmem_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low_pages_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
 
-	get_memcfg_numa_flat();
-}
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+extern int early_pfn_to_nid(unsigned long pfn);
 
-#endif /* CONFIG_DISCONTIGMEM */
 #endif /* _ASM_MMZONE_H_ */
--- linux.orig/include/asm-i386/page.h~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-teach-discontig-about-sparse-ranges	2005-05-31 11:38:53.000000000 -0700
+++ linux/include/asm-i386/page.h	2005-05-31 12:41:31.000000000 -0700
@@ -119,6 +119,8 @@
 
 extern int sysctl_legacy_va_layout;
 
+extern int page_is_ram(unsigned long pagenr);
+
 #endif /* __ASSEMBLY__ */
 
 #ifdef __ASSEMBLY__
@@ -134,11 +136,11 @@
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn)	(mem_map + (pfn))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_FLATMEM */
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
--- linux.orig/include/asm-i386/pgtable.h~FROM-2.6.12-rc5-mm1-sparsemem-memory-model-for-i386	2005-05-31 11:39:03.000000000 -0700
+++ linux/include/asm-i386/pgtable.h	2005-05-31 11:39:03.000000000 -0700
@@ -398,9 +398,9 @@
 
 #endif /* !__ASSEMBLY__ */
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)	(1)
-#endif /* !CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_FLATMEM */
 
 #define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
 		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/include/asm-i386/sparsemem.h	2005-05-31 12:41:32.000000000 -0700
@@ -0,0 +1,31 @@
+#ifndef _I386_SPARSEMEM_H
+#define _I386_SPARSEMEM_H
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the
+ *    flags field of the struct page
+ */
+
+/*
+ * SECTION_SIZE_BITS		2^N: how big each section will be
+ * MAX_PHYSADDR_BITS		2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ */
+#ifdef CONFIG_X86_PAE
+#define SECTION_SIZE_BITS       30
+#define MAX_PHYSADDR_BITS       36
+#define MAX_PHYSMEM_BITS	36
+#else
+#define SECTION_SIZE_BITS       26
+#define MAX_PHYSADDR_BITS       32
+#define MAX_PHYSMEM_BITS	32
+#endif
+
+/* XXX: FIXME -- wli */
+#define kern_addr_valid(kaddr)  (0)
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _I386_SPARSEMEM_H */
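
A short worked example of what these constants imply (for reference only, not part
of the header):

	/*
	 *   section size     = 1UL << SECTION_SIZE_BITS
	 *   maximum sections = 1 << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
	 *
	 *   PAE:     1 GiB sections,  2^(36 - 30) = 64 sections
	 *   non-PAE: 64 MiB sections, 2^(32 - 26) = 64 sections
	 */
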
--- linux.orig/include/asm-ia64/mman.h~AA-PM-22-vm_immovable	2005-05-31 12:42:07.000000000 -0700
+++ linux/include/asm-ia64/mman.h	2005-05-31 12:42:07.000000000 -0700
@@ -30,6 +30,7 @@
 #define MAP_NORESERVE	0x04000		/* don't check for reservations */
 #define MAP_POPULATE	0x08000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_IMMOVABLE	0x20000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
--- linux.orig/include/asm-m32r/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-m32r/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -14,7 +14,6 @@
 #define NODE_DATA(nid)		(node_data[nid])
 
 #define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)						\
 ({									\
@@ -32,7 +31,7 @@
 ({									\
 	unsigned long __pfn = pfn;					\
 	int __node  = pfn_to_nid(__pfn);				\
-	&node_mem_map(__node)[node_localnr(__pfn,__node)];		\
+	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)];	\
 })
 
 #define page_to_pfn(pg)							\
--- linux.orig/include/asm-parisc/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-parisc/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -19,7 +19,6 @@
  */
 #define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)						\
 ({									\
@@ -38,7 +37,7 @@
 ({									\
 	unsigned long __pfn = (pfn);					\
 	int __node  = pfn_to_nid(__pfn);				\
-	&node_mem_map(__node)[node_localnr(__pfn,__node)];		\
+	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)];	\
 })
 
 #define page_to_pfn(pg)							\
--- linux.orig/include/asm-ppc64/mman.h~AA-PM-22-vm_immovable-ppc64	2005-05-31 12:42:06.000000000 -0700
+++ linux/include/asm-ppc64/mman.h	2005-05-31 12:42:06.000000000 -0700
@@ -38,6 +38,7 @@
 
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_IMMOVABLE	0x20000
 
 #define MADV_NORMAL	0x0		/* default page-in behavior */
 #define MADV_RANDOM	0x1		/* page-in minimum required */
--- linux.orig/include/asm-ppc64/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-ppc64/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -10,9 +10,20 @@
 #include <linux/config.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+/* generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the
+ *    flags field of the struct page
+ */
+
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 
 extern struct pglist_data *node_data[];
+/*
+ * Return a pointer to the node data for node n.
+ */
+#define NODE_DATA(nid)		(node_data[nid])
 
 /*
  * Following are specific to this numa platform.
@@ -47,36 +58,32 @@
 	return nid;
 }
 
-#define pfn_to_nid(pfn)		pa_to_nid((pfn) << PAGE_SHIFT)
-
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid)		(node_data[nid])
-
 #define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
 
 /*
  * Following are macros that each numa implmentation must define.
  */
 
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
-
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
 
 #define local_mapnr(kvaddr) \
 	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) 
 
+#ifdef CONFIG_DISCONTIGMEM
+
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
+
+#define pfn_to_nid(pfn)		pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
+
 /* Written this way to avoid evaluating arguments twice */
 #define discontigmem_pfn_to_page(pfn) \
 ({ \
 	unsigned long __tmp = pfn; \
-	(node_mem_map(pfn_to_nid(__tmp)) + \
+	(NODE_DATA(pfn_to_nid(__tmp))->node_mem_map + \
 	 node_localnr(__tmp, pfn_to_nid(__tmp))); \
 })
 
@@ -91,4 +98,11 @@
 #define discontigmem_pfn_valid(pfn)		((pfn) < num_physpages)
 
 #endif /* CONFIG_DISCONTIGMEM */
+
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+#define early_pfn_to_nid(pfn)  pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT)
+#endif
+
 #endif /* _ASM_MMZONE_H_ */
--- linux.orig/include/asm-ppc64/page.h~FROM-2.6.12-rc5-mm1-ppc64-sparsemem-memory-model	2005-05-31 11:39:07.000000000 -0700
+++ linux/include/asm-ppc64/page.h	2005-05-31 12:41:31.000000000 -0700
@@ -219,7 +219,8 @@
 #define page_to_pfn(page)	discontigmem_page_to_pfn(page)
 #define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
 #define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
-#else
+#endif
+#ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn)	(mem_map + (pfn))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/include/asm-ppc64/sparsemem.h	2005-05-31 11:39:07.000000000 -0700
@@ -0,0 +1,16 @@
+#ifndef _ASM_PPC64_SPARSEMEM_H
+#define _ASM_PPC64_SPARSEMEM_H 1
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS		2^N: how big each section will be
+ * MAX_PHYSADDR_BITS		2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS       24
+#define MAX_PHYSADDR_BITS       38
+#define MAX_PHYSMEM_BITS        36
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_PPC64_SPARSEMEM_H */
--- linux.orig/include/asm-x86_64/bitops.h~FROM-2.6.12-rc5-mm1-add-x86-64-specific-support-for-sparsemem	2005-05-31 11:39:11.000000000 -0700
+++ linux/include/asm-x86_64/bitops.h	2005-05-31 11:39:11.000000000 -0700
@@ -411,8 +411,6 @@
 /* find last set bit */
 #define fls(x) generic_fls(x)
 
-#define ARCH_HAS_ATOMIC_UNSIGNED 1
-
 #endif /* __KERNEL__ */
 
 #endif /* _X86_64_BITOPS_H */
--- linux.orig/include/asm-x86_64/io.h~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/include/asm-x86_64/io.h	2005-05-31 11:39:10.000000000 -0700
@@ -124,12 +124,7 @@
 /*
  * Change "struct page" to physical address.
  */
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
 #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#else
-#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
-#endif
 
 #include <asm-generic/iomap.h>
 
--- linux.orig/include/asm-x86_64/mman.h~AA-PM-98-MAP_IMMOVABLE-lots-o-arches	2005-05-31 12:42:11.000000000 -0700
+++ linux/include/asm-x86_64/mman.h	2005-05-31 12:42:12.000000000 -0700
@@ -23,6 +23,7 @@
 #define MAP_NORESERVE	0x4000		/* don't check for reservations */
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
+#define MAP_IMMOVABLE	0x20000
 
 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_INVALIDATE	2		/* invalidate the caches */
--- linux.orig/include/asm-x86_64/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/asm-x86_64/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -6,7 +6,7 @@
 
 #include <linux/config.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 #define VIRTUAL_BUG_ON(x) 
 
@@ -30,27 +30,23 @@
 	return nid; 
 } 
 
-#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-
-#define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 #define NODE_DATA(nid)		(node_data[nid])
 
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
-
-#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn + \
 				 NODE_DATA(nid)->node_spanned_pages)
 
-#define local_mapnr(kvaddr) \
-	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
+#ifdef CONFIG_DISCONTIGMEM
+
+#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
+#define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 
 /* AK: this currently doesn't deal with invalid addresses. We'll see 
    if the 2.5 kernel doesn't pass them
    (2.4 used to). */
 #define pfn_to_page(pfn) ({ \
 	int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); 	\
-	((pfn) - node_start_pfn(nid)) + node_mem_map(nid);		\
+	((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;	\
 })
 
 #define page_to_pfn(page) \
@@ -60,4 +56,8 @@
 			({ u8 nid__ = pfn_to_nid(pfn); \
 			   nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); }))
 #endif
+
+#define local_mapnr(kvaddr) \
+	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
+#endif
 #endif
--- linux.orig/include/asm-x86_64/page.h~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/include/asm-x86_64/page.h	2005-05-31 11:39:10.000000000 -0700
@@ -118,7 +118,9 @@
 	  __pa(v); })
 
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
+#define __boot_va(x)		__va(x)
+#define __boot_pa(x)		__pa(x)
+#ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn)	(mem_map + (pfn))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/include/asm-x86_64/sparsemem.h	2005-05-31 11:39:11.000000000 -0700
@@ -0,0 +1,26 @@
+#ifndef _ASM_X86_64_SPARSEMEM_H
+#define _ASM_X86_64_SPARSEMEM_H 1
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the flags
+ *    field of the struct page
+ *
+ * SECTION_SIZE_BITS		2^n: size of each section
+ * MAX_PHYSADDR_BITS		2^n: max size of physical address space
+ * MAX_PHYSMEM_BITS		2^n: how much memory we can have in that space
+ *
+ */
+
+#define SECTION_SIZE_BITS	27 /* matt - 128 is convenient right now */
+#define MAX_PHYSADDR_BITS	40
+#define MAX_PHYSMEM_BITS	40
+
+extern int early_pfn_to_nid(unsigned long pfn);
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_X86_64_SPARSEMEM_H */
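
With SECTION_SIZE_BITS of 27 each x86_64 section spans 128MB, and a pfn's
section number is simply pfn >> (SECTION_SIZE_BITS - PAGE_SHIFT), which is what
pfn_to_section_nr() in linux/mmzone.h (later in this series) computes.  A
standalone sketch of that arithmetic, assuming 4K pages:

#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* 128MB sections */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

int main(void)
{
	unsigned long long phys = 4ULL << 30;		/* an address at 4GB */
	unsigned long long pfn = phys >> PAGE_SHIFT;

	/* 4GB / 128MB = 32, so this pfn lands in section 32. */
	printf("pfn %llu -> section %llu\n", pfn, pfn >> PFN_SECTION_SHIFT);
	return 0;
}
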
--- linux.orig/include/asm-x86_64/topology.h~FROM-2.6.12-rc5-mm1-reorganize-x86-64-numa-and-discontigmem-config-options	2005-05-31 11:39:10.000000000 -0700
+++ linux/include/asm-x86_64/topology.h	2005-05-31 11:39:10.000000000 -0700
@@ -3,7 +3,7 @@
 
 #include <linux/config.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 #include <asm/mpspec.h>
 #include <asm/bitops.h>
@@ -37,7 +37,6 @@
 }
 #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number)
 
-#ifdef CONFIG_NUMA
 /* sched_domains SD_NODE_INIT for x86_64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.span			= CPU_MASK_NONE,	\
@@ -59,7 +58,6 @@
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
 }
-#endif
 
 #endif
 
--- linux.orig/include/linux/bootmem.h~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-simple-numa-remap-space-allocator	2005-05-31 11:38:52.000000000 -0700
+++ linux/include/linux/bootmem.h	2005-05-31 12:41:31.000000000 -0700
@@ -67,6 +67,15 @@
 	__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
+#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
+extern void *alloc_remap(int nid, unsigned long size);
+#else
+static inline void *alloc_remap(int nid, unsigned long size)
+{
+	return NULL;
+}
+#endif
+
 extern unsigned long __initdata nr_kernel_pages;
 extern unsigned long __initdata nr_all_pages;
 
--- linux.orig/include/linux/buffer_head.h~AA-PM-20.0-nowriteback	2005-05-31 12:42:05.000000000 -0700
+++ linux/include/linux/buffer_head.h	2005-05-31 12:42:05.000000000 -0700
@@ -205,7 +205,8 @@
 int nobh_truncate_page(struct address_space *, loff_t);
 int nobh_writepage(struct page *page, get_block_t *get_block,
                         struct writeback_control *wbc);
-
+void generic_move_buffer(struct page *, struct page *);
+void unlock_page_buffer(struct page *);
 
 /*
  * inline definitions
--- linux.orig/include/linux/fs.h~AA-PM-13.1-migrate_page-operation	2005-05-31 12:42:00.000000000 -0700
+++ linux/include/linux/fs.h	2005-05-31 12:42:00.000000000 -0700
@@ -330,6 +330,7 @@
 	int (*releasepage) (struct page *, int);
 	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs);
+	int (*migrate_page)(struct page *, struct page *);
 };
 
 struct backing_dev_info;
--- linux.orig/include/linux/mm.h~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-reorganize-page-flags-bit-operations	2005-05-31 11:38:52.000000000 -0700
+++ linux/include/linux/mm.h	2005-05-31 12:42:07.000000000 -0700
@@ -161,6 +161,7 @@
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_IMMOVABLE	0x02000000	/* Don't place in hot removable area */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -177,6 +178,11 @@
 #define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
 #define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
 #define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define VM_Immovable(v)			((v)->vm_flags & VM_IMMOVABLE)
+#else
+#define VM_Immovable(v)			(0)
+#endif
 
 /*
  * mapping from the currently active vm_flags protection bits (the
@@ -395,19 +401,81 @@
 /*
  * The zone field is never updated after free_area_init_core()
  * sets it, so none of the operations on it need to be atomic.
- * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
- * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
  */
-#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
-#define NODEZONE(node, zone)	((node << ZONES_SHIFT) | zone)
+
+
+/*
+ * page->flags layout:
+ *
+ * There are three possibilities for how page->flags get
+ * laid out.  The first is for the normal case, without
+ * sparsemem.  The second is for sparsemem when there is
+ * plenty of space for node and section.  The last is when
+ * we have run out of space and have to fall back to an
+ * alternate (slower) way of determining the node.
+ *
+ *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
+ * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
+ *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
+ */
+#if SECTIONS_SHIFT+NODES_SHIFT+ZONES_SHIFT <= FLAGS_RESERVED
+#define NODES_WIDTH		NODES_SHIFT
+#else
+#define NODES_WIDTH		0
+#endif
+
+#ifdef CONFIG_SPARSEMEM
+#define SECTIONS_WIDTH		SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH		0
+#endif
+
+#define ZONES_WIDTH		ZONES_SHIFT
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+#define SECTIONS_PGOFF		((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
+
+/*
+ * We are going to use the flags for the page-to-node mapping if it's in
+ * there.  This includes the case where there is no node, so it is implicit.
+ */
+#define FLAGS_HAS_NODE		(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+
+#ifndef PFN_SECTION_SHIFT
+#define PFN_SECTION_SHIFT 0
+#endif
+
+/*
+ * Define the bit shifts to access each section.  For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away references to them.
+ */
+#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
+#if FLAGS_HAS_NODE
+#define ZONETABLE_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
+#else
+#define ZONETABLE_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
+#endif
+#define ZONETABLE_PGSHIFT	ZONES_PGSHIFT
+
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#endif
+
+#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
+#define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
 static inline unsigned long page_zonenum(struct page *page)
 {
-	return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
-}
-static inline unsigned long page_to_nid(struct page *page)
-{
-	return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
+	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
 struct zone;
@@ -415,16 +483,47 @@
 
 static inline struct zone *page_zone(struct page *page)
 {
-	return zone_table[page->flags >> NODEZONE_SHIFT];
+	return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
+			ZONETABLE_MASK];
+}
+
+static inline unsigned long page_to_nid(struct page *page)
+{
+	if (FLAGS_HAS_NODE)
+		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+	else
+		return page_zone(page)->zone_pgdat->node_id;
+}
+static inline unsigned long page_to_section(struct page *page)
+{
+	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone)
+{
+	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
+	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+}
+static inline void set_page_node(struct page *page, unsigned long node)
+{
+	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
+	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+}
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
+static inline void set_page_links(struct page *page, unsigned long zone,
+	unsigned long node, unsigned long pfn)
 {
-	page->flags &= ~(~0UL << NODEZONE_SHIFT);
-	page->flags |= nodezone_num << NODEZONE_SHIFT;
+	set_page_zone(page, zone);
+	set_page_node(page, node);
+	set_page_section(page, pfn_to_section_nr(pfn));
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
 #endif
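
As a worked example of the layout described above, here is a standalone sketch
with illustrative field widths only (the real SECTIONS/NODES/ZONES widths are
derived from the arch sparsemem.h and Kconfig).  Fields pack downward from the
top of page->flags and are recovered with the same shift-and-mask pattern used
by page_zonenum(), page_to_nid() and page_to_section():

#include <stdio.h>

/* Illustrative widths only; the kernel derives the real ones at build time. */
#define FLAGS_BITS	64
#define SECTIONS_WIDTH	13
#define NODES_WIDTH	6
#define ZONES_WIDTH	2

#define SECTIONS_PGOFF	(FLAGS_BITS - SECTIONS_WIDTH)
#define NODES_PGOFF	(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF	(NODES_PGOFF - ZONES_WIDTH)

#define SECTIONS_MASK	((1ULL << SECTIONS_WIDTH) - 1)
#define NODES_MASK	((1ULL << NODES_WIDTH) - 1)
#define ZONES_MASK	((1ULL << ZONES_WIDTH) - 1)

int main(void)
{
	unsigned long long flags = 0;

	/* set_page_links() analogue: pack zone 1, node 3, section 42 */
	flags |= (1ULL & ZONES_MASK) << ZONES_PGOFF;
	flags |= (3ULL & NODES_MASK) << NODES_PGOFF;
	flags |= (42ULL & SECTIONS_MASK) << SECTIONS_PGOFF;

	/* page_zonenum() / page_to_nid() / page_to_section() analogues */
	printf("zone=%llu node=%llu section=%llu\n",
	       (flags >> ZONES_PGOFF) & ZONES_MASK,
	       (flags >> NODES_PGOFF) & NODES_MASK,
	       (flags >> SECTIONS_PGOFF) & SECTIONS_MASK);
	return 0;
}
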
--- linux.orig/include/linux/mm_inline.h~AA-PM-01-steal_page_from_lru	2005-05-31 11:39:12.000000000 -0700
+++ linux/include/linux/mm_inline.h	2005-05-31 11:39:12.000000000 -0700
@@ -38,3 +38,71 @@
 		zone->nr_inactive--;
 	}
 }
+
+static inline int
+isolate_lru_onepage(struct page *page, struct list_head *src,
+			struct list_head *dst)
+{
+	if (!TestClearPageLRU(page))
+		BUG();
+	list_del(&page->lru);
+	if (get_page_testone(page)) {
+		/*
+		 * It is being freed elsewhere
+		 */
+		__put_page(page);
+		SetPageLRU(page);
+		list_add(&page->lru, src);
+		return 0;
+	}
+	list_add(&page->lru, dst);
+	return 1;
+}
+
+
+static inline int
+__steal_page_from_lru(struct zone *zone, struct page *page,
+			struct list_head *dst)
+{
+	if (PageActive(page)) {
+		if (!isolate_lru_onepage(page, &zone->active_list, dst))
+			return 0;
+		zone->nr_active--;
+	} else {
+		if (!isolate_lru_onepage(page, &zone->inactive_list, dst))
+			return 0;
+		zone->nr_inactive--;
+	}
+	return 1;
+}
+
+static inline int
+steal_page_from_lru(struct zone *zone, struct page *page,
+			struct list_head *dst)
+{
+	int ret;
+	spin_lock_irq(&zone->lru_lock);
+	ret = __steal_page_from_lru(zone, page, dst);
+	spin_unlock_irq(&zone->lru_lock);
+	return ret;
+}
+
+static inline void
+__putback_page_to_lru(struct zone *zone, struct page *page)
+{
+	if (TestSetPageLRU(page))
+		BUG();
+	if (PageActive(page))
+		add_page_to_active_list(zone, page);
+	else
+		add_page_to_inactive_list(zone, page);
+}
+
+static inline void
+putback_page_to_lru(struct zone *zone, struct page *page)
+{
+	spin_lock_irq(&zone->lru_lock);
+	__putback_page_to_lru(zone, page);
+	spin_unlock_irq(&zone->lru_lock);
+}
+
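
A hedged usage sketch of the new helpers (kernel context, not a standalone
program): candidate pages are pulled off a zone's LRU onto a private list with
an extra reference held, processed, and then either freed or handed back with
putback_page_to_lru().  try_to_migrate_pages() in mm/mmigrate.c, later in this
series, uses the put-back half of this pairing.

/* Sketch only: an illustrative caller, not part of the patch. */
LIST_HEAD(dst);
struct page *page, *next;

spin_lock_irq(&zone->lru_lock);
/* for each candidate page currently on the zone's LRU lists: */
__steal_page_from_lru(zone, page, &dst);
spin_unlock_irq(&zone->lru_lock);

list_for_each_entry_safe(page, next, &dst, lru) {
	list_del(&page->lru);
	/* ... migrate or otherwise process the page ... */
	putback_page_to_lru(page_zone(page), page);
	page_cache_release(page);	/* drop the isolation reference */
}
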
--- linux.orig/include/linux/mman.h~AA-PM-22-vm_immovable	2005-05-31 12:42:07.000000000 -0700
+++ linux/include/linux/mman.h	2005-05-31 12:42:07.000000000 -0700
@@ -61,7 +61,8 @@
 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
 	       _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
-	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
+	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
+	       _calc_vm_trans(flags, MAP_IMMOVABLE,  VM_IMMOVABLE );
 }
 
 #endif /* _LINUX_MMAN_H */
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/include/linux/mmigrate.h	2005-05-31 12:42:10.000000000 -0700
@@ -0,0 +1,39 @@
+#ifndef _LINUX_MEMHOTPLUG_H
+#define _LINUX_MEMHOTPLUG_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#define MIGRATE_NODE_ANY -1
+
+#ifdef CONFIG_MEMORY_MIGRATE
+extern int generic_migrate_page(struct page *, struct page *,
+		int (*)(struct page *, struct page *, struct list_head *));
+extern int migrate_page_common(struct page *, struct page *,
+					struct list_head *);
+extern int migrate_page_buffer(struct page *, struct page *,
+					struct list_head *);
+extern int page_migratable(struct page *, struct page *, int,
+					struct list_head *);
+extern struct page * migrate_onepage(struct page *, int nodeid);
+extern int try_to_migrate_pages(struct list_head *);
+
+#else
+static inline int generic_migrate_page(struct page *page, struct page *newpage,
+					int (*fn)(struct page *, struct page *))
+{
+	return -ENOSYS;
+}
+static inline int migrate_page_buffer(struct page* page, struct page* newpage)
+{
+	return -ENOSYS;
+}
+#endif
+
+#ifdef ARCH_HAS_PAGEMIGRATION
+extern void arch_migrate_page(struct page *, struct page *);
+#else
+static inline void arch_migrate_page(struct page *page, struct page *newpage) {}
+#endif
+
+#endif /* _LINUX_MEMHOTPLUG_H */
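
A hedged sketch of how these entry points are meant to be called (kernel
context; it mirrors the use of migrate_onepage() in mm/mmigrate.c below): the
caller must not already hold the page lock, the target node may be
MIGRATE_NODE_ANY, and failures are returned as ERR_PTR() values.

/* Sketch only: not part of the patch. */
struct page *newpage;

newpage = migrate_onepage(page, MIGRATE_NODE_ANY);
if (IS_ERR(newpage)) {
	/* -ENOSPC: no swap space, -ENOENT: truncated,
	 * -ENOMEM/-EBUSY: worth retrying later */
	return PTR_ERR(newpage);
}

/* the old page has been released; give the new copy back to the LRU */
putback_page_to_lru(page_zone(newpage), newpage);
page_cache_release(newpage);
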
--- linux.orig/include/linux/mmzone.h~FROM-2.6.12-rc5-mm1-remove-non-discontig-use-of-pgdat-node_mem_map	2005-05-31 11:38:50.000000000 -0700
+++ linux/include/linux/mmzone.h	2005-05-31 12:41:34.000000000 -0700
@@ -252,7 +252,9 @@
 	struct zone node_zones[MAX_NR_ZONES];
 	struct zonelist node_zonelists[GFP_ZONETYPES];
 	int nr_zones;
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
 	struct page *node_mem_map;
+#endif
 	struct bootmem_data *bdata;
 	unsigned long node_start_pfn;
 	unsigned long node_present_pages; /* total number of physical pages */
@@ -267,6 +269,12 @@
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
 #define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
+#else
+#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
+#endif
+#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
 extern struct pglist_data *pgdat_list;
 
@@ -383,7 +391,7 @@
 /* Returns the number of the current Node. */
 #define numa_node_id()		(cpu_to_node(_smp_processor_id()))
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 
 extern struct pglist_data contig_page_data;
 #define NODE_DATA(nid)		(&contig_page_data)
@@ -391,36 +399,171 @@
 #define MAX_NODES_SHIFT		1
 #define pfn_to_nid(pfn)		(0)
 
-#else /* CONFIG_DISCONTIGMEM */
+#else /* CONFIG_NEED_MULTIPLE_NODES */
 
 #include <asm/mmzone.h>
 
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
+#endif
+
 #if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
 /*
  * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
  * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
  */
-#define MAX_NODES_SHIFT		6
+#define FLAGS_RESERVED		8
+
 #elif BITS_PER_LONG == 64
 /*
  * with 64 bit flags field, there's plenty of room.
  */
-#define MAX_NODES_SHIFT		10
+#define FLAGS_RESERVED		32
+
+#else
+
+#error BITS_PER_LONG not defined
+
+#endif
+
+#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+#define early_pfn_to_nid(nid)  (0UL)
+#endif
+
+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTIONS_SHIFT		#bits space required to store a section #
+ *
+ * PA_SECTION_SHIFT		physical address to/from section number
+ * PFN_SECTION_SHIFT		pfn to/from section number
+ */
+#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
+
+#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
+#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
+
+#define NR_MEM_SECTIONS	(1 << SECTIONS_SHIFT)
+
+#define PAGES_PER_SECTION       (1 << PFN_SECTION_SHIFT)
+#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
+
+#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
-#endif /* !CONFIG_DISCONTIGMEM */
+struct page;
+struct mem_section {
+	/*
+	 * This is, logically, a pointer to an array of struct
+	 * pages.  However, it is stored with some other magic.
+	 * (see sparse.c::sparse_init_one_section())
+	 *
+	 * Making it a UL at least makes someone do a cast
+	 * before using it wrong.
+	 */
+	unsigned long section_mem_map;
+};
+
+extern struct mem_section mem_section[NR_MEM_SECTIONS];
+
+/*
+ * We use the lower bits of the mem_map pointer to store
+ * a little bit of information.  There should be at least
+ * 3 bits here due to 32-bit alignment.
+ */
+#define	SECTION_MARKED_PRESENT	(1UL<<0)
+#define SECTION_HAS_MEM_MAP	(1UL<<1)
+#define SECTION_MAP_LAST_BIT	(1UL<<2)
+#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
+
+static inline struct page *__section_mem_map_addr(struct mem_section *section)
+{
+	unsigned long map = section->section_mem_map;
+	map &= SECTION_MAP_MASK;
+	return (struct page *)map;
+}
+
+static inline int valid_section(struct mem_section *section)
+{
+	return (section->section_mem_map & SECTION_MARKED_PRESENT);
+}
+
+static inline int section_has_mem_map(struct mem_section *section)
+{
+	return (section->section_mem_map & SECTION_HAS_MEM_MAP);
+}
+
+static inline int valid_section_nr(int nr)
+{
+	return valid_section(&mem_section[nr]);
+}
+
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
+
+static inline struct mem_section *__pfn_to_section(unsigned long pfn)
+{
+	return &mem_section[pfn_to_section_nr(pfn)];
+}
+
+#define pfn_to_page(pfn) 						\
+({ 									\
+	unsigned long __pfn = (pfn);					\
+	__section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;	\
+})
+#define page_to_pfn(page)						\
+({									\
+	page - __section_mem_map_addr(&mem_section[page_to_section(page)]);	\
+})
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+	return valid_section(&mem_section[pfn_to_section_nr(pfn)]);
+}
 
-#if NODES_SHIFT > MAX_NODES_SHIFT
-#error NODES_SHIFT > MAX_NODES_SHIFT
+/*
+ * These are _only_ used during initialisation, therefore they
+ * can use __initdata ...  They could have names to indicate
+ * this restriction.
+ */
+#ifdef CONFIG_NUMA
+#define pfn_to_nid		early_pfn_to_nid
 #endif
 
-/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
-#define MAX_ZONES_SHIFT		2
+#define pfn_to_pgdat(pfn)						\
+({									\
+	NODE_DATA(pfn_to_nid(pfn));					\
+})
 
-#if ZONES_SHIFT > MAX_ZONES_SHIFT
-#error ZONES_SHIFT > MAX_ZONES_SHIFT
+#define early_pfn_valid(pfn)	pfn_valid(pfn)
+void sparse_init(void);
+#else
+#define sparse_init()	do {} while (0)
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
+#else
+#define early_pfn_in_nid(pfn, nid)	(1)
+#endif
+
+#ifndef early_pfn_valid
+#define early_pfn_valid(pfn)	(1)
 #endif
 
+void memory_present(int nid, unsigned long start, unsigned long end);
+unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
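
The section_mem_map encoding above relies on struct page arrays being at least
4-byte aligned, so the two low bits of the stored pointer can carry the
PRESENT and HAS_MEM_MAP flags.  A standalone sketch of that encoding
(illustrative only; the real setup lives in sparse.c::sparse_init_one_section(),
which may also bias the stored pointer, so only the flag handling is shown):

#include <stdio.h>

#define SECTION_MARKED_PRESENT	(1UL << 0)
#define SECTION_HAS_MEM_MAP	(1UL << 1)
#define SECTION_MAP_LAST_BIT	(1UL << 2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT - 1))

int main(void)
{
	static long fake_mem_map[16];	/* stand-in for a struct page array */
	unsigned long section_mem_map;

	/* mark the section present and attach its (suitably aligned) map */
	section_mem_map = (unsigned long)fake_mem_map |
			  SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

	/* valid_section() / __section_mem_map_addr() analogues */
	printf("present=%lu map=%p\n",
	       section_mem_map & SECTION_MARKED_PRESENT,
	       (void *)(section_mem_map & SECTION_MAP_MASK));
	return 0;
}
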
--- linux.orig/include/linux/numa.h~FROM-2.6.12-rc5-mm1-sparsemem-memory-model	2005-05-31 11:39:01.000000000 -0700
+++ linux/include/linux/numa.h	2005-05-31 11:39:02.000000000 -0700
@@ -3,7 +3,7 @@
 
 #include <linux/config.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 #include <asm/numnodes.h>
 #endif
 
--- linux.orig/include/linux/page-flags.h~AA-PM-04-config-noswap	2005-05-31 12:41:52.000000000 -0700
+++ linux/include/linux/page-flags.h	2005-05-31 12:41:52.000000000 -0700
@@ -300,6 +300,8 @@
 #define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
 #else
 #define PageSwapCache(page)	0
+#define SetPageSwapCache(page)
+#define ClearPageSwapCache(page)
 #endif
 
 #define PageUncached(page)	test_bit(PG_uncached, &(page)->flags)
--- linux.orig/include/linux/radix-tree.h~AA-PM-03-radix-tree-replace	2005-05-31 12:41:52.000000000 -0700
+++ linux/include/linux/radix-tree.h	2005-05-31 12:41:52.000000000 -0700
@@ -47,6 +47,7 @@
 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+void *radix_tree_replace(struct radix_tree_root *, unsigned long, void *);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items);
--- linux.orig/include/linux/rmap.h~AA-PM-14-try_to_unmap_force	2005-05-31 12:42:02.000000000 -0700
+++ linux/include/linux/rmap.h	2005-05-31 12:42:02.000000000 -0700
@@ -90,7 +90,9 @@
  * Called from mm/vmscan.c to handle paging out
  */
 int page_referenced(struct page *, int is_locked, int ignore_token);
-int try_to_unmap(struct page *);
+int try_to_unmap(struct page *, struct list_head *);
+int touch_unmapped_address(struct list_head *);
+
 
 /*
  * Used by swapoff to help locate where page is expected in vma.
@@ -104,7 +106,7 @@
 #define anon_vma_link(vma)	do {} while (0)
 
 #define page_referenced(page,l,i) TestClearPageReferenced(page)
-#define try_to_unmap(page)	SWAP_FAIL
+#define try_to_unmap(page, list)	SWAP_FAIL
 
 #endif	/* CONFIG_MMU */
 
--- linux.orig/include/linux/swap.h~AA-PM-02-export-pageout	2005-05-31 12:41:52.000000000 -0700
+++ linux/include/linux/swap.h	2005-05-31 12:42:04.000000000 -0700
@@ -174,6 +174,47 @@
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
+typedef enum {
+	/* failed to write page out, page is locked */
+	PAGE_KEEP,
+	/* move page to the active list, page is locked */
+	PAGE_ACTIVATE,
+	/* page has been sent to the disk successfully, page is unlocked */
+	PAGE_SUCCESS,
+	/* page is clean and locked */
+	PAGE_CLEAN,
+} pageout_t;
+extern pageout_t pageout(struct page *, struct address_space *);
+struct scan_control {
+	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
+	unsigned long nr_to_scan;
+
+	/* Incremented by the number of inactive pages that were scanned */
+	unsigned long nr_scanned;
+
+	/* Incremented by the number of pages reclaimed */
+	unsigned long nr_reclaimed;
+
+	unsigned long nr_mapped;	/* From page_state */
+
+	/* How many pages shrink_cache() should reclaim */
+	int nr_to_reclaim;
+
+	/* Ask shrink_caches, or shrink_zone to scan at this priority */
+	unsigned int priority;
+
+	/* This context's GFP mask */
+	unsigned int gfp_mask;
+
+	int may_writepage;
+
+	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
+	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
+	 * In this context, it doesn't matter that we scan the
+	 * whole list at once. */
+	int swap_cluster_max;
+};
+extern int shrink_list(struct list_head *, struct scan_control *);
 extern int vm_swappiness;
 
 #ifdef CONFIG_MMU
@@ -193,7 +234,7 @@
 extern struct address_space swapper_space;
 #define total_swapcache_pages  swapper_space.nrpages
 extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, unsigned int);
 extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
 extern int move_to_swap_cache(struct page *, swp_entry_t);
@@ -217,7 +258,11 @@
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
+extern int __remove_exclusive_swap_page(struct page *, int);
+static inline int remove_exclusive_swap_page(struct page *p)
+{
+	return __remove_exclusive_swap_page(p, 0);
+}
 struct backing_dev_info;
 
 extern struct swap_list_t swap_list;
@@ -271,11 +316,16 @@
 #define delete_from_swap_cache(p)		/*NOTHING*/
 #define swap_token_default_timeout		0
 
-static inline int remove_exclusive_swap_page(struct page *p)
+static inline int __remove_exclusive_swap_page(struct page *p, int force)
 {
 	return 0;
 }
 
+static inline int remove_exclusive_swap_page(struct page *p)
+{
+	return __remove_exclusive_swap_page(p, 0);
+}
+
 static inline swp_entry_t get_swap_page(void)
 {
 	swp_entry_t entry;
--- linux.orig/init/Kconfig~AA-PM-07.2-memory_migration-depends-swap	2005-05-31 12:41:54.000000000 -0700
+++ linux/init/Kconfig	2005-05-31 12:41:54.000000000 -0700
@@ -87,6 +87,9 @@
 	  used to provide more virtual memory than the actual RAM present
 	  in your computer.  If unsure say Y.
 
+comment "  Swap automatically enabled by selecting Memory Migration"
+	depends on MEMORY_MIGRATE
+
 config SYSVIPC
 	bool "System V IPC"
 	depends on MMU
--- linux.orig/kernel/fork.c~AA-PM-22-vm_immovable	2005-05-31 12:42:07.000000000 -0700
+++ linux/kernel/fork.c	2005-05-31 12:42:07.000000000 -0700
@@ -227,7 +227,7 @@
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
 		vma_set_policy(tmp, pol);
-		tmp->vm_flags &= ~VM_LOCKED;
+		tmp->vm_flags &= ~(VM_LOCKED|VM_IMMOVABLE);
 		tmp->vm_mm = mm;
 		tmp->vm_next = NULL;
 		anon_vma_link(tmp);
--- linux.orig/lib/radix-tree.c~AA-PM-03-radix-tree-replace	2005-05-31 12:41:52.000000000 -0700
+++ linux/lib/radix-tree.c	2005-05-31 12:41:52.000000000 -0700
@@ -100,7 +100,13 @@
 static inline void
 radix_tree_node_free(struct radix_tree_node *node)
 {
-	kmem_cache_free(radix_tree_node_cachep, node);
+	struct radix_tree_preload *rtp;
+
+	rtp = &__get_cpu_var(radix_tree_preloads);
+	if (rtp->nr < ARRAY_SIZE(rtp->nodes))
+		rtp->nodes[rtp->nr++] = node;
+	else
+		kmem_cache_free(radix_tree_node_cachep, node);
 }
 
 /*
@@ -733,6 +739,53 @@
 EXPORT_SYMBOL(radix_tree_delete);
 
 /**
+ *	radix_tree_replace    -    replace items in a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@item:		item to insert
+ *
+ *	Replace the item at @index with @item.
+ *	Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_replace(struct radix_tree_root *root,
+					unsigned long index, void *item)
+{
+	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
+	unsigned int height, shift;
+	void *ret = NULL;
+
+	height = root->height;
+	if (index > radix_tree_maxindex(height))
+		goto out;
+
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
+	pathp->node = NULL;
+	pathp->slot = &root->rnode;
+
+	while (height > 0) {
+		int offset;
+
+		if (*pathp->slot == NULL)
+			goto out;
+
+		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
+		pathp[1].offset = offset;
+		pathp[1].node = *pathp[0].slot;
+		pathp[1].slot = (struct radix_tree_node **)
+				(pathp[1].node->slots + offset);
+		pathp++;
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	}
+
+	if ((ret = *pathp[0].slot))
+		*pathp[0].slot = item;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_replace);
+
+/**
  *	radix_tree_tagged - test whether any items in the tree are tagged
  *	@root:		radix tree root
  *	@tag:		tag to test
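
A hedged usage sketch of radix_tree_replace() (kernel context; this mirrors
replace_pages() in mm/mmigrate.c later in the series): the caller holds the
mapping's tree lock, swaps the new item into a slot that is expected to be
populated, and must handle the NULL return that means the slot had already
been emptied, e.g. by truncation.

/* Sketch only: not part of the patch. */
struct page *oldpage;

read_lock_irq(&mapping->tree_lock);
oldpage = radix_tree_replace(&mapping->page_tree, page_index(page), newpage);
read_unlock_irq(&mapping->tree_lock);

if (oldpage == NULL) {
	/* nothing was replaced; the old entry was already gone */
}
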
--- linux.orig/mm/Kconfig~FROM-2.6.12-rc5-mm1-create-mm-kconfig-for-arch-independent-memory-options	2005-05-31 11:38:54.000000000 -0700
+++ linux/mm/Kconfig	2005-05-31 12:41:54.000000000 -0700
@@ -0,0 +1,106 @@
+config SELECT_MEMORY_MODEL
+	def_bool y
+	depends on EXPERIMENTAL || ARCH_SELECT_MEMORY_MODEL
+
+choice
+	prompt "Memory model"
+	depends on SELECT_MEMORY_MODEL
+	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
+	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
+	default FLATMEM_MANUAL
+
+config FLATMEM_MANUAL
+	bool "Flat Memory"
+	depends on !ARCH_DISCONTIGMEM_ENABLE || ARCH_FLATMEM_ENABLE
+	help
+	  This option allows you to change some of the ways that
+	  Linux manages its memory internally.  Most users will
+	  only have one option here: FLATMEM.  This is normal
+	  and a correct option.
+
+	  Some users of more advanced features like NUMA and
+	  memory hotplug may have different options here.
+	  DISCONTIGMEM is a more mature, better-tested system,
+	  but is incompatible with memory hotplug and may suffer
+	  decreased performance over SPARSEMEM.  If unsure between
+	  "Sparse Memory" and "Discontiguous Memory", choose
+	  "Discontiguous Memory".
+
+	  If unsure, choose this option (Flat Memory) over any other.
+
+config DISCONTIGMEM_MANUAL
+	bool "Discontigious Memory"
+	depends on ARCH_DISCONTIGMEM_ENABLE
+	help
+	  This option provides enhanced support for discontiguous
+	  memory systems, over FLATMEM.  These systems have holes
+	  in their physical address spaces, and this option provides
+	  more efficient handling of these holes.  However, the vast
+	  majority of hardware has quite flat address spaces, and
+	  can have degraded performance from extra overhead that
+	  this option imposes.
+
+	  Many NUMA configurations will have this as the only option.
+
+	  If unsure, choose "Flat Memory" over this option.
+
+config SPARSEMEM_MANUAL
+	bool "Sparse Memory"
+	depends on ARCH_SPARSEMEM_ENABLE
+	help
+	  This will be the only option for some systems, including
+	  memory hotplug systems.  This is normal.
+
+	  For many other systems, this will be an alternative to
+	  "Discontigious Memory".  This option provides some potential
+	  performance benefits, along with decreased code complexity,
+	  but it is newer, and more experimental.
+
+	  If unsure, choose "Discontiguous Memory" or "Flat Memory"
+	  over this option.
+
+endchoice
+
+config DISCONTIGMEM
+	def_bool y
+	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
+
+config SPARSEMEM
+	def_bool y
+	depends on SPARSEMEM_MANUAL
+
+config FLATMEM
+	def_bool y
+	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
+
+config FLAT_NODE_MEM_MAP
+	def_bool y
+	depends on !SPARSEMEM
+
+#
+# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
+# to represent different areas of memory.  This variable allows
+# those dependencies to exist individually.
+#
+config NEED_MULTIPLE_NODES
+	def_bool y
+	depends on DISCONTIGMEM || NUMA
+
+config HAVE_MEMORY_PRESENT
+	def_bool y
+	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
+
+config MEMORY_MIGRATE
+	select SWAP
+	bool "Memory migration"
+	default y if MEMORY_HOTPLUG
+
+config MEMORY_REMOVE
+	bool "Allow for memory hot-remove"
+	depends on MEMORY_HOTPLUG && MEMORY_MIGRATE && (X86 && !X86_64)
+	help
+	  Enabling this option allows you to hot-remove highmem zones
+	  on i386 systems.  The i386 dependence is a hack for now.
+
+comment "Selecting Memory Migration automatically enables CONFIG_SWAP"
+ 	depends on !SWAP
--- linux.orig/mm/Makefile~FROM-2.6.12-rc5-mm1-sparsemem-memory-model	2005-05-31 11:39:01.000000000 -0700
+++ linux/mm/Makefile	2005-05-31 12:41:54.000000000 -0700
@@ -15,6 +15,8 @@
 obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o thrash.o
 obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
 obj-$(CONFIG_NUMA) 	+= mempolicy.o
+obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SHMEM) += shmem.o
 obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
+obj-$(CONFIG_MEMORY_MIGRATE) += mmigrate.o
 
--- linux.orig/mm/bootmem.c~FROM-2.6.12-rc5-mm1-sparsemem-memory-model	2005-05-31 11:39:01.000000000 -0700
+++ linux/mm/bootmem.c	2005-05-31 11:39:01.000000000 -0700
@@ -256,6 +256,7 @@
 static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 {
 	struct page *page;
+	unsigned long pfn;
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long i, count, total = 0;
 	unsigned long idx;
@@ -266,7 +267,7 @@
 
 	count = 0;
 	/* first extant page of the node */
-	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
+	pfn = bdata->node_boot_start >> PAGE_SHIFT;
 	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
 	map = bdata->node_bootmem_map;
 	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
@@ -275,6 +276,9 @@
 		gofast = 1;
 	for (i = 0; i < idx; ) {
 		unsigned long v = ~map[i / BITS_PER_LONG];
+
+		page = pfn_to_page(pfn);
+
 		if (gofast && v == ~0UL) {
 			int j, order;
 
@@ -302,8 +306,8 @@
 			}
 		} else {
 			i+=BITS_PER_LONG;
-			page += BITS_PER_LONG;
 		}
+		pfn += BITS_PER_LONG;
 	}
 	total += count;
 
--- linux.orig/mm/memory.c~FROM-2.6.12-rc5-mm1-sparsemem-memory-model	2005-05-31 11:39:01.000000000 -0700
+++ linux/mm/memory.c	2005-05-31 12:42:07.000000000 -0700
@@ -58,7 +58,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
 struct page *mem_map;
@@ -1290,12 +1290,22 @@
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto no_new_page;
+
 	if (old_page == ZERO_PAGE(address)) {
-		new_page = alloc_zeroed_user_highpage(vma, address);
+		if (VM_Immovable(vma)) {
+			new_page = alloc_page_vma(GFP_USER, vma, address);
+			if (new_page)
+				clear_user_page(address, address, new_page);
+		} else
+			new_page = alloc_zeroed_user_highpage(vma, address);
 		if (!new_page)
 			goto no_new_page;
 	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+		if (VM_Immovable(vma))
+			new_page = alloc_page_vma(GFP_USER, vma, address);
+		else
+			new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+
 		if (!new_page)
 			goto no_new_page;
 		copy_user_highpage(new_page, old_page, address);
@@ -1665,6 +1675,7 @@
 
 	pte_unmap(page_table);
 	spin_unlock(&mm->page_table_lock);
+again:
 	page = lookup_swap_cache(entry);
 	if (!page) {
  		swapin_readahead(entry, address, vma);
@@ -1693,6 +1704,12 @@
 
 	mark_page_accessed(page);
 	lock_page(page);
+	if (!PageSwapCache(page)) {
+		/* page migration has occurred */
+		unlock_page(page);
+		page_cache_release(page);
+		goto again;
+	}
 
 	/*
 	 * Back out if somebody else faulted in this pte while we
@@ -1774,7 +1791,10 @@
 
 		if (unlikely(anon_vma_prepare(vma)))
 			goto no_mem;
-		page = alloc_zeroed_user_highpage(vma, addr);
+		if (VM_Immovable(vma))
+			page = alloc_page_vma(GFP_USER, vma, addr);
+		else
+			page = alloc_zeroed_user_highpage(vma, addr);
 		if (!page)
 			goto no_mem;
 
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/mm/mmigrate.c	2005-05-31 12:42:11.000000000 -0700
@@ -0,0 +1,592 @@
+/*
+ *  linux/mm/mmigrate.c
+ *
+ *  Memory migration support.
+ *
+ *  Authors:	IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
+ *		Hirokazu Takahashi <taka@valinux.co.jp>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/mm_inline.h>
+#include <linux/rmap.h>
+#include <linux/mmigrate.h>
+#include <linux/delay.h>
+
+/*
+ * The concept of memory migration is to replace a target page with
+ * a substitute page on a radix tree. New requests to access the target
+ * - including system calls and page faults - are redirected to the
+ * substitute that is locked and not up-to-date, so that all of these
+ * requests are blocked until the migration has done. Data of the target
+ * is copied into the substitute and then the requests are unblocked
+ * after all operations against the target have finished.
+ *
+ * By this approach, regular pages in the swapcache/pagecache and
+ * hugetlbpages can be handled in the same way.
+ */
+
+
+/*
+ * Try to writeback a dirty page to free its buffers.
+ */
+static int
+writeback_and_free_buffers(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+
+	BUG_ON(!PageLocked(page));
+	wait_on_page_writeback(page);
+	if (!PagePrivate(page))
+		return 0;
+
+	if (PageDirty(page)) {
+		switch(pageout(page, mapping)) {
+		case PAGE_ACTIVATE:
+			return -1;
+		case PAGE_SUCCESS:
+			lock_page(page);
+			return 1;
+		case PAGE_KEEP:
+		case PAGE_CLEAN:
+			break;
+		}
+	}
+	if (try_to_release_page(page, GFP_KERNEL))
+		return 0;
+
+	return -1;
+}
+
+/*
+ * Replace "page" with "newpage" on the radix tree, which the page belongs to.
+ */
+static int
+replace_pages(struct page *page, struct page *newpage)
+{
+	struct address_space *mapping = page_mapping(page);
+	int ret = 0;
+	struct page *delpage;
+
+	page_cache_get(newpage);
+	read_lock_irq(&mapping->tree_lock);
+	newpage->index = page->index;
+	if  (PageSwapCache(page)) {
+		SetPageSwapCache(newpage);
+		newpage->private = page->private;
+	} else
+		newpage->mapping = page->mapping;
+	if (PageWriteback(page))
+		SetPageWriteback(newpage);
+
+	delpage = radix_tree_replace(&mapping->page_tree, page_index(page), newpage);
+	read_unlock_irq(&mapping->tree_lock);
+	if (delpage == NULL) {
+		/*
+		 * Migration is unnecessary since truncating the page is
+		 * in progress. Just release the newpage.
+		 */
+		page_cache_release(newpage);
+		ret = -ENOENT;
+	}
+	return ret;
+}
+
+/*
+ * Check whether the page can be migrated or not.
+ */
+int
+page_migratable(struct page *page, struct page *newpage,
+			int freeable_page_count, struct list_head *vlist)
+{
+	int truncated;
+
+	if (page_mapped(page)) {
+		switch (try_to_unmap(page, vlist)) {
+		case SWAP_FAIL:
+			return -EBUSY;
+		case SWAP_AGAIN:
+			return -EAGAIN;
+		}
+	}
+	if (PageWriteback(page))
+		return -EAGAIN;
+	/* The page might have been truncated */
+	truncated = !PageSwapCache(newpage) && page_mapping(page) == NULL;
+	if (page_count(page) + truncated <= freeable_page_count)
+		return truncated ? -ENOENT : 0;
+	return -EAGAIN;
+}
+
+/*
+ * Wait for the completion of all operations, which are going on
+ * against the page, and copy it.
+ */
+int
+migrate_page_common(struct page *page, struct page *newpage,
+					struct list_head *vlist)
+{
+	long timeout = 5000;	/* XXXX */
+	int ret;
+
+	while (timeout > 0) {
+		BUG_ON(page_count(page) == 0);
+		ret = page_migratable(page, newpage, 2, vlist);
+		switch (ret) {
+		case 0:
+		case -ENOENT:
+			copy_highpage(newpage, page);
+			return ret;
+		case -EBUSY:
+			return ret;
+		case -EAGAIN:
+			writeback_and_free_buffers(page);
+			unlock_page(page);
+			msleep(10);
+			timeout -= 10;
+			lock_page(page);
+			continue;
+		}
+	}
+	return -EBUSY;
+}
+
+/*
+ * Wait for the completion of all operations, which are going on
+ * against the page. After that, move the buffers the page owns
+ * to the newpage and copy the page.
+ */
+int
+migrate_page_buffer(struct page *page, struct page *newpage,
+					struct list_head *vlist)
+{
+	long timeout = 5000;	/* XXXX */
+	int ret;
+
+	while (timeout > 0) {
+		BUG_ON(page_count(page) == 0);
+		ret = page_migratable(page, newpage,
+				2 + !!PagePrivate(page), vlist);
+		switch (ret) {
+		case 0:
+			if (PagePrivate(page))
+				generic_move_buffer(page, newpage);
+			/* fall thru */
+		case -ENOENT: /* truncated */
+			copy_highpage(newpage, page);
+			return ret;
+		case -EBUSY:
+			return ret;
+		case -EAGAIN:
+			wait_on_page_writeback(page);
+			unlock_page(page);
+			msleep(10);
+			timeout -= 10;
+			lock_page(page);
+			continue;
+		}
+	}
+	return -EBUSY;
+}
+
+/*
+ * In some cases, a page migration needs to be rolled back.
+ */
+static int
+unwind_page(struct page *page, struct page *newpage)
+{
+	struct address_space *mapping = page_mapping(newpage);
+	int truncated = !PageSwapCache(newpage) && page_mapping(page) == NULL;
+	long retry = 1000;
+
+	BUG_ON(mapping == NULL);
+
+	/*
+	 * Unwinding is not needed if the newpage has been already truncated.
+	 */
+	if (truncated)
+		goto out;
+
+	/*
+	 * Try to unwind by notifying waiters.  If someone misbehaves,
+	 * we die.
+	 */
+	read_lock_irq(&mapping->tree_lock);
+	page->index = newpage->index;
+	if (PageSwapCache(newpage)) {
+		SetPageSwapCache(page);
+		page->private = newpage->private;
+	} else
+		page->mapping = newpage->mapping;
+	if (radix_tree_replace(&mapping->page_tree, page_index(newpage), page) == NULL) {
+		printk(KERN_ERR "%s(): newpage:%p has gone. We can't roll back page:%p.\n", __FUNCTION__, newpage, page);
+		BUG();
+	}
+	/* no page_cache_get(page); needed */
+	read_unlock_irq(&mapping->tree_lock);
+out:
+	newpage->mapping = NULL;
+	if (PageWriteback(newpage))
+		end_page_writeback(newpage);	/* XXX */
+	newpage->private = 0;
+	ClearPageSwapCache(newpage);
+	/* XXX unmap needed?  No, it shouldn't.  Handled by fault handlers. */
+	unlock_page(newpage);
+	unlock_page(page);
+
+	/*
+	 *  Some requests may be blocked on the newpage. Wait until the
+	 *  requests have gone.
+	 */
+	while (page_count(newpage) > 2) {
+		msleep(10);
+		if (retry-- <= 0) {
+			retry = 1000;
+			printk(KERN_ERR "%s(): page:%p can't be rolled back, as there remain some references to newpage:%p yet.\n", __FUNCTION__, page, newpage);
+			printk(KERN_ERR "newpage %p flags %lx %d %d, page %p flags %lx %d\n",
+			    newpage, newpage->flags, page_count(newpage),
+			    page_mapcount(newpage),
+			    page, page->flags, page_count(page));
+		}
+	}
+
+	BUG_ON(PageUptodate(newpage));
+	BUG_ON(PageDirty(newpage));
+	BUG_ON(PageActive(newpage));
+	BUG_ON(PagePrivate(newpage));
+	BUG_ON(page_count(newpage) != 2);
+	page_cache_release(newpage);
+	return 0;
+}
+
+/*
+ * Try to migrate one page.  Returns non-zero on failure.
+ *   - Lock for the page must be held when invoked.
+ *   - The page must be attached to an address_space.
+ */
+int
+generic_migrate_page(struct page *page, struct page *newpage,
+	int (*migrate_fn)(struct page *, struct page *, struct list_head *))
+{
+	LIST_HEAD(vlist);
+	int ret;
+
+	/*
+	 * Make sure that the newpage must be locked and kept not up-to-date
+	 * during the page migration, so that it's guaranteed that all
+	 * accesses to the newpage will be blocked until everything has
+	 * become ok.
+	 */
+	if (TestSetPageLocked(newpage))
+		BUG();
+
+	if ((ret = replace_pages(page, newpage)))
+		goto out_removing;
+
+	/*
+	 * With cleared PTEs, any accesses via the PTEs to the page
+	 * can be caught and blocked in a pagefault handler.
+	 */
+	if (page_mapped(page)) {
+		while ((ret = try_to_unmap(page, &vlist)) == SWAP_AGAIN)
+			msleep(1);
+		if (ret != SWAP_SUCCESS) {
+			ret = -EBUSY;
+			goto out_busy;
+		}
+	}
+
+	wait_on_page_writeback(page);
+	if (PageSwapCache(page)) {
+		/*
+		 * The page is not mapped from anywhere now.
+		 * Detach it from the swapcache completely.
+		 */
+		ClearPageSwapCache(page);
+		page->private = 0;
+		page->mapping = NULL;
+	}
+
+	/* Wait for all operations against the page to finish. */
+	ret = migrate_fn(page, newpage, &vlist);
+	switch (ret) {
+	case -ENOENT:
+		/* The file the page belongs to has been truncated. */
+		page_cache_get(page);
+		page_cache_release(newpage);
+		newpage->mapping = NULL;
+		break;
+	case 0:
+		break;
+	default:
+		/* The page is busy. Try it later. */
+		goto out_busy;
+	}
+
+	arch_migrate_page(page, newpage);
+
+	if (PageError(page))
+		SetPageError(newpage);
+	if (PageReferenced(page))
+		SetPageReferenced(newpage);
+	if (PageActive(page)) {
+		SetPageActive(newpage);
+		ClearPageActive(page);
+	}
+	if (PageMappedToDisk(page))
+		SetPageMappedToDisk(newpage);
+	if (PageChecked(page))
+		SetPageChecked(newpage);
+	if (PageUptodate(page))
+		SetPageUptodate(newpage);
+	if (PageDirty(page)) {
+		clear_page_dirty_for_io(page);
+		set_page_dirty(newpage);
+	}
+	if (PagePrivate(newpage)) {
+		BUG_ON(newpage->mapping == NULL);
+		unlock_page_buffer(newpage);
+	}
+	/*
+	 * Finally, the newpage has become ready! Wake up all waiters,
+	 * which have been waiting for the completion of the migration.
+	 */
+	if (PageWriteback(newpage))
+		end_page_writeback(newpage);
+	unlock_page(newpage);
+
+	/* map the newpage where the old page have been mapped. */
+	touch_unmapped_address(&vlist);
+	if (PageSwapCache(newpage)) {
+		lock_page(newpage);
+		__remove_exclusive_swap_page(newpage, 1);
+		unlock_page(newpage);
+	}
+
+	page->mapping = NULL;
+	unlock_page(page);
+	page_cache_release(page);
+
+	return 0;
+
+out_busy:
+	/* Roll back all operations. */
+	unwind_page(page, newpage);
+	touch_unmapped_address(&vlist);
+	if (PageSwapCache(page)) {
+		lock_page(page);
+		__remove_exclusive_swap_page(page, 1);
+		unlock_page(page);
+	}
+
+	return ret;
+
+out_removing:
+	if (PagePrivate(newpage))
+		BUG();
+	unlock_page(page);
+	unlock_page(newpage);
+	return ret;
+}
+
+/*
+ * migrate_onepage() can migrate regular pages assigned to pagecache,
+ * swapcache or anonymous memory.
+ */
+struct page *
+migrate_onepage(struct page *page, int nodeid)
+{
+	struct page *newpage;
+	struct address_space *mapping;
+	int ret;
+
+	lock_page(page);
+
+	/*
+	 * Put the page in a radix tree if it isn't in the tree yet.
+	 */
+#ifdef CONFIG_SWAP
+	if (PageAnon(page) && !PageSwapCache(page))
+		if (!add_to_swap(page, GFP_KERNEL)) {
+			unlock_page(page);
+			return ERR_PTR(-ENOSPC);
+		}
+#endif /* CONFIG_SWAP */
+	if ((mapping = page_mapping(page)) == NULL) {
+		/* truncation is in progress */
+		if (PagePrivate(page))
+			try_to_release_page(page, GFP_KERNEL);
+		unlock_page(page);
+		return ERR_PTR(-ENOENT);
+	}
+
+	/*
+	 * Allocate a new page with the same gfp_mask
+	 * as the target page has.
+	 */
+	if (nodeid == MIGRATE_NODE_ANY)
+		newpage = page_cache_alloc(mapping);
+	else
+		newpage = alloc_pages_node(nodeid, mapping_gfp_mask(mapping), 0);
+	if (newpage == NULL) {
+		unlock_page(page);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (mapping->a_ops && mapping->a_ops->migrate_page)
+		ret = mapping->a_ops->migrate_page(page, newpage);
+	else
+		ret = generic_migrate_page(page, newpage, migrate_page_common);
+	if (ret) {
+		BUG_ON(page_count(newpage) != 1);
+		page_cache_release(newpage);
+		return ERR_PTR(ret);
+	}
+	BUG_ON(page_count(page) != 1);
+	page_cache_release(page);
+	return newpage;
+}
+
+static inline int
+need_writeback(struct page *page)
+{
+	return PageDirty(page) && PagePrivate(page) && !PageWriteback(page);
+}
+
+/*
+ * Start writeback I/O against a dirty page with filesystem
+ * specific private data to release them.
+ */
+static inline void page_start_writeback(struct page *page)
+{
+	struct address_space *mapping;
+	int ret;
+
+	if (!need_writeback(page))
+		return;
+	if (TestSetPageLocked(page))
+		return;
+
+	mapping = page_mapping(page);
+
+	if (!mapping)
+		goto out_unlock;
+	/*
+	 * Writeback is not needed if the mapping has a migrate_page method,
+	 * because it can move the page and its private data without writeback I/O.
+	 */
+	if (mapping->a_ops && mapping->a_ops->migrate_page)
+		goto out_unlock;
+	if (!need_writeback(page))
+		goto out_unlock;
+
+	ret = pageout(page, mapping);
+
+	if (ret == PAGE_SUCCESS)
+		return;
+
+out_unlock:
+	unlock_page(page);
+}
+
+/*
+ * This is the main entry point to migrate pages in a specific region.
+ * If a page is inactive, the page may be just released instead of
+ * migration.
+ */
+int try_to_migrate_pages(struct list_head *page_list)
+{
+	struct page *page, *page2, *newpage;
+	LIST_HEAD(pass1_list);
+	LIST_HEAD(pass2_list);
+	LIST_HEAD(discharge_list);
+	int nr_busy = 0;
+	int nr_noswap = 0;
+	struct scan_control sc = {
+		.nr_scanned	= 0,
+		.nr_reclaimed	= 0,
+		.priority	= 0,
+		.gfp_mask	= GFP_ATOMIC,
+		.may_writepage	= 0,
+	};
+
+
+	current->flags |= PF_KSWAPD;    /*  It's fake */
+	list_for_each_entry_safe(page, page2, page_list, lru) {
+		page_start_writeback(page);
+		list_del(&page->lru);
+		if (PageActive(page))
+			list_add(&page->lru, &pass1_list);
+		else
+			list_add(&page->lru, &discharge_list);
+	}
+	/*
+	 * Try to free inactive pages only.
+	 */
+	shrink_list(&discharge_list, &sc);
+	list_splice(&discharge_list, &pass1_list);
+
+	/*
+	 * Try to migrate easily movable pages first.
+	 */
+	list_for_each_entry_safe(page, page2, &pass1_list, lru) {
+		list_del(&page->lru);
+		if (PageLocked(page) || PageWriteback(page) ||
+		    IS_ERR(newpage = migrate_onepage(page, MIGRATE_NODE_ANY))) {
+			if (page_count(page) == 1) {
+				/* the page is already unused */
+				putback_page_to_lru(page_zone(page), page);
+				page_cache_release(page);
+			} else {
+				list_add(&page->lru, &pass2_list);
+			}
+		} else {
+			putback_page_to_lru(page_zone(newpage), newpage);
+			page_cache_release(newpage);
+		}
+	}
+	/*
+	 * Try to migrate the rest of them.
+	 */
+	list_for_each_entry_safe(page, page2, &pass2_list, lru) {
+		list_del(&page->lru);
+		if (IS_ERR(newpage = migrate_onepage(page, MIGRATE_NODE_ANY))) {
+			if (page_count(page) == 1) {
+				/* the page is already unused */
+				putback_page_to_lru(page_zone(page), page);
+				page_cache_release(page);
+			} else {
+				/* truncation may be in progress now. */
+				nr_busy++;
+				if (PTR_ERR(newpage) == -ENOSPC)
+					nr_noswap++;
+				list_add(&page->lru, page_list);
+			}
+		} else {
+			putback_page_to_lru(page_zone(newpage), newpage);
+			page_cache_release(newpage);
+		}
+	}
+	current->flags &= ~PF_KSWAPD;
+	if (nr_noswap) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "memory migration failed: a swap device should be added.\n");
+		return -ENOSPC;
+	}
+	return nr_busy;
+}
+
+EXPORT_SYMBOL(generic_migrate_page);
+EXPORT_SYMBOL(migrate_page_common);
+EXPORT_SYMBOL(migrate_page_buffer);
+EXPORT_SYMBOL(page_migratable);
+EXPORT_SYMBOL(migrate_onepage);
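
A hedged sketch of a caller of try_to_migrate_pages() (kernel context; the
hot-remove path that builds the list is not part of this file): pages are
isolated from the LRU onto a private list, handed to try_to_migrate_pages(),
and whatever a positive return leaves on the list is still busy and can simply
be retried.

/* Sketch only: not part of the patch. */
LIST_HEAD(page_list);
int nr_busy;

/* ... isolate the pages to be evacuated onto page_list, holding a
 * reference on each (e.g. via steal_page_from_lru()) ... */

do {
	nr_busy = try_to_migrate_pages(&page_list);
	if (nr_busy < 0)
		return nr_busy;	/* -ENOSPC: no swap device configured */
} while (nr_busy > 0);		/* busy pages were put back on page_list */
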
--- linux.orig/mm/page_alloc.c~FROM-2.6.12-rc5-mm1-resubmit-sparsemem-base-simple-numa-remap-space-allocator	2005-05-31 11:38:52.000000000 -0700
+++ linux/mm/page_alloc.c	2005-05-31 12:41:34.000000000 -0700
@@ -68,7 +68,7 @@
  * Used by page_zone() to look up the address of the struct zone whose
  * id is encoded in the upper bits of page->flags
  */
-struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
+struct zone *zone_table[1 << ZONETABLE_SHIFT];
 EXPORT_SYMBOL(zone_table);
 
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -1587,11 +1587,17 @@
 void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn)
 {
-	struct page *start = pfn_to_page(start_pfn);
 	struct page *page;
+	unsigned long end_pfn = start_pfn + size;
+	unsigned long pfn;
 
-	for (page = start; page < (start + size); page++) {
-		set_page_zone(page, NODEZONE(nid, zone));
+	for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
+		if (!early_pfn_valid(pfn))
+			continue;
+		if (!early_pfn_in_nid(pfn, nid))
+			continue;
+		page = pfn_to_page(pfn);
+		set_page_links(page, zone, nid, pfn);
 		set_page_count(page, 0);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
@@ -1615,6 +1621,20 @@
 	}
 }
 
+#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
+void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
+		unsigned long size)
+{
+	unsigned long snum = pfn_to_section_nr(pfn);
+	unsigned long end = pfn_to_section_nr(pfn + size);
+
+	if (FLAGS_HAS_NODE)
+		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
+	else
+		for (; snum <= end; snum++)
+			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
+}
+
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
@@ -1643,7 +1663,6 @@
 		unsigned long size, realsize;
 		unsigned long batch;
 
-		zone_table[NODEZONE(nid, j)] = zone;
 		realsize = size = zones_size[j];
 		if (zholes_size)
 			realsize -= zholes_size[j];
@@ -1740,6 +1759,8 @@
 
 		memmap_init(size, nid, j, zone_start_pfn);
 
+		zonetable_add(zone, nid, j, zone_start_pfn, size);
+
 		zone_start_pfn += size;
 
 		zone_init_free_lists(pgdat, zone, zone->spanned_pages);
@@ -1748,24 +1769,30 @@
 
 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 {
-	unsigned long size;
-
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
 		return;
 
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
 	/* ia64 gets its own node_mem_map, before this, without bootmem */
 	if (!pgdat->node_mem_map) {
+		unsigned long size;
+		struct page *map;
+
 		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
-		pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
+		map = alloc_remap(pgdat->node_id, size);
+		if (!map)
+			map = alloc_bootmem_node(pgdat, size);
+		pgdat->node_mem_map = map;
 	}
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	/*
 	 * With no DISCONTIG, the global mem_map is just set as node 0's
 	 */
 	if (pgdat == NODE_DATA(0))
 		mem_map = NODE_DATA(0)->node_mem_map;
 #endif
+#endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
 void __init free_area_init_node(int nid, struct pglist_data *pgdat,
@@ -1781,18 +1808,18 @@
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 static bootmem_data_t contig_bootmem_data;
 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
 
 EXPORT_SYMBOL(contig_page_data);
+#endif
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	free_area_init_node(0, &contig_page_data, zones_size,
+	free_area_init_node(0, NODE_DATA(0), zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
-#endif
 
 #ifdef CONFIG_PROC_FS
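
To make the zone_table indexing introduced above concrete, a short worked
example (assuming ZONES_SHIFT == 2, i.e. the three zones named in the
zone_names[] table):

	/* With room for the node id in page->flags (FLAGS_HAS_NODE),
	 * zonetable_add() installs one entry per (node, zone) pair:
	 */
	ZONETABLE_INDEX(3 /* nid */, 1 /* ZONE_NORMAL */) == (3 << 2) | 1 == 13

	/* Without it (SPARSEMEM with many sections), the loop installs one
	 * entry per (section, zone) pair, so page_zone() can still look the
	 * zone up from the section number kept in page->flags:
	 */
	ZONETABLE_INDEX(42 /* section nr */, 1 /* ZONE_NORMAL */) == (42 << 2) | 1 == 169
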
 
--- linux.orig/mm/rmap.c~AA-PM-14-try_to_unmap_force	2005-05-31 12:42:02.000000000 -0700
+++ linux/mm/rmap.c	2005-05-31 12:42:02.000000000 -0700
@@ -46,6 +46,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -506,11 +507,81 @@
 	}
 }
 
+struct page_va_list {
+	struct mm_struct *mm;
+	unsigned long addr;
+	struct list_head list;
+};
+
+/*
+ * Record the address space and the mapped address at which the target page
+ * was found, so that the mapping can be re-established after a forced unmap.
+ */
+static int
+record_unmapped_address(struct list_head *force, struct mm_struct *mm,
+				unsigned long address)
+{
+	struct page_va_list *vlist;
+
+	vlist = kmalloc(sizeof(struct page_va_list), GFP_KERNEL);
+	if (vlist == NULL)
+		return -ENOMEM;
+	spin_lock(&mmlist_lock);
+	if (!atomic_read(&mm->mm_users))
+		vlist->mm = NULL;
+	else {
+		vlist->mm = mm;
+		atomic_inc(&mm->mm_users);
+	}
+	spin_unlock(&mmlist_lock);
+
+	if (vlist->mm == NULL)
+		kfree(vlist);
+	else {
+		vlist->addr = address;
+		list_add(&vlist->list, force);
+	}
+	return 0;
+}
+
+/*
+ * Touch each address recorded in the vlist so that the corresponding
+ * page is mapped back into its address space.
+ */
+int
+touch_unmapped_address(struct list_head *vlist)
+{
+	struct page_va_list *v1, *v2;
+	struct vm_area_struct *vma;
+	int ret = 0;
+	int error;
+
+	list_for_each_entry_safe(v1, v2, vlist, list) {
+		list_del(&v1->list);
+		down_read(&v1->mm->mmap_sem);
+		if (atomic_read(&v1->mm->mm_users) == 1)
+			goto out;
+		vma = find_vma(v1->mm, v1->addr);
+		if (vma == NULL)
+			goto out;
+		error = get_user_pages(current, v1->mm, v1->addr, 1,
+					0, 0, NULL, NULL);
+		if (error < 0)
+			ret = error;
+	out:
+		up_read(&v1->mm->mmap_sem);
+		mmput(v1->mm);
+		kfree(v1);
+	}
+	return ret;
+}
+
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+    struct list_head *force)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -528,15 +599,18 @@
 	if (IS_ERR(pte))
 		goto out;
 
+	if (force && record_unmapped_address(force, mm, address))
+		goto out_unmap;
+
 	/*
 	 * If the page is mlock()d, we cannot swap it out.
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
-			ptep_clear_flush_young(vma, address, pte)) {
-		ret = SWAP_FAIL;
-		goto out_unmap;
+	if (((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
+	     ptep_clear_flush_young(vma, address, pte)) && force == NULL) {
+		ret = SWAP_FAIL;
+		goto out_unmap;
 	}
 
 	/*
@@ -699,7 +773,7 @@
 	spin_unlock(&mm->page_table_lock);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, struct list_head *force)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
@@ -710,7 +784,7 @@
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, force);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			break;
 	}
@@ -727,7 +801,7 @@
  *
  * This function is only called from try_to_unmap for object-based pages.
  */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, struct list_head *force)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -741,7 +815,7 @@
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, force);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			goto out;
 	}
@@ -830,7 +904,7 @@
  * SWAP_AGAIN	- we missed a mapping, try again later
  * SWAP_FAIL	- the page is unswappable
  */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, struct list_head *force)
 {
 	int ret;
 
@@ -838,9 +912,9 @@
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page);
+		ret = try_to_unmap_anon(page, force);
 	else
-		ret = try_to_unmap_file(page);
+		ret = try_to_unmap_file(page, force);
 
 	if (!page_mapped(page))
 		ret = SWAP_SUCCESS;
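
A sketch of how the new force list is intended to be used by the migration
path (the caller below is an assumption; only try_to_unmap() and
touch_unmapped_address() come from this patch):

	/* The page must be locked by the caller, as try_to_unmap() requires. */
	static int unmap_and_relink(struct page *page)
	{
		LIST_HEAD(vlist);
		int ret;

		/* force != NULL: the young/mlock bailouts above are skipped */
		ret = try_to_unmap(page, &vlist);

		/* ... copy the page contents to the new location here ... */

		/*
		 * Fault the recorded (mm, address) pairs back in and drop the
		 * mm references taken by record_unmapped_address().
		 */
		touch_unmapped_address(&vlist);

		return ret == SWAP_SUCCESS ? 0 : -EAGAIN;
	}
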
--- linux.orig/mm/shmem.c~AA-PM-09-migrate-swapcache-validate	2005-05-31 12:41:56.000000000 -0700
+++ linux/mm/shmem.c	2005-05-31 12:42:08.000000000 -0700
@@ -93,7 +93,16 @@
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 */
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/*
+	 * XXXX: This is temporary code, to be replaced with a proper one
+	 *	 once the scheme for specifying hot-removable regions is defined.
+	 *				25/Sep/2004	-- taka
+	 */
+	return alloc_pages(gfp_mask & ~__GFP_HIGHMEM, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+#else
 	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+#endif
 }
 
 static inline void shmem_dir_free(struct page *page)
@@ -1017,6 +1026,14 @@
 			page_cache_release(swappage);
 			goto repeat;
 		}
+		if (!PageSwapCache(swappage)) {
+			/* page migration has occurred */
+			shmem_swp_unmap(entry);
+			spin_unlock(&info->lock);
+			unlock_page(swappage);
+			page_cache_release(swappage);
+			goto repeat;
+		}
 		if (PageWriteback(swappage)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
--- /dev/null	2005-03-30 22:36:15.000000000 -0800
+++ linux/mm/sparse.c	2005-05-31 11:39:05.000000000 -0700
@@ -0,0 +1,137 @@
+/*
+ * sparse memory mappings.
+ */
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <asm/dma.h>
+
+/*
+ * Permanent SPARSEMEM data:
+ *
+ * 1) mem_section	- memory sections, mem_map's for valid memory
+ */
+struct mem_section mem_section[NR_MEM_SECTIONS];
+EXPORT_SYMBOL(mem_section);
+
+/* Record a memory area against a node. */
+void memory_present(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long pfn;
+
+	start &= PAGE_SECTION_MASK;
+	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
+		unsigned long section = pfn_to_section_nr(pfn);
+		if (!mem_section[section].section_mem_map)
+			mem_section[section].section_mem_map = SECTION_MARKED_PRESENT;
+	}
+}
+
+/*
+ * Only used by the i386 NUMA architectures, but relatively
+ * generic code.
+ */
+unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
+						     unsigned long end_pfn)
+{
+	unsigned long pfn;
+	unsigned long nr_pages = 0;
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+		if (nid != early_pfn_to_nid(pfn))
+			continue;
+
+		if (pfn_valid(pfn))
+			nr_pages += PAGES_PER_SECTION;
+	}
+
+	return nr_pages * sizeof(struct page);
+}
+
+/*
+ * Subtle: we encode the real pfn into the mem_map such that
+ * the identity pfn - section_mem_map will return the actual
+ * physical page frame number.
+ */
+static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
+{
+	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
+}
+
+/*
+ * We need this if we ever free the mem_maps.  While not implemented yet,
+ * this function is included for parity with its sibling.
+ */
+static __attribute((unused))
+struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
+{
+	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
+}
+
+static int sparse_init_one_section(struct mem_section *ms,
+		unsigned long pnum, struct page *mem_map)
+{
+	if (!valid_section(ms))
+		return -EINVAL;
+
+	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
+
+	return 1;
+}
+
+static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+{
+	struct page *map;
+	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
+
+	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
+	if (map)
+		return map;
+
+	map = alloc_bootmem_node(NODE_DATA(nid),
+			sizeof(struct page) * PAGES_PER_SECTION);
+	if (map)
+		return map;
+
+	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
+	mem_section[pnum].section_mem_map = 0;
+	return NULL;
+}
+
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!valid_section_nr(pnum))
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (map)
+			sparse_init_one_section(&mem_section[pnum], pnum, map);
+	}
+}
+
+/*
+ * Returns the number of sections whose mem_maps were properly
+ * set.  If this is <= 0, the passed-in map was not consumed
+ * and must be freed.
+ */
+int sparse_add_one_section(int start_pfn, int nr_pages, struct page *map)
+{
+	struct mem_section *ms = __pfn_to_section(start_pfn);
+
+	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
+		return -EEXIST;
+
+	ms->section_mem_map |= SECTION_MARKED_PRESENT;
+
+	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
+}
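
A short worked check of the mem_map encoding above: for any pfn inside
section pnum,

	coded = (unsigned long)(mem_map - section_nr_to_pfn(pnum));

	(struct page *)coded + pfn
		== mem_map + (pfn - section_nr_to_pfn(pnum))
		== the mem_map entry for that pfn

so a sparsemem pfn_to_page() only needs the section's section_mem_map and the
pfn itself, and sparse_decode_mem_map(coded, pnum) recovers the original
mem_map pointer.
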
--- linux.orig/mm/swap_state.c~AA-PM-05-swapper_space-gfpmask	2005-05-31 12:41:53.000000000 -0700
+++ linux/mm/swap_state.c	2005-05-31 12:41:53.000000000 -0700
@@ -37,6 +37,7 @@
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
 	.tree_lock	= RW_LOCK_UNLOCKED,
 	.a_ops		= &swap_aops,
+	.flags		= GFP_HIGHUSER,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
 };
@@ -140,7 +141,7 @@
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
  */
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, unsigned int gfp_mask)
 {
 	swp_entry_t entry;
 	int err;
@@ -165,7 +166,7 @@
 		 * Add it to the swap cache and mark it dirty
 		 */
 		err = __add_to_swap_cache(page, entry,
-				GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 		switch (err) {
 		case 0:				/* Success */
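
The new gfp_mask argument is simply forwarded to the radix-tree insertion, so
callers can pick what suits their context; the reclaim call site keeps the old
behaviour, while a migration-path caller in process context could reasonably
pass a sleepable mask (the second call below is an assumption, not part of
this patch):

	add_to_swap(page, GFP_ATOMIC);	/* shrink_list(), as before */
	add_to_swap(page, GFP_KERNEL);	/* sleepable migration context */
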
--- linux.orig/mm/swapfile.c~AA-PM-09-migrate-swapcache-validate	2005-05-31 12:41:56.000000000 -0700
+++ linux/mm/swapfile.c	2005-05-31 12:42:03.000000000 -0700
@@ -337,11 +337,12 @@
  * Work out if there are any other processes sharing this
  * swap cache page. Free it if you can. Return success.
  */
-int remove_exclusive_swap_page(struct page *page)
+int __remove_exclusive_swap_page(struct page *page, int force)
 {
 	int retval;
 	struct swap_info_struct * p;
 	swp_entry_t entry;
+	int mapcount = force ? page_mapcount(page) : 0;
 
 	BUG_ON(PagePrivate(page));
 	BUG_ON(!PageLocked(page));
@@ -350,7 +351,7 @@
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != 2) /* 2: us + cache */
+	if (page_count(page) - mapcount != 2) /* 2: us + cache */
 		return 0;
 
 	entry.val = page->private;
@@ -363,7 +364,8 @@
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
 		write_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == 2) && !PageWriteback(page)) {
+		mapcount = force ? page_mapcount(page) : 0;
+		if ((page_count(page) - mapcount == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
@@ -641,6 +643,7 @@
 		 */
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
+again:
 		page = read_swap_cache_async(entry, NULL, 0);
 		if (!page) {
 			/*
@@ -675,6 +678,12 @@
 		wait_on_page_locked(page);
 		wait_on_page_writeback(page);
 		lock_page(page);
+		if (!PageSwapCache(page)) {
+			/* page migration has occurred */
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
 		wait_on_page_writeback(page);
 
 		/*
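
The force variant above implies a compatibility wrapper for the existing
callers; presumably something like the following lives in the headers touched
elsewhere in this series (body assumed):

	static inline int remove_exclusive_swap_page(struct page *page)
	{
		/* old behaviour: do not discount mapped users */
		return __remove_exclusive_swap_page(page, 0);
	}
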
--- linux.orig/mm/thrash.c~AA-PM-15-swap_token-kthread	2005-05-31 12:42:02.000000000 -0700
+++ linux/mm/thrash.c	2005-05-31 12:42:02.000000000 -0700
@@ -54,6 +54,9 @@
 	struct mm_struct *mm;
 	int reason;
 
+	if (current->mm == NULL)
+		return;
+
 	/* We have the token. Let others know we still need it. */
 	if (has_swap_token(current->mm)) {
 		current->mm->recent_pagein = 1;
--- linux.orig/mm/truncate.c~AA-PM-11.0-migrate-truncate	2005-05-31 12:41:57.000000000 -0700
+++ linux/mm/truncate.c	2005-05-31 12:41:58.000000000 -0700
@@ -90,6 +90,34 @@
 	return 1;
 }
 
+static inline struct page *lock_replace_page(struct page **p, struct address_space *mapping)
+{
+	struct page *page = *p;
+	struct page *newpage;
+
+	lock_page(page);
+
+	if (page->mapping != NULL)
+		return page;
+
+	unlock_page(page);
+
+	newpage = find_lock_page(mapping, page->index);
+	if (!newpage) {
+		/*
+		 * put the page back the way it was and let
+		 * the normal truncate code handle it
+		 */
+		lock_page(page);
+		return page;
+	}
+
+	/* memory migration has been rolled back. */
+	page_cache_release(page);
+	*p = newpage;
+	return newpage;
+}
+
 /**
  * truncate_inode_pages - truncate *all* the pages from an offset
  * @mapping: mapping to truncate
@@ -140,6 +168,9 @@
 				unlock_page(page);
 				continue;
 			}
+			/* The page->mapping check is done in
+			 * truncate_complete_page(), in case the page
+			 * has been migrated. */
 			truncate_complete_page(mapping, page);
 			unlock_page(page);
 		}
@@ -167,9 +198,9 @@
 			continue;
 		}
 		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+			struct page *page;
 
-			lock_page(page);
+			page = lock_replace_page(&pvec.pages[i], mapping);
 			wait_on_page_writeback(page);
 			if (page->index > next)
 				next = page->index;
@@ -267,11 +298,11 @@
 		pagevec_lookup(&pvec, mapping, next,
 			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+			struct page *page;
 			pgoff_t page_index;
 			int was_dirty;
 
-			lock_page(page);
+			page = lock_replace_page(&pvec.pages[i], mapping);
 			if (page->mapping != mapping) {
 				unlock_page(page);
 				continue;
--- linux.orig/mm/vmalloc.c~AA-PM-26-vmalloc	2005-05-31 12:42:08.000000000 -0700
+++ linux/mm/vmalloc.c	2005-05-31 12:42:08.000000000 -0700
@@ -477,7 +477,16 @@
  */
 void *vmalloc(unsigned long size)
 {
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/*
+	 * XXXX: This is temporary code, to be replaced with a proper one
+	 *	 once the scheme for specifying hot-removable regions is defined.
+	 *				25/Sep/2004	-- taka
+	 */
+       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+#else
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+#endif
 }
 
 EXPORT_SYMBOL(vmalloc);
--- linux.orig/mm/vmscan.c~AA-PM-01-steal_page_from_lru	2005-05-31 11:39:12.000000000 -0700
+++ linux/mm/vmscan.c	2005-05-31 12:42:10.000000000 -0700
@@ -39,48 +39,6 @@
 
 #include <linux/swapops.h>
 
-/* possible outcome of pageout() */
-typedef enum {
-	/* failed to write page out, page is locked */
-	PAGE_KEEP,
-	/* move page to the active list, page is locked */
-	PAGE_ACTIVATE,
-	/* page has been sent to the disk successfully, page is unlocked */
-	PAGE_SUCCESS,
-	/* page is clean and locked */
-	PAGE_CLEAN,
-} pageout_t;
-
-struct scan_control {
-	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
-	unsigned long nr_to_scan;
-
-	/* Incremented by the number of inactive pages that were scanned */
-	unsigned long nr_scanned;
-
-	/* Incremented by the number of pages reclaimed */
-	unsigned long nr_reclaimed;
-
-	unsigned long nr_mapped;	/* From page_state */
-
-	/* How many pages shrink_cache() should reclaim */
-	int nr_to_reclaim;
-
-	/* Ask shrink_caches, or shrink_zone to scan at this priority */
-	unsigned int priority;
-
-	/* This context's GFP mask */
-	unsigned int gfp_mask;
-
-	int may_writepage;
-
-	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
-	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
-	 * In this context, it doesn't matter that we scan the
-	 * whole list at once. */
-	int swap_cluster_max;
-};
-
 /*
  * The list of shrinker callbacks used by to apply pressure to
  * ageable caches.
@@ -292,7 +250,7 @@
 /*
  * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+pageout_t pageout(struct page *page, struct address_space *mapping)
 {
 	/*
 	 * If the page is dirty, only perform writeback if that write
@@ -363,7 +321,7 @@
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
-static int shrink_list(struct list_head *page_list, struct scan_control *sc)
+int shrink_list(struct list_head *page_list, struct scan_control *sc)
 {
 	LIST_HEAD(ret_pages);
 	struct pagevec freed_pvec;
@@ -408,7 +366,7 @@
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!add_to_swap(page))
+			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
 		}
 #endif /* CONFIG_SWAP */
@@ -422,7 +380,7 @@
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page)) {
+			switch (try_to_unmap(page, NULL)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -572,22 +530,8 @@
 	while (scan++ < nr_to_scan && !list_empty(src)) {
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
-
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		if (isolate_lru_onepage(page, src, dst))
 			nr_taken++;
-		}
 	}
 
 	*scanned = scan;
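
The open-coded isolation logic removed above is what the new
isolate_lru_onepage() helper (added earlier in this series) presumably factors
out; a sketch consistent with the removed lines:

	/* Returns 1 if the page was moved to dst, 0 if it is being freed. */
	static int isolate_lru_onepage(struct page *page, struct list_head *src,
				       struct list_head *dst)
	{
		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/* It is being freed elsewhere */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, src);
			return 0;
		}
		list_add(&page->lru, dst);
		return 1;
	}
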
@@ -640,13 +584,10 @@
 		 */
 		while (!list_empty(&page_list)) {
 			page = lru_to_page(&page_list);
-			if (TestSetPageLRU(page))
-				BUG();
 			list_del(&page->lru);
-			if (PageActive(page))
-				add_page_to_active_list(zone, page);
-			else
-				add_page_to_inactive_list(zone, page);
+			if (PageActive(page))
+				ClearPageActive(page);
+			__putback_page_to_lru(zone, page);
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
 				__pagevec_release(&pvec);